Cassandra CQL Examples

Practical CQL examples and usage patterns for HeliosDB's Cassandra compatibility.

Connection Examples

Python (cassandra-driver)

from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider

# Basic connection
cluster = Cluster(['localhost'])
session = cluster.connect()

# With authentication
auth = PlainTextAuthProvider(username='admin', password='password')
cluster = Cluster(['localhost'], auth_provider=auth)
session = cluster.connect('my_keyspace')

Java (DataStax Driver)

import com.datastax.oss.driver.api.core.CqlSession;
import java.net.InetSocketAddress;

CqlSession session = CqlSession.builder()
    .addContactPoint(new InetSocketAddress("localhost", 9042))
    .withLocalDatacenter("datacenter1")
    .withKeyspace("my_keyspace")
    .build();

Node.js

const cassandra = require('cassandra-driver');

const client = new cassandra.Client({
    contactPoints: ['localhost'],
    localDataCenter: 'datacenter1',
    keyspace: 'my_keyspace'
});

// connect() returns a promise; await it inside an async function
await client.connect();

cqlsh

# Connect
cqlsh localhost 9042

# Connect with credentials
cqlsh localhost 9042 -u admin -p password

# Execute file
cqlsh -f schema.cql

Keyspace Management

Create Keyspace

-- Simple replication
CREATE KEYSPACE my_keyspace WITH REPLICATION = {
    'class': 'SimpleStrategy',
    'replication_factor': 3
};

-- Network topology
CREATE KEYSPACE production WITH REPLICATION = {
    'class': 'NetworkTopologyStrategy',
    'dc1': 3,
    'dc2': 2
};

-- With durable writes set explicitly (true is the default)
CREATE KEYSPACE my_keyspace WITH REPLICATION = {
    'class': 'SimpleStrategy',
    'replication_factor': 1
} AND DURABLE_WRITES = true;
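
-- Switch the session to the new keyspace
USE my_keyspace;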

Modify Keyspace

-- Alter replication
ALTER KEYSPACE my_keyspace WITH REPLICATION = {
    'class': 'SimpleStrategy',
    'replication_factor': 5
};

-- Drop keyspace
DROP KEYSPACE IF EXISTS my_keyspace;

Table Management

Create Tables

-- Basic table
CREATE TABLE users (
    user_id UUID PRIMARY KEY,
    username TEXT,
    email TEXT,
    created_at TIMESTAMP
);

-- With clustering columns
CREATE TABLE user_events (
    user_id UUID,
    event_time TIMESTAMP,
    event_type TEXT,
    event_data MAP<TEXT, TEXT>,
    PRIMARY KEY ((user_id), event_time)
) WITH CLUSTERING ORDER BY (event_time DESC);

-- Compound partition key
CREATE TABLE sensor_data (
    sensor_id UUID,
    day DATE,
    reading_time TIMESTAMP,
    value DOUBLE,
    PRIMARY KEY ((sensor_id, day), reading_time)
);
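
-- Note: queries on sensor_data must restrict both partition key
-- components, e.g. WHERE sensor_id = ? AND day = '2024-12-01'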

-- With TTL
CREATE TABLE session_data (
    session_id UUID PRIMARY KEY,
    user_id UUID,
    data BLOB
) WITH default_time_to_live = 3600;  -- 1 hour

-- With collections (uses the address_type UDT defined in the next section)
CREATE TABLE user_profiles (
    user_id UUID PRIMARY KEY,
    name TEXT,
    tags SET<TEXT>,
    preferences MAP<TEXT, TEXT>,
    addresses LIST<FROZEN<address_type>>
);

User-Defined Types

-- Create type
CREATE TYPE address_type (
    street TEXT,
    city TEXT,
    zip TEXT,
    country TEXT
);

-- Use in table
CREATE TABLE contacts (
    id UUID PRIMARY KEY,
    name TEXT,
    home_address FROZEN<address_type>,
    work_address FROZEN<address_type>
);
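
UDT values use named-field literal syntax; a minimal example with illustrative values:

INSERT INTO contacts (id, name, home_address)
VALUES (uuid(), 'Alice', {street: '1 Main St', city: 'Springfield', zip: '01101', country: 'US'});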

Indexes

-- Secondary index
CREATE INDEX ON users (email);

-- Custom index name
CREATE INDEX users_by_username ON users (username);

-- Drop index
DROP INDEX users_by_username;
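
With the index in place, equality lookups on email no longer need ALLOW FILTERING:

SELECT * FROM users WHERE email = 'alice@example.com';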

CRUD Operations

INSERT

-- Basic insert
INSERT INTO users (user_id, username, email, created_at)
VALUES (uuid(), 'alice', 'alice@example.com', toTimestamp(now()));

-- With TTL
INSERT INTO users (user_id, username, email)
VALUES (uuid(), 'bob', 'bob@example.com')
USING TTL 86400;

-- With timestamp
INSERT INTO users (user_id, username, email)
VALUES (uuid(), 'charlie', 'charlie@example.com')
USING TIMESTAMP 1640000000000000;

-- If not exists (LWT)
INSERT INTO users (user_id, username, email)
VALUES (123e4567-e89b-12d3-a456-426614174000, 'unique', 'unique@example.com')
IF NOT EXISTS;
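
-- LWT statements return a result row with an [applied] boolean
-- indicating whether the write actually took effect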

-- Insert JSON
INSERT INTO users JSON '{"user_id": "550e8400-e29b-41d4-a716-446655440000", "username": "json_user", "email": "json@example.com"}';

SELECT

-- Basic select
SELECT * FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Select specific columns
SELECT username, email FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- With ordering (clustering)
SELECT * FROM user_events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
ORDER BY event_time DESC
LIMIT 10;

-- Time range
SELECT * FROM user_events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
AND event_time >= '2024-01-01'
AND event_time < '2024-02-01';

-- IN clause
SELECT * FROM users
WHERE user_id IN (
    550e8400-e29b-41d4-a716-446655440000,
    550e8400-e29b-41d4-a716-446655440001
);

-- Token range scan
SELECT * FROM users
WHERE token(user_id) > token(550e8400-e29b-41d4-a716-446655440000)
LIMIT 100;

-- Allow filtering (forces a scan; avoid on large tables)
SELECT * FROM users WHERE email = 'alice@example.com' ALLOW FILTERING;

-- As JSON
SELECT JSON * FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- TTL and write-time metadata for a column
SELECT user_id, TTL(username), WRITETIME(username) FROM users;

UPDATE

-- Basic update
UPDATE users SET email = 'new@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Multiple columns
UPDATE users
SET email = 'new@example.com', username = 'new_name'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- With TTL
UPDATE users USING TTL 86400
SET email = 'temp@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Conditional update (LWT)
UPDATE users
SET email = 'verified@example.com'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF email = 'unverified@example.com';

-- Update collections
UPDATE user_profiles
SET tags = tags + {'premium'}
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

UPDATE user_profiles
SET preferences['theme'] = 'dark'
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

DELETE

-- Delete row
DELETE FROM users WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Delete column
DELETE email FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

-- Delete only if the row exists (LWT)
DELETE FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF EXISTS;

-- Delete only if a column matches (LWT)
DELETE FROM users
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000
IF email = 'delete@example.com';

-- Delete from collection
UPDATE user_profiles
SET tags = tags - {'old_tag'}
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

Batch Operations

-- Logged batch (atomic across statements, but not isolated; keep batches small)
BEGIN BATCH
    INSERT INTO users (user_id, username, email)
    VALUES (uuid(), 'user1', 'user1@example.com');

    INSERT INTO user_events (user_id, event_time, event_type)
    VALUES (550e8400-e29b-41d4-a716-446655440000, toTimestamp(now()), 'signup');
APPLY BATCH;

-- Unlogged batch (no atomicity guarantee across partitions)
BEGIN UNLOGGED BATCH
    UPDATE users SET email = 'new1@example.com'
    WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;

    UPDATE users SET email = 'new2@example.com'
    WHERE user_id = 550e8400-e29b-41d4-a716-446655440001;
APPLY BATCH;

-- Counter batch
BEGIN COUNTER BATCH
    UPDATE page_views SET count = count + 1
    WHERE page_id = 'home';

    UPDATE page_views SET count = count + 1
    WHERE page_id = 'about';
APPLY BATCH;
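
The counter batch above assumes a counter table along these lines; counter columns must live in a table whose non-key columns are all counters:

CREATE TABLE page_views (
    page_id TEXT PRIMARY KEY,
    count COUNTER
);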

Prepared Statements

Python

import uuid

# Prepare statement
prepared = session.prepare(
    "INSERT INTO users (user_id, username, email) VALUES (?, ?, ?)"
)

# Execute with parameters
session.execute(prepared, [uuid.uuid4(), 'alice', 'alice@example.com'])

# Batch with prepared
from cassandra.query import BatchStatement, BatchType

batch = BatchStatement(batch_type=BatchType.LOGGED)
for user in users:  # users: any iterable of objects with id, name, email
    batch.add(prepared, [user.id, user.name, user.email])
session.execute(batch)

Java

PreparedStatement prepared = session.prepare(
    "INSERT INTO users (user_id, username, email) VALUES (?, ?, ?)"
);

BoundStatement bound = prepared.bind(
    UUID.randomUUID(), "alice", "alice@example.com"
);
session.execute(bound);
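
Prepare statements once (typically at startup) and reuse them across requests; re-preparing the same query each time costs an extra round trip.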

Aggregations

-- Count (a full-table count scans every partition; use sparingly)
SELECT COUNT(*) FROM users;

-- Sum and average
SELECT SUM(amount), AVG(amount) FROM transactions
WHERE account_id = 550e8400-e29b-41d4-a716-446655440000;

-- Min and max
SELECT MIN(created_at), MAX(created_at) FROM events
WHERE user_id = 550e8400-e29b-41d4-a716-446655440000;
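
If HeliosDB supports CQL's GROUP BY (added in Cassandra 3.10), aggregates can be grouped per partition:

SELECT account_id, SUM(amount) FROM transactions GROUP BY account_id;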

Time Series Examples

Create Time Series Table

CREATE TABLE sensor_readings (
    sensor_id UUID,
    bucket_date DATE,
    reading_time TIMESTAMP,
    temperature DOUBLE,
    humidity DOUBLE,
    PRIMARY KEY ((sensor_id, bucket_date), reading_time)
) WITH CLUSTERING ORDER BY (reading_time DESC);

Insert Readings

INSERT INTO sensor_readings (sensor_id, bucket_date, reading_time, temperature, humidity)
VALUES (
    550e8400-e29b-41d4-a716-446655440000,
    toDate(now()),
    toTimestamp(now()),
    23.5,
    65.2
);

Query Patterns

-- Latest readings
SELECT * FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01'
LIMIT 100;

-- Time range
SELECT * FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01'
AND reading_time >= '2024-12-01 00:00:00'
AND reading_time < '2024-12-01 12:00:00';

-- Aggregations
SELECT AVG(temperature), MAX(temperature), MIN(temperature)
FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date = '2024-12-01';
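
Because rows are bucketed by day, a range spanning several days takes one query per bucket, or an IN on the bucket column:

SELECT * FROM sensor_readings
WHERE sensor_id = 550e8400-e29b-41d4-a716-446655440000
AND bucket_date IN ('2024-11-30', '2024-12-01');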

Materialized Views

-- Create view
CREATE MATERIALIZED VIEW users_by_email AS
    SELECT * FROM users
    WHERE email IS NOT NULL AND user_id IS NOT NULL
    PRIMARY KEY (email, user_id);

-- Query view
SELECT * FROM users_by_email WHERE email = 'alice@example.com';
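
-- Drop view
DROP MATERIALIZED VIEW IF EXISTS users_by_email;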

Migration from Cassandra

Schema Export

# Export schema from Cassandra
cqlsh -e "DESCRIBE KEYSPACE my_keyspace" > schema.cql

# Import to HeliosDB
cqlsh heliosdb-host -f schema.cql

Data Migration

from cassandra.cluster import Cluster

# Source (Cassandra)
source = Cluster(['cassandra-host']).connect('my_keyspace')

# Target (HeliosDB)
target = Cluster(['heliosdb-host']).connect('my_keyspace')

# Migrate data
rows = source.execute("SELECT * FROM users")
prepared = target.prepare(
    "INSERT INTO users (user_id, username, email, created_at) VALUES (?, ?, ?, ?)"
)

for row in rows:
    target.execute(prepared, [row.user_id, row.username, row.email, row.created_at])
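
For large tables, the driver pages through results automatically; raising Session.default_fetch_size can improve throughput, and a dedicated bulk loader (e.g. DSBulk, if it works against HeliosDB) beats a row-at-a-time loop.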

Related: README.md | CONFIGURATION.md | COMPATIBILITY.md

Last Updated: December 2025