Skip to content

HeliosDB API Quick Reference

Fast reference guide for the most common HeliosDB operations. For comprehensive documentation, see API_REFERENCE.md.

Table of Contents

  - Getting Started
  - Common Patterns
  - Storage Operations
  - Graph Queries
  - Document Store
  - Time-Series Data
  - Caching
  - Transactions
  - Code Snippets
  - Performance Tips
  - Common Errors
  - Additional Resources
Getting Started

Installation

[dependencies]
heliosdb-storage = "6.0"
heliosdb-vector = "6.0"
heliosdb-graph = "6.0"
heliosdb-document = "6.0"
heliosdb-replication = "6.0"
heliosdb-unified-cache = "6.0"

Basic Setup

use heliosdb_storage::{LsmStorageEngine, StorageConfig};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Initialize storage engine
    let config = StorageConfig::default();
    let engine = LsmStorageEngine::new("/data/helios", config).await?;

    // Ready to use
    Ok(())
}

Common Patterns

Error Handling

use anyhow::Result;

async fn operation() -> Result<()> {
    let value = engine.get(&key).await?;
    // Process value
    Ok(())
}

Shared Access

use std::sync::Arc;

let engine = Arc::new(LsmStorageEngine::new("/data", config).await?);

// Clone for other tasks
let engine_clone = engine.clone();
tokio::spawn(async move {
    engine_clone.get(&key).await
});

Batch Operations

// Efficient batch processing
let mut operations = Vec::new();
for i in 0..1000 {
    operations.push((
        Key::from(format!("key{}", i)),
        Value::from(vec![i as u8])
    ));
}
// Execute batch

Storage Operations

Write Data

use heliosdb_common::{Key, Value};

// Simple put
let key = Key::from("user:123");
let value = Value::from(b"John Doe".to_vec());
engine.put(key, value).await?;

Read Data

// Get by key
let key = Key::from("user:123");
match engine.get(&key).await? {
    Some(value) => println!("Found: {:?}", value),
    None => println!("Not found"),
}

Delete Data

let key = Key::from("user:123");
engine.delete(&key).await?;

Range Scan

// Scan key range
let start = Key::from("user:000");
let end = Key::from("user:999");
let results = engine.scan(&start, &end).await?;

for (key, value) in results {
    println!("{:?}: {:?}", key, value);
}

Time-Series Storage

use heliosdb_storage::timeseries::{TimeSeriesEngine, PartitionStrategy};

let mut ts_engine = TimeSeriesEngine::new(
    "/data/ts",
    PartitionStrategy::Daily
).await?;

// Write point
ts_engine.write_point("cpu.usage", 75.5, None).await?;

// Query range
let points = ts_engine.query_range(
    "cpu.usage",
    1635724800000,  // start
    1635811200000   // end
).await?;

Vector Search

Create Index

use heliosdb_vector::{HnswIndex, DistanceMetric};

let mut index = HnswIndex::new(
    768,                        // dimensions
    16,                         // M
    200,                        // ef_construction
    DistanceMetric::Cosine
);

Insert Vectors

// Insert single vector
let embedding = vec![0.1, 0.2, 0.3, /* ... 768 dims */];
index.insert(12345, embedding).await?;

// Batch insert
for (id, vec) in vectors {
    index.insert(id, vec).await?;
}

Search Vectors

// Find 10 nearest neighbors
let query = vec![0.15, 0.25, /* ... */];
let results = index.search(&query, 10).await?;

for (id, distance) in results {
    println!("ID: {}, Distance: {}", id, distance);
}

Hybrid Search

use heliosdb_vector::hybrid::{HybridSearchEngine, HybridQuery, FilterOp};

let query = HybridQuery {
    vector: Some(vec![0.1, 0.2, /* ... */]),
    text: Some("machine learning".to_string()),
    filters: vec![
        FilterOp::Eq {
            field: "category".to_string(),
            value: "AI".into(),
        },
    ],
    limit: 10,
};

let results = engine.search(query).await?;

Graph Queries

Setup

use heliosdb_graph::{GraphEngine, GraphConfig, Node, Edge};

let config = GraphConfig::default();
let engine = GraphEngine::new(config).await?;

// Create graph
engine.register_graph("social".to_string()).await?;

Add Nodes and Edges

// Add node
let node = Node {
    id: 1,
    label: "Person".to_string(),
    properties: HashMap::from([
        ("name".to_string(), json!("Alice")),
        ("age".to_string(), json!(30)),
    ]),
};
engine.add_node("social", node).await?;

// Add edge
let edge = Edge {
    id: 1,
    source: 1,
    target: 2,
    label: "KNOWS".to_string(),
    weight: 1.0,
    properties: HashMap::new(),
};
engine.add_edge("social", edge).await?;

Traverse

use heliosdb_graph::TraversalMode;

// BFS traversal
let nodes = engine.traverse(
    1,
    TraversalMode::BreadthFirst,
    5  // max depth
).await?;

Find Paths

// Shortest path
if let Some(path) = engine.shortest_path("social", 1, 100).await? {
    println!("Path length: {}", path.len());
    println!("Nodes: {:?}", path.nodes);
}

// All paths (up to length 5)
let paths = engine.all_paths("social", 1, 100, 5).await?;

Detect Cycles

let cycles = engine.detect_cycles("social").await?;
for cycle in cycles {
    println!("Cycle: {:?}", cycle);
}

Document Store

Setup

use heliosdb_document::{DocumentStore, Collection, DocumentId};

let store = DocumentStore::new("/data/docs")?;
let collection = Collection::new("users");

Insert Document

use serde_json::json;

let id = DocumentId::new("user123");
let data = json!({
    "name": "Alice",
    "email": "alice@example.com",
    "age": 30,
    "tags": ["active", "premium"]
});

let doc = store.insert(&collection, &id, data)?;

Query Documents

use heliosdb_document::{Filter, FilterOp};

// Find users older than 25
let filter = Filter {
    op: FilterOp::Gt {
        field: "age".to_string(),
        value: json!(25),
    },
};

let docs = store.find(&collection, filter)?;

Update Document

let update = json!({
    "email": "alice.new@example.com",
    "age": 31
});

store.update(&collection, &id, update)?;

Aggregation

use heliosdb_document::AggregationStage;

let pipeline = vec![
    AggregationStage::Match {
        filter: Filter {
            op: FilterOp::Gte {
                field: "age".to_string(),
                value: json!(18),
            },
        },
    },
    AggregationStage::Group {
        by: "$age".to_string(),
        accumulator: json!({
            "count": { "$sum": 1 }
        }),
    },
];

let results = store.aggregate(&collection, pipeline)?;

Schema Validation

use heliosdb_document::{SchemaBuilder, PropertyBuilder};

let schema = SchemaBuilder::new()
    .property("name", PropertyBuilder::string()
        .min_length(1)
        .max_length(100)
        .build())
    .property("email", PropertyBuilder::string()
        .pattern(r"^[^\s@]+@[^\s@]+\.[^\s@]+$")
        .build())
    .property("age", PropertyBuilder::integer()
        .minimum(0)
        .maximum(150)
        .build())
    .required(vec!["name", "email"])
    .build();

store.register_schema("users", schema)?;

Change Streams

let mut stream = store.watch(collection)?;

tokio::spawn(async move {
    while let Some(event) = stream.next().await {
        match event {
            Ok(change) => {
                println!("Event: {:?}", change.event_type);
                println!("Document: {:?}", change.document_id);
            }
            Err(e) => eprintln!("Error: {}", e),
        }
    }
});

Time-Series Data

Write Points

use heliosdb_storage::timeseries::TimeSeriesEngine;

let mut engine = TimeSeriesEngine::new(
    "/data/ts",
    PartitionStrategy::Daily
).await?;

// Current timestamp
engine.write_point("sensor.temp", 23.5, None).await?;

// Specific timestamp
let ts = 1635724800000;
engine.write_point("sensor.temp", 21.3, Some(ts)).await?;

Query with Windows

use heliosdb_storage::timeseries::{
    TimeSeriesQueryEngine,
    Window,
    WindowType,
};
use std::time::Duration;

let query_engine = TimeSeriesQueryEngine::new(engine_arc);

let window = Window {
    window_type: WindowType::Tumbling,
    size: Duration::from_secs(300),  // 5-minute windows
    aggregations: vec!["avg", "min", "max"],
};

let results = query_engine.query_with_window(
    "cpu.usage",
    start_time,
    end_time,
    window
).await?;

Retention Policy

use heliosdb_storage::timeseries::RetentionPolicy;
use std::time::Duration;

// Keep data for 30 days
let policy = RetentionPolicy::new(
    Duration::from_secs(30 * 24 * 3600)
);
engine.set_retention_policy(policy);

Downsampling

use heliosdb_storage::timeseries::{
    DownsamplingEngine,
    DownsamplingConfig,
    AggregationFunction,
};

let config = DownsamplingConfig {
    interval: Duration::from_secs(3600),  // 1 hour
    function: AggregationFunction::Avg,
};

let downsampler = DownsamplingEngine::new(config);
let downsampled = downsampler.downsample(points, 1000).await?;

Caching

Basic Operations

use heliosdb_unified_cache::{UnifiedCacheManager, CacheConfig, CacheKey};
use std::time::Duration;

let cache = UnifiedCacheManager::new(CacheConfig::default());

// Insert
let key = CacheKey::new("user:123");
let value = vec![1, 2, 3, 4, 5];
cache.insert(key.clone(), value, Some(Duration::from_secs(3600))).await?;

// Get
if let Some(value) = cache.get(&key).await? {
    println!("Cache hit!");
}

// Invalidate
cache.invalidate(&key).await?;

Advanced Configuration

use heliosdb_unified_cache::{
    CacheConfig,
    EvictionPolicyType,
    CompressionType,
};

let config = CacheConfig {
    max_size: 1024 * 1024 * 1024,        // 1GB
    eviction_policy: EvictionPolicyType::Hybrid,
    enable_ml: true,                      // ML-based eviction
    enable_compression: true,
    compression_type: CompressionType::Zstd,
    compression_threshold: 1024,          // Compress > 1KB
    enable_tiered: true,
    l1_size: 256 * 1024 * 1024,          // 256MB L1
    l2_size: Some(1024 * 1024 * 1024),   // 1GB L2
    ..Default::default()
};

let cache = UnifiedCacheManager::new(config);

Cache Statistics

let stats = cache.get_stats();

println!("Hit rate: {:.2}%", stats.hit_rate() * 100.0);
println!("Hits: {}", stats.hits);
println!("Misses: {}", stats.misses);
println!("Evictions: {}", stats.evictions);
println!("Size: {} bytes", stats.current_size);

Transactions

Basic Transaction

use heliosdb_storage::{TransactionParticipant, IsolationLevel};

let participant = TransactionParticipant::new(storage_arc);

// Begin
let txn_id = participant.begin_transaction(
    IsolationLevel::Serializable
).await?;

// Operations
let key = Key::from("account:123");
let value = participant.read(txn_id, &key).await?;

let new_value = Value::from(b"updated".to_vec());
participant.write(txn_id, key, new_value).await?;

// Commit
match participant.commit(txn_id).await {
    Ok(_) => println!("Committed"),
    Err(e) => {
        participant.rollback(txn_id).await?;
        eprintln!("Rolled back: {}", e);
    }
}

Distributed Transaction (XA)

use heliosdb_storage::{XaParticipant, XaParticipantConfig};

let participant = XaParticipant::new(
    XaParticipantConfig::default(),
    storage_arc
);

let xid = "global-txn-123".to_string();

// Phase 1: Execute
let local_id = participant.xa_start(xid.clone()).await?;
// ... perform operations ...
participant.xa_end(local_id).await?;

// Phase 2: Prepare
participant.xa_prepare(local_id).await?;

// Phase 3: Commit
participant.xa_commit(local_id).await?;

Code Snippets

Complete Example: Vector Search Application

use heliosdb_vector::{HnswIndex, DistanceMetric};
use std::sync::Arc;
use tokio;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Create index
    let index = Arc::new(tokio::sync::RwLock::new(
        HnswIndex::new(384, 16, 200, DistanceMetric::Cosine)
    ));

    // Insert embeddings
    let embeddings = vec![
        (1, vec![0.1; 384]),
        (2, vec![0.2; 384]),
        (3, vec![0.3; 384]),
    ];

    {
        let mut idx = index.write().await;
        for (id, vec) in embeddings {
            idx.insert(id, vec).await?;
        }
    }

    // Search
    let query = vec![0.15; 384];
    let results = {
        let idx = index.read().await;
        idx.search(&query, 5).await?
    };

    println!("Found {} results", results.len());
    for (id, distance) in results {
        println!("  ID: {}, Distance: {:.4}", id, distance);
    }

    Ok(())
}

Complete Example: Document Store with Validation

use heliosdb_document::{
    DocumentStore, Collection, DocumentId,
    SchemaBuilder, PropertyBuilder,
    Filter, FilterOp,
};
use serde_json::json;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let store = DocumentStore::new("/tmp/docs")?;
    let collection = Collection::new("users");

    // Register schema
    let schema = SchemaBuilder::new()
        .property("name", PropertyBuilder::string()
            .min_length(1)
            .build())
        .property("age", PropertyBuilder::integer()
            .minimum(0)
            .build())
        .required(vec!["name"])
        .build();
    store.register_schema("users", schema)?;

    // Insert documents
    for i in 1..=10 {
        let id = DocumentId::new(format!("user{}", i));
        let data = json!({
            "name": format!("User {}", i),
            "age": 20 + i,
        });
        store.insert(&collection, &id, data)?;
    }

    // Query
    let filter = Filter {
        op: FilterOp::Gt {
            field: "age".to_string(),
            value: json!(25),
        },
    };
    let docs = store.find(&collection, filter)?;

    println!("Found {} users older than 25", docs.len());

    Ok(())
}

Complete Example: Time-Series with Aggregations

use heliosdb_storage::timeseries::{
    TimeSeriesEngine, PartitionStrategy,
    TimeSeriesQueryEngine, Window, WindowType,
};
use std::time::Duration;
use std::sync::Arc;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Create engine
    let mut engine = TimeSeriesEngine::new(
        "/tmp/timeseries",
        PartitionStrategy::Hourly
    ).await?;

    // Write points
    let now = chrono::Utc::now().timestamp_millis() as u64;
    for i in 0..100 {
        let timestamp = now + (i * 60000); // 1-minute intervals
        engine.write_point(
            "sensor.temperature",
            20.0 + (i as f64 * 0.1),
            Some(timestamp)
        ).await?;
    }

    // Query with 5-minute windows
    let query_engine = TimeSeriesQueryEngine::new(Arc::new(engine));

    let window = Window {
        window_type: WindowType::Tumbling,
        size: Duration::from_secs(300),
        aggregations: vec!["avg", "min", "max"],
    };

    let results = query_engine.query_with_window(
        "sensor.temperature",
        now,
        now + 6000000,
        window
    ).await?;

    println!("Windowed results:");
    for result in results {
        println!("  Window: {} - Avg: {:.2}",
            result.window_start, result.avg);
    }

    Ok(())
}

Complete Example: Graph Social Network

use heliosdb_graph::{
    GraphEngine, GraphConfig, Node, Edge, TraversalMode,
};
use serde_json::json;
use std::collections::HashMap;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let config = GraphConfig::default();
    let engine = GraphEngine::new(config).await?;

    // Create social network graph
    engine.register_graph("social".to_string()).await?;

    // Add people
    for i in 1..=10 {
        let mut props = HashMap::new();
        props.insert("name".to_string(), json!(format!("Person{}", i)));
        props.insert("age".to_string(), json!(20 + i));

        let node = Node {
            id: i,
            label: "Person".to_string(),
            properties: props,
        };
        engine.add_node("social", node).await?;
    }

    // Add friendships
    let edges = vec![
        (1, 2), (1, 3), (2, 4), (3, 4), (4, 5),
        (5, 6), (6, 7), (7, 8), (8, 9), (9, 10),
    ];

    for (i, (src, tgt)) in edges.iter().enumerate() {
        let edge = Edge {
            id: i as u64 + 1,
            source: *src,
            target: *tgt,
            label: "KNOWS".to_string(),
            weight: 1.0,
            properties: HashMap::new(),
        };
        engine.add_edge("social", edge).await?;
    }

    // Find friends of friends (2 hops from person 1)
    let nodes = engine.traverse(
        1,
        TraversalMode::BreadthFirst,
        2
    ).await?;

    println!("Friends of friends: {:?}", nodes);

    // Find shortest path
    if let Some(path) = engine.shortest_path("social", 1, 10).await? {
        println!("Shortest path from 1 to 10:");
        println!("  Length: {} hops", path.len());
        println!("  Nodes: {:?}", path.nodes);
    }

    Ok(())
}

Performance Tips

  1. Use batch operations when inserting multiple items
  2. Enable compression for cache and storage to reduce memory
  3. Tune index parameters (M, ef) for vector search based on your dataset
  4. Use appropriate partition strategies for time-series data
  5. Enable ML-based eviction in cache for better hit rates
  6. Set retention policies to automatically clean up old data
  7. Use read replicas to distribute read load
  8. Monitor metrics regularly for performance insights

Common Errors

Storage Errors

HeliosError::KeyNotFound("user:123")
HeliosError::TransactionConflict
HeliosError::Corruption("invalid checksum")

Vector Errors

VectorError::DimensionMismatch { expected: 768, got: 384 }
VectorError::IndexNotTrained

Graph Errors

GraphError::NodeNotFound(123)
GraphError::CycleDetected
GraphError::MaxDepthExceeded(100)

Additional Resources


For support and questions, visit: https://github.com/your-org/heliosdb/issues