
HeliosDB Caching Quick Start Guide

Version: 7.0 | Last Updated: 2025-01-04


Prerequisites

  • HeliosDB 7.0 or later installed (this guide documents version 7.0 features)
  • Database created and accessible
  • Appropriate permissions (ALTER DATABASE, ALTER TABLE)

5-Minute Setup

Step 1: Enable Query Caching

-- Enable query result caching for the database
ALTER DATABASE ENABLE QUERY_CACHE;

-- Verify caching is enabled
SELECT setting_value FROM database_settings WHERE setting_name = 'query_cache_enabled';

Step 2: Configure Basic Parameters

-- Set cache size and TTL
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 1024,           -- 1 GB cache
  eviction_policy = 'W_TINYLFU', -- Best general-purpose policy
  ttl_seconds = 3600            -- 1 hour default TTL
);

Step 3: Enable Table-Level Caching

-- Enable intelligent caching for specific tables
ALTER TABLE products ENABLE INTELLIGENT_CACHING;
ALTER TABLE customers ENABLE INTELLIGENT_CACHING;
ALTER TABLE orders ENABLE INTELLIGENT_CACHING;

Step 4: Verify Cache is Working

-- Run a query
SELECT * FROM products WHERE category = 'electronics';

-- Run it again and check stats
SELECT * FROM products WHERE category = 'electronics';

-- View cache statistics
SELECT cache_hits, cache_misses, hit_rate
FROM cache_statistics;

Expected output:

cache_hits | cache_misses | hit_rate
-----------|--------------|----------
         1 |            1 |     0.50


Enabling Caching

Database-Level Caching

-- Enable all caching features
ALTER DATABASE ENABLE QUERY_CACHE;
ALTER DATABASE ENABLE STATEMENT_CACHE;
ALTER DATABASE ENABLE BUFFER_POOL_CACHE;

-- Or enable everything at once
ALTER DATABASE ENABLE ALL_CACHES;

Table-Level Caching

-- Standard caching
ALTER TABLE table_name ENABLE CACHING;

-- Intelligent ML-based caching
ALTER TABLE table_name ENABLE INTELLIGENT_CACHING;

-- Distributed/global caching (for multi-node deployments)
ALTER TABLE table_name ENABLE GLOBAL_CACHE;

Query-Level Caching

-- Cache hint
SELECT /*+ CACHE */ * FROM products WHERE id = $1;

-- Cache with custom TTL
SELECT /*+ CACHE TTL=300 */ * FROM products WHERE category = $1;

-- Explicit cache command
CACHE SELECT * FROM frequently_accessed_data;

Basic Configuration

Memory Allocation

-- Set total cache memory
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 2048  -- 2 GB total cache
);

-- Set per-query limit (prevents single query consuming all cache)
ALTER DATABASE SET CACHE_PARAMS (
  per_query_max_mb = 256  -- Max 256 MB per cached query
);

TTL Settings

-- Global default TTL
ALTER DATABASE SET CACHE_PARAMS (
  ttl_seconds = 3600  -- 1 hour
);

-- Table-specific TTL
ALTER TABLE products SET CACHE_PARAMS (
  ttl_seconds = 300  -- 5 minutes (for frequently changing data)
);

ALTER TABLE static_config SET CACHE_PARAMS (
  ttl_seconds = 86400  -- 24 hours (for rarely changing data)
);

Eviction Policy

-- W-TinyLFU (recommended for most workloads)
ALTER DATABASE SET CACHE_PARAMS (
  eviction_policy = 'W_TINYLFU'
);

-- Other options:
-- 'LRU' - Least Recently Used (simple, predictable)
-- 'LFU' - Least Frequently Used (good for hot/cold data)
-- 'ML'  - Machine learning based (adaptive)

Statement Cache Setup (Prepared Statements)

Rust Client

use heliosdb_pooling::{IntelligentConnectionPool, PoolConfig, PoolMode};
use heliosdb_pooling::{Parameters, ParameterValue, PreparedStatement};

// Create connection pool
let config = PoolConfig {
    host: "localhost".to_string(),
    port: 5432,
    database: "mydb".to_string(),
    username: "user".to_string(),
    password: "password".to_string(),
    max_connections: 10,
    ..Default::default()
};

let pool = IntelligentConnectionPool::new(config, PoolMode::Transaction)?;
pool.initialize().await?;

// Prepare statement (automatically cached)
let stmt = pool.prepare("SELECT * FROM users WHERE id = $1")?;

// Execute multiple times (reuses cached plan)
for user_id in user_ids {
    let params = Parameters::Positional(vec![ParameterValue::Int64(user_id)]);
    let result = pool.execute(&stmt, params).await?;
    // Process result...
}

// Check cache performance
let stats = pool.statement_cache_stats();
println!("Cache size: {}", stats.current_size);
println!("Hit rate: {:.1}%", stats.hit_rate * 100.0);
println!("Memory used: {} MB", stats.total_memory_bytes / 1024 / 1024);

Best Practices for Statement Cache

// DO: Prepare once, execute many times
let insert_stmt = pool.prepare("INSERT INTO logs VALUES ($1, $2, $3)")?;
for log_entry in logs {
    pool.execute(&insert_stmt, log_entry.to_params()).await?;
}

// DON'T: Prepare inside loop
for log_entry in logs {
    let stmt = pool.prepare("INSERT INTO logs VALUES ($1, $2, $3)")?; // Wasteful!
    pool.execute(&stmt, log_entry.to_params()).await?;
}

// DO: Use parameters for variable data
let stmt = pool.prepare("SELECT * FROM users WHERE name = $1")?;

// DON'T: Interpolate strings
let stmt = pool.prepare(format!("SELECT * FROM users WHERE name = '{}'", name))?; // Bad!

Monitoring Cache Performance

Quick Health Check

-- Overall cache health
SELECT
  cache_hits,
  cache_misses,
  ROUND(hit_rate * 100, 2) as hit_rate_pct,
  eviction_count,
  ROUND(memory_used_mb, 2) as memory_mb
FROM cache_statistics;

Expected healthy output:

cache_hits | cache_misses | hit_rate_pct | eviction_count | memory_mb
-----------|--------------|--------------|----------------|----------
    50000  |        2500  |        95.24 |           1500 |    512.75

Per-Table Statistics

SELECT
  table_name,
  cache_entries,
  ROUND(avg_entry_size_kb, 2) as avg_kb,
  ROUND(hit_rate * 100, 2) as hit_rate_pct,
  invalidation_count
FROM table_cache_stats
ORDER BY hit_rate DESC
LIMIT 10;

Real-Time Monitoring

-- Watch cache hit rate over time
SELECT
  time_bucket('1 minute', recorded_at) as minute,
  SUM(hits) as total_hits,
  SUM(misses) as total_misses,
  ROUND(SUM(hits)::numeric / NULLIF(SUM(hits) + SUM(misses), 0) * 100, 2) as hit_rate_pct
FROM cache_metrics
WHERE recorded_at > NOW() - INTERVAL '1 hour'
GROUP BY minute
ORDER BY minute DESC;

Cache Invalidation

Automatic Invalidation

HeliosDB automatically invalidates cache entries when data changes:

-- This UPDATE automatically invalidates related cache entries
UPDATE products SET price = 99.99 WHERE id = 123;

-- Affected cached queries are immediately invalidated:
-- - SELECT * FROM products WHERE id = 123
-- - SELECT price FROM products WHERE id = 123
-- - Any query that reads the modified row

Manual Invalidation

-- Invalidate by table
INVALIDATE CACHE WHERE table_name = 'products';

-- Invalidate all product-related caches
INVALIDATE CACHE WHERE table_name LIKE 'product%';

-- Clear entire cache
FLUSH ALL CACHES;

-- Uncache specific query
UNCACHE SELECT * FROM products WHERE featured = true;

Common Configurations

High-Traffic Web Application

-- Large cache, aggressive caching
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 8192,             -- 8 GB
  eviction_policy = 'W_TINYLFU',
  ttl_seconds = 300               -- 5 minutes
);

-- Cache frequently accessed pages
ALTER TABLE products ENABLE INTELLIGENT_CACHING;
ALTER TABLE categories ENABLE INTELLIGENT_CACHING;
ALTER TABLE users ENABLE INTELLIGENT_CACHING;

-- Enable predictive warming
ALTER DATABASE ENABLE PREDICTIVE_CACHE_WARMING;

Analytics/Reporting

-- Larger per-query limit for big result sets
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 16384,            -- 16 GB
  per_query_max_mb = 1024,        -- 1 GB per query
  eviction_policy = 'LFU',        -- Frequency-based for repeated reports
  ttl_seconds = 3600              -- 1 hour
);

-- Cache expensive analytics queries
ALTER TABLE sales_facts ENABLE INTELLIGENT_CACHING;
ALTER TABLE dim_products ENABLE INTELLIGENT_CACHING;

Real-Time Application (Low Latency)

-- Smaller cache, lower TTL for freshness
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 2048,
  eviction_policy = 'W_TINYLFU',
  ttl_seconds = 30                -- 30 seconds max staleness
);

-- Enable strong consistency for critical tables
ALTER TABLE account_balances SET CACHE_PARAMS (
  consistency_level = 'STRONG'
);

ALTER TABLE inventory SET CACHE_PARAMS (
  consistency_level = 'READ_YOUR_WRITES'
);

Development/Testing

-- Small cache, easy debugging
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 256,
  eviction_policy = 'LRU',
  ttl_seconds = 60
);

-- Enable verbose logging
ALTER DATABASE SET CACHE_PARAMS (
  enable_debug_logging = true
);

Distributed Cache Setup

For multi-node deployments, enable distributed caching:

Basic Setup

-- Enable global cache for shared tables
ALTER TABLE products ENABLE GLOBAL_CACHE;

-- Configure replication
ALTER TABLE products SET CACHE_PARAMS (
  replication_factor = 3,
  consistency_level = 'EVENTUAL'
);

Cluster Configuration

use heliosdb_cache::distributed_sync::{DistributedCacheSync, SyncConfig, ConsistencyLevel};

let config = SyncConfig {
    node_id: "node-1".to_string(),
    cluster_nodes: vec![
        "node-1".to_string(),
        "node-2".to_string(),
        "node-3".to_string(),
    ],
    consistency_level: ConsistencyLevel::EventualConsistency,
    sync_timeout_ms: 5000,
    heartbeat_interval_ms: 1000,
    failure_detection_timeout_ms: 10000,
    max_queue_size: 10000,
    enable_partition_detection: true,
};

let mut sync = DistributedCacheSync::new(config)?;
sync.start().await?;

Troubleshooting Quick Fixes

Low Hit Rate (<70%)

-- Increase cache size
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 4096  -- Double current size
);

-- Check if working set exceeds cache
SELECT
  SUM(entry_size_bytes) / 1024 / 1024 as working_set_mb,
  (SELECT setting_value FROM database_settings WHERE setting_name = 'cache_max_size_mb') as cache_size_mb
FROM cache_entries;

High Memory Usage

-- Reduce cache size
ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 1024,
  per_query_max_mb = 128
);

-- Identify large cached entries
SELECT query_hash, size_bytes / 1024 / 1024 as size_mb
FROM cache_entries
ORDER BY size_bytes DESC
LIMIT 10;

Stale Data Issues

-- Reduce TTL
ALTER TABLE products SET CACHE_PARAMS (
  ttl_seconds = 60
);

-- Enable stronger consistency
ALTER TABLE products SET CACHE_PARAMS (
  consistency_level = 'STRONG'
);

-- Force immediate invalidation
INVALIDATE CACHE WHERE table_name = 'products';

Cache Not Being Used

-- Verify caching is enabled
SELECT setting_name, setting_value
FROM database_settings
WHERE setting_name LIKE '%cache%';

-- Check table caching status
SELECT table_name, caching_enabled
FROM table_caching_status;

-- Ensure queries are cacheable
SELECT * FROM cache_diagnostic_report;

Next Steps

For day-to-day use, the Quick Reference below summarizes the most common commands. For deeper tuning, see the sections above on eviction policies, TTL settings, and distributed cache configuration.

Quick Reference

Enable Caching

ALTER DATABASE ENABLE QUERY_CACHE;
ALTER TABLE mytable ENABLE INTELLIGENT_CACHING;

Configure Cache

ALTER DATABASE SET CACHE_PARAMS (
  max_size_mb = 2048,
  eviction_policy = 'W_TINYLFU',
  ttl_seconds = 3600
);

Monitor Cache

SELECT * FROM cache_statistics;
SELECT * FROM table_cache_stats;

Invalidate Cache

INVALIDATE CACHE WHERE table_name = 'mytable';
FLUSH ALL CACHES;

Statement Cache (Rust)

let stmt = pool.prepare("SELECT * FROM users WHERE id = $1")?;
let params = Parameters::Positional(vec![ParameterValue::Int64(123)]);
pool.execute(&stmt, params).await?;