PostgreSQL Protocol Examples¶
Practical SQL examples and usage patterns for HeliosDB's PostgreSQL compatibility.
Connection Examples¶
Basic Connection¶
# psql connection
psql -h localhost -p 5432 -U admin -d mydb
# Connection string
psql "postgresql://admin:password@localhost:5432/mydb"
# With SSL
psql "postgresql://admin@localhost/mydb?sslmode=require"
Python (psycopg2)¶
import psycopg2
conn = psycopg2.connect(
host="localhost",
port=5432,
database="mydb",
user="admin",
password="password"
)
cursor = conn.cursor()
cursor.execute("SELECT * FROM users LIMIT 10")
rows = cursor.fetchall()
conn.close()
Node.js (node-postgres)¶
const { Pool } = require('pg');
const pool = new Pool({
host: 'localhost',
port: 5432,
database: 'mydb',
user: 'admin',
password: 'password'
});
const result = await pool.query('SELECT * FROM users LIMIT 10');
console.log(result.rows);
Go (lib/pq)¶
import (
"database/sql"
_ "github.com/lib/pq"
)
db, err := sql.Open("postgres",
"host=localhost port=5432 user=admin password=password dbname=mydb sslmode=disable")
if err != nil {
log.Fatal(err)
}
defer db.Close()
rows, err := db.Query("SELECT * FROM users LIMIT 10")
DDL Examples¶
Create Tables¶
-- Basic table
CREATE TABLE users (
id SERIAL PRIMARY KEY,
email VARCHAR(255) UNIQUE NOT NULL,
name VARCHAR(100),
created_at TIMESTAMP DEFAULT NOW()
);
-- With constraints
CREATE TABLE orders (
id BIGSERIAL PRIMARY KEY,
user_id INTEGER NOT NULL REFERENCES users(id),
total DECIMAL(10,2) NOT NULL CHECK (total >= 0),
status VARCHAR(20) DEFAULT 'pending',
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Partitioned table
CREATE TABLE events (
id BIGSERIAL,
event_time TIMESTAMP NOT NULL,
event_type VARCHAR(50),
data JSONB
) PARTITION BY RANGE (event_time);
CREATE TABLE events_2025_01 PARTITION OF events
FOR VALUES FROM ('2025-01-01') TO ('2025-02-01');
Create Indexes¶
-- B-tree index
CREATE INDEX idx_users_email ON users(email);
-- Unique index
CREATE UNIQUE INDEX idx_users_email_unique ON users(email);
-- Partial index
CREATE INDEX idx_active_users ON users(email)
WHERE status = 'active';
-- Expression index
CREATE INDEX idx_users_lower_email ON users(LOWER(email));
-- GIN index for JSONB
CREATE INDEX idx_events_data ON events USING GIN(data);
-- Covering index
CREATE INDEX idx_orders_user_total ON orders(user_id)
INCLUDE (total, status);
DML Examples¶
INSERT Operations¶
-- Simple insert
INSERT INTO users (email, name)
VALUES ('user@example.com', 'John Doe');
-- Multiple rows
INSERT INTO users (email, name) VALUES
('user1@example.com', 'User 1'),
('user2@example.com', 'User 2'),
('user3@example.com', 'User 3');
-- Insert with RETURNING
INSERT INTO users (email, name)
VALUES ('new@example.com', 'New User')
RETURNING id, created_at;
-- Upsert (INSERT ON CONFLICT)
INSERT INTO users (email, name)
VALUES ('user@example.com', 'Updated Name')
ON CONFLICT (email)
DO UPDATE SET name = EXCLUDED.name,
updated_at = NOW();
UPDATE Operations¶
-- Simple update
UPDATE users SET name = 'Jane Doe' WHERE id = 1;
-- Update with FROM
UPDATE orders o
SET status = 'shipped'
FROM users u
WHERE o.user_id = u.id
AND u.email = 'vip@example.com';
-- Update with RETURNING
UPDATE orders
SET status = 'completed', completed_at = NOW()
WHERE id = 100
RETURNING id, status, completed_at;
DELETE Operations¶
-- Simple delete
DELETE FROM users WHERE id = 1;
-- Delete with USING
DELETE FROM orders o
USING users u
WHERE o.user_id = u.id AND u.status = 'inactive';
-- Delete with RETURNING
DELETE FROM orders
WHERE created_at < NOW() - INTERVAL '1 year'
RETURNING id, total;
PostgreSQL 17 Features¶
MERGE with RETURNING¶
-- MERGE with merge_action() to report which action fired
MERGE INTO inventory i
USING updates u ON i.product_id = u.product_id
WHEN MATCHED AND u.quantity = 0 THEN
DELETE
WHEN MATCHED THEN
UPDATE SET quantity = u.quantity, updated_at = NOW()
WHEN NOT MATCHED THEN
INSERT (product_id, quantity, created_at)
VALUES (u.product_id, u.quantity, NOW())
RETURNING i.product_id, i.quantity, merge_action() AS operation;
-- Result:
-- product_id | quantity | operation
-- ------------+----------+-----------
-- 1 | 100 | UPDATE
-- 2 | NULL | DELETE
-- 3 | 50 | INSERT
COPY with ON_ERROR¶
-- Skip bad rows, continue loading
COPY users (id, email, name)
FROM '/data/users.csv'
WITH (
FORMAT csv,
HEADER true,
ON_ERROR ignore
);
-- Result: Loaded 9,500 of 10,000 rows (500 errors skipped)
JSON_TABLE¶
-- Convert JSON array to rows
SELECT * FROM JSON_TABLE(
'[{"name":"Alice","age":30},{"name":"Bob","age":25}]',
'$[*]' COLUMNS (
row_num FOR ORDINALITY,
name TEXT PATH '$.name',
age INTEGER PATH '$.age'
)
) AS jt;
-- Result:
-- row_num | name | age
-- ---------+-------+-----
-- 1 | Alice | 30
-- 2 | Bob | 25
Query Examples¶
Window Functions¶
-- Running total
SELECT
date,
amount,
SUM(amount) OVER (ORDER BY date) AS running_total
FROM transactions
ORDER BY date;
-- Ranking
SELECT
name,
score,
RANK() OVER (ORDER BY score DESC) AS rank,
DENSE_RANK() OVER (ORDER BY score DESC) AS dense_rank
FROM players;
-- Partitioned window
SELECT
department,
employee,
salary,
AVG(salary) OVER (PARTITION BY department) AS dept_avg,
salary - AVG(salary) OVER (PARTITION BY department) AS vs_dept_avg
FROM employees;
Common Table Expressions (CTEs)¶
-- Simple CTE
WITH active_users AS (
SELECT * FROM users WHERE status = 'active'
)
SELECT * FROM active_users
WHERE created_at > NOW() - INTERVAL '30 days';
-- Recursive CTE (hierarchy)
WITH RECURSIVE org_chart AS (
-- Base case: top-level managers
SELECT id, name, manager_id, 1 AS level
FROM employees
WHERE manager_id IS NULL
UNION ALL
-- Recursive case: subordinates
SELECT e.id, e.name, e.manager_id, oc.level + 1
FROM employees e
JOIN org_chart oc ON e.manager_id = oc.id
)
SELECT * FROM org_chart ORDER BY level, name;
JSONB Operations¶
-- Query JSONB
SELECT * FROM events
WHERE data->>'type' = 'purchase'
AND (data->'amount')::numeric > 100;
-- JSONB path queries
SELECT * FROM events
WHERE data @? '$.items[*] ? (@.price > 50)';
-- JSONB aggregation
SELECT
data->>'category' AS category,
COUNT(*),
SUM((data->>'amount')::numeric) AS total
FROM events
GROUP BY data->>'category';
-- Update JSONB
UPDATE events
SET data = jsonb_set(data, '{processed}', 'true')
WHERE id = 100;
Aggregations¶
-- GROUPING SETS
SELECT
COALESCE(region, 'ALL') AS region,
COALESCE(product, 'ALL') AS product,
SUM(sales) AS total_sales
FROM sales_data
GROUP BY GROUPING SETS (
(region, product),
(region),
(product),
()
);
-- FILTER clause
SELECT
COUNT(*) AS total,
COUNT(*) FILTER (WHERE status = 'active') AS active,
COUNT(*) FILTER (WHERE status = 'inactive') AS inactive
FROM users;
Transaction Examples¶
-- Basic transaction
BEGIN;
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
COMMIT;
-- With savepoints
BEGIN;
INSERT INTO orders (user_id, total) VALUES (1, 100);
SAVEPOINT sp1;
INSERT INTO order_items (order_id, product_id) VALUES (1, 1);
-- Oops, rollback just the item
ROLLBACK TO sp1;
INSERT INTO order_items (order_id, product_id) VALUES (1, 2);
COMMIT;
-- Read-only transaction
BEGIN READ ONLY;
SELECT * FROM sensitive_data;
COMMIT;
Bulk Operations¶
COPY Import¶
-- Import from CSV
COPY users (email, name, created_at)
FROM '/data/users.csv'
WITH (FORMAT csv, HEADER true);
-- Import with custom delimiter
COPY orders FROM '/data/orders.tsv'
WITH (FORMAT csv, DELIMITER E'\t', HEADER true);
-- Import binary
COPY large_table FROM '/data/large.bin' WITH (FORMAT binary);
COPY Export¶
-- Export to CSV
COPY (SELECT * FROM users WHERE status = 'active')
TO '/tmp/active_users.csv'
WITH (FORMAT csv, HEADER true);
-- Export specific columns
COPY users (id, email)
TO '/tmp/user_emails.csv'
WITH (FORMAT csv);
Migration from PostgreSQL¶
pg_dump Import¶
# Dump from PostgreSQL
pg_dump -h pg-host -U pguser -d pgdb > backup.sql
# Import to HeliosDB
psql -h helios-host -U admin -d heliosdb < backup.sql
Schema Migration¶
# Export schema only
pg_dump -h pg-host -s -d pgdb > schema.sql
# Import schema
psql -h helios-host -d heliosdb < schema.sql
# Verify tables (inside psql)
\dt
Related: README.md | CONFIGURATION.md | COMPATIBILITY.md
Last Updated: December 2025