HeliosDB v3.0 SQL Reference

Complete SQL Language Reference for HeliosDB v3.0


Table of Contents

  1. Data Definition Language (DDL)
  2. Data Manipulation Language (DML)
  3. Data Query Language (DQL)
  4. v3.0 Extensions
  5. Functions and Operators
  6. System Tables
  7. Data Types
  8. Transaction Control
  9. Examples by Use Case
  10. Performance Tips

Data Definition Language (DDL)

CREATE TABLE

-- Basic table
CREATE TABLE users (
id INTEGER PRIMARY KEY,
name VARCHAR(100) NOT NULL,
email VARCHAR(255) UNIQUE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Time-series table
CREATE TABLE metrics (
timestamp TIMESTAMP NOT NULL,
sensor_id INTEGER,
value DOUBLE,
PRIMARY KEY (timestamp, sensor_id)
) WITH (
timeseries_retention = '30 days',
timeseries_downsample = 'avg:5m',
timeseries_compression = 'gorilla'
);
-- Temporal table (system-versioned)
CREATE TABLE employees (
id INTEGER PRIMARY KEY,
name VARCHAR(100),
salary DECIMAL(10,2)
) WITH SYSTEM VERSIONING;
-- Encrypted columns
CREATE TABLE sensitive_data (
id INTEGER PRIMARY KEY,
name VARCHAR(100),
ssn CHAR(11) ENCRYPTED SEARCHABLE,
email VARCHAR(255) ENCRYPTED
);
-- Sharded table
CREATE TABLE orders (
id INTEGER PRIMARY KEY,
user_id INTEGER,
amount DECIMAL(10,2)
) SHARD BY HASH(user_id) SHARDS 8;
-- Geospatial table
CREATE TABLE locations (
id INTEGER PRIMARY KEY,
name VARCHAR(100),
coordinates GEOMETRY(Point, 4326)
);

CREATE INDEX

-- B-tree index
CREATE INDEX idx_users_email ON users(email);
-- Full-text index
CREATE FULLTEXT INDEX fts_products ON products(name, description)
WITH LANGUAGE 'English';
-- Geospatial index
CREATE SPATIAL INDEX idx_locations_coords ON locations USING RTREE(coordinates);
-- Global secondary index (cross-shard)
CREATE GLOBAL INDEX idx_orders_amount ON orders(amount);
-- Partial index
CREATE INDEX idx_active_users ON users(email) WHERE active = true;
-- Covering index
CREATE INDEX idx_users_email_name ON users(email) INCLUDE (name);

CREATE MATERIALIZED VIEW

-- Basic materialized view
CREATE MATERIALIZED VIEW sales_summary AS
SELECT region, DATE(order_date) as date, SUM(amount) as total
FROM sales
GROUP BY region, DATE(order_date);
-- Auto-refresh on commit
CREATE MATERIALIZED VIEW daily_metrics AS
SELECT DATE(timestamp), AVG(value)
FROM metrics
GROUP BY DATE(timestamp)
WITH REFRESH ON COMMIT;
-- Scheduled refresh
CREATE MATERIALIZED VIEW weekly_report AS
SELECT * FROM sales WHERE week = CURRENT_WEEK()
WITH REFRESH SCHEDULE '0 0 * * 0'; -- Every Sunday midnight

CREATE REGION

-- Multi-region deployment
CREATE REGION us_east DATACENTER 'aws-us-east-1';
CREATE REGION eu_west DATACENTER 'aws-eu-west-1';
CREATE REGION ap_south DATACENTER 'aws-ap-south-1';

CREATE READ REPLICA

CREATE READ REPLICA replica_1
WITH (
source => 'primary',
async_lag_limit => '1 second',
region => 'us_east'
);

CREATE SAMPLE

-- Stratified sample for approximate queries
CREATE SAMPLE sales_sample ON sales
STRATIFIED BY region, category
WITH SIZE 5% ERROR 5%;
-- Uniform sample
CREATE SAMPLE users_sample ON users
WITH SIZE 1%;

CREATE FOREIGN TABLE

-- S3 Parquet data
CREATE FOREIGN TABLE s3_logs (
timestamp TIMESTAMP,
level VARCHAR(10),
message TEXT
) SERVER s3_server OPTIONS (
bucket => 'my-logs',
format => 'parquet',
compression => 'snappy'
);
-- PostgreSQL foreign table
CREATE FOREIGN TABLE pg_users (
id INTEGER,
name VARCHAR(100)
) SERVER postgres_server OPTIONS (
schema => 'public',
table => 'users'
);
-- REST API foreign table
CREATE FOREIGN TABLE api_users (
id INTEGER,
name VARCHAR(100)
) SERVER rest_server OPTIONS (
endpoint => 'https://api.example.com/users',
method => 'GET',
auth => 'bearer_token'
);

CREATE PROCEDURE

-- PL/pgSQL procedure
CREATE PROCEDURE calculate_bonus(emp_id INTEGER)
LANGUAGE plpgsql
AS $$
DECLARE
v_salary DECIMAL(10,2);
v_bonus DECIMAL(10,2);
BEGIN
SELECT salary INTO v_salary FROM employees WHERE id = emp_id;
v_bonus := v_salary * 0.10;
UPDATE employees SET bonus = v_bonus WHERE id = emp_id;
END;
$$;
-- PL/SQL procedure (Oracle-compatible)
CREATE PROCEDURE process_orders(p_date DATE)
LANGUAGE plsql
AS
BEGIN
FOR rec IN (SELECT * FROM orders WHERE order_date = p_date) LOOP
-- Process each order
UPDATE order_status SET status = 'processed' WHERE order_id = rec.id;
END LOOP;
COMMIT;
EXCEPTION
WHEN OTHERS THEN
ROLLBACK;
RAISE;
END;
/

CREATE TRIGGER

-- BEFORE trigger
CREATE TRIGGER audit_user_changes
BEFORE UPDATE ON users
FOR EACH ROW
EXECUTE FUNCTION log_user_changes();
-- AFTER trigger
CREATE TRIGGER update_statistics
AFTER INSERT ON sales
FOR EACH ROW
EXECUTE FUNCTION update_sales_stats();
-- INSTEAD OF trigger (for views)
CREATE TRIGGER user_view_insert
INSTEAD OF INSERT ON user_view
FOR EACH ROW
EXECUTE FUNCTION handle_user_insert();

Row-Level Security

-- Create policy
CREATE POLICY user_isolation ON user_data
FOR SELECT
USING (user_id = current_user_id());
CREATE POLICY tenant_isolation ON tenant_data
FOR ALL
USING (tenant_id = current_tenant_id());
-- Enable RLS
ALTER TABLE user_data ENABLE ROW LEVEL SECURITY;
ALTER TABLE tenant_data ENABLE ROW LEVEL SECURITY;

Data Manipulation Language (DML)

INSERT

-- Basic insert
INSERT INTO users (id, name, email) VALUES (1, 'Alice', 'alice@example.com');
-- Multi-row insert
INSERT INTO users (id, name, email) VALUES
(1, 'Alice', 'alice@example.com'),
(2, 'Bob', 'bob@example.com'),
(3, 'Charlie', 'charlie@example.com');
-- Insert from SELECT
INSERT INTO archived_orders
SELECT * FROM orders WHERE order_date < '2024-01-01';
-- Insert with RETURNING
INSERT INTO users (name, email) VALUES ('David', 'david@example.com')
RETURNING id, created_at;

UPDATE

-- Basic update
UPDATE users SET email = 'newemail@example.com' WHERE id = 1;
-- Multi-column update
UPDATE employees
SET salary = salary * 1.1, last_raise = CURRENT_DATE
WHERE department = 'engineering';
-- Update with JOIN
UPDATE orders o
SET status = 'shipped'
FROM shipments s
WHERE o.id = s.order_id AND s.shipped_date = CURRENT_DATE;

DELETE

-- Basic delete
DELETE FROM users WHERE id = 1;
-- Delete with subquery
DELETE FROM orders WHERE user_id IN (
SELECT id FROM users WHERE inactive = true
);

Data Query Language (DQL)

SELECT

-- Basic SELECT
SELECT id, name, email FROM users;
-- WHERE clause
SELECT * FROM users WHERE age > 25 AND active = true;
-- ORDER BY
SELECT * FROM users ORDER BY created_at DESC LIMIT 10;
-- GROUP BY with HAVING
SELECT department, AVG(salary) as avg_salary
FROM employees
GROUP BY department
HAVING AVG(salary) > 50000;
-- JOINs
SELECT u.name, o.amount
FROM users u
INNER JOIN orders o ON u.id = o.user_id;
-- Subqueries
SELECT * FROM users WHERE id IN (
SELECT DISTINCT user_id FROM orders WHERE amount > 1000
);
-- Common Table Expressions (CTE)
WITH high_value_customers AS (
SELECT user_id, SUM(amount) as total
FROM orders
GROUP BY user_id
HAVING SUM(amount) > 10000
)
SELECT u.name, h.total
FROM users u
JOIN high_value_customers h ON u.id = h.user_id;
-- Window functions
SELECT
name,
salary,
RANK() OVER (PARTITION BY department ORDER BY salary DESC) as rank
FROM employees;

v3.0 Extensions

Time-Series Queries

-- Query time range (half-open interval; BETWEEN is closed on both ends
-- and would miss all of Jan 31 after midnight)
SELECT * FROM metrics
WHERE timestamp >= '2025-01-01' AND timestamp < '2025-02-01';
-- Downsampled aggregation
SELECT
TIME_BUCKET('5 minutes', timestamp) as bucket,
AVG(value) as avg_value
FROM metrics
GROUP BY bucket;

Temporal Queries

-- Point-in-time query
SELECT * FROM employees
FOR SYSTEM_TIME AS OF '2024-01-01 00:00:00';
-- Time range query
SELECT * FROM employees
FOR SYSTEM_TIME BETWEEN '2024-01-01' AND '2024-12-31';
-- All versions
SELECT * FROM employees
FOR SYSTEM_TIME ALL
WHERE id = 123;

Approximate Queries

-- Approximate COUNT with confidence bounds
SELECT
region,
APPROX_COUNT(*) as count,
APPROX_SUM(amount) as total
FROM sales_sample
GROUP BY region
WITH CONFIDENCE 95%;
-- Approximate percentile
SELECT APPROX_PERCENTILE(amount, 0.95) FROM sales_sample;
-- Result with error bounds
-- count = 1,000,000 ± 50,000 (95% confidence)

Streaming Queries

-- Tumbling window (5-minute aggregations)
SELECT
TUMBLE(timestamp, INTERVAL '5 minutes') as window,
COUNT(*) as event_count,
AVG(value) as avg_value
FROM event_stream
GROUP BY TUMBLE(timestamp, INTERVAL '5 minutes');
-- Sliding window
SELECT
HOP(timestamp, INTERVAL '1 minute', INTERVAL '5 minutes') as window,
COUNT(*) as event_count
FROM event_stream
GROUP BY HOP(timestamp, INTERVAL '1 minute', INTERVAL '5 minutes');
-- Session window (10-minute inactivity gap)
SELECT
user_id,
SESSION(timestamp, INTERVAL '10 minutes') as session_id,
COUNT(*) as events_per_session
FROM user_events
GROUP BY user_id, SESSION(timestamp, INTERVAL '10 minutes');

Machine Learning

-- Register ONNX model
REGISTER MODEL fraud_detector FROM '/models/fraud_v1.onnx';
-- In-database inference (the alias fraud_score is defined in the SELECT list,
-- so it must be wrapped in a subquery before it can be filtered in WHERE)
SELECT transaction_id, fraud_score
FROM (
SELECT
transaction_id,
PREDICT(fraud_detector, amount, merchant_category, user_age) as fraud_score
FROM transactions
) scored
WHERE fraud_score > 0.8;
-- Batch prediction
INSERT INTO fraud_predictions
SELECT
id,
PREDICT(fraud_detector, amount, merchant_category, user_age) as score
FROM transactions
WHERE processed = false;

Graph Queries

-- Recursive CTE (find all descendants)
WITH RECURSIVE descendants AS (
SELECT id, name, manager_id, 0 AS depth
FROM employees
WHERE id = 1
UNION ALL
SELECT e.id, e.name, e.manager_id, d.depth + 1
FROM employees e
JOIN descendants d ON e.manager_id = d.id
)
SELECT * FROM descendants;
-- Shortest path
SELECT shortest_path(
graph => 'org_chart',
source => 1,
target => 100,
algorithm => 'dijkstra'
);
-- Cycle detection
SELECT detect_cycles('dependency_graph');

Geospatial Queries

-- Distance query
SELECT id, name
FROM locations
WHERE ST_Distance(coordinates, ST_Point(0, 0)) < 1000;
-- Contains query
SELECT * FROM locations
WHERE ST_Contains(
ST_Polygon('POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))'),
coordinates
);
-- K-Nearest Neighbors
SELECT id, name, ST_Distance(coordinates, ST_Point(5, 5)) as distance
FROM locations
ORDER BY distance
LIMIT 10;
-- Intersection
SELECT l1.name, l2.name
FROM locations l1, locations l2
WHERE ST_Intersects(l1.coordinates, l2.coordinates) AND l1.id < l2.id;
Full-Text Search

-- Basic full-text search
SELECT * FROM products
WHERE MATCH(name, description) AGAINST('laptop');
-- Boolean search
SELECT * FROM products
WHERE MATCH(name, description) AGAINST('+laptop -refurbished' IN BOOLEAN MODE);
-- Phrase search
SELECT * FROM products
WHERE MATCH(name, description) AGAINST('"gaming laptop"' IN BOOLEAN MODE);
-- Ranked results
SELECT
id,
name,
MATCH(name, description) AGAINST('laptop') as relevance
FROM products
WHERE MATCH(name, description) AGAINST('laptop')
ORDER BY relevance DESC;

Functions and Operators

Aggregate Functions

-- Standard aggregates
COUNT(*), SUM(column), AVG(column), MIN(column), MAX(column)
-- Statistical
STDDEV(column), VARIANCE(column)
-- Approximate (on samples)
APPROX_COUNT(*), APPROX_SUM(column), APPROX_AVG(column)
APPROX_PERCENTILE(column, percentile)

Window Functions

ROW_NUMBER() OVER (...)
RANK() OVER (...)
DENSE_RANK() OVER (...)
LAG(column, offset) OVER (...)
LEAD(column, offset) OVER (...)
FIRST_VALUE(column) OVER (...)
LAST_VALUE(column) OVER (...)

String Functions

CONCAT(str1, str2, ...)
SUBSTRING(str, start, length)
UPPER(str), LOWER(str)
TRIM(str), LTRIM(str), RTRIM(str)
LENGTH(str)
REPLACE(str, old, new)

Date/Time Functions

CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP
DATE_ADD(date, INTERVAL n unit)
DATE_SUB(date, INTERVAL n unit)
EXTRACT(part FROM date)
TIME_BUCKET(interval, timestamp)

Geospatial Functions

-- Constructors
ST_Point(x, y), ST_LineString(points), ST_Polygon(rings)
-- Measurements
ST_Distance(geom1, geom2)
ST_Area(geom)
ST_Length(geom)
-- Relationships
ST_Contains(geom1, geom2)
ST_Within(geom1, geom2)
ST_Intersects(geom1, geom2)
ST_Overlaps(geom1, geom2)
-- Transformations
ST_Transform(geom, srid)
ST_Buffer(geom, distance)

Cryptographic Functions

-- Hashing
SHA256(data), MD5(data)
-- Encryption (requires column encryption)
ENCRYPT(data, key, algorithm)
DECRYPT(data, key, algorithm)

System Tables

Information Schema

-- List tables
SELECT * FROM information_schema.tables;
-- List columns
SELECT * FROM information_schema.columns WHERE table_name = 'users';
-- List indexes
SELECT * FROM information_schema.indexes;

HeliosDB System Tables

-- Cache statistics
SELECT * FROM heliosdb_cache_stats;
-- Index recommendations
SELECT * FROM heliosdb_index_recommendations;
-- Slow queries
SELECT * FROM heliosdb_slow_queries ORDER BY duration DESC;
-- Replication lag
SELECT * FROM heliosdb_replication_status;
-- Shard distribution
SELECT * FROM heliosdb_shard_distribution;

Data Types

Numeric Types

  • INTEGER, BIGINT, SMALLINT
  • DECIMAL(p, s), NUMERIC(p, s)
  • FLOAT, DOUBLE

String Types

  • VARCHAR(n), CHAR(n)
  • TEXT
  • VARCHAR(n) ENCRYPTED (v3.0)

Date/Time Types

  • DATE
  • TIME
  • TIMESTAMP
  • INTERVAL

Boolean

  • BOOLEAN

Geospatial Types (v3.0)

  • GEOMETRY(type, srid)
  • GEOGRAPHY(type, srid)
  • POINT, LINESTRING, POLYGON

Special Types

  • JSON, JSONB
  • ARRAY
  • UUID

Transaction Control

-- Begin transaction
BEGIN;
START TRANSACTION;
-- Commit
COMMIT;
-- Rollback
ROLLBACK;
-- Savepoint
SAVEPOINT sp1;
ROLLBACK TO SAVEPOINT sp1;
RELEASE SAVEPOINT sp1;
-- Isolation levels
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;

Examples by Use Case

E-Commerce Platform

-- Create schema
CREATE TABLE products (
id INTEGER PRIMARY KEY,
name VARCHAR(200),
description TEXT,
price DECIMAL(10,2)
);
CREATE FULLTEXT INDEX fts_products ON products(name, description);
-- Search products
SELECT id, name, price,
MATCH(name, description) AGAINST('laptop') as relevance
FROM products
WHERE MATCH(name, description) AGAINST('laptop')
ORDER BY relevance DESC
LIMIT 10;

IoT Platform

-- Create time-series table
CREATE TABLE sensor_data (
timestamp TIMESTAMP,
sensor_id INTEGER,
temperature DOUBLE,
humidity DOUBLE,
PRIMARY KEY (timestamp, sensor_id)
) WITH (
timeseries_retention = '90 days',
timeseries_downsample = 'avg:5m'
);
-- Query aggregated data
SELECT
TIME_BUCKET('1 hour', timestamp) as hour,
AVG(temperature) as avg_temp,
MAX(temperature) as max_temp
FROM sensor_data
WHERE timestamp >= NOW() - INTERVAL '24 hours'
GROUP BY hour;

Multi-Tenant SaaS

-- Enable row-level security
CREATE TABLE tenant_data (
id INTEGER PRIMARY KEY,
tenant_id INTEGER,
data TEXT
);
CREATE POLICY tenant_isolation ON tenant_data
FOR ALL
USING (tenant_id = current_tenant_id());
ALTER TABLE tenant_data ENABLE ROW LEVEL SECURITY;

Performance Tips

  1. Use indexes wisely: Create indexes on columns used in WHERE, JOIN, ORDER BY
  2. Use materialized views: Pre-compute expensive aggregations
  3. Enable query caching: For repeated queries
  4. Use approximate queries: For exploratory analytics on large datasets
  5. Partition large tables: Use time-based or hash partitioning
  6. Use connection pooling: Enabled by default in v3.0

See Also