Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 662feab881 |  |
|  | 227c579147 |  |
2	.gitignore vendored Normal file
@@ -0,0 +1,2 @@
nostr_core_lib/
nips/
3	.gitmodules vendored Normal file
@@ -0,0 +1,3 @@
[submodule "nostr_core_lib"]
	path = nostr_core_lib
	url = https://git.laantungir.net/laantungir/nostr_core_lib.git
72	Makefile Normal file
@@ -0,0 +1,72 @@
# C-Relay Makefile

CC = gcc
CFLAGS = -Wall -Wextra -std=c99 -g -O2
INCLUDES = -I. -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl

# Source files
MAIN_SRC = src/main.c
NOSTR_CORE_LIB = nostr_core_lib/libnostr_core_x64.a

# Target binary
TARGET = src/main

# Default target
all: $(TARGET)

# Check if nostr_core_lib is built
$(NOSTR_CORE_LIB):
	@echo "Building nostr_core_lib..."
	cd nostr_core_lib && ./build.sh

# Build the relay
$(TARGET): $(MAIN_SRC) $(NOSTR_CORE_LIB)
	@echo "Compiling C-Relay..."
	$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(LIBS)
	@echo "Build complete: $(TARGET)"

# Run tests
test: $(TARGET)
	@echo "Running tests..."
	./tests/1_nip_test.sh

# Initialize database
init-db:
	@echo "Initializing database..."
	./db/init.sh --force

# Clean build artifacts
clean:
	rm -f $(TARGET)
	@echo "Clean complete"

# Clean everything including nostr_core_lib
clean-all: clean
	cd nostr_core_lib && make clean 2>/dev/null || true

# Install dependencies (Ubuntu/Debian)
install-deps:
	@echo "Installing dependencies..."
	sudo apt update
	sudo apt install -y build-essential libsqlite3-dev libssl-dev libcurl4-openssl-dev libsecp256k1-dev zlib1g-dev jq curl

# Help
help:
	@echo "C-Relay Build System"
	@echo ""
	@echo "Targets:"
	@echo "  all           Build the relay (default)"
	@echo "  test          Build and run tests"
	@echo "  init-db       Initialize the database"
	@echo "  clean         Clean build artifacts"
	@echo "  clean-all     Clean everything including dependencies"
	@echo "  install-deps  Install system dependencies"
	@echo "  help          Show this help"
	@echo ""
	@echo "Usage:"
	@echo "  make          # Build the relay"
	@echo "  make test     # Run tests"
	@echo "  make init-db  # Set up database"

.PHONY: all test init-db clean clean-all install-deps help
README.md
@@ -1,2 +1,4 @@
-A nostr relay in C.
+A nostr relay in C with sqlite on the back end.
228	db/README.md Normal file
@@ -0,0 +1,228 @@
# C Nostr Relay Database

This directory contains the SQLite database schema and initialization scripts for the C Nostr Relay implementation.

## Files

- **`schema.sql`** - Complete database schema based on nostr-rs-relay v18
- **`init.sh`** - Database initialization script
- **`c_nostr_relay.db`** - SQLite database file (created after running init.sh)

## Quick Start

1. **Initialize the database:**
   ```bash
   cd db
   ./init.sh
   ```

2. **Force reinitialize (removes existing database):**
   ```bash
   ./init.sh --force
   ```

3. **Initialize with optimization and info:**
   ```bash
   ./init.sh --info --optimize
   ```

## Database Schema

The schema is fully compatible with the Nostr protocol and includes:

### Core Tables

- **`event`** - Main event storage with all Nostr event data
- **`tag`** - Denormalized tag index for efficient queries
- **`user_verification`** - NIP-05 verification tracking
- **`account`** - User account management (optional)
- **`invoice`** - Lightning payment tracking (optional)

### Key Features

- ✅ **NIP-01 compliant** - Full basic protocol support
- ✅ **Replaceable events** - Supports kinds 0, 3, 10000-19999
- ✅ **Parameterized replaceable** - Supports kinds 30000-39999 with `d` tags
- ✅ **Event deletion** - NIP-09 soft deletion with `hidden` column
- ✅ **Event expiration** - NIP-40 automatic cleanup
- ✅ **Authentication** - NIP-42 client authentication
- ✅ **NIP-05 verification** - Domain-based identity verification
- ✅ **Performance optimized** - Comprehensive indexing strategy

### Schema Version

Current version: **v18** (compatible with nostr-rs-relay v18)

## Database Structure

### Event Storage
```sql
CREATE TABLE event (
    id INTEGER PRIMARY KEY,
    event_hash BLOB NOT NULL,      -- 32-byte SHA256 hash
    first_seen INTEGER NOT NULL,   -- relay receive timestamp
    created_at INTEGER NOT NULL,   -- event creation timestamp
    expires_at INTEGER,            -- NIP-40 expiration
    author BLOB NOT NULL,          -- 32-byte pubkey
    delegated_by BLOB,             -- NIP-26 delegator
    kind INTEGER NOT NULL,         -- event kind
    hidden INTEGER DEFAULT FALSE,  -- soft deletion flag
    content TEXT NOT NULL          -- complete JSON event
);
```

### Tag Indexing
```sql
CREATE TABLE tag (
    id INTEGER PRIMARY KEY,
    event_id INTEGER NOT NULL,
    name TEXT,                     -- tag name ("e", "p", etc.)
    value TEXT,                    -- tag value
    created_at INTEGER NOT NULL,   -- denormalized for performance
    kind INTEGER NOT NULL          -- denormalized for performance
);
```

## Performance Features

### Optimized Indexes
- **Hash-based lookups** - `event_hash_index` for O(1) event retrieval
- **Author queries** - `author_index`, `author_created_at_index`
- **Kind filtering** - `kind_index`, `kind_created_at_index`
- **Tag searching** - `tag_covering_index` for efficient tag queries
- **Composite queries** - Multi-column indexes for complex filters

### Query Optimization
- **Denormalized tags** - Includes `kind` and `created_at` in tag table
- **Binary storage** - BLOBs for hex data (pubkeys, hashes)
- **WAL mode** - Write-Ahead Logging for concurrent access (a C sketch follows this list)
- **Automatic cleanup** - Triggers for data integrity
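Since WAL and the related pragmas are per-connection settings, a C consumer of this database would apply them at open time. A minimal sketch using the standard sqlite3 C API; the path and helper name are illustrative, not part of the relay code:

```c
#include <sqlite3.h>
#include <stdio.h>

/* Sketch: open the relay database and apply the pragmas described above.
   Path and error handling are illustrative assumptions. */
static int open_relay_db(sqlite3 **db) {
    if (sqlite3_open("db/c_nostr_relay.db", db) != SQLITE_OK) {
        fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(*db));
        return -1;
    }
    sqlite3_exec(*db, "PRAGMA journal_mode=WAL;", NULL, NULL, NULL);   /* concurrent readers */
    sqlite3_exec(*db, "PRAGMA synchronous=NORMAL;", NULL, NULL, NULL); /* WAL-safe durability */
    sqlite3_exec(*db, "PRAGMA foreign_keys=ON;", NULL, NULL, NULL);    /* per-connection flag */
    return 0;
}
```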
## Usage Examples

### Basic Operations

1. **Insert an event:**
   ```sql
   INSERT INTO event (event_hash, first_seen, created_at, author, kind, content)
   VALUES (?, ?, ?, ?, ?, ?);
   ```

2. **Query by author:**
   ```sql
   SELECT content FROM event
   WHERE author = ? AND hidden != TRUE
   ORDER BY created_at DESC;
   ```

3. **Filter by tags:**
   ```sql
   SELECT e.content FROM event e
   JOIN tag t ON e.id = t.event_id
   WHERE t.name = 'p' AND t.value = ? AND e.hidden != TRUE;
   ```

### Advanced Queries

1. **Get replaceable event (latest only):**
   ```sql
   SELECT content FROM event
   WHERE author = ? AND kind = ? AND hidden != TRUE
   ORDER BY created_at DESC LIMIT 1;
   ```

2. **Tag-based filtering (NIP-01 filters):**
   ```sql
   SELECT e.content FROM event e
   WHERE e.id IN (
       SELECT t.event_id FROM tag t
       WHERE t.name = ? AND t.value IN (?, ?, ?)
   ) AND e.hidden != TRUE;
   ```

## Maintenance

### Regular Operations

1. **Check database integrity:**
   ```bash
   sqlite3 c_nostr_relay.db "PRAGMA integrity_check;"
   ```

2. **Optimize database:**
   ```bash
   sqlite3 c_nostr_relay.db "PRAGMA optimize; VACUUM; ANALYZE;"
   ```

3. **Clean expired events:**
   ```sql
   DELETE FROM event WHERE expires_at <= strftime('%s', 'now');
   ```

### Monitoring

1. **Database size:**
   ```bash
   ls -lh c_nostr_relay.db
   ```

2. **Table statistics:**
   ```sql
   SELECT name, COUNT(*) as count FROM (
       SELECT 'events' as name FROM event UNION ALL
       SELECT 'tags' as name FROM tag UNION ALL
       SELECT 'verifications' as name FROM user_verification
   ) GROUP BY name;
   ```

## Migration Support

The schema includes a migration system for future updates:

```sql
CREATE TABLE schema_info (
    version INTEGER PRIMARY KEY,
    applied_at INTEGER NOT NULL,
    description TEXT
);
```
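For illustration, recording an applied migration could look like this; the version number and description are hypothetical:

```sql
-- Hypothetical migration record
INSERT INTO schema_info (version, applied_at, description)
VALUES (19, strftime('%s', 'now'), 'example: add composite tag index');
```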
## Security Considerations

1. **Input validation** - Always validate event JSON and signatures (a sketch follows this list)
2. **Rate limiting** - Implement at application level
3. **Access control** - Use `account` table for permissions
4. **Backup strategy** - Regular database backups recommended
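As a sketch of the first point, a minimal required-field check with cJSON before any insert; the include path assumes the vendored copy under `nostr_core_lib/cjson`, and signature verification itself is left to the relay's crypto layer:

```c
#include <cjson/cJSON.h>  /* assumption: vendored cJSON header path */

/* Sketch: reject events missing required NIP-01 fields before insert. */
static int event_has_required_fields(const cJSON *event) {
    static const char *required[] = {
        "id", "pubkey", "created_at", "kind", "tags", "content", "sig"
    };
    for (int i = 0; i < 7; i++) {
        if (!cJSON_GetObjectItem(event, required[i])) {
            return 0;  /* missing field: reject */
        }
    }
    return 1;
}
```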
## Compatibility

- **SQLite version** - Requires SQLite 3.8.0+
- **nostr-rs-relay** - Schema compatible with v18
- **NIPs supported** - 01, 02, 05, 09, 10, 11, 26, 40, 42
- **C libraries** - Compatible with sqlite3 C API

## Troubleshooting

### Common Issues

1. **Database locked error:**
   - Ensure proper connection closing in your C code
   - Check for long-running transactions

2. **Performance issues:**
   - Run `PRAGMA optimize;` regularly
   - Consider `VACUUM` if the database grew significantly

3. **Schema errors:**
   - Verify SQLite version compatibility
   - Check foreign key constraints

### Getting Help

- Check the main project README for C implementation details
- Review nostr-rs-relay documentation for the reference implementation
- Consult Nostr NIPs for protocol specifications

## License

This database schema is part of the C Nostr Relay project and follows the same license terms.
BIN	db/c_nostr_relay.db Normal file
Binary file not shown.
BIN	db/c_nostr_relay.db-shm Normal file
Binary file not shown.
BIN	db/c_nostr_relay.db-wal Normal file
Binary file not shown.
234	db/init.sh Executable file
@@ -0,0 +1,234 @@
#!/bin/bash

# C Nostr Relay Database Initialization Script
# Creates and initializes the SQLite database with proper schema

set -e  # Exit on any error

# Configuration
DB_DIR="$(dirname "$0")"
DB_NAME="c_nostr_relay.db"
DB_PATH="${DB_DIR}/${DB_NAME}"
SCHEMA_FILE="${DB_DIR}/schema.sql"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check if SQLite3 is installed
check_sqlite() {
    if ! command -v sqlite3 &> /dev/null; then
        log_error "sqlite3 is not installed. Please install it first:"
        echo "  Ubuntu/Debian: sudo apt-get install sqlite3"
        echo "  CentOS/RHEL:   sudo yum install sqlite"
        echo "  macOS:         brew install sqlite3"
        exit 1
    fi

    local version=$(sqlite3 --version | cut -d' ' -f1)
    log_info "Using SQLite version: $version"
}

# Create database directory if it doesn't exist
create_db_directory() {
    if [ ! -d "$DB_DIR" ]; then
        log_info "Creating database directory: $DB_DIR"
        mkdir -p "$DB_DIR"
    fi
}

# Backup existing database if it exists
backup_existing_db() {
    if [ -f "$DB_PATH" ]; then
        local backup_path="${DB_PATH}.backup.$(date +%Y%m%d_%H%M%S)"
        log_warning "Existing database found. Creating backup: $backup_path"
        cp "$DB_PATH" "$backup_path"
    fi
}

# Initialize the database with schema
init_database() {
    log_info "Initializing database: $DB_PATH"

    if [ ! -f "$SCHEMA_FILE" ]; then
        log_error "Schema file not found: $SCHEMA_FILE"
        exit 1
    fi

    # Remove existing database if --force flag is used
    if [ "$1" = "--force" ] && [ -f "$DB_PATH" ]; then
        log_warning "Force flag detected. Removing existing database."
        rm -f "$DB_PATH"
    fi

    # Create the database and apply schema
    log_info "Applying schema from: $SCHEMA_FILE"
    if sqlite3 "$DB_PATH" < "$SCHEMA_FILE"; then
        log_success "Database schema applied successfully"
    else
        log_error "Failed to apply database schema"
        exit 1
    fi
}

# Verify database integrity
verify_database() {
    log_info "Verifying database integrity..."

    # Check if database file exists and is not empty
    if [ ! -s "$DB_PATH" ]; then
        log_error "Database file is empty or doesn't exist"
        exit 1
    fi

    # Run SQLite integrity check
    local integrity_result=$(sqlite3 "$DB_PATH" "PRAGMA integrity_check;")
    if [ "$integrity_result" = "ok" ]; then
        log_success "Database integrity check passed"
    else
        log_error "Database integrity check failed: $integrity_result"
        exit 1
    fi

    # Verify schema version
    local schema_version=$(sqlite3 "$DB_PATH" "PRAGMA user_version;")
    log_info "Database schema version: $schema_version"

    # Check that main tables exist
    local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('events', 'schema_info');")
    if [ "$table_count" -eq 2 ]; then
        log_success "Core tables created successfully"
    else
        log_error "Missing core tables (expected 2, found $table_count)"
        exit 1
    fi
}

# Display database information
show_db_info() {
    log_info "Database Information:"
    echo "  Location: $DB_PATH"
    echo "  Size: $(du -h "$DB_PATH" | cut -f1)"

    log_info "Database Tables:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" | sed 's/^/  - /'

    log_info "Database Indexes:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='index' AND name NOT LIKE 'sqlite_%' ORDER BY name;" | sed 's/^/  - /'

    log_info "Database Views:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='view' ORDER BY name;" | sed 's/^/  - /'
}

# Run database optimization
optimize_database() {
    log_info "Running database optimization..."
    sqlite3 "$DB_PATH" "PRAGMA optimize; VACUUM; ANALYZE;"
    log_success "Database optimization completed"
}

# Print usage information
print_usage() {
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Initialize SQLite database for C Nostr Relay"
    echo ""
    echo "Options:"
    echo "  --force     Remove existing database before initialization"
    echo "  --info      Show database information after initialization"
    echo "  --optimize  Run database optimization after initialization"
    echo "  --help      Show this help message"
    echo ""
    echo "Examples:"
    echo "  $0                    # Initialize database (with backup if exists)"
    echo "  $0 --force            # Force reinitialize database"
    echo "  $0 --info --optimize  # Initialize with info and optimization"
}

# Main execution
main() {
    local force_flag=false
    local show_info=false
    local optimize=false

    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            --force)
                force_flag=true
                shift
                ;;
            --info)
                show_info=true
                shift
                ;;
            --optimize)
                optimize=true
                shift
                ;;
            --help)
                print_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                print_usage
                exit 1
                ;;
        esac
    done

    log_info "Starting C Nostr Relay database initialization..."

    # Execute initialization steps
    check_sqlite
    create_db_directory

    if [ "$force_flag" = false ]; then
        backup_existing_db
    fi

    if [ "$force_flag" = true ]; then
        init_database --force
    else
        init_database
    fi

    verify_database

    if [ "$optimize" = true ]; then
        optimize_database
    fi

    if [ "$show_info" = true ]; then
        show_db_info
    fi

    log_success "Database initialization completed successfully!"
    echo ""
    echo "Database ready at: $DB_PATH"
    echo "You can now start your C Nostr Relay application."
}

# Execute main function with all arguments
main "$@"
90	db/schema.sql Normal file
@@ -0,0 +1,90 @@
-- C Nostr Relay Database Schema
-- SQLite schema for storing Nostr events with JSON tags support

-- Schema version tracking
PRAGMA user_version = 2;

-- Enable foreign key support
PRAGMA foreign_keys = ON;

-- Optimize for performance
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA cache_size = 10000;

-- Core events table with hybrid single-table design
CREATE TABLE events (
    id TEXT PRIMARY KEY,              -- Nostr event ID (hex string)
    pubkey TEXT NOT NULL,             -- Public key of event author (hex string)
    created_at INTEGER NOT NULL,      -- Event creation timestamp (Unix timestamp)
    kind INTEGER NOT NULL,            -- Event kind (0-65535)
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,            -- Event content (text content only)
    sig TEXT NOT NULL,                -- Event signature (hex string)
    tags JSON NOT NULL DEFAULT '[]',  -- Event tags as JSON array
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))  -- When relay received event
);

-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);

-- Schema information table
CREATE TABLE schema_info (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Insert schema metadata
INSERT INTO schema_info (key, value) VALUES
    ('version', '2'),
    ('description', 'Hybrid single-table Nostr relay schema with JSON tags'),
    ('created_at', strftime('%s', 'now'));

-- Helper views for common queries
CREATE VIEW recent_events AS
SELECT id, pubkey, created_at, kind, event_type, content
FROM events
WHERE event_type != 'ephemeral'
ORDER BY created_at DESC
LIMIT 1000;

CREATE VIEW event_stats AS
SELECT
    event_type,
    COUNT(*) as count,
    AVG(length(content)) as avg_content_length,
    MIN(created_at) as earliest,
    MAX(created_at) as latest
FROM events
GROUP BY event_type;

-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour
CREATE TRIGGER cleanup_ephemeral_events
AFTER INSERT ON events
WHEN NEW.event_type = 'ephemeral'
BEGIN
    DELETE FROM events
    WHERE event_type = 'ephemeral'
    AND first_seen < (strftime('%s', 'now') - 3600);
END;

-- Replaceable event handling trigger
CREATE TRIGGER handle_replaceable_events
AFTER INSERT ON events
WHEN NEW.event_type = 'replaceable'
BEGIN
    DELETE FROM events
    WHERE pubkey = NEW.pubkey
    AND kind = NEW.kind
    AND event_type = 'replaceable'
    AND id != NEW.id;
END;
337	docs/advanced_schema_design.md Normal file
@@ -0,0 +1,337 @@
# Advanced Nostr Relay Schema Design

## Overview

This document outlines the design for an advanced multi-table schema that enforces Nostr protocol compliance at the database level, with separate tables for different event types based on their storage and replacement characteristics.

## Event Type Classification

Based on the Nostr specification, events are classified into four categories:

### 1. Regular Events
- **Kinds**: `1000 <= n < 10000` || `4 <= n < 45` || `n == 1` || `n == 2`
- **Storage Policy**: All events stored permanently
- **Examples**: Text notes (1), Reposts (6), Reactions (7), Direct Messages (4)

### 2. Replaceable Events
- **Kinds**: `10000 <= n < 20000` || `n == 0` || `n == 3`
- **Storage Policy**: Only latest per `(pubkey, kind)` combination
- **Replacement Logic**: Latest `created_at`, then lowest `id` lexically
- **Examples**: Metadata (0), Contacts (3), Mute List (10000)

### 3. Ephemeral Events
- **Kinds**: `20000 <= n < 30000`
- **Storage Policy**: Not expected to be stored (optional temporary storage)
- **Examples**: Typing indicators, presence updates, ephemeral messages

### 4. Addressable Events
- **Kinds**: `30000 <= n < 40000`
- **Storage Policy**: Only latest per `(pubkey, kind, d_tag)` combination
- **Replacement Logic**: Same as replaceable events
- **Examples**: Long-form content (30023), Application-specific data

## SQLite JSON Capabilities Research

SQLite provides powerful JSON functions that could be leveraged for tag storage:

### Core JSON Functions
```sql
-- Extract specific values
json_extract(column, '$.path')

-- Iterate through arrays
json_each(json_array_column)

-- Flatten nested structures
json_tree(json_column)

-- Validate JSON structure
json_valid(column)

-- Array operations
json_array_length(column)
json_extract(column, '$[0]')  -- First element
```
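As a small illustration (a sketch, not part of the proposal itself), `json_valid` can also back a CHECK constraint so malformed tag arrays are rejected at write time:

```sql
-- Sketch: reject malformed JSON at insert time
CREATE TABLE example_events (
    id   TEXT PRIMARY KEY,
    tags TEXT NOT NULL DEFAULT '[]' CHECK (json_valid(tags))
);
```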
### Tag Query Examples

#### Find all 'e' tag references:
```sql
SELECT
    id,
    json_extract(value, '$[1]') as referenced_event_id,
    json_extract(value, '$[2]') as relay_hint,
    json_extract(value, '$[3]') as marker
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 'e';
```

#### Find events with specific hashtags:
```sql
SELECT id, content
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 't'
AND json_extract(value, '$[1]') = 'bitcoin';
```

#### Extract 'd' tag for addressable events:
```sql
SELECT
    id,
    json_extract(value, '$[1]') as d_tag_value
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 'd'
LIMIT 1;
```

### JSON Functional Indexes
```sql
-- Index on hashtags
CREATE INDEX idx_hashtags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';

-- Index on 'd' tags for addressable events
CREATE INDEX idx_d_tags ON events_addressable(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'd';
```
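One caveat on the index definitions above: SQLite's JSON path syntax has no `[*]` wildcard, so those expression indexes would be rejected as written; tag matching in stock SQLite typically runs through `json_each` at query time instead. A minimal sketch of that query-time pattern:

```sql
-- Sketch: query-time tag matching via json_each (no wildcard paths needed)
SELECT e.id
FROM events e
WHERE EXISTS (
    SELECT 1 FROM json_each(e.tags) t
    WHERE json_extract(t.value, '$[0]') = 'e'
      AND json_extract(t.value, '$[1]') = :referenced_event_id
);
```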
## Proposed Schema Design

### Option 1: Separate Tables with JSON Tags

```sql
-- Regular Events (permanent storage)
CREATE TABLE events_regular (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    first_seen INTEGER DEFAULT (strftime('%s', 'now')),
    CONSTRAINT kind_regular CHECK (
        (kind >= 1000 AND kind < 10000) OR
        (kind >= 4 AND kind < 45) OR
        kind = 1 OR kind = 2
    )
);

-- Replaceable Events (latest per pubkey+kind)
CREATE TABLE events_replaceable (
    pubkey TEXT NOT NULL,
    kind INTEGER NOT NULL,
    id TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
    PRIMARY KEY (pubkey, kind),
    CONSTRAINT kind_replaceable CHECK (
        (kind >= 10000 AND kind < 20000) OR
        kind = 0 OR kind = 3
    )
);

-- Ephemeral Events (temporary/optional storage)
CREATE TABLE events_ephemeral (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    expires_at INTEGER DEFAULT (strftime('%s', 'now', '+1 hour')),
    CONSTRAINT kind_ephemeral CHECK (
        kind >= 20000 AND kind < 30000
    )
);

-- Addressable Events (latest per pubkey+kind+d_tag)
CREATE TABLE events_addressable (
    pubkey TEXT NOT NULL,
    kind INTEGER NOT NULL,
    d_tag TEXT NOT NULL,
    id TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
    PRIMARY KEY (pubkey, kind, d_tag),
    CONSTRAINT kind_addressable CHECK (
        kind >= 30000 AND kind < 40000
    )
);
```

### Indexes for Performance

```sql
-- Regular events indexes
CREATE INDEX idx_regular_pubkey ON events_regular(pubkey);
CREATE INDEX idx_regular_kind ON events_regular(kind);
CREATE INDEX idx_regular_created_at ON events_regular(created_at);
CREATE INDEX idx_regular_kind_created_at ON events_regular(kind, created_at);

-- Replaceable events indexes
CREATE INDEX idx_replaceable_created_at ON events_replaceable(created_at);
CREATE INDEX idx_replaceable_id ON events_replaceable(id);

-- Ephemeral events indexes
CREATE INDEX idx_ephemeral_expires_at ON events_ephemeral(expires_at);
CREATE INDEX idx_ephemeral_pubkey ON events_ephemeral(pubkey);

-- Addressable events indexes
CREATE INDEX idx_addressable_created_at ON events_addressable(created_at);
CREATE INDEX idx_addressable_id ON events_addressable(id);

-- JSON tag indexes (examples)
CREATE INDEX idx_regular_e_tags ON events_regular(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_regular_p_tags ON events_regular(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```

### Option 2: Unified Tag Table Approach

```sql
-- Unified tag storage (alternative to JSON)
CREATE TABLE tags_unified (
    event_id TEXT NOT NULL,
    event_type TEXT NOT NULL,    -- 'regular', 'replaceable', 'ephemeral', 'addressable'
    tag_index INTEGER NOT NULL,  -- Position in tag array
    name TEXT NOT NULL,
    value TEXT NOT NULL,
    param_2 TEXT,                -- Third element if present
    param_3 TEXT,                -- Fourth element if present
    param_json TEXT,             -- JSON for additional parameters
    PRIMARY KEY (event_id, tag_index)
);

CREATE INDEX idx_tags_name_value ON tags_unified(name, value);
CREATE INDEX idx_tags_event_type ON tags_unified(event_type);
```

## Implementation Strategy

### 1. Kind Classification Function (C Code)
```c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_INVALID
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }

    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }

    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }

    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }

    return EVENT_TYPE_INVALID;
}
```

### 2. Replacement Logic for Replaceable Events
```sql
-- Trigger for replaceable events
CREATE TRIGGER replace_event_on_insert
BEFORE INSERT ON events_replaceable
FOR EACH ROW
WHEN EXISTS (
    SELECT 1 FROM events_replaceable
    WHERE pubkey = NEW.pubkey AND kind = NEW.kind
)
BEGIN
    DELETE FROM events_replaceable
    WHERE pubkey = NEW.pubkey
    AND kind = NEW.kind
    AND (
        created_at < NEW.created_at OR
        (created_at = NEW.created_at AND id > NEW.id)
    );
END;
```

### 3. D-Tag Extraction for Addressable Events
```c
char* extract_d_tag(cJSON* tags) {
    if (!tags || !cJSON_IsArray(tags)) {
        return NULL;
    }

    cJSON* tag;
    cJSON_ArrayForEach(tag, tags) {
        if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
            cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
            cJSON* tag_value = cJSON_GetArrayItem(tag, 1);

            if (cJSON_IsString(tag_name) && cJSON_IsString(tag_value)) {
                if (strcmp(cJSON_GetStringValue(tag_name), "d") == 0) {
                    return strdup(cJSON_GetStringValue(tag_value));
                }
            }
        }
    }

    return strdup(""); // Default empty d-tag
}
```

## Advantages of This Design

### 1. Protocol Compliance
- **Enforced at DB level**: Schema constraints prevent invalid event storage
- **Automatic replacement**: Triggers handle replaceable/addressable event logic
- **Type safety**: Separate tables ensure correct handling per event type

### 2. Performance Benefits
- **Targeted indexes**: Each table optimized for its access patterns
- **Reduced storage**: Ephemeral events can be auto-expired
- **Query optimization**: SQLite can optimize queries per table structure

### 3. JSON Tag Benefits
- **Atomic storage**: Tags stored with their event
- **Rich querying**: SQLite JSON functions enable complex tag queries
- **Schema flexibility**: Can handle arbitrary tag structures
- **Functional indexes**: Index specific tag patterns efficiently

## Migration Strategy

1. **Phase 1**: Create new schema alongside existing
2. **Phase 2**: Implement kind classification and routing logic
3. **Phase 3**: Migrate existing data to appropriate tables
4. **Phase 4**: Update application logic to use new tables
5. **Phase 5**: Drop old schema after verification

## Next Steps for Implementation

1. **Prototype JSON performance**: Create test database with sample data
2. **Benchmark query patterns**: Compare JSON vs normalized approaches
3. **Implement kind classification**: Add routing logic to C code
4. **Create migration scripts**: Handle existing data transformation
5. **Update test suite**: Verify compliance with new schema
416	docs/final_schema_recommendation.md Normal file
@@ -0,0 +1,416 @@
# Final Schema Recommendation: Hybrid Single Table Approach

## Executive Summary

After analyzing the subscription query complexity, **the multi-table approach creates more problems than it solves**. REQ filters don't align with storage semantics - clients filter by kind, author, and tags regardless of event type classification.

**Recommendation: Modified Single Table with Event Type Classification**

## The Multi-Table Problem

### REQ Filter Reality Check
- Clients send: `{"kinds": [1, 0, 30023], "authors": ["pubkey"], "#p": ["target"]}`
- Multi-table requires: 3 separate queries + UNION + complex ordering
- Single table requires: 1 query with simple WHERE conditions

### Query Complexity Explosion
```sql
-- Multi-table nightmare for a simple filter
WITH results AS (
    SELECT * FROM events_regular WHERE kind = 1 AND pubkey = ?
    UNION ALL
    SELECT * FROM events_replaceable WHERE kind = 0 AND pubkey = ?
    UNION ALL
    SELECT * FROM events_addressable WHERE kind = 30023 AND pubkey = ?
)
SELECT r.* FROM results r
JOIN multiple_tag_tables t ON complex_conditions
ORDER BY created_at DESC, id ASC LIMIT ?;

-- vs. single-table simplicity
SELECT e.* FROM events e, json_each(e.tags) t
WHERE e.kind IN (1, 0, 30023)
AND e.pubkey = ?
AND json_extract(t.value, '$[0]') = 'p'
AND json_extract(t.value, '$[1]') = ?
ORDER BY e.created_at DESC, e.id ASC LIMIT ?;
```

## Recommended Schema: Hybrid Approach

### Core Design Philosophy
- **Single table for REQ query simplicity**
- **Event type classification for protocol compliance**
- **JSON tags for atomic storage and rich querying**
- **Partial unique constraints for replacement logic**

### Schema Definition

```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),

    -- Additional fields for addressable events
    d_tag TEXT GENERATED ALWAYS AS (
        CASE
            WHEN event_type = 'addressable' THEN
                json_extract(tags, '$[*][1]')
                FROM json_each(tags)
                WHERE json_extract(value, '$[0]') = 'd'
                LIMIT 1
            ELSE NULL
        END
    ) STORED,

    -- Replacement tracking
    replaced_at INTEGER,

    -- Protocol compliance constraints
    CONSTRAINT unique_replaceable
        UNIQUE (pubkey, kind)
        WHERE event_type = 'replaceable',

    CONSTRAINT unique_addressable
        UNIQUE (pubkey, kind, d_tag)
        WHERE event_type = 'addressable' AND d_tag IS NOT NULL
);
```
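Two details of this definition are aspirational rather than literal SQLite: generated columns cannot contain subqueries (so `d_tag` would have to be populated by the application, e.g. via the `extract_d_tag` helper, or by a trigger), and table constraints do not accept `WHERE` clauses. A minimal sketch of the same uniqueness rules using partial unique indexes, which SQLite does support:

```sql
-- Partial unique indexes standing in for the WHERE'd constraints above
CREATE UNIQUE INDEX idx_unique_replaceable
    ON events(pubkey, kind)
    WHERE event_type = 'replaceable';

CREATE UNIQUE INDEX idx_unique_addressable
    ON events(pubkey, kind, d_tag)
    WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```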
### Event Type Classification Function

```sql
-- Function to determine event type from kind
CREATE VIEW event_type_lookup AS
SELECT
    CASE
        WHEN (kind >= 1000 AND kind < 10000) OR
             (kind >= 4 AND kind < 45) OR
             kind = 1 OR kind = 2 THEN 'regular'
        WHEN (kind >= 10000 AND kind < 20000) OR
             kind = 0 OR kind = 3 THEN 'replaceable'
        WHEN kind >= 20000 AND kind < 30000 THEN 'ephemeral'
        WHEN kind >= 30000 AND kind < 40000 THEN 'addressable'
        ELSE 'unknown'
    END as event_type,
    kind
FROM (
    -- Generate all possible kind values for lookup
    WITH RECURSIVE kinds(kind) AS (
        SELECT 0
        UNION ALL
        SELECT kind + 1 FROM kinds WHERE kind < 65535
    )
    SELECT kind FROM kinds
);
```

### Performance Indexes

```sql
-- Core query patterns
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common filters
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_type_created_at ON events(event_type, created_at DESC);

-- JSON tag indexes for common patterns
CREATE INDEX idx_events_e_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_events_p_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';

CREATE INDEX idx_events_hashtags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';

-- Addressable events d_tag index
CREATE INDEX idx_events_d_tag ON events(d_tag)
WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```

### Replacement Logic Implementation

#### Replaceable Events Trigger
```sql
CREATE TRIGGER handle_replaceable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'replaceable'
BEGIN
    -- Delete older replaceable events with same pubkey+kind
    DELETE FROM events
    WHERE event_type = 'replaceable'
    AND pubkey = NEW.pubkey
    AND kind = NEW.kind
    AND (
        created_at < NEW.created_at OR
        (created_at = NEW.created_at AND id > NEW.id)
    );
END;
```

#### Addressable Events Trigger
```sql
CREATE TRIGGER handle_addressable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'addressable'
BEGIN
    -- Delete older addressable events with same pubkey+kind+d_tag
    DELETE FROM events
    WHERE event_type = 'addressable'
    AND pubkey = NEW.pubkey
    AND kind = NEW.kind
    AND d_tag = NEW.d_tag
    AND (
        created_at < NEW.created_at OR
        (created_at = NEW.created_at AND id > NEW.id)
    );
END;
```

## Implementation Strategy

### C Code Integration

#### Event Type Classification
```c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_UNKNOWN
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }
    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }
    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }
    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }
    return EVENT_TYPE_UNKNOWN;
}

const char* event_type_to_string(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR:     return "regular";
        case EVENT_TYPE_REPLACEABLE: return "replaceable";
        case EVENT_TYPE_EPHEMERAL:   return "ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "addressable";
        default:                     return "unknown";
    }
}
```

#### Simplified Event Storage
```c
int store_event(cJSON* event) {
    // Extract fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
    cJSON* kind = cJSON_GetObjectItem(event, "kind");
    cJSON* content = cJSON_GetObjectItem(event, "content");
    cJSON* sig = cJSON_GetObjectItem(event, "sig");

    // Classify event type
    event_type_t type = classify_event_kind(cJSON_GetNumberValue(kind));

    // Serialize tags to JSON
    cJSON* tags = cJSON_GetObjectItem(event, "tags");
    char* tags_json = cJSON_Print(tags ? tags : cJSON_CreateArray());

    // Single INSERT statement - database handles replacement via triggers
    const char* sql =
        "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        free(tags_json);
        return -1;
    }

    sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
    sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
    sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);

    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);
    free(tags_json);

    return (rc == SQLITE_DONE) ? 0 : -1;
}
```

#### Simple REQ Query Building
```c
char* build_filter_query(cJSON* filter) {
    // Build single query against events table
    // Much simpler than multi-table approach

    GString* query = g_string_new("SELECT * FROM events WHERE 1=1");

    // Handle ids filter
    cJSON* ids = cJSON_GetObjectItem(filter, "ids");
    if (ids && cJSON_IsArray(ids)) {
        g_string_append(query, " AND id IN (");
        // Add parameter placeholders
        g_string_append(query, ")");
    }

    // Handle authors filter
    cJSON* authors = cJSON_GetObjectItem(filter, "authors");
    if (authors && cJSON_IsArray(authors)) {
        g_string_append(query, " AND pubkey IN (");
        // Add parameter placeholders
        g_string_append(query, ")");
    }

    // Handle kinds filter
    cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
    if (kinds && cJSON_IsArray(kinds)) {
        g_string_append(query, " AND kind IN (");
        // Add parameter placeholders
        g_string_append(query, ")");
    }

    // Handle tag filters (#e, #p, etc.)
    cJSON* item;
    cJSON_ArrayForEach(item, filter) {
        char* key = item->string;
        if (key && key[0] == '#' && strlen(key) == 2) {
            char tag_name = key[1];
            g_string_append_printf(query,
                " AND EXISTS (SELECT 1 FROM json_each(tags) "
                "WHERE json_extract(value, '$[0]') = '%c' "
                "AND json_extract(value, '$[1]') IN (", tag_name);
            // Add parameter placeholders
            g_string_append(query, "))");
        }
    }

    // Handle time range
    cJSON* since = cJSON_GetObjectItem(filter, "since");
    if (since) {
        g_string_append(query, " AND created_at >= ?");
    }

    cJSON* until = cJSON_GetObjectItem(filter, "until");
    if (until) {
        g_string_append(query, " AND created_at <= ?");
    }

    // Standard ordering and limit
    g_string_append(query, " ORDER BY created_at DESC, id ASC");

    cJSON* limit = cJSON_GetObjectItem(filter, "limit");
    if (limit) {
        g_string_append(query, " LIMIT ?");
    }

    return g_string_free(query, FALSE);
}
```
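One caveat and a usage sketch: `GString` comes from GLib, which the Makefile above does not link, so this builder reads as pseudocode for now. Assuming it is kept, the returned SQL would be consumed roughly like this (the binding order is an assumption, since placeholder insertion is elided above):

```c
/* Hypothetical usage sketch for one REQ filter. */
char* sql = build_filter_query(filter);
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) == SQLITE_OK) {
    /* bind ids, authors, kinds, tag values, since/until, limit
       in the order their placeholders were appended */
    while (sqlite3_step(stmt) == SQLITE_ROW) {
        /* serialize each row back into a Nostr EVENT message */
    }
    sqlite3_finalize(stmt);
}
g_free(sql);  /* g_string_free(..., FALSE) transfers ownership to the caller */
```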
## Benefits of This Approach

### 1. Query Simplicity
- ✅ Single table = simple REQ queries
- ✅ No UNION complexity
- ✅ Familiar SQL patterns
- ✅ Easy LIMIT and ORDER BY handling

### 2. Protocol Compliance
- ✅ Event type classification enforced
- ✅ Replacement logic via triggers
- ✅ Unique constraints prevent duplicates
- ✅ Proper handling of all event types

### 3. Performance
- ✅ Unified indexes across all events
- ✅ No join overhead for basic queries
- ✅ JSON tag indexes for complex filters
- ✅ Single table scan for cross-kind queries

### 4. Implementation Simplicity
- ✅ Minimal changes from current code
- ✅ Database handles replacement logic
- ✅ Simple event storage function
- ✅ No complex routing logic needed

### 5. Future Flexibility
- ✅ Can add columns for new event types
- ✅ Can split tables later if needed
- ✅ Easy to add new indexes
- ✅ Extensible constraint system

## Migration Path

### Phase 1: Schema Update
1. Add `event_type` column to existing events table
2. Add JSON `tags` column
3. Create classification triggers
4. Add partial unique constraints (a DDL sketch follows this list)
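A minimal sketch of the Phase 1 DDL, assuming the pre-migration table keeps the legacy `event` name described in the implementation plan:

```sql
-- Sketch of Phase 1 against the legacy event table (names assumed)
ALTER TABLE event ADD COLUMN event_type TEXT;
ALTER TABLE event ADD COLUMN tags JSON NOT NULL DEFAULT '[]';

-- The "partial unique constraints" arrive as partial indexes:
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_replaceable
    ON event(pubkey, kind) WHERE event_type = 'replaceable';
```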
### Phase 2: Data Migration
1. Classify existing events by kind
2. Convert existing tag table data to JSON
3. Verify constraint compliance
4. Update indexes

### Phase 3: Code Updates
1. Update event storage to use new schema
2. Simplify REQ query building
3. Remove tag table JOIN logic
4. Test subscription filtering

### Phase 4: Optimization
1. Monitor query performance
2. Add specialized indexes as needed
3. Tune replacement triggers
4. Consider ephemeral event cleanup

## Conclusion

This hybrid approach achieves the best of both worlds:

- **Protocol compliance** through event type classification and constraints
- **Query simplicity** through unified storage
- **Performance** through targeted indexes
- **Implementation ease** through minimal complexity

The multi-table approach, while theoretically cleaner, creates a subscription query nightmare that would significantly burden the implementation. The hybrid single-table approach provides all the benefits with manageable complexity.
326	docs/implementation_plan.md Normal file
@@ -0,0 +1,326 @@
# Implementation Plan: Hybrid Schema Migration

## Overview

Migrating from the current two-table design (event + tag tables) to a single event table with a JSON tags column and event type classification.

## Current Schema → Target Schema

### Current Schema (to be replaced)
```sql
CREATE TABLE event (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL
);

CREATE TABLE tag (
    id TEXT NOT NULL,   -- references event.id
    name TEXT NOT NULL,
    value TEXT NOT NULL,
    parameters TEXT
);
```

### Target Schema (simplified from final recommendation)
```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),

    -- Optional: Protocol compliance constraints (can be added later)
    CONSTRAINT unique_replaceable
        UNIQUE (pubkey, kind) WHERE event_type = 'replaceable',
    CONSTRAINT unique_addressable
        UNIQUE (pubkey, kind, json_extract(tags, '$[?(@[0]=="d")][1]'))
        WHERE event_type = 'addressable'
);
```

## Implementation Steps

### Phase 1: Update Schema File

**File**: `db/schema.sql`

1. Replace current event table definition
2. Remove tag table completely
3. Add new indexes for performance
4. Add event type classification logic

### Phase 2: Update C Code

**File**: `src/main.c`

1. Add event type classification function
2. Update `store_event()` function to use JSON tags
3. Update `retrieve_event()` function to return JSON tags
4. Remove all tag table related code
5. Update REQ query handling to use JSON tag queries (a sketch follows this list)
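As referenced in step 5, a sketch of the JSON tag query that replaces the old tag-table JOIN; the column names follow the target schema above, and the named parameter is illustrative:

```sql
-- Old: JOIN against the tag table; new: json_each over the tags column
SELECT e.*
FROM events e
WHERE EXISTS (
    SELECT 1 FROM json_each(e.tags) t
    WHERE json_extract(t.value, '$[0]') = 'p'
      AND json_extract(t.value, '$[1]') = :pubkey_filter
);
```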
### Phase 3: Update Database Initialization
|
||||||
|
|
||||||
|
**File**: `db/init.sh`
|
||||||
|
|
||||||
|
1. Update table count validation (expect 1 table instead of 2)
|
||||||
|
2. Update schema verification logic
|
||||||
|
|
||||||
|
### Phase 4: Update Tests
|
||||||
|
|
||||||
|
**File**: `tests/1_nip_test.sh`
|
||||||
|
|
||||||
|
1. Verify events are stored with JSON tags
|
||||||
|
2. Test query functionality with new schema
|
||||||
|
3. Validate event type classification
|
||||||
|
|
||||||
|
### Phase 5: Migration Strategy
|
||||||
|
|
||||||
|
Create migration script to handle existing data (if any).
|
||||||
|
|
||||||
|
## Detailed Implementation

### 1. Event Type Classification

```c
// Add to src/main.c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_UNKNOWN
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }
    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }
    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }
    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }
    return EVENT_TYPE_UNKNOWN;
}

const char* event_type_to_string(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR:     return "regular";
        case EVENT_TYPE_REPLACEABLE: return "replaceable";
        case EVENT_TYPE_EPHEMERAL:   return "ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "addressable";
        default:                     return "unknown";
    }
}
```

### 2. Updated store_event Function

```c
// Replace existing store_event function
int store_event(cJSON* event) {
    if (!g_db || !event) {
        return -1;
    }

    // Extract event fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
    cJSON* kind = cJSON_GetObjectItem(event, "kind");
    cJSON* content = cJSON_GetObjectItem(event, "content");
    cJSON* sig = cJSON_GetObjectItem(event, "sig");
    cJSON* tags = cJSON_GetObjectItem(event, "tags");

    if (!id || !pubkey || !created_at || !kind || !content || !sig) {
        log_error("Invalid event - missing required fields");
        return -1;
    }

    // Classify event type
    event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));

    // Serialize tags to JSON (use empty array if no tags)
    char* tags_json = NULL;
    if (tags && cJSON_IsArray(tags)) {
        tags_json = cJSON_Print(tags);
    } else {
        tags_json = strdup("[]");
    }

    if (!tags_json) {
        log_error("Failed to serialize tags to JSON");
        return -1;
    }

    // Single INSERT statement
    const char* sql =
        "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        log_error("Failed to prepare event insert statement");
        free(tags_json);
        return -1;
    }

    // Bind parameters
    sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
    sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
    sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);

    // Execute statement
    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);

    if (rc != SQLITE_DONE) {
        if (rc == SQLITE_CONSTRAINT) {
            log_warning("Event already exists in database");
            free(tags_json);
            return 0; // Not an error, just duplicate
        }
        char error_msg[256];
        snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
        log_error(error_msg);
        free(tags_json);
        return -1;
    }

    free(tags_json);
    log_success("Event stored in database");
    return 0;
}
```
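
One storage detail worth flagging in the function above: `cJSON_Print()` pretty-prints with newlines and indentation, which bloats the stored `tags` column. The compact variant is a one-line swap (same cJSON API):

```c
// Compact serialization for the tags column: no pretty-printing whitespace.
if (tags && cJSON_IsArray(tags)) {
    tags_json = cJSON_PrintUnformatted(tags);
} else {
    tags_json = strdup("[]");
}
```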

### 3. Updated retrieve_event Function

```c
// Replace existing retrieve_event function
cJSON* retrieve_event(const char* event_id) {
    if (!g_db || !event_id) {
        return NULL;
    }

    const char* sql =
        "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE id = ?";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        return NULL;
    }

    sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);

    cJSON* event = NULL;
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        event = cJSON_CreateObject();

        cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
        cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
        cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
        cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
        cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
        cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));

        // Parse tags JSON
        const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
        if (tags_json) {
            cJSON* tags = cJSON_Parse(tags_json);
            if (tags) {
                cJSON_AddItemToObject(event, "tags", tags);
            } else {
                cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
            }
        } else {
            cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
        }
    }

    sqlite3_finalize(stmt);
    return event;
}
```

## Migration Considerations

### Handling Existing Data

If there's existing data in the current schema:

1. **Export existing events and tags**
2. **Transform tag data to JSON format**
3. **Classify events by kind**
4. **Import into new schema** (see the sketch after this list)
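
A minimal one-statement sketch of steps 2-4, assuming both schemas live in the same SQLite file, each `tag` row maps to a two-element tag array (the `parameters` column is dropped for brevity), and unknown kinds fall through to `'regular'`:

```sql
INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags)
SELECT
    e.id, e.pubkey, e.created_at, e.kind,
    CASE
        WHEN (e.kind >= 10000 AND e.kind < 20000) OR e.kind IN (0, 3) THEN 'replaceable'
        WHEN e.kind >= 20000 AND e.kind < 30000 THEN 'ephemeral'
        WHEN e.kind >= 30000 AND e.kind < 40000 THEN 'addressable'
        ELSE 'regular'
    END,
    e.content, e.sig,
    COALESCE(
        (SELECT json_group_array(json_array(t.name, t.value))
         FROM tag t WHERE t.id = e.id),
        '[]')
FROM event e;
```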

### Backward Compatibility

- API remains the same - events still have the same JSON structure
- Internal storage changes but external interface is unchanged
- Tests should pass with minimal modifications

## Performance Optimizations

### Essential Indexes

```sql
-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);

-- JSON tag indexes for common tag patterns
CREATE INDEX idx_events_e_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_events_p_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```
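
A caveat on the JSON tag indexes: SQLite's `json_extract()` does not expand `$[*]` wildcards, so the two expression indexes above would not work as written. Until a real tag-indexing strategy is chosen, a portable (if unindexed) tag filter can go through `json_each()`; the `:event_id` parameter below is a placeholder:

```sql
-- Find events carrying an 'e' tag whose value equals :event_id.
-- Runs as a scan; shown as a stopgap, not an indexed path.
SELECT e.id, e.pubkey, e.created_at, e.kind, e.content, e.sig, e.tags
FROM events e
WHERE EXISTS (
    SELECT 1
    FROM json_each(e.tags) AS t
    WHERE json_extract(t.value, '$[0]') = 'e'
      AND json_extract(t.value, '$[1]') = :event_id
);
```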

## Next Steps

1. **Switch to code mode** to implement the schema changes
2. **Update db/schema.sql** with new table definition
3. **Modify src/main.c** with new functions
4. **Update db/init.sh** for single table validation
5. **Test with existing test suite**

This approach will provide:

- ✅ Simplified schema management
- ✅ Protocol compliance preparation
- ✅ JSON tag query capabilities
- ✅ Performance optimization opportunities
- ✅ Easy REQ subscription handling

Ready to proceed with implementation?
331
docs/subscription_query_analysis.md
Normal file
@@ -0,0 +1,331 @@
# Subscription Query Complexity Analysis

## Overview

This document analyzes how Nostr REQ subscription filters would be implemented across different schema designs, focusing on query complexity, performance implications, and implementation burden.

## Nostr REQ Filter Specification Recap

Clients send REQ messages with filters containing:

- **`ids`**: List of specific event IDs
- **`authors`**: List of pubkeys
- **`kinds`**: List of event kinds
- **`#<letter>`**: Tag filters (e.g., `#e` for event refs, `#p` for pubkey mentions)
- **`since`/`until`**: Time range filters
- **`limit`**: Maximum events to return

### Key Filter Behaviors:

- **Multiple filters = OR logic**: Match any filter
- **Within filter = AND logic**: Match all specified conditions
- **Lists = IN logic**: Match any value in the list
- **Tag filters**: Must have at least one matching tag

## Schema Comparison for REQ Handling

### Current Simple Schema (Single Table)
```sql
CREATE TABLE event (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL
);

CREATE TABLE tag (
    id TEXT NOT NULL,      -- event ID
    name TEXT NOT NULL,
    value TEXT NOT NULL,
    parameters TEXT
);
```

#### Sample REQ Query Implementation:
```sql
-- Filter: {"authors": ["pubkey1", "pubkey2"], "kinds": [1, 6], "#p": ["target_pubkey"]}
SELECT DISTINCT e.*
FROM event e
WHERE e.pubkey IN ('pubkey1', 'pubkey2')
  AND e.kind IN (1, 6)
  AND EXISTS (
      SELECT 1 FROM tag t
      WHERE t.id = e.id AND t.name = 'p' AND t.value = 'target_pubkey'
  )
ORDER BY e.created_at DESC, e.id ASC
LIMIT ?;
```

### Multi-Table Schema Challenge

With separate tables (`events_regular`, `events_replaceable`, `events_ephemeral`, `events_addressable`), a REQ filter could potentially match events across ALL tables.

#### Problem Example:
Filter: `{"kinds": [1, 0, 20001, 30023]}`

- Kind 1 → `events_regular`
- Kind 0 → `events_replaceable`
- Kind 20001 → `events_ephemeral`
- Kind 30023 → `events_addressable`

This requires **4 separate queries + UNION**, significantly complicating the implementation.

## Multi-Table Query Complexity

### Scenario 1: Cross-Table Kind Filter
```sql
-- Filter: {"kinds": [1, 0, 30023]}
-- Requires querying 3 different tables

SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
WHERE kind = 1
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
WHERE kind = 0
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
WHERE kind = 30023
ORDER BY created_at DESC, id ASC
LIMIT ?;
```

### Scenario 2: Cross-Table Author Filter
```sql
-- Filter: {"authors": ["pubkey1"]}
-- Must check ALL tables for this author

SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_ephemeral
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
WHERE pubkey = 'pubkey1'
ORDER BY created_at DESC, id ASC
LIMIT ?;
```

### Scenario 3: Complex Multi-Condition Filter
```sql
-- Filter: {"authors": ["pubkey1"], "kinds": [1, 0], "#p": ["target"], "since": 1234567890}
-- Extremely complex with multiple UNIONs and tag JOINs

WITH regular_results AS (
    SELECT DISTINCT r.*
    FROM events_regular r
    JOIN tags_regular tr ON r.id = tr.event_id
    WHERE r.pubkey = 'pubkey1'
      AND r.kind = 1
      AND r.created_at >= 1234567890
      AND tr.name = 'p' AND tr.value = 'target'
),
replaceable_results AS (
    SELECT DISTINCT rp.*
    FROM events_replaceable rp
    JOIN tags_replaceable trp ON (rp.pubkey, rp.kind) = (trp.event_pubkey, trp.event_kind)
    WHERE rp.pubkey = 'pubkey1'
      AND rp.kind = 0
      AND rp.created_at >= 1234567890
      AND trp.name = 'p' AND trp.value = 'target'
)
SELECT * FROM regular_results
UNION ALL
SELECT * FROM replaceable_results
ORDER BY created_at DESC, id ASC
LIMIT ?;
```

## Implementation Burden Analysis

### Single Table Approach
```c
// Simple - one query builder function
char* build_filter_query(cJSON* filters) {
    // Build single SELECT with WHERE conditions
    // Single ORDER BY and LIMIT
    // One execution path
}
```

### Multi-Table Approach
```c
// Complex - requires routing and union logic
char* build_multi_table_query(cJSON* filters) {
    // 1. Analyze kinds to determine which tables to query
    // 2. Split filters per table type
    // 3. Build separate queries for each table
    // 4. Union results with complex ORDER BY
    // 5. Handle LIMIT across UNION (tricky!)
}

typedef struct {
    bool needs_regular;
    bool needs_replaceable;
    bool needs_ephemeral;
    bool needs_addressable;
    cJSON* regular_filter;
    cJSON* replaceable_filter;
    cJSON* ephemeral_filter;
    cJSON* addressable_filter;
} filter_routing_t;
```

### Query Routing Complexity

For each REQ filter, we must:

1. **Analyze kinds** → Determine which tables to query (see the sketch after this list)
2. **Split filters** → Create per-table filter conditions
3. **Handle tag filters** → Different tag table references per event type
4. **Union results** → Merge with proper ordering
5. **Apply LIMIT** → Complex with UNION queries
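
A sketch of step 1, reusing `classify_event_kind()` from the implementation plan to fill the routing flags (hypothetical glue code, not part of the current source):

```c
// Mark which event tables the multi-table design would have to query for a
// REQ filter's "kinds" array. Requires <stdbool.h> for the bool flags.
void route_kinds(cJSON* kinds, filter_routing_t* routing) {
    for (int i = 0; i < cJSON_GetArraySize(kinds); i++) {
        cJSON* kind = cJSON_GetArrayItem(kinds, i);
        if (!cJSON_IsNumber(kind)) continue;
        switch (classify_event_kind((int)cJSON_GetNumberValue(kind))) {
            case EVENT_TYPE_REGULAR:     routing->needs_regular = true; break;
            case EVENT_TYPE_REPLACEABLE: routing->needs_replaceable = true; break;
            case EVENT_TYPE_EPHEMERAL:   routing->needs_ephemeral = true; break;
            case EVENT_TYPE_ADDRESSABLE: routing->needs_addressable = true; break;
            default: break; // unknown kinds match no table
        }
    }
}
```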

## Performance Implications

### Single Table Advantages:

- ✅ **Single query execution**
- ✅ **One index strategy**
- ✅ **Simple LIMIT handling**
- ✅ **Unified ORDER BY**
- ✅ **No UNION overhead**

### Multi-Table Disadvantages:

- ❌ **Multiple query executions**
- ❌ **UNION sorting overhead**
- ❌ **Complex LIMIT application**
- ❌ **Index fragmentation across tables**
- ❌ **Result set merging complexity**

## Specific REQ Filter Challenges

### 1. LIMIT Handling with UNION
```sql
-- WRONG: Limit applies to each subquery
(SELECT * FROM events_regular WHERE ... LIMIT 100)
UNION ALL
(SELECT * FROM events_replaceable WHERE ... LIMIT 100)
-- Could return 200 events!

-- CORRECT: Limit applies to final result
SELECT * FROM (
    SELECT * FROM events_regular WHERE ...
    UNION ALL
    SELECT * FROM events_replaceable WHERE ...
    ORDER BY created_at DESC, id ASC
) LIMIT 100;
-- But this sorts ALL results before limiting!
```

### 2. Tag Filter Complexity

Each event type needs different tag table joins:

- `events_regular` → `tags_regular`
- `events_replaceable` → `tags_replaceable` (with composite key)
- `events_addressable` → `tags_addressable` (with composite key)
- `events_ephemeral` → `tags_ephemeral`

### 3. Subscription State Management

With multiple tables, subscription state becomes complex:

- Which tables does this subscription monitor?
- How to efficiently check new events across tables?
- Different trigger/notification patterns per table

## Alternative: Unified Event View

### Hybrid Approach: Views Over Multi-Tables
```sql
-- Create unified view for queries
CREATE VIEW all_events AS
SELECT
    'regular' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_regular
UNION ALL
SELECT
    'replaceable' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_replaceable
UNION ALL
SELECT
    'ephemeral' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_ephemeral
UNION ALL
SELECT
    'addressable' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_addressable;

-- Unified tag view (string concatenation via ||; SQLite has no CONCAT())
CREATE VIEW all_tags AS
SELECT event_id, name, value, parameters FROM tags_regular
UNION ALL
SELECT event_pubkey || ':' || event_kind, name, value, parameters FROM tags_replaceable
UNION ALL
SELECT event_id, name, value, parameters FROM tags_ephemeral
UNION ALL
SELECT event_pubkey || ':' || event_kind || ':' || d_tag, name, value, parameters FROM tags_addressable;
```

### REQ Query Against Views:
```sql
-- Much simpler - back to single-table complexity
SELECT DISTINCT e.*
FROM all_events e
JOIN all_tags t ON e.id = t.event_id
WHERE e.pubkey IN (?)
  AND e.kind IN (?)
  AND t.name = 'p' AND t.value = ?
ORDER BY e.created_at DESC, e.id ASC
LIMIT ?;
```

## Recommendation

**The multi-table approach creates significant subscription query complexity that may outweigh its benefits.**

### Key Issues:

1. **REQ filters don't map to event types** - clients filter by kind, author, tags, not storage semantics
2. **UNION query complexity** - much harder to optimize and implement
3. **Subscription management burden** - must monitor multiple tables
4. **Performance uncertainty** - UNION queries may be slower than single table

### Alternative Recommendation:

**Modified Single Table with Event Type Column:**

```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL,  -- 'regular', 'replaceable', 'ephemeral', 'addressable'
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,

    -- Replaceable event fields
    replaced_at INTEGER,

    -- Addressable event fields
    d_tag TEXT,

    -- Unique constraints per event type
    CONSTRAINT unique_replaceable
        UNIQUE (pubkey, kind) WHERE event_type = 'replaceable',
    CONSTRAINT unique_addressable
        UNIQUE (pubkey, kind, d_tag) WHERE event_type = 'addressable'
);
```
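
Under this schema, the "newest wins" replacement logic can ride on the unique constraint via SQLite's upsert. A minimal sketch, assuming the constraints are realized as partial unique indexes (SQLite does not accept `WHERE` on inline table constraints, as noted in the implementation plan):

```sql
-- Replaceable event upsert keyed on (pubkey, kind): insert if new, otherwise
-- overwrite only when the incoming event is newer than the stored one.
INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags)
VALUES (:id, :pubkey, :created_at, :kind, 'replaceable', :content, :sig, :tags)
ON CONFLICT (pubkey, kind) WHERE event_type = 'replaceable'
DO UPDATE SET
    id = excluded.id,
    created_at = excluded.created_at,
    content = excluded.content,
    sig = excluded.sig,
    tags = excluded.tags
WHERE excluded.created_at > events.created_at;
```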

### Benefits:

- ✅ **Simple REQ queries** - single table, familiar patterns
- ✅ **Type enforcement** - partial unique constraints handle replacement logic
- ✅ **Performance** - unified indexes, no UNIONs
- ✅ **Implementation simplicity** - minimal changes from current code
- ✅ **Future flexibility** - can split tables later if needed

This approach gets the best of both worlds: protocol compliance through constraints, but query simplicity through unified storage.
94
make_and_restart_relay.sh
Executable file
@@ -0,0 +1,94 @@
#!/bin/bash

# C-Relay Build and Restart Script
# Builds the project first, then stops any running relay and starts a new one in the background

echo "=== C Nostr Relay Build and Restart Script ==="

# Build the project first
echo "Building project..."
make clean all

# Check if build was successful
if [ $? -ne 0 ]; then
    echo "ERROR: Build failed. Cannot restart relay."
    exit 1
fi

# Check if relay binary exists after build
if [ ! -f "./src/main" ]; then
    echo "ERROR: Relay binary not found after build. Build may have failed."
    exit 1
fi

echo "Build successful. Proceeding with relay restart..."

# Kill existing relay if running
echo "Stopping any existing relay servers..."
pkill -f "./src/main" 2>/dev/null
sleep 2  # Give time for shutdown

# Check if port is still bound
if lsof -i :8888 >/dev/null 2>&1; then
    echo "Port 8888 still in use, force killing..."
    fuser -k 8888/tcp 2>/dev/null || echo "No process on port 8888"
fi

# Get any remaining processes
REMAINING_PIDS=$(pgrep -f "./src/main" || echo "")
if [ -n "$REMAINING_PIDS" ]; then
    echo "Force killing remaining processes: $REMAINING_PIDS"
    kill -9 $REMAINING_PIDS 2>/dev/null
    sleep 1
else
    echo "No existing relay found"
fi

# Clean up PID file
rm -f relay.pid

# Initialize database if needed
if [ ! -f "./db/c_nostr_relay.db" ]; then
    echo "Initializing database..."
    ./db/init.sh --force >/dev/null 2>&1
fi

# Start relay in background with output redirection
echo "Starting relay server..."
echo "Debug: Current processes: $(ps aux | grep './src/main' | grep -v grep || echo 'None')"

# Start relay in background and capture its PID
./src/main > relay.log 2>&1 &
RELAY_PID=$!

echo "Started with PID: $RELAY_PID"

# Check if server is still running after short delay
sleep 3

# Check if process is still alive
if ps -p "$RELAY_PID" >/dev/null 2>&1; then
    echo "Relay started successfully!"
    echo "PID: $RELAY_PID"
    echo "WebSocket endpoint: ws://127.0.0.1:8888"
    echo "Log file: relay.log"
    echo ""

    # Save PID for debugging
    echo $RELAY_PID > relay.pid

    echo "=== Relay server running in background ==="
    echo "To kill relay: pkill -f './src/main'"
    echo "To check status: ps aux | grep src/main"
    echo "To view logs: tail -f relay.log"
    echo "Ready for Nostr client connections!"
else
    echo "ERROR: Relay failed to start"
    echo "Debug: Check relay.log for error details:"
    echo "--- Last 10 lines of relay.log ---"
    tail -n 10 relay.log 2>/dev/null || echo "No log file found"
    echo "--- End log ---"
    exit 1
fi

echo ""
1
nips
Submodule
Submodule nips added at 8c45ff5d96
1
nostr_core_lib
Submodule
Submodule nostr_core_lib added at 33129d82fd
240
relay.log
Normal file
@@ -0,0 +1,240 @@
=== C Nostr Relay Server ===
[SUCCESS] Database connection established
[INFO] Starting relay server...
[INFO] Starting libwebsockets-based Nostr relay server...
[SUCCESS] WebSocket relay started on ws://127.0.0.1:8888
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 5 rows
[INFO] Total events sent: 5
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 7 rows
[INFO] Total events sent: 7
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983802 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 6 rows
[INFO] Total events sent: 6
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 8 rows
[INFO] Total events sent: 8
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983945 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 10 rows
[INFO] Total events sent: 10
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
726
src/main.c
Normal file
@@ -0,0 +1,726 @@
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <time.h>
#include <pthread.h>
#include <sqlite3.h>
#include <libwebsockets.h>

// Include nostr_core_lib for Nostr functionality
#include "../nostr_core_lib/cjson/cJSON.h"
#include "../nostr_core_lib/nostr_core/nostr_core.h"

// Configuration
#define DEFAULT_PORT 8888
#define DEFAULT_HOST "127.0.0.1"
#define DATABASE_PATH "db/c_nostr_relay.db"
#define MAX_CLIENTS 100

// Global state
static sqlite3* g_db = NULL;
static int g_server_running = 1;

// Color constants for logging
#define RED    "\033[31m"
#define GREEN  "\033[32m"
#define YELLOW "\033[33m"
#define BLUE   "\033[34m"
#define BOLD   "\033[1m"
#define RESET  "\033[0m"

// Logging functions
void log_info(const char* message) {
    printf(BLUE "[INFO]" RESET " %s\n", message);
    fflush(stdout);
}

void log_success(const char* message) {
    printf(GREEN "[SUCCESS]" RESET " %s\n", message);
    fflush(stdout);
}

void log_error(const char* message) {
    printf(RED "[ERROR]" RESET " %s\n", message);
    fflush(stdout);
}

void log_warning(const char* message) {
    printf(YELLOW "[WARNING]" RESET " %s\n", message);
    fflush(stdout);
}

// Signal handler for graceful shutdown
void signal_handler(int sig) {
    if (sig == SIGINT || sig == SIGTERM) {
        log_info("Received shutdown signal");
        g_server_running = 0;
    }
}

// Initialize database connection
int init_database() {
    int rc = sqlite3_open(DATABASE_PATH, &g_db);
    if (rc != SQLITE_OK) {
        log_error("Cannot open database");
        return -1;
    }

    log_success("Database connection established");
    return 0;
}

// Close database connection
void close_database() {
    if (g_db) {
        sqlite3_close(g_db);
        g_db = NULL;
        log_info("Database connection closed");
    }
}

// Event type classification
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_UNKNOWN
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }
    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }
    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }
    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }
    return EVENT_TYPE_UNKNOWN;
}

const char* event_type_to_string(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR:     return "regular";
        case EVENT_TYPE_REPLACEABLE: return "replaceable";
        case EVENT_TYPE_EPHEMERAL:   return "ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "addressable";
        default:                     return "unknown";
    }
}

// Store event in database
int store_event(cJSON* event) {
    if (!g_db || !event) {
        return -1;
    }

    // Extract event fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
    cJSON* kind = cJSON_GetObjectItem(event, "kind");
    cJSON* content = cJSON_GetObjectItem(event, "content");
    cJSON* sig = cJSON_GetObjectItem(event, "sig");
    cJSON* tags = cJSON_GetObjectItem(event, "tags");

    if (!id || !pubkey || !created_at || !kind || !content || !sig) {
        log_error("Invalid event - missing required fields");
        return -1;
    }

    // Classify event type
    event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));

    // Serialize tags to JSON (use empty array if no tags)
    char* tags_json = NULL;
    if (tags && cJSON_IsArray(tags)) {
        tags_json = cJSON_Print(tags);
    } else {
        tags_json = strdup("[]");
    }

    if (!tags_json) {
        log_error("Failed to serialize tags to JSON");
        return -1;
    }

    // Prepare SQL statement for event insertion
    const char* sql =
        "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        log_error("Failed to prepare event insert statement");
        free(tags_json);
        return -1;
    }

    // Bind parameters
    sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
    sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
    sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);

    // Execute statement
    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);

    if (rc != SQLITE_DONE) {
        if (rc == SQLITE_CONSTRAINT) {
            log_warning("Event already exists in database");
            free(tags_json);
            return 0; // Not an error, just duplicate
        }
        char error_msg[256];
        snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
        log_error(error_msg);
        free(tags_json);
        return -1;
    }

    free(tags_json);
    log_success("Event stored in database");
    return 0;
}

// Retrieve event from database
cJSON* retrieve_event(const char* event_id) {
    if (!g_db || !event_id) {
        return NULL;
    }

    const char* sql =
        "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE id = ?";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        return NULL;
    }

    sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);

    cJSON* event = NULL;
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        event = cJSON_CreateObject();

        cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
        cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
        cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
        cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
        cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
        cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));

        // Parse tags JSON
        const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
        if (tags_json) {
            cJSON* tags = cJSON_Parse(tags_json);
            if (tags) {
                cJSON_AddItemToObject(event, "tags", tags);
            } else {
                cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
            }
        } else {
            cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
        }
    }

    sqlite3_finalize(stmt);
    return event;
}

// Handle REQ message (subscription) - send events matching filters
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi) {
    log_info("Handling REQ message");

    if (!cJSON_IsArray(filters)) {
        log_error("REQ filters is not an array");
        return 0;
    }

    int events_sent = 0;

    // Process each filter in the array
    for (int i = 0; i < cJSON_GetArraySize(filters); i++) {
        cJSON* filter = cJSON_GetArrayItem(filters, i);
        if (!filter || !cJSON_IsObject(filter)) {
            log_warning("Invalid filter object");
            continue;
        }

        // Build SQL query based on filter
        char sql[1024] = "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1";
        char* sql_ptr = sql + strlen(sql);
        int remaining = sizeof(sql) - strlen(sql);

        // Handle kinds filter
        cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
        if (kinds && cJSON_IsArray(kinds)) {
            int kind_count = cJSON_GetArraySize(kinds);
            if (kind_count > 0) {
                snprintf(sql_ptr, remaining, " AND kind IN (");
                sql_ptr += strlen(sql_ptr);
                remaining = sizeof(sql) - strlen(sql);

                for (int k = 0; k < kind_count; k++) {
                    cJSON* kind = cJSON_GetArrayItem(kinds, k);
                    if (cJSON_IsNumber(kind)) {
                        if (k > 0) {
                            snprintf(sql_ptr, remaining, ",");
                            sql_ptr++;
                            remaining--;
                        }
                        snprintf(sql_ptr, remaining, "%d", (int)cJSON_GetNumberValue(kind));
                        sql_ptr += strlen(sql_ptr);
                        remaining = sizeof(sql) - strlen(sql);
                    }
                }
                snprintf(sql_ptr, remaining, ")");
                sql_ptr += strlen(sql_ptr);
                remaining = sizeof(sql) - strlen(sql);
            }
        }

        // Handle authors filter
        cJSON* authors = cJSON_GetObjectItem(filter, "authors");
        if (authors && cJSON_IsArray(authors)) {
            int author_count = cJSON_GetArraySize(authors);
            if (author_count > 0) {
                snprintf(sql_ptr, remaining, " AND pubkey IN (");
                sql_ptr += strlen(sql_ptr);
                remaining = sizeof(sql) - strlen(sql);

                for (int a = 0; a < author_count; a++) {
                    cJSON* author = cJSON_GetArrayItem(authors, a);
                    if (cJSON_IsString(author)) {
                        if (a > 0) {
                            snprintf(sql_ptr, remaining, ",");
                            sql_ptr++;
                            remaining--;
                        }
                        snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
                        sql_ptr += strlen(sql_ptr);
                        remaining = sizeof(sql) - strlen(sql);
                    }
                }
                snprintf(sql_ptr, remaining, ")");
                sql_ptr += strlen(sql_ptr);
                remaining = sizeof(sql) - strlen(sql);
            }
        }

        // Handle since filter
        cJSON* since = cJSON_GetObjectItem(filter, "since");
        if (since && cJSON_IsNumber(since)) {
            snprintf(sql_ptr, remaining, " AND created_at >= %ld", (long)cJSON_GetNumberValue(since));
            sql_ptr += strlen(sql_ptr);
            remaining = sizeof(sql) - strlen(sql);
        }

        // Handle until filter
        cJSON* until = cJSON_GetObjectItem(filter, "until");
        if (until && cJSON_IsNumber(until)) {
            snprintf(sql_ptr, remaining, " AND created_at <= %ld", (long)cJSON_GetNumberValue(until));
            sql_ptr += strlen(sql_ptr);
            remaining = sizeof(sql) - strlen(sql);
        }

        // Add ordering and limit
        snprintf(sql_ptr, remaining, " ORDER BY created_at DESC");
        sql_ptr += strlen(sql_ptr);
        remaining = sizeof(sql) - strlen(sql);

        // Handle limit filter
        cJSON* limit = cJSON_GetObjectItem(filter, "limit");
        if (limit && cJSON_IsNumber(limit)) {
            int limit_val = (int)cJSON_GetNumberValue(limit);
            if (limit_val > 0 && limit_val <= 5000) {
                snprintf(sql_ptr, remaining, " LIMIT %d", limit_val);
            }
        } else {
            // Default limit to prevent excessive queries
            snprintf(sql_ptr, remaining, " LIMIT 500");
        }

        // Debug: Log the SQL query being executed
        char debug_msg[1280];
        snprintf(debug_msg, sizeof(debug_msg), "Executing SQL: %s", sql);
        log_info(debug_msg);

        // Execute query and send events
        sqlite3_stmt* stmt;
        int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
        if (rc != SQLITE_OK) {
            char error_msg[256];
            snprintf(error_msg, sizeof(error_msg), "Failed to prepare subscription query: %s", sqlite3_errmsg(g_db));
            log_error(error_msg);
            continue;
        }

        int row_count = 0;
        while (sqlite3_step(stmt) == SQLITE_ROW) {
            row_count++;

            // Build event JSON
            cJSON* event = cJSON_CreateObject();
            cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
            cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
            cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
            cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
            cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
            cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));

            // Parse tags JSON
            const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
            cJSON* tags = NULL;
            if (tags_json) {
                tags = cJSON_Parse(tags_json);
            }
            if (!tags) {
                tags = cJSON_CreateArray();
            }
            cJSON_AddItemToObject(event, "tags", tags);

            // Send EVENT message
            cJSON* event_msg = cJSON_CreateArray();
            cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
            cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub_id));
            cJSON_AddItemToArray(event_msg, event);

            char* msg_str = cJSON_Print(event_msg);
            if (msg_str) {
                size_t msg_len = strlen(msg_str);
                unsigned char* buf = malloc(LWS_PRE + msg_len);
                if (buf) {
                    memcpy(buf + LWS_PRE, msg_str, msg_len);
                    lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
                    free(buf);
                }
                free(msg_str);
            }

            cJSON_Delete(event_msg);
            events_sent++;
        }

        char row_debug[128];
        snprintf(row_debug, sizeof(row_debug), "Query returned %d rows", row_count);
        log_info(row_debug);

        sqlite3_finalize(stmt);
    }

    char events_debug[128];
    snprintf(events_debug, sizeof(events_debug), "Total events sent: %d", events_sent);
    log_info(events_debug);

    return events_sent;
}
|
||||||
|
|
||||||
|
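// NOTE (editorial sketch): the filter SQL above is assembled with snprintf().
// The values interpolated in this fragment are numeric (since/until/limit),
// so injection is not an issue here, but binding parameters keeps the query
// plan reusable and sidesteps escaping questions as filters grow. The same
// "until" clause with bound parameters would look roughly like:
//
//     sqlite3_stmt* s;
//     sqlite3_prepare_v2(g_db,
//         "SELECT id FROM events WHERE created_at <= ?1 "
//         "ORDER BY created_at DESC LIMIT ?2", -1, &s, NULL);
//     sqlite3_bind_int64(s, 1, (sqlite3_int64)cJSON_GetNumberValue(until));
//     sqlite3_bind_int(s, 2, limit_val);
//     while (sqlite3_step(s) == SQLITE_ROW) { /* ... */ }
//     sqlite3_finalize(s);
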
// Handle EVENT message (publish)
int handle_event_message(cJSON* event) {
    log_info("Handling EVENT message");

    // Validate event structure (basic check)
    cJSON* id = cJSON_GetObjectItem(event, "id");
    if (!id || !cJSON_IsString(id)) {
        log_error("Invalid event - no ID");
        return -1;
    }

    // Store event in database
    if (store_event(event) == 0) {
        log_success("Event stored successfully");
        return 0;
    }

    return -1;
}

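// NOTE (editorial sketch): handle_event_message() only checks that an "id"
// field is present. NIP-01 also expects a relay to verify the event id and
// signature before accepting an event. A fuller check could look like the
// commented sketch below; nostr_verify_event() is a hypothetical stand-in
// for whatever verification entry point nostr_core_lib actually exposes.
//
//     static int validate_event_strict(cJSON* event) {
//         static const char* required[] = { "id", "pubkey", "created_at",
//                                           "kind", "tags", "content", "sig" };
//         for (size_t i = 0; i < sizeof(required) / sizeof(required[0]); i++) {
//             if (!cJSON_GetObjectItem(event, required[i])) {
//                 return -1;  // missing a mandatory NIP-01 field
//             }
//         }
//         return nostr_verify_event(event);  // hypothetical: id + sig check
//     }
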
// Global WebSocket context
static struct lws_context *ws_context = NULL;

// Per-session data structure
struct per_session_data {
    int authenticated;
    char subscription_id[64];
};

// WebSocket callback function for Nostr relay protocol
static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reason,
                                void *user, void *in, size_t len) {
    struct per_session_data *pss = (struct per_session_data *)user;

    switch (reason) {
        case LWS_CALLBACK_ESTABLISHED:
            log_info("WebSocket connection established");
            memset(pss, 0, sizeof(*pss));
            break;

        case LWS_CALLBACK_RECEIVE:
            if (len > 0) {
                char *message = malloc(len + 1);
                if (message) {
                    memcpy(message, in, len);
                    message[len] = '\0';

                    log_info("Received WebSocket message");

                    // Parse JSON message
                    cJSON* json = cJSON_Parse(message);
                    if (json && cJSON_IsArray(json)) {
                        // Get message type
                        cJSON* type = cJSON_GetArrayItem(json, 0);
                        if (type && cJSON_IsString(type)) {
                            const char* msg_type = cJSON_GetStringValue(type);

                            if (strcmp(msg_type, "EVENT") == 0) {
                                // Handle EVENT message
                                cJSON* event = cJSON_GetArrayItem(json, 1);
                                if (event && cJSON_IsObject(event)) {
                                    int result = handle_event_message(event);

                                    // Send OK response
                                    cJSON* event_id = cJSON_GetObjectItem(event, "id");
                                    if (event_id && cJSON_IsString(event_id)) {
                                        cJSON* response = cJSON_CreateArray();
                                        cJSON_AddItemToArray(response, cJSON_CreateString("OK"));
                                        cJSON_AddItemToArray(response, cJSON_CreateString(cJSON_GetStringValue(event_id)));
                                        cJSON_AddItemToArray(response, cJSON_CreateBool(result == 0));
                                        cJSON_AddItemToArray(response, cJSON_CreateString(result == 0 ? "" : "error: failed to store event"));

                                        char *response_str = cJSON_Print(response);
                                        if (response_str) {
                                            size_t response_len = strlen(response_str);
                                            unsigned char *buf = malloc(LWS_PRE + response_len);
                                            if (buf) {
                                                memcpy(buf + LWS_PRE, response_str, response_len);
                                                lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
                                                free(buf);
                                            }
                                            free(response_str);
                                        }
                                        cJSON_Delete(response);
                                    }
                                }
                            } else if (strcmp(msg_type, "REQ") == 0) {
                                // Handle REQ message
                                cJSON* sub_id = cJSON_GetArrayItem(json, 1);

                                if (sub_id && cJSON_IsString(sub_id)) {
                                    const char* subscription_id = cJSON_GetStringValue(sub_id);
                                    strncpy(pss->subscription_id, subscription_id, sizeof(pss->subscription_id) - 1);

                                    // Create array of filter objects from position 2 onwards
                                    cJSON* filters = cJSON_CreateArray();
                                    int json_size = cJSON_GetArraySize(json);
                                    for (int i = 2; i < json_size; i++) {
                                        cJSON* filter = cJSON_GetArrayItem(json, i);
                                        if (filter) {
                                            cJSON_AddItemToArray(filters, cJSON_Duplicate(filter, 1));
                                        }
                                    }

                                    handle_req_message(subscription_id, filters, wsi);

                                    // Clean up the filters array we created
                                    cJSON_Delete(filters);

                                    // Send EOSE (End of Stored Events)
                                    cJSON* eose_response = cJSON_CreateArray();
                                    cJSON_AddItemToArray(eose_response, cJSON_CreateString("EOSE"));
                                    cJSON_AddItemToArray(eose_response, cJSON_CreateString(subscription_id));

                                    char *eose_str = cJSON_Print(eose_response);
                                    if (eose_str) {
                                        size_t eose_len = strlen(eose_str);
                                        unsigned char *buf = malloc(LWS_PRE + eose_len);
                                        if (buf) {
                                            memcpy(buf + LWS_PRE, eose_str, eose_len);
                                            lws_write(wsi, buf + LWS_PRE, eose_len, LWS_WRITE_TEXT);
                                            free(buf);
                                        }
                                        free(eose_str);
                                    }
                                    cJSON_Delete(eose_response);
                                }
                            } else if (strcmp(msg_type, "CLOSE") == 0) {
                                // Handle CLOSE message
                                log_info("Subscription closed");
                            }
                        }
                    }

                    if (json) cJSON_Delete(json);
                    free(message);
                }
            }
            break;

        case LWS_CALLBACK_CLOSED:
            log_info("WebSocket connection closed");
            break;

        default:
            break;
    }

    return 0;
}

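// NOTE (editorial sketch): the callback above calls lws_write() directly from
// LWS_CALLBACK_RECEIVE. libwebsockets only guarantees writability inside
// LWS_CALLBACK_SERVER_WRITEABLE, so under load a direct write can be partial
// or blocking. The usual pattern queues the outgoing message and requests a
// writeable callback; "pending"/"pending_len" below are fields this sketch
// assumes were added to per_session_data:
//
//     pss->pending = buf;                 // malloc'd with LWS_PRE headroom
//     pss->pending_len = msg_len;
//     lws_callback_on_writable(wsi);      // ask lws for a writeable callback
//
//     case LWS_CALLBACK_SERVER_WRITEABLE:
//         if (pss->pending) {
//             lws_write(wsi, pss->pending + LWS_PRE, pss->pending_len,
//                       LWS_WRITE_TEXT);
//             free(pss->pending);
//             pss->pending = NULL;
//         }
//         break;
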
// WebSocket protocol definition
static struct lws_protocols protocols[] = {
    {
        "nostr-relay-protocol",
        nostr_relay_callback,
        sizeof(struct per_session_data),
        4096, // rx buffer size
        0, NULL, 0
    },
    { NULL, NULL, 0, 0, 0, NULL, 0 } // terminator
};

// Start libwebsockets-based WebSocket Nostr relay server
int start_websocket_relay(int port) {
    struct lws_context_creation_info info;

    log_info("Starting libwebsockets-based Nostr relay server...");

    memset(&info, 0, sizeof(info));
    info.port = port;
    info.protocols = protocols;
    info.gid = -1;
    info.uid = -1;

    // Minimal libwebsockets configuration
    info.options = LWS_SERVER_OPTION_VALIDATE_UTF8;

    // No interface restriction - bind to the default interface
    // info.vhost_name = NULL;
    // info.iface = NULL;

    // HTTP header pool and handshake timeout sized for relay usage
    info.max_http_header_pool = 16;
    info.timeout_secs = 10;

    // Max HTTP header data for the handshake (per-message rx buffer size
    // is set in protocols[] above)
    info.max_http_header_data = 4096;

    ws_context = lws_create_context(&info);
    if (!ws_context) {
        log_error("Failed to create libwebsockets context");
        perror("libwebsockets creation error");
        return -1;
    }

    char start_msg[128];
    snprintf(start_msg, sizeof(start_msg), "WebSocket relay started on ws://127.0.0.1:%d", port);
    log_success(start_msg);

    // Main event loop with proper signal handling
    while (g_server_running) {
        int result = lws_service(ws_context, 1000);

        if (result < 0) {
            log_error("libwebsockets service error");
            break;
        }
    }

    log_info("Shutting down WebSocket server...");
    lws_context_destroy(ws_context);
    ws_context = NULL;

    log_success("WebSocket relay shut down cleanly");
    return 0;
}

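// NOTE (editorial sketch): lws_service() above wakes at most once per second,
// so a SIGINT may take up to a second to be noticed. A common refinement is
// to have the signal handler nudge the service loop via lws_cancel_service()
// (a real libwebsockets call; whether it is safe to invoke from a signal
// handler is platform-dependent, so treat this as a sketch only):
//
//     void signal_handler(int sig) {
//         (void)sig;
//         g_server_running = 0;
//         if (ws_context) {
//             lws_cancel_service(ws_context);
//         }
//     }
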
// Print usage information
void print_usage(const char* program_name) {
    printf("Usage: %s [OPTIONS]\n", program_name);
    printf("\n");
    printf("C Nostr Relay Server\n");
    printf("\n");
    printf("Options:\n");
    printf("  -p, --port PORT    Listen port (default: %d)\n", DEFAULT_PORT);
    printf("  -h, --help         Show this help message\n");
    printf("\n");
}

int main(int argc, char* argv[]) {
    int port = DEFAULT_PORT;

    // Parse command line arguments
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
            print_usage(argv[0]);
            return 0;
        } else if (strcmp(argv[i], "-p") == 0 || strcmp(argv[i], "--port") == 0) {
            if (i + 1 < argc) {
                port = atoi(argv[++i]);
                if (port <= 0 || port > 65535) {
                    log_error("Invalid port number");
                    return 1;
                }
            } else {
                log_error("Port argument requires a value");
                return 1;
            }
        } else {
            log_error("Unknown argument");
            print_usage(argv[0]);
            return 1;
        }
    }

    // Set up signal handlers
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);

    printf(BLUE BOLD "=== C Nostr Relay Server ===" RESET "\n");

    // Initialize database
    if (init_database() != 0) {
        log_error("Failed to initialize database");
        return 1;
    }

    // Initialize nostr library
    if (nostr_init() != 0) {
        log_error("Failed to initialize nostr library");
        close_database();
        return 1;
    }

    log_info("Starting relay server...");

    // Start WebSocket Nostr relay server on the requested port
    int result = start_websocket_relay(port);

    // Cleanup
    nostr_cleanup();
    close_database();

    if (result == 0) {
        log_success("Server shutdown complete");
    } else {
        log_error("Server shutdown with errors");
    }

    return result;
}
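// NOTE (editorial sketch): a typical NIP-01 exchange with this relay, as it
// appears on the wire (client -> relay, then relay -> client), matching the
// OK and EOSE responses constructed above:
//
//   -> ["EVENT",{"id":"...","pubkey":"...","created_at":1700000000,"kind":1,
//               "tags":[],"content":"hello","sig":"..."}]
//   <- ["OK","<event id>",true,""]
//
//   -> ["REQ","sub1",{"kinds":[1],"limit":10}]
//   <- ["EVENT","sub1",{...}]   (zero or more stored events)
//   <- ["EOSE","sub1"]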
281
tests/1_nip_test.sh
Executable file
@@ -0,0 +1,281 @@
#!/bin/bash

# Comprehensive C-Relay Test - Test event types and subscriptions
# Uses nak to generate and publish various event types, then tests subscriptions

set -e  # Exit on any error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
TEST_PRIVATE_KEY="nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99"

# Print functions
print_header() {
    echo -e "${BLUE}${BOLD}=== $1 ===${RESET}"
}

print_step() {
    echo -e "${YELLOW}[STEP]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}✓${RESET} $1"
}

print_error() {
    echo -e "${RED}✗${RESET} $1"
}

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

# Global arrays to store event IDs for subscription tests
declare -a REGULAR_EVENT_IDS=()
declare -a REPLACEABLE_EVENT_IDS=()
declare -a EPHEMERAL_EVENT_IDS=()
declare -a ADDRESSABLE_EVENT_IDS=()

# Helper function to publish event and extract ID
publish_event() {
    local event_json="$1"
    local event_type="$2"
    local description="$3"

    # Extract event ID
    local event_id=$(echo "$event_json" | jq -r '.id' 2>/dev/null)
    if [[ "$event_id" == "null" || -z "$event_id" ]]; then
        print_error "Could not extract event ID from $description"
        return 1
    fi

    print_info "Publishing $description..."

    # Create EVENT message in Nostr format
    local event_message="[\"EVENT\",$event_json]"

    # Publish to relay
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
    else
        print_error "websocat not found - required for testing"
        return 1
    fi

    # Check response
    if [[ "$response" == *"Connection failed"* ]]; then
        print_error "Failed to connect to relay for $description"
        return 1
    elif [[ "$response" == *"true"* ]]; then
        print_success "$description uploaded (ID: ${event_id:0:16}...)"

        # Store event ID in appropriate array
        case "$event_type" in
            "regular") REGULAR_EVENT_IDS+=("$event_id") ;;
            "replaceable") REPLACEABLE_EVENT_IDS+=("$event_id") ;;
            "ephemeral") EPHEMERAL_EVENT_IDS+=("$event_id") ;;
            "addressable") ADDRESSABLE_EVENT_IDS+=("$event_id") ;;
        esac
        echo # Add blank line for readability
        return 0
    else
        print_warning "$description might have failed: $response"
        echo # Add blank line for readability
        return 1
    fi
}

# Test subscription with filters
test_subscription() {
    local sub_id="$1"
    local filter="$2"
    local description="$3"
    local expected_count="$4"

    print_step "Testing subscription: $description"

    # Create REQ message
    local req_message="[\"REQ\",\"$sub_id\",$filter]"

    print_info "Testing filter: $filter"

    # Send subscription and collect events
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo -e "$req_message\n[\"CLOSE\",\"$sub_id\"]" | timeout 3s websocat "$RELAY_URL" 2>/dev/null || echo "")
    fi

    # Count EVENT responses (lines containing ["EVENT","sub_id",...])
    # Note: grep -c already prints "0" on no match (while exiting non-zero),
    # so use "|| true" rather than "|| echo 0" to avoid a doubled value.
    local event_count=0
    if [[ -n "$response" ]]; then
        event_count=$(echo "$response" | grep -c "\"EVENT\"" 2>/dev/null || true)
    fi

    if [[ "$expected_count" == "any" ]]; then
        if [[ $event_count -gt 0 ]]; then
            print_success "$description - Found $event_count events"
        else
            print_warning "$description - No events found"
        fi
    elif [[ $event_count -eq $expected_count ]]; then
        print_success "$description - Found expected $event_count events"
    else
        print_warning "$description - Expected $expected_count events, found $event_count"
    fi

    # Show a few sample events for verification (first 2)
    if [[ $event_count -gt 0 && "$description" == "All events" ]]; then
        print_info "Sample events (first 2):"
        echo "$response" | grep "\"EVENT\"" | head -2 | while IFS= read -r line; do
            local event_content=$(echo "$line" | jq -r '.[2].content' 2>/dev/null || echo "N/A")
            local event_kind=$(echo "$line" | jq -r '.[2].kind' 2>/dev/null || echo "N/A")
            local event_id=$(echo "$line" | jq -r '.[2].id' 2>/dev/null || echo "N/A")
            echo "  - ID: ${event_id:0:16}... Kind: $event_kind Content: ${event_content:0:30}..."
        done
    fi

    echo # Add blank line for readability
    return 0
}

# Main test function
run_comprehensive_test() {
    print_header "C-Relay Comprehensive Test"

    # Check dependencies
    print_step "Checking dependencies..."
    if ! command -v nak &> /dev/null; then
        print_error "nak command not found"
        print_info "Please install nak: go install github.com/fiatjaf/nak@latest"
        return 1
    fi
    if ! command -v websocat &> /dev/null; then
        print_error "websocat command not found"
        print_info "Please install websocat for testing"
        return 1
    fi
    if ! command -v jq &> /dev/null; then
        print_error "jq command not found"
        print_info "Please install jq for JSON processing"
        return 1
    fi
    print_success "All dependencies found"

    print_header "PHASE 1: Publishing Various Event Types"

    # Test 1: Regular Events (kind 1)
    print_step "Creating regular events (kind 1)..."
    local regular1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Regular event #1" -k 1 --ts $(($(date +%s) - 100)) -t "type=regular" -t "test=phase1" 2>/dev/null)
    local regular2=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Regular event #2 with tags" -k 1 --ts $(($(date +%s) - 90)) -e "previous_event_id" -p "test_pubkey" -t "type=regular" -t "test=phase1" 2>/dev/null)

    publish_event "$regular1" "regular" "Regular event #1"
    publish_event "$regular2" "regular" "Regular event #2"

    # Test 2: Replaceable Events (kind 0 - metadata)
    print_step "Creating replaceable events (kind 0)..."
    local replaceable1=$(nak event --sec "$TEST_PRIVATE_KEY" -c '{"name":"Test User","about":"Testing C-Relay"}' -k 0 --ts $(($(date +%s) - 80)) -t "type=replaceable" 2>/dev/null)
    local replaceable2=$(nak event --sec "$TEST_PRIVATE_KEY" -c '{"name":"Test User Updated","about":"Updated profile"}' -k 0 --ts $(($(date +%s) - 70)) -t "type=replaceable" 2>/dev/null)

    publish_event "$replaceable1" "replaceable" "Replaceable event #1 (metadata)"
    publish_event "$replaceable2" "replaceable" "Replaceable event #2 (metadata update)"

    # Test 3: Ephemeral Events (kind 20000+)
    print_step "Creating ephemeral events (kind 20001)..."
    local ephemeral1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Ephemeral event - should not be stored permanently" -k 20001 --ts $(date +%s) -t "type=ephemeral" 2>/dev/null)

    publish_event "$ephemeral1" "ephemeral" "Ephemeral event"

    # Test 4: Addressable Events (kind 30000+)
    print_step "Creating addressable events (kind 30001)..."
    local addressable1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Addressable event with d-tag" -k 30001 --ts $(($(date +%s) - 50)) -t "d=test-article" -t "type=addressable" 2>/dev/null)
    local addressable2=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Updated addressable event" -k 30001 --ts $(($(date +%s) - 40)) -t "d=test-article" -t "type=addressable" -t "updated=true" 2>/dev/null)

    publish_event "$addressable1" "addressable" "Addressable event #1"
    publish_event "$addressable2" "addressable" "Addressable event #2 (update)"

    # Brief pause to let events settle
    sleep 2

    print_header "PHASE 2: Testing Subscriptions and Filters"

    # Test subscription filters
    print_step "Testing various subscription filters..."

    # Test 1: Get all events
    test_subscription "test_all" '{}' "All events" "any"

    # Test 2: Get events by kind
    test_subscription "test_kind1" '{"kinds":[1]}' "Kind 1 events only" "2"
    test_subscription "test_kind0" '{"kinds":[0]}' "Kind 0 events only" "any"

    # Test 3: Get events by author (pubkey)
    local test_pubkey=$(echo "$regular1" | jq -r '.pubkey' 2>/dev/null)
    test_subscription "test_author" "{\"authors\":[\"$test_pubkey\"]}" "Events by specific author" "any"

    # Test 4: Get recent events (time-based)
    local recent_timestamp=$(($(date +%s) - 200))
    test_subscription "test_recent" "{\"since\":$recent_timestamp}" "Recent events" "any"

    # Test 5: Get events with specific tags
    test_subscription "test_tag_type" '{"#type":["regular"]}' "Events with type=regular tag" "any"

    # Test 6: Multiple kinds
    test_subscription "test_multi_kinds" '{"kinds":[0,1]}' "Multiple kinds (0,1)" "any"

    # Test 7: Limit results
    test_subscription "test_limit" '{"kinds":[1],"limit":1}' "Limited to 1 event" "1"

    print_header "PHASE 3: Database Verification"

    # Check what's actually stored in the database
    print_step "Verifying database contents..."

    if command -v sqlite3 &> /dev/null; then
        print_info "Events by type in database:"
        sqlite3 db/c_nostr_relay.db "SELECT event_type, COUNT(*) as count FROM events GROUP BY event_type;" | while read line; do
            echo "  $line"
        done

        print_info "Recent events in database:"
        sqlite3 db/c_nostr_relay.db "SELECT substr(id, 1, 16) || '...' as short_id, event_type, kind, substr(content, 1, 30) || '...' as short_content FROM events ORDER BY created_at DESC LIMIT 5;" | while read line; do
            echo "  $line"
        done

        print_success "Database verification complete"
    else
        print_warning "sqlite3 not available for database verification"
    fi

    return 0
}

# Run the comprehensive test
print_header "Starting C-Relay Comprehensive Test Suite"
echo

if run_comprehensive_test; then
    echo
    print_success "All tests completed successfully!"
    print_info "The C-Relay hybrid schema implementation is working correctly"
    echo
    exit 0
else
    echo
    print_error "Some tests failed"
    exit 1
fi