Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 2ed4b96058 | |
| | c0051b22be | |
| | 4cc2d2376e | |
| | 30dc4bf67d | |
| | a1928cc5d7 | |
| | 7bf0757b1f | |
| | 11b0a88cdd | |
@@ -107,13 +107,13 @@ COPY Makefile /build/Makefile
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
# Use conditional compilation flags based on DEBUG_BUILD argument
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
        CFLAGS="-g -O0 -DDEBUG"; \
        STRIP_CMD=""; \
        echo "Building with DEBUG symbols enabled"; \
        CFLAGS="-g -O2 -DDEBUG"; \
        STRIP_CMD="echo 'Keeping debug symbols'"; \
        echo "Building with DEBUG symbols enabled (optimized with -O2)"; \
    else \
        CFLAGS="-O2"; \
        STRIP_CMD="strip /build/c_relay_static"; \
        echo "Building optimized production binary"; \
        echo "Building optimized production binary (symbols stripped)"; \
    fi && \
    gcc -static $CFLAGS -Wall -Wextra -std=c99 \
        -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
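For reference, a minimal sketch of how the `DEBUG_BUILD` argument toggled above would be passed at build time (assuming the Dockerfile declares `ARG DEBUG_BUILD`, as the comment suggests; the image tags here are hypothetical):

```bash
# Debug image: keeps symbols, no stripping (per the RUN block above)
docker build --build-arg DEBUG_BUILD=true -t c-relay:debug .

# Production image: optimized, binary stripped
docker build -t c-relay:latest .
```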
Real-Time Traffic Monitoring Commands.md (new file, 174 lines)
@@ -0,0 +1,174 @@
# Real-Time Traffic Monitoring Commands (Direct Server Use)

Copy and paste these commands directly on your server.

## Quick Status Checks

### See which IPs have been visiting (last 500 requests):
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -20
```

### See what status codes a specific IP is getting:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $1, $9}' | grep '216.73.216.38'
```

### Count status codes (200 vs 403):
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c
```

## Real-Time Monitoring

### Watch live traffic (updates every 2 seconds):
```bash
watch -n 2 'sudo tail -200 /var/log/nginx/access.log | awk "{print \$1}" | sort | uniq -c | sort -rn | head -15'
```

### See live log entries as they happen:
```bash
sudo tail -f /var/log/nginx/access.log
```

### Live GoAccess dashboard:
```bash
sudo tail -f /var/log/nginx/access.log | goaccess -
```

## Active Connections

### See who's connected RIGHT NOW:
```bash
sudo netstat -tn | grep ':443' | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -rn
```

### Alternative (using the ss command):
```bash
sudo ss -tn | grep ':443' | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -rn
```

## Detailed Analysis

### Last 100 requests with timestamps:
```bash
sudo tail -100 /var/log/nginx/access.log | awk '{print $4, $1}' | sed 's/\[//'
```

### See what a blocked IP is trying to access:
```bash
sudo tail -500 /var/log/nginx/access.log | grep '216.73.216.38' | awk '{print $7}' | head -10
```

### Show all 403 (blocked) requests:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '$9==403 {print $1}' | sort | uniq -c | sort -rn
```

### Show all successful (200) requests:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '$9==200 {print $1}' | sort | uniq -c | sort -rn | head -10
```

## Comprehensive Monitoring Script

### Create a monitoring script:
```bash
cat > /tmp/monitor-traffic.sh << 'EOF'
#!/bin/bash
echo "=== Traffic in last 5 minutes ==="
echo "Time: $(date)"
echo ""
echo "Top IPs:"
sudo tail -1000 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -10
echo ""
echo "Blocked IPs (403 errors):"
sudo tail -1000 /var/log/nginx/access.log | awk '$9==403 {print $1}' | sort | uniq -c | sort -rn
echo ""
echo "Successful requests (200):"
sudo tail -1000 /var/log/nginx/access.log | awk '$9==200 {print $1}' | sort | uniq -c | sort -rn | head -5
echo ""
echo "Status Code Summary:"
sudo tail -1000 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c | sort -rn
EOF
chmod +x /tmp/monitor-traffic.sh
```

### Run the monitoring script:
```bash
/tmp/monitor-traffic.sh
```
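If you want the script's output to refresh automatically, one option (a small convenience, not part of the committed file) is to wrap it in `watch`, running `watch` itself with sudo so the `sudo tail` calls inside the script don't prompt:

```bash
# Re-run the monitoring script every 5 seconds
sudo watch -n 5 /tmp/monitor-traffic.sh
```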
## Auto-Refreshing Dashboard

### Live dashboard (refreshes every 5 seconds):
```bash
watch -n 5 'echo "=== Last 5 minutes ==="
date
echo ""
echo "Top IPs:"
sudo tail -1000 /var/log/nginx/access.log | awk "{print \$1}" | sort | uniq -c | sort -rn | head -10
echo ""
echo "Status Codes:"
sudo tail -1000 /var/log/nginx/access.log | awk "{print \$9}" | sort | uniq -c | sort -rn'
```

Press `Ctrl+C` to exit.

## GoAccess HTML Report (Live Updating)

### Generate live HTML report:
```bash
sudo goaccess /var/log/nginx/access.log -o /var/www/html/live-stats.html --real-time-html --daemonize
```

Then visit: https://git.laantungir.net/live-stats.html

### Stop the live report:
```bash
sudo pkill -f "goaccess.*live-stats"
```

## Filter by Time

### Get timestamp from 5 minutes ago:
```bash
date -d '5 minutes ago' '+%d/%b/%Y:%H:%M'
```

### Analyze only recent logs (replace timestamp):
```bash
sudo awk '/01\/Feb\/2026:19:09/,0' /var/log/nginx/access.log | goaccess -
```
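To avoid pasting the timestamp by hand, the two commands above can be combined. This is a sketch, not part of the original file; it assumes GNU `date` (as used above) and only produces output if at least one request was actually logged during that minute:

```bash
# Minute-resolution timestamp from 5 minutes ago, matched as a literal
# substring (no regex escaping needed), then everything from that line onward.
TS=$(date -d '5 minutes ago' '+%d/%b/%Y:%H:%M')
sudo awk -v ts="$TS" 'index($0, ts) {found=1} found' /var/log/nginx/access.log | goaccess -
```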
## Check Gitea CPU

### Current CPU usage:
```bash
ps aux | grep gitea | grep -v grep
```

### Watch CPU in real-time:
```bash
watch -n 2 'ps aux | grep gitea | grep -v grep'
```

## Most Useful Command for Quick Check

This one-liner shows everything you need:
```bash
echo "=== Quick Status ===" && \
echo "Time: $(date)" && \
echo "" && \
echo "Top 10 IPs (last 1000 requests):" && \
sudo tail -1000 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -10 && \
echo "" && \
echo "Status Codes:" && \
sudo tail -1000 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c && \
echo "" && \
echo "Gitea CPU:" && \
ps aux | grep gitea | grep -v grep
```

Copy any of these commands and run them directly on your server!
api/index.js (17 changed lines)
@@ -4324,6 +4324,12 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
    const oldestDuration = Math.max(...subscriptions.map(s => now - s.created_at));
    const oldestDurationStr = formatDuration(oldestDuration);

    // Calculate total query stats for this connection
    const totalQueries = subscriptions.reduce((sum, s) => sum + (s.db_queries_executed || 0), 0);
    const totalRows = subscriptions.reduce((sum, s) => sum + (s.db_rows_returned || 0), 0);
    const avgQueryRate = subscriptions.length > 0 ? (subscriptions[0].query_rate_per_min || 0) : 0;
    const clientIp = subscriptions.length > 0 ? (subscriptions[0].client_ip || 'unknown') : 'unknown';

    // Create header row (summary)
    const headerRow = document.createElement('tr');
    headerRow.className = 'subscription-group-header';
@@ -4334,9 +4340,14 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
    headerRow.innerHTML = `
        <td colspan="4" style="padding: 8px;">
            <span class="expand-icon" style="display: inline-block; width: 20px; transition: transform 0.2s;">▶</span>
            <strong style="font-family: 'Courier New', monospace; font-size: 12px;">Websocket: ${wsiPointer}</strong>
            <span style="color: #666; margin-left: 15px;">
                Subscriptions: ${subCount} | Oldest: ${oldestDurationStr}
            <strong style="font-family: 'Courier New', monospace; font-size: 12px;">IP: ${clientIp}</strong>
            <span style="color: #666; margin-left: 10px; font-size: 11px;">
                WS: ${wsiPointer} |
                Subs: ${subCount} |
                Queries: ${totalQueries.toLocaleString()} |
                Rows: ${totalRows.toLocaleString()} |
                Rate: ${avgQueryRate.toFixed(1)} q/min |
                Duration: ${oldestDurationStr}
            </span>
        </td>
    `;
@@ -188,17 +188,17 @@ update_version_in_header() {
        exit 1
    fi

    # Update VERSION macro
    sed -i "s/#define VERSION \".*\"/#define VERSION \"$new_version\"/" src/main.h
    # Update CRELAY_VERSION macro
    sed -i "s/#define CRELAY_VERSION \".*\"/#define CRELAY_VERSION \"$new_version\"/" src/main.h

    # Update VERSION_MAJOR macro
    sed -i "s/#define VERSION_MAJOR [0-9]\+/#define VERSION_MAJOR $major/" src/main.h
    # Update CRELAY_VERSION_MAJOR macro
    sed -i "s/#define CRELAY_VERSION_MAJOR [0-9]\+/#define CRELAY_VERSION_MAJOR $major/" src/main.h

    # Update VERSION_MINOR macro
    sed -i "s/#define VERSION_MINOR .*/#define VERSION_MINOR $minor/" src/main.h
    # Update CRELAY_VERSION_MINOR macro
    sed -i "s/#define CRELAY_VERSION_MINOR .*/#define CRELAY_VERSION_MINOR $minor/" src/main.h

    # Update VERSION_PATCH macro
    sed -i "s/#define VERSION_PATCH [0-9]\+/#define VERSION_PATCH $patch/" src/main.h
    # Update CRELAY_VERSION_PATCH macro
    sed -i "s/#define CRELAY_VERSION_PATCH [0-9]\+/#define CRELAY_VERSION_PATCH $patch/" src/main.h

    print_success "Updated version in src/main.h to $new_version"
}
src/api.c (93 changed lines)
@@ -1,6 +1,11 @@
// Define _GNU_SOURCE to ensure all POSIX features are available
#define _GNU_SOURCE

// Forward declaration for query logging (defined in main.c)
extern void log_query_execution(const char* query_type, const char* sub_id,
                                const char* client_ip, const char* sql,
                                long elapsed_us, int rows_returned);

// API module for serving embedded web content and admin API functions
#include <stdio.h>
#include <stdlib.h>
@@ -66,6 +71,10 @@ cJSON* query_event_kind_distribution(void) {
    sqlite3_stmt* stmt;
    const char* sql = "SELECT kind, COUNT(*) as count FROM events GROUP BY kind ORDER BY count DESC";

    // Start timing
    struct timespec query_start, query_end;
    clock_gettime(CLOCK_MONOTONIC, &query_start);

    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        DEBUG_ERROR("Failed to prepare event kind distribution query");
        return NULL;
@@ -77,8 +86,10 @@ cJSON* query_event_kind_distribution(void) {

    cJSON* kinds_array = cJSON_CreateArray();
    long long total_events = 0;
    int row_count = 0;

    while (sqlite3_step(stmt) == SQLITE_ROW) {
        row_count++;
        int kind = sqlite3_column_int(stmt, 0);
        long long count = sqlite3_column_int64(stmt, 1);
        total_events += count;
@@ -90,6 +101,13 @@ cJSON* query_event_kind_distribution(void) {
    }

    sqlite3_finalize(stmt);

    // Stop timing and log
    clock_gettime(CLOCK_MONOTONIC, &query_end);
    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;

    log_query_execution("MONITOR", "event_kinds", NULL, sql, elapsed_us, row_count);

    cJSON_AddNumberToObject(distribution, "total_events", total_events);
    cJSON_AddItemToObject(distribution, "kinds", kinds_array);
@@ -245,6 +263,10 @@ cJSON* query_subscription_details(void) {
    DEBUG_LOG("=== SUBSCRIPTION_DETAILS QUERY DEBUG ===");
    DEBUG_LOG("Query: %s", sql);

    // Start timing
    struct timespec query_start, query_end;
    clock_gettime(CLOCK_MONOTONIC, &query_start);

    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        DEBUG_ERROR("Failed to prepare subscription details query");
        return NULL;
@@ -287,6 +309,46 @@ cJSON* query_subscription_details(void) {
    cJSON_AddBoolToObject(sub_obj, "active", 1); // All from this view are active
    cJSON_AddStringToObject(sub_obj, "wsi_pointer", wsi_pointer ? wsi_pointer : "N/A");

    // Extract query stats from per_session_data if wsi is still valid
    int db_queries = 0;
    int db_rows = 0;
    double query_rate = 0.0;
    double row_rate = 0.0;
    double avg_rows_per_query = 0.0;

    if (wsi_pointer && strlen(wsi_pointer) > 2) { // Check for valid pointer string
        // Parse wsi pointer from hex string
        struct lws* wsi = NULL;
        if (sscanf(wsi_pointer, "%p", (void**)&wsi) == 1 && wsi != NULL) {
            // Get per_session_data from wsi
            struct per_session_data* pss = (struct per_session_data*)lws_wsi_user(wsi);
            if (pss) {
                db_queries = pss->db_queries_executed;
                db_rows = pss->db_rows_returned;

                // Calculate rates (per minute)
                time_t connection_duration = current_time - pss->query_tracking_start;
                if (connection_duration > 0) {
                    double minutes = connection_duration / 60.0;
                    query_rate = db_queries / minutes;
                    row_rate = db_rows / minutes;
                }

                // Calculate average rows per query
                if (db_queries > 0) {
                    avg_rows_per_query = (double)db_rows / (double)db_queries;
                }
            }
        }
    }

    // Add query stats to subscription object
    cJSON_AddNumberToObject(sub_obj, "db_queries_executed", db_queries);
    cJSON_AddNumberToObject(sub_obj, "db_rows_returned", db_rows);
    cJSON_AddNumberToObject(sub_obj, "query_rate_per_min", query_rate);
    cJSON_AddNumberToObject(sub_obj, "row_rate_per_min", row_rate);
    cJSON_AddNumberToObject(sub_obj, "avg_rows_per_query", avg_rows_per_query);

    // Parse and add filter JSON if available
    if (filter_json) {
        cJSON* filters = cJSON_Parse(filter_json);
@@ -311,8 +373,15 @@ cJSON* query_subscription_details(void) {

    cJSON_AddItemToObject(subscriptions_data, "data", data);

    // Stop timing and log
    clock_gettime(CLOCK_MONOTONIC, &query_end);
    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;

    log_query_execution("MONITOR", "subscription_details", NULL, sql, elapsed_us, row_count);

    // DEBUG: Log final summary
    DEBUG_LOG("Total subscriptions found: %d", cJSON_GetArraySize(subscriptions_array));
    DEBUG_LOG("Total subscriptions found: %d", row_count);
    DEBUG_LOG("=== END SUBSCRIPTION_DETAILS QUERY DEBUG ===");

    return subscriptions_data;
@@ -459,10 +528,15 @@ int generate_monitoring_event_for_type(const char* d_tag_value, cJSON* (*query_f

// Monitoring hook called when an event is stored
void monitoring_on_event_stored(void) {
    // Check throttling first (cheapest check)
    // Check if monitoring is disabled (throttle = 0)
    int throttle_seconds = get_monitoring_throttle_seconds();
    if (throttle_seconds == 0) {
        return; // Monitoring disabled
    }

    // Check throttling
    static time_t last_monitoring_time = 0;
    time_t current_time = time(NULL);
    int throttle_seconds = get_monitoring_throttle_seconds();

    if (current_time - last_monitoring_time < throttle_seconds) {
        return;
@@ -481,10 +555,15 @@ void monitoring_on_event_stored(void) {

// Monitoring hook called when subscriptions change (create/close)
void monitoring_on_subscription_change(void) {
    // Check throttling first (cheapest check)
    // Check if monitoring is disabled (throttle = 0)
    int throttle_seconds = get_monitoring_throttle_seconds();
    if (throttle_seconds == 0) {
        return; // Monitoring disabled
    }

    // Check throttling
    static time_t last_monitoring_time = 0;
    time_t current_time = time(NULL);
    int throttle_seconds = get_monitoring_throttle_seconds();

    if (current_time - last_monitoring_time < throttle_seconds) {
        return;
@@ -2721,8 +2800,8 @@ int handle_monitoring_command(cJSON* event, const char* command, char* error_mes

    char* endptr;
    long throttle_seconds = strtol(arg, &endptr, 10);
    if (*endptr != '\0' || throttle_seconds < 1 || throttle_seconds > 3600) {
        char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 1 and 3600 seconds.";
    if (*endptr != '\0' || throttle_seconds < 0 || throttle_seconds > 3600) {
        char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 0 and 3600 seconds (0 = disabled).";
        return send_admin_response(sender_pubkey, response_content, request_id, error_message, error_size, wsi);
    }

File diff suppressed because one or more lines are too long
src/main.c (116 changed lines)
@@ -20,6 +20,7 @@
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include "../nostr_core_lib/nostr_core/nip013.h" // NIP-13: Proof of Work
#include "../nostr_core_lib/nostr_core/nip019.h" // NIP-19: bech32-encoded entities
#include "main.h"        // Version and relay metadata
#include "config.h"      // Configuration management system
#include "sql_schema.h"  // Embedded database schema
#include "websockets.h"  // WebSocket protocol implementation
@@ -228,6 +229,65 @@ void send_notice_message(struct lws* wsi, struct per_session_data* pss, const ch
}


/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// DATABASE QUERY LOGGING
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

/**
 * Log database query execution with timing and context
 * Only logs at debug level 3 (DEBUG) or higher
 * Warns if query takes >10ms (slow query)
 *
 * @param query_type Type of query (REQ, COUNT, INSERT, CONFIG, etc.)
 * @param sub_id Subscription ID (NULL if not applicable)
 * @param client_ip Client IP address (NULL if not applicable)
 * @param sql SQL query text
 * @param elapsed_us Execution time in microseconds
 * @param rows_returned Number of rows returned or affected
 */
void log_query_execution(const char* query_type, const char* sub_id,
                         const char* client_ip, const char* sql,
                         long elapsed_us, int rows_returned) {
    // Only log at debug level 3 (INFO) or higher
    if (g_debug_level < DEBUG_LEVEL_INFO) {
        return;
    }

    // Truncate SQL if too long (keep first 500 chars)
    char sql_truncated[512];
    if (strlen(sql) > 500) {
        snprintf(sql_truncated, sizeof(sql_truncated), "%.497s...", sql);
    } else {
        snprintf(sql_truncated, sizeof(sql_truncated), "%s", sql);
    }

    // Get timestamp
    time_t now = time(NULL);
    struct tm* tm_info = localtime(&now);
    char timestamp[32];
    strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);

    // Log query with all context (direct to stdout/stderr, not through DEBUG_LOG)
    fprintf(stderr, "[%s] [QUERY] type=%s sub=%s ip=%s time=%ldus rows=%d sql=%s\n",
            timestamp,
            query_type,
            sub_id ? sub_id : "N/A",
            client_ip ? client_ip : "N/A",
            elapsed_us,
            rows_returned,
            sql_truncated);

    // Warn if query is slow (>10ms = 10000us)
    if (elapsed_us > 10000) {
        fprintf(stderr, "[%s] [SLOW_QUERY] %ldms: %s\n",
                timestamp, elapsed_us / 1000, sql_truncated);
    }

    fflush(stderr);
}

/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// DATABASE FUNCTIONS
@@ -369,12 +429,17 @@ int init_database(const char* database_path_override) {
    if (!db_version || strcmp(db_version, "5") == 0) {
        needs_migration = 1;
    } else if (strcmp(db_version, "6") == 0) {
        // Database is already at current schema version v6
        // Database is at schema version v6 (compatible)
    } else if (strcmp(db_version, "7") == 0) {
        // Database is at schema version v7 (compatible)
    } else if (strcmp(db_version, "8") == 0) {
        // Database is at schema version v8 (compatible)
    } else if (strcmp(db_version, EMBEDDED_SCHEMA_VERSION) == 0) {
        // Database is at current schema version
    } else {
        char warning_msg[256];
        snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s", db_version);
        snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s (expected %s)",
                 db_version, EMBEDDED_SCHEMA_VERSION);
        DEBUG_WARN(warning_msg);
    }
} else {
@@ -1191,6 +1256,10 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
        snprintf(sql_ptr, remaining, " LIMIT 500");
    }

    // Start query timing
    struct timespec query_start, query_end;
    clock_gettime(CLOCK_MONOTONIC, &query_start);

    // Execute query and send events
    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -1198,9 +1267,30 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
        char error_msg[256];
        snprintf(error_msg, sizeof(error_msg), "Failed to prepare subscription query: %s", sqlite3_errmsg(g_db));
        DEBUG_ERROR(error_msg);

        // Log the failed query so we can see what SQL was generated
        if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
            time_t now = time(NULL);
            struct tm* tm_info = localtime(&now);
            char timestamp[32];
            strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
            fprintf(stderr, "[%s] [QUERY_FAILED] type=REQ sub=%s ip=%s error=%s sql=%s\n",
                    timestamp,
                    sub_id,
                    pss ? pss->client_ip : "N/A",
                    sqlite3_errmsg(g_db),
                    sql);
            fflush(stderr);
        }

        continue;
    }

    // Track query execution for abuse detection
    if (pss) {
        pss->db_queries_executed++;
    }

    // Bind parameters
    for (int i = 0; i < bind_param_count; i++) {
        sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
@@ -1210,6 +1300,11 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
    while (sqlite3_step(stmt) == SQLITE_ROW) {
        row_count++;

        // Track rows returned for abuse detection
        if (pss) {
            pss->db_rows_returned++;
        }

        // Build event JSON
        cJSON* event = cJSON_CreateObject();
        cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
@@ -1264,6 +1359,14 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
    }

    sqlite3_finalize(stmt);

    // Stop query timing and log
    clock_gettime(CLOCK_MONOTONIC, &query_end);
    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;

    log_query_execution("REQ", sub_id, pss ? pss->client_ip : NULL,
                        sql, elapsed_us, row_count);
}

// Cleanup bind params
@@ -1426,7 +1529,7 @@ void print_usage(const char* program_name) {

// Print version information
void print_version() {
    printf("C Nostr Relay Server v1.0.0\n");
    printf("C Nostr Relay Server %s\n", CRELAY_VERSION);
    printf("Event-based configuration system\n");
    printf("Built with nostr_core_lib integration\n");
    printf("\n");
@@ -1562,6 +1665,10 @@ int main(int argc, char* argv[]) {
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);

    // Print version at startup (always, regardless of debug level)
    fprintf(stderr, "[RELAY_VERSION] C Nostr Relay Server %s\n", CRELAY_VERSION);
    fflush(stderr);

    printf(BLUE BOLD "=== C Nostr Relay Server ===" RESET "\n");


@@ -1807,6 +1914,9 @@ int main(int argc, char* argv[]) {
        return 1;
    }

    // Initialize kind-based index for fast subscription lookup
    init_kind_index();

    // Cleanup orphaned subscriptions from previous runs
    cleanup_all_subscriptions_on_startup();

src/main.h (15 changed lines)
@@ -10,21 +10,18 @@
#define MAIN_H

// Version information (auto-updated by build system)
#define VERSION_MAJOR 1
#define VERSION_MINOR 1
#define VERSION_PATCH 1
#define VERSION "v1.1.1"

// Avoid VERSION_MAJOR redefinition warning from nostr_core_lib
#undef VERSION_MAJOR
#define VERSION_MAJOR 1
// Using CRELAY_ prefix to avoid conflicts with nostr_core_lib VERSION macros
#define CRELAY_VERSION_MAJOR 1
#define CRELAY_VERSION_MINOR 1
#define CRELAY_VERSION_PATCH 8
#define CRELAY_VERSION "v1.1.8"

// Relay metadata (authoritative source for NIP-11 information)
#define RELAY_NAME "C-Relay"
#define RELAY_DESCRIPTION "High-performance C Nostr relay with SQLite storage"
#define RELAY_CONTACT ""
#define RELAY_SOFTWARE "https://git.laantungir.net/laantungir/c-relay.git"
#define RELAY_VERSION VERSION // Use the same version as the build
#define RELAY_VERSION CRELAY_VERSION // Use the same version as the build
#define SUPPORTED_NIPS "1,2,4,9,11,12,13,15,16,20,22,33,40,42,50,70"
#define LANGUAGE_TAGS ""
#define RELAY_COUNTRIES ""

@@ -37,6 +37,135 @@ extern int get_config_bool(const char* key, int default_value);
// Global subscription manager
extern subscription_manager_t g_subscription_manager;

/////////////////////////////////////////////////////////////////////////////////////////
// KIND-BASED INDEX FOR FAST SUBSCRIPTION LOOKUP
/////////////////////////////////////////////////////////////////////////////////////////

// Initialize the kind index (called once at startup)
void init_kind_index() {
    DEBUG_LOG("Initializing kind index for 65536 possible kinds");

    // Initialize all kind index entries to NULL
    for (int i = 0; i < 65536; i++) {
        g_subscription_manager.kind_index[i] = NULL;
    }

    // Initialize no-kind-filter list
    g_subscription_manager.no_kind_filter_subs = NULL;

    DEBUG_LOG("Kind index initialized successfully");
}

// Add a subscription to the kind index for all kinds it's interested in
// Must be called with subscriptions_lock held
void add_subscription_to_kind_index(subscription_t* sub) {
    if (!sub) return;

    int has_kind_filter = 0;

    // Track which kinds we've already added to avoid duplicates
    // Use a bitmap for memory efficiency: 65536 bits = 8192 bytes
    unsigned char added_kinds[8192] = {0}; // 65536 / 8 = 8192 bytes

    // Iterate through all filters in this subscription
    subscription_filter_t* filter = sub->filters;
    while (filter) {
        // Check if this filter has a kinds array
        if (filter->kinds && cJSON_IsArray(filter->kinds)) {
            has_kind_filter = 1;

            // Add subscription to index for each kind in the filter
            cJSON* kind_item = NULL;
            cJSON_ArrayForEach(kind_item, filter->kinds) {
                if (cJSON_IsNumber(kind_item)) {
                    int kind = (int)cJSON_GetNumberValue(kind_item);

                    // Bounds check
                    if (kind < 0 || kind > 65535) {
                        DEBUG_WARN("add_subscription_to_kind_index: kind %d out of range, skipping", kind);
                        continue;
                    }

                    // Check if we've already added this kind (deduplication)
                    int byte_index = kind / 8;
                    int bit_index = kind % 8;
                    if (added_kinds[byte_index] & (1 << bit_index)) {
                        DEBUG_TRACE("KIND_INDEX: Skipping duplicate kind %d for subscription '%s'", kind, sub->id);
                        continue; // Already added this kind
                    }

                    // Mark this kind as added
                    added_kinds[byte_index] |= (1 << bit_index);

                    // Create new index node
                    kind_subscription_node_t* node = malloc(sizeof(kind_subscription_node_t));
                    if (!node) {
                        DEBUG_ERROR("add_subscription_to_kind_index: failed to allocate node for kind %d", kind);
                        continue;
                    }

                    node->subscription = sub;
                    node->next = g_subscription_manager.kind_index[kind];
                    g_subscription_manager.kind_index[kind] = node;

                    DEBUG_TRACE("KIND_INDEX: Added subscription '%s' to kind %d index", sub->id, kind);
                }
            }
        }
        filter = filter->next;
    }

    // If subscription has no kind filter, add to no-kind-filter list using wrapper node
    if (!has_kind_filter) {
        no_kind_filter_node_t* node = malloc(sizeof(no_kind_filter_node_t));
        if (!node) {
            DEBUG_ERROR("add_subscription_to_kind_index: failed to allocate no-kind-filter node");
            return;
        }

        node->subscription = sub;
        node->next = g_subscription_manager.no_kind_filter_subs;
        g_subscription_manager.no_kind_filter_subs = node;
        DEBUG_TRACE("KIND_INDEX: Added subscription '%s' to no-kind-filter list", sub->id);
    }
}

// Remove a subscription from the kind index
// Must be called with subscriptions_lock held
void remove_subscription_from_kind_index(subscription_t* sub) {
    if (!sub) return;

    // Remove from all kind indexes
    for (int kind = 0; kind < 65536; kind++) {
        kind_subscription_node_t** current = &g_subscription_manager.kind_index[kind];

        while (*current) {
            if ((*current)->subscription == sub) {
                kind_subscription_node_t* to_free = *current;
                *current = (*current)->next;
                free(to_free);
                DEBUG_TRACE("KIND_INDEX: Removed subscription '%s' from kind %d index", sub->id, kind);
                // Don't break - subscription might be in index multiple times if it has duplicate kinds
            } else {
                current = &((*current)->next);
            }
        }
    }

    // Remove from no-kind-filter list if present
    no_kind_filter_node_t** current = &g_subscription_manager.no_kind_filter_subs;
    while (*current) {
        if ((*current)->subscription == sub) {
            no_kind_filter_node_t* to_free = *current;
            *current = (*current)->next;
            free(to_free);
            DEBUG_TRACE("KIND_INDEX: Removed subscription '%s' from no-kind-filter list", sub->id);
            break;
        }
        current = &((*current)->next);
    }
}


/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
@@ -284,6 +413,14 @@ int add_subscription_to_manager(subscription_t* sub) {
        g_subscription_manager.total_created++;
    }

    // Add to kind index for fast lookup (must be done while holding lock)
    add_subscription_to_kind_index(sub);

    // If we found a duplicate, remove it from the kind index
    if (duplicate_old) {
        remove_subscription_from_kind_index(duplicate_old);
    }

    pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);

    // If we replaced an existing subscription, unlink it from the per-session list before freeing
@@ -341,6 +478,9 @@ int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {

        // Match by ID and WebSocket connection
        if (strcmp(sub->id, sub_id) == 0 && (!wsi || sub->wsi == wsi)) {
            // Remove from kind index first (while still holding lock)
            remove_subscription_from_kind_index(sub);

            // Remove from list
            *current = sub->next;
            g_subscription_manager.total_subscriptions--;
@@ -654,19 +794,47 @@ int broadcast_event_to_subscriptions(cJSON* event) {
    temp_sub_t* matching_subs = NULL;
    int matching_count = 0;

    // Get event kind for index lookup
    int event_kind_val = -1;
    if (event_kind && cJSON_IsNumber(event_kind)) {
        event_kind_val = (int)cJSON_GetNumberValue(event_kind);
    }

    // First pass: collect matching subscriptions while holding lock
    pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);

    int total_subs = 0;
    subscription_t* count_sub = g_subscription_manager.active_subscriptions;
    while (count_sub) {
        total_subs++;
        count_sub = count_sub->next;
    }
    DEBUG_TRACE("BROADCAST: Checking %d active subscriptions", total_subs);
    // Use kind index for fast lookup instead of checking all subscriptions
    subscription_t* candidates_to_check[MAX_TOTAL_SUBSCRIPTIONS];
    int candidate_count = 0;

    subscription_t* sub = g_subscription_manager.active_subscriptions;
    while (sub) {
    // Add subscriptions from kind index (if event has valid kind)
    if (event_kind_val >= 0 && event_kind_val <= 65535) {
        DEBUG_TRACE("BROADCAST: Using kind index for kind=%d", event_kind_val);

        kind_subscription_node_t* node = g_subscription_manager.kind_index[event_kind_val];
        while (node && candidate_count < MAX_TOTAL_SUBSCRIPTIONS) {
            if (node->subscription && node->subscription->active) {
                candidates_to_check[candidate_count++] = node->subscription;
            }
            node = node->next;
        }
    }

    // Add subscriptions with no kind filter (must check against all events)
    no_kind_filter_node_t* no_kind_node = g_subscription_manager.no_kind_filter_subs;
    while (no_kind_node && candidate_count < MAX_TOTAL_SUBSCRIPTIONS) {
        if (no_kind_node->subscription && no_kind_node->subscription->active) {
            candidates_to_check[candidate_count++] = no_kind_node->subscription;
        }
        no_kind_node = no_kind_node->next;
    }

    DEBUG_TRACE("BROADCAST: Checking %d candidate subscriptions (kind index optimization)", candidate_count);

    // Test each candidate subscription
    for (int i = 0; i < candidate_count; i++) {
        subscription_t* sub = candidates_to_check[i];

        if (sub->active && sub->wsi && event_matches_subscription(event, sub)) {
            temp_sub_t* temp = malloc(sizeof(temp_sub_t));
            if (temp) {
@@ -695,7 +863,6 @@ int broadcast_event_to_subscriptions(cJSON* event) {
                DEBUG_ERROR("broadcast_event_to_subscriptions: failed to allocate temp subscription");
            }
        }
        sub = sub->next;
    }

    pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);

@@ -63,6 +63,18 @@ struct subscription {
    struct subscription* session_next; // Next subscription for this session
};

// Kind index entry - linked list of subscriptions interested in a specific kind
typedef struct kind_subscription_node {
    subscription_t* subscription;          // Pointer to subscription
    struct kind_subscription_node* next;   // Next subscription for this kind
} kind_subscription_node_t;

// No-kind-filter list entry - wrapper to avoid corrupting subscription->next pointer
typedef struct no_kind_filter_node {
    subscription_t* subscription;       // Pointer to subscription
    struct no_kind_filter_node* next;   // Next subscription in no-kind list
} no_kind_filter_node_t;

// Per-IP connection tracking
typedef struct ip_connection_info {
    char ip_address[CLIENT_IP_MAX_LENGTH]; // IP address
@@ -79,6 +91,10 @@ struct subscription_manager {
    pthread_mutex_t subscriptions_lock; // Global thread safety
    int total_subscriptions;            // Current count

    // Kind-based index for fast subscription lookup (10x performance improvement)
    kind_subscription_node_t* kind_index[65536];  // Array of subscription lists, one per kind
    no_kind_filter_node_t* no_kind_filter_subs;   // Subscriptions with no kind filter (wrapper nodes)

    // Configuration
    int max_subscriptions_per_client; // Default: 20
    int max_total_subscriptions;      // Default: 5000
@@ -104,6 +120,11 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter);
int event_matches_subscription(cJSON* event, subscription_t* subscription);
int broadcast_event_to_subscriptions(cJSON* event);

// Kind index functions for performance optimization
void init_kind_index(void);
void add_subscription_to_kind_index(subscription_t* sub);
void remove_subscription_from_kind_index(subscription_t* sub);

// Per-IP connection tracking functions
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip);
void update_ip_connection_activity(const char* client_ip);

@@ -86,6 +86,11 @@ int is_event_expired(cJSON* event, time_t current_time);
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);

// Forward declaration for query logging (defined in main.c)
extern void log_query_execution(const char* query_type, const char* sub_id,
                                const char* client_ip, const char* sql,
                                long elapsed_us, int rows_returned);

// Forward declarations for rate limiting
int is_client_rate_limited_for_malformed_requests(struct per_session_data *pss);
void record_malformed_request(struct per_session_data *pss);
@@ -391,6 +396,11 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
    memset(pss, 0, sizeof(*pss));
    pthread_mutex_init(&pss->session_lock, NULL);

    // Initialize database query tracking
    pss->db_queries_executed = 0;
    pss->db_rows_returned = 0;
    pss->query_tracking_start = time(NULL);

    // Get real client IP address
    char client_ip[CLIENT_IP_MAX_LENGTH];
    memset(client_ip, 0, sizeof(client_ip));
@@ -2429,7 +2439,7 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_

// Handle NIP-45 COUNT message
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss) {
    (void)pss; // Suppress unused parameter warning
    // pss is now used for query tracking, so remove unused warning suppression

    if (!cJSON_IsArray(filters)) {
        DEBUG_ERROR("COUNT filters is not an array");
@@ -2687,6 +2697,10 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
    }

    // Execute count query

    // Start query timing
    struct timespec query_start, query_end;
    clock_gettime(CLOCK_MONOTONIC, &query_start);

    // Execute count query
    sqlite3_stmt* stmt;
@@ -2711,6 +2725,15 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
    // Filter count calculated

    sqlite3_finalize(stmt);

    // Stop query timing and log
    clock_gettime(CLOCK_MONOTONIC, &query_end);
    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;

    log_query_execution("COUNT", sub_id, pss ? pss->client_ip : NULL,
                        sql, elapsed_us, 1); // COUNT always returns 1 row

    total_count += filter_count;
}


@@ -79,6 +79,11 @@ struct per_session_data {
    size_t reassembly_size;       // Current size of accumulated data
    size_t reassembly_capacity;   // Allocated capacity of reassembly buffer
    int reassembly_active;        // Flag: 1 if currently reassembling a message

    // Database query tracking for abuse detection and monitoring
    int db_queries_executed;      // Total SELECT queries executed by this connection
    int db_rows_returned;         // Total rows returned across all queries
    time_t query_tracking_start;  // When connection was established (for rate calculation)
};

// NIP-11 HTTP session data structure for managing buffer lifetime