itsgoin/crates/core/src/storage.rs
Scott Reimers 800388cda4 ItsGoin v0.3.2 — Decentralized social media network
No central server, user-owned data, reverse-chronological feed.
Rust core + Tauri desktop + Android app + plain HTML/CSS/JS frontend.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 20:23:09 -04:00

5514 lines
206 KiB
Rust
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

use std::net::SocketAddr;
use std::path::Path;
use rusqlite::{params, Connection};
use crate::types::{
Attachment, AudienceDirection, AudienceRecord, AudienceStatus, Circle, CircleProfile,
CommentPolicy, DeleteRecord, FollowVisibility, GossipPeerInfo, GroupEpoch, GroupId,
GroupKeyRecord, GroupMemberKey, InlineComment, ManifestEntry, NodeId, PeerRecord,
PeerSlotKind, PeerWithAddress, Post, PostId, PostVisibility, PublicProfile, Reaction,
ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus, ThreadMeta,
VisibilityIntent,
};
/// Blob metadata for eviction scoring.
///
/// One row of the `blobs` table, flattened for the eviction policy to rank.
pub struct EvictionCandidate {
// Content identifier of the blob (primary key of `blobs`).
pub cid: [u8; 32],
// Post this blob is attached to.
pub post_id: PostId,
// Node that authored the owning post.
pub author: NodeId,
// Size of the blob payload in bytes.
pub size_bytes: u64,
// Creation timestamp — presumably ms since epoch, matching other tables; confirm against writer.
pub created_at: u64,
// Last read timestamp, used for LRU-style scoring.
pub last_accessed_at: u64,
// Pinned blobs should never be evicted.
pub pinned: bool,
// Number of peers known to also hold a copy (higher = safer to evict).
pub peer_copies: u32,
}
/// SQLite-backed persistence layer for the node.
///
/// Wraps a single `rusqlite::Connection`; schema creation and migrations
/// are applied in `Storage::open`.
pub struct Storage {
conn: Connection,
}
impl Storage {
/// Open (or create) the database at `path`, enable WAL, create any missing
/// tables, and apply column migrations for databases from older versions.
pub fn open(path: impl AsRef<Path>) -> anyhow::Result<Self> {
    let conn = Connection::open(path)?;
    // Write-ahead logging: readers don't block the single writer.
    conn.execute_batch("PRAGMA journal_mode=WAL;")?;
    let store = Self { conn };
    store.init_tables()?;
    store.migrate()?;
    Ok(store)
}
/// Create every table and index the node needs, if not already present.
///
/// All statements are `IF NOT EXISTS`, so this runs safely on every open.
/// Column-level upgrades for tables created by older versions are handled
/// separately in `migrate` — this batch only defines the current schema.
fn init_tables(&self) -> anyhow::Result<()> {
self.conn.execute_batch(
"CREATE TABLE IF NOT EXISTS posts (
id BLOB PRIMARY KEY,
author BLOB NOT NULL,
content TEXT NOT NULL,
attachments TEXT NOT NULL DEFAULT '[]',
timestamp_ms INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS peers (
node_id BLOB PRIMARY KEY,
addresses TEXT NOT NULL DEFAULT '[]',
last_seen INTEGER NOT NULL,
introduced_by BLOB,
is_anchor INTEGER NOT NULL DEFAULT 0,
first_seen INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS follows (
node_id BLOB PRIMARY KEY,
visibility TEXT NOT NULL DEFAULT 'public'
);
CREATE TABLE IF NOT EXISTS profiles (
node_id BLOB PRIMARY KEY,
display_name TEXT NOT NULL,
bio TEXT NOT NULL DEFAULT '',
updated_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS circles (
name TEXT PRIMARY KEY,
created_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS circle_members (
circle_name TEXT NOT NULL,
node_id BLOB NOT NULL,
added_at INTEGER NOT NULL,
PRIMARY KEY (circle_name, node_id)
);
CREATE TABLE IF NOT EXISTS deleted_posts (
post_id BLOB PRIMARY KEY,
author BLOB NOT NULL,
deleted_at INTEGER NOT NULL,
signature BLOB NOT NULL
);
CREATE TABLE IF NOT EXISTS post_replicas (
post_id BLOB NOT NULL,
node_id BLOB NOT NULL,
last_confirmed_ms INTEGER NOT NULL,
PRIMARY KEY (post_id, node_id)
);
CREATE TABLE IF NOT EXISTS peer_neighbors (
peer_id BLOB NOT NULL,
neighbor_id BLOB NOT NULL,
is_anchor INTEGER NOT NULL DEFAULT 0,
reported_at INTEGER NOT NULL,
PRIMARY KEY (peer_id, neighbor_id)
);
CREATE INDEX IF NOT EXISTS idx_peer_neighbors_neighbor ON peer_neighbors(neighbor_id);
CREATE TABLE IF NOT EXISTS audience (
node_id BLOB NOT NULL,
direction TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'pending',
requested_at INTEGER NOT NULL,
approved_at INTEGER,
PRIMARY KEY (node_id, direction)
);
CREATE TABLE IF NOT EXISTS worm_cooldowns (
target_id BLOB PRIMARY KEY,
failed_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS reachable_n2 (
reporter_node_id BLOB NOT NULL,
reachable_node_id BLOB NOT NULL,
updated_at INTEGER NOT NULL,
PRIMARY KEY (reporter_node_id, reachable_node_id)
);
CREATE INDEX IF NOT EXISTS idx_n2_reachable ON reachable_n2(reachable_node_id);
CREATE TABLE IF NOT EXISTS reachable_n3 (
reporter_node_id BLOB NOT NULL,
reachable_node_id BLOB NOT NULL,
updated_at INTEGER NOT NULL,
PRIMARY KEY (reporter_node_id, reachable_node_id)
);
CREATE INDEX IF NOT EXISTS idx_n3_reachable ON reachable_n3(reachable_node_id);
CREATE TABLE IF NOT EXISTS mesh_peers (
node_id BLOB NOT NULL PRIMARY KEY,
slot_kind TEXT NOT NULL,
priority INTEGER NOT NULL DEFAULT 0,
connected_at INTEGER NOT NULL,
last_diff_seq INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS social_routes (
node_id BLOB NOT NULL PRIMARY KEY,
addresses TEXT NOT NULL DEFAULT '[]',
peer_addresses TEXT NOT NULL DEFAULT '[]',
relation TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'online',
last_connected_ms INTEGER NOT NULL DEFAULT 0,
last_seen_ms INTEGER NOT NULL DEFAULT 0,
reach_method TEXT NOT NULL DEFAULT 'direct'
);
CREATE TABLE IF NOT EXISTS reconnect_watchers (
target_node_id BLOB NOT NULL,
watcher_node_id BLOB NOT NULL,
added_at INTEGER NOT NULL,
PRIMARY KEY (target_node_id, watcher_node_id)
);
CREATE TABLE IF NOT EXISTS blobs (
cid BLOB PRIMARY KEY,
post_id BLOB NOT NULL,
author BLOB NOT NULL,
size_bytes INTEGER NOT NULL,
mime_type TEXT NOT NULL,
created_at INTEGER NOT NULL,
stored_at INTEGER NOT NULL,
last_accessed_at INTEGER NOT NULL,
pinned INTEGER NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_blobs_post_id ON blobs(post_id);
CREATE INDEX IF NOT EXISTS idx_posts_author ON posts(author);
CREATE INDEX IF NOT EXISTS idx_posts_timestamp ON posts(timestamp_ms DESC);
CREATE TABLE IF NOT EXISTS cdn_manifests (
cid BLOB PRIMARY KEY,
manifest_json TEXT NOT NULL,
author BLOB NOT NULL,
updated_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_cdn_manifests_author ON cdn_manifests(author);
CREATE TABLE IF NOT EXISTS blob_upstream (
cid BLOB PRIMARY KEY,
source_node_id BLOB NOT NULL,
source_addresses TEXT NOT NULL DEFAULT '[]',
stored_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS blob_downstream (
cid BLOB NOT NULL,
peer_node_id BLOB NOT NULL,
peer_addresses TEXT NOT NULL DEFAULT '[]',
registered_at INTEGER NOT NULL,
PRIMARY KEY (cid, peer_node_id)
);
CREATE INDEX IF NOT EXISTS idx_blob_downstream_cid ON blob_downstream(cid);
CREATE TABLE IF NOT EXISTS group_keys (
group_id BLOB PRIMARY KEY,
circle_name TEXT NOT NULL,
epoch INTEGER NOT NULL DEFAULT 1,
group_public_key BLOB NOT NULL,
group_seed BLOB,
admin BLOB NOT NULL,
created_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_group_keys_circle ON group_keys(circle_name);
CREATE TABLE IF NOT EXISTS group_member_keys (
group_id BLOB NOT NULL,
member BLOB NOT NULL,
epoch INTEGER NOT NULL,
wrapped_group_key BLOB NOT NULL,
PRIMARY KEY (group_id, member, epoch)
);
CREATE TABLE IF NOT EXISTS group_seeds (
group_id BLOB NOT NULL,
epoch INTEGER NOT NULL,
group_seed BLOB NOT NULL,
PRIMARY KEY (group_id, epoch)
);
CREATE TABLE IF NOT EXISTS relay_cooldowns (
target_id BLOB PRIMARY KEY,
failed_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS preferred_peers (
node_id BLOB PRIMARY KEY,
agreed_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS circle_profiles (
author BLOB NOT NULL,
circle_name TEXT NOT NULL,
display_name TEXT NOT NULL DEFAULT '',
bio TEXT NOT NULL DEFAULT '',
avatar_cid BLOB,
updated_at INTEGER NOT NULL,
encrypted_payload TEXT,
wrapped_cek BLOB,
group_id BLOB,
epoch INTEGER,
PRIMARY KEY (author, circle_name)
);
CREATE TABLE IF NOT EXISTS known_anchors (
node_id BLOB NOT NULL PRIMARY KEY,
addresses TEXT NOT NULL DEFAULT '[]',
last_seen_ms INTEGER NOT NULL,
success_count INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS post_downstream (
post_id BLOB NOT NULL,
peer_node_id BLOB NOT NULL,
registered_at INTEGER NOT NULL,
PRIMARY KEY (post_id, peer_node_id)
);
CREATE INDEX IF NOT EXISTS idx_post_downstream_post ON post_downstream(post_id);
CREATE TABLE IF NOT EXISTS post_upstream (
post_id BLOB PRIMARY KEY,
peer_node_id BLOB NOT NULL
);
CREATE TABLE IF NOT EXISTS blob_headers (
post_id BLOB PRIMARY KEY,
author BLOB NOT NULL,
header_json TEXT NOT NULL,
updated_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS reactions (
reactor BLOB NOT NULL,
post_id BLOB NOT NULL,
emoji TEXT NOT NULL,
timestamp_ms INTEGER NOT NULL,
encrypted_payload TEXT,
PRIMARY KEY (reactor, post_id, emoji)
);
CREATE INDEX IF NOT EXISTS idx_reactions_post ON reactions(post_id);
CREATE TABLE IF NOT EXISTS comments (
author BLOB NOT NULL,
post_id BLOB NOT NULL,
content TEXT NOT NULL,
timestamp_ms INTEGER NOT NULL,
signature BLOB NOT NULL,
PRIMARY KEY (author, post_id, timestamp_ms)
);
CREATE INDEX IF NOT EXISTS idx_comments_post ON comments(post_id);
CREATE TABLE IF NOT EXISTS comment_policies (
post_id BLOB PRIMARY KEY,
policy_json TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS thread_meta (
post_id BLOB PRIMARY KEY,
parent_post_id BLOB NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_thread_meta_parent ON thread_meta(parent_post_id);
CREATE TABLE IF NOT EXISTS settings (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS post_hosts (
post_id BLOB NOT NULL,
host TEXT NOT NULL,
last_seen_ms INTEGER NOT NULL,
PRIMARY KEY (post_id, host)
);",
)?;
Ok(())
}
/// Run schema migrations for existing databases
fn migrate(&self) -> anyhow::Result<()> {
// Add attachments column if missing (old schema)
let has_attachments = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='attachments'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_attachments == 0 {
self.conn.execute_batch(
"ALTER TABLE posts ADD COLUMN attachments TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Add visibility column if missing (old schema)
let has_visibility = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('follows') WHERE name='visibility'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_visibility == 0 {
self.conn.execute_batch(
"ALTER TABLE follows ADD COLUMN visibility TEXT NOT NULL DEFAULT 'public';"
)?;
}
// Add visibility column to posts if missing (Phase D migration)
let has_post_visibility = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='visibility'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_post_visibility == 0 {
self.conn.execute_batch(
"ALTER TABLE posts ADD COLUMN visibility TEXT NOT NULL DEFAULT '\"Public\"';"
)?;
}
// Add visibility_intent column to posts if missing (Phase D-2 migration)
let has_visibility_intent = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='visibility_intent'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_visibility_intent == 0 {
self.conn.execute_batch(
"ALTER TABLE posts ADD COLUMN visibility_intent TEXT;"
)?;
}
// Add new peer columns if missing (Phase B migration)
let has_addresses = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='addresses'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_addresses == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN addresses TEXT NOT NULL DEFAULT '[]';
ALTER TABLE peers ADD COLUMN introduced_by BLOB;
ALTER TABLE peers ADD COLUMN is_anchor INTEGER NOT NULL DEFAULT 0;
ALTER TABLE peers ADD COLUMN first_seen INTEGER NOT NULL DEFAULT 0;"
)?;
// Backfill first_seen from last_seen for existing rows
self.conn.execute_batch(
"UPDATE peers SET first_seen = last_seen WHERE first_seen = 0;"
)?;
}
// Add anchors column to profiles if missing (Phase C migration)
let has_anchors = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='anchors'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_anchors == 0 {
self.conn.execute_batch(
"ALTER TABLE profiles ADD COLUMN anchors TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Add is_wide_peer column to peers if missing (Phase F migration)
let has_wide_peer = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='is_wide_peer'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_wide_peer == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN is_wide_peer INTEGER NOT NULL DEFAULT 0;"
)?;
}
// Add recent_peers column to profiles if missing (Phase 7b migration)
let has_recent_peers = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='recent_peers'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_recent_peers == 0 {
self.conn.execute_batch(
"ALTER TABLE profiles ADD COLUMN recent_peers TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Rename persistent_peers → routing_peers → mesh_peers (existing DBs)
let _ = self.conn.execute_batch(
"ALTER TABLE persistent_peers RENAME TO mesh_peers;"
);
let _ = self.conn.execute_batch(
"ALTER TABLE routing_peers RENAME TO mesh_peers;"
);
// Add post_id/author/created_at/last_accessed_at columns to blobs if missing
// (blobs are recoverable from filesystem + posts, so drop-and-recreate is safe)
let has_blob_post_id = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('blobs') WHERE name='post_id'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_blob_post_id == 0 {
self.conn.execute_batch(
"DROP TABLE IF EXISTS blobs;
CREATE TABLE blobs (
cid BLOB PRIMARY KEY,
post_id BLOB NOT NULL,
author BLOB NOT NULL,
size_bytes INTEGER NOT NULL,
mime_type TEXT NOT NULL,
created_at INTEGER NOT NULL,
stored_at INTEGER NOT NULL,
last_accessed_at INTEGER NOT NULL,
pinned INTEGER NOT NULL DEFAULT 0
);"
)?;
}
// Add pinned column to blobs if missing
let has_pinned = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('blobs') WHERE name='pinned'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_pinned == 0 {
self.conn.execute_batch(
"ALTER TABLE blobs ADD COLUMN pinned INTEGER NOT NULL DEFAULT 0;"
)?;
}
// Add preferred_peers column to profiles if missing (Preferred Mesh Peers migration)
let has_preferred_peers = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='preferred_peers'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_preferred_peers == 0 {
self.conn.execute_batch(
"ALTER TABLE profiles ADD COLUMN preferred_peers TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Add preferred_tree column to social_routes if missing (Preferred Tree migration)
let has_pref_tree = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('social_routes') WHERE name='preferred_tree'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_pref_tree == 0 {
self.conn.execute_batch(
"ALTER TABLE social_routes ADD COLUMN preferred_tree TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Add preferred_tree column to blob_upstream if missing (CDN Preferred Tree migration)
let has_blob_pref_tree = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('blob_upstream') WHERE name='preferred_tree'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_blob_pref_tree == 0 {
self.conn.execute_batch(
"ALTER TABLE blob_upstream ADD COLUMN preferred_tree TEXT NOT NULL DEFAULT '[]';"
)?;
}
// Add public_visible column to profiles if missing (Phase D-4 migration)
let has_public_visible = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='public_visible'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_public_visible == 0 {
self.conn.execute_batch(
"ALTER TABLE profiles ADD COLUMN public_visible INTEGER NOT NULL DEFAULT 1;"
)?;
}
// Add avatar_cid column to profiles if missing (Phase D-4 migration)
let has_avatar_cid = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='avatar_cid'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_avatar_cid == 0 {
self.conn.execute_batch(
"ALTER TABLE profiles ADD COLUMN avatar_cid BLOB;"
)?;
}
// Add nat_type column to peers if missing (STUN NAT detection)
let has_nat_type = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_type'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_nat_type == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN nat_type TEXT DEFAULT 'unknown';"
)?;
}
// Add nat_mapping column to peers if missing (Advanced NAT traversal)
let has_nat_mapping = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_mapping'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_nat_mapping == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN nat_mapping TEXT DEFAULT NULL;"
)?;
}
// Add nat_filtering column to peers if missing (Advanced NAT traversal)
let has_nat_filtering = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_filtering'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_nat_filtering == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN nat_filtering TEXT DEFAULT NULL;"
)?;
}
// Add http_capable and http_addr columns to peers (TCP direct serve)
let has_http_capable = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='http_capable'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_http_capable == 0 {
self.conn.execute_batch(
"ALTER TABLE peers ADD COLUMN http_capable INTEGER DEFAULT 0;
ALTER TABLE peers ADD COLUMN http_addr TEXT DEFAULT NULL;"
)?;
}
Ok(())
}
// ---- Posts ----
/// Store a post with default Public visibility. Returns true if it was new.
pub fn store_post(&self, id: &PostId, post: &Post) -> anyhow::Result<bool> {
    let default_visibility = PostVisibility::Public;
    self.store_post_with_visibility(id, post, &default_visibility)
}
/// Store a post with explicit visibility. Returns true if it was new.
///
/// Attachments and visibility are persisted as JSON text columns; an
/// existing row with the same id is left untouched (`INSERT OR IGNORE`).
pub fn store_post_with_visibility(
    &self,
    id: &PostId,
    post: &Post,
    visibility: &PostVisibility,
) -> anyhow::Result<bool> {
    let attachments = serde_json::to_string(&post.attachments)?;
    let vis = serde_json::to_string(visibility)?;
    let changed = self.conn.execute(
        "INSERT OR IGNORE INTO posts (id, author, content, attachments, timestamp_ms, visibility) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![
            id.as_slice(),
            post.author.as_slice(),
            post.content,
            attachments,
            post.timestamp_ms as i64,
            vis,
        ],
    )?;
    Ok(changed > 0)
}
/// Fetch a single post by id, or `None` when it is unknown.
pub fn get_post(&self, id: &PostId) -> anyhow::Result<Option<Post>> {
    let mut stmt = self
        .conn
        .prepare("SELECT author, content, attachments, timestamp_ms FROM posts WHERE id = ?1")?;
    let mut rows = stmt.query(params![id.as_slice()])?;
    let row = match rows.next()? {
        Some(r) => r,
        None => return Ok(None),
    };
    let attachments_json: String = row.get(2)?;
    // Unparseable attachment JSON degrades to an empty list rather than erroring.
    let attachments: Vec<Attachment> =
        serde_json::from_str(&attachments_json).unwrap_or_default();
    Ok(Some(Post {
        author: blob_to_nodeid(row.get(0)?)?,
        content: row.get(1)?,
        attachments,
        timestamp_ms: row.get::<_, i64>(3)? as u64,
    }))
}
/// Fetch a post together with its stored visibility, or `None` if unknown.
pub fn get_post_with_visibility(
    &self,
    id: &PostId,
) -> anyhow::Result<Option<(Post, PostVisibility)>> {
    let mut stmt = self.conn.prepare(
        "SELECT author, content, attachments, timestamp_ms, visibility FROM posts WHERE id = ?1",
    )?;
    let mut rows = stmt.query(params![id.as_slice()])?;
    let row = match rows.next()? {
        Some(r) => r,
        None => return Ok(None),
    };
    let attachments_json: String = row.get(2)?;
    let vis_json: String = row.get(4)?;
    // Malformed JSON in either column falls back to the type's default.
    let attachments: Vec<Attachment> =
        serde_json::from_str(&attachments_json).unwrap_or_default();
    let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
    let post = Post {
        author: blob_to_nodeid(row.get(0)?)?,
        content: row.get(1)?,
        attachments,
        timestamp_ms: row.get::<_, i64>(3)? as u64,
    };
    Ok(Some((post, visibility)))
}
/// All stored post ids (no particular order).
pub fn list_post_ids(&self) -> anyhow::Result<Vec<PostId>> {
    let mut stmt = self.conn.prepare("SELECT id FROM posts")?;
    let mut rows = stmt.query([])?;
    let mut ids = Vec::new();
    while let Some(row) = rows.next()? {
        let raw: Vec<u8> = row.get(0)?;
        ids.push(blob_to_postid(raw)?);
    }
    Ok(ids)
}
/// All posts, newest first (with visibility)
pub fn list_posts_reverse_chron(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts ORDER BY timestamp_ms DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut out = Vec::new();
    while let Some(row) = rows.next()? {
        let id_raw: Vec<u8> = row.get(0)?;
        let author_raw: Vec<u8> = row.get(1)?;
        let content: String = row.get(2)?;
        let attachments_json: String = row.get(3)?;
        let ts: i64 = row.get(4)?;
        let vis_json: String = row.get(5)?;
        // JSON columns degrade to defaults instead of failing the whole listing.
        let attachments: Vec<Attachment> =
            serde_json::from_str(&attachments_json).unwrap_or_default();
        let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
        out.push((
            blob_to_postid(id_raw)?,
            Post {
                author: blob_to_nodeid(author_raw)?,
                content,
                attachments,
                timestamp_ms: ts as u64,
            },
            visibility,
        ));
    }
    Ok(out)
}
/// Feed: posts from followed users, reverse chronological (with visibility)
pub fn get_feed(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    let mut stmt = self.conn.prepare(
        "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility
FROM posts p
INNER JOIN follows f ON p.author = f.node_id
ORDER BY p.timestamp_ms DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut feed = Vec::new();
    while let Some(row) = rows.next()? {
        let id_raw: Vec<u8> = row.get(0)?;
        let author_raw: Vec<u8> = row.get(1)?;
        let content: String = row.get(2)?;
        let attachments_json: String = row.get(3)?;
        let ts: i64 = row.get(4)?;
        let vis_json: String = row.get(5)?;
        // JSON columns degrade to defaults instead of failing the whole feed.
        let attachments: Vec<Attachment> =
            serde_json::from_str(&attachments_json).unwrap_or_default();
        let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
        feed.push((
            blob_to_postid(id_raw)?,
            Post {
                author: blob_to_nodeid(author_raw)?,
                content,
                attachments,
                timestamp_ms: ts as u64,
            },
            visibility,
        ));
    }
    Ok(feed)
}
/// All posts with visibility (for sync protocol)
///
/// Currently a direct delegation to `list_posts_reverse_chron`.
pub fn list_posts_with_visibility(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
self.list_posts_reverse_chron()
}
// ---- Follows ----
/// Follow a node with default public visibility; a no-op if already followed.
pub fn add_follow(&self, node_id: &NodeId) -> anyhow::Result<()> {
    let sql = "INSERT OR IGNORE INTO follows (node_id, visibility) VALUES (?1, 'public')";
    self.conn.execute(sql, params![node_id.as_slice()])?;
    Ok(())
}
/// Follow a node with an explicit visibility, replacing any existing entry.
pub fn add_follow_with_visibility(
    &self,
    node_id: &NodeId,
    visibility: FollowVisibility,
) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT OR REPLACE INTO follows (node_id, visibility) VALUES (?1, ?2)",
        params![
            node_id.as_slice(),
            // Stored as the same lowercase labels init_tables defaults to.
            match visibility {
                FollowVisibility::Public => "public",
                FollowVisibility::Private => "private",
            },
        ],
    )?;
    Ok(())
}
/// Unfollow a node; a no-op if it was not followed.
pub fn remove_follow(&self, node_id: &NodeId) -> anyhow::Result<()> {
    let sql = "DELETE FROM follows WHERE node_id = ?1";
    self.conn.execute(sql, params![node_id.as_slice()])?;
    Ok(())
}
/// Every followed node, public and private alike.
pub fn list_follows(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self.conn.prepare("SELECT node_id FROM follows")?;
    let mut rows = stmt.query([])?;
    let mut followed = Vec::new();
    while let Some(row) = rows.next()? {
        let raw: Vec<u8> = row.get(0)?;
        followed.push(blob_to_nodeid(raw)?);
    }
    Ok(followed)
}
/// List only public follows (for gossip)
pub fn list_public_follows(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self
        .conn
        .prepare("SELECT node_id FROM follows WHERE visibility = 'public'")?;
    let mut rows = stmt.query([])?;
    let mut followed = Vec::new();
    while let Some(row) = rows.next()? {
        let raw: Vec<u8> = row.get(0)?;
        followed.push(blob_to_nodeid(raw)?);
    }
    Ok(followed)
}
// ---- Peers ----
/// Add or update a peer (backward-compat: no addresses)
///
/// Inserts with an empty address list; on conflict only `last_seen` is
/// refreshed, so `first_seen` and any stored addresses are preserved.
pub fn add_peer(&self, node_id: &NodeId) -> anyhow::Result<()> {
    let timestamp = now_ms();
    self.conn.execute(
        "INSERT INTO peers (node_id, addresses, last_seen, first_seen)
VALUES (?1, '[]', ?2, ?2)
ON CONFLICT(node_id) DO UPDATE SET last_seen = ?2",
        params![node_id.as_slice(), timestamp],
    )?;
    Ok(())
}
/// List just node IDs (backward-compat), most recently seen first.
pub fn list_peers(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self
        .conn
        .prepare("SELECT node_id FROM peers ORDER BY last_seen DESC")?;
    let mut rows = stmt.query([])?;
    let mut peers = Vec::new();
    while let Some(row) = rows.next()? {
        let raw: Vec<u8> = row.get(0)?;
        peers.push(blob_to_nodeid(raw)?);
    }
    Ok(peers)
}
/// Insert or update a peer with full details
///
/// On conflict `addresses` and `last_seen` are overwritten, `first_seen`
/// is kept, and `introduced_by` only ever keeps its first non-NULL value.
pub fn upsert_peer(
    &self,
    node_id: &NodeId,
    addresses: &[SocketAddr],
    introduced_by: Option<&NodeId>,
) -> anyhow::Result<()> {
    let now = now_ms();
    let rendered: Vec<String> = addresses.iter().map(|a| a.to_string()).collect();
    let addrs_json = serde_json::to_string(&rendered)?;
    self.conn.execute(
        "INSERT INTO peers (node_id, addresses, last_seen, introduced_by, first_seen)
VALUES (?1, ?2, ?3, ?4, ?3)
ON CONFLICT(node_id) DO UPDATE SET
addresses = ?2, last_seen = ?3,
introduced_by = COALESCE(peers.introduced_by, ?4)",
        params![
            node_id.as_slice(),
            addrs_json,
            now,
            introduced_by.map(|n| n.as_slice()),
        ],
    )?;
    Ok(())
}
/// Get a single peer record
pub fn get_peer_record(&self, node_id: &NodeId) -> anyhow::Result<Option<PeerRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen
FROM peers WHERE node_id = ?1",
    )?;
    let mut rows = stmt.query(params![node_id.as_slice()])?;
    match rows.next()? {
        Some(row) => Ok(Some(row_to_peer_record(row)?)),
        None => Ok(None),
    }
}
/// List all peer records with full info, most recently seen first.
pub fn list_peer_records(&self) -> anyhow::Result<Vec<PeerRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen
FROM peers ORDER BY last_seen DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut all = Vec::new();
    while let Some(row) = rows.next()? {
        all.push(row_to_peer_record(row)?);
    }
    Ok(all)
}
/// Merge additional addresses into an existing peer's record
///
/// Unknown peers are created via `upsert_peer`; known peers get the new
/// addresses appended (deduplicated) and their `last_seen` refreshed.
pub fn merge_peer_addresses(
    &self,
    node_id: &NodeId,
    new_addrs: &[SocketAddr],
) -> anyhow::Result<()> {
    let now = now_ms();
    // Best-effort fetch: a missing row (or any query error) means "not stored yet".
    let existing: Option<String> = self
        .conn
        .query_row(
            "SELECT addresses FROM peers WHERE node_id = ?1",
            params![node_id.as_slice()],
            |row| row.get(0),
        )
        .ok();
    let existing_json = match existing {
        Some(json) => json,
        // Peer doesn't exist yet — upsert
        None => return self.upsert_peer(node_id, new_addrs, None),
    };
    let mut merged: Vec<String> = serde_json::from_str(&existing_json).unwrap_or_default();
    for addr in new_addrs {
        let rendered = addr.to_string();
        if !merged.contains(&rendered) {
            merged.push(rendered);
        }
    }
    self.conn.execute(
        "UPDATE peers SET addresses = ?1, last_seen = ?2 WHERE node_id = ?3",
        params![serde_json::to_string(&merged)?, now, node_id.as_slice()],
    )?;
    Ok(())
}
/// Set a peer's NAT type
pub fn set_peer_nat_type(&self, node_id: &NodeId, nat_type: crate::types::NatType) -> anyhow::Result<()> {
    let label = nat_type.to_string();
    self.conn.execute(
        "UPDATE peers SET nat_type = ?1 WHERE node_id = ?2",
        params![label, node_id.as_slice()],
    )?;
    Ok(())
}
/// Get a peer's NAT type
///
/// Returns `NatType::Unknown` when the peer row is missing or the column
/// cannot be read — this getter never fails.
pub fn get_peer_nat_type(&self, node_id: &NodeId) -> crate::types::NatType {
    let label: Result<String, _> = self.conn.query_row(
        "SELECT nat_type FROM peers WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| row.get(0),
    );
    match label {
        Ok(s) => crate::types::NatType::from_str_label(&s),
        Err(_) => crate::types::NatType::Unknown,
    }
}
/// Set a peer's NAT profile (mapping + filtering)
pub fn set_peer_nat_profile(&self, node_id: &NodeId, profile: &crate::types::NatProfile) -> anyhow::Result<()> {
    let mapping = profile.mapping.to_string();
    let filtering = profile.filtering.to_string();
    self.conn.execute(
        "UPDATE peers SET nat_mapping = ?1, nat_filtering = ?2 WHERE node_id = ?3",
        params![mapping, filtering, node_id.as_slice()],
    )?;
    Ok(())
}
/// Get a peer's NAT profile (mapping + filtering)
pub fn get_peer_nat_profile(&self, node_id: &NodeId) -> crate::types::NatProfile {
self.conn.query_row(
"SELECT nat_mapping, nat_filtering FROM peers WHERE node_id = ?1",
params![node_id.as_slice()],
|row| {
let mapping: Option<String> = row.get(0)?;
let filtering: Option<String> = row.get(1)?;
Ok((mapping, filtering))
},
)
.map(|(m, f)| {
let mapping = m.map(|s| crate::types::NatMapping::from_str_label(&s))
.unwrap_or(crate::types::NatMapping::Unknown);
let filtering = f.map(|s| crate::types::NatFiltering::from_str_label(&s))
.unwrap_or(crate::types::NatFiltering::Unknown);
crate::types::NatProfile::new(mapping, filtering)
})
.unwrap_or_else(|_| crate::types::NatProfile::unknown())
}
/// Set a peer's HTTP capability info
pub fn set_peer_http_info(&self, node_id: &NodeId, capable: bool, addr: Option<&str>) -> anyhow::Result<()> {
    let capable_flag = i32::from(capable);
    self.conn.execute(
        "UPDATE peers SET http_capable = ?1, http_addr = ?2 WHERE node_id = ?3",
        params![capable_flag, addr, node_id.as_slice()],
    )?;
    Ok(())
}
/// Get a peer's HTTP capability (http_capable, http_addr)
///
/// Falls back to `(false, None)` when the peer row is missing or the read
/// fails — this getter never fails.
pub fn get_peer_http_info(&self, node_id: &NodeId) -> (bool, Option<String>) {
    let fetched = self.conn.query_row(
        "SELECT http_capable, http_addr FROM peers WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| Ok((row.get::<_, i32>(0)? != 0, row.get::<_, Option<String>>(1)?)),
    );
    fetched.unwrap_or((false, None))
}
/// Get a random N2 stranger (node in reachable_n2 but not in our connections)
/// Returns (witness_node_id, reporter_node_id) for anchor probe
///
/// Samples up to 10 random rows and returns the first pair where the
/// witness is a stranger and the reporter is a current connection (we need
/// the reporter as the relay for the request). Malformed rows are skipped.
pub fn random_n2_stranger(&self, our_connections: &std::collections::HashSet<NodeId>) -> anyhow::Result<Option<(NodeId, NodeId)>> {
    let mut stmt = self.conn.prepare(
        "SELECT reachable_node_id, reporter_node_id FROM reachable_n2 ORDER BY RANDOM() LIMIT 10"
    )?;
    let candidates = stmt.query_map([], |row| {
        Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, Vec<u8>>(1)?))
    })?;
    // flatten(): per-row read errors are skipped, matching the best-effort intent.
    for (witness_raw, reporter_raw) in candidates.flatten() {
        let witness: NodeId = match witness_raw.as_slice().try_into() {
            Ok(id) => id,
            Err(_) => continue, // not 32 bytes — skip
        };
        let reporter: NodeId = match reporter_raw.as_slice().try_into() {
            Ok(id) => id,
            Err(_) => continue,
        };
        // Witness must NOT be connected (stranger); reporter MUST be connected.
        if !our_connections.contains(&witness) && our_connections.contains(&reporter) {
            return Ok(Some((witness, reporter)));
        }
    }
    Ok(None)
}
/// Set a peer's anchor status
pub fn set_peer_anchor(&self, node_id: &NodeId, is_anchor: bool) -> anyhow::Result<()> {
    self.conn.execute(
        "UPDATE peers SET is_anchor = ?1 WHERE node_id = ?2",
        params![i32::from(is_anchor), node_id.as_slice()],
    )?;
    Ok(())
}
/// Check if the peers table has any rows
pub fn has_peers(&self) -> anyhow::Result<bool> {
    let total = self
        .conn
        .query_row("SELECT COUNT(*) FROM peers", [], |row| row.get::<_, i64>(0))?;
    Ok(total > 0)
}
/// Build GossipPeerInfo list from all peers seen in the last 7 days.
/// Address-free since sync/6 — only node IDs and anchor status.
pub fn build_gossip_list(&self) -> anyhow::Result<Vec<GossipPeerInfo>> {
    let cutoff = now_ms() - 7 * 24 * 60 * 60 * 1000;
    let mut stmt = self.conn.prepare(
        "SELECT node_id, is_anchor
FROM peers
WHERE last_seen > ?1",
    )?;
    let mut rows = stmt.query(params![cutoff])?;
    let mut gossip = Vec::new();
    while let Some(row) = rows.next()? {
        gossip.push(GossipPeerInfo {
            node_id: blob_to_nodeid(row.get(0)?)?,
            is_anchor: row.get::<_, i32>(1)? != 0,
        });
    }
    Ok(gossip)
}
// ---- Profiles ----
/// Store or update a profile. Only updates if the new profile is newer.
/// Returns true if the profile was stored/updated.
///
/// Node-id lists (anchors / recent peers / preferred peers) are stored as
/// JSON arrays of hex strings.
pub fn store_profile(&self, profile: &PublicProfile) -> anyhow::Result<bool> {
    // Staleness guard: keep the row we already have if it is strictly newer.
    let existing_ts: Option<i64> = self
        .conn
        .query_row(
            "SELECT updated_at FROM profiles WHERE node_id = ?1",
            params![profile.node_id.as_slice()],
            |row| row.get(0),
        )
        .ok();
    if existing_ts.map_or(false, |ts| ts as u64 > profile.updated_at) {
        return Ok(false);
    }
    let anchors = serde_json::to_string(
        &profile.anchors.iter().map(hex::encode).collect::<Vec<_>>(),
    )?;
    let recent_peers = serde_json::to_string(
        &profile.recent_peers.iter().map(hex::encode).collect::<Vec<_>>(),
    )?;
    let preferred_peers = serde_json::to_string(
        &profile.preferred_peers.iter().map(hex::encode).collect::<Vec<_>>(),
    )?;
    let avatar_cid = profile.avatar_cid.as_ref().map(|c| c.as_slice());
    self.conn.execute(
        "INSERT OR REPLACE INTO profiles (node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        params![
            profile.node_id.as_slice(),
            profile.display_name,
            profile.bio,
            profile.updated_at as i64,
            anchors,
            recent_peers,
            preferred_peers,
            profile.public_visible as i64,
            avatar_cid,
        ],
    )?;
    Ok(true)
}
// ---- Settings ----
/// Get a setting value by key. Returns `None` when the key is absent.
pub fn get_setting(&self, key: &str) -> anyhow::Result<Option<String>> {
    let mut stmt = self.conn.prepare("SELECT value FROM settings WHERE key = ?1")?;
    let mut rows = stmt.query(params![key])?;
    match rows.next()? {
        Some(row) => Ok(Some(row.get(0)?)),
        None => Ok(None),
    }
}
/// Set a setting value (upsert): inserts the key or overwrites its value.
pub fn set_setting(&self, key: &str, value: &str) -> anyhow::Result<()> {
    let sql = "INSERT INTO settings (key, value) VALUES (?1, ?2) ON CONFLICT(key) DO UPDATE SET value = excluded.value";
    self.conn.execute(sql, params![key, value])?;
    Ok(())
}
/// Initialize post_hosts table (called by web handler).
///
/// Intentionally a no-op: the table is created in `init_tables`; this method
/// is kept so callers can invoke it idempotently.
pub fn init_post_hosts_table(&self) -> anyhow::Result<()> {
    // Already in init_tables, but safe to call again
    Ok(())
}
/// Get known-good hosts for a post.
///
/// Returns at most 10 hosts confirmed within the last 30 minutes, most
/// recently seen first. Rows whose stored address fails to parse as a
/// `SocketAddr` are silently skipped.
pub fn get_post_hosts(&self, post_id: &PostId) -> anyhow::Result<Vec<SocketAddr>> {
    let mut stmt = self.conn.prepare(
        "SELECT host FROM post_hosts WHERE post_id = ?1 AND last_seen_ms > ?2 ORDER BY last_seen_ms DESC LIMIT 10"
    )?;
    // Only return hosts seen in last 30 minutes
    let cutoff = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as i64 - 30 * 60 * 1000;
    let hosts: Vec<SocketAddr> = stmt.query_map(params![post_id.as_slice(), cutoff], |row| {
        row.get::<_, String>(0)
    })?.filter_map(|r| r.ok())
    .filter_map(|s| s.parse::<SocketAddr>().ok())
    .collect();
    Ok(hosts)
}
/// Upsert a known-good host for a post, refreshing its last-seen timestamp.
pub fn upsert_post_host(&self, post_id: &PostId, host: &SocketAddr) -> anyhow::Result<()> {
    // Wall-clock milliseconds since the Unix epoch.
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as i64;
    let sql = "INSERT INTO post_hosts (post_id, host, last_seen_ms) VALUES (?1, ?2, ?3) \
               ON CONFLICT(post_id, host) DO UPDATE SET last_seen_ms = excluded.last_seen_ms";
    self.conn
        .execute(sql, params![post_id.as_slice(), host.to_string(), now_ms])?;
    Ok(())
}
/// Get a profile by node ID.
///
/// Returns `None` when no profile row exists. Columns 5–8 are read with
/// defaults (`[]`, visible, no avatar) — presumably to tolerate rows written
/// before those columns existed; TODO confirm against the migration history.
pub fn get_profile(&self, node_id: &NodeId) -> anyhow::Result<Option<PublicProfile>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid FROM profiles WHERE node_id = ?1",
    )?;
    let mut rows = stmt.query(params![node_id.as_slice()])?;
    if let Some(row) = rows.next()? {
        let anchors = parse_anchors_json(&row.get::<_, String>(4)?);
        let recent_peers = parse_anchors_json(&row.get::<_, String>(5).unwrap_or_else(|_| "[]".to_string()));
        let preferred_peers = parse_anchors_json(&row.get::<_, String>(6).unwrap_or_else(|_| "[]".to_string()));
        let public_visible = row.get::<_, i64>(7).unwrap_or(1) != 0;
        // Avatar CID must be exactly 32 bytes; anything else is dropped.
        let avatar_cid = row.get::<_, Option<Vec<u8>>>(8).unwrap_or(None)
            .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
        Ok(Some(PublicProfile {
            node_id: blob_to_nodeid(row.get(0)?)?,
            display_name: row.get(1)?,
            bio: row.get(2)?,
            updated_at: row.get::<_, i64>(3)? as u64,
            anchors,
            recent_peers,
            preferred_peers,
            public_visible,
            avatar_cid,
        }))
    } else {
        Ok(None)
    }
}
/// List all known profiles.
///
/// Missing/NULL values in the later-added columns fall back to defaults
/// (`[]`, visible, no avatar), mirroring `get_profile`.
pub fn list_profiles(&self) -> anyhow::Result<Vec<PublicProfile>> {
    let mut stmt = self
        .conn
        .prepare("SELECT node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid FROM profiles")?;
    let rows = stmt.query_map([], |row| {
        let node_id_bytes: Vec<u8> = row.get(0)?;
        let display_name: String = row.get(1)?;
        let bio: String = row.get(2)?;
        let updated_at: i64 = row.get(3)?;
        let anchors_json: String = row.get(4)?;
        let recent_peers_json: String = row.get::<_, String>(5).unwrap_or_else(|_| "[]".to_string());
        let preferred_peers_json: String = row.get::<_, String>(6).unwrap_or_else(|_| "[]".to_string());
        let public_visible: i64 = row.get::<_, i64>(7).unwrap_or(1);
        let avatar_cid_bytes: Option<Vec<u8>> = row.get::<_, Option<Vec<u8>>>(8).unwrap_or(None);
        Ok((node_id_bytes, display_name, bio, updated_at, anchors_json, recent_peers_json, preferred_peers_json, public_visible, avatar_cid_bytes))
    })?;
    let mut profiles = Vec::new();
    for row in rows {
        let (node_id_bytes, display_name, bio, updated_at, anchors_json, recent_peers_json, preferred_peers_json, public_visible, avatar_cid_bytes) = row?;
        // Avatar CID must be exactly 32 bytes; anything else is dropped.
        let avatar_cid = avatar_cid_bytes.and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
        profiles.push(PublicProfile {
            node_id: blob_to_nodeid(node_id_bytes)?,
            display_name,
            bio,
            updated_at: updated_at as u64,
            anchors: parse_anchors_json(&anchors_json),
            recent_peers: parse_anchors_json(&recent_peers_json),
            preferred_peers: parse_anchors_json(&preferred_peers_json),
            public_visible: public_visible != 0,
            avatar_cid,
        });
    }
    Ok(profiles)
}
/// Get the anchor list from a peer's profile
pub fn get_peer_anchors(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let result: Option<String> = self.conn.query_row(
"SELECT anchors FROM profiles WHERE node_id = ?1",
params![node_id.as_slice()],
|row| row.get(0),
).ok();
Ok(result.map(|j| parse_anchors_json(&j)).unwrap_or_default())
}
/// List peers that are known anchors (is_anchor = true in peers table),
/// most recently seen first.
pub fn list_anchor_peers(&self) -> anyhow::Result<Vec<PeerRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen
         FROM peers WHERE is_anchor = 1 ORDER BY last_seen DESC",
    )?;
    let mut records = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        // Shared row-decoding helper keeps the column layout in one place.
        records.push(row_to_peer_record(row)?);
    }
    Ok(records)
}
// ---- Known anchors (persistent anchor cache for NAT traversal) ----
/// Upsert a known anchor. Increments success_count on conflict. Auto-prunes to 5.
///
/// Addresses are stored as a JSON array of address strings.
pub fn upsert_known_anchor(&self, node_id: &NodeId, addresses: &[SocketAddr]) -> anyhow::Result<()> {
    let addr_json = serde_json::to_string(
        &addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>(),
    )?;
    let now = now_ms();
    self.conn.execute(
        "INSERT INTO known_anchors (node_id, addresses, last_seen_ms, success_count)
         VALUES (?1, ?2, ?3, 1)
         ON CONFLICT(node_id) DO UPDATE SET
           addresses = ?2, last_seen_ms = ?3, success_count = success_count + 1",
        params![node_id.as_slice(), addr_json, now],
    )?;
    // Keep only the most successful anchors.
    self.prune_known_anchors(5)?;
    Ok(())
}
/// List known anchors, ordered by success_count descending (max 5).
///
/// Unparseable stored addresses are silently skipped.
pub fn list_known_anchors(&self) -> anyhow::Result<Vec<(NodeId, Vec<SocketAddr>)>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses FROM known_anchors
         ORDER BY success_count DESC LIMIT 5",
    )?;
    let mut result = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        let node_id = blob_to_nodeid(row.get(0)?)?;
        let addr_json: String = row.get(1)?;
        // Stored as a JSON array of strings; bad JSON yields an empty list.
        let addrs: Vec<SocketAddr> = serde_json::from_str::<Vec<String>>(&addr_json)
            .unwrap_or_default()
            .iter()
            .filter_map(|a| a.parse().ok())
            .collect();
        result.push((node_id, addrs));
    }
    Ok(result)
}
/// Prune known anchors to keep at most `max` entries (by highest success_count).
///
/// Ties are broken by oldest `last_seen_ms` first. Returns the number of rows
/// actually deleted (previously returned the computed excess even if the
/// DELETE removed a different number of rows).
pub fn prune_known_anchors(&self, max: usize) -> anyhow::Result<usize> {
    let count: i64 = self.conn.query_row(
        "SELECT COUNT(*) FROM known_anchors",
        [],
        |row| row.get(0),
    )?;
    if count as usize <= max {
        return Ok(0);
    }
    let excess = count as usize - max;
    // Delete the least successful / stalest entries first.
    let deleted = self.conn.execute(
        "DELETE FROM known_anchors WHERE node_id IN (
            SELECT node_id FROM known_anchors
            ORDER BY success_count ASC, last_seen_ms ASC
            LIMIT ?1
        )",
        params![excess as i64],
    )?;
    Ok(deleted)
}
/// Check if a peer is marked as an anchor in the peers table.
pub fn is_peer_anchor(&self, node_id: &NodeId) -> anyhow::Result<bool> {
let result: Option<i32> = self.conn.query_row(
"SELECT is_anchor FROM peers WHERE node_id = ?1",
params![node_id.as_slice()],
|row| row.get(0),
).ok();
Ok(result.unwrap_or(0) != 0)
}
/// Get the display name for a node, or None if no profile exists
pub fn get_display_name(&self, node_id: &NodeId) -> anyhow::Result<Option<String>> {
let result: Option<String> = self.conn.query_row(
"SELECT display_name FROM profiles WHERE node_id = ?1",
params![node_id.as_slice()],
|row| row.get(0),
).ok();
Ok(result)
}
// ---- Circles ----
/// Create a circle; a no-op if a circle with this name already exists.
pub fn create_circle(&self, name: &str) -> anyhow::Result<()> {
    let created_at = now_ms();
    self.conn.execute(
        "INSERT OR IGNORE INTO circles (name, created_at) VALUES (?1, ?2)",
        params![name, created_at],
    )?;
    Ok(())
}
/// Delete a circle and all of its membership rows (members first, then the
/// circle row itself).
pub fn delete_circle(&self, name: &str) -> anyhow::Result<()> {
    let steps = [
        "DELETE FROM circle_members WHERE circle_name = ?1",
        "DELETE FROM circles WHERE name = ?1",
    ];
    for sql in steps {
        self.conn.execute(sql, params![name])?;
    }
    Ok(())
}
/// Add a member to a circle; a no-op if the membership already exists.
pub fn add_circle_member(&self, circle_name: &str, node_id: &NodeId) -> anyhow::Result<()> {
    let added_at = now_ms();
    self.conn.execute(
        "INSERT OR IGNORE INTO circle_members (circle_name, node_id, added_at) VALUES (?1, ?2, ?3)",
        params![circle_name, node_id.as_slice(), added_at],
    )?;
    Ok(())
}
/// Remove a member from a circle (no error if the membership is absent).
pub fn remove_circle_member(
    &self,
    circle_name: &str,
    node_id: &NodeId,
) -> anyhow::Result<()> {
    let sql = "DELETE FROM circle_members WHERE circle_name = ?1 AND node_id = ?2";
    self.conn
        .execute(sql, params![circle_name, node_id.as_slice()])?;
    Ok(())
}
/// List the node IDs of every member of the named circle.
pub fn get_circle_members(&self, name: &str) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self
        .conn
        .prepare("SELECT node_id FROM circle_members WHERE circle_name = ?1")?;
    let blobs = stmt.query_map(params![name], |row| row.get::<_, Vec<u8>>(0))?;
    let mut members = Vec::new();
    for blob in blobs {
        // Reject rows whose blob is not a valid node ID.
        members.push(blob_to_nodeid(blob?)?);
    }
    Ok(members)
}
/// List all circles with their full member lists, ordered by name.
///
/// Issues one member-list query per circle (fine for the expected small
/// number of circles).
pub fn list_circles(&self) -> anyhow::Result<Vec<Circle>> {
    let mut stmt = self.conn.prepare("SELECT name, created_at FROM circles ORDER BY name")?;
    let mut circles = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        let name: String = row.get(0)?;
        let created_at = row.get::<_, i64>(1)? as u64;
        let members = self.get_circle_members(&name)?;
        circles.push(Circle {
            name,
            members,
            created_at,
        });
    }
    Ok(circles)
}
// ---- Circle Profiles ----
/// Upsert our own circle profile (plaintext, for local circles we admin).
///
/// Keyed on (author, circle_name); replaces any existing row, including any
/// previously stored encrypted payload columns (they revert to NULL).
pub fn set_circle_profile(&self, profile: &CircleProfile) -> anyhow::Result<()> {
    let avatar_cid_slice = profile.avatar_cid.as_ref().map(|c| c.as_slice());
    self.conn.execute(
        "INSERT OR REPLACE INTO circle_profiles (author, circle_name, display_name, bio, avatar_cid, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![
            profile.author.as_slice(),
            profile.circle_name,
            profile.display_name,
            profile.bio,
            avatar_cid_slice,
            profile.updated_at as i64,
        ],
    )?;
    Ok(())
}
/// Store encrypted form alongside decrypted circle profile (for relay + remote profiles).
///
/// Persists both the decrypted fields (`cp`) and the ciphertext/wrapping data
/// so the profile can be redistributed to other group members without
/// re-encrypting.
pub fn store_remote_circle_profile(
    &self,
    author: &NodeId,
    circle_name: &str,
    cp: &CircleProfile,
    encrypted_payload: &str,
    wrapped_cek: &[u8],
    group_id: &GroupId,
    epoch: GroupEpoch,
) -> anyhow::Result<()> {
    let avatar_cid_slice = cp.avatar_cid.as_ref().map(|c| c.as_slice());
    self.conn.execute(
        "INSERT OR REPLACE INTO circle_profiles (author, circle_name, display_name, bio, avatar_cid, updated_at, encrypted_payload, wrapped_cek, group_id, epoch) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
        params![
            author.as_slice(),
            circle_name,
            cp.display_name,
            cp.bio,
            avatar_cid_slice,
            cp.updated_at as i64,
            encrypted_payload,
            wrapped_cek,
            group_id.as_slice(),
            epoch as i64,
        ],
    )?;
    Ok(())
}
/// Store only encrypted form (we don't have the group seed to decrypt).
///
/// Display name and bio are stored as empty strings as placeholders until a
/// seed becomes available to decrypt the payload.
pub fn store_encrypted_circle_profile(
    &self,
    author: &NodeId,
    circle_name: &str,
    encrypted_payload: &str,
    wrapped_cek: &[u8],
    group_id: &GroupId,
    epoch: GroupEpoch,
    updated_at: u64,
) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT OR REPLACE INTO circle_profiles (author, circle_name, display_name, bio, updated_at, encrypted_payload, wrapped_cek, group_id, epoch) VALUES (?1, ?2, '', '', ?3, ?4, ?5, ?6, ?7)",
        params![
            author.as_slice(),
            circle_name,
            updated_at as i64,
            encrypted_payload,
            wrapped_cek,
            group_id.as_slice(),
            epoch as i64,
        ],
    )?;
    Ok(())
}
/// Get a circle profile by author + circle_name, or `None` if absent.
pub fn get_circle_profile(&self, author: &NodeId, circle_name: &str) -> anyhow::Result<Option<CircleProfile>> {
    let mut stmt = self.conn.prepare(
        "SELECT author, circle_name, display_name, bio, avatar_cid, updated_at FROM circle_profiles WHERE author = ?1 AND circle_name = ?2",
    )?;
    let mut rows = stmt.query(params![author.as_slice(), circle_name])?;
    if let Some(row) = rows.next()? {
        // Avatar CID must be exactly 32 bytes; anything else is dropped.
        let avatar_cid = row.get::<_, Option<Vec<u8>>>(4).unwrap_or(None)
            .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
        Ok(Some(CircleProfile {
            author: blob_to_nodeid(row.get(0)?)?,
            circle_name: row.get(1)?,
            display_name: row.get(2)?,
            bio: row.get(3)?,
            avatar_cid,
            updated_at: row.get::<_, i64>(5)? as u64,
        }))
    } else {
        Ok(None)
    }
}
/// List all circle profiles for a given author, most recently updated first.
pub fn list_circle_profiles_for_author(&self, author: &NodeId) -> anyhow::Result<Vec<CircleProfile>> {
    let mut stmt = self.conn.prepare(
        "SELECT author, circle_name, display_name, bio, avatar_cid, updated_at FROM circle_profiles WHERE author = ?1 ORDER BY updated_at DESC",
    )?;
    let mut profiles = Vec::new();
    let mut rows = stmt.query(params![author.as_slice()])?;
    while let Some(row) = rows.next()? {
        // Avatar CID must be exactly 32 bytes; anything else is dropped.
        let avatar_cid = row.get::<_, Option<Vec<u8>>>(4).unwrap_or(None)
            .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
        profiles.push(CircleProfile {
            author: blob_to_nodeid(row.get(0)?)?,
            circle_name: row.get(1)?,
            display_name: row.get(2)?,
            bio: row.get(3)?,
            avatar_cid,
            updated_at: row.get::<_, i64>(5)? as u64,
        });
    }
    Ok(profiles)
}
/// Delete a circle profile (no error when no matching row exists).
pub fn delete_circle_profile(&self, author: &NodeId, circle_name: &str) -> anyhow::Result<()> {
    let sql = "DELETE FROM circle_profiles WHERE author = ?1 AND circle_name = ?2";
    self.conn
        .execute(sql, params![author.as_slice(), circle_name])?;
    Ok(())
}
/// Get the encrypted form of a circle profile for redistribution.
///
/// Returns `(encrypted_payload, wrapped_cek, group_id, epoch)`, or `None`
/// when the row is absent or has no stored ciphertext.
pub fn get_encrypted_circle_profile(
    &self,
    author: &NodeId,
    circle_name: &str,
) -> anyhow::Result<Option<(String, Vec<u8>, GroupId, GroupEpoch)>> {
    let mut stmt = self.conn.prepare(
        "SELECT encrypted_payload, wrapped_cek, group_id, epoch FROM circle_profiles WHERE author = ?1 AND circle_name = ?2 AND encrypted_payload IS NOT NULL",
    )?;
    let mut rows = stmt.query(params![author.as_slice(), circle_name])?;
    if let Some(row) = rows.next()? {
        let encrypted_payload: String = row.get(0)?;
        let wrapped_cek: Vec<u8> = row.get(1)?;
        let group_id = blob_to_nodeid(row.get::<_, Vec<u8>>(2)?)?;
        let epoch = row.get::<_, i64>(3)? as u64;
        Ok(Some((encrypted_payload, wrapped_cek, group_id, epoch)))
    } else {
        Ok(None)
    }
}
/// Resolve display info for a peer: check circle profiles the viewer belongs to,
/// then fall back to public profile.
/// Returns (display_name, bio, avatar_cid).
///
/// Precedence: the most recently updated circle profile whose circle contains
/// `viewer` and that has a non-empty display name wins; otherwise the public
/// profile (only if `public_visible`); otherwise empty strings / no avatar.
pub fn resolve_display_for_peer(
    &self,
    author: &NodeId,
    viewer: &NodeId,
) -> anyhow::Result<(String, String, Option<[u8; 32]>)> {
    // Find circles where viewer is a member and author has a circle profile
    let mut stmt = self.conn.prepare(
        "SELECT cp.display_name, cp.bio, cp.avatar_cid, cp.updated_at
         FROM circle_profiles cp
         INNER JOIN circle_members cm ON cp.circle_name = cm.circle_name
         WHERE cp.author = ?1 AND cm.node_id = ?2
         ORDER BY cp.updated_at DESC
         LIMIT 1",
    )?;
    let mut rows = stmt.query(params![author.as_slice(), viewer.as_slice()])?;
    if let Some(row) = rows.next()? {
        let dn: String = row.get(0)?;
        // Only use circle profile if it has actual content
        // (encrypted-only rows are stored with empty display_name).
        if !dn.is_empty() {
            let bio: String = row.get(1)?;
            let avatar_cid = row.get::<_, Option<Vec<u8>>>(2).unwrap_or(None)
                .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
            return Ok((dn, bio, avatar_cid));
        }
    }
    // Fall back to public profile
    if let Some(profile) = self.get_profile(author)? {
        if profile.public_visible {
            return Ok((profile.display_name, profile.bio, profile.avatar_cid));
        }
        // Hidden profile — return empty
        return Ok((String::new(), String::new(), None));
    }
    Ok((String::new(), String::new(), None))
}
// ---- Group Keys ----
/// Create or replace a group key record.
///
/// `group_seed` is optional: admins store the seed; non-admin replicas store
/// NULL and keep only the public key.
pub fn create_group_key(&self, record: &GroupKeyRecord, group_seed: Option<&[u8; 32]>) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT OR REPLACE INTO group_keys (group_id, circle_name, epoch, group_public_key, group_seed, admin, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
        params![
            record.group_id.as_slice(),
            record.circle_name,
            record.epoch as i64,
            record.group_public_key.as_slice(),
            group_seed.map(|s| s.as_slice()),
            record.admin.as_slice(),
            record.created_at as i64,
        ],
    )?;
    Ok(())
}
/// Fetch a group key record by group ID, or `None` when absent.
///
/// The seed column is intentionally not read here; use `get_group_seed`.
pub fn get_group_key(&self, group_id: &GroupId) -> anyhow::Result<Option<GroupKeyRecord>> {
    let result = self.conn.query_row(
        "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE group_id = ?1",
        params![group_id.as_slice()],
        |row| {
            let gid: Vec<u8> = row.get(0)?;
            let circle_name: String = row.get(1)?;
            let epoch: i64 = row.get(2)?;
            let gpk: Vec<u8> = row.get(3)?;
            let admin: Vec<u8> = row.get(4)?;
            let created_at: i64 = row.get(5)?;
            Ok((gid, circle_name, epoch, gpk, admin, created_at))
        },
    );
    match result {
        Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
            Ok(Some(GroupKeyRecord {
                group_id: blob_to_nodeid(gid)?,
                circle_name,
                epoch: epoch as u64,
                // Public key blob must be exactly 32 bytes.
                group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
                    .map_err(|_| anyhow::anyhow!("invalid group public key"))?,
                admin: blob_to_nodeid(admin)?,
                created_at: created_at as u64,
            }))
        }
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
    }
}
/// Fetch a group key record by circle name, or `None` when absent.
///
/// NOTE(review): no ORDER BY — if multiple rows ever share a circle_name,
/// which row is returned is unspecified; presumably circle_name is unique.
pub fn get_group_key_by_circle(&self, circle_name: &str) -> anyhow::Result<Option<GroupKeyRecord>> {
    let result = self.conn.query_row(
        "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE circle_name = ?1",
        params![circle_name],
        |row| {
            let gid: Vec<u8> = row.get(0)?;
            let circle_name: String = row.get(1)?;
            let epoch: i64 = row.get(2)?;
            let gpk: Vec<u8> = row.get(3)?;
            let admin: Vec<u8> = row.get(4)?;
            let created_at: i64 = row.get(5)?;
            Ok((gid, circle_name, epoch, gpk, admin, created_at))
        },
    );
    match result {
        Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
            Ok(Some(GroupKeyRecord {
                group_id: blob_to_nodeid(gid)?,
                circle_name,
                epoch: epoch as u64,
                // Public key blob must be exactly 32 bytes.
                group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
                    .map_err(|_| anyhow::anyhow!("invalid group public key"))?,
                admin: blob_to_nodeid(admin)?,
                created_at: created_at as u64,
            }))
        }
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
    }
}
/// Advance a group's epoch, replacing its public key and (optionally) seed.
///
/// A no-op when the group_id does not exist.
pub fn update_group_epoch(
    &self,
    group_id: &GroupId,
    new_epoch: GroupEpoch,
    new_public_key: &[u8; 32],
    new_seed: Option<&[u8; 32]>,
) -> anyhow::Result<()> {
    self.conn.execute(
        "UPDATE group_keys SET epoch = ?1, group_public_key = ?2, group_seed = ?3 WHERE group_id = ?4",
        params![
            new_epoch as i64,
            new_public_key.as_slice(),
            new_seed.map(|s| s.as_slice()),
            group_id.as_slice(),
        ],
    )?;
    Ok(())
}
/// Delete a group key and everything derived from it: member keys and
/// per-epoch seeds first, then the key record itself.
pub fn delete_group_key(&self, group_id: &GroupId) -> anyhow::Result<()> {
    let gid = group_id.as_slice();
    for sql in [
        "DELETE FROM group_member_keys WHERE group_id = ?1",
        "DELETE FROM group_seeds WHERE group_id = ?1",
        "DELETE FROM group_keys WHERE group_id = ?1",
    ] {
        self.conn.execute(sql, params![gid])?;
    }
    Ok(())
}
/// Store (or replace) one member's wrapped group key for a given epoch.
pub fn store_group_member_key(&self, group_id: &GroupId, mk: &GroupMemberKey) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT OR REPLACE INTO group_member_keys (group_id, member, epoch, wrapped_group_key) VALUES (?1, ?2, ?3, ?4)",
        params![
            group_id.as_slice(),
            mk.member.as_slice(),
            mk.epoch as i64,
            mk.wrapped_group_key,
        ],
    )?;
    Ok(())
}
/// List all members' wrapped group keys for one (group, epoch) pair.
pub fn get_group_member_keys(&self, group_id: &GroupId, epoch: GroupEpoch) -> anyhow::Result<Vec<GroupMemberKey>> {
    let mut stmt = self.conn.prepare(
        "SELECT member, epoch, wrapped_group_key FROM group_member_keys WHERE group_id = ?1 AND epoch = ?2",
    )?;
    let rows = stmt.query_map(params![group_id.as_slice(), epoch as i64], |row| {
        let member: Vec<u8> = row.get(0)?;
        let ep: i64 = row.get(1)?;
        let wrapped: Vec<u8> = row.get(2)?;
        Ok((member, ep, wrapped))
    })?;
    let mut keys = Vec::new();
    for row in rows {
        let (member, ep, wrapped) = row?;
        keys.push(GroupMemberKey {
            member: blob_to_nodeid(member)?,
            epoch: ep as u64,
            wrapped_group_key: wrapped,
        });
    }
    Ok(keys)
}
/// Fetch our own wrapped group key for one (group, epoch) pair, or `None`
/// when we have no entry (e.g., we were not a member at that epoch).
pub fn get_my_group_member_key(
    &self,
    group_id: &GroupId,
    epoch: GroupEpoch,
    our_node_id: &NodeId,
) -> anyhow::Result<Option<GroupMemberKey>> {
    let result = self.conn.query_row(
        "SELECT member, epoch, wrapped_group_key FROM group_member_keys WHERE group_id = ?1 AND epoch = ?2 AND member = ?3",
        params![group_id.as_slice(), epoch as i64, our_node_id.as_slice()],
        |row| {
            let member: Vec<u8> = row.get(0)?;
            let ep: i64 = row.get(1)?;
            let wrapped: Vec<u8> = row.get(2)?;
            Ok((member, ep, wrapped))
        },
    );
    match result {
        Ok((member, ep, wrapped)) => Ok(Some(GroupMemberKey {
            member: blob_to_nodeid(member)?,
            epoch: ep as u64,
            wrapped_group_key: wrapped,
        })),
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
    }
}
/// Store (or replace) a group's seed for a specific epoch.
pub fn store_group_seed(&self, group_id: &GroupId, epoch: GroupEpoch, seed: &[u8; 32]) -> anyhow::Result<()> {
    let sql = "INSERT OR REPLACE INTO group_seeds (group_id, epoch, group_seed) VALUES (?1, ?2, ?3)";
    self.conn
        .execute(sql, params![group_id.as_slice(), epoch as i64, seed.as_slice()])?;
    Ok(())
}
/// Fetch a group's seed for a specific epoch.
///
/// Returns `None` when no seed is stored; errors if a stored blob is not
/// exactly 32 bytes.
pub fn get_group_seed(&self, group_id: &GroupId, epoch: GroupEpoch) -> anyhow::Result<Option<[u8; 32]>> {
    let result = self.conn.query_row(
        "SELECT group_seed FROM group_seeds WHERE group_id = ?1 AND epoch = ?2",
        params![group_id.as_slice(), epoch as i64],
        |row| row.get::<_, Vec<u8>>(0),
    );
    match result {
        Ok(bytes) => {
            let seed = <[u8; 32]>::try_from(bytes.as_slice())
                .map_err(|_| anyhow::anyhow!("invalid group seed"))?;
            Ok(Some(seed))
        }
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
    }
}
/// Delete every stored per-epoch seed for the given group.
pub fn delete_group_seeds(&self, group_id: &GroupId) -> anyhow::Result<()> {
    let sql = "DELETE FROM group_seeds WHERE group_id = ?1";
    self.conn.execute(sql, params![group_id.as_slice()])?;
    Ok(())
}
/// Get all group seeds we have (for batch decrypt). Returns (group_id, epoch, seed, public_key).
///
/// NOTE(review): the join pulls the group's *current* public key for every
/// epoch's seed; for rotated epochs see `get_all_group_seeds_map`, which
/// derives the per-epoch pubkey from the seed instead.
pub fn get_all_group_seeds(&self) -> anyhow::Result<Vec<(GroupId, GroupEpoch, [u8; 32], [u8; 32])>> {
    let mut stmt = self.conn.prepare(
        "SELECT gs.group_id, gs.epoch, gs.group_seed, gk.group_public_key
         FROM group_seeds gs
         INNER JOIN group_keys gk ON gs.group_id = gk.group_id",
    )?;
    let rows = stmt.query_map([], |row| {
        let gid: Vec<u8> = row.get(0)?;
        let epoch: i64 = row.get(1)?;
        let seed: Vec<u8> = row.get(2)?;
        let pubkey: Vec<u8> = row.get(3)?;
        Ok((gid, epoch, seed, pubkey))
    })?;
    let mut results = Vec::new();
    for row in rows {
        let (gid, epoch, seed, pubkey) = row?;
        let group_id = blob_to_nodeid(gid)?;
        // Both blobs must be exactly 32 bytes.
        let seed_arr = <[u8; 32]>::try_from(seed.as_slice())
            .map_err(|_| anyhow::anyhow!("invalid seed"))?;
        let pubkey_arr = <[u8; 32]>::try_from(pubkey.as_slice())
            .map_err(|_| anyhow::anyhow!("invalid pubkey"))?;
        results.push((group_id, epoch as u64, seed_arr, pubkey_arr));
    }
    Ok(results)
}
/// Get all group seeds we have, including for older epochs (where pubkey comes from current record).
/// For decrypting older-epoch posts, we need the pubkey that was current at that epoch.
/// We store the seed per-epoch but not the pubkey per-epoch, so for now we use the current pubkey
/// which only works when the group_id matches. For rotated keys we need the epoch's pubkey.
/// Returns HashMap<(GroupId, GroupEpoch), (seed, pubkey)>.
pub fn get_all_group_seeds_map(&self) -> anyhow::Result<std::collections::HashMap<(GroupId, GroupEpoch), ([u8; 32], [u8; 32])>> {
    // For each group_seed entry, join with group_keys to get pubkey.
    // But after rotation, the pubkey changes. We need the pubkey for that specific epoch.
    // Since we store group_seed per epoch and the DH is seed×pubkey, we need the matching pubkey.
    // After rotation, the new seed has a new pubkey. The old seed had the old pubkey.
    // We derive pubkey from seed: SigningKey::from_bytes(seed).verifying_key().to_bytes()
    let mut stmt = self.conn.prepare(
        "SELECT group_id, epoch, group_seed FROM group_seeds",
    )?;
    let rows = stmt.query_map([], |row| {
        let gid: Vec<u8> = row.get(0)?;
        let epoch: i64 = row.get(1)?;
        let seed: Vec<u8> = row.get(2)?;
        Ok((gid, epoch, seed))
    })?;
    let mut map = std::collections::HashMap::new();
    for row in rows {
        let (gid, epoch, seed) = row?;
        let group_id = blob_to_nodeid(gid)?;
        let seed_arr = <[u8; 32]>::try_from(seed.as_slice())
            .map_err(|_| anyhow::anyhow!("invalid seed"))?;
        // Derive pubkey from seed (Ed25519 verifying key), so each epoch's
        // entry carries the pubkey that matches its own seed.
        let signing_key = ed25519_dalek::SigningKey::from_bytes(&seed_arr);
        let pubkey = signing_key.verifying_key().to_bytes();
        map.insert((group_id, epoch as u64), (seed_arr, pubkey));
    }
    Ok(map)
}
/// Get all group member sets: group_id → set of member NodeIds.
///
/// Resolved via group_keys' circle_name, so membership reflects the *current*
/// circle rosters, not historical epoch membership.
pub fn get_all_group_members(&self) -> anyhow::Result<std::collections::HashMap<GroupId, std::collections::HashSet<NodeId>>> {
    // Get group_id → circle_name mapping, then circle_name → members
    let mut stmt = self.conn.prepare("SELECT group_id, circle_name FROM group_keys")?;
    let rows = stmt.query_map([], |row| {
        let gid: Vec<u8> = row.get(0)?;
        let circle_name: String = row.get(1)?;
        Ok((gid, circle_name))
    })?;
    let mut map = std::collections::HashMap::new();
    for row in rows {
        let (gid, circle_name) = row?;
        let group_id = blob_to_nodeid(gid)?;
        let members = self.get_circle_members(&circle_name)?;
        let member_set: std::collections::HashSet<NodeId> = members.into_iter().collect();
        map.insert(group_id, member_set);
    }
    Ok(map)
}
// ---- Delete records ----
/// Store a delete record. Returns true if it was new (not already stored).
///
/// Does not remove the post itself; see `apply_delete` for that.
pub fn store_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> {
    // INSERT OR IGNORE returns 0 changed rows when the record already exists.
    let inserted = self.conn.execute(
        "INSERT OR IGNORE INTO deleted_posts (post_id, author, deleted_at, signature) VALUES (?1, ?2, ?3, ?4)",
        params![
            record.post_id.as_slice(),
            record.author.as_slice(),
            record.timestamp_ms as i64,
            record.signature,
        ],
    )?;
    Ok(inserted > 0)
}
/// Apply a delete: remove the post from the posts table if author matches.
/// Returns true when a post row was actually removed.
pub fn apply_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> {
    let sql = "DELETE FROM posts WHERE id = ?1 AND author = ?2";
    let removed = self
        .conn
        .execute(sql, params![record.post_id.as_slice(), record.author.as_slice()])?;
    Ok(removed > 0)
}
/// Check if a post has been deleted (a delete record exists for it).
pub fn is_deleted(&self, post_id: &PostId) -> anyhow::Result<bool> {
    let matches = self.conn.query_row(
        "SELECT COUNT(*) FROM deleted_posts WHERE post_id = ?1",
        params![post_id.as_slice()],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(matches > 0)
}
/// List all delete records (for sync).
pub fn list_delete_records(&self) -> anyhow::Result<Vec<DeleteRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT post_id, author, deleted_at, signature FROM deleted_posts",
    )?;
    let mut records = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        records.push(DeleteRecord {
            post_id: blob_to_postid(row.get(0)?)?,
            author: blob_to_nodeid(row.get(1)?)?,
            // Column is named deleted_at in SQL but maps to timestamp_ms.
            timestamp_ms: row.get::<_, i64>(2)? as u64,
            signature: row.get(3)?,
        });
    }
    Ok(records)
}
// ---- Visibility updates ----
/// Update a post's visibility (e.g., after rewrap or re-encrypt).
/// Returns true when a matching post row was updated.
pub fn update_post_visibility(
    &self,
    post_id: &PostId,
    new_visibility: &PostVisibility,
) -> anyhow::Result<bool> {
    // Visibility is persisted as its JSON serialization.
    let serialized = serde_json::to_string(new_visibility)?;
    let touched = self.conn.execute(
        "UPDATE posts SET visibility = ?1 WHERE id = ?2",
        params![serialized, post_id.as_slice()],
    )?;
    Ok(touched > 0)
}
// ---- Posts with intent ----
/// Store a post with visibility and the original intent (for circle lookups).
///
/// Returns true when the post was new; false if a row with this id already
/// existed (INSERT OR IGNORE). Attachments, visibility and intent are
/// persisted as JSON.
pub fn store_post_with_intent(
    &self,
    id: &PostId,
    post: &Post,
    visibility: &PostVisibility,
    intent: &VisibilityIntent,
) -> anyhow::Result<bool> {
    let attachments_json = serde_json::to_string(&post.attachments)?;
    let visibility_json = serde_json::to_string(visibility)?;
    let intent_json = serde_json::to_string(intent)?;
    let inserted = self.conn.execute(
        "INSERT OR IGNORE INTO posts (id, author, content, attachments, timestamp_ms, visibility, visibility_intent) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
        params![
            id.as_slice(),
            post.author.as_slice(),
            post.content,
            attachments_json,
            post.timestamp_ms as i64,
            visibility_json,
            intent_json,
        ],
    )?;
    Ok(inserted > 0)
}
/// Find posts authored by us that were intended for a specific circle.
///
/// NOTE(review): the LIKE pattern is built from `circle_name` unescaped, so a
/// name containing `%`, `_` or `"` can over- or under-match — consider a LIKE
/// ESCAPE clause or an exact-match column. Rows with malformed JSON fall back
/// to empty attachments / default visibility rather than erroring.
pub fn find_posts_by_circle_intent(
    &self,
    circle_name: &str,
    our_node_id: &NodeId,
) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    // Use LIKE to find posts whose visibility_intent JSON contains the circle name.
    // The serialized form is {"Circle":"name"} so we search for that pattern.
    let pattern = format!("%\"Circle\":\"{}\"%", circle_name);
    let mut stmt = self.conn.prepare(
        "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts WHERE author = ?1 AND visibility_intent LIKE ?2",
    )?;
    let mut posts = Vec::new();
    let mut rows = stmt.query(params![our_node_id.as_slice(), pattern])?;
    while let Some(row) = rows.next()? {
        let attachments: Vec<Attachment> =
            serde_json::from_str(&row.get::<_, String>(3)?).unwrap_or_default();
        let visibility: PostVisibility =
            serde_json::from_str(&row.get::<_, String>(5)?).unwrap_or_default();
        posts.push((
            blob_to_postid(row.get(0)?)?,
            Post {
                author: blob_to_nodeid(row.get(1)?)?,
                content: row.get(2)?,
                attachments,
                timestamp_ms: row.get::<_, i64>(4)? as u64,
            },
            visibility,
        ));
    }
    Ok(posts)
}
// ---- Replica tracking ----
/// Record that a peer has a copy of a post (UPSERT).
///
/// Refreshes last_confirmed_ms when the (post, peer) pair already exists.
pub fn record_replica(&self, post_id: &PostId, node_id: &NodeId) -> anyhow::Result<()> {
    let now = now_ms();
    self.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)
         ON CONFLICT(post_id, node_id) DO UPDATE SET last_confirmed_ms = ?3",
        params![post_id.as_slice(), node_id.as_slice(), now],
    )?;
    Ok(())
}
/// Count how many peers have a replica of a post (excluding stale ones).
///
/// A replica counts only if confirmed within the last `staleness_ms`.
pub fn get_replica_count(&self, post_id: &PostId, staleness_ms: u64) -> anyhow::Result<usize> {
    let cutoff = now_ms() - staleness_ms as i64;
    let count: i64 = self.conn.query_row(
        "SELECT COUNT(*) FROM post_replicas WHERE post_id = ?1 AND last_confirmed_ms >= ?2",
        params![post_id.as_slice(), cutoff],
        |row| row.get(0),
    )?;
    Ok(count as usize)
}
/// Get node IDs of peers that have replicas of a post (within staleness window).
///
/// Rows whose blob does not convert to a NodeId are silently skipped.
pub fn get_replica_peers(&self, post_id: &PostId, staleness_ms: u64) -> anyhow::Result<Vec<NodeId>> {
    let cutoff = now_ms() - staleness_ms as i64;
    let mut stmt = self.conn.prepare(
        "SELECT node_id FROM post_replicas WHERE post_id = ?1 AND last_confirmed_ms >= ?2"
    )?;
    let peers: Vec<NodeId> = stmt.query_map(params![post_id.as_slice(), cutoff], |row| {
        let bytes: Vec<u8> = row.get(0)?;
        Ok(bytes)
    })?
    .filter_map(|r| r.ok())
    .filter_map(|bytes| bytes.try_into().ok())
    .collect();
    Ok(peers)
}
/// Get a summary of redundancy across all our authored posts.
/// Returns (total, zero_replicas, one_replica, two_plus_replicas).
///
/// Replicas older than `staleness_ms` are ignored. Implemented as a single
/// LEFT JOIN + GROUP BY aggregate instead of one COUNT query per post
/// (avoids the previous N+1 query pattern; same results).
pub fn get_redundancy_summary(
    &self,
    our_node_id: &NodeId,
    staleness_ms: u64,
) -> anyhow::Result<(usize, usize, usize, usize)> {
    let cutoff = now_ms() - staleness_ms as i64;
    // One row per authored post; COUNT(r.node_id) is 0 when the LEFT JOIN
    // finds no fresh replica (COUNT ignores the NULLs from unmatched rows).
    let mut stmt = self.conn.prepare(
        "SELECT COUNT(r.node_id)
         FROM posts p
         LEFT JOIN post_replicas r
           ON r.post_id = p.id AND r.last_confirmed_ms >= ?2
         WHERE p.author = ?1
         GROUP BY p.id",
    )?;
    let mut rows = stmt.query(params![our_node_id.as_slice(), cutoff])?;
    let mut total = 0usize;
    let mut zero = 0usize;
    let mut one = 0usize;
    let mut two_plus = 0usize;
    while let Some(row) = rows.next()? {
        total += 1;
        match row.get::<_, i64>(0)? {
            0 => zero += 1,
            1 => one += 1,
            _ => two_plus += 1,
        }
    }
    Ok((total, zero, one, two_plus))
}
// ---- Peer Neighbors (2-hop table) ----
/// Store a neighbor relationship: peer_id reported knowing neighbor_id.
///
/// Upserts on (peer_id, neighbor_id), refreshing the anchor flag and
/// reported_at timestamp.
pub fn store_peer_neighbor(
    &self,
    peer_id: &NodeId,
    neighbor_id: &NodeId,
    is_anchor: bool,
) -> anyhow::Result<()> {
    let now = now_ms();
    self.conn.execute(
        "INSERT INTO peer_neighbors (peer_id, neighbor_id, is_anchor, reported_at)
         VALUES (?1, ?2, ?3, ?4)
         ON CONFLICT(peer_id, neighbor_id) DO UPDATE SET
           is_anchor = ?3, reported_at = ?4",
        params![peer_id.as_slice(), neighbor_id.as_slice(), is_anchor as i32, now],
    )?;
    Ok(())
}
/// Store all neighbors reported by a peer during gossip.
/// Skips ourselves and the reporting peer itself; returns how many rows were written.
pub fn store_peer_neighbors(
    &self,
    reporting_peer: &NodeId,
    neighbors: &[GossipPeerInfo],
    our_node_id: &NodeId,
) -> anyhow::Result<usize> {
    let mut stored = 0usize;
    for info in neighbors
        .iter()
        .filter(|gp| &gp.node_id != our_node_id && &gp.node_id != reporting_peer)
    {
        self.store_peer_neighbor(reporting_peer, &info.node_id, info.is_anchor)?;
        stored += 1;
    }
    Ok(stored)
}
/// Prune neighbor entries older than the given max age in milliseconds.
/// Returns the number of rows deleted.
pub fn prune_stale_neighbors(&self, max_age_ms: i64) -> anyhow::Result<usize> {
    let oldest_allowed = now_ms() - max_age_ms;
    let removed = self.conn.execute(
        "DELETE FROM peer_neighbors WHERE reported_at < ?1",
        params![oldest_allowed],
    )?;
    Ok(removed)
}
/// Find which direct peers reported a given neighbor (for address resolution).
pub fn list_peers_with_neighbor(&self, neighbor_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT peer_id FROM peer_neighbors WHERE neighbor_id = ?1 ORDER BY reported_at DESC",
)?;
let mut result = Vec::new();
let mut rows = stmt.query(params![neighbor_id.as_slice()])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Lookup a node in the 2-hop table. Returns true if found as a neighbor of any direct peer.
pub fn lookup_in_two_hop(&self, target: &NodeId) -> anyhow::Result<bool> {
    let hits = self.conn.query_row(
        "SELECT COUNT(*) FROM peer_neighbors WHERE neighbor_id = ?1",
        params![target.as_slice()],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(hits != 0)
}
/// Get all neighbors reported by a specific peer.
pub fn get_peer_neighbor_ids(&self, peer_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT neighbor_id FROM peer_neighbors WHERE peer_id = ?1",
)?;
let mut result = Vec::new();
let mut rows = stmt.query(params![peer_id.as_slice()])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Get the full 2-hop set: all unique neighbor IDs across all reporting peers.
pub fn get_two_hop_set(&self) -> anyhow::Result<std::collections::HashSet<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT DISTINCT neighbor_id FROM peer_neighbors",
)?;
let mut result = std::collections::HashSet::new();
let mut rows = stmt.query([])?;
while let Some(row) = rows.next()? {
result.insert(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
// ---- Wide Peers ----
/// Set or clear the wide_peer flag on a peer.
pub fn set_wide_peer(&self, node_id: &NodeId, is_wide: bool) -> anyhow::Result<()> {
    let flag = if is_wide { 1i32 } else { 0i32 };
    self.conn.execute(
        "UPDATE peers SET is_wide_peer = ?1 WHERE node_id = ?2",
        params![flag, node_id.as_slice()],
    )?;
    Ok(())
}
/// Clear all wide_peer flags.
pub fn clear_all_wide_peers(&self) -> anyhow::Result<()> {
    self.conn
        .execute("UPDATE peers SET is_wide_peer = 0", [])
        .map(|_| ())
        .map_err(Into::into)
}
/// List peers marked as wide peers, most recently seen first.
pub fn list_wide_peers(&self) -> anyhow::Result<Vec<PeerRecord>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen
             FROM peers WHERE is_wide_peer = 1 ORDER BY last_seen DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut out = Vec::new();
    while let Some(row) = rows.next()? {
        out.push(row_to_peer_record(row)?);
    }
    Ok(out)
}
/// Compute diversity scores for all direct peers.
/// Returns (node_id, score) pairs sorted by score descending.
/// Score = |peer.neighbors \ our_two_hop_set| / |peer.neighbors|
pub fn compute_peer_diversity_scores(
    &self,
    our_two_hop_set: &std::collections::HashSet<NodeId>,
) -> anyhow::Result<Vec<(NodeId, f64)>> {
    let mut scored = Vec::new();
    for peer_id in self.list_peers()? {
        let reported = self.get_peer_neighbor_ids(&peer_id)?;
        // Peers that reported nothing contribute no score.
        if reported.is_empty() {
            continue;
        }
        let neighbor_set: std::collections::HashSet<NodeId> = reported.into_iter().collect();
        let novel = neighbor_set.difference(our_two_hop_set).count() as f64;
        scored.push((peer_id, novel / neighbor_set.len() as f64));
    }
    // Descending by score; scores are finite so the fallback ordering is unreachable in practice.
    scored.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
    Ok(scored)
}
// ---- Worm cooldowns ----
/// Record that a worm search for a target failed (for cooldown).
pub fn record_worm_miss(&self, target: &NodeId) -> anyhow::Result<()> {
    let failed_at = now_ms();
    self.conn.execute(
        "INSERT INTO worm_cooldowns (target_id, failed_at) VALUES (?1, ?2)
             ON CONFLICT(target_id) DO UPDATE SET failed_at = ?2",
        params![target.as_slice(), failed_at],
    )?;
    Ok(())
}
/// Check if a target is on worm cooldown (failed within cooldown_ms).
pub fn is_worm_cooldown(&self, target: &NodeId, cooldown_ms: i64) -> anyhow::Result<bool> {
    let window_start = now_ms() - cooldown_ms;
    let recent_failures = self.conn.query_row(
        "SELECT COUNT(*) FROM worm_cooldowns WHERE target_id = ?1 AND failed_at > ?2",
        params![target.as_slice(), window_start],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(recent_failures != 0)
}
/// Clear expired worm cooldowns. Returns the number of rows removed.
pub fn prune_worm_cooldowns(&self, cooldown_ms: i64) -> anyhow::Result<usize> {
    let expired_before = now_ms() - cooldown_ms;
    let removed = self.conn.execute(
        "DELETE FROM worm_cooldowns WHERE failed_at < ?1",
        params![expired_before],
    )?;
    Ok(removed)
}
// ---- Relay cooldowns ----
/// Record that a relay introduction for a target failed (for cooldown).
pub fn record_relay_miss(&self, target: &NodeId) -> anyhow::Result<()> {
    let failed_at = now_ms();
    self.conn.execute(
        "INSERT INTO relay_cooldowns (target_id, failed_at) VALUES (?1, ?2)
             ON CONFLICT(target_id) DO UPDATE SET failed_at = ?2",
        params![target.as_slice(), failed_at],
    )?;
    Ok(())
}
/// Check if a target is on relay cooldown (failed within cooldown_ms).
pub fn is_relay_cooldown(&self, target: &NodeId, cooldown_ms: i64) -> anyhow::Result<bool> {
    let window_start = now_ms() - cooldown_ms;
    let recent_failures = self.conn.query_row(
        "SELECT COUNT(*) FROM relay_cooldowns WHERE target_id = ?1 AND failed_at > ?2",
        params![target.as_slice(), window_start],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(recent_failures != 0)
}
// ---- Audience ----
/// Store an audience relationship. Upserts: an existing (node, direction)
/// row gets its status refreshed; approved_at is only ever moved forward
/// (COALESCE keeps the old value when the new status is not Approved).
pub fn store_audience(
    &self,
    node_id: &NodeId,
    direction: AudienceDirection,
    status: AudienceStatus,
) -> anyhow::Result<()> {
    let now = now_ms();
    let dir_str = match direction {
        AudienceDirection::Inbound => "inbound",
        AudienceDirection::Outbound => "outbound",
    };
    // Approval timestamp is set only when the new status is Approved.
    let (status_str, approved_at) = match status {
        AudienceStatus::Pending => ("pending", None),
        AudienceStatus::Approved => ("approved", Some(now)),
        AudienceStatus::Denied => ("denied", None),
    };
    self.conn.execute(
        "INSERT INTO audience (node_id, direction, status, requested_at, approved_at)
             VALUES (?1, ?2, ?3, ?4, ?5)
             ON CONFLICT(node_id, direction) DO UPDATE SET
               status = ?3, approved_at = COALESCE(?5, audience.approved_at)",
        params![node_id.as_slice(), dir_str, status_str, now, approved_at],
    )?;
    Ok(())
}
/// Get audience members by direction, optionally filtered by status.
pub fn list_audience(
    &self,
    direction: AudienceDirection,
    status: Option<AudienceStatus>,
) -> anyhow::Result<Vec<AudienceRecord>> {
    let dir_str = match direction {
        AudienceDirection::Inbound => "inbound",
        AudienceDirection::Outbound => "outbound",
    };
    // Map the optional status to its stored string form.
    let status_str = status.map(|s| match s {
        AudienceStatus::Pending => "pending",
        AudienceStatus::Approved => "approved",
        AudienceStatus::Denied => "denied",
    });
    let mut records = Vec::new();
    match status_str {
        Some(s) => {
            let mut stmt = self.conn.prepare(
                "SELECT node_id, direction, status, requested_at, approved_at FROM audience WHERE direction = ?1 AND status = ?2",
            )?;
            let mut rows = stmt.query(params![dir_str, s])?;
            while let Some(row) = rows.next()? {
                records.push(row_to_audience_record(row)?);
            }
        }
        None => {
            let mut stmt = self.conn.prepare(
                "SELECT node_id, direction, status, requested_at, approved_at FROM audience WHERE direction = ?1",
            )?;
            let mut rows = stmt.query(params![dir_str])?;
            while let Some(row) = rows.next()? {
                records.push(row_to_audience_record(row)?);
            }
        }
    }
    Ok(records)
}
/// Get approved inbound audience members (nodes we push posts to).
pub fn list_audience_members(&self) -> anyhow::Result<Vec<NodeId>> {
    let approved_inbound = self.list_audience(
        AudienceDirection::Inbound,
        Some(AudienceStatus::Approved),
    )?;
    Ok(approved_inbound.into_iter().map(|rec| rec.node_id).collect())
}
/// Remove an audience relationship for one node in one direction.
pub fn remove_audience(
    &self,
    node_id: &NodeId,
    direction: AudienceDirection,
) -> anyhow::Result<()> {
    let dir_str = match direction {
        AudienceDirection::Inbound => "inbound",
        AudienceDirection::Outbound => "outbound",
    };
    self.conn.execute(
        "DELETE FROM audience WHERE node_id = ?1 AND direction = ?2",
        params![node_id.as_slice(), dir_str],
    )?;
    Ok(())
}
// ---- Reach: N2/N3 ----
/// Replace a peer's entire N1 set in reachable_n2 (their N1 share → our N2).
/// Deletes the reporter's previous rows, then inserts the new set.
pub fn set_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let updated_at = now_ms();
    // Drop the old snapshot for this reporter.
    self.conn.execute(
        "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1",
        params![reporter.as_slice()],
    )?;
    // Insert the replacement set with a shared timestamp.
    let mut insert = self.conn.prepare(
        "INSERT OR REPLACE INTO reachable_n2 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
    )?;
    for reachable in node_ids.iter() {
        insert.execute(params![reporter.as_slice(), reachable.as_slice(), updated_at])?;
    }
    Ok(())
}
/// Add NodeIds to a peer's N1 set in reachable_n2.
pub fn add_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let updated_at = now_ms();
    let mut insert = self.conn.prepare(
        "INSERT OR REPLACE INTO reachable_n2 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
    )?;
    for reachable in node_ids.iter() {
        insert.execute(params![reporter.as_slice(), reachable.as_slice(), updated_at])?;
    }
    Ok(())
}
/// Remove NodeIds from a peer's N1 set in reachable_n2.
pub fn remove_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let mut delete = self.conn.prepare(
        "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1 AND reachable_node_id = ?2",
    )?;
    for reachable in node_ids.iter() {
        delete.execute(params![reporter.as_slice(), reachable.as_slice()])?;
    }
    Ok(())
}
/// Remove all N2 entries from a specific reporter (on disconnect).
/// Returns the number of rows removed.
pub fn clear_peer_n2(&self, reporter: &NodeId) -> anyhow::Result<usize> {
    let removed = self.conn.execute(
        "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1",
        params![reporter.as_slice()],
    )?;
    Ok(removed)
}
/// Replace a peer's N2-reported entries in reachable_n3 (their N2 share → our N3).
/// Deletes the reporter's previous rows, then inserts the new set.
pub fn set_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let updated_at = now_ms();
    self.conn.execute(
        "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1",
        params![reporter.as_slice()],
    )?;
    let mut insert = self.conn.prepare(
        "INSERT OR REPLACE INTO reachable_n3 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
    )?;
    for reachable in node_ids.iter() {
        insert.execute(params![reporter.as_slice(), reachable.as_slice(), updated_at])?;
    }
    Ok(())
}
/// Add to N3 from a peer's N2 changes.
pub fn add_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let updated_at = now_ms();
    let mut insert = self.conn.prepare(
        "INSERT OR REPLACE INTO reachable_n3 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
    )?;
    for reachable in node_ids.iter() {
        insert.execute(params![reporter.as_slice(), reachable.as_slice(), updated_at])?;
    }
    Ok(())
}
/// Remove from N3.
pub fn remove_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
    let mut delete = self.conn.prepare(
        "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1 AND reachable_node_id = ?2",
    )?;
    for reachable in node_ids.iter() {
        delete.execute(params![reporter.as_slice(), reachable.as_slice()])?;
    }
    Ok(())
}
/// Remove all N3 entries from a specific reporter.
/// Returns the number of rows removed.
pub fn clear_peer_n3(&self, reporter: &NodeId) -> anyhow::Result<usize> {
    let removed = self.conn.execute(
        "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1",
        params![reporter.as_slice()],
    )?;
    Ok(removed)
}
/// Which reporters have this node in N2?
pub fn find_in_n2(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT reporter_node_id FROM reachable_n2 WHERE reachable_node_id = ?1",
)?;
let mut result = Vec::new();
let mut rows = stmt.query(params![node_id.as_slice()])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Which reporters have this node in N3?
pub fn find_in_n3(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT reporter_node_id FROM reachable_n3 WHERE reachable_node_id = ?1",
)?;
let mut result = Vec::new();
let mut rows = stmt.query(params![node_id.as_slice()])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Batch lookup: find any of the given node IDs in N2 or N3.
/// Returns Vec<(target, reporter, level)> where level is 2 or 3, sorted by level ASC.
pub fn find_any_in_n2_n3(&self, ids: &[NodeId]) -> anyhow::Result<Vec<(NodeId, NodeId, u8)>> {
    if ids.is_empty() {
        return Ok(Vec::new());
    }
    let mut hits = Vec::new();
    // N2 matches first (closer hops).
    for target in ids {
        for reporter in self.find_in_n2(target)? {
            hits.push((*target, reporter, 2u8));
        }
    }
    // Then N3 matches.
    for target in ids {
        for reporter in self.find_in_n3(target)? {
            hits.push((*target, reporter, 3u8));
        }
    }
    // Stable sort keeps N2 before N3 and preserves insertion order within each level.
    hits.sort_by_key(|&(_, _, level)| level);
    Ok(hits)
}
/// All NodeIds this peer can reach (from N2 table).
pub fn list_n2_for_reporter(&self, reporter: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT reachable_node_id FROM reachable_n2 WHERE reporter_node_id = ?1",
)?;
let mut result = Vec::new();
let mut rows = stmt.query(params![reporter.as_slice()])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Build N1 share: merge mesh peers (connections) + social contacts NodeIds (deduplicated).
pub fn build_n1_share(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut unique = std::collections::HashSet::new();
    // Currently-connected mesh peers.
    unique.extend(self.list_mesh_peers()?.into_iter().map(|(nid, _, _)| nid));
    // Known social contacts.
    unique.extend(self.list_social_routes()?.into_iter().map(|route| route.node_id));
    Ok(unique.into_iter().collect())
}
/// Build N2 share (reach): deduplicated unique NodeIds from all N2 entries.
pub fn build_n2_share(&self) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT DISTINCT reachable_node_id FROM reachable_n2",
)?;
let mut result = Vec::new();
let mut rows = stmt.query([])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Count distinct reachable NodeIds in the N2 table.
pub fn count_distinct_n2(&self) -> anyhow::Result<usize> {
    let distinct = self.conn.query_row(
        "SELECT COUNT(DISTINCT reachable_node_id) FROM reachable_n2",
        [],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(distinct as usize)
}
/// Count distinct reachable NodeIds in the N3 table.
pub fn count_distinct_n3(&self) -> anyhow::Result<usize> {
    let distinct = self.conn.query_row(
        "SELECT COUNT(DISTINCT reachable_node_id) FROM reachable_n3",
        [],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(distinct as usize)
}
/// List distinct reachable NodeIds in the N3 table.
pub fn list_distinct_n3(&self) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT DISTINCT reachable_node_id FROM reachable_n3",
)?;
let mut result = Vec::new();
let mut rows = stmt.query([])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Diversity score: how many unique NodeIds does this reporter contribute
/// that no other (non-excluded) reporter provides?
///
/// Fix: `exclude_reporters` was previously accepted but silently ignored
/// (`let _ = exclude_set`); entries from excluded reporters now no longer
/// count as "provided by someone else", matching the documented contract.
pub fn count_unique_n2_for_reporter(
    &self,
    reporter: &NodeId,
    exclude_reporters: &[NodeId],
) -> anyhow::Result<usize> {
    // This reporter's own N2 set.
    let reporter_set: std::collections::HashSet<NodeId> =
        self.list_n2_for_reporter(reporter)?.into_iter().collect();
    if reporter_set.is_empty() {
        return Ok(0);
    }
    let exclude_set: std::collections::HashSet<NodeId> =
        exclude_reporters.iter().copied().collect();
    // Everything reachable via any OTHER non-excluded reporter.
    let mut other_nodes = std::collections::HashSet::new();
    let mut stmt = self.conn.prepare(
        "SELECT reporter_node_id, reachable_node_id FROM reachable_n2 WHERE reporter_node_id != ?1",
    )?;
    let mut rows = stmt.query(params![reporter.as_slice()])?;
    while let Some(row) = rows.next()? {
        let other_reporter = blob_to_nodeid(row.get(0)?)?;
        if exclude_set.contains(&other_reporter) {
            continue;
        }
        // Malformed reachable-id blobs are skipped (matches prior behavior).
        if let Ok(nid) = blob_to_nodeid(row.get(1)?) {
            other_nodes.insert(nid);
        }
    }
    Ok(reporter_set.difference(&other_nodes).count())
}
/// Clear ALL N2/N3 entries (startup sweep after unclean shutdown).
/// Returns the total number of rows removed from both tables.
pub fn clear_all_n2_n3(&self) -> anyhow::Result<usize> {
    let n2_removed = self.conn.execute("DELETE FROM reachable_n2", [])?;
    let n3_removed = self.conn.execute("DELETE FROM reachable_n3", [])?;
    Ok(n2_removed + n3_removed)
}
/// Clear ALL mesh_peers entries (no connections exist at startup).
/// Returns the number of rows removed.
pub fn clear_all_mesh_peers(&self) -> anyhow::Result<usize> {
    let removed = self.conn.execute("DELETE FROM mesh_peers", [])?;
    Ok(removed)
}
/// Remove stale N2/N3 entries: deletes rows from both reachability tables
/// whose `updated_at` is older than `max_age_ms` ago. Returns the total
/// number of rows deleted across both tables.
pub fn prune_n2_n3(&self, max_age_ms: u64) -> anyhow::Result<usize> {
    let cutoff = now_ms() - max_age_ms as i64;
    let d1 = self.conn.execute(
        "DELETE FROM reachable_n2 WHERE updated_at < ?1",
        params![cutoff],
    )?;
    let d2 = self.conn.execute(
        "DELETE FROM reachable_n3 WHERE updated_at < ?1",
        params![cutoff],
    )?;
    Ok(d1 + d2)
}
/// Score all N2 candidates for growth loop diversity selection.
/// Returns (node_id, reporter_count, in_n3) for each unique N2 candidate.
/// Lower reporter_count = more unique neighborhood = higher diversity value.
pub fn score_n2_candidates_batch(&self) -> anyhow::Result<Vec<(NodeId, usize, bool)>> {
    let mut stmt = self.conn.prepare(
        "SELECT n2.reachable_node_id,
                    COUNT(DISTINCT n2.reporter_node_id) as reporter_count,
                    CASE WHEN n3.reachable_node_id IS NOT NULL THEN 1 ELSE 0 END as in_n3
             FROM reachable_n2 n2
             LEFT JOIN (SELECT DISTINCT reachable_node_id FROM reachable_n3) n3
               ON n2.reachable_node_id = n3.reachable_node_id
             GROUP BY n2.reachable_node_id",
    )?;
    let mut scored = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        let candidate = blob_to_nodeid(row.get(0)?)?;
        let reporters = row.get::<_, i64>(1)? as usize;
        let already_in_n3 = row.get::<_, i64>(2)? != 0;
        scored.push((candidate, reporters, already_in_n3));
    }
    Ok(scored)
}
/// Get a peer's recent_peers from their stored profile.
pub fn get_recent_peers(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
let result: Option<String> = self.conn.query_row(
"SELECT recent_peers FROM profiles WHERE node_id = ?1",
params![node_id.as_slice()],
|row| row.get(0),
).ok();
Ok(result.map(|j| parse_anchors_json(&j)).unwrap_or_default())
}
// ---- Mesh Peers ----
/// Add a mesh peer connection record. Upserts: a reconnect refreshes the
/// slot kind, priority, and connection timestamp (last_diff_seq is only
/// initialized to 0 on first insert).
pub fn add_mesh_peer(
    &self,
    node_id: &NodeId,
    slot_kind: PeerSlotKind,
    priority: i32,
) -> anyhow::Result<()> {
    let connected_at = now_ms();
    let kind_str = slot_kind.to_string();
    self.conn.execute(
        "INSERT INTO mesh_peers (node_id, slot_kind, priority, connected_at, last_diff_seq)
             VALUES (?1, ?2, ?3, ?4, 0)
             ON CONFLICT(node_id) DO UPDATE SET slot_kind = ?2, priority = ?3, connected_at = ?4",
        params![node_id.as_slice(), kind_str, priority, connected_at],
    )?;
    Ok(())
}
/// Remove a mesh peer.
pub fn remove_mesh_peer(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn
        .execute(
            "DELETE FROM mesh_peers WHERE node_id = ?1",
            params![node_id.as_slice()],
        )
        .map(|_| ())
        .map_err(Into::into)
}
/// List all mesh peers: (node_id, slot_kind_str, priority), newest connection first.
pub fn list_mesh_peers(&self) -> anyhow::Result<Vec<(NodeId, String, i32)>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, slot_kind, priority FROM mesh_peers ORDER BY connected_at DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut peers = Vec::new();
    while let Some(row) = rows.next()? {
        let nid = blob_to_nodeid(row.get(0)?)?;
        let kind: String = row.get(1)?;
        let prio: i32 = row.get(2)?;
        peers.push((nid, kind, prio));
    }
    Ok(peers)
}
/// Count mesh peers of a given slot kind.
pub fn count_mesh_peers_by_kind(&self, slot_kind: PeerSlotKind) -> anyhow::Result<usize> {
    let kind_str = slot_kind.to_string();
    let matching = self.conn.query_row(
        "SELECT COUNT(*) FROM mesh_peers WHERE slot_kind = ?1",
        params![kind_str],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(matching as usize)
}
/// Update last_diff_seq for a mesh peer.
pub fn update_mesh_peer_seq(&self, node_id: &NodeId, seq: u64) -> anyhow::Result<()> {
    let seq_i64 = seq as i64;
    self.conn.execute(
        "UPDATE mesh_peers SET last_diff_seq = ?1 WHERE node_id = ?2",
        params![seq_i64, node_id.as_slice()],
    )?;
    Ok(())
}
// ---- Preferred Peers ----
/// Add a bilateral preferred peer agreement.
pub fn add_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<()> {
    let agreed_at = now_ms();
    self.conn.execute(
        "INSERT OR REPLACE INTO preferred_peers (node_id, agreed_at) VALUES (?1, ?2)",
        params![node_id.as_slice(), agreed_at],
    )?;
    Ok(())
}
/// Remove a preferred peer agreement.
pub fn remove_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn
        .execute(
            "DELETE FROM preferred_peers WHERE node_id = ?1",
            params![node_id.as_slice()],
        )
        .map(|_| ())
        .map_err(Into::into)
}
/// List all preferred peers.
pub fn list_preferred_peers(&self) -> anyhow::Result<Vec<NodeId>> {
let mut stmt = self.conn.prepare(
"SELECT node_id FROM preferred_peers ORDER BY agreed_at DESC",
)?;
let mut result = Vec::new();
let mut rows = stmt.query([])?;
while let Some(row) = rows.next()? {
result.push(blob_to_nodeid(row.get(0)?)?);
}
Ok(result)
}
/// Check if a peer is a preferred peer.
pub fn is_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<bool> {
    let hits = self.conn.query_row(
        "SELECT COUNT(*) FROM preferred_peers WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(hits != 0)
}
/// Count preferred peers.
pub fn count_preferred_peers(&self) -> anyhow::Result<usize> {
    let total = self.conn.query_row(
        "SELECT COUNT(*) FROM preferred_peers",
        [],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(total as usize)
}
// ---- Preferred Tree ----
/// Build 2-layer preferred peer tree from stored profiles.
/// Layer 0: target. Layer 1: target's preferred_peers. Layer 2: each L1 peer's preferred_peers.
/// Returns ~100 unique NodeIds.
pub fn build_preferred_tree_for(&self, target: &NodeId) -> anyhow::Result<Vec<NodeId>> {
    let mut tree = std::collections::HashSet::new();
    // Layer 0: the target itself is always included.
    tree.insert(*target);
    // Layer 1: target's preferred peers from their stored profile.
    // Without a stored profile the tree is just the target.
    let layer1 = match self.get_profile(target)? {
        Some(profile) => profile.preferred_peers,
        None => return Ok(tree.into_iter().collect()),
    };
    tree.extend(layer1.iter().copied());
    // Layer 2: each layer-1 peer's own preferred peers, if we have their profile.
    for l1_peer in &layer1 {
        if let Some(profile) = self.get_profile(l1_peer)? {
            tree.extend(profile.preferred_peers.iter().copied());
        }
    }
    Ok(tree.into_iter().collect())
}
/// Update the preferred_tree JSON for a social route.
/// The tree is stored as a JSON array of hex-encoded NodeIds.
pub fn update_social_route_preferred_tree(&self, node_id: &NodeId, tree: &[NodeId]) -> anyhow::Result<()> {
    let hex_ids: Vec<String> = tree.iter().map(hex::encode).collect();
    let json = serde_json::to_string(&hex_ids)?;
    self.conn.execute(
        "UPDATE social_routes SET preferred_tree = ?1 WHERE node_id = ?2",
        params![json, node_id.as_slice()],
    )?;
    Ok(())
}
/// Update the preferred_tree JSON for a blob upstream entry.
/// The tree is stored as a JSON array of hex-encoded NodeIds.
pub fn update_blob_upstream_preferred_tree(&self, cid: &[u8; 32], tree: &[NodeId]) -> anyhow::Result<()> {
    let hex_ids: Vec<String> = tree.iter().map(hex::encode).collect();
    let json = serde_json::to_string(&hex_ids)?;
    self.conn.execute(
        "UPDATE blob_upstream SET preferred_tree = ?1 WHERE cid = ?2",
        params![json, cid.as_slice()],
    )?;
    Ok(())
}
/// Get the preferred_tree for a blob upstream entry.
pub fn get_blob_upstream_preferred_tree(&self, cid: &[u8; 32]) -> anyhow::Result<Vec<NodeId>> {
let json: String = self.conn.query_row(
"SELECT preferred_tree FROM blob_upstream WHERE cid = ?1",
params![cid.as_slice()],
|row| row.get(0),
).unwrap_or_else(|_| "[]".to_string());
Ok(parse_anchors_json(&json))
}
// ---- Social Routes ----
/// Insert or update a social route entry.
///
/// Address lists and the preferred tree are serialized to JSON text columns
/// (addresses and preferred-tree entries as arrays of strings; peer_addresses
/// via its serde impl). On conflict the row is updated in place, and the two
/// timestamp columns use MAX(existing, new) so a caller holding a stale
/// entry can never move them backwards.
pub fn upsert_social_route(&self, entry: &SocialRouteEntry) -> anyhow::Result<()> {
    // SocketAddrs -> JSON array of "ip:port" strings.
    let addrs_json = serde_json::to_string(
        &entry.addresses.iter().map(|a| a.to_string()).collect::<Vec<_>>()
    )?;
    let peer_addrs_json = serde_json::to_string(&entry.peer_addresses)?;
    // NodeIds -> JSON array of hex strings.
    let pref_tree_json = serde_json::to_string(
        &entry.preferred_tree.iter().map(hex::encode).collect::<Vec<_>>()
    )?;
    self.conn.execute(
        "INSERT INTO social_routes (node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
             ON CONFLICT(node_id) DO UPDATE SET
               addresses = ?2, peer_addresses = ?3, relation = ?4, status = ?5,
               last_connected_ms = MAX(social_routes.last_connected_ms, ?6),
               last_seen_ms = MAX(social_routes.last_seen_ms, ?7),
               reach_method = ?8, preferred_tree = ?9",
        params![
            entry.node_id.as_slice(),
            addrs_json,
            peer_addrs_json,
            entry.relation.to_string(),
            entry.status.to_string(),
            entry.last_connected_ms as i64,
            entry.last_seen_ms as i64,
            entry.reach_method.to_string(),
            pref_tree_json,
        ],
    )?;
    Ok(())
}
/// Get a single social route entry, or None if no route is stored.
pub fn get_social_route(&self, node_id: &NodeId) -> anyhow::Result<Option<SocialRouteEntry>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree
             FROM social_routes WHERE node_id = ?1",
    )?;
    let mut rows = stmt.query(params![node_id.as_slice()])?;
    match rows.next()? {
        Some(row) => Ok(Some(row_to_social_route(row)?)),
        None => Ok(None),
    }
}
/// Remove a social route entry.
pub fn remove_social_route(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn
        .execute(
            "DELETE FROM social_routes WHERE node_id = ?1",
            params![node_id.as_slice()],
        )
        .map(|_| ())
        .map_err(Into::into)
}
/// Update address + last_connected + status=online for a social route.
/// Both last_connected_ms and last_seen_ms are bumped to now.
pub fn touch_social_route_connect(
    &self,
    node_id: &NodeId,
    addrs: &[std::net::SocketAddr],
    method: ReachMethod,
) -> anyhow::Result<()> {
    let now = now_ms();
    let addr_strings: Vec<String> = addrs.iter().map(|a| a.to_string()).collect();
    let addrs_json = serde_json::to_string(&addr_strings)?;
    self.conn.execute(
        "UPDATE social_routes SET addresses = ?1, last_connected_ms = ?2, last_seen_ms = ?2, status = 'online', reach_method = ?3
             WHERE node_id = ?4",
        params![addrs_json, now, method.to_string(), node_id.as_slice()],
    )?;
    Ok(())
}
/// Update peer_addresses + last_seen for a social route.
pub fn update_social_route_peer_addrs(
    &self,
    node_id: &NodeId,
    peer_addrs: &[PeerWithAddress],
) -> anyhow::Result<()> {
    let serialized = serde_json::to_string(peer_addrs)?;
    let now = now_ms();
    self.conn.execute(
        "UPDATE social_routes SET peer_addresses = ?1, last_seen_ms = ?2 WHERE node_id = ?3",
        params![serialized, now, node_id.as_slice()],
    )?;
    Ok(())
}
/// Update just the address of a social route (stored as a one-element JSON array).
pub fn update_social_route_address(
    &self,
    node_id: &NodeId,
    addr: &str,
) -> anyhow::Result<()> {
    let addrs_json = serde_json::to_string(&vec![addr])?;
    let now = now_ms();
    self.conn.execute(
        "UPDATE social_routes SET addresses = ?1, last_seen_ms = ?2 WHERE node_id = ?3",
        params![addrs_json, now, node_id.as_slice()],
    )?;
    Ok(())
}
/// Mark a social route as online or disconnected.
pub fn set_social_route_status(&self, node_id: &NodeId, status: SocialStatus) -> anyhow::Result<()> {
    let status_str = status.to_string();
    self.conn.execute(
        "UPDATE social_routes SET status = ?1 WHERE node_id = ?2",
        params![status_str, node_id.as_slice()],
    )?;
    Ok(())
}
/// List all social routes, sorted by last_seen DESC.
pub fn list_social_routes(&self) -> anyhow::Result<Vec<SocialRouteEntry>> {
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree
             FROM social_routes ORDER BY last_seen_ms DESC",
    )?;
    let mut rows = stmt.query([])?;
    let mut routes = Vec::new();
    while let Some(row) = rows.next()? {
        routes.push(row_to_social_route(row)?);
    }
    Ok(routes)
}
/// List social routes with last_seen older than threshold, stalest first.
pub fn list_stale_social_routes(&self, max_age_ms: u64) -> anyhow::Result<Vec<SocialRouteEntry>> {
    let oldest_allowed = now_ms() - max_age_ms as i64;
    let mut stmt = self.conn.prepare(
        "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree
             FROM social_routes WHERE last_seen_ms < ?1 ORDER BY last_seen_ms ASC",
    )?;
    let mut rows = stmt.query(params![oldest_allowed])?;
    let mut stale = Vec::new();
    while let Some(row) = rows.next()? {
        stale.push(row_to_social_route(row)?);
    }
    Ok(stale)
}
/// Check if a social route exists for a node.
pub fn has_social_route(&self, node_id: &NodeId) -> anyhow::Result<bool> {
    let hits = self.conn.query_row(
        "SELECT COUNT(*) FROM social_routes WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(hits != 0)
}
/// Bulk-populate social_routes from follows + audience + peers.
/// Returns the number of routes created/updated.
///
/// For every social contact (union of follows and approved inbound audience)
/// the relation is derived from which set(s) the contact is in, addresses are
/// looked up from the peers table, and a fresh preferred-peer tree is built.
/// Contacts that already have a route keep their runtime state (status,
/// timestamps, addresses) and only get the preferred tree refreshed — note
/// that the "count" return value counts newly created routes only.
pub fn rebuild_social_routes(&self) -> anyhow::Result<usize> {
    let now = now_ms() as u64;
    let mut count = 0;
    // Collect follows
    let follows: std::collections::HashSet<NodeId> =
        self.list_follows()?.into_iter().collect();
    // Collect approved audience members (inbound = they are in our audience)
    let audience_members: std::collections::HashSet<NodeId> =
        self.list_audience_members()?.into_iter().collect();
    // Union of all social contacts
    let mut all_contacts: std::collections::HashSet<NodeId> = std::collections::HashSet::new();
    all_contacts.extend(&follows);
    all_contacts.extend(&audience_members);
    for nid in all_contacts {
        // Membership in both sets => mutual; the (false, false) arm is
        // unreachable given the union above, but kept for exhaustiveness.
        let relation = match (follows.contains(&nid), audience_members.contains(&nid)) {
            (true, true) => SocialRelation::Mutual,
            (true, false) => SocialRelation::Follow,
            (false, true) => SocialRelation::Audience,
            (false, false) => continue,
        };
        // Look up addresses from peers table
        let addresses: Vec<std::net::SocketAddr> = self
            .get_peer_record(&nid)?
            .map(|r| r.addresses)
            .unwrap_or_default();
        // Build peer_addresses from the contact's profile recent_peers
        let peer_addresses = self.build_peer_addresses_for(&nid)?;
        // Build preferred peer tree (best-effort: failure yields an empty tree)
        let preferred_tree = self.build_preferred_tree_for(&nid).unwrap_or_default();
        // Only insert if not already present (don't overwrite runtime state)
        if !self.has_social_route(&nid)? {
            self.upsert_social_route(&SocialRouteEntry {
                node_id: nid,
                addresses,
                peer_addresses,
                relation,
                status: SocialStatus::Disconnected,
                last_connected_ms: 0,
                last_seen_ms: now,
                reach_method: ReachMethod::Direct,
                preferred_tree,
            })?;
            count += 1;
        } else {
            // Update the preferred tree for existing routes
            self.update_social_route_preferred_tree(&nid, &preferred_tree)?;
        }
    }
    Ok(count)
}
/// Build peer_addresses for a contact from their profile's recent_peers.
/// Capped at the first 10 recent peers; peers without a stored record get
/// an empty address list.
pub fn build_peer_addresses_for(&self, node_id: &NodeId) -> anyhow::Result<Vec<PeerWithAddress>> {
    let recent = self.get_recent_peers(node_id)?;
    let mut entries = Vec::new();
    for peer in recent.iter().take(10) {
        let addr_strings: Vec<String> = match self.get_peer_record(peer)? {
            Some(record) => record.addresses.iter().map(|a| a.to_string()).collect(),
            None => Vec::new(),
        };
        entries.push(PeerWithAddress {
            n: hex::encode(peer),
            a: addr_strings,
        });
    }
    Ok(entries)
}
// ---- Reconnect Watchers ----
/// Register a watcher for a disconnected peer.
pub fn add_reconnect_watcher(&self, target: &NodeId, watcher: &NodeId) -> anyhow::Result<()> {
    let added_at = now_ms();
    self.conn.execute(
        "INSERT OR REPLACE INTO reconnect_watchers (target_node_id, watcher_node_id, added_at) VALUES (?1, ?2, ?3)",
        params![target.as_slice(), watcher.as_slice(), added_at],
    )?;
    Ok(())
}
/// Get all watchers registered for `target`.
pub fn get_reconnect_watchers(&self, target: &NodeId) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self.conn.prepare(
        "SELECT watcher_node_id FROM reconnect_watchers WHERE target_node_id = ?1",
    )?;
    let raw = stmt.query_map(params![target.as_slice()], |row| row.get::<_, Vec<u8>>(0))?;
    let mut watchers = Vec::new();
    for bytes in raw {
        // A wrong-length id is a corrupt row; surface it as an error.
        watchers.push(blob_to_nodeid(bytes?)?);
    }
    Ok(watchers)
}
/// Drop every watcher registered for `target` (called after notifying them).
pub fn clear_reconnect_watchers(&self, target: &NodeId) -> anyhow::Result<()> {
    let sql = "DELETE FROM reconnect_watchers WHERE target_node_id = ?1";
    self.conn.execute(sql, params![target.as_slice()])?;
    Ok(())
}
/// Delete watcher registrations older than `max_age_ms`.
/// Returns the number of rows removed.
pub fn prune_stale_watchers(&self, max_age_ms: i64) -> anyhow::Result<usize> {
    let oldest_allowed = now_ms() - max_age_ms;
    let removed = self.conn.execute(
        "DELETE FROM reconnect_watchers WHERE added_at < ?1",
        params![oldest_allowed],
    )?;
    Ok(removed)
}
// ---- Stats ----
// ---- Blobs ----
/// Record blob metadata. INSERT OR IGNORE (idempotent).
///
/// `created_at` is the caller-supplied (author-side) creation time, while
/// `stored_at` and `last_accessed_at` are both initialized to local now —
/// note that parameter ?7 is deliberately bound to both columns.
/// `pinned` starts at 0 (unpinned).
pub fn record_blob(
    &self,
    cid: &[u8; 32],
    post_id: &PostId,
    author: &NodeId,
    size_bytes: u64,
    mime_type: &str,
    created_at: u64,
) -> anyhow::Result<()> {
    let now = now_ms();
    self.conn.execute(
        "INSERT OR IGNORE INTO blobs (cid, post_id, author, size_bytes, mime_type, created_at, stored_at, last_accessed_at, pinned)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?7, 0)",
        params![
            cid.as_slice(),
            post_id.as_slice(),
            author.as_slice(),
            size_bytes as i64,
            mime_type,
            created_at as i64,
            now,
        ],
    )?;
    Ok(())
}
/// Check whether blob metadata exists for `cid`.
///
/// Best-effort: any query failure (including "no such row") reads as `false`.
pub fn has_blob(&self, cid: &[u8; 32]) -> bool {
    let probe = self.conn.query_row(
        "SELECT 1 FROM blobs WHERE cid = ?1",
        params![cid.as_slice()],
        |_| Ok(()),
    );
    probe.is_ok()
}
/// Delete the metadata row for a blob, if present (no-op otherwise).
pub fn remove_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
    let sql = "DELETE FROM blobs WHERE cid = ?1";
    self.conn.execute(sql, params![cid.as_slice()])?;
    Ok(())
}
/// Update last_accessed_at for a blob (enables future LRU eviction).
pub fn touch_blob_access(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
self.conn.execute(
"UPDATE blobs SET last_accessed_at = ?1 WHERE cid = ?2",
params![now_ms(), cid.as_slice()],
)?;
Ok(())
}
/// Look up the post a blob belongs to, or `None` if the blob is unknown.
pub fn get_blob_post_id(&self, cid: &[u8; 32]) -> anyhow::Result<Option<PostId>> {
    let lookup = self.conn.query_row(
        "SELECT post_id FROM blobs WHERE cid = ?1",
        params![cid.as_slice()],
        |row| row.get::<_, Vec<u8>>(0),
    );
    match lookup {
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
        Ok(bytes) => {
            // A wrong-length value means a corrupt row, not a missing one.
            let pid: PostId = bytes
                .try_into()
                .map_err(|_| anyhow::anyhow!("invalid post_id in blobs"))?;
            Ok(Some(pid))
        }
    }
}
/// Search post attachments JSON for a blob CID. Returns (mime_type, author_node_id).
/// Fallback for when the blobs table doesn't have an entry (e.g. posts stored via PostFetch).
///
/// Only attachments of Public posts are matched; non-public posts are skipped.
/// Rows whose visibility or attachments JSON fails to parse are skipped too.
pub fn find_blob_in_post_attachments(&self, blob_id: &[u8; 32]) -> anyhow::Result<Option<(String, NodeId)>> {
    // Attachment.cid is [u8; 32], serde serializes as JSON array of numbers e.g. [37,147,227,240,...]
    // Build a LIKE pattern from the first 8 bytes to narrow the search
    // (serde_json emits no spaces, so "37,147,..." appears verbatim in the column;
    // the pattern contains only digits and commas, so no LIKE metacharacters leak in).
    let byte_pattern: String = blob_id[..8].iter()
        .map(|b| b.to_string())
        .collect::<Vec<_>>()
        .join(",");
    let pattern = format!("%{}%", byte_pattern);
    // LIMIT 10 caps the scan; the prefix pattern should rarely match more than one post.
    let mut stmt = self.conn.prepare(
        "SELECT author, attachments, visibility FROM posts WHERE attachments LIKE ?1 LIMIT 10"
    )?;
    let mut rows = stmt.query(params![pattern])?;
    while let Some(row) = rows.next()? {
        let author_bytes: Vec<u8> = row.get(0)?;
        let att_json: String = row.get(1)?;
        let vis_json: String = row.get(2)?;
        let visibility: PostVisibility = match serde_json::from_str(&vis_json) {
            Ok(v) => v,
            Err(_) => continue,
        };
        if !matches!(visibility, PostVisibility::Public) {
            continue;
        }
        let attachments: Vec<crate::types::Attachment> = match serde_json::from_str(&att_json) {
            Ok(a) => a,
            Err(_) => continue,
        };
        // The LIKE prefix match is only a filter; confirm the full 32-byte CID here.
        for att in &attachments {
            if att.cid == *blob_id {
                if let Ok(author) = author_bytes.clone().try_into() {
                    return Ok(Some((att.mime_type.clone(), author)));
                }
            }
        }
    }
    Ok(None)
}
/// Remove every blob-metadata row tied to `post_id`, returning the CIDs
/// so the caller can clean up the corresponding files on disk.
pub fn delete_blobs_for_post(&self, post_id: &PostId) -> anyhow::Result<Vec<[u8; 32]>> {
    let mut stmt = self.conn.prepare(
        "SELECT cid FROM blobs WHERE post_id = ?1"
    )?;
    let raw = stmt.query_map(params![post_id.as_slice()], |row| row.get::<_, Vec<u8>>(0))?;
    // Best-effort collection: unreadable or malformed CIDs are skipped so the
    // delete below still runs for everything else.
    let mut cids: Vec<[u8; 32]> = Vec::new();
    for entry in raw {
        if let Ok(bytes) = entry {
            if let Ok(cid) = bytes.try_into() {
                cids.push(cid);
            }
        }
    }
    self.conn.execute(
        "DELETE FROM blobs WHERE post_id = ?1",
        params![post_id.as_slice()],
    )?;
    Ok(cids)
}
/// Sum of `size_bytes` over all stored blobs; 0 when the table is empty.
pub fn total_blob_bytes(&self) -> anyhow::Result<u64> {
    let total = self.conn.query_row(
        "SELECT COALESCE(SUM(size_bytes), 0) FROM blobs",
        [],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(total as u64)
}
/// Mark a blob as pinned so the eviction scorer protects it.
pub fn pin_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
    let sql = "UPDATE blobs SET pinned = 1 WHERE cid = ?1";
    self.conn.execute(sql, params![cid.as_slice()])?;
    Ok(())
}
/// Clear the pinned flag on a blob, making it eligible for eviction again.
pub fn unpin_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
    let sql = "UPDATE blobs SET pinned = 0 WHERE cid = ?1";
    self.conn.execute(sql, params![cid.as_slice()])?;
    Ok(())
}
/// Whether a blob is pinned. Unknown blobs — and any query error — read
/// as unpinned.
pub fn is_blob_pinned(&self, cid: &[u8; 32]) -> bool {
    let flag = self.conn.query_row(
        "SELECT pinned FROM blobs WHERE cid = ?1",
        params![cid.as_slice()],
        |row| row.get::<_, i64>(0),
    );
    matches!(flag, Ok(v) if v != 0)
}
/// Get eviction candidates with replica counts.
/// Returns blob metadata + peer_copies from post_replicas (stale within staleness_ms).
///
/// `peer_copies` counts only replicas confirmed within the last `staleness_ms`
/// milliseconds; blobs with no fresh replica rows get peer_copies = 0 via the
/// LEFT JOIN + COALESCE.  Rows with malformed cid/post_id/author blobs are
/// silently skipped rather than failing the whole scan.
pub fn get_eviction_candidates(&self, staleness_ms: u64) -> anyhow::Result<Vec<EvictionCandidate>> {
    let cutoff = now_ms() - staleness_ms as i64;
    let mut stmt = self.conn.prepare(
        "SELECT b.cid, b.post_id, b.author, b.size_bytes, b.created_at,
b.last_accessed_at, b.pinned,
COALESCE(r.copies, 0) as peer_copies
FROM blobs b
LEFT JOIN (
SELECT post_id, COUNT(*) as copies
FROM post_replicas
WHERE last_confirmed_ms >= ?1
GROUP BY post_id
) r ON b.post_id = r.post_id"
    )?;
    let rows = stmt.query_map(params![cutoff], |row| {
        let cid_bytes: Vec<u8> = row.get(0)?;
        let post_id_bytes: Vec<u8> = row.get(1)?;
        let author_bytes: Vec<u8> = row.get(2)?;
        let size_bytes = row.get::<_, i64>(3)? as u64;
        let created_at = row.get::<_, i64>(4)? as u64;
        let last_accessed_at = row.get::<_, i64>(5)? as u64;
        let pinned = row.get::<_, i64>(6)? != 0;
        let peer_copies = row.get::<_, i64>(7)? as u32;
        Ok((cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies) = row?;
        // Corrupt fixed-size fields: skip the candidate instead of aborting.
        let cid: [u8; 32] = match cid_bytes.try_into() {
            Ok(c) => c,
            Err(_) => continue,
        };
        let post_id: PostId = match post_id_bytes.try_into() {
            Ok(p) => p,
            Err(_) => continue,
        };
        let author: NodeId = match author_bytes.try_into() {
            Ok(a) => a,
            Err(_) => continue,
        };
        result.push(EvictionCandidate {
            cid,
            post_id,
            author,
            size_bytes,
            created_at,
            last_accessed_at,
            pinned,
            peer_copies,
        });
    }
    Ok(result)
}
/// Drop every piece of CDN bookkeeping for a blob: its manifest plus the
/// upstream and downstream routing rows.
pub fn cleanup_cdn_for_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
    const STATEMENTS: [&str; 3] = [
        "DELETE FROM cdn_manifests WHERE cid = ?1",
        "DELETE FROM blob_upstream WHERE cid = ?1",
        "DELETE FROM blob_downstream WHERE cid = ?1",
    ];
    for sql in STATEMENTS {
        self.conn.execute(sql, params![cid.as_slice()])?;
    }
    Ok(())
}
/// List all blob CIDs attached to a post without deleting anything.
/// Malformed CID rows are skipped.
pub fn get_blobs_for_post(&self, post_id: &PostId) -> anyhow::Result<Vec<[u8; 32]>> {
    let mut stmt = self.conn.prepare(
        "SELECT cid FROM blobs WHERE post_id = ?1"
    )?;
    let raw = stmt.query_map(params![post_id.as_slice()], |row| row.get::<_, Vec<u8>>(0))?;
    let mut cids: Vec<[u8; 32]> = Vec::new();
    for entry in raw {
        if let Ok(bytes) = entry {
            if let Ok(cid) = bytes.try_into() {
                cids.push(cid);
            }
        }
    }
    Ok(cids)
}
/// Forget the upstream source recorded for a blob CID.
pub fn remove_blob_upstream(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
    let sql = "DELETE FROM blob_upstream WHERE cid = ?1";
    self.conn.execute(sql, params![cid.as_slice()])?;
    Ok(())
}
/// Total number of posts stored locally.
pub fn post_count(&self) -> anyhow::Result<usize> {
    let n = self
        .conn
        .query_row("SELECT COUNT(*) FROM posts", [], |row| row.get::<_, i64>(0))?;
    Ok(n as usize)
}
// ---- CDN Manifests ----
/// Store or update a CDN manifest for a blob CID.
///
/// On conflict only `manifest_json` and `updated_at` are refreshed;
/// NOTE(review): the `author` column is deliberately left untouched on
/// update — presumably a CID's author never changes; confirm against the
/// manifest protocol before relying on that.
pub fn store_cdn_manifest(
    &self,
    cid: &[u8; 32],
    manifest_json: &str,
    author: &NodeId,
    updated_at: u64,
) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT INTO cdn_manifests (cid, manifest_json, author, updated_at) VALUES (?1, ?2, ?3, ?4)
ON CONFLICT(cid) DO UPDATE SET manifest_json = ?2, updated_at = ?4",
        params![cid.as_slice(), manifest_json, author.as_slice(), updated_at as i64],
    )?;
    Ok(())
}
/// Fetch the raw manifest JSON stored for a blob CID, if any.
pub fn get_cdn_manifest(&self, cid: &[u8; 32]) -> anyhow::Result<Option<String>> {
    let lookup = self.conn.query_row(
        "SELECT manifest_json FROM cdn_manifests WHERE cid = ?1",
        params![cid.as_slice()],
        |row| row.get::<_, String>(0),
    );
    match lookup {
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        other => other.map(Some).map_err(Into::into),
    }
}
/// All CDN manifests for blobs authored by `author`: (cid, manifest_json).
pub fn get_manifests_for_author_blobs(
    &self,
    author: &NodeId,
) -> anyhow::Result<Vec<([u8; 32], String)>> {
    let mut stmt = self.conn.prepare(
        "SELECT cid, manifest_json FROM cdn_manifests WHERE author = ?1"
    )?;
    let raw = stmt.query_map(params![author.as_slice()], |row| {
        Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, String>(1)?))
    })?;
    let mut manifests = Vec::new();
    for entry in raw {
        let (cid_bytes, json) = entry?;
        // A malformed CID here is a hard error, not a skip.
        let cid: [u8; 32] = cid_bytes
            .try_into()
            .map_err(|_| anyhow::anyhow!("invalid cid in cdn_manifests"))?;
        manifests.push((cid, json));
    }
    Ok(manifests)
}
/// Record the upstream source for a blob CID.
pub fn store_blob_upstream(
&self,
cid: &[u8; 32],
source_node_id: &NodeId,
source_addresses: &[String],
) -> anyhow::Result<()> {
let addrs_json = serde_json::to_string(source_addresses)?;
self.conn.execute(
"INSERT INTO blob_upstream (cid, source_node_id, source_addresses, stored_at) VALUES (?1, ?2, ?3, ?4)
ON CONFLICT(cid) DO UPDATE SET source_node_id = ?2, source_addresses = ?3, stored_at = ?4",
params![cid.as_slice(), source_node_id.as_slice(), addrs_json, now_ms()],
)?;
Ok(())
}
/// Get the upstream source for a blob CID: (node_id, addresses).
///
/// Returns `Ok(None)` when no upstream is recorded.  A corrupt node id is a
/// hard error, but unparseable address JSON degrades to an empty Vec.
pub fn get_blob_upstream(&self, cid: &[u8; 32]) -> anyhow::Result<Option<(NodeId, Vec<String>)>> {
    let result = self.conn.query_row(
        "SELECT source_node_id, source_addresses FROM blob_upstream WHERE cid = ?1",
        params![cid.as_slice()],
        |row| {
            let nid_bytes: Vec<u8> = row.get(0)?;
            let addrs_json: String = row.get(1)?;
            Ok((nid_bytes, addrs_json))
        },
    );
    match result {
        Ok((nid_bytes, addrs_json)) => {
            let nid = blob_to_nodeid(nid_bytes)?;
            let addrs: Vec<String> = serde_json::from_str(&addrs_json).unwrap_or_default();
            Ok(Some((nid, addrs)))
        }
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
    }
}
/// Register a downstream peer for a blob CID. Returns false only when the
/// blob is already at 100 downstream peers AND this peer is not among them.
///
/// A peer that is already registered may always refresh its addresses and
/// timestamp, even at capacity — the UPSERT below cannot grow the table in
/// that case.  (Previously an existing peer was wrongly rejected at capacity.)
pub fn add_blob_downstream(
    &self,
    cid: &[u8; 32],
    peer_node_id: &NodeId,
    peer_addresses: &[String],
) -> anyhow::Result<bool> {
    let already_registered = self
        .conn
        .query_row(
            "SELECT 1 FROM blob_downstream WHERE cid = ?1 AND peer_node_id = ?2",
            params![cid.as_slice(), peer_node_id.as_slice()],
            |_| Ok(()),
        )
        .is_ok();
    // Only a genuinely new registration is subject to the capacity cap.
    if !already_registered && self.get_blob_downstream_count(cid)? >= 100 {
        return Ok(false);
    }
    let addrs_json = serde_json::to_string(peer_addresses)?;
    self.conn.execute(
        "INSERT INTO blob_downstream (cid, peer_node_id, peer_addresses, registered_at) VALUES (?1, ?2, ?3, ?4)
ON CONFLICT(cid, peer_node_id) DO UPDATE SET peer_addresses = ?3, registered_at = ?4",
        params![cid.as_slice(), peer_node_id.as_slice(), addrs_json, now_ms()],
    )?;
    Ok(true)
}
/// Every downstream peer registered for a blob CID, with its addresses.
pub fn get_blob_downstream(&self, cid: &[u8; 32]) -> anyhow::Result<Vec<(NodeId, Vec<String>)>> {
    let mut stmt = self.conn.prepare(
        "SELECT peer_node_id, peer_addresses FROM blob_downstream WHERE cid = ?1"
    )?;
    let raw = stmt.query_map(params![cid.as_slice()], |row| {
        Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, String>(1)?))
    })?;
    let mut peers = Vec::new();
    for entry in raw {
        let (nid_bytes, addrs_json) = entry?;
        let nid = blob_to_nodeid(nid_bytes)?;
        // Bad address JSON degrades to an empty list rather than an error.
        let addrs: Vec<String> = serde_json::from_str(&addrs_json).unwrap_or_default();
        peers.push((nid, addrs));
    }
    Ok(peers)
}
/// Number of downstream peers currently registered for a blob CID.
pub fn get_blob_downstream_count(&self, cid: &[u8; 32]) -> anyhow::Result<u32> {
    let n = self.conn.query_row(
        "SELECT COUNT(*) FROM blob_downstream WHERE cid = ?1",
        params![cid.as_slice()],
        |row| row.get::<_, i64>(0),
    )?;
    Ok(n as u32)
}
/// Unregister a single downstream peer for a blob CID.
pub fn remove_blob_downstream(&self, cid: &[u8; 32], peer_node_id: &NodeId) -> anyhow::Result<()> {
    let sql = "DELETE FROM blob_downstream WHERE cid = ?1 AND peer_node_id = ?2";
    self.conn.execute(sql, params![cid.as_slice(), peer_node_id.as_slice()])?;
    Ok(())
}
/// Get manifests older than a cutoff: Vec<(cid, upstream_node_id, upstream_addresses)>.
///
/// NOTE(review): despite the name, `older_than_ms` is used as an ABSOLUTE
/// timestamp cutoff (`updated_at < older_than_ms`), not a relative age —
/// contrast with `prune_stale_watchers`, which subtracts from now.  Confirm
/// callers pass a timestamp.  Manifests with no known upstream (NULL join)
/// or a corrupt node id are skipped entirely.
pub fn get_stale_manifests(&self, older_than_ms: u64) -> anyhow::Result<Vec<([u8; 32], NodeId, Vec<String>)>> {
    let mut stmt = self.conn.prepare(
        "SELECT m.cid, u.source_node_id, u.source_addresses
FROM cdn_manifests m
LEFT JOIN blob_upstream u ON m.cid = u.cid
WHERE m.updated_at < ?1"
    )?;
    let rows = stmt.query_map(params![older_than_ms as i64], |row| {
        let cid_bytes: Vec<u8> = row.get(0)?;
        let nid_bytes: Option<Vec<u8>> = row.get(1)?;
        let addrs_json: Option<String> = row.get(2)?;
        Ok((cid_bytes, nid_bytes, addrs_json))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (cid_bytes, nid_bytes, addrs_json) = row?;
        let cid: [u8; 32] = match cid_bytes.try_into() {
            Ok(c) => c,
            Err(_) => continue,
        };
        // No upstream row (or a corrupt one) → nothing to refresh from; skip.
        let nid = match nid_bytes {
            Some(b) => match blob_to_nodeid(b) {
                Ok(n) => n,
                Err(_) => continue,
            },
            None => continue,
        };
        let addrs: Vec<String> = addrs_json
            .map(|j| serde_json::from_str(&j).unwrap_or_default())
            .unwrap_or_default();
        result.push((cid, nid, addrs));
    }
    Ok(result)
}
/// Get the `count` posts before and `count` posts after a reference timestamp
/// for an author. Returns (previous, following) ManifestEntry vectors:
/// `previous` is ordered newest-first (DESC), `following` oldest-first (ASC).
pub fn get_author_post_neighborhood(
    &self,
    author: &NodeId,
    ref_timestamp_ms: u64,
    count: usize,
) -> anyhow::Result<(Vec<ManifestEntry>, Vec<ManifestEntry>)> {
    // Previous posts: timestamp < ref, descending, take `count`
    let previous = self.author_neighborhood_entries(
        "SELECT id, timestamp_ms, attachments FROM posts
WHERE author = ?1 AND timestamp_ms < ?2
ORDER BY timestamp_ms DESC LIMIT ?3",
        author,
        ref_timestamp_ms,
        count,
    )?;
    // Following posts: timestamp > ref, ascending, take `count`
    let following = self.author_neighborhood_entries(
        "SELECT id, timestamp_ms, attachments FROM posts
WHERE author = ?1 AND timestamp_ms > ?2
ORDER BY timestamp_ms ASC LIMIT ?3",
        author,
        ref_timestamp_ms,
        count,
    )?;
    Ok((previous, following))
}
/// Shared query/decode step for `get_author_post_neighborhood`.
/// `sql` must select (id, timestamp_ms, attachments) and bind
/// ?1 = author, ?2 = reference timestamp, ?3 = row limit.
fn author_neighborhood_entries(
    &self,
    sql: &str,
    author: &NodeId,
    ref_timestamp_ms: u64,
    count: usize,
) -> anyhow::Result<Vec<ManifestEntry>> {
    let mut stmt = self.conn.prepare(sql)?;
    let rows = stmt.query_map(
        params![author.as_slice(), ref_timestamp_ms as i64, count as i64],
        |row| {
            let id_bytes: Vec<u8> = row.get(0)?;
            let ts: i64 = row.get(1)?;
            let att_json: String = row.get(2)?;
            Ok((id_bytes, ts, att_json))
        },
    )?;
    let mut entries = Vec::new();
    for row in rows {
        let (id_bytes, ts, att_json) = row?;
        let post_id = blob_to_postid(id_bytes)?;
        // Unparseable attachments JSON is treated as "no attachments".
        let attachments: Vec<Attachment> = serde_json::from_str(&att_json).unwrap_or_default();
        entries.push(ManifestEntry {
            post_id,
            timestamp_ms: ts as u64,
            has_attachments: !attachments.is_empty(),
        });
    }
    Ok(entries)
}
/// Get mesh peers and N2 peers known to have an author's posts (from post_replicas overlap).
/// Used by the lateral fetch cascade step.
///
/// Returns at most 10 node ids; peers holding the specific `post_id` sort
/// first, ties broken by most-recently-confirmed replica.  Corrupt node-id
/// rows are skipped silently.
pub fn get_lateral_blob_sources(&self, author: &NodeId, post_id: &PostId) -> anyhow::Result<Vec<NodeId>> {
    // Find peers who have replicas of any post by this author, prioritizing those
    // who have this specific post, then any other posts by the same author.
    // Cross-reference with mesh_peers and reachable_n2 for reachability.
    let mut stmt = self.conn.prepare(
        "SELECT DISTINCT pr.node_id FROM post_replicas pr
INNER JOIN posts p ON pr.post_id = p.id
WHERE p.author = ?1
AND (
pr.node_id IN (SELECT node_id FROM mesh_peers)
OR pr.node_id IN (SELECT reachable_node_id FROM reachable_n2)
)
ORDER BY CASE WHEN pr.post_id = ?2 THEN 0 ELSE 1 END,
pr.last_confirmed_ms DESC
LIMIT 10"
    )?;
    let rows = stmt.query_map(params![author.as_slice(), post_id.as_slice()], |row| {
        let bytes: Vec<u8> = row.get(0)?;
        Ok(bytes)
    })?;
    let mut result = Vec::new();
    for row in rows {
        if let Ok(nid) = blob_to_nodeid(row?) {
            result.push(nid);
        }
    }
    Ok(result)
}
// --- Engagement: post_downstream ---
/// Register a peer as downstream for a post (max 100 per post).
/// Returns true if the peer is (now) registered, false only when the post is
/// at capacity and the peer is not among the registered set.
///
/// Previously an already-registered peer was reported as a capacity failure
/// when the table was full, even though it IS downstream; now re-registration
/// always reports success.
pub fn add_post_downstream(&self, post_id: &PostId, peer_node_id: &NodeId) -> anyhow::Result<bool> {
    let already_registered = self
        .conn
        .query_row(
            "SELECT 1 FROM post_downstream WHERE post_id = ?1 AND peer_node_id = ?2",
            params![post_id.as_slice(), peer_node_id.as_slice()],
            |_| Ok(()),
        )
        .is_ok();
    if already_registered {
        return Ok(true);
    }
    let count: i64 = self.conn.prepare(
        "SELECT COUNT(*) FROM post_downstream WHERE post_id = ?1"
    )?.query_row(params![post_id.as_slice()], |row| row.get(0))?;
    if count >= 100 {
        return Ok(false);
    }
    self.conn.execute(
        "INSERT INTO post_downstream (post_id, peer_node_id, registered_at) VALUES (?1, ?2, ?3)
ON CONFLICT DO NOTHING",
        params![post_id.as_slice(), peer_node_id.as_slice(), now_ms()],
    )?;
    Ok(true)
}
/// All peers registered downstream for a post; malformed ids are skipped.
pub fn get_post_downstream(&self, post_id: &PostId) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self.conn.prepare(
        "SELECT peer_node_id FROM post_downstream WHERE post_id = ?1"
    )?;
    let raw: Vec<Vec<u8>> = stmt
        .query_map(params![post_id.as_slice()], |row| row.get::<_, Vec<u8>>(0))?
        .collect::<Result<_, _>>()?;
    Ok(raw
        .into_iter()
        .filter_map(|bytes| blob_to_nodeid(bytes).ok())
        .collect())
}
/// Unregister one downstream peer for a post.
pub fn remove_post_downstream(&self, post_id: &PostId, peer_node_id: &NodeId) -> anyhow::Result<()> {
    let sql = "DELETE FROM post_downstream WHERE post_id = ?1 AND peer_node_id = ?2";
    self.conn.execute(sql, params![post_id.as_slice(), peer_node_id.as_slice()])?;
    Ok(())
}
// --- Engagement: post_upstream ---
/// Record (or replace) which peer this post was received from.
pub fn set_post_upstream(&self, post_id: &PostId, peer_node_id: &NodeId) -> anyhow::Result<()> {
    let sql = "INSERT INTO post_upstream (post_id, peer_node_id) VALUES (?1, ?2)
ON CONFLICT(post_id) DO UPDATE SET peer_node_id = excluded.peer_node_id";
    self.conn.execute(sql, params![post_id.as_slice(), peer_node_id.as_slice()])?;
    Ok(())
}
/// Get the upstream peer for a post.
pub fn get_post_upstream(&self, post_id: &PostId) -> anyhow::Result<Option<NodeId>> {
let result = self.conn.query_row(
"SELECT peer_node_id FROM post_upstream WHERE post_id = ?1",
params![post_id.as_slice()],
|row| row.get::<_, Vec<u8>>(0),
);
match result {
Ok(bytes) => Ok(bytes.try_into().ok()),
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e.into()),
}
}
/// Number of downstream peers registered for a post.
pub fn get_post_downstream_count(&self, post_id: &PostId) -> anyhow::Result<u32> {
    let mut stmt = self.conn.prepare(
        "SELECT COUNT(*) FROM post_downstream WHERE post_id = ?1"
    )?;
    let n: i64 = stmt.query_row(params![post_id.as_slice()], |row| row.get(0))?;
    Ok(n as u32)
}
// --- Engagement: reactions ---
/// Store a reaction (upsert by reactor+post_id+emoji).
///
/// On conflict only the timestamp and encrypted payload are refreshed —
/// the (reactor, post_id, emoji) triple identifies the reaction.
pub fn store_reaction(&self, reaction: &Reaction) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT INTO reactions (reactor, post_id, emoji, timestamp_ms, encrypted_payload)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT(reactor, post_id, emoji) DO UPDATE SET
timestamp_ms = excluded.timestamp_ms,
encrypted_payload = excluded.encrypted_payload",
        params![
            reaction.reactor.as_slice(),
            reaction.post_id.as_slice(),
            reaction.emoji,
            reaction.timestamp_ms as i64,
            reaction.encrypted_payload,
        ],
    )?;
    Ok(())
}
/// Delete one reaction identified by (reactor, post, emoji).
pub fn remove_reaction(&self, reactor: &NodeId, post_id: &PostId, emoji: &str) -> anyhow::Result<()> {
    let sql = "DELETE FROM reactions WHERE reactor = ?1 AND post_id = ?2 AND emoji = ?3";
    self.conn.execute(sql, params![reactor.as_slice(), post_id.as_slice(), emoji])?;
    Ok(())
}
/// Get all reactions for a post, oldest first.
pub fn get_reactions(&self, post_id: &PostId) -> anyhow::Result<Vec<Reaction>> {
    let mut stmt = self.conn.prepare(
        "SELECT reactor, post_id, emoji, timestamp_ms, encrypted_payload
FROM reactions WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
    )?;
    let rows = stmt.query_map(params![post_id.as_slice()], |row| {
        let reactor: Vec<u8> = row.get(0)?;
        let pid: Vec<u8> = row.get(1)?;
        let emoji: String = row.get(2)?;
        let ts: i64 = row.get(3)?;
        // encrypted_payload is nullable: present only for protected reactions.
        let enc: Option<String> = row.get(4)?;
        Ok((reactor, pid, emoji, ts, enc))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (reactor_bytes, pid_bytes, emoji, ts, enc) = row?;
        // Wrong-length ids indicate corruption and abort the whole read.
        let reactor = blob_to_nodeid(reactor_bytes)?;
        let post_id = blob_to_postid(pid_bytes)?;
        result.push(Reaction {
            reactor,
            emoji,
            post_id,
            timestamp_ms: ts as u64,
            encrypted_payload: enc,
        });
    }
    Ok(result)
}
/// Per-emoji reaction tallies for a post, most popular first.
/// Each tuple is (emoji, total count, whether `my_node_id` reacted with it).
pub fn get_reaction_counts(&self, post_id: &PostId, my_node_id: &NodeId) -> anyhow::Result<Vec<(String, u64, bool)>> {
    let mut stmt = self.conn.prepare(
        "SELECT emoji, COUNT(*) as cnt,
SUM(CASE WHEN reactor = ?2 THEN 1 ELSE 0 END) as my_count
FROM reactions WHERE post_id = ?1 GROUP BY emoji ORDER BY cnt DESC"
    )?;
    let counts = stmt
        .query_map(params![post_id.as_slice(), my_node_id.as_slice()], |row| {
            let emoji: String = row.get(0)?;
            let total: i64 = row.get(1)?;
            let mine: i64 = row.get(2)?;
            Ok((emoji, total as u64, mine > 0))
        })?
        .collect::<Result<Vec<_>, _>>()?;
    Ok(counts)
}
// --- Engagement: comments ---
/// Store a comment.
///
/// NOTE(review): duplicates are suppressed via `ON CONFLICT DO NOTHING`,
/// which only takes effect if the comments table declares a uniqueness
/// constraint — the schema is not visible here; confirm one exists.
pub fn store_comment(&self, comment: &InlineComment) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT INTO comments (author, post_id, content, timestamp_ms, signature)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT DO NOTHING",
        params![
            comment.author.as_slice(),
            comment.post_id.as_slice(),
            comment.content,
            comment.timestamp_ms as i64,
            comment.signature,
        ],
    )?;
    Ok(())
}
/// Get all comments for a post, ordered by timestamp (oldest first).
pub fn get_comments(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
    let mut stmt = self.conn.prepare(
        "SELECT author, post_id, content, timestamp_ms, signature
FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
    )?;
    let rows = stmt.query_map(params![post_id.as_slice()], |row| {
        let author: Vec<u8> = row.get(0)?;
        let pid: Vec<u8> = row.get(1)?;
        let content: String = row.get(2)?;
        let ts: i64 = row.get(3)?;
        let sig: Vec<u8> = row.get(4)?;
        Ok((author, pid, content, ts, sig))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (author_bytes, pid_bytes, content, ts, sig) = row?;
        // Wrong-length ids indicate corruption and abort the whole read.
        let author = blob_to_nodeid(author_bytes)?;
        let post_id = blob_to_postid(pid_bytes)?;
        result.push(InlineComment {
            author,
            post_id,
            content,
            timestamp_ms: ts as u64,
            signature: sig,
        });
    }
    Ok(result)
}
/// Number of comments stored for a post.
pub fn get_comment_count(&self, post_id: &PostId) -> anyhow::Result<u64> {
    let mut stmt = self.conn.prepare(
        "SELECT COUNT(*) FROM comments WHERE post_id = ?1"
    )?;
    let n: i64 = stmt.query_row(params![post_id.as_slice()], |row| row.get(0))?;
    Ok(n as u64)
}
// --- Engagement: comment policies ---
/// Persist (upsert) the JSON-encoded comment policy for a post.
pub fn set_comment_policy(&self, post_id: &PostId, policy: &CommentPolicy) -> anyhow::Result<()> {
    let encoded = serde_json::to_string(policy)?;
    self.conn.execute(
        "INSERT INTO comment_policies (post_id, policy_json) VALUES (?1, ?2)
ON CONFLICT(post_id) DO UPDATE SET policy_json = excluded.policy_json",
        params![post_id.as_slice(), encoded],
    )?;
    Ok(())
}
/// Load the comment policy for a post, or `None` if none was set.
/// Invalid stored JSON surfaces as an error.
pub fn get_comment_policy(&self, post_id: &PostId) -> anyhow::Result<Option<CommentPolicy>> {
    let mut stmt = self.conn.prepare(
        "SELECT policy_json FROM comment_policies WHERE post_id = ?1"
    )?;
    let lookup = stmt.query_row(params![post_id.as_slice()], |row| row.get::<_, String>(0));
    match lookup {
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
        Ok(json) => Ok(Some(serde_json::from_str(&json)?)),
    }
}
// --- Engagement: blob headers ---
/// Upsert the aggregated engagement header for a post.
pub fn store_blob_header(&self, post_id: &PostId, author: &NodeId, header_json: &str, updated_at: u64) -> anyhow::Result<()> {
    let ts = updated_at as i64;
    self.conn.execute(
        "INSERT INTO blob_headers (post_id, author, header_json, updated_at) VALUES (?1, ?2, ?3, ?4)
ON CONFLICT(post_id) DO UPDATE SET
header_json = excluded.header_json,
updated_at = excluded.updated_at",
        params![post_id.as_slice(), author.as_slice(), header_json, ts],
    )?;
    Ok(())
}
/// Fetch a post's aggregated header as (header_json, updated_at),
/// or `None` if no header has been stored.
pub fn get_blob_header(&self, post_id: &PostId) -> anyhow::Result<Option<(String, u64)>> {
    let mut stmt = self.conn.prepare(
        "SELECT header_json, updated_at FROM blob_headers WHERE post_id = ?1"
    )?;
    let lookup = stmt.query_row(params![post_id.as_slice()], |row| {
        Ok((row.get::<_, String>(0)?, row.get::<_, i64>(1)?))
    });
    match lookup {
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
        Ok((json, ts)) => Ok(Some((json, ts as u64))),
    }
}
// --- Engagement: thread metadata ---
/// Link a thread-split child post to its parent (idempotent insert).
pub fn store_thread_meta(&self, meta: &ThreadMeta) -> anyhow::Result<()> {
    let sql = "INSERT INTO thread_meta (post_id, parent_post_id) VALUES (?1, ?2)
ON CONFLICT DO NOTHING";
    self.conn.execute(sql, params![meta.post_id.as_slice(), meta.parent_post_id.as_slice()])?;
    Ok(())
}
/// All child posts recorded as splits of `parent_post_id`.
pub fn get_thread_children(&self, parent_post_id: &PostId) -> anyhow::Result<Vec<PostId>> {
    let mut stmt = self.conn.prepare(
        "SELECT post_id FROM thread_meta WHERE parent_post_id = ?1"
    )?;
    let raw = stmt.query_map(params![parent_post_id.as_slice()], |row| row.get::<_, Vec<u8>>(0))?;
    let mut children = Vec::new();
    for bytes in raw {
        children.push(blob_to_postid(bytes?)?);
    }
    Ok(children)
}
/// The parent of a thread-split child, or `None` if it isn't a split.
pub fn get_thread_parent(&self, post_id: &PostId) -> anyhow::Result<Option<PostId>> {
    let mut stmt = self.conn.prepare(
        "SELECT parent_post_id FROM thread_meta WHERE post_id = ?1"
    )?;
    let lookup = stmt.query_row(params![post_id.as_slice()], |row| row.get::<_, Vec<u8>>(0));
    match lookup {
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e.into()),
        Ok(bytes) => Ok(Some(blob_to_postid(bytes)?)),
    }
}
}
/// Parse a JSON array of hex-encoded node IDs into Vec<NodeId>.
/// Entries that fail hex decoding or are not node-id sized are dropped;
/// invalid outer JSON yields an empty Vec.
fn parse_anchors_json(json: &str) -> Vec<NodeId> {
    let hex_ids: Vec<String> = serde_json::from_str(json).unwrap_or_default();
    let mut anchors = Vec::with_capacity(hex_ids.len());
    for encoded in &hex_ids {
        if let Ok(bytes) = hex::decode(encoded) {
            if let Ok(nid) = bytes.try_into() {
                anchors.push(nid);
            }
        }
    }
    anchors
}
/// Convert a raw SQLite blob into a `PostId`, erroring on wrong length.
fn blob_to_postid(bytes: Vec<u8>) -> anyhow::Result<PostId> {
    match bytes.try_into() {
        Ok(pid) => Ok(pid),
        Err(wrong) => {
            let wrong: Vec<u8> = wrong;
            Err(anyhow::anyhow!("invalid post id length: {}", wrong.len()))
        }
    }
}
/// Convert a raw SQLite blob into a `NodeId`, erroring on wrong length.
fn blob_to_nodeid(bytes: Vec<u8>) -> anyhow::Result<NodeId> {
    match bytes.try_into() {
        Ok(nid) => Ok(nid),
        Err(wrong) => {
            let wrong: Vec<u8> = wrong;
            Err(anyhow::anyhow!("invalid node id length: {}", wrong.len()))
        }
    }
}
/// Current wall-clock time in milliseconds since the Unix epoch.
/// Returns 0 if the system clock reads before the epoch.
fn now_ms() -> i64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_millis() as i64,
        Err(_) => 0,
    }
}
/// Decode one audience row: (node_id, direction, status, requested_at, approved_at).
/// Unknown direction strings default to Outbound and unknown status strings
/// to Pending, so schema drift degrades gracefully instead of erroring.
fn row_to_audience_record(row: &rusqlite::Row) -> anyhow::Result<AudienceRecord> {
    let node_id = blob_to_nodeid(row.get(0)?)?;
    let dir_str: String = row.get(1)?;
    let status_str: String = row.get(2)?;
    let requested_at = row.get::<_, i64>(3)? as u64;
    // approved_at is NULL until the request is acted on.
    let approved_at: Option<i64> = row.get(4)?;
    let direction = match dir_str.as_str() {
        "inbound" => AudienceDirection::Inbound,
        _ => AudienceDirection::Outbound,
    };
    let status = match status_str.as_str() {
        "approved" => AudienceStatus::Approved,
        "denied" => AudienceStatus::Denied,
        _ => AudienceStatus::Pending,
    };
    Ok(AudienceRecord {
        node_id,
        direction,
        status,
        requested_at,
        approved_at: approved_at.map(|v| v as u64),
    })
}
/// Decode one peers-table row into a `PeerRecord`.
/// Column order: node_id, addresses(JSON), last_seen, introduced_by,
/// is_anchor, first_seen.  Unparseable address strings are dropped silently;
/// a corrupt node id (own or introducer) is a hard error.
fn row_to_peer_record(row: &rusqlite::Row) -> anyhow::Result<PeerRecord> {
    let node_id = blob_to_nodeid(row.get(0)?)?;
    let addrs_json: String = row.get(1)?;
    let addr_strings: Vec<String> = serde_json::from_str(&addrs_json).unwrap_or_default();
    let addresses: Vec<SocketAddr> = addr_strings
        .iter()
        .filter_map(|s| s.parse().ok())
        .collect();
    let last_seen = row.get::<_, i64>(2)? as u64;
    // introduced_by is NULL for peers we discovered directly.
    let introduced_by: Option<Vec<u8>> = row.get(3)?;
    let introduced_by = introduced_by
        .map(|b| blob_to_nodeid(b))
        .transpose()?;
    let is_anchor = row.get::<_, i32>(4)? != 0;
    let first_seen = row.get::<_, i64>(5)? as u64;
    Ok(PeerRecord {
        node_id,
        addresses,
        last_seen,
        introduced_by,
        is_anchor,
        first_seen,
    })
}
/// Decode one social_routes row into a `SocialRouteEntry`.
/// Column order: node_id, addresses(JSON), peer_addresses(JSON), relation,
/// status, last_connected_ms, last_seen_ms, reach_method, preferred_tree(JSON).
/// Every textual field falls back to a default on parse failure
/// (Follow / Disconnected / Direct / empty lists) so a partially-migrated
/// row still loads.
fn row_to_social_route(row: &rusqlite::Row) -> anyhow::Result<SocialRouteEntry> {
    let node_id = blob_to_nodeid(row.get(0)?)?;
    let addrs_json: String = row.get(1)?;
    let addr_strings: Vec<String> = serde_json::from_str(&addrs_json).unwrap_or_default();
    let addresses: Vec<std::net::SocketAddr> = addr_strings
        .iter()
        .filter_map(|s| s.parse().ok())
        .collect();
    let peer_addrs_json: String = row.get(2)?;
    let peer_addresses: Vec<PeerWithAddress> =
        serde_json::from_str(&peer_addrs_json).unwrap_or_default();
    let relation_str: String = row.get(3)?;
    let relation: SocialRelation = relation_str.parse().unwrap_or(SocialRelation::Follow);
    let status_str: String = row.get(4)?;
    let status: SocialStatus = status_str.parse().unwrap_or(SocialStatus::Disconnected);
    let last_connected_ms = row.get::<_, i64>(5)? as u64;
    let last_seen_ms = row.get::<_, i64>(6)? as u64;
    let method_str: String = row.get(7)?;
    let reach_method: ReachMethod = method_str.parse().unwrap_or(ReachMethod::Direct);
    // Column 8 may be missing on old schemas — treat a read failure as "[]".
    let pref_tree_json: String = row.get::<_, String>(8).unwrap_or_else(|_| "[]".to_string());
    let preferred_tree = parse_anchors_json(&pref_tree_json);
    Ok(SocialRouteEntry {
        node_id,
        addresses,
        peer_addresses,
        relation,
        status,
        last_connected_ms,
        last_seen_ms,
        reach_method,
        preferred_tree,
    })
}
#[cfg(test)]
mod tests {
use super::*;
use crate::types::DeviceProfile;
/// Test fixture: deterministic 32-byte node id filled with `byte`.
fn make_node_id(byte: u8) -> NodeId {
[byte; 32]
}
/// Test fixture: deterministic 32-byte post id filled with `byte`.
fn make_post_id(byte: u8) -> PostId {
[byte; 32]
}
/// Fresh in-memory SQLite-backed Storage for a single test.
fn temp_storage() -> Storage {
Storage::open(":memory:").unwrap()
}
/// Test helper: backdate (or forward-date) a peer's last_seen directly in SQL,
/// since no public API exposes this.
fn set_peer_last_seen(storage: &Storage, node_id: &NodeId, last_seen: i64) {
storage.conn.execute(
"UPDATE peers SET last_seen = ?1 WHERE node_id = ?2",
params![last_seen, node_id.as_slice()],
).unwrap();
}
#[test]
// A freshly upserted peer (last_seen = now) must appear in the gossip list.
fn gossip_includes_recent_peer() {
let s = temp_storage();
let nid = make_node_id(1);
let addr: std::net::SocketAddr = "10.0.0.1:1234".parse().unwrap();
s.upsert_peer(&nid, &[addr], None).unwrap();
let gossip = s.build_gossip_list().unwrap();
assert_eq!(gossip.len(), 1);
assert_eq!(gossip[0].node_id, nid);
}
#[test]
// A peer last seen 8 days ago must be excluded from gossip
// (implies the staleness window is at most 8 days).
fn gossip_excludes_stale_peer() {
let s = temp_storage();
let nid = make_node_id(2);
let addr: std::net::SocketAddr = "10.0.0.2:1234".parse().unwrap();
s.upsert_peer(&nid, &[addr], None).unwrap();
// Set last_seen to 8 days ago
let eight_days_ago = now_ms() - 8 * 24 * 60 * 60 * 1000;
set_peer_last_seen(&s, &nid, eight_days_ago);
let gossip = s.build_gossip_list().unwrap();
assert!(gossip.is_empty());
}
#[test]
// Gossip is address-free, so a peer with no known addresses still qualifies.
fn gossip_includes_peer_without_addresses() {
let s = temp_storage();
let nid = make_node_id(3);
// add_peer stores with empty addresses — still included in address-free gossip
s.add_peer(&nid).unwrap();
let gossip = s.build_gossip_list().unwrap();
assert_eq!(gossip.len(), 1);
}
#[test]
fn gossip_includes_non_followed_peer() {
    // Gossip inclusion does not depend on a follow relationship.
    let storage = temp_storage();
    let peer_id = make_node_id(4);
    let addr = "10.0.0.4:1234".parse::<std::net::SocketAddr>().unwrap();
    storage.upsert_peer(&peer_id, &[addr], None).unwrap();

    let gossip = storage.build_gossip_list().unwrap();
    assert_eq!(gossip.len(), 1);
    assert_eq!(gossip[0].node_id, peer_id);
}
// ---- Phase F: neighbor, wide peer, worm, audience tests ----
#[test]
fn store_and_list_peer_neighbors() {
    // Storing a peer's reported neighbor list populates the two-hop set;
    // our own node id must be filtered out of the stored neighbors.
    let s = temp_storage();
    let peer_a = make_node_id(1);
    let neighbor_b = make_node_id(2);
    let neighbor_c = make_node_id(3);
    let our_id = make_node_id(99);
    let gossip = vec![
        GossipPeerInfo { node_id: neighbor_b, is_anchor: false },
        GossipPeerInfo { node_id: neighbor_c, is_anchor: true },
        GossipPeerInfo { node_id: our_id, is_anchor: false }, // should be skipped
    ];
    let count = s.store_peer_neighbors(&peer_a, &gossip, &our_id).unwrap();
    // Only the two real neighbors are stored; our_id was excluded.
    assert_eq!(count, 2);
    // lookup_in_two_hop: membership check against the stored neighbor set.
    assert!(s.lookup_in_two_hop(&neighbor_b).unwrap());
    assert!(s.lookup_in_two_hop(&neighbor_c).unwrap());
    assert!(!s.lookup_in_two_hop(&our_id).unwrap());
    // list_peers_with_neighbor: reverse lookup — which peers reported this neighbor.
    let reporters = s.list_peers_with_neighbor(&neighbor_b).unwrap();
    assert_eq!(reporters, vec![peer_a]);
    // get_two_hop_set: the full stored neighbor set.
    let two_hop = s.get_two_hop_set().unwrap();
    assert_eq!(two_hop.len(), 2);
    assert!(two_hop.contains(&neighbor_b));
    assert!(two_hop.contains(&neighbor_c));
}
#[test]
fn prune_stale_neighbors() {
    // Neighbor reports older than the given max age are pruned from the two-hop set.
    let s = temp_storage();
    let peer = make_node_id(1);
    let neighbor = make_node_id(2);
    let our_id = make_node_id(99);
    s.store_peer_neighbors(&peer, &[GossipPeerInfo { node_id: neighbor, is_anchor: false }], &our_id).unwrap();
    // Should find it
    assert!(s.lookup_in_two_hop(&neighbor).unwrap());
    // Backdate the report to 2 hours ago, then prune with a 1-hour max age
    // so the entry counts as stale.
    s.conn.execute(
        "UPDATE peer_neighbors SET reported_at = ?1",
        params![now_ms() - 7200_000],
    ).unwrap();
    let pruned = s.prune_stale_neighbors(3600_000).unwrap();
    assert_eq!(pruned, 1);
    assert!(!s.lookup_in_two_hop(&neighbor).unwrap());
}
#[test]
fn wide_peer_flag() {
    // set_wide_peer marks a peer as "wide"; clear_all_wide_peers resets every flag.
    let storage = temp_storage();
    let peer_id = make_node_id(1);
    storage.add_peer(&peer_id).unwrap();

    storage.set_wide_peer(&peer_id, true).unwrap();
    let wide_peers = storage.list_wide_peers().unwrap();
    assert_eq!(wide_peers.len(), 1);
    assert_eq!(wide_peers[0].node_id, peer_id);

    storage.clear_all_wide_peers().unwrap();
    assert!(storage.list_wide_peers().unwrap().is_empty());
}
#[test]
fn worm_cooldown() {
    // Recording a worm miss puts the target into cooldown for the window.
    let storage = temp_storage();
    let target = make_node_id(1);
    let one_hour_ms = 3600_000;
    assert!(!storage.is_worm_cooldown(&target, one_hour_ms).unwrap());
    storage.record_worm_miss(&target).unwrap();
    assert!(storage.is_worm_cooldown(&target, one_hour_ms).unwrap());
}
#[test]
fn relay_cooldown() {
    // A relay miss should start a cooldown window for that target.
    let s = temp_storage();
    let target = make_node_id(1);
    // No miss recorded yet: no cooldown regardless of window size.
    assert!(!s.is_relay_cooldown(&target, 3600_000).unwrap());
    s.record_relay_miss(&target).unwrap();
    // A just-recorded miss is inside a one-hour window.
    assert!(s.is_relay_cooldown(&target, 3600_000).unwrap());
    // A much shorter window must also match the just-recorded miss.
    // Previously this asserted a 1 ms window, which was timing-flaky: any
    // scheduler pause between record_relay_miss and the check made the
    // assertion fail. A one-minute window keeps the intent without the race.
    assert!(s.is_relay_cooldown(&target, 60_000).unwrap());
}
#[test]
fn n2_n3_crud() {
    // N2 = nodes reported in our direct peers' N1 (connection) lists;
    // N3 = nodes reported in their N2 lists. Exercises set/find/clear plus
    // the deduplicated N2 share.
    let s = temp_storage();
    let reporter_a = make_node_id(1);
    let reporter_b = make_node_id(2);
    let node_x = make_node_id(10);
    let node_y = make_node_id(11);
    let node_z = make_node_id(12);
    // Set reporter_a's N1 (their connections) → our N2
    s.set_peer_n1(&reporter_a, &[node_x, node_y]).unwrap();
    let found = s.find_in_n2(&node_x).unwrap();
    assert_eq!(found, vec![reporter_a]);
    // Set reporter_b's N1 → our N2
    s.set_peer_n1(&reporter_b, &[node_y, node_z]).unwrap();
    let found = s.find_in_n2(&node_y).unwrap();
    assert_eq!(found.len(), 2); // Both reporters have node_y
    // Build N2 share (deduplicated)
    let n2_share = s.build_n2_share().unwrap();
    assert_eq!(n2_share.len(), 3); // node_x, node_y, node_z
    // Clear reporter_a's N2 contributions (two rows: node_x and node_y)
    let cleared = s.clear_peer_n2(&reporter_a).unwrap();
    assert_eq!(cleared, 2);
    let found = s.find_in_n2(&node_x).unwrap();
    assert!(found.is_empty());
    // N3 operations mirror the N2 API one ring further out.
    s.set_peer_n2(&reporter_a, &[node_z]).unwrap();
    let found = s.find_in_n3(&node_z).unwrap();
    assert_eq!(found, vec![reporter_a]);
    s.clear_peer_n3(&reporter_a).unwrap();
    let found = s.find_in_n3(&node_z).unwrap();
    assert!(found.is_empty());
}
#[test]
fn n1_share_build() {
    // The N1 share we advertise is the union of mesh peers and nodes with
    // social routes (here: a follow).
    let s = temp_storage();
    let peer_a = make_node_id(1);
    let follow_b = make_node_id(2);
    let addr: std::net::SocketAddr = "10.0.0.1:4433".parse().unwrap();
    // Add a mesh peer
    s.add_mesh_peer(&peer_a, PeerSlotKind::Local, 0).unwrap();
    // Add a follow with social route
    s.add_follow(&follow_b).unwrap();
    s.upsert_social_route(&SocialRouteEntry {
        node_id: follow_b,
        addresses: vec![addr],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Disconnected,
        last_connected_ms: 0,
        last_seen_ms: 1000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    }).unwrap();
    // Both sources feed into the share.
    let n1 = s.build_n1_share().unwrap();
    assert!(n1.contains(&peer_a));
    assert!(n1.contains(&follow_b));
}
#[test]
fn diversity_scoring() {
    // A reporter's diversity score counts the N2 nodes only it contributes.
    let storage = temp_storage();
    let reporter_a = make_node_id(1);
    let reporter_b = make_node_id(2);
    let unique_node = make_node_id(10);
    let shared_node = make_node_id(11);

    // reporter_a knows both nodes; reporter_b only the shared one.
    storage.set_peer_n1(&reporter_a, &[unique_node, shared_node]).unwrap();
    storage.set_peer_n1(&reporter_b, &[shared_node]).unwrap();

    // Only unique_node is exclusive to reporter_a.
    assert_eq!(storage.count_unique_n2_for_reporter(&reporter_a, &[]).unwrap(), 1);
    // Everything reporter_b reports is also reported by reporter_a.
    assert_eq!(storage.count_unique_n2_for_reporter(&reporter_b, &[]).unwrap(), 0);
}
#[test]
fn find_any_in_n2_n3() {
    // Batch lookup returns hits from both rings, N2 entries before N3 ones.
    let storage = temp_storage();
    let reporter = make_node_id(1);
    let in_n2 = make_node_id(10);
    let in_n3 = make_node_id(11);
    let missing = make_node_id(12);

    storage.set_peer_n1(&reporter, &[in_n2]).unwrap();
    storage.set_peer_n2(&reporter, &[in_n3]).unwrap();

    let hits = storage.find_any_in_n2_n3(&[in_n2, in_n3, missing]).unwrap();
    assert_eq!(hits.len(), 2);
    assert_eq!(hits[0].2, 2); // the N2 hit comes first
    assert_eq!(hits[1].2, 3); // then the N3 hit
}
#[test]
fn mesh_peers_crud() {
    // Add, list, count-by-kind, and remove mesh peers.
    // (PeerSlotKind is already imported at the top of the file.)
    let storage = temp_storage();
    let peer_id = make_node_id(1);
    storage.add_mesh_peer(&peer_id, PeerSlotKind::Local, 4).unwrap();

    let peers = storage.list_mesh_peers().unwrap();
    assert_eq!(peers.len(), 1);
    let (node_id, kind, hops) = &peers[0];
    assert_eq!(*node_id, peer_id);
    assert_eq!(kind, "local");
    assert_eq!(*hops, 4);

    assert_eq!(storage.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 1);
    assert_eq!(storage.count_mesh_peers_by_kind(PeerSlotKind::Wide).unwrap(), 0);

    storage.remove_mesh_peer(&peer_id).unwrap();
    assert_eq!(storage.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 0);
}
#[test]
fn audience_crud() {
    // Pending → approved → removed lifecycle for an inbound audience entry.
    let storage = temp_storage();
    let peer_id = make_node_id(1);

    storage.store_audience(&peer_id, AudienceDirection::Inbound, AudienceStatus::Pending).unwrap();
    let pending = storage.list_audience(AudienceDirection::Inbound, Some(AudienceStatus::Pending)).unwrap();
    assert_eq!(pending.len(), 1);
    assert_eq!(pending[0].status, AudienceStatus::Pending);

    // Re-storing the same (node, direction) with Approved upgrades the
    // existing entry rather than inserting a second row.
    storage.store_audience(&peer_id, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();
    let members = storage.list_audience_members().unwrap();
    assert_eq!(members.len(), 1);
    assert_eq!(members[0], peer_id);

    storage.remove_audience(&peer_id, AudienceDirection::Inbound).unwrap();
    assert!(storage.list_audience_members().unwrap().is_empty());
}
// ---- Social routes tests ----
#[test]
fn social_route_crud() {
    use crate::types::{PeerWithAddress, ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    // Full round-trip of a social route row: upsert, existence check, field
    // fidelity on read-back, listing, status-only update, and removal.
    let s = temp_storage();
    let nid = make_node_id(1);
    let addr: std::net::SocketAddr = "10.0.0.1:4433".parse().unwrap();
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![addr],
        // A known contact of this node (hex-encoded id + its addresses).
        peer_addresses: vec![PeerWithAddress {
            n: hex::encode(make_node_id(2)),
            a: vec!["10.0.0.2:4433".to_string()],
        }],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 1000,
        last_seen_ms: 2000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    };
    s.upsert_social_route(&entry).unwrap();
    assert!(s.has_social_route(&nid).unwrap());
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.relation, SocialRelation::Follow);
    assert_eq!(got.status, SocialStatus::Online);
    assert_eq!(got.addresses.len(), 1);
    assert_eq!(got.peer_addresses.len(), 1);
    // List all
    let routes = s.list_social_routes().unwrap();
    assert_eq!(routes.len(), 1);
    // Update status only
    s.set_social_route_status(&nid, SocialStatus::Disconnected).unwrap();
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.status, SocialStatus::Disconnected);
    // Remove
    s.remove_social_route(&nid).unwrap();
    assert!(!s.has_social_route(&nid).unwrap());
}
#[test]
fn social_route_rebuild() {
    // rebuild_social_routes derives relations from follows + approved audience:
    // follow-only → Follow, audience-only → Audience, both → Mutual.
    let storage = temp_storage();
    let follow_only = make_node_id(1);
    let audience_only = make_node_id(2);
    let both = make_node_id(3);

    storage.add_follow(&follow_only).unwrap();
    storage.add_follow(&both).unwrap();
    storage.store_audience(&audience_only, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();
    storage.store_audience(&both, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();

    assert_eq!(storage.rebuild_social_routes().unwrap(), 3);
    assert_eq!(
        storage.get_social_route(&follow_only).unwrap().unwrap().relation,
        SocialRelation::Follow
    );
    assert_eq!(
        storage.get_social_route(&audience_only).unwrap().unwrap().relation,
        SocialRelation::Audience
    );
    assert_eq!(
        storage.get_social_route(&both).unwrap().unwrap().relation,
        SocialRelation::Mutual
    );
}
#[test]
fn social_route_stale() {
    use crate::types::{ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    // A route whose last_seen_ms is far in the past must be reported by
    // list_stale_social_routes for any positive threshold.
    let s = temp_storage();
    let nid = make_node_id(1);
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 0,
        last_seen_ms: 0,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    };
    s.upsert_social_route(&entry).unwrap();
    // With last_seen_ms=0 (the epoch), it should be stale relative to now
    // even with a generous 1-second threshold.
    let stale = s.list_stale_social_routes(1000).unwrap();
    assert_eq!(stale.len(), 1);
}
#[test]
fn reconnect_watchers() {
    // Multiple watchers may register for one target; clearing removes them all.
    let storage = temp_storage();
    let target = make_node_id(1);
    for watcher_byte in [2u8, 3] {
        storage.add_reconnect_watcher(&target, &make_node_id(watcher_byte)).unwrap();
    }
    assert_eq!(storage.get_reconnect_watchers(&target).unwrap().len(), 2);

    storage.clear_reconnect_watchers(&target).unwrap();
    assert!(storage.get_reconnect_watchers(&target).unwrap().is_empty());
}
// ---- CDN manifest tests ----
#[test]
fn cdn_manifest_crud() {
    // Store, overwrite, and look up CDN manifests keyed by content id.
    let storage = temp_storage();
    let cid = [42u8; 32];
    let author = make_node_id(1);

    let original = r#"{"test": true}"#;
    storage.store_cdn_manifest(&cid, original, &author, 1000).unwrap();
    assert_eq!(storage.get_cdn_manifest(&cid).unwrap().unwrap(), original);

    // Re-storing the same cid replaces the manifest body.
    let replacement = r#"{"test": true, "updated": true}"#;
    storage.store_cdn_manifest(&cid, replacement, &author, 2000).unwrap();
    assert_eq!(storage.get_cdn_manifest(&cid).unwrap().unwrap(), replacement);

    // Unknown cid yields None.
    assert!(storage.get_cdn_manifest(&[99u8; 32]).unwrap().is_none());

    // Author-scoped listing returns the single stored manifest.
    let manifests = storage.get_manifests_for_author_blobs(&author).unwrap();
    assert_eq!(manifests.len(), 1);
    assert_eq!(manifests[0].0, cid);
}
#[test]
fn blob_upstream_crud() {
    // Each blob has at most one upstream source; re-storing replaces it.
    let storage = temp_storage();
    let cid = [42u8; 32];
    let first_source = make_node_id(1);
    let first_addrs = vec!["10.0.0.1:4433".to_string()];

    storage.store_blob_upstream(&cid, &first_source, &first_addrs).unwrap();
    let (node_id, addrs) = storage.get_blob_upstream(&cid).unwrap().unwrap();
    assert_eq!(node_id, first_source);
    assert_eq!(addrs, first_addrs);

    // Unknown cid yields None.
    assert!(storage.get_blob_upstream(&[99u8; 32]).unwrap().is_none());

    // Storing again with a different source overwrites the record.
    let second_source = make_node_id(2);
    storage.store_blob_upstream(&cid, &second_source, &[]).unwrap();
    let (node_id, _) = storage.get_blob_upstream(&cid).unwrap().unwrap();
    assert_eq!(node_id, second_source);
}
#[test]
fn blob_downstream_crud_and_limit() {
    // Downstream registrations are capped at 100 per blob; removal frees a slot.
    let storage = temp_storage();
    let cid = [42u8; 32];

    // Fill all 100 slots.
    for i in 0..100u8 {
        let accepted = storage
            .add_blob_downstream(&cid, &make_node_id(i), &[format!("10.0.0.{}:4433", i)])
            .unwrap();
        assert!(accepted, "should accept peer {}", i);
    }
    assert_eq!(storage.get_blob_downstream_count(&cid).unwrap(), 100);

    // The 101st registration is refused (returns false, not an error).
    let overflow_peer = make_node_id(200);
    assert!(
        !storage.add_blob_downstream(&cid, &overflow_peer, &[]).unwrap(),
        "should reject 101st downstream"
    );

    assert_eq!(storage.get_blob_downstream(&cid).unwrap().len(), 100);

    // Removing one frees capacity for the previously rejected peer.
    storage.remove_blob_downstream(&cid, &make_node_id(0)).unwrap();
    assert_eq!(storage.get_blob_downstream_count(&cid).unwrap(), 99);
    assert!(
        storage.add_blob_downstream(&cid, &overflow_peer, &[]).unwrap(),
        "should accept after removal"
    );
}
#[test]
fn blob_pin_unpin() {
    // Pinning marks a blob as protected; unpinning clears the flag.
    let s = temp_storage();
    let cid = [42u8; 32];
    // Consistency fix: use the PostId helper for the post id. The previous
    // make_node_id(1) only compiled because NodeId and PostId share the
    // [u8; 32] representation.
    let post_id = make_post_id(1);
    let author = make_node_id(2);
    s.record_blob(&cid, &post_id, &author, 1000, "image/png", 100).unwrap();
    // Not pinned by default
    assert!(!s.is_blob_pinned(&cid));
    // Pin
    s.pin_blob(&cid).unwrap();
    assert!(s.is_blob_pinned(&cid));
    // Unpin
    s.unpin_blob(&cid).unwrap();
    assert!(!s.is_blob_pinned(&cid));
}
#[test]
fn eviction_candidates_with_replicas() {
    // Eviction candidates report how many fresh peer replicas exist for the
    // post each blob belongs to.
    let s = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let cid1 = [20u8; 32];
    let cid2 = [30u8; 32];
    s.record_blob(&cid1, &post_id, &author, 500, "image/png", 100).unwrap();
    s.record_blob(&cid2, &post_id, &author, 300, "image/jpeg", 200).unwrap();
    // Insert two freshly-confirmed replicas for the post directly into
    // post_replicas, bypassing any higher-level API.
    let peer1 = make_node_id(10);
    let peer2 = make_node_id(11);
    let now = super::now_ms();
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer1.as_slice(), now as i64],
    ).unwrap();
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer2.as_slice(), now as i64],
    ).unwrap();
    // 1-hour staleness window: both replicas were confirmed "now", so they count.
    let candidates = s.get_eviction_candidates(3600_000).unwrap();
    assert_eq!(candidates.len(), 2);
    // Both blobs belong to the same post, so both report 2 peer copies.
    for c in &candidates {
        assert_eq!(c.peer_copies, 2);
        assert_eq!(c.author, author);
    }
}
#[test]
fn eviction_candidates_stale_replicas_excluded() {
    // Replicas confirmed longer ago than the staleness window must not be
    // counted toward a blob's peer_copies.
    let s = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let cid = [20u8; 32];
    s.record_blob(&cid, &post_id, &author, 500, "image/png", 100).unwrap();
    // Add a stale replica (confirmed 2 hours ago) directly into post_replicas.
    let peer = make_node_id(10);
    let two_hours_ago = super::now_ms() - 7200_000;
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer.as_slice(), two_hours_ago as i64],
    ).unwrap();
    // With 1-hour staleness, the replica should be excluded
    let candidates = s.get_eviction_candidates(3600_000).unwrap();
    assert_eq!(candidates.len(), 1);
    assert_eq!(candidates[0].peer_copies, 0);
}
#[test]
fn cdn_cleanup_for_blob() {
    // cleanup_cdn_for_blob wipes the manifest, upstream, and downstream records.
    let storage = temp_storage();
    let cid = [42u8; 32];
    let author = make_node_id(1);
    let peer = make_node_id(2);

    storage.store_cdn_manifest(&cid, r#"{"test": true}"#, &author, 100).unwrap();
    storage.store_blob_upstream(&cid, &peer, &["10.0.0.1:4433".to_string()]).unwrap();
    storage.add_blob_downstream(&cid, &peer, &["10.0.0.2:4433".to_string()]).unwrap();

    // All three records exist before cleanup...
    assert!(storage.get_cdn_manifest(&cid).unwrap().is_some());
    assert!(storage.get_blob_upstream(&cid).unwrap().is_some());
    assert_eq!(storage.get_blob_downstream_count(&cid).unwrap(), 1);

    storage.cleanup_cdn_for_blob(&cid).unwrap();

    // ...and all three are gone afterwards.
    assert!(storage.get_cdn_manifest(&cid).unwrap().is_none());
    assert!(storage.get_blob_upstream(&cid).unwrap().is_none());
    assert_eq!(storage.get_blob_downstream_count(&cid).unwrap(), 0);
}
#[test]
fn get_blobs_for_post() {
    // All blob cids recorded against a post are returned.
    let storage = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let png_cid = [20u8; 32];
    let jpeg_cid = [30u8; 32];

    storage.record_blob(&png_cid, &post_id, &author, 500, "image/png", 100).unwrap();
    storage.record_blob(&jpeg_cid, &post_id, &author, 300, "image/jpeg", 200).unwrap();

    let cids = storage.get_blobs_for_post(&post_id).unwrap();
    assert_eq!(cids.len(), 2);
    assert!(cids.contains(&png_cid));
    assert!(cids.contains(&jpeg_cid));
}
#[test]
fn remove_blob_upstream() {
    // Removing the upstream record leaves no entry for the cid.
    let storage = temp_storage();
    let cid = [42u8; 32];
    let peer = make_node_id(1);

    storage.store_blob_upstream(&cid, &peer, &["10.0.0.1:4433".to_string()]).unwrap();
    assert!(storage.get_blob_upstream(&cid).unwrap().is_some());

    storage.remove_blob_upstream(&cid).unwrap();
    assert!(storage.get_blob_upstream(&cid).unwrap().is_none());
}
#[test]
fn author_post_neighborhood() {
    // get_author_post_neighborhood returns posts around a timestamp pivot:
    // `prev` newest-first (descending from the pivot), `next` oldest-first
    // (ascending after the pivot); the pivot timestamp itself is excluded.
    let s = temp_storage();
    let author = make_node_id(1);
    // Create posts at timestamps 100, 200, 300, 400, 500
    for ts in [100u64, 200, 300, 400, 500] {
        let post = Post {
            author,
            content: format!("post at {}", ts),
            attachments: vec![],
            timestamp_ms: ts,
        };
        // Post ids are content-addressed: blake3 over the serialized post.
        let id = blake3::hash(&serde_json::to_vec(&post).unwrap());
        s.store_post(id.as_bytes(), &post).unwrap();
    }
    // Neighborhood around ts=300
    let (prev, next) = s.get_author_post_neighborhood(&author, 300, 10).unwrap();
    assert_eq!(prev.len(), 2); // ts=200, ts=100
    assert_eq!(next.len(), 2); // ts=400, ts=500
    assert_eq!(prev[0].timestamp_ms, 200); // most recent first in prev
    assert_eq!(next[0].timestamp_ms, 400); // oldest first in next
}
#[test]
fn group_key_crud() {
    // End-to-end lifecycle of a circle's group key: create, look up (by id
    // and by circle name), per-member wrapped keys, seed storage, epoch
    // rotation, membership map, and deletion.
    let s = temp_storage();
    let admin = make_node_id(1);
    let member = make_node_id(2);
    let group_id = [42u8; 32];
    let pubkey = [99u8; 32];
    let seed = [55u8; 32];
    // Create group key at epoch 1, persisting the seed alongside it.
    let record = crate::types::GroupKeyRecord {
        group_id,
        circle_name: "friends".to_string(),
        epoch: 1,
        group_public_key: pubkey,
        admin,
        created_at: 1000,
    };
    s.create_group_key(&record, Some(&seed)).unwrap();
    // Retrieve by group_id
    let got = s.get_group_key(&group_id).unwrap().unwrap();
    assert_eq!(got.circle_name, "friends");
    assert_eq!(got.epoch, 1);
    assert_eq!(got.group_public_key, pubkey);
    assert_eq!(got.admin, admin);
    // Retrieve by circle name
    let got2 = s.get_group_key_by_circle("friends").unwrap().unwrap();
    assert_eq!(got2.group_id, group_id);
    // Store a member's wrapped (encrypted) copy of the group key.
    let mk = crate::types::GroupMemberKey {
        member,
        epoch: 1,
        wrapped_group_key: vec![0u8; 60],
    };
    s.store_group_member_key(&group_id, &mk).unwrap();
    let keys = s.get_group_member_keys(&group_id, 1).unwrap();
    assert_eq!(keys.len(), 1);
    assert_eq!(keys[0].member, member);
    // Lookup is keyed by (group, epoch, member): present for `member`,
    // absent for `admin` who has no wrapped key stored.
    let my_key = s.get_my_group_member_key(&group_id, 1, &member).unwrap();
    assert!(my_key.is_some());
    let no_key = s.get_my_group_member_key(&group_id, 1, &admin).unwrap();
    assert!(no_key.is_none());
    // Store seed
    s.store_group_seed(&group_id, 1, &seed).unwrap();
    let got_seed = s.get_group_seed(&group_id, 1).unwrap().unwrap();
    assert_eq!(got_seed, seed);
    // Rotate to epoch 2 with a new public key and seed.
    let new_pubkey = [88u8; 32];
    let new_seed = [77u8; 32];
    s.update_group_epoch(&group_id, 2, &new_pubkey, Some(&new_seed)).unwrap();
    let updated = s.get_group_key(&group_id).unwrap().unwrap();
    assert_eq!(updated.epoch, 2);
    assert_eq!(updated.group_public_key, new_pubkey);
    // get_all_group_members requires circle + members
    s.create_circle("friends").unwrap();
    s.add_circle_member("friends", &member).unwrap();
    let all = s.get_all_group_members().unwrap();
    assert!(all.contains_key(&group_id));
    assert!(all[&group_id].contains(&member));
    // Delete removes both the key record and its stored seeds.
    s.delete_group_key(&group_id).unwrap();
    assert!(s.get_group_key(&group_id).unwrap().is_none());
    assert!(s.get_group_seed(&group_id, 1).unwrap().is_none());
}
#[test]
fn group_seeds_map() {
    // get_all_group_seeds_map keys entries by (group_id, epoch) and returns
    // each seed together with a public key re-derived from that seed.
    let s = temp_storage();
    let admin = make_node_id(1);
    let group_id = [42u8; 32];
    let pubkey = [99u8; 32];
    let seed = [55u8; 32];
    let record = crate::types::GroupKeyRecord {
        group_id,
        circle_name: "test".to_string(),
        epoch: 1,
        group_public_key: pubkey,
        admin,
        created_at: 1000,
    };
    s.create_group_key(&record, Some(&seed)).unwrap();
    s.store_group_seed(&group_id, 1, &seed).unwrap();
    let map = s.get_all_group_seeds_map().unwrap();
    assert!(map.contains_key(&(group_id, 1)));
    let (got_seed, got_pubkey) = map[&(group_id, 1)];
    assert_eq!(got_seed, seed);
    // The returned pubkey is derived from the seed (ed25519 signing key →
    // verifying key), not the arbitrary `pubkey` stored in the record above.
    let expected_pubkey = ed25519_dalek::SigningKey::from_bytes(&seed).verifying_key().to_bytes();
    assert_eq!(got_pubkey, expected_pubkey);
}
// ---- Preferred peers tests ----
#[test]
fn preferred_peers_crud() {
    // Add / list / membership-check / remove for the preferred-peer set.
    let storage = temp_storage();
    let peer_a = make_node_id(1);
    let peer_b = make_node_id(2);

    // Empty to start.
    assert_eq!(storage.count_preferred_peers().unwrap(), 0);
    assert!(!storage.is_preferred_peer(&peer_a).unwrap());

    storage.add_preferred_peer(&peer_a).unwrap();
    storage.add_preferred_peer(&peer_b).unwrap();
    assert_eq!(storage.count_preferred_peers().unwrap(), 2);
    assert!(storage.is_preferred_peer(&peer_a).unwrap());
    assert!(storage.is_preferred_peer(&peer_b).unwrap());

    let listed = storage.list_preferred_peers().unwrap();
    assert_eq!(listed.len(), 2);
    assert!(listed.contains(&peer_a));
    assert!(listed.contains(&peer_b));

    storage.remove_preferred_peer(&peer_a).unwrap();
    assert!(!storage.is_preferred_peer(&peer_a).unwrap());
    assert_eq!(storage.count_preferred_peers().unwrap(), 1);
}
#[test]
fn preferred_peers_idempotent() {
    // Adding the same peer twice neither errors nor double-counts.
    let storage = temp_storage();
    let peer = make_node_id(1);
    for _ in 0..2 {
        storage.add_preferred_peer(&peer).unwrap();
    }
    assert_eq!(storage.count_preferred_peers().unwrap(), 1);
}
#[test]
fn profile_stores_preferred_peers() {
    // The preferred_peers list round-trips through profile storage, along
    // with the public_visible flag and the optional avatar cid.
    let s = temp_storage();
    let nid = make_node_id(1);
    let pref_a = make_node_id(10);
    let pref_b = make_node_id(11);
    let profile = PublicProfile {
        node_id: nid,
        display_name: "test".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![pref_a, pref_b],
        public_visible: true,
        avatar_cid: None,
    };
    s.store_profile(&profile).unwrap();
    let got = s.get_profile(&nid).unwrap().unwrap();
    assert_eq!(got.preferred_peers.len(), 2);
    assert!(got.preferred_peers.contains(&pref_a));
    assert!(got.preferred_peers.contains(&pref_b));
    assert!(got.public_visible);
    assert!(got.avatar_cid.is_none());
}
#[test]
fn preferred_slot_counts() {
    // Pin the per-device peer-slot budget: desktop 10 preferred / 71 local,
    // mobile 3 preferred / 7 local, with the grand total (including wide
    // slots) unchanged at 101 and 15 respectively.
    assert_eq!(DeviceProfile::Desktop.preferred_slots(), 10);
    assert_eq!(DeviceProfile::Desktop.local_slots(), 71);
    assert_eq!(DeviceProfile::Mobile.preferred_slots(), 3);
    assert_eq!(DeviceProfile::Mobile.local_slots(), 7);
    // Total unchanged
    assert_eq!(
        DeviceProfile::Desktop.preferred_slots() + DeviceProfile::Desktop.local_slots() + DeviceProfile::Desktop.wide_slots(),
        101
    );
    assert_eq!(
        DeviceProfile::Mobile.preferred_slots() + DeviceProfile::Mobile.local_slots() + DeviceProfile::Mobile.wide_slots(),
        15
    );
}
#[test]
fn peer_slot_kind_preferred_roundtrip() {
    // FromStr and Display for PeerSlotKind::Preferred are inverses.
    let parsed = "preferred".parse::<PeerSlotKind>().unwrap();
    assert_eq!(parsed, PeerSlotKind::Preferred);
    assert_eq!(parsed.to_string(), "preferred");
}
// ---- Preferred tree tests ----
#[test]
fn build_preferred_tree_empty() {
    // With no stored profile, the tree degenerates to just the target itself.
    let storage = temp_storage();
    let target = make_node_id(1);
    let tree = storage.build_preferred_tree_for(&target).unwrap();
    assert_eq!(tree.len(), 1);
    assert!(tree.contains(&target));
}
#[test]
fn build_preferred_tree_two_layers() {
    // The preferred tree expands two layers out from the target: the
    // target's preferred peers (L1) and each L1 peer's preferred peers (L2),
    // gathered from stored profiles.
    let s = temp_storage();
    let target = make_node_id(1);
    let l1_a = make_node_id(10);
    let l1_b = make_node_id(11);
    let l2_a1 = make_node_id(20);
    let l2_a2 = make_node_id(21);
    let l2_b1 = make_node_id(30);
    // Target's profile with 2 preferred peers
    s.store_profile(&PublicProfile {
        node_id: target,
        display_name: "target".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l1_a, l1_b],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer A's profile with 2 preferred peers
    s.store_profile(&PublicProfile {
        node_id: l1_a,
        display_name: "l1a".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l2_a1, l2_a2],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer B's profile with 1 preferred peer
    s.store_profile(&PublicProfile {
        node_id: l1_b,
        display_name: "l1b".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l2_b1],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let tree = s.build_preferred_tree_for(&target).unwrap();
    // Should contain: target, l1_a, l1_b, l2_a1, l2_a2, l2_b1 = 6 unique nodes
    assert_eq!(tree.len(), 6);
    assert!(tree.contains(&target));
    assert!(tree.contains(&l1_a));
    assert!(tree.contains(&l1_b));
    assert!(tree.contains(&l2_a1));
    assert!(tree.contains(&l2_a2));
    assert!(tree.contains(&l2_b1));
}
#[test]
fn build_preferred_tree_deduplicates() {
    // Nodes reachable via more than one path appear once in the tree; a
    // back-edge to the target (l1_a lists the target) must not duplicate it.
    let s = temp_storage();
    let target = make_node_id(1);
    let shared = make_node_id(10);
    let l1_a = make_node_id(11);
    // Target's preferred peers include shared
    s.store_profile(&PublicProfile {
        node_id: target,
        display_name: "target".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l1_a, shared],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer's preferred peers also include shared (and the target itself)
    s.store_profile(&PublicProfile {
        node_id: l1_a,
        display_name: "l1a".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![shared, target],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let tree = s.build_preferred_tree_for(&target).unwrap();
    // Should contain: target, l1_a, shared = 3 unique nodes (no duplicates)
    assert_eq!(tree.len(), 3);
}
#[test]
fn social_route_preferred_tree_roundtrip() {
    use crate::types::{ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    // The preferred_tree field survives a social-route round trip and can be
    // replaced wholesale via update_social_route_preferred_tree.
    let s = temp_storage();
    let nid = make_node_id(1);
    let tree_node = make_node_id(10);
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 0,
        last_seen_ms: 1000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![tree_node],
    };
    s.upsert_social_route(&entry).unwrap();
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.preferred_tree.len(), 1);
    assert!(got.preferred_tree.contains(&tree_node));
    // Update preferred tree: the new list replaces the old one entirely.
    let new_tree = vec![make_node_id(20), make_node_id(21)];
    s.update_social_route_preferred_tree(&nid, &new_tree).unwrap();
    let got2 = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got2.preferred_tree.len(), 2);
}
#[test]
fn blob_upstream_preferred_tree() {
    // An upstream's preferred tree starts empty and can be replaced wholesale.
    let storage = temp_storage();
    let cid = [42u8; 32];
    let source = make_node_id(1);
    storage.store_blob_upstream(&cid, &source, &[]).unwrap();

    assert!(storage.get_blob_upstream_preferred_tree(&cid).unwrap().is_empty());

    let replacement = vec![make_node_id(10), make_node_id(11)];
    storage.update_blob_upstream_preferred_tree(&cid, &replacement).unwrap();
    assert_eq!(storage.get_blob_upstream_preferred_tree(&cid).unwrap().len(), 2);
}
// ---- Circle Profile tests ----
#[test]
fn circle_profile_crud() {
    // Per-circle profile overrides: create, read, list, upsert-update, delete.
    let s = temp_storage();
    let author = make_node_id(1);
    let cp = CircleProfile {
        author,
        circle_name: "friends".to_string(),
        display_name: "Alice (friends)".to_string(),
        bio: "Hi friends!".to_string(),
        avatar_cid: None,
        updated_at: 1000,
    };
    s.set_circle_profile(&cp).unwrap();
    // Get
    let got = s.get_circle_profile(&author, "friends").unwrap().unwrap();
    assert_eq!(got.display_name, "Alice (friends)");
    assert_eq!(got.bio, "Hi friends!");
    assert!(got.avatar_cid.is_none());
    // List all circle profiles this author has defined.
    let list = s.list_circle_profiles_for_author(&author).unwrap();
    assert_eq!(list.len(), 1);
    // Update: setting the same (author, circle) pair overwrites in place.
    let cp2 = CircleProfile {
        author,
        circle_name: "friends".to_string(),
        display_name: "Alice Updated".to_string(),
        bio: "New bio".to_string(),
        avatar_cid: Some([42u8; 32]),
        updated_at: 2000,
    };
    s.set_circle_profile(&cp2).unwrap();
    let got2 = s.get_circle_profile(&author, "friends").unwrap().unwrap();
    assert_eq!(got2.display_name, "Alice Updated");
    assert_eq!(got2.avatar_cid, Some([42u8; 32]));
    // Delete
    s.delete_circle_profile(&author, "friends").unwrap();
    assert!(s.get_circle_profile(&author, "friends").unwrap().is_none());
}
#[test]
fn resolve_display_for_peer_circle_member() {
    // resolve_display_for_peer picks the circle-specific profile for viewers
    // who are members of a circle with an override, and falls back to the
    // public profile for everyone else.
    let s = temp_storage();
    let author = make_node_id(1);
    let viewer = make_node_id(2);
    let stranger = make_node_id(3);
    // Set up public profile
    s.store_profile(&PublicProfile {
        node_id: author,
        display_name: "Alice Public".to_string(),
        bio: "Public bio".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // Create circle and add viewer as member
    s.create_circle("close-friends").unwrap();
    s.add_circle_member("close-friends", &viewer).unwrap();
    // Set circle profile
    let cp = CircleProfile {
        author,
        circle_name: "close-friends".to_string(),
        display_name: "Alice (CF)".to_string(),
        bio: "Circle bio".to_string(),
        avatar_cid: Some([99u8; 32]),
        updated_at: 2000,
    };
    s.set_circle_profile(&cp).unwrap();
    // Viewer in circle sees circle profile (name, bio, and avatar).
    let (dn, bio, avatar) = s.resolve_display_for_peer(&author, &viewer).unwrap();
    assert_eq!(dn, "Alice (CF)");
    assert_eq!(bio, "Circle bio");
    assert_eq!(avatar, Some([99u8; 32]));
    // Stranger sees public profile
    let (dn2, bio2, avatar2) = s.resolve_display_for_peer(&author, &stranger).unwrap();
    assert_eq!(dn2, "Alice Public");
    assert_eq!(bio2, "Public bio");
    assert!(avatar2.is_none());
}
#[test]
fn resolve_display_hidden_profile() {
    // With public_visible=false and no circle membership, a stranger
    // resolves to empty display fields rather than the hidden profile.
    let s = temp_storage();
    let author = make_node_id(1);
    let stranger = make_node_id(3);
    // Hidden public profile
    s.store_profile(&PublicProfile {
        node_id: author,
        display_name: "Alice Hidden".to_string(),
        bio: "Secret bio".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: false,
        avatar_cid: None,
    }).unwrap();
    // Stranger sees nothing
    let (dn, bio, avatar) = s.resolve_display_for_peer(&author, &stranger).unwrap();
    assert!(dn.is_empty());
    assert!(bio.is_empty());
    assert!(avatar.is_none());
}
#[test]
fn public_visible_migration_defaults_true() {
    // NOTE(review): despite the name, this only round-trips a profile stored
    // with public_visible=true; it never inserts a legacy row lacking the
    // column, so the migration default itself is not exercised — TODO confirm
    // whether a raw-SQL legacy fixture should be added.
    let s = temp_storage();
    let nid = make_node_id(1);
    // Store a profile with public_visible=true (default)
    s.store_profile(&PublicProfile {
        node_id: nid,
        display_name: "test".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let got = s.get_profile(&nid).unwrap().unwrap();
    assert!(got.public_visible);
}
// ---- Known anchors tests ----
#[test]
fn known_anchors_upsert_and_list() {
    // Repeated upserts bump an anchor's success count; listing returns the
    // anchors ordered by success count, highest first.
    let storage = temp_storage();
    let addr: SocketAddr = "10.0.0.1:4433".parse().unwrap();
    let favorite = make_node_id(1);

    for byte in 1..=3u8 {
        storage.upsert_known_anchor(&make_node_id(byte), &[addr]).unwrap();
    }
    // Two extra upserts for `favorite` → success_count = 3 vs 1 for the rest.
    storage.upsert_known_anchor(&favorite, &[addr]).unwrap();
    storage.upsert_known_anchor(&favorite, &[addr]).unwrap();

    let anchors = storage.list_known_anchors().unwrap();
    assert_eq!(anchors.len(), 3);
    assert_eq!(anchors[0].0, favorite);
}
#[test]
fn known_anchors_prune() {
    // Upserting more anchors than the cap auto-prunes the table down to 5.
    let storage = temp_storage();
    let addr: SocketAddr = "10.0.0.1:4433".parse().unwrap();
    for byte in 1..=10u8 {
        storage.upsert_known_anchor(&make_node_id(byte), &[addr]).unwrap();
    }
    assert_eq!(storage.list_known_anchors().unwrap().len(), 5);
}
#[test]
fn is_peer_anchor_check() {
    // The anchor flag defaults to false, can be set, and unknown peers read false.
    let storage = temp_storage();
    let peer_id = make_node_id(1);
    let addr: SocketAddr = "10.0.0.1:4433".parse().unwrap();
    storage.upsert_peer(&peer_id, &[addr], None).unwrap();

    assert!(!storage.is_peer_anchor(&peer_id).unwrap());
    storage.set_peer_anchor(&peer_id, true).unwrap();
    assert!(storage.is_peer_anchor(&peer_id).unwrap());

    // A peer that was never stored is simply not an anchor.
    assert!(!storage.is_peer_anchor(&make_node_id(99)).unwrap());
}
// --- Engagement tests ---
#[test]
fn post_downstream_crud() {
    let store = temp_storage();
    let post = make_post_id(1);
    let first = make_node_id(1);
    let second = make_node_id(2);
    // Register two downstream peers for the post; each add reports success.
    assert!(store.add_post_downstream(&post, &first).unwrap());
    assert!(store.add_post_downstream(&post, &second).unwrap());
    assert_eq!(store.get_post_downstream(&post).unwrap().len(), 2);
    assert_eq!(store.get_post_downstream_count(&post).unwrap(), 2);
    // Dropping one peer shrinks the count.
    store.remove_post_downstream(&post, &first).unwrap();
    assert_eq!(store.get_post_downstream_count(&post).unwrap(), 1);
    // The downstream set is capped at 100 peers per post.
    let crowded = make_post_id(99);
    for seed in 1..=100u8 {
        assert!(store.add_post_downstream(&crowded, &make_node_id(seed)).unwrap());
    }
    assert_eq!(store.get_post_downstream_count(&crowded).unwrap(), 100);
    // An add beyond capacity is rejected (returns false).
    assert!(!store.add_post_downstream(&crowded, &make_node_id(200)).unwrap());
}
#[test]
fn reaction_crud() {
    use crate::types::Reaction;
    let store = temp_storage();
    let post = make_post_id(1);
    let alice = make_node_id(1);
    let bob = make_node_id(2);
    let me = make_node_id(1);
    // Small builder so each stored reaction differs only in the interesting fields.
    let react = |who, emoji: &str, ts| Reaction {
        reactor: who,
        emoji: emoji.to_string(),
        post_id: post,
        timestamp_ms: ts,
        encrypted_payload: None,
    };
    store.store_reaction(&react(alice, "👍", 1000)).unwrap();
    store.store_reaction(&react(bob, "👍", 1001)).unwrap();
    store.store_reaction(&react(alice, "❤️", 1002)).unwrap();
    assert_eq!(store.get_reactions(&post).unwrap().len(), 3);
    // Two distinct emoji were used on this post.
    let counts = store.get_reaction_counts(&post, &me).unwrap();
    assert_eq!(counts.len(), 2);
    // 👍 appears twice, and one of those reactions belongs to `me`.
    let thumbs = counts.iter().find(|(e, _, _)| e == "👍").unwrap();
    assert_eq!(thumbs.1, 2);
    assert!(thumbs.2);
    // Removing alice's 👍 leaves the remaining two reactions intact.
    store.remove_reaction(&alice, &post, "👍").unwrap();
    assert_eq!(store.get_reactions(&post).unwrap().len(), 2);
}
#[test]
fn comment_crud() {
    use crate::types::InlineComment;
    let store = temp_storage();
    let post = make_post_id(1);
    // Store two comments from different authors, one millisecond apart.
    let entries = [
        (make_node_id(1), "Nice post!", 1000, 0u8),
        (make_node_id(2), "I agree", 1001, 1u8),
    ];
    for (who, text, ts, sig_byte) in entries {
        store.store_comment(&InlineComment {
            author: who,
            post_id: post,
            content: text.to_string(),
            timestamp_ms: ts,
            signature: vec![sig_byte; 64],
        }).unwrap();
    }
    // Comments come back oldest-first.
    let fetched = store.get_comments(&post).unwrap();
    assert_eq!(fetched.len(), 2);
    assert_eq!(fetched[0].content, "Nice post!");
    assert_eq!(fetched[1].content, "I agree");
    assert_eq!(store.get_comment_count(&post).unwrap(), 2);
}
#[test]
fn comment_policy_crud() {
    use crate::types::{CommentPermission, CommentPolicy, ModerationMode, ReactPermission};
    let store = temp_storage();
    let post = make_post_id(1);
    // A post starts out with no policy attached.
    assert!(store.get_comment_policy(&post).unwrap().is_none());
    // Attach a restrictive policy and confirm every field round-trips.
    let restrictive = CommentPolicy {
        allow_comments: CommentPermission::AudienceOnly,
        allow_reacts: ReactPermission::Public,
        moderation: ModerationMode::AuthorBlocklist,
        blocklist: vec![make_node_id(99)],
    };
    store.set_comment_policy(&post, &restrictive).unwrap();
    let round_trip = store.get_comment_policy(&post).unwrap().unwrap();
    assert_eq!(round_trip.allow_comments, CommentPermission::AudienceOnly);
    assert_eq!(round_trip.allow_reacts, ReactPermission::Public);
    assert_eq!(round_trip.blocklist.len(), 1);
    // Setting a new policy overwrites the previous one.
    let locked_down = CommentPolicy {
        allow_comments: CommentPermission::None,
        ..Default::default()
    };
    store.set_comment_policy(&post, &locked_down).unwrap();
    let replaced = store.get_comment_policy(&post).unwrap().unwrap();
    assert_eq!(replaced.allow_comments, CommentPermission::None);
}
#[test]
fn blob_header_crud() {
    let store = temp_storage();
    let post = make_post_id(1);
    let writer = make_node_id(1);
    // A missing header reads back as None.
    assert!(store.get_blob_header(&post).unwrap().is_none());
    store.store_blob_header(&post, &writer, "{\"test\":true}", 1000).unwrap();
    let (body, stamp) = store.get_blob_header(&post).unwrap().unwrap();
    assert_eq!(body, "{\"test\":true}");
    assert_eq!(stamp, 1000);
    // Storing again replaces both the JSON payload and its timestamp.
    store.store_blob_header(&post, &writer, "{\"test\":false}", 2000).unwrap();
    let (body, stamp) = store.get_blob_header(&post).unwrap().unwrap();
    assert_eq!(body, "{\"test\":false}");
    assert_eq!(stamp, 2000);
}
#[test]
fn thread_meta_crud() {
    use crate::types::ThreadMeta;
    let store = temp_storage();
    let root = make_post_id(1);
    let reply_a = make_post_id(2);
    let reply_b = make_post_id(3);
    // Attach two replies to the same root post.
    for reply in [reply_a, reply_b] {
        store.store_thread_meta(&ThreadMeta {
            post_id: reply,
            parent_post_id: root,
        }).unwrap();
    }
    // Both replies are listed as children of the root.
    assert_eq!(store.get_thread_children(&root).unwrap().len(), 2);
    // Child -> parent lookup resolves; the root itself has no parent.
    assert_eq!(store.get_thread_parent(&reply_a).unwrap().unwrap(), root);
    assert!(store.get_thread_parent(&root).unwrap().is_none());
}
}