ItsGoin v0.3.2 — Decentralized social media network

No central server, user-owned data, reverse-chronological feed.
Rust core + Tauri desktop + Android app + plain HTML/CSS/JS frontend.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Scott Reimers 2026-03-15 20:22:08 -04:00
commit 800388cda4
146 changed files with 53227 additions and 0 deletions

24
crates/core/Cargo.toml Normal file
View file

@ -0,0 +1,24 @@
# Manifest for the core library crate of the ItsGoin decentralized network.
[package]
name = "itsgoin-core"
# NOTE(review): the release header says v0.3.2 but this crate is 0.3.0 — confirm intended.
version = "0.3.0"
edition = "2021"
[dependencies]
# p2p networking; the mDNS feature enables local-network address lookup.
iroh = { version = "0.96", features = ["address-lookup-mdns"] }
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# BLAKE3 hashing — used for content addressing throughout the core.
blake3 = "1"
# "bundled" compiles SQLite in, so no system libsqlite3 is required.
rusqlite = { version = "0.32", features = ["bundled"] }
anyhow = "1"
tracing = "0.1"
hex = "0.4"
rand = "0.9"
# NOTE(review): both dalek crates are pinned to pre-release versions
# (=5.0.0-pre.1 / =3.0.0-pre.1); revisit once stable releases land.
curve25519-dalek = { version = "=5.0.0-pre.1", features = ["rand_core", "zeroize"] }
ed25519-dalek = { version = "=3.0.0-pre.1", features = ["rand_core", "zeroize"] }
chacha20poly1305 = "0.10"
base64 = "0.22"
# UPnP port mapping (NAT traversal).
igd-next = { version = "0.16", features = ["tokio"] }
[dev-dependencies]
tempfile = "3"

View file

@ -0,0 +1,73 @@
use std::collections::VecDeque;
use serde::Serialize;
/// Maximum number of events retained; the oldest entry is evicted once full.
const MAX_EVENTS: usize = 200;
/// Severity of an activity event. Serialized lowercase ("info" / "warn" / "error").
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ActivityLevel {
    Info,
    Warn,
    Error,
}
/// Subsystem that produced an activity event. Serialized lowercase.
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ActivityCategory {
    Growth,
    Rebalance,
    Recovery,
    Anchor,
    Connection,
    Relay,
}
/// A single timestamped activity-log entry.
#[derive(Debug, Clone, Serialize)]
pub struct ActivityEvent {
    // Wall-clock time at log() call, milliseconds since the Unix epoch.
    pub timestamp_ms: u64,
    pub level: ActivityLevel,
    pub category: ActivityCategory,
    // Human-readable description of what happened.
    pub message: String,
    // Raw 32-byte id of the peer involved, if any.
    pub peer_id: Option<[u8; 32]>,
}
/// Fixed-capacity, in-memory log of recent node activity.
///
/// Holds at most [`MAX_EVENTS`] entries; logging at capacity evicts the
/// oldest entry first (ring-buffer behavior).
pub struct ActivityLog {
    events: VecDeque<ActivityEvent>,
}

// A type with a no-argument `new()` should also implement `Default`
// (clippy::new_without_default); callers can then use `ActivityLog::default()`.
impl Default for ActivityLog {
    fn default() -> Self {
        Self::new()
    }
}

impl ActivityLog {
    /// Create an empty log with storage pre-allocated for `MAX_EVENTS`.
    pub fn new() -> Self {
        Self {
            events: VecDeque::with_capacity(MAX_EVENTS),
        }
    }

    /// Append an event stamped with the current wall-clock time
    /// (milliseconds since the Unix epoch; 0 if the clock reads before
    /// the epoch). Evicts the oldest entry when the buffer is full.
    pub fn log(
        &mut self,
        level: ActivityLevel,
        cat: ActivityCategory,
        msg: String,
        peer: Option<[u8; 32]>,
    ) {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as u64;
        if self.events.len() >= MAX_EVENTS {
            self.events.pop_front();
        }
        self.events.push_back(ActivityEvent {
            timestamp_ms: now,
            level,
            category: cat,
            message: msg,
            peer_id: peer,
        });
    }

    /// Return up to `limit` of the most recent events, oldest first.
    pub fn recent(&self, limit: usize) -> Vec<ActivityEvent> {
        let start = self.events.len().saturating_sub(limit);
        self.events.iter().skip(start).cloned().collect()
    }
}

129
crates/core/src/blob.rs Normal file
View file

@ -0,0 +1,129 @@
use std::path::{Path, PathBuf};
/// A blob identifier — BLAKE3 hash of the blob data
pub type BlobId = [u8; 32];
/// Compute the content address of a blob: the BLAKE3 hash of its bytes.
pub fn compute_blob_id(data: &[u8]) -> BlobId {
    let digest = blake3::hash(data);
    *digest.as_bytes()
}
/// Check whether `data` hashes to the claimed content id `cid`.
pub fn verify_blob(cid: &BlobId, data: &[u8]) -> bool {
    compute_blob_id(data) == *cid
}
/// Filesystem-based blob store with a 256-shard directory layout.
/// Blobs are stored at `{base_dir}/{hex[0..2]}/{hex}`.
pub struct BlobStore {
    base_dir: PathBuf,
}

impl BlobStore {
    /// Open or create a blob store at `{data_dir}/blobs/`.
    pub fn open(data_dir: &Path) -> anyhow::Result<Self> {
        let base_dir = data_dir.join("blobs");
        std::fs::create_dir_all(&base_dir)?;
        Ok(Self { base_dir })
    }

    /// Final path for a CID: the first hex byte names the shard directory.
    fn blob_path(&self, cid: &BlobId) -> PathBuf {
        let hex = hex::encode(cid);
        let shard = &hex[..2];
        self.base_dir.join(shard).join(&hex)
    }

    /// Store a blob. Returns `Ok(false)` if it already exists (idempotent).
    ///
    /// The data is written to a temporary sibling file and then renamed into
    /// place, so a crash mid-write can never leave a truncated blob at the
    /// final content-addressed path (rename within one directory is atomic
    /// on POSIX filesystems).
    pub fn store(&self, cid: &BlobId, data: &[u8]) -> anyhow::Result<bool> {
        let path = self.blob_path(cid);
        if path.exists() {
            return Ok(false);
        }
        if let Some(parent) = path.parent() {
            std::fs::create_dir_all(parent)?;
        }
        // Tmp name is unique per CID; concurrent writers of the same CID
        // write identical bytes, so losing the rename race is harmless.
        let tmp = path.with_extension("tmp");
        std::fs::write(&tmp, data)?;
        std::fs::rename(&tmp, &path)?;
        Ok(true)
    }

    /// Retrieve a blob by CID. Returns `Ok(None)` when the blob is absent.
    /// Reads directly and maps NotFound instead of an exists()-then-read
    /// pair, avoiding the check/use race.
    pub fn get(&self, cid: &BlobId) -> anyhow::Result<Option<Vec<u8>>> {
        match std::fs::read(self.blob_path(cid)) {
            Ok(data) => Ok(Some(data)),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// Check if a blob exists locally.
    pub fn has(&self, cid: &BlobId) -> bool {
        self.blob_path(cid).exists()
    }

    /// Return the filesystem path if the blob exists locally (for streaming).
    pub fn file_path(&self, cid: &BlobId) -> Option<PathBuf> {
        let path = self.blob_path(cid);
        if path.exists() { Some(path) } else { None }
    }

    /// Delete a blob (for future eviction). `Ok(false)` if it wasn't present.
    pub fn delete(&self, cid: &BlobId) -> anyhow::Result<bool> {
        match std::fs::remove_file(self.blob_path(cid)) {
            Ok(()) => Ok(true),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(false),
            Err(e) => Err(e.into()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Hashing is pure: identical bytes yield the same id, and a wrong
    // id must fail verification.
    #[test]
    fn compute_verify_roundtrip() {
        let data = b"hello world blob data";
        let cid = compute_blob_id(data);
        assert!(verify_blob(&cid, data));
        assert!(!verify_blob(&[0u8; 32], data));
    }

    // End-to-end store/has/get against a throwaway temp directory.
    #[test]
    fn store_retrieve() {
        let dir = tempfile::tempdir().unwrap();
        let store = BlobStore::open(dir.path()).unwrap();
        let data = b"test blob content";
        let cid = compute_blob_id(data);
        // Store
        assert!(store.store(&cid, data).unwrap()); // first time → true
        assert!(!store.store(&cid, data).unwrap()); // idempotent → false
        // Has
        assert!(store.has(&cid));
        assert!(!store.has(&[0u8; 32]));
        // Get
        let retrieved = store.get(&cid).unwrap().unwrap();
        assert_eq!(retrieved, data);
        // Missing
        assert!(store.get(&[0u8; 32]).unwrap().is_none());
    }

    // delete() reports true exactly once, then false for an absent blob.
    #[test]
    fn delete_blob() {
        let dir = tempfile::tempdir().unwrap();
        let store = BlobStore::open(dir.path()).unwrap();
        let data = b"to be deleted";
        let cid = compute_blob_id(data);
        store.store(&cid, data).unwrap();
        assert!(store.delete(&cid).unwrap());
        assert!(!store.has(&cid));
        assert!(!store.delete(&cid).unwrap()); // already gone
    }
}

File diff suppressed because it is too large. Load diff

View file

@ -0,0 +1,61 @@
use crate::types::{Post, PostId};
/// Compute the content ID (BLAKE3 hash) of a post.
/// This is deterministic: same post data always produces the same ID.
///
/// The post is serialized with `serde_json` and the resulting bytes are
/// hashed, so the ID is stable only while the `Post` struct's field set
/// and order stay unchanged.
/// NOTE(review): adding or reordering fields on `Post` silently changes
/// every post ID — confirm this is acceptable for persisted/announced data.
pub fn compute_post_id(post: &Post) -> PostId {
    let bytes = serde_json::to_vec(post).expect("post serialization should not fail");
    *blake3::hash(&bytes).as_bytes()
}
/// Check whether `post` hashes to the claimed content id `id`.
pub fn verify_post_id(id: &PostId, post: &Post) -> bool {
    compute_post_id(post) == *id
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a post with a fixed author and timestamp and the given content.
    fn sample_post(content: &str) -> Post {
        Post {
            author: [1u8; 32],
            content: content.to_string(),
            attachments: vec![],
            timestamp_ms: 1000,
        }
    }

    #[test]
    fn test_deterministic_hashing() {
        // Hashing the same post twice must yield the same id.
        let post = sample_post("hello world");
        assert_eq!(compute_post_id(&post), compute_post_id(&post));
    }

    #[test]
    fn test_different_content_different_id() {
        // Changing only the content must change the id.
        assert_ne!(
            compute_post_id(&sample_post("hello")),
            compute_post_id(&sample_post("world"))
        );
    }

    #[test]
    fn test_verify() {
        // The computed id verifies; an all-zero id does not.
        let post = sample_post("test");
        let id = compute_post_id(&post);
        assert!(verify_post_id(&id, &post));
        assert!(!verify_post_id(&[0u8; 32], &post));
    }
}

976
crates/core/src/crypto.rs Normal file
View file

@ -0,0 +1,976 @@
use anyhow::{bail, Result};
use base64::Engine;
use chacha20poly1305::{
aead::{Aead, KeyInit},
ChaCha20Poly1305, Nonce,
};
use curve25519_dalek::montgomery::MontgomeryPoint;
use ed25519_dalek::{Signer, SigningKey, Verifier, VerifyingKey};
use rand::RngCore;
use crate::types::{GroupEpoch, GroupId, GroupMemberKey, NodeId, PostId, WrappedKey};
/// Domain-separation context for deriving CEK wrapping keys from DH secrets.
const CEK_WRAP_CONTEXT: &str = "itsgoin/cek-wrap/v1";
/// Convert an ed25519 seed (32 bytes from identity.key) to X25519 private scalar bytes.
/// Uses ed25519-dalek's `to_scalar_bytes` on the signing key built from the seed.
pub fn ed25519_seed_to_x25519_private(seed: &[u8; 32]) -> [u8; 32] {
    let signing_key = SigningKey::from_bytes(seed);
    signing_key.to_scalar_bytes()
}
/// Convert an ed25519 public key (NodeId) to X25519 public key bytes via
/// the Edwards→Montgomery map. Fails on bytes that are not a valid
/// ed25519 public key.
pub fn ed25519_pubkey_to_x25519_public(pk: &[u8; 32]) -> Result<[u8; 32]> {
    match VerifyingKey::from_bytes(pk) {
        Ok(vk) => Ok(vk.to_montgomery().to_bytes()),
        Err(e) => Err(anyhow::anyhow!("invalid ed25519 public key: {}", e)),
    }
}
/// Perform X25519 Diffie-Hellman: our_private (scalar bytes) * their_public (montgomery point).
/// Either side of a key pair computes the same 32-byte shared secret.
fn x25519_dh(our_private: &[u8; 32], their_public: &[u8; 32]) -> [u8; 32] {
    MontgomeryPoint(*their_public)
        .mul_clamped(*our_private)
        .to_bytes()
}
/// Derive a symmetric wrapping key from a DH shared secret using BLAKE3's
/// `derive_key` with the CEK-wrap context (domain separation from other uses
/// of the same shared secret).
fn derive_wrapping_key(shared_secret: &[u8; 32]) -> [u8; 32] {
    blake3::derive_key(CEK_WRAP_CONTEXT, shared_secret)
}
/// Encrypt a post's plaintext content for the given recipients.
///
/// Returns `(base64_ciphertext, Vec<WrappedKey>)` where:
/// - base64_ciphertext is `base64(nonce(12) || ciphertext || tag(16))` for the content
/// - Each WrappedKey contains the CEK encrypted for one recipient
///   (wrapped via X25519 DH with the author, BLAKE3-derived key, ChaCha20-Poly1305)
///
/// The author (our_seed's corresponding NodeId) is always included as a
/// recipient so the sender can decrypt (and later re-wrap) their own post.
///
/// NOTE(review): neither the content nor the wrapped CEKs carry AAD binding
/// them to a post id or recipient, so a wrapped key is not cryptographically
/// tied to this particular ciphertext — confirm the surrounding protocol
/// enforces that association.
pub fn encrypt_post(
    plaintext: &str,
    our_seed: &[u8; 32],
    our_node_id: &NodeId,
    recipients: &[NodeId],
) -> Result<(String, Vec<WrappedKey>)> {
    // Generate random 32-byte Content Encryption Key (fresh per post)
    let mut cek = [0u8; 32];
    rand::rng().fill_bytes(&mut cek);
    // Encrypt content with CEK using ChaCha20-Poly1305
    let content_cipher = ChaCha20Poly1305::new_from_slice(&cek)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    let mut content_nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut content_nonce_bytes);
    let content_nonce = Nonce::from_slice(&content_nonce_bytes);
    let ciphertext = content_cipher
        .encrypt(content_nonce, plaintext.as_bytes())
        .map_err(|e| anyhow::anyhow!("encrypt: {}", e))?;
    // base64(nonce || ciphertext_with_tag)
    let mut payload = Vec::with_capacity(12 + ciphertext.len());
    payload.extend_from_slice(&content_nonce_bytes);
    payload.extend_from_slice(&ciphertext);
    let encoded = base64::engine::general_purpose::STANDARD.encode(&payload);
    // Get our X25519 private key
    let our_x25519_private = ed25519_seed_to_x25519_private(our_seed);
    // Build recipient set (always include ourselves)
    let mut all_recipients: Vec<NodeId> = recipients.to_vec();
    if !all_recipients.contains(our_node_id) {
        all_recipients.push(*our_node_id);
    }
    // Wrap CEK for each recipient: DH(author, recipient) → derived key → AEAD
    let mut wrapped_keys = Vec::with_capacity(all_recipients.len());
    for recipient in &all_recipients {
        let their_x25519_pub = ed25519_pubkey_to_x25519_public(recipient)?;
        let shared_secret = x25519_dh(&our_x25519_private, &their_x25519_pub);
        let wrapping_key = derive_wrapping_key(&shared_secret);
        let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
            .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
        let mut wrap_nonce_bytes = [0u8; 12];
        rand::rng().fill_bytes(&mut wrap_nonce_bytes);
        let wrap_nonce = Nonce::from_slice(&wrap_nonce_bytes);
        let wrapped = wrap_cipher
            .encrypt(wrap_nonce, cek.as_slice())
            .map_err(|e| anyhow::anyhow!("wrap: {}", e))?;
        // nonce(12) || encrypted_cek(32) || tag(16) = 60 bytes
        let mut wrapped_cek = Vec::with_capacity(60);
        wrapped_cek.extend_from_slice(&wrap_nonce_bytes);
        wrapped_cek.extend_from_slice(&wrapped);
        wrapped_keys.push(WrappedKey {
            recipient: *recipient,
            wrapped_cek,
        });
    }
    Ok((encoded, wrapped_keys))
}
/// Decrypt a post's content if we are among the recipients.
///
/// `sender_pubkey` must be the key that produced `wrapped_keys` (the post
/// author): the CEK is unwrapped via DH(our_seed, sender_pubkey), which only
/// matches a wrap created as DH(sender_seed, our_pubkey).
///
/// Returns `Ok(Some(plaintext))` if we can decrypt, `Ok(None)` if we're not a
/// recipient, and `Err` for malformed or tampered data.
pub fn decrypt_post(
    encrypted_content_b64: &str,
    our_seed: &[u8; 32],
    our_node_id: &NodeId,
    sender_pubkey: &NodeId,
    wrapped_keys: &[WrappedKey],
) -> Result<Option<String>> {
    // Find our wrapped key; absence means we were never a recipient.
    let our_wk = match wrapped_keys.iter().find(|wk| &wk.recipient == our_node_id) {
        Some(wk) => wk,
        None => return Ok(None),
    };
    // Layout produced by encrypt_post: nonce(12) || encrypted_cek(32) || tag(16).
    if our_wk.wrapped_cek.len() != 60 {
        bail!(
            "invalid wrapped_cek length: expected 60, got {}",
            our_wk.wrapped_cek.len()
        );
    }
    // DH with sender to get wrapping key
    let our_x25519_private = ed25519_seed_to_x25519_private(our_seed);
    let sender_x25519_pub = ed25519_pubkey_to_x25519_public(sender_pubkey)?;
    let shared_secret = x25519_dh(&our_x25519_private, &sender_x25519_pub);
    let wrapping_key = derive_wrapping_key(&shared_secret);
    // Unwrap CEK
    let wrap_nonce = Nonce::from_slice(&our_wk.wrapped_cek[..12]);
    let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
    let cek = wrap_cipher
        .decrypt(wrap_nonce, &our_wk.wrapped_cek[12..])
        .map_err(|e| anyhow::anyhow!("unwrap CEK: {}", e))?;
    if cek.len() != 32 {
        bail!("unwrapped CEK wrong length: {}", cek.len());
    }
    // Decode base64 content
    let payload = base64::engine::general_purpose::STANDARD
        .decode(encrypted_content_b64)
        .map_err(|e| anyhow::anyhow!("base64 decode: {}", e))?;
    // Minimum payload: nonce(12) + tag(16) for an empty plaintext.
    if payload.len() < 12 + 16 {
        bail!("encrypted payload too short");
    }
    // Decrypt content
    let content_nonce = Nonce::from_slice(&payload[..12]);
    let content_cipher = ChaCha20Poly1305::new_from_slice(&cek)
        .map_err(|e| anyhow::anyhow!("content cipher init: {}", e))?;
    let plaintext = content_cipher
        .decrypt(content_nonce, &payload[12..])
        .map_err(|e| anyhow::anyhow!("decrypt content: {}", e))?;
    Ok(Some(String::from_utf8(plaintext)?))
}
/// Sign a delete record: an ed25519 signature over the raw post_id bytes,
/// returned as a 64-byte Vec.
pub fn sign_delete(seed: &[u8; 32], post_id: &PostId) -> Vec<u8> {
    SigningKey::from_bytes(seed).sign(post_id).to_bytes().to_vec()
}
/// Verify an ed25519 delete signature: the author's public key signed the post_id.
///
/// Returns false for malformed signatures or keys rather than erroring.
/// Uses `Signature::from_slice`, which already rejects any slice that is not
/// exactly 64 bytes, replacing the previous manual length check + `try_into`
/// and matching the style of `verify_comment_signature`.
pub fn verify_delete_signature(author: &NodeId, post_id: &PostId, signature: &[u8]) -> bool {
    let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else {
        return false;
    };
    let Ok(verifying_key) = VerifyingKey::from_bytes(author) else {
        return false;
    };
    verifying_key.verify(post_id, &sig).is_ok()
}
/// Re-wrap a post's CEK for a new set of recipients (excluding revoked ones).
///
/// Given the existing wrapped keys and a new list of recipient NodeIds,
/// unwraps the CEK using our own key, then wraps it for each new recipient.
///
/// NOTE(review): the unwrap step uses DH(our_seed, our_node_id) — a DH with
/// ourselves — which only matches the author's own self-wrapped entry from
/// `encrypt_post`. A non-author recipient calling this will fail at the
/// unwrap stage; confirm callers restrict this to the post author.
/// Note also that revoked recipients keep any previously obtained CEK: this
/// changes the key list, it does not rotate the CEK (see `re_encrypt_post`).
pub fn rewrap_visibility(
    our_seed: &[u8; 32],
    our_node_id: &NodeId,
    existing_recipients: &[WrappedKey],
    new_recipient_ids: &[NodeId],
) -> Result<Vec<WrappedKey>> {
    // Find our wrapped key
    let our_wk = existing_recipients
        .iter()
        .find(|wk| &wk.recipient == our_node_id)
        .ok_or_else(|| anyhow::anyhow!("we are not a recipient of this post"))?;
    // nonce(12) || encrypted_cek(32) || tag(16) = 60 bytes
    if our_wk.wrapped_cek.len() != 60 {
        bail!(
            "invalid wrapped_cek length: expected 60, got {}",
            our_wk.wrapped_cek.len()
        );
    }
    // DH with ourselves (author DH with self) to unwrap CEK
    let our_x25519_private = ed25519_seed_to_x25519_private(our_seed);
    let our_x25519_pub = ed25519_pubkey_to_x25519_public(our_node_id)?;
    let shared_secret = x25519_dh(&our_x25519_private, &our_x25519_pub);
    let wrapping_key = derive_wrapping_key(&shared_secret);
    let wrap_nonce = Nonce::from_slice(&our_wk.wrapped_cek[..12]);
    let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
    let cek = wrap_cipher
        .decrypt(wrap_nonce, &our_wk.wrapped_cek[12..])
        .map_err(|e| anyhow::anyhow!("unwrap CEK: {}", e))?;
    if cek.len() != 32 {
        bail!("unwrapped CEK wrong length: {}", cek.len());
    }
    // Re-wrap for each new recipient (same scheme as encrypt_post)
    let mut wrapped_keys = Vec::with_capacity(new_recipient_ids.len());
    for recipient in new_recipient_ids {
        let their_x25519_pub = ed25519_pubkey_to_x25519_public(recipient)?;
        let shared_secret = x25519_dh(&our_x25519_private, &their_x25519_pub);
        let wrapping_key = derive_wrapping_key(&shared_secret);
        let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
            .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
        let mut wrap_nonce_bytes = [0u8; 12];
        rand::rng().fill_bytes(&mut wrap_nonce_bytes);
        let wrap_nonce = Nonce::from_slice(&wrap_nonce_bytes);
        let wrapped = wrap_cipher
            .encrypt(wrap_nonce, cek.as_slice())
            .map_err(|e| anyhow::anyhow!("wrap: {}", e))?;
        let mut wrapped_cek = Vec::with_capacity(60);
        wrapped_cek.extend_from_slice(&wrap_nonce_bytes);
        wrapped_cek.extend_from_slice(&wrapped);
        wrapped_keys.push(WrappedKey {
            recipient: *recipient,
            wrapped_cek,
        });
    }
    Ok(wrapped_keys)
}
// --- Group Key Encryption ---
/// Domain-separation context for wrapping the group seed for a member (admin → member).
const GROUP_KEY_WRAP_CONTEXT: &str = "itsgoin/group-key-wrap/v1";
/// Domain-separation context for wrapping per-post CEKs via the group-key DH.
const GROUP_CEK_WRAP_CONTEXT: &str = "itsgoin/group-cek-wrap/v1";
/// Generate a new group key pair.
///
/// Returns `(seed, public_key)`: the ed25519 seed bytes and the matching
/// verifying-key bytes.
pub fn generate_group_keypair() -> ([u8; 32], [u8; 32]) {
    let signing_key = SigningKey::generate(&mut rand::rng());
    let seed = signing_key.to_bytes();
    let public_key = signing_key.verifying_key().to_bytes();
    (seed, public_key)
}
/// Compute the group ID from the initial public key (BLAKE3 hash).
/// NOTE(review): presumably the *initial* key is used so the id survives
/// key rotation — confirm callers never recompute it after rotating.
pub fn compute_group_id(public_key: &[u8; 32]) -> GroupId {
    *blake3::hash(public_key).as_bytes()
}
/// Derive a wrapping key for group key distribution (admin → member).
fn derive_group_key_wrapping_key(shared_secret: &[u8; 32]) -> [u8; 32] {
    blake3::derive_key(GROUP_KEY_WRAP_CONTEXT, shared_secret)
}
/// Derive a wrapping key for CEK wrapping via group key DH.
/// Distinct context from the member-distribution path so the two derived
/// key families can never coincide for the same shared secret.
fn derive_group_cek_wrapping_key(shared_secret: &[u8; 32]) -> [u8; 32] {
    blake3::derive_key(GROUP_CEK_WRAP_CONTEXT, shared_secret)
}
/// Wrap the group seed for a specific member using X25519 DH (admin_seed × member_pubkey).
///
/// Returns `nonce(12) || encrypted_seed(32) || tag(16)` = 60 bytes; the
/// member unwraps it with `unwrap_group_key` using the admin's public key.
pub fn wrap_group_key_for_member(
    admin_seed: &[u8; 32],
    member_node_id: &NodeId,
    group_seed: &[u8; 32],
) -> Result<Vec<u8>> {
    let admin_x25519 = ed25519_seed_to_x25519_private(admin_seed);
    let member_x25519_pub = ed25519_pubkey_to_x25519_public(member_node_id)?;
    let shared_secret = x25519_dh(&admin_x25519, &member_x25519_pub);
    let wrapping_key = derive_group_key_wrapping_key(&shared_secret);
    let cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    // Fresh random nonce per wrap.
    let mut nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);
    let encrypted = cipher
        .encrypt(nonce, group_seed.as_slice())
        .map_err(|e| anyhow::anyhow!("wrap group key: {}", e))?;
    let mut result = Vec::with_capacity(60);
    result.extend_from_slice(&nonce_bytes);
    result.extend_from_slice(&encrypted);
    Ok(result)
}
/// Unwrap a group seed using X25519 DH (our_seed × admin_pubkey).
///
/// Inverse of `wrap_group_key_for_member`; `wrapped` must be the 60-byte
/// `nonce(12) || encrypted_seed(32) || tag(16)` layout it produces.
pub fn unwrap_group_key(
    our_seed: &[u8; 32],
    admin_node_id: &NodeId,
    wrapped: &[u8],
) -> Result<[u8; 32]> {
    if wrapped.len() != 60 {
        bail!("invalid wrapped group key length: expected 60, got {}", wrapped.len());
    }
    let our_x25519 = ed25519_seed_to_x25519_private(our_seed);
    let admin_x25519_pub = ed25519_pubkey_to_x25519_public(admin_node_id)?;
    let shared_secret = x25519_dh(&our_x25519, &admin_x25519_pub);
    let wrapping_key = derive_group_key_wrapping_key(&shared_secret);
    let cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    let nonce = Nonce::from_slice(&wrapped[..12]);
    let decrypted = cipher
        .decrypt(nonce, &wrapped[12..])
        .map_err(|e| anyhow::anyhow!("unwrap group key: {}", e))?;
    if decrypted.len() != 32 {
        bail!("unwrapped group seed wrong length: {}", decrypted.len());
    }
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&decrypted);
    Ok(seed)
}
/// Encrypt a post for a group: generates a random CEK, encrypts the content,
/// then wraps the CEK using X25519 DH between the group seed and group public key.
///
/// That DH is "DH with self" (the group key pair with itself), so anyone
/// holding the group seed for this epoch can derive the same wrapping key
/// and decrypt via `decrypt_group_post`.
/// Returns `(base64(nonce || ciphertext || tag), wrapped_cek(60 bytes))`.
pub fn encrypt_post_for_group(
    plaintext: &str,
    group_seed: &[u8; 32],
    group_public_key: &[u8; 32],
) -> Result<(String, Vec<u8>)> {
    // Generate random CEK (fresh per post)
    let mut cek = [0u8; 32];
    rand::rng().fill_bytes(&mut cek);
    // Encrypt content with CEK
    let content_cipher = ChaCha20Poly1305::new_from_slice(&cek)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    let mut content_nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut content_nonce_bytes);
    let content_nonce = Nonce::from_slice(&content_nonce_bytes);
    let ciphertext = content_cipher
        .encrypt(content_nonce, plaintext.as_bytes())
        .map_err(|e| anyhow::anyhow!("encrypt: {}", e))?;
    let mut payload = Vec::with_capacity(12 + ciphertext.len());
    payload.extend_from_slice(&content_nonce_bytes);
    payload.extend_from_slice(&ciphertext);
    let encoded = base64::engine::general_purpose::STANDARD.encode(&payload);
    // Wrap CEK using group DH: group_seed (as X25519 private) × group_public_key (as X25519 public)
    let group_x25519_private = ed25519_seed_to_x25519_private(group_seed);
    let group_x25519_public = ed25519_pubkey_to_x25519_public(group_public_key)?;
    let shared_secret = x25519_dh(&group_x25519_private, &group_x25519_public);
    let wrapping_key = derive_group_cek_wrapping_key(&shared_secret);
    let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
    let mut wrap_nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut wrap_nonce_bytes);
    let wrap_nonce = Nonce::from_slice(&wrap_nonce_bytes);
    let wrapped = wrap_cipher
        .encrypt(wrap_nonce, cek.as_slice())
        .map_err(|e| anyhow::anyhow!("wrap CEK: {}", e))?;
    // nonce(12) || encrypted_cek(32) || tag(16) = 60 bytes
    let mut wrapped_cek = Vec::with_capacity(60);
    wrapped_cek.extend_from_slice(&wrap_nonce_bytes);
    wrapped_cek.extend_from_slice(&wrapped);
    Ok((encoded, wrapped_cek))
}
/// Decrypt a group-encrypted post using the group seed and public key.
///
/// Inverse of `encrypt_post_for_group`: recomputes the same self-DH of the
/// group key pair to unwrap the CEK, then decrypts the base64 payload.
pub fn decrypt_group_post(
    encrypted_b64: &str,
    group_seed: &[u8; 32],
    group_public_key: &[u8; 32],
    wrapped_cek: &[u8],
) -> Result<String> {
    // nonce(12) || encrypted_cek(32) || tag(16) = 60 bytes
    if wrapped_cek.len() != 60 {
        bail!("invalid wrapped_cek length: expected 60, got {}", wrapped_cek.len());
    }
    // Unwrap CEK using group DH
    let group_x25519_private = ed25519_seed_to_x25519_private(group_seed);
    let group_x25519_public = ed25519_pubkey_to_x25519_public(group_public_key)?;
    let shared_secret = x25519_dh(&group_x25519_private, &group_x25519_public);
    let wrapping_key = derive_group_cek_wrapping_key(&shared_secret);
    let wrap_cipher = ChaCha20Poly1305::new_from_slice(&wrapping_key)
        .map_err(|e| anyhow::anyhow!("wrap cipher init: {}", e))?;
    let wrap_nonce = Nonce::from_slice(&wrapped_cek[..12]);
    let cek = wrap_cipher
        .decrypt(wrap_nonce, &wrapped_cek[12..])
        .map_err(|e| anyhow::anyhow!("unwrap CEK: {}", e))?;
    if cek.len() != 32 {
        bail!("unwrapped CEK wrong length: {}", cek.len());
    }
    // Decode and decrypt content
    let payload = base64::engine::general_purpose::STANDARD
        .decode(encrypted_b64)
        .map_err(|e| anyhow::anyhow!("base64 decode: {}", e))?;
    // Minimum payload: nonce(12) + tag(16) for an empty plaintext.
    if payload.len() < 12 + 16 {
        bail!("encrypted payload too short");
    }
    let content_nonce = Nonce::from_slice(&payload[..12]);
    let content_cipher = ChaCha20Poly1305::new_from_slice(&cek)
        .map_err(|e| anyhow::anyhow!("content cipher init: {}", e))?;
    let plaintext = content_cipher
        .decrypt(content_nonce, &payload[12..])
        .map_err(|e| anyhow::anyhow!("decrypt content: {}", e))?;
    Ok(String::from_utf8(plaintext)?)
}
/// Rotate a group key: generate a fresh keypair, wrap it for the remaining
/// members, and return the new state.
///
/// Returns `(new_seed, new_public_key, new_epoch, member_keys)` where each
/// entry in `member_keys` carries the new seed wrapped for one member at the
/// incremented epoch.
pub fn rotate_group_key(
    admin_seed: &[u8; 32],
    current_epoch: GroupEpoch,
    remaining_members: &[NodeId],
) -> Result<([u8; 32], [u8; 32], GroupEpoch, Vec<GroupMemberKey>)> {
    let (new_seed, new_pubkey) = generate_group_keypair();
    let new_epoch = current_epoch + 1;
    // Wrap the fresh seed for every surviving member; the first wrap
    // failure aborts the rotation.
    let member_keys = remaining_members
        .iter()
        .map(|member| {
            wrap_group_key_for_member(admin_seed, member, &new_seed).map(|wrapped| {
                GroupMemberKey {
                    member: *member,
                    epoch: new_epoch,
                    wrapped_group_key: wrapped,
                }
            })
        })
        .collect::<Result<Vec<_>>>()?;
    Ok((new_seed, new_pubkey, new_epoch, member_keys))
}
// --- CDN Manifest Signing ---
/// Compute the canonical digest for an AuthorManifest (for signing/verification).
/// Digest = BLAKE3(post_id ‖ author ‖ created_at_le ‖ updated_at_le ‖ author_addresses_json ‖ previous_posts_json ‖ following_posts_json)
///
/// NOTE(review): fields are concatenated without length prefixes, and a JSON
/// serialization failure silently contributes an empty string
/// (`unwrap_or_default`), so this encoding is not strictly injective —
/// confirm that is acceptable for a signed digest.
fn manifest_digest(manifest: &crate::types::AuthorManifest) -> [u8; 32] {
    let mut hasher = blake3::Hasher::new();
    hasher.update(&manifest.post_id);
    hasher.update(&manifest.author);
    hasher.update(&manifest.created_at.to_le_bytes());
    hasher.update(&manifest.updated_at.to_le_bytes());
    let addrs_json = serde_json::to_string(&manifest.author_addresses).unwrap_or_default();
    hasher.update(addrs_json.as_bytes());
    let prev_json = serde_json::to_string(&manifest.previous_posts).unwrap_or_default();
    hasher.update(prev_json.as_bytes());
    let next_json = serde_json::to_string(&manifest.following_posts).unwrap_or_default();
    hasher.update(next_json.as_bytes());
    *hasher.finalize().as_bytes()
}
/// Sign an AuthorManifest: BLAKE3 digest → ed25519 sign.
/// Returns the 64-byte signature as a Vec.
pub fn sign_manifest(seed: &[u8; 32], manifest: &crate::types::AuthorManifest) -> Vec<u8> {
    let key = SigningKey::from_bytes(seed);
    key.sign(&manifest_digest(manifest)).to_bytes().to_vec()
}
/// Verify an AuthorManifest signature against the embedded author public key.
///
/// Returns false for malformed signatures or keys rather than erroring.
/// Uses `Signature::from_slice`, which already rejects any slice that is not
/// exactly 64 bytes, replacing the previous manual length check + `try_into`
/// and matching the style of `verify_comment_signature`.
pub fn verify_manifest_signature(manifest: &crate::types::AuthorManifest) -> bool {
    let Ok(sig) = ed25519_dalek::Signature::from_slice(&manifest.signature) else {
        return false;
    };
    let Ok(verifying_key) = VerifyingKey::from_bytes(&manifest.author) else {
        return false;
    };
    verifying_key.verify(&manifest_digest(manifest), &sig).is_ok()
}
/// Re-encrypt a post with a brand new CEK for a new set of recipients.
///
/// Decrypts with the old keys, then encrypts fresh. Returns new
/// (base64_ciphertext, wrapped_keys). Unlike `rewrap_visibility`, this
/// rotates the CEK, so previously revoked recipients cannot reuse an old key.
///
/// NOTE(review): `decrypt_post` is called with `our_node_id` as the sender,
/// i.e. a DH with ourselves — that only matches the author's self-wrapped
/// entry, so this works for the post author only; confirm callers enforce that.
pub fn re_encrypt_post(
    encrypted_b64: &str,
    our_seed: &[u8; 32],
    our_node_id: &NodeId,
    existing_recipients: &[WrappedKey],
    new_recipient_ids: &[NodeId],
) -> Result<(String, Vec<WrappedKey>)> {
    let plaintext = decrypt_post(encrypted_b64, our_seed, our_node_id, our_node_id, existing_recipients)?
        .ok_or_else(|| anyhow::anyhow!("cannot decrypt post for re-encryption"))?;
    encrypt_post(&plaintext, our_seed, our_node_id, new_recipient_ids)
}
// --- Engagement crypto ---
/// Domain-separation context for private-reaction encryption keys.
const REACTION_WRAP_CONTEXT: &str = "itsgoin/private-reaction/v1";
/// Domain-separation context for comment-signature digests.
const COMMENT_SIGN_CONTEXT: &str = "itsgoin/comment-sig/v1";
/// Encrypt a private reaction payload (only the post author can decrypt).
/// Uses X25519 DH between reactor and author, a BLAKE3-derived key, then
/// ChaCha20-Poly1305 with a random nonce.
/// Returns base64(nonce(12) || ciphertext || tag(16)).
pub fn encrypt_private_reaction(
    reactor_seed: &[u8; 32],
    author_node_id: &NodeId,
    plaintext: &str,
) -> Result<String> {
    let our_private = ed25519_seed_to_x25519_private(reactor_seed);
    let their_public = ed25519_pubkey_to_x25519_public(author_node_id)?;
    let shared = x25519_dh(&our_private, &their_public);
    let wrap_key = blake3::derive_key(REACTION_WRAP_CONTEXT, &shared);
    let cipher = ChaCha20Poly1305::new_from_slice(&wrap_key)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    let mut nonce_bytes = [0u8; 12];
    rand::rng().fill_bytes(&mut nonce_bytes);
    let nonce = Nonce::from_slice(&nonce_bytes);
    let ciphertext = cipher.encrypt(nonce, plaintext.as_bytes())
        .map_err(|e| anyhow::anyhow!("encrypt: {}", e))?;
    let mut combined = Vec::with_capacity(12 + ciphertext.len());
    combined.extend_from_slice(&nonce_bytes);
    combined.extend_from_slice(&ciphertext);
    Ok(base64::engine::general_purpose::STANDARD.encode(&combined))
}
/// Decrypt a private reaction (only the post author can do this).
/// Takes the author's seed and the reactor's NodeId; the DH here mirrors the
/// reactor-side DH in `encrypt_private_reaction`, yielding the same key.
pub fn decrypt_private_reaction(
    author_seed: &[u8; 32],
    reactor_node_id: &NodeId,
    encrypted_b64: &str,
) -> Result<String> {
    let our_private = ed25519_seed_to_x25519_private(author_seed);
    let their_public = ed25519_pubkey_to_x25519_public(reactor_node_id)?;
    let shared = x25519_dh(&our_private, &their_public);
    let wrap_key = blake3::derive_key(REACTION_WRAP_CONTEXT, &shared);
    let combined = base64::engine::general_purpose::STANDARD.decode(encrypted_b64)?;
    // Need at least the 12-byte nonce prefix.
    if combined.len() < 12 {
        bail!("encrypted reaction too short");
    }
    let nonce = Nonce::from_slice(&combined[..12]);
    let ciphertext = &combined[12..];
    let cipher = ChaCha20Poly1305::new_from_slice(&wrap_key)
        .map_err(|e| anyhow::anyhow!("cipher init: {}", e))?;
    let plaintext = cipher.decrypt(nonce, ciphertext)
        .map_err(|_| anyhow::anyhow!("decrypt failed — wrong key or corrupted data"))?;
    String::from_utf8(plaintext).map_err(|e| anyhow::anyhow!("invalid utf8: {}", e))
}
/// Sign a comment: ed25519 over BLAKE3(author || post_id || content || timestamp_ms).
/// The hash uses the COMMENT_SIGN_CONTEXT derive-key mode for domain
/// separation; returns the 64-byte signature as a Vec.
pub fn sign_comment(
    seed: &[u8; 32],
    author: &NodeId,
    post_id: &PostId,
    content: &str,
    timestamp_ms: u64,
) -> Vec<u8> {
    let signing_key = SigningKey::from_bytes(seed);
    let mut hasher = blake3::Hasher::new_derive_key(COMMENT_SIGN_CONTEXT);
    hasher.update(author);
    hasher.update(post_id);
    hasher.update(content.as_bytes());
    hasher.update(&timestamp_ms.to_le_bytes());
    let digest = hasher.finalize();
    signing_key.sign(digest.as_bytes()).to_bytes().to_vec()
}
/// Verify a comment's ed25519 signature.
/// Recomputes the same domain-separated BLAKE3 digest as `sign_comment` and
/// checks it against the author's public key; returns false (never errors)
/// for malformed keys or signatures.
pub fn verify_comment_signature(
    author: &NodeId,
    post_id: &PostId,
    content: &str,
    timestamp_ms: u64,
    signature: &[u8],
) -> bool {
    let Ok(verifying_key) = VerifyingKey::from_bytes(author) else {
        return false;
    };
    // from_slice rejects any slice that is not exactly 64 bytes.
    let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else {
        return false;
    };
    let mut hasher = blake3::Hasher::new_derive_key(COMMENT_SIGN_CONTEXT);
    hasher.update(author);
    hasher.update(post_id);
    hasher.update(content.as_bytes());
    hasher.update(&timestamp_ms.to_le_bytes());
    let digest = hasher.finalize();
    verifying_key.verify(digest.as_bytes(), &sig).is_ok()
}
#[cfg(test)]
mod tests {
use super::*;
fn make_keypair(seed_byte: u8) -> ([u8; 32], NodeId) {
let mut seed = [0u8; 32];
seed[0] = seed_byte;
let signing_key = SigningKey::from_bytes(&seed);
let node_id: NodeId = signing_key.verifying_key().to_bytes();
(seed, node_id)
}
#[test]
fn test_encrypt_decrypt_roundtrip() {
let (alice_seed, alice_id) = make_keypair(1);
let (bob_seed, bob_id) = make_keypair(2);
let plaintext = "Hello, Bob! This is a secret message.";
let (encrypted, wrapped_keys) =
encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id]).unwrap();
// Alice (sender) can decrypt
let decrypted =
decrypt_post(&encrypted, &alice_seed, &alice_id, &alice_id, &wrapped_keys).unwrap();
assert_eq!(decrypted.as_deref(), Some(plaintext));
// Bob (recipient) can decrypt
let decrypted =
decrypt_post(&encrypted, &bob_seed, &bob_id, &alice_id, &wrapped_keys).unwrap();
assert_eq!(decrypted.as_deref(), Some(plaintext));
}
#[test]
fn test_non_recipient_cannot_decrypt() {
let (alice_seed, alice_id) = make_keypair(1);
let (_bob_seed, bob_id) = make_keypair(2);
let (carol_seed, carol_id) = make_keypair(3);
let plaintext = "Secret for Bob only";
let (encrypted, wrapped_keys) =
encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id]).unwrap();
// Carol is not a recipient
let result =
decrypt_post(&encrypted, &carol_seed, &carol_id, &alice_id, &wrapped_keys).unwrap();
assert_eq!(result, None);
}
#[test]
fn test_author_always_included() {
let (alice_seed, alice_id) = make_keypair(1);
let (_bob_seed, bob_id) = make_keypair(2);
let (_encrypted, wrapped_keys) =
encrypt_post("test", &alice_seed, &alice_id, &[bob_id]).unwrap();
// Alice should be in recipients even though only Bob was passed
assert!(wrapped_keys.iter().any(|wk| wk.recipient == alice_id));
assert!(wrapped_keys.iter().any(|wk| wk.recipient == bob_id));
}
#[test]
fn test_multiple_recipients() {
let (alice_seed, alice_id) = make_keypair(1);
let (bob_seed, bob_id) = make_keypair(2);
let (carol_seed, carol_id) = make_keypair(3);
let plaintext = "Group message!";
let (encrypted, wrapped_keys) =
encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id, carol_id]).unwrap();
// All three can decrypt
for (seed, nid) in [
(&alice_seed, &alice_id),
(&bob_seed, &bob_id),
(&carol_seed, &carol_id),
] {
let result =
decrypt_post(&encrypted, seed, nid, &alice_id, &wrapped_keys).unwrap();
assert_eq!(result.as_deref(), Some(plaintext));
}
}
#[test]
fn test_x25519_conversion() {
    // The Ed25519→X25519 conversion must keep the keypair consistent:
    // converted private key times the Montgomery basepoint equals the
    // converted public key.
    let (seed, node_id) = make_keypair(42);
    let x_priv = ed25519_seed_to_x25519_private(&seed);
    let x_pub = ed25519_pubkey_to_x25519_public(&node_id).unwrap();
    // Verify: private * basepoint == public
    let derived_pub = MontgomeryPoint::mul_base_clamped(x_priv);
    assert_eq!(derived_pub.to_bytes(), x_pub);
}
#[test]
fn test_sign_verify_delete() {
    // A delete signature is 64 bytes and verifies against the signer's key.
    let (signer_seed, signer_id) = make_keypair(1);
    let target_post = [42u8; 32];
    let signature = sign_delete(&signer_seed, &target_post);
    assert_eq!(signature.len(), 64);
    assert!(verify_delete_signature(&signer_id, &target_post, &signature));
}
#[test]
fn test_forged_delete_rejected() {
    // A delete signature must fail for a non-matching key AND for a
    // non-matching post id.
    // Fix: the binding was named `_alice_id` but is used in the last assert;
    // the leading underscore (which marks intentionally-unused bindings) was
    // misleading, so it is renamed to `alice_id`.
    let (seed, alice_id) = make_keypair(1);
    let (_bob_seed, bob_id) = make_keypair(2);
    let post_id = [42u8; 32];
    // Alice signs, but we check against Bob's key
    let sig = sign_delete(&seed, &post_id);
    assert!(!verify_delete_signature(&bob_id, &post_id, &sig));
    // Wrong post_id
    let wrong_id = [99u8; 32];
    assert!(!verify_delete_signature(&alice_id, &wrong_id, &sig));
}
#[test]
fn test_rewrap_roundtrip() {
    // Re-wrapping changes only the wrapped-key list, not the ciphertext:
    // members kept in the new recipient set still decrypt successfully.
    let (alice_seed, alice_id) = make_keypair(1);
    let (bob_seed, bob_id) = make_keypair(2);
    let (_carol_seed, carol_id) = make_keypair(3);
    let plaintext = "secret message";
    let (encrypted, original_keys) =
        encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id, carol_id]).unwrap();
    // Re-wrap excluding carol (only alice + bob remain)
    let new_keys =
        rewrap_visibility(&alice_seed, &alice_id, &original_keys, &[alice_id, bob_id]).unwrap();
    // Alice can still decrypt
    let dec = decrypt_post(&encrypted, &alice_seed, &alice_id, &alice_id, &new_keys).unwrap();
    assert_eq!(dec.as_deref(), Some(plaintext));
    // Bob can still decrypt
    let dec = decrypt_post(&encrypted, &bob_seed, &bob_id, &alice_id, &new_keys).unwrap();
    assert_eq!(dec.as_deref(), Some(plaintext));
}
#[test]
fn test_revoked_cannot_decrypt_after_rewrap() {
    // Complement of test_rewrap_roundtrip: a member dropped from the new
    // recipient set gets None when using the re-wrapped keys.
    let (alice_seed, alice_id) = make_keypair(1);
    let (_bob_seed, bob_id) = make_keypair(2);
    let (carol_seed, carol_id) = make_keypair(3);
    let plaintext = "secret message";
    let (encrypted, original_keys) =
        encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id, carol_id]).unwrap();
    // Re-wrap excluding carol
    let new_keys =
        rewrap_visibility(&alice_seed, &alice_id, &original_keys, &[alice_id, bob_id]).unwrap();
    // Carol cannot decrypt with new keys
    let dec = decrypt_post(&encrypted, &carol_seed, &carol_id, &alice_id, &new_keys).unwrap();
    assert_eq!(dec, None);
}
#[test]
fn test_re_encrypt_roundtrip() {
    // Unlike rewrap, re_encrypt_post produces a NEW ciphertext (fresh CEK),
    // so a revoked member is locked out even if they kept the old key list.
    let (alice_seed, alice_id) = make_keypair(1);
    let (bob_seed, bob_id) = make_keypair(2);
    let (carol_seed, carol_id) = make_keypair(3);
    let plaintext = "re-encrypt test";
    let (encrypted, original_keys) =
        encrypt_post(plaintext, &alice_seed, &alice_id, &[bob_id, carol_id]).unwrap();
    // Re-encrypt excluding carol
    let (new_encrypted, new_keys) =
        re_encrypt_post(&encrypted, &alice_seed, &alice_id, &original_keys, &[bob_id]).unwrap();
    // Bob can decrypt new ciphertext
    let dec = decrypt_post(&new_encrypted, &bob_seed, &bob_id, &alice_id, &new_keys).unwrap();
    assert_eq!(dec.as_deref(), Some(plaintext));
    // Carol cannot decrypt new ciphertext (not a recipient + different CEK)
    let dec = decrypt_post(&new_encrypted, &carol_seed, &carol_id, &alice_id, &new_keys).unwrap();
    assert_eq!(dec, None);
    // Carol cannot decrypt new ciphertext even with old keys (different CEK — will error or return wrong plaintext)
    let dec = decrypt_post(&new_encrypted, &carol_seed, &carol_id, &alice_id, &original_keys);
    // Either returns None (not a recipient in new keys) or an error (wrong CEK for new ciphertext)
    match dec {
        Ok(None) => {} // Not a recipient
        Err(_) => {} // AEAD decryption failure — expected with wrong CEK
        Ok(Some(_)) => panic!("carol should not be able to decrypt re-encrypted post"),
    }
}
#[test]
fn test_sign_verify_manifest() {
    // A manifest signed over its (empty-signature) contents must verify,
    // and the signature must be the expected 64-byte Ed25519 size.
    use crate::types::{AuthorManifest, ManifestEntry};
    let (seed, node_id) = make_keypair(1);
    let mut manifest = AuthorManifest {
        post_id: [42u8; 32],
        author: node_id,
        author_addresses: vec!["10.0.0.1:4433".to_string()],
        created_at: 1000,
        updated_at: 2000,
        previous_posts: vec![ManifestEntry {
            post_id: [1u8; 32],
            timestamp_ms: 900,
            has_attachments: false,
        }],
        following_posts: vec![],
        // Signature filled in after construction, below.
        signature: vec![],
    };
    manifest.signature = sign_manifest(&seed, &manifest);
    assert_eq!(manifest.signature.len(), 64);
    assert!(verify_manifest_signature(&manifest));
}
#[test]
fn test_forged_manifest_rejected() {
    // Tampering with any signed field (author, updated_at) after signing
    // must make verification fail.
    use crate::types::AuthorManifest;
    let (seed, node_id) = make_keypair(1);
    let (_bob_seed, bob_id) = make_keypair(2);
    let mut manifest = AuthorManifest {
        post_id: [42u8; 32],
        author: node_id,
        author_addresses: vec![],
        created_at: 1000,
        updated_at: 2000,
        previous_posts: vec![],
        following_posts: vec![],
        signature: vec![],
    };
    manifest.signature = sign_manifest(&seed, &manifest);
    // Tamper with author → verification fails
    manifest.author = bob_id;
    assert!(!verify_manifest_signature(&manifest));
    // Restore author, tamper with updated_at → fails
    manifest.author = node_id;
    manifest.updated_at = 9999;
    assert!(!verify_manifest_signature(&manifest));
}
#[test]
fn test_group_key_gen_and_id() {
    // Fresh group keypairs are unique, and the group id derived from a
    // public key is distinct per key and deterministic.
    let (first_seed, first_pub) = generate_group_keypair();
    let (second_seed, second_pub) = generate_group_keypair();
    assert_ne!(first_seed, second_seed);
    assert_ne!(first_pub, second_pub);
    let first_id = compute_group_id(&first_pub);
    let second_id = compute_group_id(&second_pub);
    assert_ne!(first_id, second_id);
    // Same input, same id.
    assert_eq!(compute_group_id(&first_pub), first_id);
}
#[test]
fn test_group_key_wrap_unwrap_roundtrip() {
    // An admin wraps the group seed for a member; the member unwraps it
    // using the admin's public key and recovers the exact seed.
    let (admin_seed, admin_id) = make_keypair(1);
    let (bob_seed, bob_id) = make_keypair(2);
    let (group_seed, _group_pubkey) = generate_group_keypair();
    // Admin wraps for Bob
    let wrapped = wrap_group_key_for_member(&admin_seed, &bob_id, &group_seed).unwrap();
    // 60-byte wrapped blob (fixed size asserted throughout these tests).
    assert_eq!(wrapped.len(), 60);
    // Bob unwraps using admin's public key
    let unwrapped = unwrap_group_key(&bob_seed, &admin_id, &wrapped).unwrap();
    assert_eq!(unwrapped, group_seed);
}
#[test]
fn test_group_key_wrap_unwrap_self() {
    // An admin can wrap the group key for themselves and unwrap it again.
    let (admin_seed, admin_id) = make_keypair(1);
    let (group_key, _) = generate_group_keypair();
    let sealed = wrap_group_key_for_member(&admin_seed, &admin_id, &group_key).unwrap();
    let recovered = unwrap_group_key(&admin_seed, &admin_id, &sealed).unwrap();
    assert_eq!(recovered, group_key);
}
#[test]
fn test_group_encrypt_decrypt_roundtrip() {
    // Group posts round-trip: encrypt with the group seed/pubkey, decrypt
    // with the same, recovering the plaintext; wrapped CEK is 60 bytes.
    let (group_seed, group_pubkey) = generate_group_keypair();
    let plaintext = "Hello group members!";
    let (encrypted, wrapped_cek) = encrypt_post_for_group(plaintext, &group_seed, &group_pubkey).unwrap();
    assert_eq!(wrapped_cek.len(), 60);
    let decrypted = decrypt_group_post(&encrypted, &group_seed, &group_pubkey, &wrapped_cek).unwrap();
    assert_eq!(decrypted, plaintext);
}
#[test]
fn test_group_decrypt_wrong_seed_fails() {
    // Decrypting with an unrelated group seed must be an Err, not a wrong
    // plaintext.
    let (group_seed, group_pubkey) = generate_group_keypair();
    let (wrong_seed, _) = generate_group_keypair();
    let plaintext = "Secret message";
    let (encrypted, wrapped_cek) = encrypt_post_for_group(plaintext, &group_seed, &group_pubkey).unwrap();
    let result = decrypt_group_post(&encrypted, &wrong_seed, &group_pubkey, &wrapped_cek);
    assert!(result.is_err());
}
#[test]
fn test_rotate_group_key() {
    // Rotating from epoch 1 yields epoch 2, a wrapped key per member, and a
    // new seed/pubkey that works for encryption.
    let (admin_seed, admin_id) = make_keypair(1);
    let (_bob_seed, bob_id) = make_keypair(2);
    let (_carol_seed, carol_id) = make_keypair(3);
    let (new_seed, new_pubkey, new_epoch, member_keys) =
        rotate_group_key(&admin_seed, 1, &[admin_id, bob_id, carol_id]).unwrap();
    assert_eq!(new_epoch, 2);
    assert_eq!(member_keys.len(), 3);
    for mk in &member_keys {
        assert_eq!(mk.epoch, 2);
        assert_eq!(mk.wrapped_group_key.len(), 60);
    }
    // Verify the new seed can encrypt/decrypt
    let plaintext = "New epoch message";
    let (encrypted, wrapped_cek) = encrypt_post_for_group(plaintext, &new_seed, &new_pubkey).unwrap();
    let decrypted = decrypt_group_post(&encrypted, &new_seed, &new_pubkey, &wrapped_cek).unwrap();
    assert_eq!(decrypted, plaintext);
}
#[test]
fn test_rotate_then_decrypt_old_epoch() {
    // After a rotation (modeled as a fresh keypair), old-epoch ciphertext
    // stays readable with the old seed, new-epoch with the new seed, and
    // the old seed cannot read the new epoch.
    let (group_seed_v1, group_pubkey_v1) = generate_group_keypair();
    let plaintext_v1 = "Old epoch message";
    let (encrypted_v1, wrapped_cek_v1) = encrypt_post_for_group(plaintext_v1, &group_seed_v1, &group_pubkey_v1).unwrap();
    // Rotate — new key pair
    let (group_seed_v2, group_pubkey_v2) = generate_group_keypair();
    let plaintext_v2 = "New epoch message";
    let (encrypted_v2, wrapped_cek_v2) = encrypt_post_for_group(plaintext_v2, &group_seed_v2, &group_pubkey_v2).unwrap();
    // Old epoch still decryptable with old seed
    let dec_v1 = decrypt_group_post(&encrypted_v1, &group_seed_v1, &group_pubkey_v1, &wrapped_cek_v1).unwrap();
    assert_eq!(dec_v1, plaintext_v1);
    // New epoch decryptable with new seed
    let dec_v2 = decrypt_group_post(&encrypted_v2, &group_seed_v2, &group_pubkey_v2, &wrapped_cek_v2).unwrap();
    assert_eq!(dec_v2, plaintext_v2);
    // Old seed cannot decrypt new epoch
    let result = decrypt_group_post(&encrypted_v2, &group_seed_v1, &group_pubkey_v1, &wrapped_cek_v2);
    assert!(result.is_err());
}
}

748
crates/core/src/http.rs Normal file
View file

@ -0,0 +1,748 @@
//! Minimal HTTP/1.1 server for serving public posts to browsers.
//! Zero external dependencies — raw TCP with tokio.
//! Runs alongside the QUIC listener on the same port number (TCP vs UDP).
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::Mutex;
use tracing::{debug, info};
use crate::blob::BlobStore;
use crate::storage::Storage;
use crate::types::PostVisibility;
/// Connection budget: 5 content slots, 15 redirect slots, 1 per IP.
const MAX_CONTENT_SLOTS: usize = 5;
const MAX_REDIRECT_SLOTS: usize = 15;
const MAX_TOTAL: usize = MAX_CONTENT_SLOTS + MAX_REDIRECT_SLOTS;
const MAX_PER_IP: usize = 1;
/// Seconds to wait for request bytes before hard-closing (used by the
/// connection handler's read timeout).
const HEADER_TIMEOUT_SECS: u64 = 5;
/// Static HTML footer appended to every post page.
pub const POST_FOOTER: &str = r#"<footer style="margin-top:2rem;padding-top:1rem;border-top:1px solid #333;color:#888;font-size:0.85rem;text-align:center">
This post is on the ItsGoin network &mdash; content lives on people&rsquo;s devices, not servers.
<a href="https://itsgoin.com" style="color:#5b8def">Get ItsGoin</a>
</footer>"#;
/// Tracks active HTTP connections.
///
/// Two slot classes share a global cap (`MAX_TOTAL`): content slots serve
/// full pages/blobs; redirect slots only bounce requests elsewhere. Any one
/// remote IP may hold at most `MAX_PER_IP` slot of either kind.
struct HttpBudget {
    /// Live connection count per remote IP; entries are removed at zero.
    ip_counts: HashMap<IpAddr, usize>,
    /// Occupied content slots.
    content_slots: usize,
    /// Occupied redirect slots.
    redirect_slots: usize,
}
impl HttpBudget {
    fn new() -> Self {
        Self {
            ip_counts: HashMap::new(),
            content_slots: 0,
            redirect_slots: 0,
        }
    }
    /// Slots in use across both classes.
    fn total(&self) -> usize {
        self.content_slots + self.redirect_slots
    }
    /// Shared admission check: global cap, class cap, then per-IP cap.
    /// On success the per-IP count is already incremented; the caller must
    /// bump its own class counter. (Factored out of the previously duplicated
    /// `try_acquire_content` / `try_acquire_redirect` bodies.)
    fn admit(&mut self, ip: IpAddr, class_used: usize, class_max: usize) -> bool {
        if self.total() >= MAX_TOTAL || class_used >= class_max {
            return false;
        }
        let count = self.ip_counts.entry(ip).or_insert(0);
        if *count >= MAX_PER_IP {
            return false;
        }
        *count += 1;
        true
    }
    /// Decrement the per-IP count, dropping the map entry at zero.
    fn release_ip(&mut self, ip: IpAddr) {
        if let Some(count) = self.ip_counts.get_mut(&ip) {
            *count = count.saturating_sub(1);
            if *count == 0 {
                self.ip_counts.remove(&ip);
            }
        }
    }
    fn try_acquire_content(&mut self, ip: IpAddr) -> bool {
        if self.admit(ip, self.content_slots, MAX_CONTENT_SLOTS) {
            self.content_slots += 1;
            true
        } else {
            false
        }
    }
    fn try_acquire_redirect(&mut self, ip: IpAddr) -> bool {
        if self.admit(ip, self.redirect_slots, MAX_REDIRECT_SLOTS) {
            self.redirect_slots += 1;
            true
        } else {
            false
        }
    }
    fn release_content(&mut self, ip: IpAddr) {
        // saturating_sub guards against a release without a matching acquire.
        self.content_slots = self.content_slots.saturating_sub(1);
        self.release_ip(ip);
    }
    fn release_redirect(&mut self, ip: IpAddr) {
        self.redirect_slots = self.redirect_slots.saturating_sub(1);
        self.release_ip(ip);
    }
}
/// Run the HTTP server on the given port. Blocks forever.
///
/// Accepts plain-TCP browser connections, classifies each one into a
/// content or redirect slot via `HttpBudget`, and spawns a task per
/// connection. Over-budget connections are dropped without a response.
pub async fn run_http_server(
    port: u16,
    storage: Arc<Mutex<Storage>>,
    blob_store: Arc<BlobStore>,
    downstream_addrs: Arc<Mutex<HashMap<[u8; 32], Vec<SocketAddr>>>>,
) -> anyhow::Result<()> {
    let addr: SocketAddr = ([0, 0, 0, 0], port).into();
    // Use SO_REUSEADDR + SO_REUSEPORT so TCP punch sockets can share the port
    let socket = tokio::net::TcpSocket::new_v4()?;
    socket.set_reuseaddr(true)?;
    #[cfg(unix)]
    socket.set_reuseport(true)?;
    socket.bind(addr)?;
    let listener = socket.listen(128)?;
    info!("HTTP server listening on TCP port {}", port);
    // std Mutex, not tokio: the budget is only held in short non-await scopes.
    let budget = Arc::new(std::sync::Mutex::new(HttpBudget::new()));
    loop {
        let (stream, peer_addr) = match listener.accept().await {
            Ok(v) => v,
            Err(e) => {
                debug!("HTTP accept error: {}", e);
                continue;
            }
        };
        let ip = peer_addr.ip();
        // Try to acquire a content slot first (keeps connection alive for blob pulls).
        // If content slots full, try redirect slot.
        let slot = {
            let mut b = budget.lock().unwrap();
            if b.try_acquire_content(ip) {
                Some(SlotKind::Content)
            } else if b.try_acquire_redirect(ip) {
                Some(SlotKind::Redirect)
            } else {
                None
            }
        };
        let slot = match slot {
            Some(s) => s,
            None => {
                // Over budget — hard close
                drop(stream);
                continue;
            }
        };
        let storage = Arc::clone(&storage);
        let blob_store = Arc::clone(&blob_store);
        let budget = Arc::clone(&budget);
        let downstream_addrs = Arc::clone(&downstream_addrs);
        tokio::spawn(async move {
            handle_connection(stream, ip, slot, &storage, &blob_store, &downstream_addrs).await;
            // Always release the slot once the handler returns, whatever path
            // it exited through.
            let mut b = budget.lock().unwrap();
            match slot {
                SlotKind::Content => b.release_content(ip),
                SlotKind::Redirect => b.release_redirect(ip),
            }
        });
    }
}
/// Which budget class a connection occupies (see `HttpBudget`).
#[derive(Debug, Clone, Copy)]
enum SlotKind {
    // Serves post HTML and blob bytes; connection may stay alive.
    Content,
    // May only issue 302 redirects for posts; blob requests are hard-closed.
    Redirect,
}
/// Handle one HTTP connection (potentially keep-alive with multiple requests).
///
/// Only `GET /p/<hex>` and `GET /b/<hex>` are recognized; anything else —
/// malformed request line, non-GET method, unknown path, bad hex — returns
/// immediately, which hard-closes the socket.
/// NOTE(review): the request is parsed from a single read of up to 4096
/// bytes; a request line split across reads is treated as malformed —
/// confirm this is acceptable for the targeted browsers.
async fn handle_connection(
    mut stream: TcpStream,
    _ip: IpAddr,
    slot: SlotKind,
    storage: &Arc<Mutex<Storage>>,
    blob_store: &Arc<BlobStore>,
    downstream_addrs: &Arc<Mutex<HashMap<[u8; 32], Vec<SocketAddr>>>>,
) {
    // Keep-alive loop: handle sequential requests on the same connection
    loop {
        let mut buf = vec![0u8; 4096];
        let n = match tokio::time::timeout(
            std::time::Duration::from_secs(HEADER_TIMEOUT_SECS),
            stream.read(&mut buf),
        )
        .await
        {
            Ok(Ok(0)) => return, // connection closed
            Ok(Ok(n)) => n,
            Ok(Err(_)) | Err(_) => return, // error or timeout — hard close
        };
        let request = &buf[..n];
        // Parse method and path from first line
        let (method, path) = match parse_request_line(request) {
            Some(v) => v,
            None => return, // malformed — hard close
        };
        if method != "GET" {
            return; // only GET — hard close
        }
        if let Some(hex) = path.strip_prefix("/p/") {
            let post_id = match validate_hex64(hex) {
                Some(id) => id,
                None => return, // malformed — hard close
            };
            // Serve functions return false when the connection should close.
            match slot {
                SlotKind::Content => {
                    if !serve_post(&mut stream, &post_id, storage, blob_store).await {
                        return;
                    }
                }
                SlotKind::Redirect => {
                    if !try_redirect(&mut stream, &post_id, storage, downstream_addrs).await {
                        return;
                    }
                }
            }
        } else if let Some(hex) = path.strip_prefix("/b/") {
            let blob_id = match validate_hex64(hex) {
                Some(id) => id,
                None => return, // malformed — hard close
            };
            match slot {
                SlotKind::Content => {
                    if !serve_blob(&mut stream, &blob_id, storage, blob_store).await {
                        return;
                    }
                }
                SlotKind::Redirect => {
                    // Redirect blob requests to the same host as the post
                    // (browser will follow the redirect and pull from there)
                    return; // hard close — blobs only served on content slots
                }
            }
        } else {
            return; // unknown path — hard close
        }
    }
}
/// Parse "GET /path HTTP/1.x\r\n..." → ("GET", "/path")
///
/// Returns None when there is no line terminator, the line is not UTF-8,
/// fewer than three space-separated tokens are present, or the third token
/// is not an HTTP version.
fn parse_request_line(buf: &[u8]) -> Option<(&str, &str)> {
    let end = buf.iter().position(|&b| b == b'\r' || b == b'\n')?;
    let mut tokens = std::str::from_utf8(&buf[..end]).ok()?.split(' ');
    match (tokens.next(), tokens.next(), tokens.next()) {
        (Some(method), Some(path), Some(version)) if version.starts_with("HTTP/") => {
            Some((method, path))
        }
        _ => None,
    }
}
/// Validate a string is exactly 64 lowercase hex chars and decode to 32 bytes.
fn validate_hex64(s: &str) -> Option<[u8; 32]> {
    if s.len() != 64 {
        return None;
    }
    // Reject uppercase explicitly: is_ascii_hexdigit accepts A-F too, and
    // IDs are canonically lowercase on the wire.
    if !s.chars().all(|c| c.is_ascii_hexdigit() && !c.is_ascii_uppercase()) {
        return None;
    }
    let bytes = hex::decode(s).ok()?;
    bytes.try_into().ok()
}
/// Serve a post as HTML. Returns true if connection should stay alive.
///
/// Non-existent and non-public posts are indistinguishable to the client:
/// both hard-close without a response.
async fn serve_post(
    stream: &mut TcpStream,
    post_id: &[u8; 32],
    storage: &Arc<Mutex<Storage>>,
    blob_store: &Arc<BlobStore>,
) -> bool {
    // Look up post + visibility
    let result = {
        let store = storage.lock().await;
        store.get_post_with_visibility(post_id)
    };
    let (post, visibility) = match result {
        Ok(Some((p, v))) => (p, v),
        _ => return false, // not found — hard close (same as "not public")
    };
    if !matches!(visibility, PostVisibility::Public) {
        return false; // not public — hard close
    }
    // Look up author name
    // Falls back to "" (then a hex prefix inside render_post_html) when the
    // profile is missing or the lookup errors.
    let author_name = {
        let store = storage.lock().await;
        store
            .get_profile(&post.author)
            .ok()
            .flatten()
            .map(|p| p.display_name)
            .unwrap_or_default()
    };
    let _ = blob_store; // blob data served via /b/ route, not inlined
    // Build HTML
    let html = render_post_html(&post, post_id, &author_name);
    write_http_response(stream, 200, "text/html; charset=utf-8", html.as_bytes()).await
}
/// Serve a blob's raw bytes. Returns true if connection should stay alive.
///
/// A blob is only served when it is an attachment of a public post; the
/// mime type comes from that post's attachment record.
async fn serve_blob(
    stream: &mut TcpStream,
    blob_id: &[u8; 32],
    storage: &Arc<Mutex<Storage>>,
    blob_store: &Arc<BlobStore>,
) -> bool {
    // Verify this blob belongs to a public post
    let (mime_type, _post_id) = {
        let store = storage.lock().await;
        match find_public_blob_info(&store, blob_id) {
            Some(info) => info,
            None => return false, // not found or not public — hard close
        }
    };
    // Read blob data from filesystem
    let data = match blob_store.get(blob_id) {
        Ok(Some(data)) => data,
        _ => return false, // blob not on disk — hard close
    };
    write_http_response(stream, 200, &mime_type, &data).await
}
/// Find a blob's mime type and verify it belongs to a public post.
/// Returns (mime_type, post_id) or None.
fn find_public_blob_info(store: &Storage, blob_id: &[u8; 32]) -> Option<(String, [u8; 32])> {
    // The blobs table maps blob → owning post; that post must be public.
    let post_id = store.get_blob_post_id(blob_id).ok()??;
    let (post, visibility) = store.get_post_with_visibility(&post_id).ok()??;
    if !matches!(visibility, PostVisibility::Public) {
        return None;
    }
    // Mime type comes from the matching attachment entry. A blob recorded
    // for the post but absent from its attachment list yields None
    // (shouldn't happen).
    post.attachments
        .iter()
        .find(|att| att.cid == *blob_id)
        .map(|att| (att.mime_type.clone(), post_id))
}
/// Try to 302 redirect to a downstream host that has this post.
/// Returns true if redirect was sent, false to hard close.
///
/// NOTE(review): the `_downstream_addrs` cache parameter is currently
/// unused — candidate addresses come from stored peer records instead;
/// confirm whether the cache was meant to take priority.
async fn try_redirect(
    stream: &mut TcpStream,
    post_id: &[u8; 32],
    storage: &Arc<Mutex<Storage>>,
    _downstream_addrs: &Arc<Mutex<HashMap<[u8; 32], Vec<SocketAddr>>>>,
) -> bool {
    // Get downstream peers for this post
    let downstream_peers = {
        let store = storage.lock().await;
        // Verify post exists and is public first
        match store.get_post_with_visibility(post_id) {
            Ok(Some((_, PostVisibility::Public))) => {}
            _ => return false, // not found or not public — hard close
        }
        store.get_post_downstream(post_id).unwrap_or_default()
    };
    // Get addresses for downstream peers
    // Only publicly-routable addresses are usable by an arbitrary browser.
    let candidates: Vec<SocketAddr> = {
        let store = storage.lock().await;
        let mut addrs = Vec::new();
        for peer_id in &downstream_peers {
            if let Ok(Some(peer)) = store.get_peer_record(peer_id) {
                for addr in &peer.addresses {
                    if crate::network::is_publicly_routable(addr) {
                        addrs.push(*addr);
                        break; // one address per peer is enough
                    }
                }
            }
        }
        addrs
    };
    // TCP probe candidates (200ms timeout) and redirect to first live one
    let post_hex = hex::encode(post_id);
    for candidate in &candidates {
        if tcp_probe(candidate, 200).await {
            let location = format_http_url(candidate, &format!("/p/{}", post_hex));
            return write_redirect(stream, &location).await;
        }
    }
    false // all dead — hard close
}
/// TCP handshake probe with timeout in milliseconds.
///
/// True only when the connect completes within the deadline; both timeout
/// and connection failure count as dead.
async fn tcp_probe(addr: &SocketAddr, timeout_ms: u64) -> bool {
    let deadline = std::time::Duration::from_millis(timeout_ms);
    matches!(
        tokio::time::timeout(deadline, TcpStream::connect(addr)).await,
        Ok(Ok(_))
    )
}
/// Format an HTTP URL for a socket address (handles IPv6 bracket notation).
fn format_http_url(addr: &SocketAddr, path: &str) -> String {
    // IPv6 literals need brackets to separate the address from the port.
    match addr.ip() {
        IpAddr::V4(ip) => format!("http://{}:{}{}", ip, addr.port(), path),
        IpAddr::V6(ip) => format!("http://[{}]:{}{}", ip, addr.port(), path),
    }
}
/// Write a 302 redirect response. Returns true on success.
async fn write_redirect(stream: &mut TcpStream, location: &str) -> bool {
    // Assembled by hand to keep the exact header layout explicit.
    let mut response = String::with_capacity(96 + location.len());
    response.push_str("HTTP/1.1 302 Found\r\nLocation: ");
    response.push_str(location);
    response.push_str("\r\nContent-Length: 0\r\nConnection: close\r\n\r\n");
    stream.write_all(response.as_bytes()).await.is_ok()
}
/// Write an HTTP response with status, content type, and body. Returns true on success.
///
/// Header and body are written separately; the body write is skipped when
/// the header write already failed.
async fn write_http_response(
    stream: &mut TcpStream,
    status: u16,
    content_type: &str,
    body: &[u8],
) -> bool {
    // Only 200 gets a proper reason phrase; everything else is generic.
    let reason = if status == 200 { "OK" } else { "Error" };
    let header = format!(
        "HTTP/1.1 {} {}\r\nContent-Type: {}\r\nContent-Length: {}\r\nAccess-Control-Allow-Origin: *\r\nConnection: keep-alive\r\n\r\n",
        status, reason, content_type, body.len()
    );
    stream.write_all(header.as_bytes()).await.is_ok() && stream.write_all(body).await.is_ok()
}
/// Render a post as a minimal HTML page.
///
/// Content and display name are HTML-escaped; when `author_name` is empty
/// the first 12 hex chars of the author key are shown instead. Attachments
/// become `<video>`/`<img>` tags pointing at the `/b/<cid>` route.
pub fn render_post_html(post: &crate::types::Post, _post_id: &[u8; 32], author_name: &str) -> String {
    let escaped_content = html_escape(&post.content);
    // Fix: bind the encoded key to a named local before slicing it. The
    // previous `&hex::encode(..)[..12]` borrowed a temporary created in an
    // `if` tail expression, which relies on Edition-2021 temporary lifetime
    // extension and is rejected under Edition 2024's earlier tail-expression
    // temporary drops.
    let author_hex = hex::encode(&post.author);
    let display_name = if author_name.is_empty() {
        &author_hex[..12]
    } else {
        author_name
    };
    let escaped_name = html_escape(display_name);
    let mut attachments_html = String::new();
    for att in &post.attachments {
        let cid_hex = hex::encode(&att.cid);
        if att.mime_type.starts_with("video/") {
            attachments_html.push_str(&format!(
                r#"<video src="/b/{}" controls style="max-width:100%;margin:0.5rem 0;border-radius:8px"></video>"#,
                cid_hex
            ));
        } else {
            // Anything non-video is rendered as an image.
            attachments_html.push_str(&format!(
                r#"<img src="/b/{}" style="max-width:100%;margin:0.5rem 0;border-radius:8px" alt="attachment">"#,
                cid_hex
            ));
        }
    }
    // Seconds since epoch; the inline script rewrites it in local time.
    let timestamp = post.timestamp_ms / 1000;
    format!(
        r#"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{name} on ItsGoin</title>
<style>
body{{font-family:-apple-system,BlinkMacSystemFont,sans-serif;max-width:600px;margin:2rem auto;padding:0 1rem;background:#0d0d0d;color:#e0e0e0}}
.post{{background:#1a1a1a;border-radius:12px;padding:1.5rem;margin-bottom:1rem}}
.author{{color:#5b8def;font-weight:600;margin-bottom:0.5rem}}
.content{{white-space:pre-wrap;line-height:1.5}}
.time{{color:#666;font-size:0.8rem;margin-top:0.75rem}}
a{{color:#5b8def;text-decoration:none}}
</style>
</head>
<body>
<div class="post">
<div class="author">{name}</div>
<div class="content">{content}</div>
{attachments}
<div class="time"><script>document.currentScript.parentElement.textContent=new Date({ts}*1000).toLocaleString()</script></div>
</div>
{footer}
</body>
</html>"#,
        name = escaped_name,
        content = escaped_content,
        attachments = attachments_html,
        ts = timestamp,
        footer = POST_FOOTER,
    )
}
/// Execute a TCP hole punch: send a SYN toward the browser's IP from our HTTP port.
/// The connect will almost certainly fail (browser isn't listening), but the outbound
/// SYN creates a NAT mapping allowing the browser's inbound HTTP connection.
///
/// Returns false only when socket creation or binding fails; the connect
/// outcome itself is ignored and the function then returns true.
pub async fn tcp_punch(http_port: u16, browser_ip: std::net::IpAddr) -> bool {
    use std::net::SocketAddr;
    // Bind to the same port as our HTTP server (SO_REUSEPORT allows this)
    // Socket family must match the target address family.
    let socket = match browser_ip {
        std::net::IpAddr::V4(_) => tokio::net::TcpSocket::new_v4(),
        std::net::IpAddr::V6(_) => tokio::net::TcpSocket::new_v6(),
    };
    let socket = match socket {
        Ok(s) => s,
        Err(e) => {
            debug!("TCP punch: failed to create socket: {}", e);
            return false;
        }
    };
    // Best-effort: reuse flags may be unsupported on some platforms.
    let _ = socket.set_reuseaddr(true);
    #[cfg(unix)]
    let _ = socket.set_reuseport(true);
    let local_addr: SocketAddr = match browser_ip {
        std::net::IpAddr::V4(_) => ([0, 0, 0, 0], http_port).into(),
        std::net::IpAddr::V6(_) => (std::net::Ipv6Addr::UNSPECIFIED, http_port).into(),
    };
    if let Err(e) = socket.bind(local_addr) {
        debug!("TCP punch: failed to bind port {}: {}", http_port, e);
        return false;
    }
    // Connect to browser IP on port 80 (destination port doesn't matter for EIM NAT,
    // the purpose is to create a NAT mapping entry). 500ms timeout — fire and forget.
    let target: SocketAddr = (browser_ip, 80).into();
    let _ = tokio::time::timeout(
        std::time::Duration::from_millis(500),
        socket.connect(target),
    ).await;
    // Success or failure doesn't matter — the SYN left our NAT
    debug!(browser_ip = %browser_ip, port = http_port, "TCP punch SYN sent");
    true
}
/// Minimal HTML entity escaping.
///
/// Escapes the five characters with meaning in HTML text and attribute
/// contexts: `< > & " '`. Everything else passes through unchanged.
pub fn html_escape(s: &str) -> String {
    s.chars().fold(String::with_capacity(s.len()), |mut acc, ch| {
        match ch {
            '<' => acc.push_str("&lt;"),
            '>' => acc.push_str("&gt;"),
            '&' => acc.push_str("&amp;"),
            '"' => acc.push_str("&quot;"),
            '\'' => acc.push_str("&#39;"),
            other => acc.push(other),
        }
        acc
    })
}
// --- Share link generation ---
/// Encode a list of socket addresses as compact binary, then base64url.
/// Per IPv4: [0x04][4 bytes IP][2 bytes port] = 7 bytes
/// Per IPv6: [0x06][16 bytes IP][2 bytes port] = 19 bytes
///
/// At most the first five addresses are encoded.
pub fn encode_hostlist(hosts: &[SocketAddr]) -> String {
    let mut packed = Vec::with_capacity(hosts.len() * 19);
    for host in hosts.iter().take(5) {
        // Tag byte identifies the family; port is big-endian in both cases.
        let (tag, ip_bytes): (u8, Vec<u8>) = match host {
            SocketAddr::V4(v4) => (0x04, v4.ip().octets().to_vec()),
            SocketAddr::V6(v6) => (0x06, v6.ip().octets().to_vec()),
        };
        packed.push(tag);
        packed.extend_from_slice(&ip_bytes);
        packed.extend_from_slice(&host.port().to_be_bytes());
    }
    base64url_encode(&packed)
}
/// Decode a base64url-encoded hostlist back to socket addresses.
pub fn decode_hostlist(encoded: &str) -> Vec<SocketAddr> {
let buf = match base64url_decode(encoded) {
Some(b) => b,
None => return Vec::new(),
};
let mut addrs = Vec::new();
let mut i = 0;
while i < buf.len() {
match buf[i] {
0x04 if i + 7 <= buf.len() => {
let ip = std::net::Ipv4Addr::new(buf[i + 1], buf[i + 2], buf[i + 3], buf[i + 4]);
let port = u16::from_be_bytes([buf[i + 5], buf[i + 6]]);
addrs.push(SocketAddr::new(ip.into(), port));
i += 7;
}
0x06 if i + 19 <= buf.len() => {
let mut octets = [0u8; 16];
octets.copy_from_slice(&buf[i + 1..i + 17]);
let ip = std::net::Ipv6Addr::from(octets);
let port = u16::from_be_bytes([buf[i + 17], buf[i + 18]]);
addrs.push(SocketAddr::new(ip.into(), port));
i += 19;
}
_ => break, // malformed
}
}
addrs
}
// --- Minimal base64url implementation (no external dependency) ---
const B64_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
/// Encode bytes as unpadded base64url.
fn base64url_encode(data: &[u8]) -> String {
    let mut out = String::with_capacity((data.len() * 4 + 2) / 3);
    let push_sextets = |out: &mut String, n: u32, shifts: &[u32]| {
        for &shift in shifts {
            out.push(B64_CHARS[((n >> shift) & 0x3F) as usize] as char);
        }
    };
    let mut chunks = data.chunks_exact(3);
    for chunk in &mut chunks {
        // Three bytes → one 24-bit group → four output characters.
        let n = (u32::from(chunk[0]) << 16) | (u32::from(chunk[1]) << 8) | u32::from(chunk[2]);
        push_sextets(&mut out, n, &[18, 12, 6, 0]);
    }
    // Trailing partial group: 2 bytes → 3 chars, 1 byte → 2 chars, no padding.
    match chunks.remainder() {
        [x, y] => push_sextets(&mut out, (u32::from(*x) << 16) | (u32::from(*y) << 8), &[18, 12, 6]),
        [x] => push_sextets(&mut out, u32::from(*x) << 16, &[18, 12]),
        _ => {}
    }
    out
}
/// Decode base64url text to bytes; '=' padding is tolerated and skipped.
/// Returns None on any character outside the base64url alphabet.
fn base64url_decode(s: &str) -> Option<Vec<u8>> {
    let mut out = Vec::with_capacity(s.len() * 3 / 4);
    let (mut acc, mut nbits): (u32, u32) = (0, 0);
    for byte in s.bytes() {
        let sextet = match byte {
            b'A'..=b'Z' => byte - b'A',
            b'a'..=b'z' => byte - b'a' + 26,
            b'0'..=b'9' => byte - b'0' + 52,
            b'-' => 62,
            b'_' => 63,
            b'=' => continue, // skip padding
            _ => return None,
        };
        // Shift six bits in; emit a byte whenever eight or more accumulate.
        acc = (acc << 6) | u32::from(sextet);
        nbits += 6;
        if nbits >= 8 {
            nbits -= 8;
            out.push((acc >> nbits) as u8);
            acc &= (1 << nbits) - 1;
        }
    }
    Some(out)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Unit tests for the pure helpers in this module; no sockets involved.
    #[test]
    fn test_validate_hex64() {
        let valid = "a".repeat(64);
        assert!(validate_hex64(&valid).is_some());
        let short = "a".repeat(63);
        assert!(validate_hex64(&short).is_none());
        // Uppercase hex is rejected — IDs are canonically lowercase.
        let upper = "A".repeat(64);
        assert!(validate_hex64(&upper).is_none());
    }
    #[test]
    fn test_html_escape() {
        assert_eq!(html_escape("<script>"), "&lt;script&gt;");
        assert_eq!(html_escape("a&b"), "a&amp;b");
    }
    #[test]
    fn test_base64url_roundtrip() {
        let data = b"hello world";
        let encoded = base64url_encode(data);
        let decoded = base64url_decode(&encoded).unwrap();
        assert_eq!(decoded, data);
    }
    #[test]
    fn test_hostlist_roundtrip() {
        use std::net::{Ipv4Addr, Ipv6Addr};
        // One record of each family exercises both wire formats.
        let hosts = vec![
            SocketAddr::new(Ipv4Addr::new(192, 168, 1, 1).into(), 4433),
            SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 8080),
        ];
        let encoded = encode_hostlist(&hosts);
        let decoded = decode_hostlist(&encoded);
        assert_eq!(decoded, hosts);
    }
    #[test]
    fn test_parse_request_line() {
        let req = b"GET /p/abc123 HTTP/1.1\r\nHost: example.com\r\n\r\n";
        let (method, path) = parse_request_line(req).unwrap();
        assert_eq!(method, "GET");
        assert_eq!(path, "/p/abc123");
    }
    #[test]
    fn test_format_http_url() {
        let v4: SocketAddr = "1.2.3.4:4433".parse().unwrap();
        assert_eq!(format_http_url(&v4, "/p/abc"), "http://1.2.3.4:4433/p/abc");
        // IPv6 hosts must be bracketed.
        let v6: SocketAddr = "[::1]:8080".parse().unwrap();
        assert_eq!(format_http_url(&v6, "/p/abc"), "http://[::1]:8080/p/abc");
    }
}

54
crates/core/src/lib.rs Normal file
View file

@ -0,0 +1,54 @@
pub mod activity;
pub mod blob;
pub mod connection;
pub mod content;
pub mod crypto;
pub mod http;
pub mod network;
pub mod node;
pub mod protocol;
pub mod storage;
pub mod stun;
pub mod types;
pub mod upnp;
pub mod web;
// Re-export iroh types needed by consumers
pub use iroh::{EndpointAddr, EndpointId};
use types::NodeId;
/// Parse a connect string "nodeid_hex@ip:port" or "nodeid_hex@host:port" or bare "nodeid_hex"
/// into (NodeId, EndpointAddr). Supports DNS hostnames via `ToSocketAddrs`.
/// Shared utility used by CLI, Tauri, and bootstrap.
pub fn parse_connect_string(s: &str) -> anyhow::Result<(NodeId, EndpointAddr)> {
    use std::net::ToSocketAddrs;
    match s.split_once('@') {
        // "id@host:port" — resolve and attach every address the host maps to.
        Some((id_hex, addr_str)) => {
            let nid = parse_node_id_hex(id_hex)?;
            let endpoint_id = EndpointId::from_bytes(&nid)?;
            let resolved: Vec<std::net::SocketAddr> = addr_str.to_socket_addrs()?.collect();
            if resolved.is_empty() {
                anyhow::bail!("could not resolve address: {}", addr_str);
            }
            let addr = resolved
                .into_iter()
                .fold(EndpointAddr::from(endpoint_id), |acc, sa| acc.with_ip_addr(sa));
            Ok((nid, addr))
        }
        // Bare "id" — endpoint with no dial addresses attached.
        None => {
            let nid = parse_node_id_hex(s)?;
            let endpoint_id = EndpointId::from_bytes(&nid)?;
            Ok((nid, EndpointAddr::from(endpoint_id)))
        }
    }
}
/// Parse a hex-encoded node ID string into NodeId bytes.
///
/// Errors when the string is not valid hex or does not decode to exactly
/// 32 bytes.
pub fn parse_node_id_hex(hex_str: &str) -> anyhow::Result<NodeId> {
    hex::decode(hex_str)?
        .try_into()
        .map_err(|v: Vec<u8>| anyhow::anyhow!("expected 32 bytes, got {}", v.len()))
}

2345
crates/core/src/network.rs Normal file

File diff suppressed because it is too large Load diff

3460
crates/core/src/node.rs Normal file

File diff suppressed because it is too large Load diff

987
crates/core/src/protocol.rs Normal file
View file

@ -0,0 +1,987 @@
use serde::{Deserialize, Serialize};
use crate::types::{
BlobHeaderDiffOp, CdnManifest, DeleteRecord, GroupEpoch, GroupId, GroupMemberKey, NodeId,
PeerWithAddress, Post, PostId, PostVisibility, PublicProfile, VisibilityUpdate, WormId,
};
/// Single ALPN for Discovery Protocol v3 (N1/N2/N3 architecture)
/// NOTE(review): the constant is still named `ALPN_V2` while the wire
/// string is "itsgoin/3" — presumably kept for source compatibility across
/// callers; confirm intent before renaming.
pub const ALPN_V2: &[u8] = b"itsgoin/3";
/// A post bundled with its visibility metadata for sync
#[derive(Debug, Serialize, Deserialize)]
pub struct SyncPost {
    // Identifier of the post.
    pub id: PostId,
    // The post payload itself.
    pub post: Post,
    // Sent alongside so the receiving node knows who may see the post.
    pub visibility: PostVisibility,
}
/// Message type byte for stream multiplexing
///
/// Each wire message starts with one of these discriminant bytes; the
/// numeric ranges group related features (see section comments below).
/// `from_byte` must stay in sync with this list.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
    // 0x0_: connection setup and peer/address exchange
    NodeListUpdate = 0x01,
    InitialExchange = 0x02,
    AddressRequest = 0x03,
    AddressResponse = 0x04,
    RefuseRedirect = 0x05,
    // 0x4_: post sync and distribution
    PullSyncRequest = 0x40,
    PullSyncResponse = 0x41,
    PostNotification = 0x42,
    PostPush = 0x43,
    AudienceRequest = 0x44,
    AudienceResponse = 0x45,
    // 0x5_: profile / delete / visibility records
    ProfileUpdate = 0x50,
    DeleteRecord = 0x51,
    VisibilityUpdate = 0x52,
    // 0x6_: worm queries
    WormQuery = 0x60,
    WormResponse = 0x61,
    // 0x7_: social-graph address maintenance
    SocialAddressUpdate = 0x70,
    SocialDisconnectNotice = 0x71,
    SocialCheckin = 0x72,
    // 0x80-0x81 reserved
    // 0x9_: blob and manifest transfer
    BlobRequest = 0x90,
    BlobResponse = 0x91,
    ManifestRefreshRequest = 0x92,
    ManifestRefreshResponse = 0x93,
    ManifestPush = 0x94,
    BlobDeleteNotice = 0x95,
    // 0xA_: group key distribution
    GroupKeyDistribute = 0xA0,
    GroupKeyRequest = 0xA1,
    GroupKeyResponse = 0xA2,
    // 0xB_: relay / mesh / circle
    RelayIntroduce = 0xB0,
    RelayIntroduceResult = 0xB1,
    SessionRelay = 0xB2,
    MeshPrefer = 0xB3,
    CircleProfileUpdate = 0xB4,
    // 0xC_: anchor registration and NAT probing
    AnchorRegister = 0xC0,
    AnchorReferralRequest = 0xC1,
    AnchorReferralResponse = 0xC2,
    AnchorProbeRequest = 0xC3,
    AnchorProbeResult = 0xC4,
    PortScanHeartbeat = 0xC5,
    NatFilterProbe = 0xC6,
    NatFilterProbeResult = 0xC7,
    // 0xD_: blob-header diffs, downstream registration, TCP punch
    BlobHeaderDiff = 0xD0,
    BlobHeaderRequest = 0xD1,
    BlobHeaderResponse = 0xD2,
    PostDownstreamRegister = 0xD3,
    PostFetchRequest = 0xD4,
    PostFetchResponse = 0xD5,
    TcpPunchRequest = 0xD6,
    TcpPunchResult = 0xD7,
    // 0xE_: keepalive
    MeshKeepalive = 0xE0,
}
impl MessageType {
    /// Decode a wire byte into a `MessageType`; `None` for unknown bytes.
    pub fn from_byte(b: u8) -> Option<Self> {
        let mt = match b {
            0x01 => Self::NodeListUpdate,
            0x02 => Self::InitialExchange,
            0x03 => Self::AddressRequest,
            0x04 => Self::AddressResponse,
            0x05 => Self::RefuseRedirect,
            0x40 => Self::PullSyncRequest,
            0x41 => Self::PullSyncResponse,
            0x42 => Self::PostNotification,
            0x43 => Self::PostPush,
            0x44 => Self::AudienceRequest,
            0x45 => Self::AudienceResponse,
            0x50 => Self::ProfileUpdate,
            0x51 => Self::DeleteRecord,
            0x52 => Self::VisibilityUpdate,
            0x60 => Self::WormQuery,
            0x61 => Self::WormResponse,
            0x70 => Self::SocialAddressUpdate,
            0x71 => Self::SocialDisconnectNotice,
            0x72 => Self::SocialCheckin,
            0x90 => Self::BlobRequest,
            0x91 => Self::BlobResponse,
            0x92 => Self::ManifestRefreshRequest,
            0x93 => Self::ManifestRefreshResponse,
            0x94 => Self::ManifestPush,
            0x95 => Self::BlobDeleteNotice,
            0xA0 => Self::GroupKeyDistribute,
            0xA1 => Self::GroupKeyRequest,
            0xA2 => Self::GroupKeyResponse,
            0xB0 => Self::RelayIntroduce,
            0xB1 => Self::RelayIntroduceResult,
            0xB2 => Self::SessionRelay,
            0xB3 => Self::MeshPrefer,
            0xB4 => Self::CircleProfileUpdate,
            0xC0 => Self::AnchorRegister,
            0xC1 => Self::AnchorReferralRequest,
            0xC2 => Self::AnchorReferralResponse,
            0xC3 => Self::AnchorProbeRequest,
            0xC4 => Self::AnchorProbeResult,
            0xC5 => Self::PortScanHeartbeat,
            0xC6 => Self::NatFilterProbe,
            0xC7 => Self::NatFilterProbeResult,
            0xD0 => Self::BlobHeaderDiff,
            0xD1 => Self::BlobHeaderRequest,
            0xD2 => Self::BlobHeaderResponse,
            0xD3 => Self::PostDownstreamRegister,
            0xD4 => Self::PostFetchRequest,
            0xD5 => Self::PostFetchResponse,
            0xD6 => Self::TcpPunchRequest,
            0xD7 => Self::TcpPunchResult,
            0xE0 => Self::MeshKeepalive,
            _ => return None,
        };
        Some(mt)
    }

    /// Encode this message type as its wire byte (the `#[repr(u8)]` discriminant).
    pub fn as_byte(self) -> u8 {
        self as u8
    }
}
// --- Payload structs ---
/// Initial exchange: N1/N2 node lists + profile + deletes + post_ids + peer addresses
#[derive(Debug, Serialize, Deserialize)]
pub struct InitialExchangePayload {
    /// Our connections + social contacts NodeIds (no addresses)
    pub n1_node_ids: Vec<NodeId>,
    /// Our deduplicated N2 NodeIds (no addresses)
    pub n2_node_ids: Vec<NodeId>,
    /// Our profile
    pub profile: Option<PublicProfile>,
    /// Our delete records
    pub deletes: Vec<DeleteRecord>,
    /// Our post IDs (for replica tracking)
    pub post_ids: Vec<PostId>,
    /// Our N+10:Addresses (connected peers with addresses) for social routing
    #[serde(default)]
    pub peer_addresses: Vec<PeerWithAddress>,
    /// If sender is an anchor, their stable advertised address (e.g. "174.127.120.52:4433")
    #[serde(default)]
    pub anchor_addr: Option<String>,
    /// What the sender sees as the receiver's address (STUN-like observed addr)
    #[serde(default)]
    pub your_observed_addr: Option<String>,
    /// Sender's NAT type ("public", "easy", "hard", "unknown")
    #[serde(default)]
    pub nat_type: Option<String>,
    /// Sender's NAT mapping behavior ("eim", "edm", "unknown")
    #[serde(default)]
    pub nat_mapping: Option<String>,
    /// Sender's NAT filtering behavior ("open", "port_restricted", "unknown")
    #[serde(default)]
    pub nat_filtering: Option<String>,
    /// Whether the sender is running an HTTP server for direct browser access
    #[serde(default)]
    pub http_capable: bool,
    /// External HTTP address if known (e.g. "1.2.3.4:4433")
    #[serde(default)]
    pub http_addr: Option<String>,
}
/// Incremental N1/N2 changes
#[derive(Debug, Serialize, Deserialize)]
pub struct NodeListUpdatePayload {
    /// Sender's sequence number for this update (presumably monotonic per
    /// sender so receivers can order/dedup — TODO confirm against sender code).
    pub seq: u64,
    /// NodeIds added to the sender's N1 set since the last update.
    pub n1_added: Vec<NodeId>,
    /// NodeIds removed from the sender's N1 set.
    pub n1_removed: Vec<NodeId>,
    /// NodeIds added to the sender's N2 set.
    pub n2_added: Vec<NodeId>,
    /// NodeIds removed from the sender's N2 set.
    pub n2_removed: Vec<NodeId>,
}
/// Pull-based post sync request
#[derive(Debug, Serialize, Deserialize)]
pub struct PullSyncRequestPayload {
    /// Our follows (for the responder to filter)
    pub follows: Vec<NodeId>,
    /// Post IDs we already have
    pub have_post_ids: Vec<PostId>,
}
/// Pull-based post sync response
#[derive(Debug, Serialize, Deserialize)]
pub struct PullSyncResponsePayload {
    /// Posts the requester did not report having.
    pub posts: Vec<SyncPost>,
    /// Visibility changes the requester may have missed.
    pub visibility_updates: Vec<VisibilityUpdate>,
}
/// Profile update (pushed via uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct ProfileUpdatePayload {
    /// Profiles being pushed.
    pub profiles: Vec<PublicProfile>,
}
/// Delete record (pushed via uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct DeleteRecordPayload {
    /// Delete records being pushed.
    pub records: Vec<DeleteRecord>,
}
/// Visibility update (pushed via uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct VisibilityUpdatePayload {
    /// Visibility changes being pushed.
    pub updates: Vec<VisibilityUpdate>,
}
/// Post notification: lightweight push when a new post is created
#[derive(Debug, Serialize, Deserialize)]
pub struct PostNotificationPayload {
    /// ID of the newly created post.
    pub post_id: PostId,
    /// Author of the post.
    pub author: NodeId,
}
/// Audience request: ask a peer to join their audience
#[derive(Debug, Serialize, Deserialize)]
pub struct AudienceRequestPayload {
    /// Node asking to join.
    pub requester: NodeId,
}
/// Audience response: approve or deny an audience request
#[derive(Debug, Serialize, Deserialize)]
pub struct AudienceResponsePayload {
    /// Node answering the request.
    pub responder: NodeId,
    /// true = request approved, false = denied.
    pub approved: bool,
}
/// Post push: full post content pushed directly to a recipient
#[derive(Debug, Serialize, Deserialize)]
pub struct PostPushPayload {
    pub post: SyncPost,
}
/// Address resolution request (bi-stream: ask reporter for a hop-2 peer's address)
#[derive(Debug, Serialize, Deserialize)]
pub struct AddressRequestPayload {
    /// Node whose address is being resolved.
    pub target: NodeId,
}
/// Address resolution response
#[derive(Debug, Serialize, Deserialize)]
pub struct AddressResponsePayload {
    /// Echo of the requested node.
    pub target: NodeId,
    /// The target's address, if the responder knows one.
    pub address: Option<String>,
    /// Set when the target is known-disconnected (requester registered as watcher)
    #[serde(default)]
    pub disconnected_at: Option<u64>,
    /// Target's N+10:Addresses if known
    #[serde(default)]
    pub peer_addresses: Vec<PeerWithAddress>,
}
/// Refuse mesh connection with optional redirect to another peer
#[derive(Debug, Serialize, Deserialize)]
pub struct RefuseRedirectPayload {
    /// Human-readable refusal reason.
    pub reason: String,
    /// Alternative peer the requester can try instead, if any.
    pub redirect: Option<PeerWithAddress>,
}
/// Worm lookup query (bi-stream) — searches for nodes, posts, or blobs
#[derive(Debug, Serialize, Deserialize)]
pub struct WormQueryPayload {
    /// Identifier for this worm, echoed in responses for correlation.
    pub worm_id: WormId,
    /// Primary node being searched for.
    pub target: NodeId,
    /// Additional IDs to search for (up to 10 recent_peers of target)
    #[serde(default)]
    pub needle_peers: Vec<NodeId>,
    /// Remaining hops before the worm stops propagating.
    pub ttl: u8,
    /// Nodes this worm has already passed through (presumably for loop
    /// prevention — confirm in forwarding logic).
    pub visited: Vec<NodeId>,
    /// Optional: also search for a specific post by ID
    #[serde(default)]
    pub post_id: Option<PostId>,
    /// Optional: also search for a specific blob by CID
    #[serde(default)]
    pub blob_id: Option<[u8; 32]>,
}
/// Worm lookup response (bi-stream, paired with query)
#[derive(Debug, Serialize, Deserialize)]
pub struct WormResponsePayload {
    /// Echo of the query's worm ID.
    pub worm_id: WormId,
    /// Whether any needle was located.
    pub found: bool,
    /// Which needle was actually found (target or one of its recent_peers)
    #[serde(default)]
    pub found_id: Option<NodeId>,
    /// Known addresses for the found node (empty when nothing was found).
    pub addresses: Vec<String>,
    /// Node that reported the find, if any.
    pub reporter: Option<NodeId>,
    /// Hop count at which the find occurred, if reported.
    pub hop: Option<u8>,
    /// One random wide-peer referral: (node_id, address) for bloom round
    #[serde(default)]
    pub wide_referral: Option<(NodeId, String)>,
    /// Node that holds the requested post (may differ from found_id)
    #[serde(default)]
    pub post_holder: Option<NodeId>,
    /// Node that holds the requested blob
    #[serde(default)]
    pub blob_holder: Option<NodeId>,
}
// --- Social routing payloads ---
/// Address update notification: "here's N+10:Addresses for a peer"
#[derive(Debug, Serialize, Deserialize)]
pub struct SocialAddressUpdatePayload {
    /// Peer the update is about.
    pub node_id: NodeId,
    /// Direct addresses for that peer.
    pub addresses: Vec<String>,
    /// That peer's N+10:Addresses (its connected peers with addresses).
    pub peer_addresses: Vec<PeerWithAddress>,
}
/// Disconnect notice: "peer X disconnected"
#[derive(Debug, Serialize, Deserialize)]
pub struct SocialDisconnectNoticePayload {
    /// The peer that disconnected.
    pub node_id: NodeId,
}
/// Lightweight keepalive checkin (bidirectional)
#[derive(Debug, Serialize, Deserialize)]
pub struct SocialCheckinPayload {
    /// Node this checkin describes.
    pub node_id: NodeId,
    /// Its current direct addresses.
    pub addresses: Vec<String>,
    /// Its N+10:Addresses.
    pub peer_addresses: Vec<PeerWithAddress>,
}
// --- Blob transfer payloads ---
/// Request a blob by CID (bi-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct BlobRequestPayload {
    /// 32-byte content ID of the requested blob.
    pub cid: [u8; 32],
    /// Requester's addresses so the host can record downstream
    #[serde(default)]
    pub requester_addresses: Vec<String>,
}
/// Blob response: found flag + base64-encoded data + CDN manifest
#[derive(Debug, Serialize, Deserialize)]
pub struct BlobResponsePayload {
    /// Echo of the requested CID.
    pub cid: [u8; 32],
    /// Whether the host has the blob.
    pub found: bool,
    /// Base64-encoded blob bytes (empty if not found)
    #[serde(default)]
    pub data_b64: String,
    /// Author manifest + host info (if available)
    #[serde(default)]
    pub manifest: Option<CdnManifest>,
    /// Whether host accepted requester as downstream
    #[serde(default)]
    pub cdn_registered: bool,
    /// If not registered (host full), try these peers
    #[serde(default)]
    pub cdn_redirect_peers: Vec<PeerWithAddress>,
}
/// Request a manifest refresh for a CID (bi-stream: ask upstream)
#[derive(Debug, Serialize, Deserialize)]
pub struct ManifestRefreshRequestPayload {
    pub cid: [u8; 32],
    /// Timestamp of the manifest the requester currently holds.
    pub current_updated_at: u64,
}
/// Manifest refresh response
#[derive(Debug, Serialize, Deserialize)]
pub struct ManifestRefreshResponsePayload {
    pub cid: [u8; 32],
    /// True when a newer manifest is included below.
    pub updated: bool,
    pub manifest: Option<CdnManifest>,
}
/// Push updated manifests to downstream peers (uni-stream)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestPushPayload {
    pub manifests: Vec<ManifestPushEntry>,
}
/// A single manifest push entry
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManifestPushEntry {
    pub cid: [u8; 32],
    pub manifest: CdnManifest,
}
/// Notify upstream/downstream that a blob has been deleted (uni-stream)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobDeleteNoticePayload {
    pub cid: [u8; 32],
    /// If sender was upstream and is providing their own upstream for tree healing
    #[serde(default)]
    pub upstream_node: Option<PeerWithAddress>,
}
// --- Group key distribution payloads ---
/// Admin pushes wrapped group key to a member (uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct GroupKeyDistributePayload {
    pub group_id: GroupId,
    /// Human-readable circle name the group belongs to.
    pub circle_name: String,
    /// Key epoch (presumably incremented on key rotation — confirm).
    pub epoch: GroupEpoch,
    /// The group's 32-byte public key.
    pub group_public_key: [u8; 32],
    /// Distributing admin's node ID.
    pub admin: NodeId,
    /// Group key wrapped individually per member.
    pub member_keys: Vec<GroupMemberKey>,
}
/// Member requests current group key (bi-stream request)
#[derive(Debug, Serialize, Deserialize)]
pub struct GroupKeyRequestPayload {
    pub group_id: GroupId,
    /// Latest epoch the requesting member already holds.
    pub known_epoch: GroupEpoch,
}
/// Admin responds with wrapped key (bi-stream response)
#[derive(Debug, Serialize, Deserialize)]
pub struct GroupKeyResponsePayload {
    pub group_id: GroupId,
    pub epoch: GroupEpoch,
    pub group_public_key: [u8; 32],
    pub admin: NodeId,
    /// Wrapped key for the requester; None presumably means no key was
    /// issued (e.g. not a member) — confirm against handler code.
    pub member_key: Option<GroupMemberKey>,
}
// --- Relay introduction payloads ---
/// Relay introduction identifier for deduplication
pub type IntroId = [u8; 16];
/// Request introduction to a target through a relay peer (bi-stream)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelayIntroducePayload {
    /// Deduplication ID, echoed in the result.
    pub intro_id: IntroId,
    /// Node we want to be introduced to.
    pub target: NodeId,
    /// Node asking for the introduction.
    pub requester: NodeId,
    /// Requester's addresses, passed along so the target can dial back.
    pub requester_addresses: Vec<String>,
    /// Max forwarding hops remaining (0 = relay must know target directly)
    pub ttl: u8,
}
/// Target's response to a relay introduction (bi-stream response)
#[derive(Debug, Serialize, Deserialize)]
pub struct RelayIntroduceResultPayload {
    /// Echo of the introduction's dedup ID.
    pub intro_id: IntroId,
    /// Whether the target accepted the introduction.
    pub accepted: bool,
    /// Target's addresses (empty on rejection).
    pub target_addresses: Vec<String>,
    /// Relay is willing to serve as stream relay fallback
    pub relay_available: bool,
    /// Why the introduction was rejected (when accepted=false).
    pub reject_reason: Option<String>,
}
/// Open a relay pipe — intermediary splices two bi-streams (bi-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct SessionRelayPayload {
    /// Introduction this pipe belongs to.
    pub intro_id: IntroId,
    /// Far end of the pipe.
    pub target: NodeId,
}
/// Mesh preference negotiation (bi-stream: request + response)
#[derive(Debug, Serialize, Deserialize)]
pub struct MeshPreferPayload {
    /// true = "I want us to be preferred peers" (request)
    pub requesting: bool,
    /// true = "I agree to be preferred peers" (response only)
    pub accepted: bool,
    /// Reason for rejection (response only, when accepted=false)
    #[serde(default)]
    pub reject_reason: Option<String>,
}
/// Circle profile update: encrypted profile variant for a circle (uni-stream push)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CircleProfileUpdatePayload {
    /// Profile owner.
    pub author: NodeId,
    pub circle_name: String,
    pub group_id: GroupId,
    /// Group key epoch the payload was encrypted under.
    pub epoch: GroupEpoch,
    /// base64(ChaCha20-Poly1305 encrypted JSON of CircleProfile)
    pub encrypted_payload: String,
    /// 60 bytes: nonce(12) || encrypted_cek(32) || tag(16)
    pub wrapped_cek: Vec<u8>,
    /// Timestamp of the update (ms precision, judging by sibling fields —
    /// confirm units at the call site).
    pub updated_at: u64,
}
// --- Anchor referral payloads ---
/// Node registers its address with an anchor (uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct AnchorRegisterPayload {
    /// Registering node.
    pub node_id: NodeId,
    /// Addresses the anchor can hand out in referrals.
    pub addresses: Vec<String>,
}
/// Node requests peer referrals from an anchor (bi-stream request)
#[derive(Debug, Serialize, Deserialize)]
pub struct AnchorReferralRequestPayload {
    /// Node asking for referrals.
    pub requester: NodeId,
    /// Requester's own addresses.
    pub requester_addresses: Vec<String>,
}
/// Anchor responds with peer referrals (bi-stream response)
#[derive(Debug, Serialize, Deserialize)]
pub struct AnchorReferralResponsePayload {
    /// Referred peers (may be empty).
    pub referrals: Vec<AnchorReferral>,
}
/// A single peer referral from an anchor
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnchorReferral {
    /// Referred peer's ID.
    pub node_id: NodeId,
    /// Known addresses for the referred peer.
    pub addresses: Vec<String>,
}
// --- Anchor probe payloads ---
/// Request an anchor self-verification probe (bi-stream: sent to reporter)
#[derive(Debug, Serialize, Deserialize)]
pub struct AnchorProbeRequestPayload {
    /// Address to cold-connect to (the candidate's external address)
    pub target_addr: String,
    /// Stranger from N2 who will perform the cold connect test
    pub witness: NodeId,
    /// The node requesting verification (us)
    pub candidate: NodeId,
    /// Candidate's addresses for the witness to deliver result directly
    pub candidate_addresses: Vec<String>,
    /// Dedup identifier
    pub probe_id: [u8; 16],
}
/// Result of an anchor self-verification probe
#[derive(Debug, Serialize, Deserialize)]
pub struct AnchorProbeResultPayload {
    /// Echo of the probe's dedup identifier.
    pub probe_id: [u8; 16],
    /// Whether the witness could cold-connect to the candidate.
    pub reachable: bool,
    /// Address the witness observed for the candidate, if any.
    pub observed_addr: Option<String>,
}
/// Port scan heartbeat during scanning hole punch (informational)
#[derive(Debug, Serialize, Deserialize)]
pub struct PortScanHeartbeatPayload {
    /// Peer being hole-punched.
    pub peer: NodeId,
    /// Port the peer was seen from (if any)
    pub seen_from_port: Option<u16>,
}
/// Request NAT filtering probe from anchor (bi-stream).
/// Anchor will attempt to reach us from a different source port.
#[derive(Debug, Serialize, Deserialize)]
pub struct NatFilterProbePayload {
    /// Our node ID
    pub node_id: NodeId,
}
/// Result of NAT filtering probe
#[derive(Debug, Serialize, Deserialize)]
pub struct NatFilterProbeResultPayload {
    /// true = we reached you from a different port (address-restricted / Open)
    /// false = could not reach you (port-restricted)
    pub reachable: bool,
}
// --- Engagement payloads ---
/// Incremental engagement diff (uni-stream: propagated through post_downstream tree)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobHeaderDiffPayload {
    /// Post whose engagement header changed.
    pub post_id: PostId,
    /// Author of that post.
    pub author: NodeId,
    /// Individual diff operations to apply.
    pub ops: Vec<BlobHeaderDiffOp>,
    /// Timestamp of this diff batch
    pub timestamp_ms: u64,
}
/// Request full engagement header for a post (bi-stream request)
#[derive(Debug, Serialize, Deserialize)]
pub struct BlobHeaderRequestPayload {
    pub post_id: PostId,
    /// Requester's current header timestamp (0 = never seen)
    pub current_updated_at: u64,
}
/// Full engagement header response (bi-stream response)
#[derive(Debug, Serialize, Deserialize)]
pub struct BlobHeaderResponsePayload {
    pub post_id: PostId,
    /// True if the sender has a newer header than requested
    pub updated: bool,
    /// JSON-serialized BlobHeader (if updated)
    #[serde(default)]
    pub header_json: Option<String>,
}
/// Register as a downstream holder of a post (uni-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct PostDownstreamRegisterPayload {
    /// Post the sender now holds downstream.
    pub post_id: PostId,
}
/// Request a single post by ID (bi-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct PostFetchRequestPayload {
    pub post_id: PostId,
}
/// Single-post fetch response (bi-stream)
#[derive(Debug, Serialize, Deserialize)]
pub struct PostFetchResponsePayload {
    /// Echo of the requested post ID.
    pub post_id: PostId,
    /// Whether the responder holds the post.
    pub found: bool,
    /// The post (None when not found).
    pub post: Option<SyncPost>,
}
/// Request a TCP hole punch toward a browser IP (bi-stream).
/// Sent by the anchor to a node that holds a post, so the node's NAT
/// opens a pinhole allowing the browser to connect directly via HTTP.
#[derive(Debug, Serialize, Deserialize)]
pub struct TcpPunchRequestPayload {
    /// Browser's public IP (from X-Forwarded-For)
    pub browser_ip: String,
    /// Post being requested (for validation — node must hold this post)
    pub post_id: PostId,
}
/// Result of a TCP punch attempt (bi-stream response).
#[derive(Debug, Serialize, Deserialize)]
pub struct TcpPunchResultPayload {
    /// Whether the punch SYN was sent
    pub success: bool,
    /// External HTTP address the browser should be redirected to
    pub http_addr: Option<String>,
}
// --- Wire helpers ---
/// Write a typed message: 1-byte type + length-prefixed JSON payload.
///
/// Frame layout: `[type: u8][len: u32 be][payload: len bytes of JSON]`.
/// The whole frame is assembled in one buffer and sent with a single
/// `write_all`, instead of three separate awaited writes.
///
/// # Errors
/// Fails if serialization fails, if the payload exceeds `u32::MAX` bytes
/// (the original `as u32` cast silently truncated and would desync the
/// stream), or if the stream write fails.
pub async fn write_typed_message<T: Serialize>(
    send: &mut iroh::endpoint::SendStream,
    msg_type: MessageType,
    payload: &T,
) -> anyhow::Result<()> {
    let body = serde_json::to_vec(payload)?;
    let len = u32::try_from(body.len())
        .map_err(|_| anyhow::anyhow!("payload too large to frame: {} bytes", body.len()))?;
    let mut frame = Vec::with_capacity(1 + 4 + body.len());
    frame.push(msg_type.as_byte());
    frame.extend_from_slice(&len.to_be_bytes());
    frame.extend_from_slice(&body);
    send.write_all(&frame).await?;
    Ok(())
}
/// Read the 1-byte message type header from a stream
pub async fn read_message_type(
    recv: &mut iroh::endpoint::RecvStream,
) -> anyhow::Result<MessageType> {
    // Exactly one header byte precedes every typed message.
    let mut type_byte = [0u8; 1];
    recv.read_exact(&mut type_byte).await?;
    match MessageType::from_byte(type_byte[0]) {
        Some(mt) => Ok(mt),
        None => Err(anyhow::anyhow!("unknown message type: 0x{:02x}", type_byte[0])),
    }
}
/// Read a length-prefixed JSON payload (after type byte has been consumed)
pub async fn read_payload<T: serde::de::DeserializeOwned>(
    recv: &mut iroh::endpoint::RecvStream,
    max_size: usize,
) -> anyhow::Result<T> {
    // 4-byte big-endian length prefix.
    let mut len_bytes = [0u8; 4];
    recv.read_exact(&mut len_bytes).await?;
    let payload_len = u32::from_be_bytes(len_bytes) as usize;
    // Bound the allocation before reading the body.
    anyhow::ensure!(
        payload_len <= max_size,
        "payload too large: {} bytes (max {})",
        payload_len,
        max_size
    );
    let mut payload = vec![0u8; payload_len];
    recv.read_exact(&mut payload).await?;
    let value = serde_json::from_slice(&payload)?;
    Ok(value)
}
/// Generic length-prefixed JSON write for any serializable type.
///
/// Frame layout: `[len: u32 be][payload: len bytes of JSON]`. The frame is
/// assembled in one buffer and sent with a single `write_all` instead of two
/// separate awaited writes.
///
/// # Errors
/// Fails if serialization fails, if the message exceeds `u32::MAX` bytes
/// (the original `as u32` cast silently truncated and would desync the
/// stream), or if the stream write fails.
pub async fn write_framed<T: Serialize>(
    send: &mut iroh::endpoint::SendStream,
    msg: &T,
) -> anyhow::Result<()> {
    let body = serde_json::to_vec(msg)?;
    let len = u32::try_from(body.len())
        .map_err(|_| anyhow::anyhow!("framed message too large to send: {} bytes", body.len()))?;
    let mut frame = Vec::with_capacity(4 + body.len());
    frame.extend_from_slice(&len.to_be_bytes());
    frame.extend_from_slice(&body);
    send.write_all(&frame).await?;
    Ok(())
}
/// Generic length-prefixed JSON read for any deserializable type
pub async fn read_framed<T: serde::de::DeserializeOwned>(
    recv: &mut iroh::endpoint::RecvStream,
    max_size: usize,
) -> anyhow::Result<T> {
    // 4-byte big-endian length prefix.
    let mut prefix = [0u8; 4];
    recv.read_exact(&mut prefix).await?;
    let frame_len = u32::from_be_bytes(prefix) as usize;
    // Bound the allocation before reading the body.
    anyhow::ensure!(
        frame_len <= max_size,
        "framed message too large: {} bytes (max {})",
        frame_len,
        max_size
    );
    let mut body = vec![0u8; frame_len];
    recv.read_exact(&mut body).await?;
    Ok(serde_json::from_slice(&body)?)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip every variant through its wire byte.
    ///
    /// Fix: the original list omitted `TcpPunchRequest`, `TcpPunchResult`,
    /// and `MeshKeepalive`, so their `from_byte` arms were never exercised.
    /// This array must list ALL enum variants.
    #[test]
    fn message_type_roundtrip() {
        let types = [
            MessageType::NodeListUpdate,
            MessageType::InitialExchange,
            MessageType::AddressRequest,
            MessageType::AddressResponse,
            MessageType::RefuseRedirect,
            MessageType::PullSyncRequest,
            MessageType::PullSyncResponse,
            MessageType::PostNotification,
            MessageType::PostPush,
            MessageType::AudienceRequest,
            MessageType::AudienceResponse,
            MessageType::ProfileUpdate,
            MessageType::DeleteRecord,
            MessageType::VisibilityUpdate,
            MessageType::WormQuery,
            MessageType::WormResponse,
            MessageType::SocialAddressUpdate,
            MessageType::SocialDisconnectNotice,
            MessageType::SocialCheckin,
            MessageType::BlobRequest,
            MessageType::BlobResponse,
            MessageType::ManifestRefreshRequest,
            MessageType::ManifestRefreshResponse,
            MessageType::ManifestPush,
            MessageType::BlobDeleteNotice,
            MessageType::GroupKeyDistribute,
            MessageType::GroupKeyRequest,
            MessageType::GroupKeyResponse,
            MessageType::RelayIntroduce,
            MessageType::RelayIntroduceResult,
            MessageType::SessionRelay,
            MessageType::MeshPrefer,
            MessageType::CircleProfileUpdate,
            MessageType::AnchorRegister,
            MessageType::AnchorReferralRequest,
            MessageType::AnchorReferralResponse,
            MessageType::AnchorProbeRequest,
            MessageType::AnchorProbeResult,
            MessageType::PortScanHeartbeat,
            MessageType::NatFilterProbe,
            MessageType::NatFilterProbeResult,
            MessageType::BlobHeaderDiff,
            MessageType::BlobHeaderRequest,
            MessageType::BlobHeaderResponse,
            MessageType::PostDownstreamRegister,
            MessageType::PostFetchRequest,
            MessageType::PostFetchResponse,
            MessageType::TcpPunchRequest,
            MessageType::TcpPunchResult,
            MessageType::MeshKeepalive,
        ];
        for mt in types {
            let byte = mt.as_byte();
            let recovered = MessageType::from_byte(byte).expect("roundtrip failed");
            assert_eq!(mt, recovered);
        }
    }

    /// Bytes outside the assigned ranges must decode to None.
    #[test]
    fn unknown_message_type_returns_none() {
        assert!(MessageType::from_byte(0xFF).is_none());
        assert!(MessageType::from_byte(0x00).is_none());
        assert!(MessageType::from_byte(0x06).is_none());
    }

    /// `upstream_node` must survive JSON both as None and Some.
    #[test]
    fn blob_delete_notice_payload_roundtrip() {
        use crate::types::PeerWithAddress;
        // Without upstream
        let payload = BlobDeleteNoticePayload {
            cid: [42u8; 32],
            upstream_node: None,
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: BlobDeleteNoticePayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.cid, [42u8; 32]);
        assert!(decoded.upstream_node.is_none());
        // With upstream
        let payload_with_up = BlobDeleteNoticePayload {
            cid: [99u8; 32],
            upstream_node: Some(PeerWithAddress {
                n: hex::encode([1u8; 32]),
                a: vec!["10.0.0.1:4433".to_string()],
            }),
        };
        let json2 = serde_json::to_string(&payload_with_up).unwrap();
        let decoded2: BlobDeleteNoticePayload = serde_json::from_str(&json2).unwrap();
        assert_eq!(decoded2.cid, [99u8; 32]);
        assert!(decoded2.upstream_node.is_some());
        let up = decoded2.upstream_node.unwrap();
        assert_eq!(up.a, vec!["10.0.0.1:4433".to_string()]);
    }

    #[test]
    fn relay_introduce_payload_roundtrip() {
        let payload = RelayIntroducePayload {
            intro_id: [42u8; 16],
            target: [1u8; 32],
            requester: [2u8; 32],
            requester_addresses: vec!["10.0.0.2:4433".to_string()],
            ttl: 1,
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: RelayIntroducePayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.intro_id, [42u8; 16]);
        assert_eq!(decoded.target, [1u8; 32]);
        assert_eq!(decoded.requester, [2u8; 32]);
        assert_eq!(decoded.requester_addresses, vec!["10.0.0.2:4433".to_string()]);
        assert_eq!(decoded.ttl, 1);
    }

    #[test]
    fn relay_introduce_result_payload_roundtrip() {
        let payload = RelayIntroduceResultPayload {
            intro_id: [7u8; 16],
            accepted: true,
            target_addresses: vec!["10.0.0.1:4433".to_string(), "192.168.1.1:4433".to_string()],
            relay_available: true,
            reject_reason: None,
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: RelayIntroduceResultPayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.intro_id, [7u8; 16]);
        assert!(decoded.accepted);
        assert_eq!(decoded.target_addresses.len(), 2);
        assert!(decoded.relay_available);
        assert!(decoded.reject_reason.is_none());
        // Test rejected case
        let rejected = RelayIntroduceResultPayload {
            intro_id: [8u8; 16],
            accepted: false,
            target_addresses: vec![],
            relay_available: false,
            reject_reason: Some("target not reachable".to_string()),
        };
        let json2 = serde_json::to_string(&rejected).unwrap();
        let decoded2: RelayIntroduceResultPayload = serde_json::from_str(&json2).unwrap();
        assert!(!decoded2.accepted);
        assert_eq!(decoded2.reject_reason.unwrap(), "target not reachable");
    }

    /// Exercises all three legs of the negotiation: request, accept, reject.
    #[test]
    fn mesh_prefer_payload_roundtrip() {
        // Request
        let request = MeshPreferPayload {
            requesting: true,
            accepted: false,
            reject_reason: None,
        };
        let json = serde_json::to_string(&request).unwrap();
        let decoded: MeshPreferPayload = serde_json::from_str(&json).unwrap();
        assert!(decoded.requesting);
        assert!(!decoded.accepted);
        assert!(decoded.reject_reason.is_none());
        // Accepted response
        let accept = MeshPreferPayload {
            requesting: false,
            accepted: true,
            reject_reason: None,
        };
        let json2 = serde_json::to_string(&accept).unwrap();
        let decoded2: MeshPreferPayload = serde_json::from_str(&json2).unwrap();
        assert!(!decoded2.requesting);
        assert!(decoded2.accepted);
        // Rejected response
        let reject = MeshPreferPayload {
            requesting: false,
            accepted: false,
            reject_reason: Some("slots full".to_string()),
        };
        let json3 = serde_json::to_string(&reject).unwrap();
        let decoded3: MeshPreferPayload = serde_json::from_str(&json3).unwrap();
        assert!(!decoded3.accepted);
        assert_eq!(decoded3.reject_reason.unwrap(), "slots full");
    }

    #[test]
    fn session_relay_payload_roundtrip() {
        let payload = SessionRelayPayload {
            intro_id: [55u8; 16],
            target: [3u8; 32],
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: SessionRelayPayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.intro_id, [55u8; 16]);
        assert_eq!(decoded.target, [3u8; 32]);
    }

    #[test]
    fn circle_profile_update_payload_roundtrip() {
        let payload = CircleProfileUpdatePayload {
            author: [1u8; 32],
            circle_name: "friends".to_string(),
            group_id: [2u8; 32],
            epoch: 3,
            encrypted_payload: "base64data==".to_string(),
            wrapped_cek: vec![0u8; 60],
            updated_at: 1700000000000,
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: CircleProfileUpdatePayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.author, [1u8; 32]);
        assert_eq!(decoded.circle_name, "friends");
        assert_eq!(decoded.group_id, [2u8; 32]);
        assert_eq!(decoded.epoch, 3);
        assert_eq!(decoded.encrypted_payload, "base64data==");
        assert_eq!(decoded.wrapped_cek.len(), 60);
        assert_eq!(decoded.updated_at, 1700000000000);
    }

    #[test]
    fn anchor_register_payload_roundtrip() {
        let payload = AnchorRegisterPayload {
            node_id: [1u8; 32],
            addresses: vec!["192.168.1.5:4433".to_string(), "10.0.0.1:4433".to_string()],
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: AnchorRegisterPayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.node_id, [1u8; 32]);
        assert_eq!(decoded.addresses.len(), 2);
        assert_eq!(decoded.addresses[0], "192.168.1.5:4433");
    }

    #[test]
    fn anchor_referral_request_payload_roundtrip() {
        let payload = AnchorReferralRequestPayload {
            requester: [2u8; 32],
            requester_addresses: vec!["10.0.0.2:4433".to_string()],
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: AnchorReferralRequestPayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.requester, [2u8; 32]);
        assert_eq!(decoded.requester_addresses, vec!["10.0.0.2:4433"]);
    }

    #[test]
    fn anchor_referral_response_payload_roundtrip() {
        let payload = AnchorReferralResponsePayload {
            referrals: vec![
                AnchorReferral {
                    node_id: [3u8; 32],
                    addresses: vec!["10.0.0.3:4433".to_string()],
                },
                AnchorReferral {
                    node_id: [4u8; 32],
                    addresses: vec!["10.0.0.4:4433".to_string(), "192.168.1.4:4433".to_string()],
                },
            ],
        };
        let json = serde_json::to_string(&payload).unwrap();
        let decoded: AnchorReferralResponsePayload = serde_json::from_str(&json).unwrap();
        assert_eq!(decoded.referrals.len(), 2);
        assert_eq!(decoded.referrals[0].node_id, [3u8; 32]);
        assert_eq!(decoded.referrals[0].addresses, vec!["10.0.0.3:4433"]);
        assert_eq!(decoded.referrals[1].node_id, [4u8; 32]);
        assert_eq!(decoded.referrals[1].addresses.len(), 2);
        // Empty referrals
        let empty = AnchorReferralResponsePayload { referrals: vec![] };
        let json2 = serde_json::to_string(&empty).unwrap();
        let decoded2: AnchorReferralResponsePayload = serde_json::from_str(&json2).unwrap();
        assert!(decoded2.referrals.is_empty());
    }
}

5514
crates/core/src/storage.rs Normal file

File diff suppressed because it is too large Load diff

185
crates/core/src/stun.rs Normal file
View file

@ -0,0 +1,185 @@
//! Minimal raw STUN client for NAT type detection.
//! Sends STUN Binding Requests to two servers and compares mapped ports.
use std::net::SocketAddr;
use tokio::net::UdpSocket;
use tracing::{debug, warn};
use crate::types::{NatMapping, NatType};
/// STUN servers queried during NAT detection. Two independent servers are
/// used so their mapped ports can be compared (same port = cone NAT,
/// different ports = symmetric NAT; see `detect_nat_type`).
const STUN_SERVERS: &[&str] = &[
    "stun.l.google.com:19302",
    "stun.cloudflare.com:3478",
];
/// How long to wait for each STUN Binding Response.
const STUN_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(3);
/// STUN Binding Request (RFC 5389): 20 bytes
/// Type: 0x0001 (Binding Request), Length: 0, Magic: 0x2112A442, Transaction ID: 12 random bytes
fn build_binding_request() -> [u8; 20] {
    use rand::Rng;
    let mut buf = [0u8; 20];
    // Header: message type 0x0001 (Binding Request), then message length 0.
    buf[..2].copy_from_slice(&0x0001u16.to_be_bytes());
    buf[2..4].copy_from_slice(&0u16.to_be_bytes());
    // RFC 5389 magic cookie.
    buf[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes());
    // Random 96-bit transaction ID fills the remaining 12 bytes.
    rand::rng().fill(&mut buf[8..20]);
    buf
}
/// Parse XOR-MAPPED-ADDRESS from a STUN Binding Response.
/// Returns the mapped SocketAddr or None if not found/parseable.
///
/// Fix: per RFC 5389 §7.3.3 a client MUST discard responses whose
/// transaction ID does not match the request; the magic cookie is likewise
/// checked. The original accepted any Binding Response on the socket, so a
/// spoofed or stale datagram could be parsed as our mapped address.
fn parse_xor_mapped_address(resp: &[u8], txn_id: &[u8; 12]) -> Option<SocketAddr> {
    if resp.len() < 20 {
        return None;
    }
    // Verify it's a Binding Response (0x0101)
    if resp[0] != 0x01 || resp[1] != 0x01 {
        return None;
    }
    let magic: [u8; 4] = [0x21, 0x12, 0xA4, 0x42];
    // Reject responses that don't echo our magic cookie and transaction ID
    // (RFC 5389 §7.3.3) — guards against spoofed/stale datagrams.
    if resp[4..8] != magic || resp[8..20] != txn_id[..] {
        return None;
    }
    // Walk attributes
    let msg_len = u16::from_be_bytes([resp[2], resp[3]]) as usize;
    let end = std::cmp::min(20 + msg_len, resp.len());
    let mut pos = 20;
    while pos + 4 <= end {
        let attr_type = u16::from_be_bytes([resp[pos], resp[pos + 1]]);
        let attr_len = u16::from_be_bytes([resp[pos + 2], resp[pos + 3]]) as usize;
        pos += 4;
        if pos + attr_len > end {
            break;
        }
        // XOR-MAPPED-ADDRESS = 0x0020, MAPPED-ADDRESS = 0x0001
        if attr_type == 0x0020 && attr_len >= 8 {
            // byte 0: reserved, byte 1: family (0x01=IPv4, 0x02=IPv6)
            let family = resp[pos + 1];
            if family == 0x01 {
                // IPv4: port XORed with the top 16 bits of the magic cookie,
                // address XORed with the full cookie.
                let xport = u16::from_be_bytes([resp[pos + 2], resp[pos + 3]])
                    ^ u16::from_be_bytes([magic[0], magic[1]]);
                let xip = [
                    resp[pos + 4] ^ magic[0],
                    resp[pos + 5] ^ magic[1],
                    resp[pos + 6] ^ magic[2],
                    resp[pos + 7] ^ magic[3],
                ];
                let addr = SocketAddr::new(
                    std::net::IpAddr::V4(std::net::Ipv4Addr::new(xip[0], xip[1], xip[2], xip[3])),
                    xport,
                );
                return Some(addr);
            } else if family == 0x02 && attr_len >= 20 {
                // IPv6: XOR with magic + txn_id
                let xport = u16::from_be_bytes([resp[pos + 2], resp[pos + 3]])
                    ^ u16::from_be_bytes([magic[0], magic[1]]);
                let mut ip6 = [0u8; 16];
                let xor_key: Vec<u8> = magic.iter().chain(txn_id.iter()).copied().collect();
                for i in 0..16 {
                    ip6[i] = resp[pos + 4 + i] ^ xor_key[i];
                }
                let addr = SocketAddr::new(
                    std::net::IpAddr::V6(std::net::Ipv6Addr::from(ip6)),
                    xport,
                );
                return Some(addr);
            }
        }
        // Pad to 4-byte boundary
        pos += (attr_len + 3) & !3;
    }
    None
}
/// Query a single STUN server and return the mapped address.
///
/// Fix: DNS resolution now uses `tokio::net::lookup_host` — the previous
/// `std::net::ToSocketAddrs` call performed blocking DNS inside an async fn,
/// stalling the executor thread.
///
/// Returns `None` on any failure (resolution, send, timeout, or an
/// unparseable response); failures are logged at debug level.
async fn stun_query(sock: &UdpSocket, server: &str) -> Option<SocketAddr> {
    let server_addr = match tokio::net::lookup_host(server).await {
        Ok(mut addrs) => addrs.next()?,
        Err(e) => {
            debug!(server, error = %e, "STUN DNS resolution failed");
            return None;
        }
    };
    let request = build_binding_request();
    // Bytes 8..20 of the request are the transaction ID; the response must
    // echo it (checked by parse_xor_mapped_address).
    let txn_id: [u8; 12] = request[8..20].try_into().expect("fixed 12-byte slice");
    if let Err(e) = sock.send_to(&request, server_addr).await {
        debug!(server, error = %e, "STUN send failed");
        return None;
    }
    let mut buf = [0u8; 256];
    match tokio::time::timeout(STUN_TIMEOUT, sock.recv_from(&mut buf)).await {
        Ok(Ok((len, _))) => parse_xor_mapped_address(&buf[..len], &txn_id),
        Ok(Err(e)) => {
            debug!(server, error = %e, "STUN recv failed");
            None
        }
        Err(_) => {
            debug!(server, "STUN query timed out (3s)");
            None
        }
    }
}
/// Detect NAT type by comparing mapped addresses from two STUN servers.
/// Must be called with the local port we're interested in (for Public detection).
/// Also returns the NatMapping classification for the advanced NAT profile.
///
/// NOTE(review): `local_port` is only used in the debug log — the Public
/// heuristic compares against the probe socket's own ephemeral port.
/// Confirm that is intended.
pub async fn detect_nat_type(local_port: u16) -> (NatType, NatMapping) {
    let sock = match UdpSocket::bind("0.0.0.0:0").await {
        Ok(s) => s,
        Err(e) => {
            warn!(error = %e, "Failed to bind UDP socket for STUN");
            return (NatType::Unknown, NatMapping::Unknown);
        }
    };
    let local_addr = sock.local_addr().ok();
    // Both servers are queried sequentially from the same socket, so each
    // response can only belong to the query in flight.
    let first = stun_query(&sock, STUN_SERVERS[0]).await;
    let second = stun_query(&sock, STUN_SERVERS[1]).await;
    match (first, second) {
        (Some(addr1), Some(addr2)) => {
            debug!(
                server1 = STUN_SERVERS[0], mapped1 = %addr1,
                server2 = STUN_SERVERS[1], mapped2 = %addr2,
                local_port,
                "STUN results"
            );
            // Mapped port equal to our bound port on both paths suggests
            // there is no NAT rewriting at all.
            let looks_public = local_addr
                .map(|l| addr1.port() == l.port() && addr2.port() == l.port())
                .unwrap_or(false);
            if looks_public {
                (NatType::Public, NatMapping::EndpointIndependent)
            } else if addr1.port() == addr2.port() {
                // Same mapping regardless of destination → cone NAT (EIM).
                (NatType::Easy, NatMapping::EndpointIndependent)
            } else {
                // Destination-dependent mapping → symmetric NAT (EDM).
                (NatType::Hard, NatMapping::EndpointDependent)
            }
        }
        (Some(addr), None) | (None, Some(addr)) => {
            debug!(mapped = %addr, "Only one STUN server responded, assuming Easy");
            (NatType::Easy, NatMapping::EndpointIndependent)
        }
        (None, None) => {
            warn!("Both STUN servers unreachable, NAT type unknown");
            (NatType::Unknown, NatMapping::Unknown)
        }
    }
}

845
crates/core/src/types.rs Normal file
View file

@ -0,0 +1,845 @@
use std::net::SocketAddr;
use serde::{Deserialize, Serialize};
/// A post identifier — BLAKE3 hash of the canonical serialized post
pub type PostId = [u8; 32];
/// A node identifier — ed25519 public key bytes (same as iroh EndpointId)
pub type NodeId = [u8; 32];
/// A public post on the network.
///
/// `PostId` is the hash of this struct's canonical serialization, so every
/// field here is identity-bearing. Mutable data (reactions, comments,
/// visibility) is kept in separate types (`BlobHeader`, `PostVisibility`)
/// precisely so it does not perturb the PostId.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Post {
    /// Author's node ID (ed25519 public key bytes)
    pub author: NodeId,
    /// Post text content
    pub content: String,
    /// Media attachments (empty for text-only posts)
    #[serde(default)]
    pub attachments: Vec<Attachment>,
    /// Unix timestamp in milliseconds
    pub timestamp_ms: u64,
}
/// A reference to a media blob attached to a post.
///
/// Only this lightweight descriptor is embedded in the post; the blob
/// bytes travel separately, content-addressed by `cid`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Attachment {
    /// BLAKE3 hash of the blob data
    pub cid: [u8; 32],
    /// MIME type (e.g. "image/jpeg")
    pub mime_type: String,
    /// Size of the blob in bytes
    pub size_bytes: u64,
}
/// Public profile — plaintext, synced to all peers.
///
/// Nothing here is encrypted (per-circle variants live in `CircleProfile`);
/// `public_visible` only controls whether non-circle peers should display
/// the name/bio.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PublicProfile {
    /// Profile owner (ed25519 public key bytes).
    pub node_id: NodeId,
    pub display_name: String,
    /// Short bio text (can be empty)
    #[serde(default)]
    pub bio: String,
    /// Unix timestamp in milliseconds of last update
    pub updated_at: u64,
    /// Anchor nodes this peer advertises for reachability
    #[serde(default)]
    pub anchors: Vec<NodeId>,
    /// Up to 10 currently-connected peer NodeIds (for 11-needle worm search)
    #[serde(default)]
    pub recent_peers: Vec<NodeId>,
    /// Bilateral preferred peer NodeIds (stable relay hubs)
    #[serde(default)]
    pub preferred_peers: Vec<NodeId>,
    /// Whether display_name/bio are visible to non-circle peers
    #[serde(default = "default_true")]
    pub public_visible: bool,
    /// Avatar blob CID (BLAKE3 hash)
    #[serde(default)]
    pub avatar_cid: Option<[u8; 32]>,
}
/// Serde `default` helper: `public_visible` deserializes to `true` when the
/// field is absent (payloads from older peers).
fn default_true() -> bool {
    true
}
/// Per-circle profile variant — encrypted with the circle's group key.
///
/// Lets a user present a different name/bio/avatar to each circle; only
/// holders of that circle's group key can read it.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CircleProfile {
    /// Profile owner.
    pub author: NodeId,
    /// Circle this variant is shown to.
    pub circle_name: String,
    pub display_name: String,
    pub bio: String,
    /// Avatar blob CID (BLAKE3 hash), if set.
    pub avatar_cid: Option<[u8; 32]>,
    /// Unix timestamp in milliseconds of last update.
    pub updated_at: u64,
}
/// Visibility of a follow relationship
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum FollowVisibility {
    /// Shared in gossip with all peers
    Public,
    /// Never shared unless encrypted for a circle (future)
    Private,
}
// Follows default to Public; Private is opt-in.
impl Default for FollowVisibility {
    fn default() -> Self {
        Self::Public
    }
}
/// Full peer record stored in the database.
#[derive(Debug, Clone)]
pub struct PeerRecord {
    pub node_id: NodeId,
    /// Last known socket addresses for this peer.
    pub addresses: Vec<SocketAddr>,
    /// When we last heard from the peer (Unix time; presumably ms like the
    /// other timestamps in this file — TODO confirm at the DB layer).
    pub last_seen: u64,
    /// Peer that first told us about this one, if any.
    pub introduced_by: Option<NodeId>,
    /// Whether this peer advertises itself as an anchor.
    pub is_anchor: bool,
    /// When we first learned of the peer (same unit as `last_seen`).
    pub first_seen: u64,
}
/// Compact peer info exchanged during gossip (address-free since sync/6)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GossipPeerInfo {
    pub node_id: NodeId,
    /// Whether the peer advertises itself as an anchor.
    pub is_anchor: bool,
}
/// Worm identifier for deduplication
pub type WormId = [u8; 16];
/// Result of a worm lookup for a single target.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WormResult {
    /// The node the worm was searching for.
    pub node_id: NodeId,
    /// Reported addresses for the target, as strings.
    pub addresses: Vec<String>,
    /// Peer that produced this answer.
    pub reporter: NodeId,
    /// Freshness of the reporter's knowledge in milliseconds
    /// (presumably age since last contact — confirm at producer).
    pub freshness_ms: u64,
    /// A node known to hold the post being searched for, if any.
    pub post_holder: Option<NodeId>,
    /// A node known to hold the blob being searched for, if any.
    pub blob_holder: Option<NodeId>,
}
/// Audience relationship direction
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceDirection {
    /// They are in our audience (we push to them)
    Inbound,
    /// We are in their audience (they push to us)
    Outbound,
}
/// Audience membership status
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceStatus {
    /// Request received, not yet reviewed.
    Pending,
    /// Membership granted.
    Approved,
    /// Membership rejected.
    Denied,
}
/// An audience membership record.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudienceRecord {
    pub node_id: NodeId,
    pub direction: AudienceDirection,
    pub status: AudienceStatus,
    /// When the join request was made (Unix ms, per this file's conventions).
    pub requested_at: u64,
    /// When the request was approved, if it was.
    pub approved_at: Option<u64>,
}
/// Audience approval mode setting
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceApprovalMode {
    /// Auto-accept all audience join requests
    PublicApprove,
    /// Queue requests for manual review
    ApprovalQueue,
}
// --- Encryption / Circles ---
/// Circle name (unique per node)
pub type CircleId = String;
/// Group identifier — BLAKE3 hash of the initial group public key
pub type GroupId = [u8; 32];
/// Group key epoch — incremented on each key rotation
pub type GroupEpoch = u64;
/// A group's private key wrapped for a specific member.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GroupMemberKey {
    /// Member the key is wrapped for.
    pub member: NodeId,
    /// Key-rotation epoch this wrapped key belongs to.
    pub epoch: GroupEpoch,
    /// 60 bytes: nonce(12) || encrypted_group_seed(32) || tag(16)
    pub wrapped_group_key: Vec<u8>,
}
/// A group key record (circle ↔ group key binding).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupKeyRecord {
    pub group_id: GroupId,
    pub circle_name: String,
    pub epoch: GroupEpoch,
    /// Public half of the group key for this epoch (presumably X25519,
    /// matching `WrappedKey`'s DH scheme — confirm at key-gen site).
    pub group_public_key: [u8; 32],
    /// Node that administers (rotates) the group key.
    pub admin: NodeId,
    /// Unix timestamp in milliseconds.
    pub created_at: u64,
}
/// Visibility of a post — separate from Post struct so it doesn't affect PostId
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum PostVisibility {
    /// Plaintext, shareable with anyone.
    Public,
    /// Content key wrapped individually for each recipient.
    Encrypted { recipients: Vec<WrappedKey> },
    /// Content key wrapped once under a circle's group key.
    GroupEncrypted {
        group_id: GroupId,
        /// Group-key epoch the CEK was wrapped under.
        epoch: GroupEpoch,
        /// 60 bytes: nonce(12) || encrypted_cek(32) || tag(16)
        wrapped_cek: Vec<u8>,
    },
}
// New posts are public unless the author opts into encryption.
impl Default for PostVisibility {
    fn default() -> Self {
        Self::Public
    }
}
/// A CEK (content-encryption key) wrapped for a specific recipient via X25519 DH.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct WrappedKey {
    pub recipient: NodeId,
    /// 60 bytes: nonce(12) || encrypted_cek(32) || tag(16)
    pub wrapped_cek: Vec<u8>,
}
/// User-facing intent for post visibility (resolved to recipients before encryption)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum VisibilityIntent {
    Public,
    /// All public follows
    Friends,
    /// Named circle
    Circle(String),
    /// Specific recipients
    Direct(Vec<NodeId>),
}
/// A named group of recipients.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Circle {
    /// Unique-per-node circle name (see `CircleId`).
    pub name: String,
    pub members: Vec<NodeId>,
    /// Unix timestamp in milliseconds.
    pub created_at: u64,
}
// --- Delete / Revocation ---
/// A signed record attesting that an author deleted one of their posts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeleteRecord {
    pub post_id: PostId,
    pub author: NodeId,
    /// When the delete was issued (Unix ms).
    pub timestamp_ms: u64,
    /// 64-byte ed25519 signature over post_id bytes
    pub signature: Vec<u8>,
}
/// An update to a post's visibility (new wrapped keys after revocation).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VisibilityUpdate {
    pub post_id: PostId,
    /// Presumably must equal the post's author — confirm where updates
    /// are validated before applying.
    pub author: NodeId,
    pub visibility: PostVisibility,
}
/// How to handle revoking a recipient's access to past encrypted posts.
#[derive(Debug, Clone, Copy)]
pub enum RevocationMode {
    /// Re-wrap the CEK for remaining recipients (fast, but revoked user still has ciphertext)
    SyncAccessList,
    /// Re-encrypt the post with a new CEK (slower, but revoked user can't decrypt even cached copy)
    ReEncrypt,
}
// --- Discovery Protocol v3 (N1/N2/N3 architecture) ---
/// A node reachable through one of our peers (stored in reachable_n2 or reachable_n3)
#[derive(Debug, Clone)]
pub struct ReachableEntry {
    /// The node that can be reached.
    pub reachable_node_id: NodeId,
    /// The peer that reported it as reachable.
    pub reporter_node_id: NodeId,
    /// Last refresh time (Unix ms, per this file's conventions).
    pub updated_at: u64,
}
/// NAT type classification from STUN probing
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum NatType {
    /// Observed addr matches local, or UPnP mapped — fully reachable
    Public,
    /// Same mapped port from multiple STUN servers — cone NAT, hole punch works
    Easy,
    /// Different mapped ports — symmetric NAT, hole punch unlikely to work
    Hard,
    /// Detection failed or not yet run
    Unknown,
}
impl NatType {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            NatType::Public => "public",
            NatType::Easy => "easy",
            NatType::Hard => "hard",
            NatType::Unknown => "unknown",
        }
    }
    /// Inverse of `Display`; anything unrecognized maps to `Unknown`.
    pub fn from_str_label(s: &str) -> Self {
        match s {
            "public" => NatType::Public,
            "easy" => NatType::Easy,
            "hard" => NatType::Hard,
            _ => NatType::Unknown,
        }
    }
}
impl std::fmt::Display for NatType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
/// NAT mapping behavior — whether port allocation depends on destination
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum NatMapping {
    /// Endpoint-Independent Mapping: same port for all destinations (cone NAT)
    EndpointIndependent,
    /// Endpoint-Dependent Mapping: different port per destination (symmetric NAT)
    EndpointDependent,
    /// Not yet determined
    Unknown,
}
impl NatMapping {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            NatMapping::EndpointIndependent => "eim",
            NatMapping::EndpointDependent => "edm",
            NatMapping::Unknown => "unknown",
        }
    }
    /// Inverse of `Display`; anything unrecognized maps to `Unknown`.
    pub fn from_str_label(s: &str) -> Self {
        match s {
            "eim" => NatMapping::EndpointIndependent,
            "edm" => NatMapping::EndpointDependent,
            _ => NatMapping::Unknown,
        }
    }
}
impl std::fmt::Display for NatMapping {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
/// NAT filtering behavior — what inbound packets are allowed through
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum NatFiltering {
    /// Full cone or address-restricted — no scanning needed
    Open,
    /// Port-restricted — only exact IP:port gets through, needs scanning
    PortRestricted,
    /// Not yet determined
    Unknown,
}
impl NatFiltering {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            NatFiltering::Open => "open",
            NatFiltering::PortRestricted => "port_restricted",
            NatFiltering::Unknown => "unknown",
        }
    }
    /// Inverse of `Display`; anything unrecognized maps to `Unknown`.
    pub fn from_str_label(s: &str) -> Self {
        match s {
            "open" => NatFiltering::Open,
            "port_restricted" => NatFiltering::PortRestricted,
            _ => NatFiltering::Unknown,
        }
    }
}
impl std::fmt::Display for NatFiltering {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
/// Combined NAT profile: mapping + filtering behavior
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NatProfile {
pub mapping: NatMapping,
pub filtering: NatFiltering,
}
impl NatProfile {
pub fn new(mapping: NatMapping, filtering: NatFiltering) -> Self {
Self { mapping, filtering }
}
pub fn unknown() -> Self {
Self { mapping: NatMapping::Unknown, filtering: NatFiltering::Unknown }
}
/// Whether this NAT profile requires port scanning for hole punch
pub fn needs_scanning(&self) -> bool {
self.mapping == NatMapping::EndpointDependent
&& self.filtering == NatFiltering::PortRestricted
}
/// Convert from legacy NatType (filtering is always Unknown —
/// "Public" may only be public on IPv6, not IPv4. Use anchor filter probe
/// to determine filtering reliably.)
pub fn from_nat_type(nat: NatType) -> Self {
match nat {
NatType::Public => Self::new(NatMapping::EndpointIndependent, NatFiltering::Unknown),
NatType::Easy => Self::new(NatMapping::EndpointIndependent, NatFiltering::Unknown),
NatType::Hard => Self::new(NatMapping::EndpointDependent, NatFiltering::Unknown),
NatType::Unknown => Self::unknown(),
}
}
/// Whether standard hole punch should work (no scanning needed)
pub fn standard_punch_likely(&self, peer: &NatProfile) -> bool {
// If either side is EIM or Public, standard punch works
if self.mapping == NatMapping::EndpointIndependent
|| peer.mapping == NatMapping::EndpointIndependent
{
return true;
}
// Both EDM — only works if at least one has Open filtering
if self.filtering == NatFiltering::Open || peer.filtering == NatFiltering::Open {
return true;
}
false
}
/// Whether scanning should be attempted after standard punch fails.
/// If both sides have Open filtering, scanning is never needed — standard punch
/// works regardless of mapping. Scanning is only needed when at least one side
/// is PortRestricted (or Unknown, conservatively).
pub fn should_try_scanning(&self, peer: &NatProfile) -> bool {
// If both sides have confirmed Open filtering, no scanning needed
if self.filtering == NatFiltering::Open && peer.filtering == NatFiltering::Open {
return false;
}
// Need EDM or Unknown mapping on at least one side, AND at least one side
// with PortRestricted or Unknown filtering
let has_edm_or_unknown = self.mapping == NatMapping::EndpointDependent
|| peer.mapping == NatMapping::EndpointDependent
|| self.mapping == NatMapping::Unknown
|| peer.mapping == NatMapping::Unknown;
let has_restricted_filtering = self.filtering != NatFiltering::Open
|| peer.filtering != NatFiltering::Open;
has_edm_or_unknown && has_restricted_filtering
}
}
/// Device profile — determines connection slot budget
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeviceProfile {
    /// Desktop: 10 preferred + 71 local + 20 wide = 101 mesh connections
    Desktop,
    /// Mobile: 3 preferred + 7 local + 5 wide = 15 mesh connections
    Mobile,
}
impl DeviceProfile {
    /// Full slot budget as (preferred, local, wide, session, relay_pipes),
    /// kept in one table so the per-profile numbers stay side by side.
    fn budget(self) -> (usize, usize, usize, usize, usize) {
        match self {
            DeviceProfile::Desktop => (10, 71, 20, 20, 10),
            DeviceProfile::Mobile => (3, 7, 5, 5, 2),
        }
    }
    /// Bilateral preferred-peer slots.
    pub fn preferred_slots(&self) -> usize {
        self.budget().0
    }
    /// Diverse local mesh slots.
    pub fn local_slots(&self) -> usize {
        self.budget().1
    }
    /// Bloom-sourced random distant slots.
    pub fn wide_slots(&self) -> usize {
        self.budget().2
    }
    /// Short-lived session connection slots.
    pub fn session_slots(&self) -> usize {
        self.budget().3
    }
    /// Maximum concurrent relay pipes this device will carry.
    pub fn max_relay_pipes(&self) -> usize {
        self.budget().4
    }
}
/// How a session connection was established
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SessionReachMethod {
    /// Direct QUIC (address from relay introduction)
    Direct,
    /// Coordinated hole punch via relay introduction
    HolePunch,
    /// Piped through intermediary relay
    Relayed,
}
impl std::fmt::Display for SessionReachMethod {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Stable lowercase labels used for logging/persistence.
        let label = match self {
            SessionReachMethod::Direct => "direct",
            SessionReachMethod::HolePunch => "holepunch",
            SessionReachMethod::Relayed => "relayed",
        };
        f.write_str(label)
    }
}
/// Slot kind for mesh connections
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PeerSlotKind {
    /// Bilateral preferred connections (Desktop: 10, Mobile: 3)
    Preferred,
    /// Diverse local connections (Desktop: 71, Mobile: 7)
    Local,
    /// Bloom-sourced random distant connections (20 slots)
    Wide,
}
impl std::fmt::Display for PeerSlotKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Stable lowercase labels; `from_str` below is the inverse.
        let label = match self {
            PeerSlotKind::Preferred => "preferred",
            PeerSlotKind::Local => "local",
            PeerSlotKind::Wide => "wide",
        };
        f.write_str(label)
    }
}
impl std::str::FromStr for PeerSlotKind {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "preferred" => Ok(PeerSlotKind::Preferred),
            // "social" is accepted as an alias for local slots
            // (NOTE(review): presumably a legacy name — confirm).
            "local" | "social" => Ok(PeerSlotKind::Local),
            "wide" => Ok(PeerSlotKind::Wide),
            _ => Err(anyhow::anyhow!("unknown slot kind: {}", s)),
        }
    }
}
// --- Social Routing Cache ---
/// How we last reached a social contact
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReachMethod {
    Direct,
    Referral,
    Worm,
    Inbound,
}
impl ReachMethod {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            ReachMethod::Direct => "direct",
            ReachMethod::Referral => "referral",
            ReachMethod::Worm => "worm",
            ReachMethod::Inbound => "inbound",
        }
    }
}
impl std::fmt::Display for ReachMethod {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
impl std::str::FromStr for ReachMethod {
    type Err = anyhow::Error;
    // Inverse of `Display`; unrecognized labels are an error rather than a
    // silent default, since these come from our own persisted rows.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "direct" => Ok(ReachMethod::Direct),
            "referral" => Ok(ReachMethod::Referral),
            "worm" => Ok(ReachMethod::Worm),
            "inbound" => Ok(ReachMethod::Inbound),
            _ => Err(anyhow::anyhow!("unknown reach method: {}", s)),
        }
    }
}
/// Social relationship type
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SocialRelation {
    Follow,
    Audience,
    Mutual,
}
impl SocialRelation {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            SocialRelation::Follow => "follow",
            SocialRelation::Audience => "audience",
            SocialRelation::Mutual => "mutual",
        }
    }
}
impl std::fmt::Display for SocialRelation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
impl std::str::FromStr for SocialRelation {
    type Err = anyhow::Error;
    /// Inverse of `Display`; unrecognized labels are an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "follow" => Ok(SocialRelation::Follow),
            "audience" => Ok(SocialRelation::Audience),
            "mutual" => Ok(SocialRelation::Mutual),
            _ => Err(anyhow::anyhow!("unknown social relation: {}", s)),
        }
    }
}
/// Online/disconnected status
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SocialStatus {
    Online,
    Disconnected,
}
impl SocialStatus {
    /// Stable lowercase label used for persistence and display.
    fn label(self) -> &'static str {
        match self {
            SocialStatus::Online => "online",
            SocialStatus::Disconnected => "disconnected",
        }
    }
}
impl std::fmt::Display for SocialStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.label())
    }
}
impl std::str::FromStr for SocialStatus {
    type Err = anyhow::Error;
    /// Inverse of `Display`; unrecognized labels are an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "online" => Ok(SocialStatus::Online),
            "disconnected" => Ok(SocialStatus::Disconnected),
            _ => Err(anyhow::anyhow!("unknown social status: {}", s)),
        }
    }
}
/// A peer with resolved addresses (for peer_addresses in social_routes).
///
/// Single-letter field names keep serialized payloads compact:
/// `n` is the node id (NOTE(review): appears to be a string-encoded
/// NodeId — confirm the encoding at the producer), `a` its addresses
/// as "ip:port" strings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerWithAddress {
    /// Node identifier (string-encoded).
    pub n: String,
    /// Resolved addresses as strings.
    pub a: Vec<String>,
}
// --- CDN Manifest Layer ---
/// Lightweight post reference in a manifest.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ManifestEntry {
    pub post_id: PostId,
    /// Post creation time (ms) — lets readers order entries.
    pub timestamp_ms: u64,
    /// Whether the post carries media blobs.
    pub has_attachments: bool,
}
/// Author-signed manifest — cannot be forged by hosts. Max 256KB serialized.
///
/// Anchored to a single post, it also carries a window of neighboring posts
/// so a reader can page through an author's timeline from one manifest.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthorManifest {
    /// The post this manifest is anchored to
    pub post_id: PostId,
    pub author: NodeId,
    /// Author's N+10:A (ip:port strings)
    pub author_addresses: Vec<String>,
    /// Original post creation time (ms)
    pub created_at: u64,
    /// When manifest was last updated (ms)
    pub updated_at: u64,
    /// Up to 10 posts before this one
    pub previous_posts: Vec<ManifestEntry>,
    /// Up to 10 posts after (grows via updates)
    pub following_posts: Vec<ManifestEntry>,
    /// ed25519 over canonical digest
    pub signature: Vec<u8>,
}
/// CDN manifest traveling with blobs (author-signed part + host metadata).
///
/// Only `author_manifest` is covered by a signature; the routing fields
/// below are hop-by-hop metadata rewritten by each serving host.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CdnManifest {
    pub author_manifest: AuthorManifest,
    /// Serving host's NodeId
    pub host: NodeId,
    /// Serving host's N+10:A
    pub host_addresses: Vec<String>,
    /// Who the host got it from
    pub source: NodeId,
    /// Source's N+10:A
    pub source_addresses: Vec<String>,
    /// How many downstream this host has
    pub downstream_count: u32,
}
/// Cached routing info for a social contact.
#[derive(Debug, Clone)]
pub struct SocialRouteEntry {
    pub node_id: NodeId,
    /// Last known direct addresses.
    pub addresses: Vec<SocketAddr>,
    /// Addresses of peers adjacent to the contact, for indirect reach.
    pub peer_addresses: Vec<PeerWithAddress>,
    pub relation: SocialRelation,
    pub status: SocialStatus,
    /// When we last held a connection to them (Unix ms).
    pub last_connected_ms: u64,
    /// When we last saw any evidence of them (Unix ms).
    pub last_seen_ms: u64,
    /// How the last successful contact was made.
    pub reach_method: ReachMethod,
    /// 2-layer preferred peer tree (~100 nodes) for fast relay candidate search
    pub preferred_tree: Vec<NodeId>,
}
// --- Engagement System ---
/// Maximum engagement header size before splitting oldest comments into a separate post
pub const BLOB_HEADER_ENGAGEMENT_THRESHOLD: usize = 16 * 1024;
/// A reaction to a post.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Reaction {
    /// Who reacted
    pub reactor: NodeId,
    /// Emoji string (e.g. "👍", "❤️")
    pub emoji: String,
    /// Which post
    pub post_id: PostId,
    /// When the reaction was created (ms)
    pub timestamp_ms: u64,
    /// If private: X25519-encrypted payload (only author can decrypt)
    #[serde(default)]
    pub encrypted_payload: Option<String>,
}
/// An inline comment on a post.
///
/// Unlike reactions, comments carry an individual signature — presumably
/// so relaying hosts cannot fabricate them; confirm at the validation site.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct InlineComment {
    /// Comment author
    pub author: NodeId,
    /// Which post this comment is on
    pub post_id: PostId,
    /// Comment text
    pub content: String,
    /// When the comment was created (ms)
    pub timestamp_ms: u64,
    /// ed25519 signature over BLAKE3(author || post_id || content || timestamp_ms)
    pub signature: Vec<u8>,
}
/// Permission level for comments on a post
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum CommentPermission {
    /// Anyone can comment
    Public,
    /// Only people in author's audience can comment
    AudienceOnly,
    /// Comments disabled
    None,
}
// Comments default to fully open.
impl Default for CommentPermission {
    fn default() -> Self {
        Self::Public
    }
}
/// Permission level for reactions on a post
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ReactPermission {
    /// Public reactions visible to all
    Public,
    /// Private reactions (encrypted, only author sees)
    Private,
    /// Both public and private allowed
    Both,
    /// Reactions disabled
    None,
}
// Reactions default to allowing both public and private.
impl Default for ReactPermission {
    fn default() -> Self {
        Self::Both
    }
}
/// Moderation mode for comments
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ModerationMode {
    /// Author maintains a blocklist of users
    AuthorBlocklist,
    /// Only audience members can engage
    AudienceOnly,
}
// Default moderation: open engagement filtered by the author's blocklist.
impl Default for ModerationMode {
    fn default() -> Self {
        Self::AuthorBlocklist
    }
}
/// Author-controlled engagement policy for a post.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CommentPolicy {
    pub allow_comments: CommentPermission,
    pub allow_reacts: ReactPermission,
    pub moderation: ModerationMode,
    /// Blocked NodeIds — rejected from commenting/reacting
    #[serde(default)]
    pub blocklist: Vec<NodeId>,
}
// The default policy is the most permissive one: public comments, both
// reaction kinds, blocklist-based moderation, empty blocklist.
impl Default for CommentPolicy {
    fn default() -> Self {
        Self {
            allow_comments: CommentPermission::default(),
            allow_reacts: ReactPermission::default(),
            moderation: ModerationMode::default(),
            blocklist: Vec::new(),
        }
    }
}
/// Incremental engagement operation (sent in BlobHeaderDiff).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum BlobHeaderDiffOp {
    /// Append a new reaction.
    AddReaction(Reaction),
    /// Retract a reaction, identified by (reactor, emoji, post).
    RemoveReaction { reactor: NodeId, emoji: String, post_id: PostId },
    /// Append a new signed comment.
    AddComment(InlineComment),
    /// Replace the post's engagement policy wholesale.
    SetPolicy(CommentPolicy),
    /// Oldest comments were split off into `new_post_id`
    /// (see BLOB_HEADER_ENGAGEMENT_THRESHOLD).
    ThreadSplit { new_post_id: PostId },
}
/// Aggregated engagement header for a post (stored locally, propagated as diffs).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobHeader {
    pub post_id: PostId,
    /// Author of the underlying post (owner of `policy`).
    pub author: NodeId,
    pub reactions: Vec<Reaction>,
    pub comments: Vec<InlineComment>,
    pub policy: CommentPolicy,
    /// Last modification time (ms).
    pub updated_at: u64,
    /// PostIds of split-off comment overflow posts
    #[serde(default)]
    pub thread_splits: Vec<PostId>,
}
/// Links a split-off comment post back to its original parent.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ThreadMeta {
    /// The split-off post's ID
    pub post_id: PostId,
    /// The original parent post this was split from
    pub parent_post_id: PostId,
}

243
crates/core/src/upnp.rs Normal file
View file

@ -0,0 +1,243 @@
//! Best-effort UPnP port mapping for NAT traversal.
//! Skipped entirely on mobile platforms where UPnP is unsupported.
use std::net::SocketAddr;
#[cfg(not(any(target_os = "android", target_os = "ios")))]
use tracing::{info, debug};
/// Result of a successful UPnP port mapping.
pub struct UpnpMapping {
    /// Externally visible ip:port the router mapped for us.
    pub external_addr: SocketAddr,
    /// Lease duration granted, in seconds; the mapping must be renewed
    /// before it lapses.
    pub lease_secs: u32,
    /// Local UDP port the mapping forwards to.
    pub local_port: u16,
}
/// Best-effort UPnP port mapping.
/// 3s gateway discovery timeout, 1800s (30 min) lease, UDP protocol.
/// Returns None on any failure (no router, unsupported, timeout, port conflict).
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn try_upnp_mapping(local_port: u16) -> Option<UpnpMapping> {
    use igd_next::SearchOptions;
    // Bound discovery to 3s so startup never hangs on networks without a
    // UPnP-capable router.
    let opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(opts).await {
        Ok(gw) => gw,
        Err(e) => {
            debug!("UPnP gateway discovery failed (expected behind non-UPnP router): {}", e);
            return None;
        }
    };
    let external_ip = match gateway.get_external_ip().await {
        Ok(ip) => ip,
        Err(e) => {
            debug!("UPnP: could not get external IP: {}", e);
            return None;
        }
    };
    // Local address for the mapping — bind to all interfaces.
    // NOTE(review): 0.0.0.0 as the internal client relies on the router
    // accepting a wildcard local address — confirm against target gateways.
    let local_addr =
        SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), local_port);
    let lease_secs: u32 = 1800; // 30 minutes
    // First preference: an external port equal to our local port.
    let same_port = gateway
        .add_port(
            igd_next::PortMappingProtocol::UDP,
            local_port,
            local_addr,
            lease_secs,
            "itsgoin",
        )
        .await;
    let external_port = if same_port.is_ok() {
        local_port
    } else {
        // That port is taken — let the router pick any free one.
        match gateway
            .add_any_port(igd_next::PortMappingProtocol::UDP, local_addr, lease_secs, "itsgoin")
            .await
        {
            Ok(port) => port,
            Err(e) => {
                debug!("UPnP: port mapping failed: {}", e);
                return None;
            }
        }
    };
    let external_addr = SocketAddr::new(external_ip, external_port);
    info!("UPnP: mapped {}:{} → :{}", external_ip, external_port, local_port);
    Some(UpnpMapping {
        external_addr,
        lease_secs,
        local_port,
    })
}
/// UPnP is unsupported on mobile platforms — mapping always fails.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn try_upnp_mapping(_local_port: u16) -> Option<UpnpMapping> {
    None
}
/// Renew an existing UPnP lease. Returns true on success.
///
/// Renewal re-issues AddPortMapping with the same tuple; routers generally
/// treat this as a lease refresh.
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn renew_upnp_mapping(local_port: u16, external_port: u16) -> bool {
    use igd_next::SearchOptions;
    let opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(opts).await {
        Ok(gw) => gw,
        Err(_) => return false,
    };
    let local_addr =
        SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), local_port);
    gateway
        .add_port(
            igd_next::PortMappingProtocol::UDP,
            external_port,
            local_addr,
            1800,
            "itsgoin",
        )
        .await
        .is_ok()
}
/// UPnP is unsupported on mobile platforms — renewal always fails.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn renew_upnp_mapping(_local_port: u16, _external_port: u16) -> bool {
    false
}
/// Remove UPnP mapping on shutdown. Best-effort, errors are silently ignored.
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn remove_upnp_mapping(external_port: u16) {
    use igd_next::SearchOptions;
    let opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(opts).await {
        Ok(gw) => gw,
        Err(_) => return,
    };
    // Failure here is harmless: the 30-minute lease lapses on its own.
    let _ = gateway.remove_port(igd_next::PortMappingProtocol::UDP, external_port).await;
    info!("UPnP: removed port mapping for external port {}", external_port);
}
/// No-op on mobile platforms where UPnP is unsupported.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn remove_upnp_mapping(_external_port: u16) {}
// --- TCP port mapping (for HTTP post delivery) ---
/// Best-effort UPnP TCP port mapping on the same port as QUIC UDP.
/// Returns true on success.
///
/// Note: despite usually running right after the UDP mapping, this performs
/// its own gateway discovery (3s timeout) — no gateway handle is shared
/// between calls in this module.
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn try_upnp_tcp_mapping(local_port: u16, external_port: u16) -> bool {
    use igd_next::SearchOptions;
    let search_opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(search_opts).await {
        Ok(gw) => gw,
        Err(_) => return false,
    };
    // Map to all interfaces on the local side, like the UDP variant above.
    let local_addr = SocketAddr::new(
        std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
        local_port,
    );
    match gateway
        .add_port(
            igd_next::PortMappingProtocol::TCP,
            external_port,
            local_addr,
            1800,
            "itsgoin-http",
        )
        .await
    {
        Ok(()) => {
            info!("UPnP: TCP port {} mapped for HTTP serving", external_port);
            true
        }
        Err(e) => {
            debug!("UPnP: TCP port mapping failed (non-fatal): {}", e);
            false
        }
    }
}
/// UPnP is unsupported on mobile platforms — mapping always fails.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn try_upnp_tcp_mapping(_local_port: u16, _external_port: u16) -> bool {
    false
}
/// Renew an existing UPnP TCP lease. Returns true on success.
///
/// Renewal re-issues AddPortMapping with the same tuple; routers generally
/// treat this as a lease refresh.
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn renew_upnp_tcp_mapping(local_port: u16, external_port: u16) -> bool {
    use igd_next::SearchOptions;
    let opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(opts).await {
        Ok(gw) => gw,
        Err(_) => return false,
    };
    let local_addr =
        SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), local_port);
    gateway
        .add_port(
            igd_next::PortMappingProtocol::TCP,
            external_port,
            local_addr,
            1800,
            "itsgoin-http",
        )
        .await
        .is_ok()
}
/// UPnP is unsupported on mobile platforms — renewal always fails.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn renew_upnp_tcp_mapping(_local_port: u16, _external_port: u16) -> bool {
    false
}
/// Remove UPnP TCP mapping on shutdown. Best-effort.
#[cfg(not(any(target_os = "android", target_os = "ios")))]
pub async fn remove_upnp_tcp_mapping(external_port: u16) {
    use igd_next::SearchOptions;
    let opts = SearchOptions {
        timeout: Some(std::time::Duration::from_secs(3)),
        ..Default::default()
    };
    let gateway = match igd_next::aio::tokio::search_gateway(opts).await {
        Ok(gw) => gw,
        Err(_) => return,
    };
    // Failure here is harmless: the lease lapses on its own.
    let _ = gateway
        .remove_port(igd_next::PortMappingProtocol::TCP, external_port)
        .await;
    info!("UPnP: removed TCP port mapping for port {}", external_port);
}
/// No-op on mobile platforms where UPnP is unsupported.
#[cfg(any(target_os = "android", target_os = "ios"))]
pub async fn remove_upnp_tcp_mapping(_external_port: u16) {}

456
crates/core/src/web.rs Normal file
View file

@ -0,0 +1,456 @@
//! itsgoin.net web handler — serves shared posts by proxying content through
//! the anchor node. On-demand: connects to the author via QUIC, pulls the post,
//! renders HTML, serves blobs. No permanent storage of fetched content.
//!
//! Routes (behind Apache reverse proxy):
//! GET /p/<postid_hex>/<author_hex> → render post HTML (fetched on-demand)
//! GET /b/<blobid_hex> → serve blob (images/videos)
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tracing::{debug, info, warn};
use crate::http::render_post_html;
use crate::node::Node;
use crate::types::{NodeId, PostId, PostVisibility};
/// Run the web handler on the given port. Blocks forever.
pub async fn run_web_handler(
port: u16,
node: Arc<Node>,
) -> anyhow::Result<()> {
let addr: SocketAddr = ([127, 0, 0, 1], port).into();
let listener = TcpListener::bind(addr).await?;
info!("Web handler listening on {}", addr);
loop {
let (stream, _peer_addr) = match listener.accept().await {
Ok(v) => v,
Err(e) => {
debug!("Web accept error: {}", e);
continue;
}
};
let node = Arc::clone(&node);
tokio::spawn(async move {
handle_web_request(stream, &node).await;
});
}
}
async fn handle_web_request(mut stream: TcpStream, node: &Arc<Node>) {
let mut buf = vec![0u8; 4096];
let n = match tokio::time::timeout(
std::time::Duration::from_secs(5),
stream.read(&mut buf),
)
.await
{
Ok(Ok(0)) | Ok(Err(_)) | Err(_) => return,
Ok(Ok(n)) => n,
};
let request_bytes = &buf[..n];
let (method, path) = match parse_request_line(request_bytes) {
Some(v) => v,
None => return,
};
if method != "GET" {
return;
}
// Extract X-Forwarded-For header (set by Apache reverse proxy)
let browser_ip = extract_header(request_bytes, "x-forwarded-for")
.and_then(|v| v.split(',').next().map(|s| s.trim().to_string()));
if path.starts_with("/p/") {
serve_post(&mut stream, path, node, browser_ip.as_deref()).await;
} else if path.starts_with("/b/") {
serve_blob(&mut stream, path, node).await;
}
}
/// Extract a header value from raw HTTP request bytes (case-insensitive).
///
/// Decodes only the valid-UTF-8 prefix of the buffer, so a binary body (or a
/// partially-read multibyte sequence) no longer hides every header, and stops
/// scanning at the blank line that terminates the header section. Header-name
/// comparison uses `eq_ignore_ascii_case`, avoiding a per-line allocation.
fn extract_header<'a>(buf: &'a [u8], name: &str) -> Option<&'a str> {
    let text = match std::str::from_utf8(buf) {
        Ok(t) => t,
        // Fall back to the longest valid prefix; headers are ASCII anyway.
        Err(e) => std::str::from_utf8(&buf[..e.valid_up_to()]).ok()?,
    };
    for line in text.split("\r\n") {
        if line.is_empty() {
            // Blank line: end of header section — don't scan the body.
            break;
        }
        if let Some((field, value)) = line.split_once(':') {
            if field.eq_ignore_ascii_case(name) {
                return Some(value.trim());
            }
        }
    }
    None
}
/// Handle GET /p/<postid_hex>/<author_hex>
///
/// Three-tier serving:
/// 1. Redirect to a CDN holder with a public/punchable HTTP server
/// 2. TCP hole-punch + redirect for EIM NAT holders
/// 3. QUIC proxy fallback (fetch post + render HTML here)
///
/// Fixes over the original: path slicing uses `str::get` so a multibyte
/// UTF-8 path cannot panic on a non-char-boundary slice; the author segment
/// is only parsed when a literal '/' separator follows the post id; malformed
/// ids consistently get a 404; and a post fetched from the network is only
/// rendered when it is Public, matching the local-storage path.
async fn serve_post(stream: &mut TcpStream, path: &str, node: &Arc<Node>, browser_ip: Option<&str>) {
    let rest = &path[3..]; // strip "/p/" (ASCII prefix, safe)
    // Parse post_id (64 hex chars). `get` returns None both when the path is
    // too short and when byte 64 is not a char boundary.
    let post_hex = match rest.get(..64) {
        Some(h) if h.chars().all(|c| c.is_ascii_hexdigit()) => h,
        _ => {
            let _ = write_http_response(stream, 404, "text/plain", b"Not found").await;
            return;
        }
    };
    let post_id: PostId = match hex::decode(post_hex) {
        Ok(b) if b.len() == 32 => b.try_into().unwrap(),
        _ => return,
    };
    // Parse optional author_id: requires an explicit '/' after the post id,
    // then exactly 64 hex chars. Anything else yields None.
    let author_id: Option<NodeId> = rest
        .get(64..65)
        .filter(|sep| *sep == "/")
        .and_then(|_| rest.get(65..))
        .and_then(|author_hex| {
            if author_hex.len() == 64 && author_hex.chars().all(|c| c.is_ascii_hexdigit()) {
                hex::decode(author_hex).ok().and_then(|b| b.try_into().ok())
            } else {
                None
            }
        });
    // Gather all known holders (author + CDN downstream peers) and check for
    // a local copy, under one short-lived storage lock.
    let (holders, local_post) = {
        let store = node.storage.lock().await;
        let mut holders = Vec::new();
        if let Some(author) = author_id {
            holders.push(author);
        }
        if let Ok(downstream) = store.get_post_downstream(&post_id) {
            for peer in downstream {
                if !holders.contains(&peer) {
                    holders.push(peer);
                }
            }
        }
        let local = store.get_post_with_visibility(&post_id).ok().flatten();
        (holders, local)
    }; // lock released before any network awaits below
    // --- Tier 1 & 2: Try direct redirect to an HTTP-capable holder ---
    if let Some(redirect_url) = try_redirect(node, &holders, &post_id, browser_ip).await {
        let header = format!(
            "HTTP/1.1 302 Found\r\nLocation: {}\r\nConnection: close\r\n\r\n",
            redirect_url
        );
        let _ = stream.write_all(header.as_bytes()).await;
        info!("Web: redirected post {} to {}", post_hex, redirect_url);
        return;
    }
    // --- Tier 3: QUIC proxy fallback ---
    // Serve from local storage when we already hold a public copy.
    if let Some((post, visibility)) = local_post {
        if matches!(visibility, PostVisibility::Public) {
            let author_name = {
                let store = node.storage.lock().await;
                store.get_profile(&post.author).ok().flatten()
                    .map(|p| p.display_name).unwrap_or_default()
            };
            let html = render_post_html(&post, &post_id, &author_name);
            let _ = write_http_response(stream, 200, "text/html; charset=utf-8", html.as_bytes()).await;
            return;
        }
    }
    // Fetch via content search + PostFetch (all-zero author = unknown).
    let author = author_id.unwrap_or([0u8; 32]);
    info!("Web: proxying post {} via QUIC (no redirect candidate found)", post_hex);
    let search_result = tokio::time::timeout(
        std::time::Duration::from_secs(15),
        fetch_post_from_network(node, &author, &post_id),
    ).await;
    match search_result {
        Ok(Ok(Some(sync_post))) => {
            // Cache the fetched copy so later requests hit local storage.
            {
                let store = node.storage.lock().await;
                let _ = store.store_post_with_visibility(
                    &sync_post.id, &sync_post.post, &sync_post.visibility,
                );
            }
            // Only render publicly-visible posts, mirroring the
            // local-storage path above.
            if matches!(sync_post.visibility, PostVisibility::Public) {
                let author_name = {
                    let store = node.storage.lock().await;
                    store.get_profile(&sync_post.post.author).ok().flatten()
                        .map(|p| p.display_name).unwrap_or_default()
                };
                let html = render_post_html(&sync_post.post, &post_id, &author_name);
                let _ = write_http_response(stream, 200, "text/html; charset=utf-8", html.as_bytes()).await;
                return;
            }
            debug!("Web: fetched post is not public; refusing to render");
        }
        Ok(Ok(None)) => {
            debug!("Web: post not found via network search");
        }
        Ok(Err(e)) => {
            warn!("Web: network search failed: {}", e);
        }
        Err(_) => {
            warn!("Web: network search timed out (15s)");
        }
    }
    let html = render_unavailable_screen();
    let _ = write_http_response(stream, 200, "text/html; charset=utf-8", html.as_bytes()).await;
}
/// Try to redirect to an HTTP-capable holder of this post.
/// Returns a redirect URL (http://ip:port/p/<hex>) if a suitable holder is found.
///
/// Tier 1: Holders with http_capable=true and a known http_addr (publicly reachable).
/// Tier 2: Holders with EIM NAT — send TCP punch request, then redirect.
async fn try_redirect(
node: &Arc<Node>,
holders: &[NodeId],
post_id: &PostId,
browser_ip: Option<&str>,
) -> Option<String> {
use crate::types::NatMapping;
let post_hex = hex::encode(post_id);
let store = node.storage.lock().await;
// Classify holders into tiers
let mut direct_candidates: Vec<(NodeId, String)> = Vec::new(); // http_addr known
let mut punch_candidates: Vec<NodeId> = Vec::new(); // EIM NAT, connected
for holder in holders {
let (capable, addr) = store.get_peer_http_info(holder);
if capable {
if let Some(ref addr) = addr {
direct_candidates.push((*holder, addr.clone()));
continue;
}
}
// Check if this peer has EIM NAT (punchable)
let profile = store.get_peer_nat_profile(holder);
if profile.mapping == NatMapping::EndpointIndependent {
punch_candidates.push(*holder);
}
}
drop(store);
// Tier 1: Direct redirect to a publicly-reachable holder
for (_holder, addr) in &direct_candidates {
// Skip unroutable addresses (0.0.0.0, 127.x, etc.)
if let Some(ip_str) = addr.split(':').next() {
if let Ok(ip) = ip_str.parse::<std::net::IpAddr>() {
if ip.is_unspecified() || ip.is_loopback() {
continue;
}
}
}
// Verify holder is actually connected (likely still alive)
if node.network.is_connected(_holder).await
|| node.network.has_session(_holder).await
{
return Some(format!("http://{}/p/{}", addr, post_hex));
}
}
// Tier 2: TCP punch + redirect for EIM NAT holders
if let Some(browser_ip) = browser_ip {
for holder in &punch_candidates {
// Must be connected to send the punch request
if !node.network.is_connected(holder).await
&& !node.network.has_session(holder).await
{
continue;
}
match node.network.tcp_punch(holder, browser_ip.to_string(), post_id).await {
Ok(Some(http_addr)) => {
info!("Web: TCP punch succeeded for {}, redirecting to {}",
hex::encode(holder), http_addr);
return Some(format!("http://{}/p/{}", http_addr, post_hex));
}
Ok(None) => {
debug!("Web: TCP punch failed for {}", hex::encode(holder));
}
Err(e) => {
debug!("Web: TCP punch error for {}: {}", hex::encode(holder), e);
}
}
}
}
None
}
/// Search the network for a post using extended worm search, then fetch it.
async fn fetch_post_from_network(
    node: &Arc<Node>,
    author: &NodeId,
    post_id: &PostId,
) -> anyhow::Result<Option<crate::protocol::SyncPost>> {
    // Fast path: when the author is known, connect straight to them and
    // pull the post directly.
    if *author != [0u8; 32] && node.connect_by_node_id(*author).await.is_ok() {
        if let Ok(Some(sp)) = node.network.post_fetch(author, post_id).await {
            return Ok(Some(sp));
        }
    }
    // Slow path: worm content search keyed on the post id.
    if let Some(result) = node.network.content_search(author, Some(*post_id), None).await? {
        // Prefer the reported post_holder, then fall back to the answering node.
        let candidates = [result.post_holder, Some(result.node_id)];
        for holder in candidates.into_iter().flatten() {
            let _ = node.connect_by_node_id(holder).await; // best-effort connect
            if let Ok(Some(sp)) = node.network.post_fetch(&holder, post_id).await {
                return Ok(Some(sp));
            }
        }
    }
    Ok(None)
}
/// Handle GET /b/<blobid_hex>
///
/// Serves a blob (image/video) belonging to a public post: tries the local
/// blob store first, then fetches from the post's author via BlobRequest.
/// Malformed or unknown blob ids now consistently receive a 404 (previously
/// two of the failure paths silently dropped the connection).
async fn serve_blob(stream: &mut TcpStream, path: &str, node: &Arc<Node>) {
    let blob_hex = &path[3..]; // strip "/b/"
    if blob_hex.len() != 64 || !blob_hex.chars().all(|c| c.is_ascii_hexdigit()) {
        let _ = write_http_response(stream, 404, "text/plain", b"Blob not found").await;
        return;
    }
    let blob_id: [u8; 32] = match hex::decode(blob_hex) {
        Ok(b) if b.len() == 32 => b.try_into().unwrap(),
        _ => return, // unreachable: 64 hex chars always decode to 32 bytes
    };
    // Find which public post owns this blob and get the mime type + author.
    // Check blobs table first, then scan post attachments (for posts stored
    // via PostFetch which don't populate the blobs table).
    let (mime_type, author_id) = {
        let store = node.storage.lock().await;
        if let Some(mime) = find_public_blob_mime(&store, &blob_id) {
            let author = store.get_blob_post_id(&blob_id).ok().flatten().and_then(|pid| {
                store.get_post_with_visibility(&pid).ok().flatten().map(|(p, _)| p.author)
            });
            (mime, author)
        } else {
            match find_blob_in_posts(&store, &blob_id) {
                Some((mime, author)) => (mime, Some(author)),
                None => {
                    // Unknown blob (or its post isn't public): 404.
                    drop(store);
                    let _ = write_http_response(stream, 404, "text/plain", b"Blob not found").await;
                    return;
                }
            }
        }
    };
    // Try local blob store first.
    if let Ok(Some(data)) = node.blob_store.get(&blob_id) {
        let _ = write_http_response(stream, 200, &mime_type, &data).await;
        return;
    }
    // Blob not on disk — fetch from author via BlobRequest.
    if let Some(author) = author_id {
        info!("Web: fetching blob {} from author {}", blob_hex, hex::encode(author));
        // Best-effort connect before the fetch.
        let _ = node.connect_by_node_id(author).await;
        if let Ok(Some(blob_data)) = node.network.fetch_blob(&blob_id, &author).await {
            let _ = write_http_response(stream, 200, &mime_type, &blob_data).await;
            return;
        }
    }
    // Not found anywhere.
    let _ = write_http_response(stream, 404, "text/plain", b"Blob not found").await;
}
/// Search post attachments for a blob CID. Returns (mime_type, author).
/// Used when the blobs table doesn't have an entry (e.g. posts stored via PostFetch).
fn find_blob_in_posts(store: &crate::storage::Storage, blob_id: &[u8; 32]) -> Option<(String, NodeId)> {
    // Result<Option<_>, _> → Option<_>: drop the error, then flatten.
    store.find_blob_in_post_attachments(blob_id).ok().flatten()
}
/// Find a blob's mime type, verifying it belongs to a public post.
fn find_public_blob_mime(store: &crate::storage::Storage, blob_id: &[u8; 32]) -> Option<String> {
    let post_id = store.get_blob_post_id(blob_id).ok()??;
    let (post, visibility) = store.get_post_with_visibility(&post_id).ok()??;
    // Never expose blobs from non-public posts.
    if !matches!(visibility, PostVisibility::Public) {
        return None;
    }
    post.attachments
        .iter()
        .find(|att| att.cid == *blob_id)
        .map(|att| att.mime_type.clone())
}
/// Render the fallback "content not reachable" page shown when a post cannot
/// be found locally, via redirect, or over the network.
fn render_unavailable_screen() -> String {
    String::from(
        r##"<!DOCTYPE html>
<html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>ItsGoin</title>
<style>body{font-family:-apple-system,BlinkMacSystemFont,sans-serif;background:#0d0d0d;color:#e0e0e0;display:flex;justify-content:center;align-items:center;min-height:100vh;margin:0}
.card{background:#1a1a1a;border-radius:16px;padding:2.5rem;max-width:400px;text-align:center}
.hex{font-size:3rem;margin-bottom:1rem}h2{color:#5b8def;margin:0 0 0.5rem;font-size:1.3rem}
p{color:#888;font-size:0.9rem;line-height:1.5;margin:0.5rem 0}
.btn{display:inline-block;padding:0.7rem 1.5rem;border-radius:8px;text-decoration:none;font-weight:600;font-size:0.9rem;margin-top:1rem;background:#5b8def;color:#fff}
.btn:hover{background:#4a7cde}</style></head><body>
<div class="card"><div class="hex">&#x2B21;</div>
<h2>This content isn't currently reachable.</h2>
<p>It may be available again when someone who has it comes back online.</p>
<a class="btn" href="https://itsgoin.com">Install ItsGoin to find it when it resurfaces</a>
</div></body></html>"##,
    )
}
/// Parse "GET /path HTTP/1.x\r\n..." → ("GET", "/path")
fn parse_request_line(buf: &[u8]) -> Option<(&str, &str)> {
let line_end = buf.iter().position(|&b| b == b'\r' || b == b'\n')?;
let line = std::str::from_utf8(&buf[..line_end]).ok()?;
let mut parts = line.split(' ');
let method = parts.next()?;
let path = parts.next()?;
let version = parts.next()?;
if !version.starts_with("HTTP/") {
return None;
}
Some((method, path))
}
/// Write a minimal HTTP/1.1 response (status line, Content-Type,
/// Content-Length, permissive CORS, Connection: close) followed by `body`.
/// Returns false if either write fails.
async fn write_http_response(stream: &mut TcpStream, status: u16, content_type: &str, body: &[u8]) -> bool {
    // Reason phrases for the statuses this handler can emit; anything
    // unexpected degrades to a generic "Error".
    let status_text = match status {
        200 => "OK",
        302 => "Found",
        400 => "Bad Request",
        404 => "Not Found",
        500 => "Internal Server Error",
        _ => "Error",
    };
    let header = format!(
        "HTTP/1.1 {} {}\r\nContent-Type: {}\r\nContent-Length: {}\r\nAccess-Control-Allow-Origin: *\r\nConnection: close\r\n\r\n",
        status, status_text, content_type, body.len()
    );
    if stream.write_all(header.as_bytes()).await.is_err() {
        return false;
    }
    stream.write_all(body).await.is_ok()
}