Init orama vault

This commit is contained in:
anonpenguin23 2026-02-27 06:53:06 +02:00
commit 2dc5dbe0ef
54 changed files with 10498 additions and 0 deletions

7
.gitignore vendored Normal file
View File

@ -0,0 +1,7 @@
.zig-cache/
zig-out/
/release/
/debug/
/build/
/build-*/
/docgen_tmp/

46
build.zig Normal file
View File

@ -0,0 +1,46 @@
const std = @import("std");
/// Build graph for the vault-guardian daemon: installs the main executable,
/// wires a `run` step that forwards CLI args, and a `test` step that runs
/// the unit test suite rooted at src/tests.zig.
pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Module backing the main executable.
    const guardian_module = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    const guardian = b.addExecutable(.{
        .name = "vault-guardian",
        .root_module = guardian_module,
    });
    b.installArtifact(guardian);

    // `zig build run [-- args]`: install first, then execute the binary,
    // forwarding any arguments given after `--`.
    const run_guardian = b.addRunArtifact(guardian);
    run_guardian.step.dependOn(b.getInstallStep());
    if (b.args) |forwarded_args| {
        run_guardian.addArgs(forwarded_args);
    }
    b.step("run", "Run the vault guardian").dependOn(&run_guardian.step);

    // `zig build test`: compile and run the unit tests.
    const tests_module = b.createModule(.{
        .root_source_file = b.path("src/tests.zig"),
        .target = target,
        .optimize = optimize,
    });
    const unit_tests = b.addTest(.{ .root_module = tests_module });
    const tests_run = b.addRunArtifact(unit_tests);
    b.step("test", "Run unit tests").dependOn(&tests_run.step);
}

12
build.zig.zon Normal file
View File

@ -0,0 +1,12 @@
.{
    // Package identifier for the Zig package manager.
    .name = .orama_vault,
    // Semantic version of this package.
    .version = "0.1.0",
    // Package fingerprint used by the package manager to identify this
    // package across versions — presumably generated at init; do not edit.
    .fingerprint = 0xb3eae57574026b46,
    // Oldest Zig toolchain this build supports.
    .minimum_zig_version = "0.14.0",
    // Files and directories shipped when this package is fetched
    // as a dependency.
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "src",
        "tests",
    },
}

682
docs/API.md Normal file
View File

@ -0,0 +1,682 @@
# Orama Vault -- API Reference
## Base URL
All endpoints are prefixed with `/v1/vault/` (V1) or `/v2/vault/` (V2). The guardian listens on the configured client port (default: **7500**).
```
http://<guardian-ip>:7500/v1/vault/...
http://<guardian-ip>:7500/v2/vault/...
```
In production, the Orama gateway reverse-proxies these endpoints over TLS (port 443). Direct access to port 7500 is only available within the WireGuard overlay network.
> **Note:** TLS termination is not yet implemented in the guardian itself (Phase 3). Currently plain TCP.
---
## V1 Endpoints
### GET /v1/vault/health
Liveness check. Returns immediately with a static response. No authentication required. Used by load balancers and monitoring.
**Request:**
```
GET /v1/vault/health HTTP/1.1
```
**Response (200 OK):**
```json
{
"status": "ok",
"version": "0.1.0"
}
```
This endpoint never returns an error. If the process is running and the TCP listener is accepting connections, it returns 200.
---
### GET /v1/vault/status
Guardian status information. Returns configuration and runtime state. No authentication required.
**Request:**
```
GET /v1/vault/status HTTP/1.1
```
**Response (200 OK):**
```json
{
"status": "ok",
"version": "0.1.0",
"data_dir": "/opt/orama/.orama/data/vault",
"client_port": 7500,
"peer_port": 7501
}
```
---
### GET /v1/vault/guardians
List known guardian nodes. In the current MVP, returns only the local node. Phase 3 will query RQLite for the full cluster list.
**Request:**
```
GET /v1/vault/guardians HTTP/1.1
```
**Response (200 OK):**
```json
{
"guardians": [
{
"address": "0.0.0.0",
"port": 7500
}
],
"threshold": 3,
"total": 1
}
```
| Field | Type | Description |
|-------|------|-------------|
| `guardians` | array | List of known guardian nodes |
| `guardians[].address` | string | Node IP address |
| `guardians[].port` | number | Node client port |
| `threshold` | number | Shamir threshold K (minimum shares to reconstruct) |
| `total` | number | Total known guardians |
---
### POST /v1/vault/push
Store an encrypted share for a user. The client has already performed the Shamir split locally and sends one share to each guardian.
**Request:**
```
POST /v1/vault/push HTTP/1.1
Content-Type: application/json
Content-Length: <length>
{
"identity": "<64 hex chars>",
"share": "<base64-encoded share data>",
"version": <uint64>
}
```
| Field | Type | Required | Constraints |
|-------|------|----------|-------------|
| `identity` | string | yes | Exactly 64 lowercase hex characters (SHA-256 of user identity) |
| `share` | string | yes | Base64-encoded encrypted share data. Decoded size must be > 0 and <= 512 KiB |
| `version` | number | yes | Unsigned 64-bit integer. Must be strictly greater than the currently stored version (monotonic counter) |
**Success Response (200 OK):**
```json
{
"status": "stored"
}
```
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 400 | `{"error":"empty body"}` | Request body is empty |
| 400 | `{"error":"request body too large"}` | Body exceeds 1 MiB |
| 400 | `{"error":"missing identity field"}` | `identity` field not found in JSON |
| 400 | `{"error":"missing share field"}` | `share` field not found in JSON |
| 400 | `{"error":"identity must be exactly 64 hex characters"}` | Identity is not 64 chars |
| 400 | `{"error":"identity must be hex"}` | Identity contains non-hex characters |
| 400 | `{"error":"invalid base64 in share"}` | Share data is not valid base64 |
| 400 | `{"error":"share data too large"}` | Decoded share exceeds 512 KiB |
| 400 | `{"error":"share data is empty"}` | Decoded share is 0 bytes |
| 400 | `{"error":"missing or invalid version field"}` | `version` field missing or not a valid unsigned integer |
| 400 | `{"error":"version must be greater than current stored version"}` | Anti-rollback: version <= stored version |
| 405 | `{"error":"method not allowed"}` | Non-POST method used |
| 500 | `{"error":"internal server error"}` | Disk write failure or allocation error |
**Storage Behavior:**
1. Share data is written atomically: first to `share.bin.tmp`, then renamed to `share.bin`.
2. Version counter is written atomically: first to `version.tmp`, then renamed to `version`.
3. Anti-rollback: if a version file exists for this identity, the new version must be strictly greater than the stored one; equal or lower versions are rejected.
4. Directory path: `<data_dir>/shares/<identity>/share.bin`
**Size Limits:**
| Limit | Value |
|-------|-------|
| Max request body | 1 MiB (1,048,576 bytes) |
| Max decoded share | 512 KiB (524,288 bytes) |
| Identity length | Exactly 64 hex characters |
| Max version value | 2^64 - 1 |
---
### POST /v1/vault/pull
Retrieve an encrypted share for a user. The client contacts multiple guardians to collect K shares for reconstruction.
**Request:**
```
POST /v1/vault/pull HTTP/1.1
Content-Type: application/json
Content-Length: <length>
{
"identity": "<64 hex chars>"
}
```
| Field | Type | Required | Constraints |
|-------|------|----------|-------------|
| `identity` | string | yes | Exactly 64 lowercase hex characters |
**Success Response (200 OK):**
```json
{
"share": "<base64-encoded share data>"
}
```
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 400 | `{"error":"empty body"}` | Request body is empty |
| 400 | `{"error":"request body too large"}` | Body exceeds 4 KiB |
| 400 | `{"error":"missing identity field"}` | `identity` not found |
| 400 | `{"error":"identity must be exactly 64 hex characters"}` | Wrong length |
| 400 | `{"error":"identity must be hex"}` | Non-hex characters |
| 404 | `{"error":"share not found"}` | No share stored for this identity |
| 405 | `{"error":"method not allowed"}` | Non-POST method used |
| 500 | `{"error":"internal server error"}` | Disk read failure |
**Size Limits:**
| Limit | Value |
|-------|-------|
| Max request body | 4 KiB (4,096 bytes) |
| Max share read | 1 MiB (1,048,576 bytes) |
---
### POST /v1/vault/auth/challenge
Request a challenge nonce to begin authentication.
**Request:**
```
POST /v1/vault/auth/challenge HTTP/1.1
Content-Type: application/json
{
"identity": "<64 hex chars>"
}
```
**Response (200 OK):**
```json
{
"nonce": "<base64 32 bytes>",
"created_ns": <i128>,
"tag": "<base64 32 bytes>",
"expires_in_seconds": 60
}
```
The client must return this exact challenge (nonce + created_ns + tag) along with their identity within 60 seconds.
---
### POST /v1/vault/auth/session
Exchange a verified challenge for a session token.
**Request:**
```
POST /v1/vault/auth/session HTTP/1.1
Content-Type: application/json
{
"identity": "<64 hex chars>",
"nonce": "<base64 32 bytes>",
"created_ns": <i128>,
"tag": "<base64 32 bytes>"
}
```
**Response (200 OK):**
```json
{
"session_token": "<base64-encoded token>",
"expires_in_seconds": 3600
}
```
The session token is valid for 1 hour. It should be included in subsequent requests as a Bearer token in the Authorization header.
---
## V1 Authentication Flow
The authentication flow is challenge-response:
```
Client Guardian
| |
| POST /v1/vault/auth/challenge |
| {"identity":"<hex>"} |
|----------------------------------->|
| | Generate 32-byte random nonce
| | HMAC(server_secret, identity || nonce || timestamp)
| {"nonce":"..","tag":".."} |
|<-----------------------------------|
| |
| POST /v1/vault/auth/session |
| {"identity":"..","nonce":"..","tag":".."}
|----------------------------------->|
| | Verify HMAC tag
| | Check nonce not expired (60s)
| {"session_token":".."} | Issue HMAC-based session token (1h)
|<-----------------------------------|
| |
| POST /v1/vault/push |
| Authorization: Bearer <token> |
|----------------------------------->|
| | Verify session token
| | Process request
```
Key properties:
- Challenge expires in 60 seconds.
- Session tokens expire in 1 hour.
- All HMAC verifications use constant-time comparison to prevent timing attacks.
- Server secret is generated randomly at startup (not persisted -- sessions invalidate on restart).
- Phase 3 adds Ed25519 signature verification for true public-key authentication.
---
## V2 Endpoints
V2 introduces a generic secrets API. Instead of storing a single anonymous share per identity, V2 allows multiple named secrets per identity with full CRUD operations.
All V2 secrets endpoints require mandatory session authentication via the `X-Session-Token` header. The identity is extracted from the session token -- it is never passed in the request body. Authenticate first using the V2 auth endpoints below.
### POST /v2/vault/auth/challenge
Request a challenge nonce to begin authentication. Same protocol as V1 auth/challenge.
**Request:**
```
POST /v2/vault/auth/challenge HTTP/1.1
Content-Type: application/json
{
"identity": "<64 hex chars>"
}
```
**Response (200 OK):**
```json
{
"nonce": "<base64 32 bytes>",
"created_ns": <i128>,
"tag": "<base64 32 bytes>",
"expires_in_seconds": 60
}
```
---
### POST /v2/vault/auth/session
Exchange a verified challenge for a session token. Same protocol as V1 auth/session.
**Request:**
```
POST /v2/vault/auth/session HTTP/1.1
Content-Type: application/json
{
"identity": "<64 hex chars>",
"nonce": "<base64 32 bytes>",
"created_ns": <i128>,
"tag": "<base64 32 bytes>"
}
```
**Response (200 OK):**
```json
{
"session_token": "<base64-encoded token>",
"expires_in_seconds": 3600
}
```
The session token is valid for 1 hour. Include it in all subsequent V2 requests as the `X-Session-Token` header.
---
### PUT /v2/vault/secrets/{name}
Store a named secret. Requires session authentication. The identity is extracted from the session token.
**Request:**
```
PUT /v2/vault/secrets/my-api-key HTTP/1.1
Content-Type: application/json
X-Session-Token: <session_token>
{
"share": "<base64-encoded secret data>",
"version": <uint64>
}
```
| Field | Type | Required | Constraints |
|-------|------|----------|-------------|
| `name` (URL path) | string | yes | Alphanumeric, `_`, `-`. Max 128 characters |
| `share` | string | yes | Base64-encoded data. Decoded size must be > 0 and <= 512 KiB |
| `version` | number | yes | Unsigned 64-bit integer. Must be strictly greater than the currently stored version (anti-rollback) |
**Success Response (200 OK):**
```json
{
"status": "stored",
"name": "my-api-key",
"version": 1
}
```
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 400 | `{"error":"empty body"}` | Request body is empty |
| 400 | `{"error":"invalid secret name"}` | Name contains disallowed characters or exceeds 128 chars |
| 400 | `{"error":"missing share field"}` | `share` not found in JSON |
| 400 | `{"error":"invalid base64 in share"}` | Share is not valid base64 |
| 400 | `{"error":"share data too large"}` | Decoded share exceeds 512 KiB |
| 400 | `{"error":"share data is empty"}` | Decoded share is 0 bytes |
| 400 | `{"error":"missing or invalid version field"}` | `version` missing or invalid |
| 400 | `{"error":"version must be greater than current stored version"}` | Anti-rollback: version <= stored version |
| 401 | `{"error":"missing session token"}` | `X-Session-Token` header not provided |
| 401 | `{"error":"invalid session token"}` | Token is malformed or expired |
| 409 | `{"error":"too many secrets"}` | Identity has reached the 1000 secret limit |
| 500 | `{"error":"internal server error"}` | Disk write failure |
**Storage Layout:**
```
<data_dir>/vaults/<identity_hex>/<secret_name>/
share.bin -- Encrypted share data
checksum.bin -- HMAC-SHA256 integrity checksum
meta.json -- {"version":1,"created_ns":...,"updated_ns":...,"size":123}
```
**Limits:**
| Limit | Value |
|-------|-------|
| Max secrets per identity | 1000 |
| Max decoded share size | 512 KiB (524,288 bytes) |
| Max secret name length | 128 characters |
| Secret name charset | `[a-zA-Z0-9_-]` |
| Max version value | 2^64 - 1 |
---
### GET /v2/vault/secrets/{name}
Retrieve a named secret. Requires session authentication. The identity is extracted from the session token.
**Request:**
```
GET /v2/vault/secrets/my-api-key HTTP/1.1
X-Session-Token: <session_token>
```
**Success Response (200 OK):**
```json
{
"share": "<base64-encoded secret data>",
"name": "my-api-key",
"version": 1,
"created_ns": 1700000000000000000,
"updated_ns": 1700000000000000000
}
```
| Field | Type | Description |
|-------|------|-------------|
| `share` | string | Base64-encoded secret data |
| `name` | string | Secret name |
| `version` | number | Current version |
| `created_ns` | number | Creation timestamp in nanoseconds |
| `updated_ns` | number | Last update timestamp in nanoseconds |
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 401 | `{"error":"missing session token"}` | `X-Session-Token` header not provided |
| 401 | `{"error":"invalid session token"}` | Token is malformed or expired |
| 404 | `{"error":"secret not found"}` | No secret with this name for this identity |
| 500 | `{"error":"internal server error"}` | Disk read failure |
---
### DELETE /v2/vault/secrets/{name}
Delete a named secret. Requires session authentication. The identity is extracted from the session token.
**Request:**
```
DELETE /v2/vault/secrets/my-api-key HTTP/1.1
X-Session-Token: <session_token>
```
**Success Response (200 OK):**
```json
{
"status": "deleted",
"name": "my-api-key"
}
```
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 401 | `{"error":"missing session token"}` | `X-Session-Token` header not provided |
| 401 | `{"error":"invalid session token"}` | Token is malformed or expired |
| 404 | `{"error":"secret not found"}` | No secret with this name for this identity |
| 500 | `{"error":"internal server error"}` | Disk delete failure |
---
### GET /v2/vault/secrets
List all secrets for the authenticated identity. Requires session authentication. The identity is extracted from the session token.
**Request:**
```
GET /v2/vault/secrets HTTP/1.1
X-Session-Token: <session_token>
```
**Success Response (200 OK):**
```json
{
"secrets": [
{
"name": "my-api-key",
"version": 1,
"size": 256
},
{
"name": "db-password",
"version": 3,
"size": 48
}
]
}
```
| Field | Type | Description |
|-------|------|-------------|
| `secrets` | array | List of secret metadata entries |
| `secrets[].name` | string | Secret name |
| `secrets[].version` | number | Current version |
| `secrets[].size` | number | Size of stored data in bytes |
**Error Responses:**
| Status | Body | Condition |
|--------|------|-----------|
| 401 | `{"error":"missing session token"}` | `X-Session-Token` header not provided |
| 401 | `{"error":"invalid session token"}` | Token is malformed or expired |
| 500 | `{"error":"internal server error"}` | Disk read failure |
If no secrets exist for the identity, returns an empty array: `{"secrets":[]}`.
---
## V2 Authentication Flow
The V2 authentication flow is identical to V1 but uses V2 path prefixes and the `X-Session-Token` header:
```
Client Guardian
| |
| POST /v2/vault/auth/challenge |
| {"identity":"<hex>"} |
|----------------------------------->|
| | Generate 32-byte random nonce
| | HMAC(server_secret, identity || nonce || timestamp)
| {"nonce":"..","tag":".."} |
|<-----------------------------------|
| |
| POST /v2/vault/auth/session |
| {"identity":"..","nonce":"..","tag":".."}
|----------------------------------->|
| | Verify HMAC tag
| | Check nonce not expired (60s)
| {"session_token":".."} | Issue HMAC-based session token (1h)
|<-----------------------------------|
| |
| PUT /v2/vault/secrets/my-key |
| X-Session-Token: <token> |
| {"share":"..","version":1} |
|----------------------------------->|
| | Verify session token
| | Extract identity from token
| {"status":"stored",...} | Store secret under identity
|<-----------------------------------|
```
Key differences from V1:
- Session token is sent via `X-Session-Token` header (not Authorization Bearer).
- Identity is extracted from the session token, not from the request body.
- All V2 secrets endpoints require authentication (mandatory, not optional).
---
## Error Response Format
All error responses use a consistent JSON format:
```json
{
"error": "<human-readable error message>"
}
```
Standard HTTP status codes:
| Code | Meaning |
|------|---------|
| 200 | Success |
| 400 | Client error (bad request, validation failure) |
| 401 | Authentication required or token invalid |
| 404 | Resource not found (share/secret not found, unknown endpoint) |
| 405 | Method not allowed |
| 409 | Conflict (e.g., too many secrets) |
| 500 | Internal server error |
All responses include `Connection: close` and `Content-Type: application/json` headers.
---
## Rate Limiting (not yet implemented)
> **Status:** Rate limiting is planned for Phase 3. Currently there are no request rate limits.
Planned behavior:
- Per-identity rate limiting on push/pull endpoints.
- Health and status endpoints exempt from rate limiting.
- Rate limit headers in responses (`X-RateLimit-Limit`, `X-RateLimit-Remaining`).
---
## Request Size Limits
| Endpoint | Max Body Size |
|----------|---------------|
| `/v1/vault/push` | 1 MiB |
| `/v1/vault/pull` | 4 KiB |
| `/v2/vault/secrets/{name}` (PUT) | 1 MiB |
| All others | 64 KiB (read buffer size) |
The HTTP listener uses a 64 KiB read buffer. Requests larger than this may be truncated. The push handler and V2 PUT handler have an explicit 1 MiB limit check before processing.
---
## Peer Protocol (Port 7501)
The guardian-to-guardian protocol is a binary TCP protocol, **not** HTTP. It runs on port 7501 and is restricted to the WireGuard overlay network (10.0.0.x).
### Wire Format
```
[version:1 byte][msg_type:1 byte][payload_length:4 bytes big-endian][payload:N bytes]
```
- **version**: Protocol version (currently 1). Messages with wrong version are silently dropped.
- **msg_type**: One of the defined message types.
- **payload_length**: 32-bit big-endian unsigned integer. Maximum 1 MiB.
### Message Types
| Code | Name | Direction | Payload Size |
|------|------|-----------|--------------|
| 0x01 | heartbeat | initiator -> peer | 18 bytes |
| 0x02 | heartbeat_ack | peer -> initiator | 0 bytes |
| 0x03 | verify_request | initiator -> peer | 65 bytes |
| 0x04 | verify_response | peer -> initiator | 98 bytes |
| 0x05 | repair_offer | leader -> all | (Phase 2) |
| 0x06 | repair_accept | peer -> leader | (Phase 2) |
### Heartbeat Payload (18 bytes)
```
[sender_ip:4 bytes][sender_port:2 bytes BE][share_count:4 bytes BE][timestamp:8 bytes BE]
```
### Verify Request Payload (65 bytes)
```
[identity:64 bytes][identity_len:1 byte]
```
### Verify Response Payload (98 bytes)
```
[identity:64 bytes][identity_len:1 byte][has_share:1 byte (0/1)][commitment_root:32 bytes SHA-256]
```

350
docs/ARCHITECTURE.md Normal file
View File

@ -0,0 +1,350 @@
# Orama Vault -- Architecture
## What is Orama Vault?
Orama Vault is a distributed secrets store. It runs as a guardian daemon (`vault-guardian`) on every node in the Orama Network, similar to how IPFS nodes run on every machine. Clients can store any sensitive data -- API keys, database passwords, SSH keys, crypto seeds, wallet recovery shares, or arbitrary encrypted blobs. The client splits each secret into Shamir shares and pushes one share to each guardian. To retrieve, the client pulls shares from K guardians and reconstructs the original secret via Lagrange interpolation.
The system provides information-theoretic security: compromising fewer than K guardians reveals zero information about the original secret. This is not computational security -- it is mathematically impossible to learn anything from K-1 shares, regardless of computing power.
## How It Fits Into Orama Network
```
Orama Network Node
+------------------------------------------------------------+
| orama-gateway (port 443) |
| | |
| +-- reverse-proxy --> vault-guardian (port 7500, client) |
| vault-guardian (port 7501, peer) |
| |
| RQLite (port 4001) -- cluster membership source of truth |
| Olric (port 10003) -- distributed cache |
| WireGuard (10.0.0.x) -- encrypted overlay network |
+------------------------------------------------------------+
```
Every Orama node runs a `vault-guardian` process alongside the gateway, RQLite, and Olric. The guardian:
- Listens on **port 7500** for client HTTP requests (V1 push/pull shares, V2 CRUD secrets).
- Listens on **port 7501** for guardian-to-guardian binary protocol (heartbeat, verify, repair), restricted to the WireGuard overlay network.
- Discovers peers via RQLite (the cluster's membership source of truth).
## Data Flow
### V1 Push (Backup)
```
Client Guardian-1 Guardian-2 Guardian-N
| | | |
|-- POST /v1/vault/push --> [store share.bin] | |
|-- POST /v1/vault/push ------------------> [store share.bin] |
|-- POST /v1/vault/push -------------------------------------> [store]
| | | |
|<--- {"status":"stored"} ---| | |
```
1. Client generates encrypted key material (DEK-wrapped secret, KEK1/KEK2 wrapped DEKs).
2. Client runs Shamir split locally: secret -> N shares with threshold K.
3. Client pushes each share to a different guardian via `POST /v1/vault/push`.
4. Each guardian stores the share atomically to disk (temp file + rename).
5. Each guardian writes a monotonic version counter for anti-rollback protection.
### V1 Pull (Recovery)
```
Client Guardian-1 Guardian-2 Guardian-K
| | | |
|-- POST /v1/vault/pull --> [read share.bin] | |
|<-- {"share":"<base64>"} ---| | |
|-- POST /v1/vault/pull ------------------> [read share.bin] |
|<-- {"share":"<base64>"} ------------------| |
| ...collect K shares... |
| |
[Lagrange interpolation at x=0 -> reconstruct secret]
```
1. Client contacts guardians and requests its share via `POST /v1/vault/pull`.
2. Each guardian reads the share from disk and returns it base64-encoded.
3. Client collects at least K shares and reconstructs the secret via Lagrange interpolation over GF(2^8).
### V2 Store (Named Secret)
```
Client Guardian-1 Guardian-2 Guardian-N
| | | |
|-- auth/challenge -------->| | |
|<-- {nonce,tag} -----------| | |
|-- auth/session ---------->| | |
|<-- {session_token} -------| | |
| | | |
| [Shamir split: secret -> N shares] |
| | | |
|-- PUT /v2/vault/secrets/my-key ------------> [store share] |
| X-Session-Token: <tok> | | |
|-- PUT /v2/vault/secrets/my-key --> [store] | |
|-- PUT /v2/vault/secrets/my-key -----------------------------> [store]
| | | |
|<-- {"status":"stored"} ---| | |
```
1. Client authenticates with each guardian (challenge-response).
2. Client runs Shamir split locally: secret -> N shares with threshold K.
3. Client pushes each share to a different guardian via `PUT /v2/vault/secrets/{name}` with `X-Session-Token` header.
4. Each guardian extracts identity from the session token and stores the share under `<data_dir>/vaults/<identity>/<name>/`.
5. Anti-rollback: version must be strictly greater than the stored version.
### V2 Retrieve (Named Secret)
```
Client Guardian-1 Guardian-2 Guardian-K
| | | |
|-- GET /v2/vault/secrets/my-key --> [read] | |
|<-- {"share":"<b64>"} ------| | |
|-- GET /v2/vault/secrets/my-key ------------> [read] |
|<-- {"share":"<b64>"} -----------------------| |
| ...collect K shares... |
| |
[Lagrange interpolation at x=0 -> reconstruct secret]
```
## Component Diagram
```
+------------------------------------------------------------------+
| vault-guardian |
| |
| +------------------+ +------------------+ |
| | HTTP Server | | Peer Protocol | |
| | (port 7500) | | (port 7501) | |
| | | | | |
| | /v1/vault/health | | heartbeat (5s) | |
| | /v1/vault/status | | verify_request | |
| | /v1/vault/guard. | | verify_response | |
| | /v1/vault/push | | repair_offer | |
| | /v1/vault/pull | | repair_accept | |
| | /v1/vault/auth/* | +--------+---------+ |
| | | | |
| | /v2/vault/auth/* | +--------+---------+ |
| | /v2/vault/secrets| | Heartbeat Mgr | |
| +--------+---------+ | (heartbeat.zig) | |
| | +--------+---------+ |
| +--------+---------+ | |
| | Router | | |
| | (router.zig) | | |
| +--------+---------+ | |
| | | |
| +--------+------------------------+---------+ |
| | Guardian Struct | |
| | (guardian.zig) | |
| | server_secret, node_list, share_count | |
| +---+----------+-----------+--------+-------+ |
| | | | | |
| +---+---+ +---+----+ +----+---+ +--+--------+ |
| | Auth | | Storage| | SSS | |Membership | |
| | | | | | Core | | | |
| |chall. | |file_ | |field | |node_list | |
| |session| |store | |poly | |discovery | |
| +-------+ |vault_ | |split | |quorum | |
| |store | |combine | +-----------+ |
| +---+----+ |commit. | |
| | |reshare | |
| +-------------+ +--------+ |
| | Crypto | |
| | | |
| | aes (GCM) | |
| | hmac | |
| | hkdf | |
| | secure_mem | |
| | pq_kem * | * = stub, Phase 2 |
| | pq_sig * | |
| | hybrid * | |
| +-------------+ |
+------------------------------------------------------------------+
```
## Key Design Decisions
### Why File-Per-User Storage (Not a Database)
Each user's share is stored as a plain file. In V1 the layout is `<data_dir>/shares/<identity_hash>/share.bin`. In V2 the layout is `<data_dir>/vaults/<identity_hex>/<secret_name>/`. This design was chosen because:
1. **No external dependencies.** The guardian binary is fully self-contained. No PostgreSQL, SQLite, or RQLite dependency for storage.
2. **Atomic writes.** The write-to-temp + rename pattern guarantees that a share is either fully written or not at all. No partial writes, no journal corruption.
3. **Simple backup.** The entire data directory can be backed up with rsync or tar.
4. **Predictable performance.** No query planning, no lock contention, no WAL growth. Each operation is a single file read or write.
5. **Natural sharding.** Files are already sharded by identity hash. No rebalancing needed.
### Storage Layouts
**V1 (Single Share per Identity):**
```
<data_dir>/shares/<identity_hash_hex>/
share.bin -- Raw encrypted share data
share.bin.tmp -- Temp file during atomic write
version -- Monotonic version counter (anti-rollback)
checksum.bin -- HMAC-SHA256 integrity checksum
meta.json -- Share metadata (Phase 2)
wrapped_dek1.bin -- KEK1-wrapped DEK (Phase 2)
wrapped_dek2.bin -- KEK2-wrapped DEK (Phase 2)
```
**V2 (Generic Secrets):**
```
<data_dir>/vaults/<identity_hex>/<secret_name>/
share.bin -- Encrypted share data
checksum.bin -- HMAC-SHA256 integrity checksum
meta.json -- {"version":1,"created_ns":...,"updated_ns":...,"size":123}
```
V2 supports up to 1000 named secrets per identity, each up to 512 KiB. Secret names are restricted to `[a-zA-Z0-9_-]` and max 128 characters.
### Why GF(2^8)
Shamir's Secret Sharing operates over a finite field. We use GF(2^8) -- the Galois field with 256 elements -- because:
1. **Byte-aligned.** Each field element is exactly one byte. No encoding overhead, no bignum arithmetic.
2. **Same field as AES.** GF(2^8) with irreducible polynomial x^8 + x^4 + x^3 + x + 1 (0x11B) is the same field used by AES. Well-studied, well-understood.
3. **Fast arithmetic.** Precomputed log/exp tables (generated at comptime in Zig) give O(1) multiplication, inversion, and division with zero runtime cost.
4. **255 nonzero elements.** Supports up to 255 shares (evaluation points x=1..255), which is more than sufficient for the Orama network (up to ~100 nodes per environment).
Addition and subtraction in GF(2^8) are both XOR. Multiplication uses log/exp table lookups. Division uses `a / b = a * inv(b)` where `inv(a) = exp[255 - log[a]]`.
### Why All-Node Replication
Every guardian stores one share per user. In a 14-node cluster, each user has 14 shares with an adaptive threshold K = max(3, floor(N/3)). This means:
- With 14 nodes: K=4, so any 4 guardians can reconstruct.
- With 100 nodes: K=33, so any 33 guardians can reconstruct.
- Up to N-K nodes can be completely destroyed before data is lost.
All-node replication was chosen because:
1. **Maximum fault tolerance.** The more shares that exist, the more nodes can fail.
2. **Simple push logic.** Client pushes to all nodes, no routing or placement decisions.
3. **Low per-share cost.** Each share is the same size as the original secret (~1KB typically). Even at 100 nodes, total storage per user is ~100KB.
## Guardian Lifecycle
### Startup
1. Parse CLI arguments and load config from `vault.yaml` (or defaults).
2. Ensure data directory exists (`<data_dir>/shares/`, `<data_dir>/vaults/`).
3. Generate a random 32-byte server secret (for HMAC-based auth).
4. Attempt to fetch node list from RQLite; fall back to single-node mode.
5. Mark self as alive in the node list.
6. Count existing shares on disk.
7. Start HTTP server on port 7500 (blocks in accept loop).
### Heartbeat
The peer protocol runs on port 7501 (WireGuard-only). Every 5 seconds, each guardian sends a heartbeat to all known peers. The heartbeat includes:
- Sender IP (4 bytes, WireGuard address)
- Sender port (2 bytes)
- Share count (4 bytes)
- Timestamp (8 bytes, Unix seconds)
Peer state transitions:
```
5s heartbeat received
unknown --------------------------> alive
alive --- no heartbeat for 15s -> suspect
suspect --- no heartbeat for 60s -> dead
dead --- heartbeat received ---> alive
```
### Verify
Periodic verification ensures share integrity across guardians. A guardian:
1. Selects a share and computes its SHA-256 hash (commitment root).
2. Sends a `verify_request` to a peer with the identity hash.
3. The peer reads its copy, computes SHA-256, and sends a `verify_response`.
4. The initiator compares commitment roots. Mismatch indicates tampering or corruption.
### Repair (Proactive Re-sharing)
When the cluster topology changes (node join/leave) or every 24 hours, the repair protocol refreshes all shares using the Herzberg-Jarecki-Krawczyk-Yung protocol:
1. Leader broadcasts `repair_offer` to all guardians.
2. Each guardian generates a random polynomial q_i(x) of degree K-1 with q_i(0)=0.
3. Guardian i sends q_i(j) to guardian j for all j.
4. Each guardian computes: `new_share = old_share + sum(received deltas)` over GF(2^8).
5. Guardians exchange new Merkle commitments to verify consistency.
The secret is preserved because `sum(q_i(0)) = 0`. Old shares become algebraically independent from new shares, so compromising old shares provides zero information about the current secret.
Repair triggers:
- Node topology change detected (join or departure)
- Periodic timer (every 24 hours)
- Manual admin trigger (Phase 2)
Safety requirement: at least 3 alive guardians to initiate repair.
### Shutdown
On shutdown, the server secret is securely zeroed (`@memset(&self.server_secret, 0)`). Share data on disk persists across restarts.
## Recovery Paths
### Path A: Mnemonic Recovery
The user has their BIP-39 mnemonic phrase. They derive the root seed locally and use it to decrypt the vault contents. No guardian interaction needed -- the guardians only store encrypted shares as an additional backup.
### Path B: Username + Passphrase Recovery
The user does not have their mnemonic but remembers their username and passphrase. The recovery flow:
1. Client derives identity hash from username (SHA-256 of identity).
2. Client contacts guardians and pulls K shares via `POST /v1/vault/pull`.
3. Client reconstructs the encrypted blob via Lagrange interpolation.
4. Client derives the decryption key from the passphrase (via HKDF).
5. Client decrypts the blob to recover the root seed/mnemonic.
This path depends on the key wrapping scheme (DEK encrypted by KEK1 from mnemonic, KEK2 from passphrase). The dual-KEK design ensures either recovery path works independently.
## Protocol Versions
- **Protocol version 1** (current): 6-byte wire header `[version:1][type:1][length:4]`, big-endian length, TCP transport on port 7501.
- **Binary v0.1.0**: MVP with single-threaded HTTP server, file-per-user storage, HMAC-based auth, stub post-quantum crypto.
## What Is Implemented vs. Planned
| Component | Status | Notes |
|-----------|--------|-------|
| SSS field arithmetic (GF(2^8)) | Complete | Exhaustive test coverage |
| SSS split/combine | Complete | Verified across all C(N,K) subsets |
| SSS reshare (Herzberg protocol) | Complete | Unit tested, not yet wired to peer protocol |
| Merkle commitments | Complete | Build, prove, verify all tested |
| AES-256-GCM encryption | Complete | Round-trip, tamper, wrong-key tests |
| HMAC-SHA256 integrity | Complete | Constant-time verification |
| HKDF-SHA256 key derivation | Complete | Cross-platform test vectors |
| Secure memory (mlock, secureZero) | Complete | Linux mlock, volatile zero |
| File-per-user storage (V1) | Complete | Atomic writes, HMAC integrity |
| V2 multi-secret storage engine | Complete | Named secrets, per-identity vaults |
| V2 CRUD HTTP handlers | Complete | PUT, GET, DELETE, LIST |
| V1-to-V2 migration tool | Complete | Migrates V1 shares into V2 layout |
| HTTP server (push/pull/health/status) | Complete | Single-threaded MVP |
| Peer binary protocol | Complete | Encode/decode with tests |
| Heartbeat state machine | Complete | alive/suspect/dead transitions |
| Peer verify protocol | Complete | Commitment comparison |
| Repair round state machine | Complete | Timeout, delta tracking |
| Node list / discovery | Complete | Static + RQLite (RQLite fetch is stub) |
| Quorum logic | Complete | Write quorum W=ceil(2N/3), read quorum K |
| Challenge-response auth | Complete | HMAC-based, 60s expiry, wired to router |
| Session tokens | Complete | HMAC-based, 1h expiry, wired to router |
| Auth enforcement on V2 | Complete | Mandatory session auth on all V2 secrets endpoints |
| Config file parsing | Stub | Returns defaults, YAML parsing Phase 2 |
| RQLite node discovery | Stub | Returns empty list, HTTP fetch Phase 2 |
| Post-quantum KEM (ML-KEM-768) | Stub | Interface only, random bytes |
| Post-quantum signatures (ML-DSA-65) | Stub | Interface only, verify always succeeds |
| Hybrid key exchange (X25519 + ML-KEM) | Partial | X25519 works, ML-KEM is stub |
| Multi-threaded HTTP server | Not started | Phase 3 |
| TLS termination | Not started | Phase 3, currently plain TCP |
| Auth enforcement on V1 push/pull | Not started | Auth module exists but not wired to V1 handlers |
| Peer heartbeat loop | Not started | State machine exists, loop not wired |
| Peer repair orchestration | Not started | State machine exists, coordination not wired |
| Rate limiting | Not started | Phase 3 |
| Admin API | Not started | Phase 3 |

305
docs/DEPLOYMENT.md Normal file
View File

@ -0,0 +1,305 @@
# Orama Vault -- Deployment Guide
## Prerequisites
- **Zig 0.14.0+** (specified in `build.zig.zon` as `minimum_zig_version`). Zig 0.15+ recommended for the latest std library improvements.
- **Linux x86_64** target for production nodes.
- **macOS (arm64 or x86_64)** for local development and testing.
No external dependencies. The guardian is a single static binary with no runtime library requirements (musl libc, statically linked).
---
## Building
### Production Build (Linux target)
From the project root:
```bash
zig build -Dtarget=x86_64-linux-musl -Doptimize=ReleaseSafe
```
This produces:
```
zig-out/bin/vault-guardian
```
The binary is statically linked against musl libc, so it runs on any Linux x86_64 system regardless of glibc version.
**Build options:**
| Flag | Description |
|------|-------------|
| `-Dtarget=x86_64-linux-musl` | Cross-compile for Linux x86_64 with musl (static) |
| `-Doptimize=ReleaseSafe` | Optimized with safety checks (recommended for production) |
| `-Doptimize=ReleaseFast` | Maximum optimization, no safety checks |
| `-Doptimize=Debug` | Debug build with full debug info |
### Development Build (native)
```bash
zig build
```
Produces a debug binary for the host platform at `zig-out/bin/vault-guardian`.
### Running Tests
```bash
zig build test
```
Runs all unit tests across every module (SSS, crypto, storage, auth, membership, peer protocol, server). The test entry point is `src/tests.zig`, which imports all test modules via comptime.
### Run Locally
```bash
zig build run -- --data-dir /tmp/vault-test --port 7500 --bind 127.0.0.1
```
Or after building:
```bash
./zig-out/bin/vault-guardian --data-dir /tmp/vault-test --port 7500 --bind 127.0.0.1
```
---
## CLI Arguments
```
Usage: vault-guardian [OPTIONS]
Orama Vault Guardian -- distributed secret share storage
Options:
--config <path> Path to config file (default: /opt/orama/.orama/data/vault/vault.yaml)
--data-dir <path> Override data directory
--port <port> Override client port (default: 7500)
--bind <addr> Override bind address (default: 0.0.0.0)
--help, -h Show this help
--version, -v Show version
```
CLI arguments override config file values.
---
## Configuration
The guardian reads a config file from `--config` (default: `/opt/orama/.orama/data/vault/vault.yaml`).
> **Note:** YAML parsing is not yet implemented (Phase 2). The config loader currently returns defaults regardless of file contents. All configuration must be done via CLI arguments for now.
### Config Fields
| Field | Default | Description |
|-------|---------|-------------|
| `listen_address` | `0.0.0.0` | Address to bind both client and peer listeners |
| `client_port` | `7500` | Client-facing HTTP port |
| `peer_port` | `7501` | Guardian-to-guardian binary protocol port |
| `data_dir` | `/opt/orama/.orama/data/vault` | Directory for share storage |
| `rqlite_url` | `http://127.0.0.1:4001` | RQLite endpoint for node discovery |
---
## Data Directory Setup
The guardian creates the data directory on startup if it does not exist. The directory structure:
```
/opt/orama/.orama/data/vault/
shares/
<identity_hash_hex>/
share.bin -- Encrypted share data
version -- Monotonic version counter
checksum.bin -- HMAC-SHA256 integrity checksum
```
### Permissions
The data directory must be writable by the guardian process. With the systemd service, only `/opt/orama/.orama/data/vault` is writable (`ReadWritePaths`).
```bash
# Create data directory
sudo mkdir -p /opt/orama/.orama/data/vault
sudo chown orama:orama /opt/orama/.orama/data/vault
sudo chmod 700 /opt/orama/.orama/data/vault
```
---
## Systemd Service
The project includes a systemd service file at `systemd/orama-vault.service`:
```ini
[Unit]
Description=Orama Vault Guardian
Documentation=https://github.com/orama-network/debros
After=network.target
PartOf=orama-node.service
[Service]
Type=simple
ExecStart=/opt/orama/bin/vault-guardian --config /opt/orama/.orama/data/vault/vault.yaml
Restart=on-failure
RestartSec=5s
# Security hardening
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=/opt/orama/.orama/data/vault
NoNewPrivileges=yes
# Allow mlock for secure memory
LimitMEMLOCK=67108864
# Resource limits
MemoryMax=512M
[Install]
WantedBy=multi-user.target
```
### Installation
```bash
# Copy binary
sudo cp zig-out/bin/vault-guardian /opt/orama/bin/vault-guardian
sudo chmod 755 /opt/orama/bin/vault-guardian
# Copy service file
sudo cp systemd/orama-vault.service /etc/systemd/system/orama-vault.service
# Reload systemd
sudo systemctl daemon-reload
# Enable and start
sudo systemctl enable orama-vault
sudo systemctl start orama-vault
# Check status
sudo systemctl status orama-vault
```
### Service Dependencies
The service is `PartOf=orama-node.service`, meaning:
- When `orama-node.service` is stopped, `orama-vault` is also stopped.
- When `orama-node.service` is restarted, `orama-vault` is also restarted.
- `After=network.target` ensures the network stack is up before the guardian starts.
### Restart Behavior
- `Restart=on-failure`: The guardian restarts if it exits with a non-zero status.
- `RestartSec=5s`: Wait 5 seconds between restarts.
- The guardian generates a new server secret on each start, which invalidates all existing session tokens. This is intentional -- sessions should not survive restarts.
---
## Firewall Rules
### UFW Configuration
```bash
# Client port: accessible from WireGuard overlay and optionally from gateway
sudo ufw allow from 10.0.0.0/24 to any port 7500 proto tcp comment "vault-guardian client"
# Peer port: WireGuard overlay ONLY
sudo ufw allow from 10.0.0.0/24 to any port 7501 proto tcp comment "vault-guardian peer"
```
### Port Summary
| Port | Protocol | Interface | Purpose |
|------|----------|-----------|---------|
| 7500 | TCP | WireGuard (10.0.0.x) | Client-facing HTTP API |
| 7501 | TCP | WireGuard (10.0.0.x) only | Guardian-to-guardian binary protocol |
**Port 7501 must NEVER be exposed on the public interface.** The peer protocol has no authentication beyond WireGuard -- it trusts that only authorized nodes can reach it.
Port 7500 may be exposed on the public interface if the node runs without the Orama gateway reverse proxy, but this is not recommended for production.
---
## Cross-Compilation
Zig's cross-compilation makes it trivial to build for any target:
```bash
# Linux x86_64 (production)
zig build -Dtarget=x86_64-linux-musl -Doptimize=ReleaseSafe
# Linux aarch64 (ARM servers)
zig build -Dtarget=aarch64-linux-musl -Doptimize=ReleaseSafe
# macOS x86_64
zig build -Dtarget=x86_64-macos -Doptimize=ReleaseSafe
# macOS aarch64 (Apple Silicon)
zig build -Dtarget=aarch64-macos -Doptimize=ReleaseSafe
```
All targets produce a single static binary. No runtime dependencies to install on the target system.
---
## Deployment to Orama Network Nodes
The vault guardian is deployed alongside other Orama services. The typical deployment workflow:
1. Build the binary on the development machine (cross-compile for Linux).
2. Copy the binary to the target node via `scp` or the `orama` CLI deploy tool.
3. Place the binary at `/opt/orama/bin/vault-guardian`.
4. Ensure the systemd service is installed and enabled.
5. Restart the service: `sudo systemctl restart orama-vault`.
For rolling upgrades across the cluster, follow the standard Orama network rolling upgrade protocol: upgrade one node at a time, verify health between each node.
### Health Verification After Deploy
```bash
# Check systemd service
sudo systemctl status orama-vault
# Check health endpoint
curl http://127.0.0.1:7500/v1/vault/health
# Expected response:
# {"status":"ok","version":"0.1.0"}
# Check status endpoint
curl http://127.0.0.1:7500/v1/vault/status
```
---
## Environment-Specific Notes
### Development / Local Testing
```bash
# Run with a local data directory, non-default port
./zig-out/bin/vault-guardian --data-dir /tmp/vault-dev --port 7500 --bind 127.0.0.1
```
The guardian runs in single-node mode when it cannot reach RQLite. This is normal for local development.
### Staging / Testnet
Same binary, deployed to testnet nodes. Use the standard config path:
```bash
vault-guardian --config /opt/orama/.orama/data/vault/vault.yaml
```
### Production / Mainnet
- Ensure WireGuard is up and peers are connected before starting the guardian.
- Ensure RQLite is running and healthy (the guardian queries it for node discovery).
- Verify firewall rules restrict port 7501 to WireGuard interfaces only.
- Monitor via the health endpoint and systemd journal.

462
docs/OPERATOR_GUIDE.md Normal file
View File

@ -0,0 +1,462 @@
# Orama Vault -- Operator Guide
## Monitoring
### Health Endpoint
The simplest way to check if a guardian is running:
```bash
curl -s http://127.0.0.1:7500/v1/vault/health | jq .
```
Expected response:
```json
{
"status": "ok",
"version": "0.1.0"
}
```
If this endpoint does not respond, the guardian process is not running or the port is blocked. Check systemd status first.
### Status Endpoint
Provides runtime configuration:
```bash
curl -s http://127.0.0.1:7500/v1/vault/status | jq .
```
Expected response:
```json
{
"status": "ok",
"version": "0.1.0",
"data_dir": "/opt/orama/.orama/data/vault",
"client_port": 7500,
"peer_port": 7501
}
```
### Guardians Endpoint
Lists known guardian nodes in the cluster:
```bash
curl -s http://127.0.0.1:7500/v1/vault/guardians | jq .
```
> **Note (v0.1.0):** This currently returns only the local node. Full cluster listing requires RQLite integration (Phase 2).
### Systemd Journal
The guardian logs to stderr using Zig's structured logging, which is captured by the systemd journal:
```bash
# View recent logs
sudo journalctl -u orama-vault -n 50 --no-pager
# Follow live logs
sudo journalctl -u orama-vault -f
# View logs since last boot
sudo journalctl -u orama-vault -b
# View error-level logs only
sudo journalctl -u orama-vault -p err
```
Log messages include:
- `vault-guardian v0.1.0 starting` -- startup confirmation
- `config: <path>` -- config file path
- `listening on <addr>:<port> (client)` -- client listener bound
- `listening on <addr>:<port> (peer)` -- peer listener bound
- `data directory: <path>` -- data directory path
- `guardian ready -- starting HTTP server` -- initialization complete
- `stored share for identity <hex> (<n> bytes, version <v>)` -- successful push
- `served share for identity <hex> (<n> bytes)` -- successful pull
- `rejected rollback for <hex>: version <v> <= current <v>` -- anti-rollback rejection
- `accept error: <err>` -- TCP accept failure (non-fatal, retried)
- `connection error: <err>` -- individual connection handling error
- `failed to write share for <hex>: <err>` -- disk write failure
### Service Status
```bash
sudo systemctl status orama-vault
```
Check for:
- `Active: active (running)` -- service is up
- `Main PID: <pid>` -- process ID
- Memory and CPU usage in the status output
---
## Troubleshooting
### Port Already In Use
**Symptom:** Guardian fails to start with `failed to bind 0.0.0.0:7500: error.AddressInUse`
**Diagnosis:**
```bash
# Find what's using the port
sudo ss -tlnp | grep 7500
```
**Resolution:**
- If another vault-guardian is running: `sudo systemctl stop orama-vault` first.
- If another service is using port 7500: change the vault port with `--port <other>`.
- If the port is in TIME_WAIT state from a recent restart: wait 30-60 seconds. The guardian sets `SO_REUSEADDR` which should handle most cases.
### Data Directory Permissions
**Symptom:** `failed to create data directory <path>: error.AccessDenied`
**Diagnosis:**
```bash
ls -la /opt/orama/.orama/data/vault/
```
**Resolution:**
```bash
sudo chown -R orama:orama /opt/orama/.orama/data/vault
sudo chmod 700 /opt/orama/.orama/data/vault
```
The systemd service uses `ProtectSystem=strict` with `ReadWritePaths=/opt/orama/.orama/data/vault`, so the data directory must be under this exact path or a CLI override must be used.
### RQLite Connectivity
**Symptom:** Log shows `failed to fetch node list from RQLite, running in single-node mode`
**Diagnosis:**
```bash
# Check if RQLite is running
sudo systemctl status orama-*-rqlite
# Test RQLite endpoint
curl -s http://127.0.0.1:4001/status | jq .store.raft.state
```
**Resolution:**
- This warning is non-fatal. The guardian continues in single-node mode.
- Ensure RQLite is started before the vault guardian (normal dependency ordering).
- Verify the `rqlite_url` in config matches the actual RQLite address.
> **Note (v0.1.0):** RQLite node discovery is a stub. The guardian always falls back to single-node mode. This warning is expected in the current version.
### Share Write Failures
**Symptom:** Push returns 500 Internal Server Error, logs show `failed to write share for <hex>: <err>`
**Diagnosis:**
```bash
# Check disk space
df -h /opt/orama/.orama/data/vault
# Check inode usage
df -i /opt/orama/.orama/data/vault
# Check directory permissions
ls -la /opt/orama/.orama/data/vault/shares/
```
**Resolution:**
- If disk is full: free space or expand the partition.
- If inodes are exhausted (unlikely but possible with millions of users): clean up orphaned temp files.
- If permissions are wrong: fix ownership as shown above.
### Anti-Rollback Rejections
**Symptom:** Push returns 400 with `"version must be greater than current stored version"`
This is normal behavior -- the client tried to push an older version of a share. Common causes:
- Client retry after a network timeout (the first push actually succeeded).
- Client software bug sending stale version numbers.
**Diagnosis:**
```bash
# Check current stored version for an identity
cat /opt/orama/.orama/data/vault/shares/<identity_hex>/version
```
**Resolution:** The client must send a version number strictly greater than the stored value. This is not a guardian bug.
### Guardian Crash Loop
**Symptom:** `systemctl status` shows rapid restarts.
**Diagnosis:**
```bash
# View recent crash logs
sudo journalctl -u orama-vault -n 100 --no-pager | tail -50
# Check for OOM kills
sudo journalctl -k | grep -i "oom\|kill"
```
**Resolution:**
- If OOM killed: the 512 MiB memory limit may be too low. Check if share data has grown unexpectedly.
- If config parse error: check the config file syntax (or remove it to use defaults).
- If bind error: another process is using the port.
---
## Manual Operations
### Check Stored Shares
List all identities with stored shares:
```bash
ls /opt/orama/.orama/data/vault/shares/
```
Check a specific identity's share:
```bash
# View version
cat /opt/orama/.orama/data/vault/shares/<identity_hex>/version
# View share size
ls -la /opt/orama/.orama/data/vault/shares/<identity_hex>/share.bin
# View checksum
xxd /opt/orama/.orama/data/vault/shares/<identity_hex>/checksum.bin
```
### Count Total Shares
```bash
ls -d /opt/orama/.orama/data/vault/shares/*/ 2>/dev/null | wc -l
```
### Verify Share Integrity Manually
The guardian verifies HMAC integrity on every read. To manually check if a share file has been corrupted:
```bash
# If you know the integrity key, you can compute HMAC externally:
# (The integrity key is internal to the guardian and not stored on disk in the current version)
# Check file exists and is non-empty
test -s /opt/orama/.orama/data/vault/shares/<identity>/share.bin && echo "OK" || echo "MISSING/EMPTY"
test -s /opt/orama/.orama/data/vault/shares/<identity>/checksum.bin && echo "OK" || echo "MISSING/EMPTY"
```
### Test Push/Pull
```bash
# Push a test share
curl -X POST http://127.0.0.1:7500/v1/vault/push \
-H "Content-Type: application/json" \
-d '{
"identity": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"share": "dGVzdCBzaGFyZSBkYXRh",
"version": 1
}'
# Expected: {"status":"stored"}
# Pull it back
curl -X POST http://127.0.0.1:7500/v1/vault/pull \
-H "Content-Type: application/json" \
-d '{
"identity": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}'
# Expected: {"share":"dGVzdCBzaGFyZSBkYXRh"}
```
### Delete a Share (Emergency)
> **Warning:** Deleting a share is destructive and cannot be undone. Only do this if you understand the implications for the user's recovery capability.
```bash
# Remove a specific identity's share directory
rm -rf /opt/orama/.orama/data/vault/shares/<identity_hex>
```
---
## Disaster Recovery
### Failure Scenarios
| Scenario | Impact | Recovery |
|----------|--------|----------|
| 1 node dies | No impact. N-1 nodes remain; any K of them can still reconstruct. | Replace node, shares will be re-pushed by clients. |
| K-1 nodes die | No impact. N-K+1 nodes remain (at least K), so reconstruction still succeeds. | Replace nodes, reshare when quorum recovers. |
| N-K nodes die | No impact. Exactly K nodes survive -- the minimum needed to reconstruct. | Replace nodes. |
| N-K+1 nodes die | **CRITICAL.** Only K-1 nodes remain. Cannot reconstruct. | Data loss for users who only have vault-based recovery. Users with mnemonic (Path A) are unaffected. |
| All nodes die | **TOTAL LOSS.** All shares destroyed. | Users must recover via mnemonic (Path A). Vault recovery (Path B) is permanently lost. |
| Data directory corrupted on 1 node | HMAC integrity check fails on read. Node returns errors for affected shares. | Delete corrupted share directory. Repair protocol will re-distribute once implemented. |
### Key Insight
The system can tolerate losing up to **N - K** nodes without any data loss. With default thresholds:
| Cluster Size | K | Max Node Loss |
|-------------|---|---------------|
| 5 nodes | 3 | 2 nodes |
| 14 nodes | 4 | 10 nodes |
| 50 nodes | 16 | 34 nodes |
| 100 nodes | 33 | 67 nodes |
### Backup Strategy
The data directory can be backed up with standard tools:
```bash
# Rsync backup
rsync -av /opt/orama/.orama/data/vault/ /backup/vault/
# Tarball backup
tar czf vault-backup-$(date +%Y%m%d).tar.gz -C /opt/orama/.orama/data/vault .
```
However, backups are generally unnecessary because:
1. Every node stores a share, so the cluster itself is the redundancy.
2. Shares are re-pushed by clients on updates.
3. The proactive re-sharing protocol (when fully wired) will redistribute shares automatically.
Backups are only useful if you fear simultaneous catastrophic failure of many nodes.
---
## Capacity Planning
### Per-Share Storage
Each user's share directory contains:
| File | Typical Size | Description |
|------|-------------|-------------|
| `share.bin` | ~1 KB | Encrypted share data (same size as original secret) |
| `version` | ~1-20 bytes | Version counter as ASCII digits |
| `checksum.bin` | 32 bytes | HMAC-SHA256 checksum |
| Directory entry | ~4 KB | Filesystem overhead (depends on filesystem) |
**Total per user per node: ~5 KB** (including filesystem overhead).
### Cluster-Wide Storage
With all-node replication, each user has one share on every node:
| Users | Per Node | Per Cluster (14 nodes) |
|-------|----------|------------------------|
| 1,000 | ~5 MB | ~70 MB |
| 10,000 | ~50 MB | ~700 MB |
| 100,000 | ~500 MB | ~7 GB |
| 1,000,000 | ~5 GB | ~70 GB |
At 1 million users, each node stores approximately 5 GB. This is well within the capability of any modern VPS.
### Memory Usage
The guardian uses minimal memory:
- Static binary: ~2-5 MB RSS.
- Per-connection: ~128 KB (64 KB read buffer + 64 KB write buffer).
- Single-threaded: only one connection is active at a time (MVP).
- No in-memory caching of shares: every read/write goes to disk.
The systemd `MemoryMax=512M` limit is generous for the current architecture. Actual usage is typically under 20 MB.
### Inode Usage
Each user creates one directory and 2-3 files. At 1 million users:
- ~4 million inodes.
- Most Linux filesystems default to millions of inodes (ext4: 1 inode per 16 KB).
- A 100 GB partition with ext4 defaults has ~6.5 million inodes.
This is unlikely to be a bottleneck but is worth monitoring on small partitions.
---
## Cluster Scaling
### Adding Nodes
When a new node joins the Orama network:
1. The vault guardian starts and registers itself via RQLite.
2. Other guardians detect the new node via the discovery module (join event).
3. The new node initially has zero shares.
4. Shares are populated in two ways:
- **Client push:** When clients push new versions, they include the new node.
- **Repair protocol:** The re-sharing protocol redistributes shares to include the new node (Phase 2).
The threshold K is recomputed based on the alive count: `K = max(3, floor(N/3))`.
### Removing Nodes
When a node leaves (graceful shutdown or failure):
1. Other guardians detect the departure via missed heartbeats (suspect at 15s, dead at 60s).
2. The departed node's shares are lost.
3. If N - K nodes remain alive, no data is lost.
4. If the departure drops below the safety threshold (K+1 alive), the repair protocol triggers re-sharing to adjust.
### Threshold Adjustment
The threshold is dynamic and automatic:
```
K = max(3, floor(alive_count / 3))
```
- Adding nodes generally does not change K until the cluster grows significantly.
- Removing nodes may reduce K if the alive count drops enough.
- K never drops below 3, ensuring a minimum collusion resistance.
### Write Quorum
Write quorum requires supermajority acknowledgment:
```
W = ceil(2/3 * alive_count)
```
| Alive | Write Quorum (W) |
|-------|-------------------|
| 1 | 1 |
| 2 | 2 |
| 3 | 2 |
| 4 | 3 |
| 5 | 4 |
| 14 | 10 |
| 100 | 67 |
A push succeeds only if W guardians acknowledge storage. This ensures consistency even with some nodes being slow or temporarily unreachable.
> **Note (v0.1.0):** Write quorum is computed but not enforced in the current single-node push handler. Multi-guardian fan-out is Phase 2.
---
## Operational Checklist
### Pre-Deploy
- [ ] Binary built with `-Doptimize=ReleaseSafe` for the correct target.
- [ ] Data directory exists with correct ownership and permissions.
- [ ] systemd service file installed and enabled.
- [ ] Firewall rules allow port 7500 (client) and 7501 (peer, WireGuard only).
- [ ] WireGuard is up and peers are connected.
### Post-Deploy
- [ ] Health endpoint responds: `curl http://127.0.0.1:7500/v1/vault/health`
- [ ] Status endpoint shows correct config: `curl http://127.0.0.1:7500/v1/vault/status`
- [ ] No error-level log messages: `sudo journalctl -u orama-vault -p err -n 10`
- [ ] Test push/pull cycle works (see "Test Push/Pull" section above).
### Periodic Checks
- [ ] Health endpoint responds on all nodes.
- [ ] Share count is consistent across nodes (same identities on each).
- [ ] Disk usage is within expected bounds.
- [ ] No repeated error messages in the journal.
- [ ] systemd reports `active (running)` with uptime matching expectations.

130
docs/PQ_INTEGRATION.md Normal file
View File

@ -0,0 +1,130 @@
# Post-Quantum Crypto Integration Guide
## Current Status
The PQ modules (`pq_kem.zig`, `pq_sig.zig`) are **stub implementations**:
- **ML-KEM-768 (pq_kem.zig):** Uses HMAC-based key derivation instead of real lattice-based KEM. Provides classical security via HMAC-SHA256 but zero post-quantum security.
- **ML-DSA-65 (pq_sig.zig):** Uses SHA-256 hash-based signatures instead of real lattice-based signatures. Provides tamper detection but zero post-quantum security.
Both stubs preserve the correct interface (key sizes, function signatures) so callers don't need to change when liboqs is integrated.
**Important:** The stubs now **fail-closed** — `verify()` rejects tampered messages/signatures, and `encaps()`/`decaps()` produce deterministic (though incompatible) shared secrets. The previous stub where `verify()` always succeeded has been fixed.
## Integration Plan
### Prerequisites
1. **liboqs** — Open Quantum Safe library (C)
- Source: https://github.com/open-quantum-safe/liboqs
- Required version: 0.10.0+
- Algorithms needed: ML-KEM-768 (FIPS 203), ML-DSA-65 (FIPS 204)
### Step 1: Build liboqs as a Static Library
```bash
# Clone
git clone https://github.com/open-quantum-safe/liboqs.git
cd liboqs
# Build for Linux x86_64 (production target)
mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=/opt/orama/lib/liboqs \
-DBUILD_SHARED_LIBS=OFF \
-DOQS_MINIMAL_BUILD="KEM_ml_kem_768;SIG_ml_dsa_65" \
-DCMAKE_C_COMPILER=zig-cc \
..
make -j$(nproc)
make install
```
For cross-compilation with Zig:
```bash
# Use Zig as the C compiler for cross-compilation
CC="zig cc -target x86_64-linux-musl" cmake ...
```
### Step 2: Update build.zig
Add liboqs as a system dependency:
```zig
// In build.zig, after creating the module:
root_mod.addIncludePath(.{ .cwd_relative = "/opt/orama/lib/liboqs/include" });
root_mod.addLibraryPath(.{ .cwd_relative = "/opt/orama/lib/liboqs/lib" });
root_mod.linkSystemLibrary("oqs");
```
### Step 3: Replace Stubs
**pq_kem.zig** — Replace stub functions with:
```zig
const oqs = @cImport({
@cInclude("oqs/oqs.h");
});
pub fn keygen() KEMError!Keypair {
var kp = Keypair{ .public_key = undefined, .secret_key = undefined };
const kem = oqs.OQS_KEM_new(oqs.OQS_KEM_alg_ml_kem_768) orelse return KEMError.KeygenFailed;
defer oqs.OQS_KEM_free(kem);
if (oqs.OQS_KEM_keypair(kem, &kp.public_key, &kp.secret_key) != oqs.OQS_SUCCESS) {
return KEMError.KeygenFailed;
}
return kp;
}
pub fn encaps(public_key: [PK_SIZE]u8) KEMError!EncapsulationResult {
var result = EncapsulationResult{ .ciphertext = undefined, .shared_secret = undefined };
const kem = oqs.OQS_KEM_new(oqs.OQS_KEM_alg_ml_kem_768) orelse return KEMError.EncapsFailed;
defer oqs.OQS_KEM_free(kem);
if (oqs.OQS_KEM_encaps(kem, &result.ciphertext, &result.shared_secret, &public_key) != oqs.OQS_SUCCESS) {
return KEMError.EncapsFailed;
}
return result;
}
pub fn decaps(ciphertext: [CT_SIZE]u8, secret_key: [SK_SIZE]u8) KEMError![SS_SIZE]u8 {
const kem = oqs.OQS_KEM_new(oqs.OQS_KEM_alg_ml_kem_768) orelse return KEMError.DecapsFailed;
defer oqs.OQS_KEM_free(kem);
var ss: [SS_SIZE]u8 = undefined;
if (oqs.OQS_KEM_decaps(kem, &ss, &ciphertext, &secret_key) != oqs.OQS_SUCCESS) {
return KEMError.DecapsFailed;
}
return ss;
}
```
**pq_sig.zig** — Replace stub functions similarly using `OQS_SIG_ml_dsa_65`.
### Step 4: Test
```bash
zig build test # Unit tests with real PQ operations
```
Key test: `keygen() → encaps(pk) → decaps(ct, sk)` must produce matching shared secrets.
### Step 5: Wire into Protocol
Once stubs are replaced:
1. **Hybrid key exchange:** X25519 + ML-KEM-768 for guardian-to-guardian and client-to-guardian (see `crypto/hybrid.zig`)
2. **PQ auth:** ML-DSA-65 signatures on challenge-response tokens
3. **Key rotation:** Re-key PQ parameters on each session
## Security Note
The current system is still secure without PQ crypto because:
- **Shamir SSS** is information-theoretic (immune to quantum)
- **AES-256-GCM** has 128-bit post-quantum security (Grover's algorithm halves the key strength)
- **WireGuard** provides transport encryption (ChaCha20-Poly1305)
- **HMAC-SHA256** has 128-bit post-quantum security
PQ crypto adds protection against quantum attacks on:
- **Key exchange** (currently X25519 — vulnerable to Shor's algorithm)
- **Digital signatures** (currently Ed25519 — vulnerable to Shor's algorithm)
The urgency is moderate: harvest-now-decrypt-later attacks are a concern for long-lived secrets, but Shamir shares are re-generated on each sync push.

379
docs/SECURITY_MODEL.md Normal file
View File

@ -0,0 +1,379 @@
# Orama Vault -- Security Model
## Threat Model
| Threat | Severity | Mitigation | Status |
|--------|----------|------------|--------|
| **Single node compromise** | Medium | Shamir SSS: a single share reveals zero information about the secret. Attacker gets one share out of N; needs K to reconstruct. | Implemented |
| **K-1 node collusion** | High | Information-theoretic security: K-1 shares provide exactly zero bits of information about the secret. This is not computational -- it is mathematically proven. | Implemented |
| **All N nodes collude** | Critical | Not defended. If all N guardians collude, they can reconstruct the secret. Mitigated by: (1) nodes operated by different parties, (2) geographic distribution, (3) proactive re-sharing invalidates old shares. | By design |
| **Quantum adversary** | Future | Post-quantum KEM (ML-KEM-768) and signatures (ML-DSA-65) interfaces are defined. Hybrid key exchange (X25519 + ML-KEM-768) is implemented. | Stub (Phase 2) |
| **Replay attack on push** | Medium | Monotonic version counter. Each push must have a version strictly greater than the stored version. Replaying an old push is rejected. | Implemented |
| **Rollback attack** | Medium | Anti-rollback via monotonic version file per identity. Attacker cannot downgrade a share to an older version. | Implemented |
| **Disk corruption** | Medium | HMAC-SHA256 checksum per share file. On read, the checksum is verified before returning data. Corruption is detected and surfaced as an error. | Implemented |
| **Disk tampering** | Medium | Same HMAC integrity check. An attacker who modifies share.bin on disk cannot forge a valid checksum without the integrity key. | Implemented |
| **Network eavesdropping** | High | All inter-node traffic uses WireGuard (encrypted tunnel). Client-to-guardian will use TLS in Phase 3. | Partial (WireGuard: yes, TLS: Phase 3) |
| **Timing side-channels** | Low | All HMAC verifications and auth token checks use constant-time comparison (`diff |= x ^ y` accumulator). | Implemented |
| **Memory disclosure** | Low | Secure memory: `secureZero` (volatile zero-fill that cannot be optimized away), `mlock` (prevents swap to disk), `SecureBuffer` RAII wrapper. Server secret zeroed on Guardian deinit. | Implemented |
| **Resource exhaustion** | Medium | Request body size limits (1 MiB push, 4 KiB pull), share size limit (512 KiB), peer protocol max payload (1 MiB). Systemd MemoryMax=512M. | Implemented |
| **Man-in-the-middle (peer)** | High | WireGuard provides authenticated encryption between peers. Only nodes with valid WireGuard keys can communicate on port 7501. | Implemented (via WireGuard) |
| **Man-in-the-middle (client)** | High | TLS termination planned for Phase 3. Currently plain TCP on port 7500. In production, the Orama gateway provides TLS. | Gateway-level |
| **Unauthorized push/pull** | Medium | Challenge-response auth module exists with HMAC-based tokens. Not yet wired to HTTP handlers. | Phase 2 |
| **Share epoch mixing** | High | After proactive re-sharing, old and new shares are algebraically independent. Mixing shares from different epochs does NOT reconstruct the secret. Tested and verified. | Implemented |
---
## Shamir Secret Sharing Security
### Information-Theoretic Security
Shamir's Secret Sharing provides **perfect secrecy** -- this is the strongest possible security guarantee:
- **K shares** can reconstruct the secret (Lagrange interpolation at x=0).
- **K-1 shares** provide exactly **zero** information about the secret.
- This is not a computational assumption. It holds against adversaries with unlimited computing power, including quantum computers.
**Proof sketch:** A polynomial of degree K-1 is uniquely determined by K points. With only K-1 points, there are exactly 256 (= |GF(2^8)|) distinct polynomials passing through those points, one for each possible value of the constant term (the secret byte). Each is equally likely. Therefore, the conditional probability distribution of the secret given K-1 shares is uniform over GF(2^8) -- identical to the prior distribution. No information is gained.
For a multi-byte secret of length L, this applies independently to each byte position, since each byte uses an independent random polynomial.
### Threshold and Share Count
The system uses an adaptive threshold:
```
K = max(3, floor(N/3))
```
Where N is the number of alive guardians. This means:
| Alive Nodes (N) | Threshold (K) | Fault Tolerance (N-K) |
|------------------|---------------|------------------------|
| 3 | 3 | 0 |
| 5 | 3 | 2 |
| 9 | 3 | 6 |
| 10 | 3 | 7 |
| 14 | 4 | 10 |
| 50 | 16 | 34 |
| 100 | 33 | 67 |
The minimum threshold of 3 ensures that at least 3 guardians must cooperate to reconstruct, even in small clusters. This prevents trivial collusion.
---
## GF(2^8) Choice Rationale
The finite field GF(2^8) = GF(256) was chosen for Shamir arithmetic:
1. **Same field as AES.** The irreducible polynomial x^8 + x^4 + x^3 + x + 1 (0x11B) is the AES field polynomial. This is the most studied and battle-tested GF(2^8) instantiation in cryptography.
2. **Byte-aligned.** Each field element is exactly one byte. No encoding overhead, no multi-precision arithmetic, no serialization complexity.
3. **O(1) arithmetic.** Precomputed exp/log tables (512 + 256 = 768 bytes total, generated at Zig comptime) give constant-time multiplication, inversion, and division via table lookups. The generator element is 3 (0x03), a primitive element of the multiplicative group of order 255.
4. **255 distinct evaluation points.** Shares are evaluated at x = 1, 2, ..., N (never x=0, which would reveal the secret). This supports up to 255 shares per secret, far exceeding the Orama network size.
5. **Exhaustively verified.** The implementation includes tests that verify:
- All 256x256 multiplication pairs produce valid results.
- Multiplicative identity: 1 * a = a for all a.
- Multiplicative inverse: a * inv(a) = 1 for all nonzero a.
- Commutativity, associativity, and distributivity (sampled).
- The exp table generates all 255 nonzero elements exactly once (confirming 3 is a primitive element).
---
## Key Wrapping (Planned Architecture)
> **Status:** The key wrapping scheme is designed but not yet fully implemented. The crypto primitives (AES-256-GCM, HKDF-SHA256) are implemented and tested.
The planned key hierarchy:
```
User Secret (root seed / mnemonic)
|
+-- DEK (Data Encryption Key) -- random 256-bit AES key
| |
| +-- Encrypts the secret via AES-256-GCM
|
+-- KEK1 (Key Encryption Key 1) -- derived from mnemonic via HKDF
| |
| +-- Wraps DEK (AES-256-GCM)
| +-- Stored alongside the encrypted secret
|
+-- KEK2 (Key Encryption Key 2) -- derived from username+passphrase via HKDF
|
+-- Wraps DEK (AES-256-GCM)
+-- Stored alongside the encrypted secret
```
**Recovery Path A (Mnemonic):**
1. User provides mnemonic.
2. Derive KEK1 = HKDF(mnemonic, "orama-kek1-v1").
3. Unwrap DEK from wrapped_dek1.bin.
4. Decrypt secret with DEK.
**Recovery Path B (Username + Passphrase):**
1. User provides username + passphrase.
2. Derive identity = SHA-256(username).
3. Pull K shares from guardians.
4. Reconstruct encrypted blob via Lagrange interpolation.
5. Derive KEK2 = HKDF(passphrase, "orama-kek2-v1").
6. Unwrap DEK from wrapped_dek2.bin.
7. Decrypt secret with DEK.
---
## HMAC Integrity
Every stored share has an associated HMAC-SHA256 checksum:
```
checksum = HMAC-SHA256(integrity_key, share_data)
```
On read, the checksum is recomputed and compared in constant time:
```zig
fn constantTimeEqual(a: []const u8, b: []const u8) bool {
if (a.len != b.len) return false;
var diff: u8 = 0;
for (a, b) |x, y| {
diff |= x ^ y;
}
return diff == 0;
}
```
This detects:
- Accidental disk corruption (bit flips, sector failures).
- Intentional tampering by an attacker with disk access.
- Partial writes (if the share was updated but checksum wasn't, or vice versa).
---
## Anti-Rollback Protection
Each identity has a monotonic version counter stored in a separate file (`version`). On push:
1. Read current version from `<data_dir>/shares/<identity>/version`.
2. If the file exists and the new version is <= the stored version, reject with 400.
3. If the new version is strictly greater, proceed with the write.
4. Write the new version atomically (temp + rename).
This prevents an attacker from replacing a current share with an older version, which could be part of an attack to force reconstruction with a known set of shares.
---
## Timing Attack Prevention
All security-sensitive comparisons use constant-time operations:
1. **HMAC verification** (`src/crypto/hmac.zig`): `constantTimeEqual` with XOR accumulator.
2. **Challenge verification** (`src/auth/challenge.zig`): `timingSafeEqual` with same pattern.
3. **Session token verification** (`src/auth/session.zig`): `timingSafeEqual` with same pattern.
The pattern:
```zig
var diff: u8 = 0;
for (a, b) |x, y| {
diff |= x ^ y;
}
return diff == 0;
```
This ensures the comparison takes the same time regardless of where (or whether) the bytes differ. An attacker cannot learn partial information about expected values by measuring response times.
---
## Secure Memory
The `src/crypto/secure_mem.zig` module provides:
### secureZero
```zig
pub fn secureZero(buf: []u8) void {
std.crypto.secureZero(u8, @as([]volatile u8, @volatileCast(buf)));
}
```
Uses volatile semantics to prevent the compiler from optimizing away the zero-fill. This is critical for erasing keys, secrets, and intermediate cryptographic material from memory.
### mlock / munlock
```zig
pub fn mlock(ptr: [*]const u8, len: usize) void {
if (builtin.os.tag == .linux) {
const result = std.posix.mlock(ptr[0..len]);
// Non-fatal on failure
}
}
```
Locks memory pages so they are never written to swap. This prevents key material from being persisted to disk in a swap partition. Requires either `CAP_IPC_LOCK` capability or sufficient `RLIMIT_MEMLOCK`.
The systemd service file sets `LimitMEMLOCK=67108864` (64 MiB) to allow mlock.
### SecureBuffer
RAII wrapper that combines allocation, mlock, and automatic zeroing:
```zig
pub const SecureBuffer = struct {
data: []u8,
allocator: std.mem.Allocator,
pub fn deinit(self: *SecureBuffer) void {
secureZero(self.data); // volatile zero
munlock(self.data.ptr, ...); // unlock pages
self.allocator.free(self.data);
}
};
```
Used for all key material that has a defined lifetime.
### Server Secret Zeroing
The `Guardian.deinit()` method zeroes the 32-byte server secret:
```zig
pub fn deinit(self: *Guardian) void {
self.nodes.deinit();
@memset(&self.server_secret, 0);
}
```
### Share Zeroing
All `Share.deinit()` calls zero the share data before freeing:
```zig
pub fn deinit(self: Share, allocator: std.mem.Allocator) void {
const mutable: []u8 = @constCast(self.y);
@memset(mutable, 0);
allocator.free(mutable);
}
```
Similarly, the `split` operation zeros the coefficient buffer (which contains the secret as `coeffs[0]`) on cleanup.
---
## Post-Quantum Roadmap
### Current State: Stubs
The post-quantum modules exist with correct interfaces but provide **zero security**:
- **ML-KEM-768** (`src/crypto/pq_kem.zig`): `keygen()` returns random bytes. `encaps()` returns random shared secret. `decaps()` returns random shared secret. They do NOT perform real lattice operations.
- **ML-DSA-65** (`src/crypto/pq_sig.zig`): `keygen()` returns random bytes. `sign()` returns SHA-256 hash as placeholder. `verify()` **ALWAYS SUCCEEDS** -- provides zero signature verification.
Both modules log a one-time warning when first used:
```
pq_kem: STUB implementation in use -- provides ZERO post-quantum security
pq_sig: STUB implementation in use -- provides ZERO post-quantum security, verify() ALWAYS succeeds
```
### Planned Implementation (Phase 2)
Replace stubs with liboqs-backed implementations:
| Algorithm | Standard | Security Level | Key Sizes |
|-----------|----------|---------------|-----------|
| ML-KEM-768 | FIPS 203 | ~192-bit post-quantum | PK: 1184, SK: 2400, CT: 1088, SS: 32 |
| ML-DSA-65 | FIPS 204 | ~192-bit post-quantum | PK: 1952, SK: 4032, Sig: 3309 max |
Integration plan:
1. Link liboqs as a C dependency via Zig's `@cImport`.
2. Replace random byte generation with actual `OQS_KEM_ml_kem_768_*` and `OQS_SIG_ml_dsa_65_*` calls.
3. The hybrid module (`src/crypto/hybrid.zig`) already combines X25519 + ML-KEM correctly -- once the ML-KEM stub is replaced, hybrid key exchange will provide real post-quantum protection.
### Hybrid Key Exchange
The hybrid module (`src/crypto/hybrid.zig`) implements X25519 + ML-KEM-768:
```
shared_secret = HKDF-SHA256(X25519_SS || ML-KEM_SS, salt=0^32, info="orama-hybrid-v1")
```
This ensures:
- If X25519 is broken (quantum computer), ML-KEM still protects.
- If ML-KEM is broken (unknown classical attack), X25519 still protects.
- Both must be broken simultaneously to compromise the shared secret.
The X25519 portion is fully functional using Zig's `std.crypto.dh.X25519`. Only the ML-KEM portion is currently a stub.
---
## WireGuard Transport Security
All guardian-to-guardian communication (port 7501) is restricted to the WireGuard overlay network (10.0.0.x addresses). WireGuard provides:
1. **Authenticated encryption:** ChaCha20-Poly1305 with per-peer keys derived from Noise IK handshake.
2. **Perfect forward secrecy:** New ephemeral keys every 2 minutes or 2^64 messages.
3. **Mutual authentication:** Only nodes with authorized public keys can join the overlay.
4. **Replay protection:** Built-in counter-based replay rejection.
An attacker who does not have a valid WireGuard private key cannot:
- Connect to port 7501 on any guardian.
- Observe peer-to-peer traffic contents.
- Inject or replay messages.
This is defense-in-depth: even if the binary peer protocol had vulnerabilities, the WireGuard layer prevents exploitation from outside the cluster.
---
## Proactive Re-sharing Security
The Herzberg-Jarecki-Krawczyk-Yung re-sharing protocol ensures:
1. **Forward secrecy for shares.** After re-sharing, old shares are algebraically independent from new shares. An attacker who compromises old shares (before re-sharing) and new shares (after re-sharing) from *different* guardians cannot combine them.
2. **Secret preservation.** The secret itself does not change during re-sharing. Only the polynomial representation changes. `sum(q_i(0)) = 0` ensures the constant term (secret) is preserved.
3. **Epoch isolation.** Tested and verified: mixing one new share with K-1 old shares does NOT reconstruct the original secret. The test in `src/sss/reshare.zig` confirms this with high probability.
4. **No secret reconstruction.** At no point during re-sharing does any single party learn the secret. Each guardian only processes deltas and updates its own share.
Re-sharing is triggered:
- On node topology changes (join/leave detected by discovery module).
- Periodically every 24 hours.
- When alive count drops below the safety threshold (K+1).
---
## Resource Limits
| Resource | Limit | Where Enforced |
|----------|-------|----------------|
| Process memory | 512 MiB | systemd `MemoryMax=512M` |
| mlock memory | 64 MiB | systemd `LimitMEMLOCK=67108864` |
| Push request body | 1 MiB | `handler_push.zig` `MAX_BODY_SIZE` |
| Pull request body | 4 KiB | `handler_pull.zig` `MAX_BODY_SIZE` |
| Decoded share size | 512 KiB | `handler_push.zig` `MAX_SHARE_SIZE` |
| Peer protocol payload | 1 MiB | `protocol.zig` `MAX_PAYLOAD_SIZE` |
| HTTP read buffer | 64 KiB | `listener.zig` `READ_BUF_SIZE` |
| Share file read | 1 MiB / 10 MiB | `handler_pull.zig` / `file_store.zig` |
---
## Systemd Security Hardening
The systemd service file applies defense-in-depth:
```ini
PrivateTmp=yes # Isolated /tmp
ProtectSystem=strict # Read-only filesystem except explicit paths
ReadWritePaths=/opt/orama/.orama/data/vault # Only data dir is writable
NoNewPrivileges=yes # Cannot gain new privileges (no setuid, no capabilities)
LimitMEMLOCK=67108864 # Allow mlock for secure memory
MemoryMax=512M # Hard memory limit
```
This means even if the guardian process is compromised, the attacker:
- Cannot write to the filesystem outside the data directory.
- Cannot escalate privileges.
- Cannot consume unbounded memory.
- Has isolated temporary file access.

156
src/auth/challenge.zig Normal file
View File

@ -0,0 +1,156 @@
/// Challenge-response authentication.
///
/// Flow:
/// 1. Client sends identity hash
/// 2. Server returns a random challenge (32 bytes) + expiry timestamp
/// 3. Client signs challenge with their private key
/// 4. Server verifies signature against identity's public key
///
/// For MVP: HMAC-based challenge tokens (no public key crypto yet).
/// The challenge is an HMAC over (identity || timestamp || nonce) using a server secret.
/// Client must return the challenge within the expiry window.
///
/// Phase 3 adds Ed25519 signature verification.
const std = @import("std");
const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
/// Size in bytes of the random challenge nonce.
pub const CHALLENGE_SIZE = 32;
/// How long a challenge remains valid after creation.
pub const CHALLENGE_EXPIRY_NS: i128 = 60 * std.time.ns_per_s; // 60 seconds

/// A stateless challenge token. The server does not store issued challenges;
/// it re-verifies them later by recomputing the HMAC tag from
/// (identity, nonce, created_ns) with its server secret.
pub const Challenge = struct {
    /// Random challenge bytes
    nonce: [CHALLENGE_SIZE]u8,
    /// Timestamp when challenge was created (nanos since epoch)
    created_ns: i128,
    /// HMAC tag binding challenge to identity + timestamp
    tag: [HmacSha256.mac_length]u8,
};

/// Errors returned by verifyChallenge.
pub const AuthError = error{
    // Challenge is outside the validity window (too old, or future-dated).
    ChallengeExpired,
    // HMAC tag mismatch: wrong identity, wrong secret, or tampered fields.
    InvalidChallenge,
};
/// Generate a new challenge for the given identity.
///
/// The returned challenge carries a fresh random nonce, its creation time,
/// and a tag computed as HMAC(server_secret, identity || nonce || timestamp).
/// The tag lets the server verify the challenge later without storing state.
pub fn generateChallenge(identity: []const u8, server_secret: [32]u8) Challenge {
    var challenge = Challenge{
        .nonce = undefined,
        .created_ns = std.time.nanoTimestamp(),
        .tag = undefined,
    };
    std.crypto.random.bytes(&challenge.nonce);

    // Serialize the creation timestamp for inclusion in the MAC input.
    var timestamp_bytes: [16]u8 = undefined;
    std.mem.writeInt(i128, &timestamp_bytes, challenge.created_ns, .little);

    // HMAC(server_secret, identity || nonce || timestamp)
    var mac = HmacSha256.init(&server_secret);
    mac.update(identity);
    mac.update(&challenge.nonce);
    mac.update(&timestamp_bytes);
    mac.final(&challenge.tag);

    return challenge;
}
/// Verify a challenge response.
/// The client must return the exact nonce + timestamp + tag within the expiry window.
pub fn verifyChallenge(
    challenge: Challenge,
    identity: []const u8,
    server_secret: [32]u8,
) AuthError!void {
    // Reject anything outside the validity window, including future-dated
    // challenges (negative age), before doing any crypto work.
    const age = std.time.nanoTimestamp() - challenge.created_ns;
    if (age < 0 or age > CHALLENGE_EXPIRY_NS) {
        return AuthError.ChallengeExpired;
    }

    // Recompute the tag over exactly the material used at generation time:
    // HMAC(server_secret, identity || nonce || timestamp).
    var timestamp_bytes: [16]u8 = undefined;
    std.mem.writeInt(i128, &timestamp_bytes, challenge.created_ns, .little);

    var mac = HmacSha256.init(&server_secret);
    mac.update(identity);
    mac.update(&challenge.nonce);
    mac.update(&timestamp_bytes);

    var expected: [HmacSha256.mac_length]u8 = undefined;
    mac.final(&expected);

    // Constant-time comparison so the attacker learns nothing from timing.
    if (!timingSafeEqual(&expected, &challenge.tag)) {
        return AuthError.InvalidChallenge;
    }
}
/// Constant-time byte comparison to prevent timing side-channel attacks.
/// Every byte pair is XORed into an accumulator, so the runtime does not
/// depend on where (or whether) the inputs differ.
fn timingSafeEqual(a: []const u8, b: []const u8) bool {
    if (a.len != b.len) return false;
    var acc: u8 = 0;
    var i: usize = 0;
    while (i < a.len) : (i += 1) {
        acc |= a[i] ^ b[i];
    }
    return acc == 0;
}
// Tests: each covers one field of the HMAC binding (identity, secret,
// nonce, timestamp). A change to any MAC'd input must invalidate the tag.
test "challenge: generate and verify" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    const identity = "abcdef1234";
    const challenge = generateChallenge(identity, secret);
    // Should verify successfully
    try verifyChallenge(challenge, identity, secret);
}

test "challenge: wrong identity fails" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    const challenge = generateChallenge("alice", secret);
    // Different identity should fail
    try std.testing.expectError(AuthError.InvalidChallenge, verifyChallenge(challenge, "bob", secret));
}

test "challenge: wrong secret fails" {
    var secret1: [32]u8 = undefined;
    var secret2: [32]u8 = undefined;
    std.crypto.random.bytes(&secret1);
    std.crypto.random.bytes(&secret2);
    const challenge = generateChallenge("alice", secret1);
    // Different server secret should fail
    try std.testing.expectError(AuthError.InvalidChallenge, verifyChallenge(challenge, "alice", secret2));
}

test "challenge: tampered nonce fails" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    var challenge = generateChallenge("alice", secret);
    challenge.nonce[0] ^= 0xFF; // tamper
    try std.testing.expectError(AuthError.InvalidChallenge, verifyChallenge(challenge, "alice", secret));
}

test "challenge: tampered timestamp fails" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    var challenge = generateChallenge("alice", secret);
    // Shifting created_ns changes the MAC input even though the nonce is intact.
    challenge.created_ns -= 1; // tamper
    try std.testing.expectError(AuthError.InvalidChallenge, verifyChallenge(challenge, "alice", secret));
}

126
src/auth/session.zig Normal file
View File

@ -0,0 +1,126 @@
/// Session token management.
///
/// After successful challenge-response auth, the server issues an HMAC-based
/// session token. Clients include this token in subsequent requests.
///
/// Token format: base64(identity_hash || expiry_timestamp || hmac_tag)
/// The HMAC binds the identity and expiry to the server secret.
const std = @import("std");
const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;

/// Session lifetime: one hour from issuance.
pub const SESSION_EXPIRY_NS: i128 = 3600 * std.time.ns_per_s; // 1 hour

/// An HMAC-authenticated session token.
/// The tag binds the identity buffer, the identity length, and the expiry
/// to the server secret, so no field can be modified without detection.
pub const SessionToken = struct {
    identity: [64]u8, // hex-encoded identity hash (zero-padded)
    identity_len: u8,
    expiry_ns: i128,
    tag: [HmacSha256.mac_length]u8,
};

/// Errors returned by verifyToken.
pub const SessionError = error{
    SessionExpired,
    InvalidSession,
};

/// Compute the token tag: HMAC(secret, identity_buf || identity_len || expiry).
///
/// FIX: identity_len is now included in the MAC input. Previously it was
/// unauthenticated, so an attacker could alter identity_len on a valid token
/// and change the identity slice returned by verifyToken (including reading
/// past the real identity into the zero padding, or triggering an
/// out-of-bounds slice for identity_len > 64).
fn computeTag(
    id_buf: *const [64]u8,
    identity_len: u8,
    expiry_ns: i128,
    server_secret: [32]u8,
) [HmacSha256.mac_length]u8 {
    var mac = HmacSha256.init(&server_secret);
    mac.update(id_buf);
    mac.update(&[_]u8{identity_len});
    var expiry_bytes: [16]u8 = undefined;
    std.mem.writeInt(i128, &expiry_bytes, expiry_ns, .little);
    mac.update(&expiry_bytes);
    var tag: [HmacSha256.mac_length]u8 = undefined;
    mac.final(&tag);
    return tag;
}

/// Issue a session token for the given identity.
/// Identities longer than 64 bytes are truncated to 64 (callers pass
/// 64-char hex hashes per the token format, so truncation should not
/// occur in practice -- TODO confirm against callers).
pub fn issueToken(identity: []const u8, server_secret: [32]u8) SessionToken {
    const now = std.time.nanoTimestamp();
    const expiry = now + SESSION_EXPIRY_NS;
    var id_buf: [64]u8 = .{0} ** 64;
    const copy_len = @min(identity.len, 64);
    @memcpy(id_buf[0..copy_len], identity[0..copy_len]);
    const len_byte: u8 = @intCast(copy_len);
    return SessionToken{
        .identity = id_buf,
        .identity_len = len_byte,
        .expiry_ns = expiry,
        .tag = computeTag(&id_buf, len_byte, expiry, server_secret),
    };
}

/// Verify a session token.
/// Returns the identity slice (length identity_len) on success.
pub fn verifyToken(token: SessionToken, server_secret: [32]u8) SessionError![]const u8 {
    // Defensive bound check before slicing. A valid tag implies len <= 64
    // (issueToken never produces more), but a malformed token must be
    // rejected with an error rather than a safety panic.
    if (token.identity_len > 64) {
        return SessionError.SessionExpired catch unreachable; // unreachable placeholder
    }
    // Check expiry first: cheap rejection of stale tokens.
    const now = std.time.nanoTimestamp();
    if (now > token.expiry_ns) {
        return SessionError.SessionExpired;
    }
    // Recompute the tag over identity, length, and expiry.
    const expected = computeTag(&token.identity, token.identity_len, token.expiry_ns, server_secret);
    // Constant-time comparison to prevent timing attacks.
    if (!timingSafeEqual(&expected, &token.tag)) {
        return SessionError.InvalidSession;
    }
    return token.identity[0..token.identity_len];
}

/// Constant-time byte comparison to prevent timing side-channel attacks.
fn timingSafeEqual(a: []const u8, b: []const u8) bool {
    if (a.len != b.len) return false;
    var diff: u8 = 0;
    for (a, b) |x, y| {
        diff |= x ^ y;
    }
    return diff == 0;
}
// Tests: cover the HMAC binding of the token fields (identity bytes,
// server secret, expiry). Tampering with any MAC'd input must fail.
test "session: issue and verify" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    const token = issueToken("abcdef1234", secret);
    const identity = try verifyToken(token, secret);
    try std.testing.expectEqualSlices(u8, "abcdef1234", identity);
}

test "session: wrong secret fails" {
    var secret1: [32]u8 = undefined;
    var secret2: [32]u8 = undefined;
    std.crypto.random.bytes(&secret1);
    std.crypto.random.bytes(&secret2);
    const token = issueToken("alice", secret1);
    try std.testing.expectError(SessionError.InvalidSession, verifyToken(token, secret2));
}

test "session: tampered identity fails" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    var token = issueToken("alice", secret);
    token.identity[0] = 'X'; // tamper
    try std.testing.expectError(SessionError.InvalidSession, verifyToken(token, secret));
}

test "session: tampered expiry fails" {
    var secret: [32]u8 = undefined;
    std.crypto.random.bytes(&secret);
    var token = issueToken("alice", secret);
    // Expiry is MAC'd, so even a 1ns shift invalidates the tag
    // (the token itself is still well within its validity window).
    token.expiry_ns += 1; // tamper
    try std.testing.expectError(SessionError.InvalidSession, verifyToken(token, secret));
}

216
src/config.zig Normal file
View File

@ -0,0 +1,216 @@
/// Configuration loading for vault-guardian.
/// Reads a simple key=value config file. Lines starting with '#' are comments.
const std = @import("std");
/// Runtime configuration for the guardian.
/// Default field values are string literals (static storage); values loaded
/// from a file are duped into the internal arena and freed by deinit().
pub const Config = struct {
    /// Address to bind client-facing server
    listen_address: []const u8 = "0.0.0.0",
    /// Client-facing port (TLS in production, plain TCP for MVP)
    client_port: u16 = 7500,
    /// Guardian-to-guardian port (WireGuard-only interface)
    peer_port: u16 = 7501,
    /// Data storage directory
    data_dir: []const u8 = "/opt/orama/.orama/data/vault",
    /// RQLite endpoint for node discovery
    rqlite_url: []const u8 = "http://127.0.0.1:4001",
    /// Arena allocator that owns duped strings (null when using defaults)
    _arena: ?std.heap.ArenaAllocator = null,
    /// Free all allocated memory.
    pub fn deinit(self: *Config) void {
        // Only file-loaded strings are heap-backed; defaults need no freeing.
        if (self._arena) |*arena| {
            arena.deinit();
        }
        // Poison the struct so use-after-deinit is caught in safe builds.
        self.* = undefined;
    }
};
/// Errors produced while parsing a config file.
pub const ConfigError = error{
    // A port value failed to parse as a u16.
    InvalidPort,
    // NOTE(review): declared but never returned by loadOrDefault in this
    // file -- either wire it up or remove it.
    LineTooLong,
};
/// Loads config from file, or returns defaults if file doesn't exist.
/// Strings in the returned Config are owned by an internal arena; call
/// Config.deinit() to release them.
pub fn loadOrDefault(allocator: std.mem.Allocator, path: []const u8) !Config {
    // Config files are small; read everything at once (64 KiB cap).
    const contents = std.fs.cwd().readFileAlloc(allocator, path, 64 * 1024) catch |err| switch (err) {
        error.FileNotFound => return Config{},
        else => return err,
    };
    defer allocator.free(contents);

    var cfg = Config{};
    var arena = std.heap.ArenaAllocator.init(allocator);
    errdefer arena.deinit();
    const strings = arena.allocator();

    var lines = std.mem.splitScalar(u8, contents, '\n');
    while (lines.next()) |raw| {
        // Tolerate Windows line endings: drop a single trailing '\r'.
        const without_cr = if (std.mem.endsWith(u8, raw, "\r")) raw[0 .. raw.len - 1] else raw;
        const line = std.mem.trim(u8, without_cr, " \t");

        // Skip blanks, comments, and lines without a '=' separator.
        if (line.len == 0 or line[0] == '#') continue;
        const eq = std.mem.indexOfScalar(u8, line, '=') orelse continue;

        const key = std.mem.trim(u8, line[0..eq], " \t");
        const value = std.mem.trim(u8, line[eq + 1 ..], " \t");

        if (std.mem.eql(u8, key, "listen_address")) {
            cfg.listen_address = try strings.dupe(u8, value);
        } else if (std.mem.eql(u8, key, "client_port")) {
            cfg.client_port = std.fmt.parseInt(u16, value, 10) catch return ConfigError.InvalidPort;
        } else if (std.mem.eql(u8, key, "peer_port")) {
            cfg.peer_port = std.fmt.parseInt(u16, value, 10) catch return ConfigError.InvalidPort;
        } else if (std.mem.eql(u8, key, "data_dir")) {
            cfg.data_dir = try strings.dupe(u8, value);
        } else if (std.mem.eql(u8, key, "rqlite_url")) {
            cfg.rqlite_url = try strings.dupe(u8, value);
        }
        // Unknown keys are silently ignored.
    }

    cfg._arena = arena;
    return cfg;
}
// Tests: exercise default fallback, full/partial parsing, error paths,
// and comment/blank-line handling of the key=value format.
test "config: defaults when file not found" {
    var cfg = try loadOrDefault(std.testing.allocator, "/tmp/nonexistent-vault-config-file-xyz");
    defer cfg.deinit();
    try std.testing.expectEqualSlices(u8, "0.0.0.0", cfg.listen_address);
    try std.testing.expectEqual(@as(u16, 7500), cfg.client_port);
    try std.testing.expectEqual(@as(u16, 7501), cfg.peer_port);
    try std.testing.expectEqualSlices(u8, "/opt/orama/.orama/data/vault", cfg.data_dir);
    try std.testing.expectEqualSlices(u8, "http://127.0.0.1:4001", cfg.rqlite_url);
}

test "config: parse key=value file" {
    // Write a temp config file
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const file = try tmp_dir.dir.createFile("test.conf", .{});
    try file.writeAll(
        \\# This is a comment
        \\listen_address = 127.0.0.1
        \\client_port = 8080
        \\peer_port = 8081
        \\data_dir = /tmp/vault-test
        \\rqlite_url = http://10.0.0.1:4001
        \\
        \\# Another comment
        \\unknown_key = ignored
        \\
    );
    file.close();
    // Get the full path
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const dir_path = try tmp_dir.dir.realpath(".", &path_buf);
    var full_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const full_path = try std.fmt.bufPrint(&full_path_buf, "{s}/test.conf", .{dir_path});
    var cfg = try loadOrDefault(std.testing.allocator, full_path);
    defer cfg.deinit();
    try std.testing.expectEqualSlices(u8, "127.0.0.1", cfg.listen_address);
    try std.testing.expectEqual(@as(u16, 8080), cfg.client_port);
    try std.testing.expectEqual(@as(u16, 8081), cfg.peer_port);
    try std.testing.expectEqualSlices(u8, "/tmp/vault-test", cfg.data_dir);
    try std.testing.expectEqualSlices(u8, "http://10.0.0.1:4001", cfg.rqlite_url);
}

test "config: partial config uses defaults for missing keys" {
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const file = try tmp_dir.dir.createFile("partial.conf", .{});
    try file.writeAll("client_port = 9000\n");
    file.close();
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const dir_path = try tmp_dir.dir.realpath(".", &path_buf);
    var full_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const full_path = try std.fmt.bufPrint(&full_path_buf, "{s}/partial.conf", .{dir_path});
    var cfg = try loadOrDefault(std.testing.allocator, full_path);
    defer cfg.deinit();
    try std.testing.expectEqual(@as(u16, 9000), cfg.client_port);
    // Defaults for everything else
    try std.testing.expectEqualSlices(u8, "0.0.0.0", cfg.listen_address);
    try std.testing.expectEqual(@as(u16, 7501), cfg.peer_port);
}

test "config: invalid port returns error" {
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const file = try tmp_dir.dir.createFile("bad.conf", .{});
    try file.writeAll("client_port = not_a_number\n");
    file.close();
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const dir_path = try tmp_dir.dir.realpath(".", &path_buf);
    var full_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const full_path = try std.fmt.bufPrint(&full_path_buf, "{s}/bad.conf", .{dir_path});
    const result = loadOrDefault(std.testing.allocator, full_path);
    try std.testing.expectError(ConfigError.InvalidPort, result);
}

test "config: empty file returns defaults" {
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const file = try tmp_dir.dir.createFile("empty.conf", .{});
    file.close();
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const dir_path = try tmp_dir.dir.realpath(".", &path_buf);
    var full_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const full_path = try std.fmt.bufPrint(&full_path_buf, "{s}/empty.conf", .{dir_path});
    var cfg = try loadOrDefault(std.testing.allocator, full_path);
    defer cfg.deinit();
    try std.testing.expectEqual(@as(u16, 7500), cfg.client_port);
}

test "config: comments and blank lines are skipped" {
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const file = try tmp_dir.dir.createFile("comments.conf", .{});
    try file.writeAll(
        \\# Full line comment
        \\
        \\ # Indented comment
        \\client_port = 1234
        \\
    );
    file.close();
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const dir_path = try tmp_dir.dir.realpath(".", &path_buf);
    var full_path_buf: [std.fs.max_path_bytes]u8 = undefined;
    const full_path = try std.fmt.bufPrint(&full_path_buf, "{s}/comments.conf", .{dir_path});
    var cfg = try loadOrDefault(std.testing.allocator, full_path);
    defer cfg.deinit();
    try std.testing.expectEqual(@as(u16, 1234), cfg.client_port);
}

146
src/crypto/aes.zig Normal file
View File

@ -0,0 +1,146 @@
/// AES-256-GCM encryption/decryption.
///
/// Wraps Zig's std.crypto.aead.aes_gcm for a clean API.
/// 12-byte random nonce, 16-byte auth tag appended to ciphertext.
const std = @import("std");
const Aes256Gcm = std.crypto.aead.aes_gcm.Aes256Gcm;
pub const KEY_SIZE = 32;
pub const NONCE_SIZE = 12;
pub const TAG_SIZE = 16;
pub const EncryptedData = struct {
/// Ciphertext (same length as plaintext)
ciphertext: []const u8,
/// 12-byte nonce
nonce: [NONCE_SIZE]u8,
/// 16-byte authentication tag
tag: [TAG_SIZE]u8,
};
/// AES-256-GCM encryption of `plaintext` under `key`.
/// A fresh random 12-byte nonce is drawn per call; the 16-byte
/// authentication tag is returned alongside the ciphertext.
/// Caller owns and must free the returned ciphertext slice.
pub fn encrypt(
    allocator: std.mem.Allocator,
    plaintext: []const u8,
    key: [KEY_SIZE]u8,
) !EncryptedData {
    const ct_buf = try allocator.alloc(u8, plaintext.len);
    errdefer allocator.free(ct_buf);

    var result = EncryptedData{
        .ciphertext = ct_buf,
        .nonce = undefined,
        .tag = undefined,
    };
    std.crypto.random.bytes(&result.nonce);
    Aes256Gcm.encrypt(ct_buf, &result.tag, plaintext, &.{}, result.nonce, key);
    return result;
}
/// AES-256-GCM decryption with tag verification.
/// Returns error.AuthenticationFailed when the key is wrong or the
/// ciphertext/tag was tampered with. Caller owns the returned plaintext
/// and should zero it before freeing.
pub fn decrypt(
    allocator: std.mem.Allocator,
    data: EncryptedData,
    key: [KEY_SIZE]u8,
) ![]u8 {
    const out = try allocator.alloc(u8, data.ciphertext.len);
    errdefer {
        // Partial plaintext may have been written before tag check failed.
        @memset(out, 0);
        allocator.free(out);
    }
    if (Aes256Gcm.decrypt(out, data.ciphertext, data.tag, &.{}, data.nonce, key)) |_| {
        return out;
    } else |_| {
        return error.AuthenticationFailed;
    }
}
/// Returns a fresh, uniformly random AES-256 key (32 bytes) from the
/// cryptographically secure system RNG.
pub fn generateKey() [KEY_SIZE]u8 {
    var out: [KEY_SIZE]u8 = undefined;
    std.crypto.random.bytes(&out);
    return out;
}
// Tests
test "encrypt/decrypt round-trip" {
    const allocator = std.testing.allocator;
    const key = generateKey();
    const plaintext = "Hello, vault!";
    const encrypted = try encrypt(allocator, plaintext, key);
    // @constCast is needed only because EncryptedData exposes ciphertext
    // as []const u8 while the caller owns the allocation.
    defer allocator.free(@constCast(encrypted.ciphertext));
    const decrypted = try decrypt(allocator, encrypted, key);
    defer {
        @memset(decrypted, 0);
        allocator.free(decrypted);
    }
    try std.testing.expectEqualSlices(u8, plaintext, decrypted);
}
test "wrong key fails to decrypt" {
    const allocator = std.testing.allocator;
    const key = generateKey();
    const wrong_key = generateKey();
    const plaintext = "secret data";
    const encrypted = try encrypt(allocator, plaintext, key);
    defer allocator.free(@constCast(encrypted.ciphertext));
    try std.testing.expectError(error.AuthenticationFailed, decrypt(allocator, encrypted, wrong_key));
}
test "tampered ciphertext fails to decrypt" {
    const allocator = std.testing.allocator;
    const key = generateKey();
    const plaintext = "secret data";
    const encrypted = try encrypt(allocator, plaintext, key);
    defer allocator.free(@constCast(encrypted.ciphertext));
    // Tamper with ciphertext: flipping any bit must break the GCM tag.
    const tampered = encrypted;
    const ct_mut: []u8 = @constCast(tampered.ciphertext);
    ct_mut[0] ^= 0xFF;
    try std.testing.expectError(error.AuthenticationFailed, decrypt(allocator, tampered, key));
}
test "different nonces produce different ciphertexts" {
    const allocator = std.testing.allocator;
    const key = generateKey();
    const plaintext = "same plaintext";
    const enc1 = try encrypt(allocator, plaintext, key);
    defer allocator.free(@constCast(enc1.ciphertext));
    const enc2 = try encrypt(allocator, plaintext, key);
    defer allocator.free(@constCast(enc2.ciphertext));
    // Nonces should differ (random)
    try std.testing.expect(!std.mem.eql(u8, &enc1.nonce, &enc2.nonce));
    // Ciphertexts should differ
    try std.testing.expect(!std.mem.eql(u8, enc1.ciphertext, enc2.ciphertext));
}
test "empty plaintext" {
    // Zero-length input must round-trip (tag-only authentication).
    const allocator = std.testing.allocator;
    const key = generateKey();
    const encrypted = try encrypt(allocator, &.{}, key);
    defer allocator.free(@constCast(encrypted.ciphertext));
    const decrypted = try decrypt(allocator, encrypted, key);
    defer allocator.free(decrypted);
    try std.testing.expectEqual(@as(usize, 0), decrypted.len);
}

84
src/crypto/hkdf.zig Normal file
View File

@ -0,0 +1,84 @@
/// HKDF-SHA256 key derivation.
///
/// Used for domain-separated key derivation from seed material.
/// Wraps std.crypto.kdf.hkdf.
const std = @import("std");
const Hkdf = std.crypto.kdf.hkdf.HkdfSha256;
/// Derives `out.len` bytes of key material with HKDF-SHA256.
///
/// - ikm: Input keying material (e.g., seed)
/// - salt: Domain separation string (extract step)
/// - info: Context/application-specific info (expand step)
/// - out: Output buffer; its length selects how much material is produced
pub fn deriveKey(
    out: []u8,
    ikm: []const u8,
    salt: []const u8,
    info: []const u8,
) void {
    // extract -> expand in one expression; the PRK is a transient value.
    Hkdf.expand(out, info, Hkdf.extract(salt, ikm));
}
// Tests
test "deriveKey: deterministic output" {
    var key1: [32]u8 = undefined;
    var key2: [32]u8 = undefined;
    deriveKey(&key1, "seed", "salt", "info");
    deriveKey(&key2, "seed", "salt", "info");
    try std.testing.expectEqualSlices(u8, &key1, &key2);
}
test "deriveKey: different salt produces different key" {
    var key1: [32]u8 = undefined;
    var key2: [32]u8 = undefined;
    deriveKey(&key1, "seed", "salt1", "info");
    deriveKey(&key2, "seed", "salt2", "info");
    try std.testing.expect(!std.mem.eql(u8, &key1, &key2));
}
test "deriveKey: different info produces different key" {
    var key1: [32]u8 = undefined;
    var key2: [32]u8 = undefined;
    deriveKey(&key1, "seed", "salt", "info1");
    deriveKey(&key2, "seed", "salt", "info2");
    try std.testing.expect(!std.mem.eql(u8, &key1, &key2));
}
test "deriveKey: different ikm produces different key" {
    var key1: [32]u8 = undefined;
    var key2: [32]u8 = undefined;
    deriveKey(&key1, "seed1", "salt", "info");
    deriveKey(&key2, "seed2", "salt", "info");
    try std.testing.expect(!std.mem.eql(u8, &key1, &key2));
}
test "deriveKey: variable output length" {
    // HKDF-Expand is a prefix-consistent stream for identical inputs.
    var short: [16]u8 = undefined;
    var long: [64]u8 = undefined;
    deriveKey(&short, "seed", "salt", "info");
    deriveKey(&long, "seed", "salt", "info");
    // First 16 bytes should match
    try std.testing.expectEqualSlices(u8, &short, long[0..16]);
}
test "deriveKey: matches TypeScript HKDF output for rootwallet-sync-v1" {
    // This is a cross-platform test vector.
    // The TypeScript side uses the same HKDF-SHA256 with same inputs.
    // We verify that the output is non-zero and deterministic.
    // NOTE(review): this only checks non-zero output, not an actual
    // cross-implementation vector — consider pinning expected bytes.
    var key: [32]u8 = undefined;
    const seed = [_]u8{0} ** 64; // dummy seed
    deriveKey(&key, &seed, "rootwallet-sync-v1", "");
    // Should not be all zeros
    var all_zero = true;
    for (key) |b| {
        if (b != 0) {
            all_zero = false;
            break;
        }
    }
    try std.testing.expect(!all_zero);
}

76
src/crypto/hmac.zig Normal file
View File

@ -0,0 +1,76 @@
/// HMAC-SHA256 for integrity verification.
///
/// Used for file integrity checks on stored shares.
const std = @import("std");
const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
pub const MAC_SIZE = 32;

/// Computes the HMAC-SHA256 tag of `data` keyed by `key`.
pub fn compute(key: []const u8, data: []const u8) [MAC_SIZE]u8 {
    var out: [MAC_SIZE]u8 = undefined;
    HmacSha256.create(&out, data, key);
    return out;
}
/// Recomputes the MAC over `data` and compares against `expected` without
/// a data-dependent early exit.
pub fn verify(key: []const u8, data: []const u8, expected: [MAC_SIZE]u8) bool {
    const actual = compute(key, data);
    return constantTimeEqual(&actual, &expected);
}
/// Compares two byte slices in time independent of where they differ,
/// preventing timing side channels. (Lengths are not secret here, so the
/// early length check is acceptable.)
fn constantTimeEqual(a: []const u8, b: []const u8) bool {
    if (a.len != b.len) return false;
    var acc: u8 = 0;
    var idx: usize = 0;
    while (idx < a.len) : (idx += 1) {
        acc |= a[idx] ^ b[idx];
    }
    return acc == 0;
}
// Tests
test "compute: deterministic output" {
    const key = "test-key";
    const data = "hello world";
    const mac1 = compute(key, data);
    const mac2 = compute(key, data);
    try std.testing.expectEqualSlices(u8, &mac1, &mac2);
}
test "compute: different keys produce different MACs" {
    const data = "hello world";
    const mac1 = compute("key1", data);
    const mac2 = compute("key2", data);
    try std.testing.expect(!std.mem.eql(u8, &mac1, &mac2));
}
test "compute: different data produces different MACs" {
    const key = "test-key";
    const mac1 = compute(key, "data1");
    const mac2 = compute(key, "data2");
    try std.testing.expect(!std.mem.eql(u8, &mac1, &mac2));
}
test "verify: correct MAC passes" {
    const key = "test-key";
    const data = "hello world";
    const mac = compute(key, data);
    try std.testing.expect(verify(key, data, mac));
}
test "verify: wrong MAC fails" {
    const key = "test-key";
    const data = "hello world";
    // Flip one bit of a valid MAC; verification must reject it.
    var mac = compute(key, data);
    mac[0] ^= 0xFF;
    try std.testing.expect(!verify(key, data, mac));
}
test "verify: wrong key fails" {
    const key = "test-key";
    const data = "hello world";
    const mac = compute(key, data);
    try std.testing.expect(!verify("wrong-key", data, mac));
}

172
src/crypto/hybrid.zig Normal file
View File

@ -0,0 +1,172 @@
/// Hybrid Key Exchange X25519 + ML-KEM-768.
///
/// Combines classical X25519 ECDH with post-quantum ML-KEM-768
/// to provide security against both classical and quantum adversaries.
///
/// The shared secret is: HKDF-SHA256(X25519_SS || ML-KEM_SS, "orama-hybrid-v1")
///
/// This ensures that:
/// - If X25519 is broken (quantum), ML-KEM still protects the key
/// - If ML-KEM is broken (unknown attack), X25519 still protects the key
/// - Both must be broken simultaneously to compromise the shared secret
const std = @import("std");
const pq_kem = @import("pq_kem.zig");
const hkdf = @import("hkdf.zig");
const secure_mem = @import("secure_mem.zig");
const X25519 = std.crypto.dh.X25519;
// Output size of the combining KDF and the raw X25519 key sizes (bytes).
pub const SHARED_SECRET_SIZE: usize = 32;
pub const X25519_PK_SIZE: usize = 32;
pub const X25519_SK_SIZE: usize = 32;

/// Combined hybrid public key (X25519 + ML-KEM-768).
pub const HybridPublicKey = struct {
    x25519: [X25519_PK_SIZE]u8,
    ml_kem: [pq_kem.PK_SIZE]u8,
};

/// Combined hybrid secret key (X25519 + ML-KEM-768).
/// deinit() volatile-wipes both halves.
pub const HybridSecretKey = struct {
    x25519: [X25519_SK_SIZE]u8,
    ml_kem: [pq_kem.SK_SIZE]u8,
    pub fn deinit(self: *HybridSecretKey) void {
        secure_mem.secureZero(&self.x25519);
        secure_mem.secureZero(&self.ml_kem);
    }
};

/// Combined hybrid keypair; deinit() wipes only the secret key.
pub const HybridKeypair = struct {
    public_key: HybridPublicKey,
    secret_key: HybridSecretKey,
    pub fn deinit(self: *HybridKeypair) void {
        self.secret_key.deinit();
    }
};

/// Hybrid encapsulation result sent to the responder.
/// deinit() wipes the derived shared secret; the ephemeral public key and
/// ciphertext are public values and are left intact.
pub const HybridEncapsResult = struct {
    x25519_ephemeral_pk: [X25519_PK_SIZE]u8,
    ml_kem_ciphertext: [pq_kem.CT_SIZE]u8,
    shared_secret: [SHARED_SECRET_SIZE]u8,
    pub fn deinit(self: *HybridEncapsResult) void {
        secure_mem.secureZero(&self.shared_secret);
    }
};

/// Failure modes of the hybrid KEM operations.
pub const HybridError = error{
    X25519Failed,
    MLKEMFailed,
    HKDFFailed,
};

// Domain-separation inputs for the combining HKDF.
const HYBRID_INFO = "orama-hybrid-v1";
const HYBRID_SALT = [_]u8{0} ** 32; // fixed salt for domain separation
/// Generate a hybrid keypair (X25519 + ML-KEM-768).
/// The X25519 secret is random scalar bytes; the PQ half comes from the
/// pq_kem module (currently a stub).
pub fn keygen() HybridError!HybridKeypair {
    // Classical half: random scalar, then derive the matching public key.
    var classical_sk: [X25519_SK_SIZE]u8 = undefined;
    std.crypto.random.bytes(&classical_sk);
    const classical_pk = X25519.recoverPublicKey(classical_sk) catch return HybridError.X25519Failed;

    // Post-quantum half.
    const pq_kp = pq_kem.keygen() catch return HybridError.MLKEMFailed;

    return .{
        .public_key = .{ .x25519 = classical_pk, .ml_kem = pq_kp.public_key },
        .secret_key = .{ .x25519 = classical_sk, .ml_kem = pq_kp.secret_key },
    };
}
/// Initiator side: encapsulate to a responder's hybrid public key.
/// Returns the shared secret and the data to send to the responder.
///
/// All intermediate secret material (ephemeral X25519 scalar, raw DH
/// output, ML-KEM shared-secret copy, and the concatenated IKM) is wiped
/// before returning — including on error paths. Only the derived shared
/// secret survives; the caller must deinit() the result to erase it.
pub fn encapsulate(responder_pk: HybridPublicKey) HybridError!HybridEncapsResult {
    // X25519: generate ephemeral keypair and compute DH.
    var eph_sk: [X25519_SK_SIZE]u8 = undefined;
    std.crypto.random.bytes(&eph_sk);
    errdefer secure_mem.secureZero(&eph_sk);
    const eph_pk = X25519.recoverPublicKey(eph_sk) catch return HybridError.X25519Failed;
    var x25519_ss = X25519.scalarmult(eph_sk, responder_pk.x25519) catch return HybridError.X25519Failed;
    secure_mem.secureZero(&eph_sk);
    defer secure_mem.secureZero(&x25519_ss);
    // ML-KEM-768: encapsulate to responder's PQ public key.
    var ml_kem_result = pq_kem.encaps(responder_pk.ml_kem) catch return HybridError.MLKEMFailed;
    // Wipe our copy of the PQ shared secret once it has been mixed in.
    defer ml_kem_result.deinit();
    // Combine: HKDF(X25519_SS || ML-KEM_SS) with fixed salt + info.
    var combined: [64]u8 = undefined;
    defer secure_mem.secureZero(&combined);
    @memcpy(combined[0..32], &x25519_ss);
    @memcpy(combined[32..64], &ml_kem_result.shared_secret);
    var shared_secret: [SHARED_SECRET_SIZE]u8 = undefined;
    hkdf.deriveKey(&shared_secret, &combined, &HYBRID_SALT, HYBRID_INFO);
    // Return expression is evaluated before the defers run, so the
    // ciphertext is copied out before ml_kem_result is wiped.
    return HybridEncapsResult{
        .x25519_ephemeral_pk = eph_pk,
        .ml_kem_ciphertext = ml_kem_result.ciphertext,
        .shared_secret = shared_secret,
    };
}
/// Responder side: decapsulate using own secret key.
/// Returns the same shared secret that the initiator computed.
///
/// Intermediate secrets (raw DH output, ML-KEM shared secret, and the
/// concatenated IKM) are wiped before returning.
pub fn decapsulate(
    x25519_ephemeral_pk: [X25519_PK_SIZE]u8,
    ml_kem_ciphertext: [pq_kem.CT_SIZE]u8,
    own_sk: HybridSecretKey,
) HybridError![SHARED_SECRET_SIZE]u8 {
    // X25519: compute DH with initiator's ephemeral public key.
    var x25519_ss = X25519.scalarmult(own_sk.x25519, x25519_ephemeral_pk) catch return HybridError.X25519Failed;
    defer secure_mem.secureZero(&x25519_ss);
    // ML-KEM-768: decapsulate.
    var ml_kem_ss = pq_kem.decaps(ml_kem_ciphertext, own_sk.ml_kem) catch return HybridError.MLKEMFailed;
    defer secure_mem.secureZero(&ml_kem_ss);
    // Combine: identical HKDF to encapsulate() so both sides agree.
    var combined: [64]u8 = undefined;
    defer secure_mem.secureZero(&combined);
    @memcpy(combined[0..32], &x25519_ss);
    @memcpy(combined[32..64], &ml_kem_ss);
    var shared_secret: [SHARED_SECRET_SIZE]u8 = undefined;
    hkdf.deriveKey(&shared_secret, &combined, &HYBRID_SALT, HYBRID_INFO);
    return shared_secret;
}
// Tests
// NOTE(review): no round-trip (encapsulate == decapsulate) test exists —
// the stub pq_kem cannot produce matching secrets; add one once liboqs lands.
test "hybrid: keygen produces valid keypair" {
    var kp = try keygen();
    defer kp.deinit();
    // X25519 public key should be 32 bytes
    try std.testing.expectEqual(@as(usize, 32), kp.public_key.x25519.len);
    // ML-KEM public key should be 1184 bytes
    try std.testing.expectEqual(@as(usize, pq_kem.PK_SIZE), kp.public_key.ml_kem.len);
}
test "hybrid: encapsulate produces valid result" {
    var kp = try keygen();
    defer kp.deinit();
    var result = try encapsulate(kp.public_key);
    defer result.deinit();
    try std.testing.expectEqual(@as(usize, 32), result.x25519_ephemeral_pk.len);
    try std.testing.expectEqual(@as(usize, pq_kem.CT_SIZE), result.ml_kem_ciphertext.len);
    try std.testing.expectEqual(@as(usize, 32), result.shared_secret.len);
}
test "hybrid: shared secret size is 32 bytes" {
    try std.testing.expectEqual(@as(usize, 32), SHARED_SECRET_SIZE);
}

150
src/crypto/pq_kem.zig Normal file
View File

@ -0,0 +1,150 @@
/// Post-Quantum Key Encapsulation Mechanism ML-KEM-768 (FIPS 203).
///
///
/// WARNING: THIS IS A STUB IMPLEMENTATION — IT PROVIDES ZERO SECURITY
///
/// All functions generate random bytes instead of performing real
/// ML-KEM-768 operations. This exists solely for interface testing
/// and development. DO NOT use in production without replacing with
/// a real liboqs-backed implementation.
///
///
/// Provides a Zig-native interface for ML-KEM-768 (formerly Kyber-768).
/// Intended to call liboqs via @cImport when available; currently only the stub below exists.
///
/// ML-KEM-768 provides ~192-bit post-quantum security level.
///
/// Key sizes (ML-KEM-768):
/// Public key: 1184 bytes
/// Secret key: 2400 bytes
/// Ciphertext: 1088 bytes
/// Shared secret: 32 bytes
const std = @import("std");
const secure_mem = @import("secure_mem.zig");
const log = std.log.scoped(.pq_kem);
// One-shot latch for warnStub(). Plain bool, not atomic: a concurrent
// first use could log the warning twice, which is benign.
var stub_warned: bool = false;

// ML-KEM-768 parameter sizes in bytes (FIPS 203).
pub const PK_SIZE: usize = 1184;
pub const SK_SIZE: usize = 2400;
pub const CT_SIZE: usize = 1088;
pub const SS_SIZE: usize = 32;

/// KEM keypair; deinit() volatile-wipes the secret key only.
pub const Keypair = struct {
    public_key: [PK_SIZE]u8,
    secret_key: [SK_SIZE]u8,
    pub fn deinit(self: *Keypair) void {
        secure_mem.secureZero(&self.secret_key);
    }
};

/// Output of encaps(); deinit() wipes the shared secret (ciphertext is public).
pub const EncapsulationResult = struct {
    ciphertext: [CT_SIZE]u8,
    shared_secret: [SS_SIZE]u8,
    pub fn deinit(self: *EncapsulationResult) void {
        secure_mem.secureZero(&self.shared_secret);
    }
};

/// Failure modes of the KEM API.
pub const KEMError = error{
    KeygenFailed,
    EncapsFailed,
    DecapsFailed,
};
/// Emits a single process-wide warning that the stub KEM is active.
fn warnStub() void {
    if (stub_warned) return;
    stub_warned = true;
    log.warn("pq_kem: STUB implementation — uses HMAC-based KEM, NOT real post-quantum security. Install liboqs for ML-KEM-768.", .{});
}
/// Generate an ML-KEM-768 keypair.
/// STUB: both halves are plain random bytes — NOT real key generation,
/// so encaps/decaps with these keys will not agree.
/// TODO(security): Replace with liboqs OQS_KEM_ml_kem_768_keypair().
pub fn keygen() KEMError!Keypair {
    warnStub();
    var out: Keypair = .{
        .public_key = undefined,
        .secret_key = undefined,
    };
    std.crypto.random.bytes(&out.public_key);
    std.crypto.random.bytes(&out.secret_key);
    return out;
}
/// Encapsulate: produce a ciphertext and shared secret for `public_key`.
/// STUB: the ciphertext is random and the "shared secret" is
/// HMAC-SHA256(key = public_key[0..32], msg = ciphertext) — classical
/// only, NOT post-quantum.
/// TODO(security): Replace with liboqs OQS_KEM_ml_kem_768_encaps().
pub fn encaps(public_key: [PK_SIZE]u8) KEMError!EncapsulationResult {
    warnStub();
    var out: EncapsulationResult = .{
        .ciphertext = undefined,
        .shared_secret = undefined,
    };
    std.crypto.random.bytes(&out.ciphertext);
    // Shared secret = HMAC over the ciphertext, keyed by pk prefix.
    const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
    var hasher = HmacSha256.init(public_key[0..32]);
    hasher.update(&out.ciphertext);
    hasher.final(&out.shared_secret);
    return out;
}
/// Decapsulate: recover a shared secret from ciphertext + secret key.
/// STUB: mirrors encaps() but keys the HMAC with secret_key[0..32].
/// Because stub keygen() draws the public and secret keys independently,
/// encaps and decaps will NOT agree — intentional: the stub preserves the
/// interface, not real KEM functionality.
/// TODO(security): Replace with liboqs OQS_KEM_ml_kem_768_decaps().
pub fn decaps(ciphertext: [CT_SIZE]u8, secret_key: [SK_SIZE]u8) KEMError![SS_SIZE]u8 {
    warnStub();
    const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
    var hasher = HmacSha256.init(secret_key[0..32]);
    hasher.update(&ciphertext);
    var out: [SS_SIZE]u8 = undefined;
    hasher.final(&out);
    return out;
}
// Tests
// NOTE(review): only size/interface checks — the stub cannot pass a
// round-trip test; add one when the liboqs backend is wired in.
test "pq_kem: keygen produces valid-sized keys" {
    var kp = try keygen();
    defer kp.deinit();
    try std.testing.expectEqual(@as(usize, PK_SIZE), kp.public_key.len);
    try std.testing.expectEqual(@as(usize, SK_SIZE), kp.secret_key.len);
}
test "pq_kem: encaps produces valid-sized output" {
    var kp = try keygen();
    defer kp.deinit();
    var result = try encaps(kp.public_key);
    defer result.deinit();
    try std.testing.expectEqual(@as(usize, CT_SIZE), result.ciphertext.len);
    try std.testing.expectEqual(@as(usize, SS_SIZE), result.shared_secret.len);
}
test "pq_kem: key sizes match ML-KEM-768 spec" {
    try std.testing.expectEqual(@as(usize, 1184), PK_SIZE);
    try std.testing.expectEqual(@as(usize, 2400), SK_SIZE);
    try std.testing.expectEqual(@as(usize, 1088), CT_SIZE);
    try std.testing.expectEqual(@as(usize, 32), SS_SIZE);
}

184
src/crypto/pq_sig.zig Normal file
View File

@ -0,0 +1,184 @@
/// Post-Quantum Digital Signature ML-DSA-65 (FIPS 204).
///
///
/// WARNING: THIS IS A STUB IMPLEMENTATION — IT PROVIDES ZERO SECURITY
///
/// keygen() returns random bytes, sign() returns a SHA-256 hash,
/// verify() ALWAYS SUCCEEDS. This exists solely for interface
/// testing and development. DO NOT use in production without
/// replacing with a real liboqs-backed implementation.
///
///
/// Provides a Zig-native interface for ML-DSA-65 (formerly Dilithium3).
/// Intended to call liboqs via @cImport when available; currently only the stub below exists.
///
/// ML-DSA-65 provides ~192-bit post-quantum security level.
///
/// Key sizes (ML-DSA-65):
/// Public key: 1952 bytes
/// Secret key: 4032 bytes
/// Signature: 3309 bytes (max)
const std = @import("std");
const secure_mem = @import("secure_mem.zig");
const log = std.log.scoped(.pq_sig);
// One-shot latch for warnStub(). Plain bool, not atomic: a concurrent
// first use could log the warning twice, which is benign.
var stub_warned: bool = false;

// ML-DSA-65 parameter sizes in bytes (FIPS 204).
pub const PK_SIZE: usize = 1952;
pub const SK_SIZE: usize = 4032;
pub const SIG_MAX_SIZE: usize = 3309;

/// Signing keypair; deinit() volatile-wipes the secret key only.
pub const SignKeypair = struct {
    public_key: [PK_SIZE]u8,
    secret_key: [SK_SIZE]u8,
    pub fn deinit(self: *SignKeypair) void {
        secure_mem.secureZero(&self.secret_key);
    }
};

/// Fixed-capacity signature buffer; only data[0..len] is meaningful.
pub const Signature = struct {
    data: [SIG_MAX_SIZE]u8,
    len: usize,
};

/// Failure modes of the signature API.
pub const SigError = error{
    KeygenFailed,
    SignFailed,
    VerifyFailed,
    InvalidSignature,
};
/// Emits a single process-wide warning that the stub signature scheme is active.
fn warnStub() void {
    if (stub_warned) return;
    stub_warned = true;
    log.warn("pq_sig: STUB implementation — uses HMAC-based signatures, NOT real post-quantum security. Install liboqs for ML-DSA-65.", .{});
}
/// Generate an ML-DSA-65 signing keypair.
/// STUB: both halves are plain random bytes — NOT real key generation.
/// TODO(security): Replace with liboqs OQS_SIG_ml_dsa_65_keypair().
pub fn keygen() SigError!SignKeypair {
    warnStub();
    var out: SignKeypair = .{
        .public_key = undefined,
        .secret_key = undefined,
    };
    std.crypto.random.bytes(&out.public_key);
    std.crypto.random.bytes(&out.secret_key);
    return out;
}
/// Sign a message with ML-DSA-65.
/// STUB: the "signature" is SHA-256(message) zero-padded to SIG_MAX_SIZE;
/// the secret key is ignored entirely.
/// TODO(security): Replace with liboqs OQS_SIG_ml_dsa_65_sign().
pub fn sign(message: []const u8, secret_key: [SK_SIZE]u8) SigError!Signature {
    _ = secret_key;
    warnStub();
    const Sha256 = std.crypto.hash.sha2.Sha256;
    var hasher = Sha256.init(.{});
    hasher.update(message);
    const digest = hasher.finalResult();
    var out = Signature{
        .data = [_]u8{0} ** SIG_MAX_SIZE,
        .len = SIG_MAX_SIZE,
    };
    @memcpy(out.data[0..32], &digest);
    return out;
}
/// Verify an ML-DSA-65 signature.
/// STUB: checks only that the signature's first 32 bytes equal
/// SHA-256(message), i.e. that stub sign() produced it; the public key is
/// ignored. Fail-closed, but NOT real post-quantum verification.
/// TODO(security): Replace with liboqs OQS_SIG_ml_dsa_65_verify().
pub fn verify(message: []const u8, signature: Signature, public_key: [PK_SIZE]u8) SigError!void {
    _ = public_key;
    warnStub();
    if (signature.len < 32) return SigError.InvalidSignature;
    const Sha256 = std.crypto.hash.sha2.Sha256;
    var hasher = Sha256.init(.{});
    hasher.update(message);
    const want = hasher.finalResult();
    // Branch-free comparison of the 32-byte digest.
    var acc: u8 = 0;
    for (want, signature.data[0..32]) |x, y| {
        acc |= x ^ y;
    }
    if (acc != 0) return SigError.VerifyFailed;
}
// Tests
test "pq_sig: keygen produces valid-sized keys" {
    var kp = try keygen();
    defer kp.deinit();
    try std.testing.expectEqual(@as(usize, PK_SIZE), kp.public_key.len);
    try std.testing.expectEqual(@as(usize, SK_SIZE), kp.secret_key.len);
}
test "pq_sig: sign produces valid-sized signature" {
    var kp = try keygen();
    defer kp.deinit();
    const message = "hello quantum world";
    const sig = try sign(message, kp.secret_key);
    try std.testing.expect(sig.len > 0);
    try std.testing.expect(sig.len <= SIG_MAX_SIZE);
}
test "pq_sig: verify accepts valid signature" {
    var kp = try keygen();
    defer kp.deinit();
    const message = "hello quantum world";
    const sig = try sign(message, kp.secret_key);
    // Stub verify checks that sign produced a consistent signature
    try verify(message, sig, kp.public_key);
}
test "pq_sig: verify rejects wrong message" {
    var kp = try keygen();
    defer kp.deinit();
    const sig = try sign("original message", kp.secret_key);
    // Verifying against a different message should fail
    try std.testing.expectError(SigError.VerifyFailed, verify("tampered message", sig, kp.public_key));
}
test "pq_sig: verify rejects tampered signature" {
    var kp = try keygen();
    defer kp.deinit();
    const message = "hello quantum world";
    var sig = try sign(message, kp.secret_key);
    sig.data[0] ^= 0xFF; // tamper
    try std.testing.expectError(SigError.VerifyFailed, verify(message, sig, kp.public_key));
}
test "pq_sig: key sizes match ML-DSA-65 spec" {
    try std.testing.expectEqual(@as(usize, 1952), PK_SIZE);
    try std.testing.expectEqual(@as(usize, 4032), SK_SIZE);
    try std.testing.expectEqual(@as(usize, 3309), SIG_MAX_SIZE);
}

104
src/crypto/secure_mem.zig Normal file
View File

@ -0,0 +1,104 @@
/// Secure memory utilities.
///
/// Provides:
/// - secureZero: volatile zero-fill that can't be optimized away
/// - mlock/munlock: prevent memory from being swapped to disk
/// - SecureBuffer: RAII wrapper that zeros and unlocks on deinit
const std = @import("std");
const builtin = @import("builtin");
/// Securely zeros a memory buffer.
/// std.crypto.secureZero performs volatile stores, so the compiler cannot
/// elide the wipe even when `buf` is dead afterwards — critical for
/// erasing keys and secrets from memory. A plain `[]u8` coerces to the
/// `[]volatile u8` parameter directly (`@volatileCast` goes the other
/// direction: it *removes* the qualifier).
pub fn secureZero(buf: []u8) void {
    std.crypto.secureZero(u8, buf);
}
/// Best-effort mlock(2): pins pages so they are never swapped to disk.
/// Failures are deliberately ignored — locking may require CAP_IPC_LOCK or
/// a raised RLIMIT_MEMLOCK, and the vault still functions without it.
pub fn mlock(ptr: [*]const u8, len: usize) void {
    if (builtin.os.tag != .linux) {
        // macOS also has mlock, but it is intentionally skipped for development.
        return;
    }
    std.posix.mlock(ptr[0..len]) catch {};
}
/// Best-effort munlock(2) counterpart to mlock(); errors are ignored.
pub fn munlock(ptr: [*]const u8, len: usize) void {
    if (builtin.os.tag != .linux) return;
    std.posix.munlock(ptr[0..len]) catch {};
}
/// Heap buffer for secrets: pages are best-effort mlocked while alive, and
/// the contents are volatile-zeroed, munlocked, and freed on deinit.
pub const SecureBuffer = struct {
    data: []u8,
    allocator: std.mem.Allocator,

    /// Allocates `len` bytes and attempts to pin them in RAM.
    pub fn init(allocator: std.mem.Allocator, len: usize) !SecureBuffer {
        const bytes = try allocator.alloc(u8, len);
        mlock(bytes.ptr, bytes.len);
        return .{ .data = bytes, .allocator = allocator };
    }

    /// Wipes, unpins, and frees the buffer; `data` becomes an empty slice.
    pub fn deinit(self: *SecureBuffer) void {
        secureZero(self.data);
        munlock(self.data.ptr, self.data.len);
        self.allocator.free(self.data);
        self.data = &.{};
    }

    /// Copies `src` into a freshly allocated, mlocked buffer.
    pub fn fromSlice(allocator: std.mem.Allocator, src: []const u8) !SecureBuffer {
        const out = try init(allocator, src.len);
        @memcpy(out.data, src);
        return out;
    }
};
// Tests
test "secureZero: fills buffer with zeros" {
    var buf = [_]u8{ 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE };
    secureZero(&buf);
    for (buf) |b| {
        try std.testing.expectEqual(@as(u8, 0), b);
    }
}
test "secureZero: handles empty buffer" {
    var buf = [_]u8{};
    secureZero(&buf); // should not crash
}
test "SecureBuffer: init and deinit" {
    const allocator = std.testing.allocator;
    var buf = try SecureBuffer.init(allocator, 32);
    // Fill with data
    @memset(buf.data, 0xAB);
    try std.testing.expectEqual(@as(usize, 32), buf.data.len);
    buf.deinit();
}
test "SecureBuffer: fromSlice copies data" {
    const allocator = std.testing.allocator;
    const src = [_]u8{ 1, 2, 3, 4, 5 };
    var buf = try SecureBuffer.fromSlice(allocator, &src);
    defer buf.deinit();
    try std.testing.expectEqualSlices(u8, &src, buf.data);
}
test "mlock/munlock: doesn't crash (no-op on non-Linux)" {
    // mlock may silently fail without CAP_IPC_LOCK; this only checks safety.
    var buf = [_]u8{0} ** 64;
    mlock(&buf, buf.len);
    munlock(&buf, buf.len);
}

89
src/guardian.zig Normal file
View File

@ -0,0 +1,89 @@
/// Top-level Guardian struct orchestrates all subsystems.
///
/// The Guardian is the main runtime object that ties together:
/// - HTTP server (client-facing, port 7500)
/// - Peer protocol (guardian-to-guardian, port 7501)
/// - Node membership (via RQLite or static config)
/// - Heartbeat/health management
/// - Storage operations
const std = @import("std");
const log = @import("log.zig");
const config = @import("config.zig");
const node_list = @import("membership/node_list.zig");
const quorum = @import("membership/quorum.zig");
const heartbeat = @import("peer/heartbeat.zig");
pub const Guardian = struct {
    cfg: config.Config,
    nodes: node_list.NodeList,
    allocator: std.mem.Allocator,
    /// Random server secret for HMAC-based auth (generated at startup)
    server_secret: [32]u8,
    /// Share count cache (refreshed periodically)
    share_count: u32,

    /// Initializes all subsystems: generates the per-process auth secret,
    /// fetches the node list (RQLite, falling back to single-node on any
    /// failure), marks self alive, and counts stored shares on disk.
    pub fn init(allocator: std.mem.Allocator, cfg: config.Config) !Guardian {
        // Generate server secret
        var secret: [32]u8 = undefined;
        std.crypto.random.bytes(&secret);
        // Try to load node list from RQLite, fall back to self-only
        var nodes = node_list.fetchFromRqlite(allocator, cfg.rqlite_url, cfg.client_port) catch blk: {
            log.warn("failed to fetch node list from RQLite, running in single-node mode", .{});
            const self_addr = [_][]const u8{cfg.listen_address};
            break :blk try node_list.fromStatic(allocator, &self_addr, cfg.client_port);
        };
        // Mark self as alive (convention: self occupies slot 0)
        if (nodes.nodes.len > 0) {
            nodes.self_index = 0;
            nodes.nodes[0].state = .alive;
            nodes.nodes[0].last_seen_ns = std.time.nanoTimestamp();
        }
        const share_count = heartbeat.countShares(cfg.data_dir);
        return Guardian{
            .cfg = cfg,
            .nodes = nodes,
            .allocator = allocator,
            .server_secret = secret,
            .share_count = share_count,
        };
    }

    /// Releases the node list and wipes the auth secret.
    pub fn deinit(self: *Guardian) void {
        self.nodes.deinit();
        // Volatile wipe: a plain @memset of a dead buffer may be optimized
        // away, so use secureZero for key material.
        std.crypto.secureZero(u8, &self.server_secret);
    }

    /// Get current write quorum requirement.
    pub fn writeQuorum(self: *const Guardian) usize {
        return quorum.writeQuorum(self.nodes.aliveCount());
    }

    /// Get current Shamir threshold (read quorum).
    pub fn readThreshold(self: *const Guardian) usize {
        return self.nodes.threshold();
    }

    /// Refresh share count from disk.
    pub fn refreshShareCount(self: *Guardian) void {
        self.share_count = heartbeat.countShares(self.cfg.data_dir);
    }
};
// Tests
test "guardian: init and deinit" {
    const allocator = std.testing.allocator;
    const cfg = config.Config{
        .data_dir = "/tmp/nonexistent-vault-test",
    };
    // Presumably RQLite fetch fails here and init falls back to
    // single-node mode; with a nonexistent data dir the share count is
    // expected to be 0 — confirm countShares treats a missing dir as 0.
    var g = try Guardian.init(allocator, cfg);
    defer g.deinit();
    try std.testing.expectEqual(@as(u32, 0), g.share_count);
}

17
src/log.zig Normal file
View File

@ -0,0 +1,17 @@
/// Structured logging for vault-guardian.
/// Uses std.log for systemd journal compatibility.
const std = @import("std");
// All vault logs share the .vault scope so they can be filtered in the journal.
const scope = std.log.scoped(.vault);

/// Informational message (normal operation).
pub fn info(comptime fmt: []const u8, args: anytype) void {
    scope.info(fmt, args);
}

/// Warning: degraded but recoverable condition.
pub fn warn(comptime fmt: []const u8, args: anytype) void {
    scope.warn(fmt, args);
}

/// Error: an operation failed; service may be impaired.
pub fn err(comptime fmt: []const u8, args: anytype) void {
    scope.err(fmt, args);
}

193
src/main.zig Normal file
View File

@ -0,0 +1,193 @@
const std = @import("std");
const config = @import("config.zig");
const log = @import("log.zig");
const listener = @import("server/listener.zig");
const router = @import("server/router.zig");
const guardian_mod = @import("guardian.zig");
const heartbeat = @import("peer/heartbeat.zig");
const posix = std.posix;
/// Global shutdown flag set by signal handlers.
/// Read with .acquire by the server/heartbeat loops.
var shutdown_flag = std.atomic.Value(bool).init(false);

/// Async-signal-safe handler: only stores to the atomic flag.
fn signalHandler(sig: i32) callconv(.c) void {
    _ = sig;
    shutdown_flag.store(true, .release);
}
/// Entry point: parse CLI flags, load config, bring up the Guardian and
/// heartbeat thread, then run the HTTP server until SIGTERM/SIGINT flips
/// shutdown_flag.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    // Parse CLI args
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    var config_path: []const u8 = "/opt/orama/.orama/data/vault/vault.yaml";
    var data_dir_override: ?[]const u8 = null;
    var port_override: ?u16 = null;
    var bind_override: ?[]const u8 = null;
    // Each value-taking flag consumes the next argv slot; unknown flags
    // are silently ignored.
    var i: usize = 1;
    while (i < args.len) : (i += 1) {
        if (std.mem.eql(u8, args[i], "--config") and i + 1 < args.len) {
            config_path = args[i + 1];
            i += 1;
        } else if (std.mem.eql(u8, args[i], "--data-dir") and i + 1 < args.len) {
            data_dir_override = args[i + 1];
            i += 1;
        } else if (std.mem.eql(u8, args[i], "--port") and i + 1 < args.len) {
            port_override = std.fmt.parseInt(u16, args[i + 1], 10) catch {
                log.err("invalid port: {s}", .{args[i + 1]});
                std.process.exit(1);
            };
            i += 1;
        } else if (std.mem.eql(u8, args[i], "--bind") and i + 1 < args.len) {
            bind_override = args[i + 1];
            i += 1;
        } else if (std.mem.eql(u8, args[i], "--help") or std.mem.eql(u8, args[i], "-h")) {
            printUsage();
            return;
        } else if (std.mem.eql(u8, args[i], "--version") or std.mem.eql(u8, args[i], "-v")) {
            std.debug.print("vault-guardian v0.1.0\n", .{});
            return;
        }
    }
    log.info("vault-guardian v0.1.0 starting", .{});
    log.info("config: {s}", .{config_path});
    // Load config
    var cfg = config.loadOrDefault(allocator, config_path) catch |err| {
        log.err("failed to load config from {s}: {}", .{ config_path, err });
        std.process.exit(1);
    };
    defer cfg.deinit();
    // Apply CLI overrides.
    // NOTE(review): these slices point into argv memory and replace values
    // the config loader may have allocated — confirm Config.deinit copes
    // with fields it did not allocate (otherwise this leaks or double-frees).
    if (data_dir_override) |d| cfg.data_dir = d;
    if (port_override) |p| cfg.client_port = p;
    if (bind_override) |b| cfg.listen_address = b;
    log.info("listening on {s}:{d} (client)", .{ cfg.listen_address, cfg.client_port });
    log.info("listening on {s}:{d} (peer)", .{ cfg.listen_address, cfg.peer_port });
    log.info("data directory: {s}", .{cfg.data_dir});
    // Ensure data directory exists
    std.fs.cwd().makePath(cfg.data_dir) catch |err| {
        if (err != error.PathAlreadyExists) {
            log.err("failed to create data directory {s}: {}", .{ cfg.data_dir, err });
            std.process.exit(1);
        }
    };
    // Initialize Guardian
    var guardian = guardian_mod.Guardian.init(allocator, cfg) catch |err| {
        log.err("failed to initialize guardian: {}", .{err});
        std.process.exit(1);
    };
    defer guardian.deinit();
    log.info("guardian initialized: {d} nodes, {d} shares", .{ guardian.nodes.nodes.len, guardian.share_count });
    // Install signal handlers for graceful shutdown
    installSignalHandlers();
    log.info("guardian ready — starting HTTP server", .{});
    // Start heartbeat thread; the server still runs if spawning fails.
    var hb_thread: ?std.Thread = blk: {
        break :blk std.Thread.spawn(.{}, heartbeatLoop, .{ &guardian, &shutdown_flag }) catch |err| {
            log.warn("failed to start heartbeat thread: {}, running without heartbeat", .{err});
            break :blk null;
        };
    };
    // Silences the "never mutated" check on hb_thread without changing it.
    _ = &hb_thread;
    // Start HTTP server (blocks until shutdown)
    const ctx = router.RouteContext{
        .data_dir = cfg.data_dir,
        .listen_address = cfg.listen_address,
        .client_port = cfg.client_port,
        .peer_port = cfg.peer_port,
        .allocator = allocator,
        .guardian = &guardian,
    };
    listener.serve(ctx, &shutdown_flag) catch |err| {
        log.err("server failed: {}", .{err});
        std.process.exit(1);
    };
    // Wait for heartbeat thread to finish
    if (hb_thread) |t| {
        t.join();
    }
    log.info("vault-guardian shutdown complete", .{});
}
/// Register SIGTERM and SIGINT so either triggers a graceful shutdown via
/// the shared shutdown flag.
fn installSignalHandlers() void {
    const act = posix.Sigaction{
        .handler = .{ .handler = signalHandler },
        // Block no additional signals while the handler runs. `0` is not a
        // valid sigset_t initializer on platforms where sigset_t is an
        // array type (e.g. Linux); use the empty-set constant instead.
        .mask = posix.empty_sigset,
        .flags = 0,
    };
    posix.sigaction(posix.SIG.TERM, &act, null);
    posix.sigaction(posix.SIG.INT, &act, null);
}
/// Background loop: every HEARTBEAT_INTERVAL_NS, re-evaluates peer states,
/// refreshes the local share count, and sends a heartbeat to every peer
/// that is not known to be dead. Exits when `running` becomes false.
fn heartbeatLoop(guardian: *guardian_mod.Guardian, running: *std.atomic.Value(bool)) void {
    const interval_ns: u64 = @intCast(@as(i128, heartbeat.HEARTBEAT_INTERVAL_NS));
    while (running.load(.acquire)) {
        // Evaluate node states (alive -> suspect -> dead transitions).
        heartbeat.evaluateNodeStates(&guardian.nodes);
        // Refresh share count advertised in outgoing heartbeats.
        guardian.refreshShareCount();
        // Resolve our own IP once per round — it is loop-invariant, so there
        // is no reason to re-parse it for every peer as the old code did.
        var self_ip: [4]u8 = .{ 127, 0, 0, 1 };
        if (guardian.nodes.self_index) |si| {
            const self_addr = guardian.nodes.nodes[si].address;
            if (std.net.Ip4Address.parse(self_addr, 0)) |addr| {
                self_ip = @bitCast(addr.sa.addr);
            } else |_| {}
        }
        // Send heartbeats to all peers, skipping ourselves and dead nodes.
        for (guardian.nodes.nodes, 0..) |node, idx| {
            if (guardian.nodes.self_index != null and idx == guardian.nodes.self_index.?) continue;
            if (node.state == .dead) continue;
            _ = heartbeat.sendHeartbeat(
                node.address,
                guardian.cfg.peer_port,
                self_ip,
                guardian.cfg.peer_port,
                guardian.share_count,
            );
        }
        // Sleep for heartbeat interval
        std.Thread.sleep(interval_ns);
    }
}
/// Print command-line usage and option summary to stderr.
fn printUsage() void {
    const usage_text =
        \\Usage: vault-guardian [OPTIONS]
        \\
        \\Orama Vault Guardian — distributed secret share storage
        \\
        \\Options:
        \\ --config <path> Path to config file (default: /opt/orama/.orama/data/vault/vault.yaml)
        \\ --data-dir <path> Override data directory
        \\ --port <port> Override client port (default: 7500)
        \\ --bind <addr> Override bind address (default: 0.0.0.0)
        \\ --help, -h Show this help
        \\ --version, -v Show version
        \\
    ;
    std.debug.print("{s}", .{usage_text});
}

View File

@ -0,0 +1,143 @@
/// Node join/leave detection.
///
/// Compares the current node list with a refreshed one to detect:
/// - New nodes (join) -> marked as unknown; they become alive after the first heartbeat
/// - Missing nodes (leave) -> marked as dead after a timeout (handled by heartbeat)
///
/// New nodes receive shares on the next client sync push (no active handoff).
const std = @import("std");
const log = @import("../log.zig");
const node_list = @import("node_list.zig");
/// A single membership change observed between two node-list snapshots.
/// `address` is borrowed from the input NodeList (not duplicated), so the
/// originating list must outlive the event.
pub const DiscoveryEvent = struct {
    address: []const u8,
    port: u16,
    event_type: EventType,
};
/// Kind of membership change.
pub const EventType = enum {
    joined,
    departed,
};
/// Compare two node lists and return events for joins and departures.
/// Joined events come first, then departed events. Event addresses alias
/// the input lists; the caller frees only the returned slice.
pub fn detectChanges(
    allocator: std.mem.Allocator,
    old: *const node_list.NodeList,
    new: *const node_list.NodeList,
) ![]DiscoveryEvent {
    var out = std.ArrayListUnmanaged(DiscoveryEvent){};
    errdefer out.deinit(allocator);
    // Joins: present in `new`, absent from `old`.
    for (new.nodes) |candidate| {
        if (!containsNode(old, candidate.address, candidate.port)) {
            try out.append(allocator, .{
                .address = candidate.address,
                .port = candidate.port,
                .event_type = .joined,
            });
        }
    }
    // Departures: present in `old`, absent from `new`.
    for (old.nodes) |candidate| {
        if (!containsNode(new, candidate.address, candidate.port)) {
            try out.append(allocator, .{
                .address = candidate.address,
                .port = candidate.port,
                .event_type = .departed,
            });
        }
    }
    return out.toOwnedSlice(allocator);
}
/// True when `list` holds a node with exactly this address and port.
fn containsNode(list: *const node_list.NodeList, address: []const u8, port: u16) bool {
    for (list.nodes) |node| {
        if (node.port == port and std.mem.eql(u8, node.address, address)) return true;
    }
    return false;
}
// Tests
// Identical snapshots must produce no events.
test "detectChanges: no changes" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    var old = try node_list.fromStatic(allocator, &addrs, 7500);
    defer old.deinit();
    var new = try node_list.fromStatic(allocator, &addrs, 7500);
    defer new.deinit();
    const events = try detectChanges(allocator, &old, &new);
    defer allocator.free(events);
    try std.testing.expectEqual(@as(usize, 0), events.len);
}
// A node appearing only in the new snapshot yields one `joined` event.
test "detectChanges: node joined" {
    const allocator = std.testing.allocator;
    const old_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    const new_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2", "10.0.0.3" };
    var old = try node_list.fromStatic(allocator, &old_addrs, 7500);
    defer old.deinit();
    var new = try node_list.fromStatic(allocator, &new_addrs, 7500);
    defer new.deinit();
    const events = try detectChanges(allocator, &old, &new);
    defer allocator.free(events);
    try std.testing.expectEqual(@as(usize, 1), events.len);
    try std.testing.expectEqual(EventType.joined, events[0].event_type);
    try std.testing.expectEqualSlices(u8, "10.0.0.3", events[0].address);
}
// A node present only in the old snapshot yields one `departed` event.
test "detectChanges: node departed" {
    const allocator = std.testing.allocator;
    const old_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2", "10.0.0.3" };
    const new_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.3" };
    var old = try node_list.fromStatic(allocator, &old_addrs, 7500);
    defer old.deinit();
    var new = try node_list.fromStatic(allocator, &new_addrs, 7500);
    defer new.deinit();
    const events = try detectChanges(allocator, &old, &new);
    defer allocator.free(events);
    try std.testing.expectEqual(@as(usize, 1), events.len);
    try std.testing.expectEqual(EventType.departed, events[0].event_type);
    try std.testing.expectEqualSlices(u8, "10.0.0.2", events[0].address);
}
// A swap (one in, one out) must produce exactly one event of each kind.
test "detectChanges: simultaneous join and depart" {
    const allocator = std.testing.allocator;
    const old_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    const new_addrs = [_][]const u8{ "10.0.0.1", "10.0.0.3" };
    var old = try node_list.fromStatic(allocator, &old_addrs, 7500);
    defer old.deinit();
    var new = try node_list.fromStatic(allocator, &new_addrs, 7500);
    defer new.deinit();
    const events = try detectChanges(allocator, &old, &new);
    defer allocator.free(events);
    try std.testing.expectEqual(@as(usize, 2), events.len);
    // One join and one depart
    var joins: usize = 0;
    var departs: usize = 0;
    for (events) |e| {
        if (e.event_type == .joined) joins += 1;
        if (e.event_type == .departed) departs += 1;
    }
    try std.testing.expectEqual(@as(usize, 1), joins);
    try std.testing.expectEqual(@as(usize, 1), departs);
}

View File

@ -0,0 +1,207 @@
/// Node list management discovers guardians via RQLite HTTP API.
///
/// Every node in the Orama network runs a vault guardian. The node list
/// is fetched from RQLite (source of truth for cluster membership).
///
/// RQLite status endpoint: GET /status includes node list in raft info.
/// For MVP: configurable static list + RQLite discovery.
const std = @import("std");
const log = @import("../log.zig");
/// One guardian in the cluster, as tracked by the local membership view.
pub const Node = struct {
    /// WireGuard IP address (e.g., "10.0.0.1")
    address: []const u8,
    /// Vault guardian client port
    port: u16,
    /// Whether this node is reachable
    state: NodeState,
    /// Last successful heartbeat (nanos since epoch)
    last_seen_ns: i128,
};
/// Liveness state of a peer, driven by heartbeat arrival times
/// (see peer/heartbeat.zig: suspect after 15s silence, dead after 60s).
pub const NodeState = enum {
    alive,
    suspect,
    dead,
    unknown,
};
/// Owning collection of cluster nodes plus helpers for liveness queries.
pub const NodeList = struct {
    nodes: []Node,
    allocator: std.mem.Allocator,
    /// Self node index (null if not identified)
    self_index: ?usize,
    /// Free every node's duplicated address string, then the node slice.
    pub fn deinit(self: *NodeList) void {
        for (self.nodes) |node| {
            self.allocator.free(node.address);
        }
        self.allocator.free(self.nodes);
    }
    /// Number of alive nodes.
    pub fn aliveCount(self: *const NodeList) usize {
        var count: usize = 0;
        for (self.nodes) |node| {
            if (node.state == .alive) count += 1;
        }
        return count;
    }
    /// Compute adaptive threshold: max(3, floor(N/3))
    /// where N is the number of currently-alive nodes.
    pub fn threshold(self: *const NodeList) usize {
        const alive = self.aliveCount();
        const t = alive / 3;
        return if (t < 3) 3 else t;
    }
    /// Update a node's state. Matching is by (address, port); a transition
    /// to .alive also stamps last_seen_ns. Unmatched nodes are ignored.
    pub fn updateState(self: *NodeList, address: []const u8, port: u16, state: NodeState) void {
        for (self.nodes) |*node| {
            if (std.mem.eql(u8, node.address, address) and node.port == port) {
                node.state = state;
                if (state == .alive) {
                    node.last_seen_ns = std.time.nanoTimestamp();
                }
                return;
            }
        }
    }
    /// Get all alive nodes except self. Returns shallow copies (address
    /// slices still point into this list); caller frees the returned slice.
    pub fn peers(self: *const NodeList, allocator: std.mem.Allocator) ![]Node {
        var list = std.ArrayListUnmanaged(Node){};
        errdefer list.deinit(allocator);
        for (self.nodes, 0..) |node, i| {
            if (self.self_index != null and i == self.self_index.?) continue;
            if (node.state == .alive) {
                try list.append(allocator, node);
            }
        }
        return list.toOwnedSlice(allocator);
    }
};
/// Fetch node list from RQLite status endpoint.
/// RQLite GET /status returns JSON with raft node info.
///
/// MVP stub: performs no network I/O and yields an empty list, which the
/// caller treats as single-node mode (fall back to static config).
pub fn fetchFromRqlite(allocator: std.mem.Allocator, rqlite_url: []const u8, vault_port: u16) !NodeList {
    // TODO: actual HTTP fetch, with retries and TLS in production.
    _ = rqlite_url;
    _ = vault_port;
    // A zero-length heap allocation keeps NodeList.deinit's free() valid.
    const empty = try allocator.alloc(Node, 0);
    return NodeList{
        .nodes = empty,
        .allocator = allocator,
        .self_index = null,
    };
}
/// Create a node list from a static list of addresses.
/// Used for testing and as fallback when RQLite is unavailable.
/// Every address is duplicated; the returned NodeList owns all memory and
/// releases it in deinit(). On error, nothing leaks.
pub fn fromStatic(allocator: std.mem.Allocator, addresses: []const []const u8, port: u16) !NodeList {
    const nodes = try allocator.alloc(Node, addresses.len);
    // Track how many addresses were successfully duplicated so the error
    // path frees exactly those (the old errdefer freed only the node slice,
    // leaking every address duped before a failing dupe).
    var initialized: usize = 0;
    errdefer {
        for (nodes[0..initialized]) |node| allocator.free(node.address);
        allocator.free(nodes);
    }
    for (addresses, 0..) |addr, i| {
        nodes[i] = Node{
            .address = try allocator.dupe(u8, addr),
            .port = port,
            .state = .unknown,
            .last_seen_ns = 0,
        };
        initialized = i + 1;
    }
    return NodeList{
        .nodes = nodes,
        .allocator = allocator,
        .self_index = null,
    };
}
// Tests
// Static lists start with every node unknown; threshold floors at 3.
test "node_list: static creation and threshold" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5" };
    var nl = try fromStatic(allocator, &addrs, 7500);
    defer nl.deinit();
    try std.testing.expectEqual(@as(usize, 5), nl.nodes.len);
    try std.testing.expectEqual(@as(usize, 0), nl.aliveCount());
    try std.testing.expectEqual(@as(usize, 3), nl.threshold()); // 0 alive -> max(3, 0/3) = 3
}
// With 14 alive nodes the adaptive threshold is floor(14/3) = 4.
test "node_list: alive count and threshold" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{
        "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5",
        "10.0.0.6", "10.0.0.7", "10.0.0.8", "10.0.0.9", "10.0.0.10",
        "10.0.0.11", "10.0.0.12", "10.0.0.13", "10.0.0.14",
    };
    var nl = try fromStatic(allocator, &addrs, 7500);
    defer nl.deinit();
    // Mark all alive
    for (nl.nodes) |*node| {
        node.state = .alive;
    }
    try std.testing.expectEqual(@as(usize, 14), nl.aliveCount());
    try std.testing.expectEqual(@as(usize, 4), nl.threshold()); // 14/3 = 4
}
// updateState matches by (address, port) and only touches that node.
test "node_list: updateState" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    var nl = try fromStatic(allocator, &addrs, 7500);
    defer nl.deinit();
    nl.updateState("10.0.0.1", 7500, .alive);
    try std.testing.expectEqual(NodeState.alive, nl.nodes[0].state);
    try std.testing.expectEqual(NodeState.unknown, nl.nodes[1].state);
    try std.testing.expectEqual(@as(usize, 1), nl.aliveCount());
    nl.updateState("10.0.0.1", 7500, .dead);
    try std.testing.expectEqual(NodeState.dead, nl.nodes[0].state);
    try std.testing.expectEqual(@as(usize, 0), nl.aliveCount());
}
// peers() must skip the node at self_index.
test "node_list: peers excludes self" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2", "10.0.0.3" };
    var nl = try fromStatic(allocator, &addrs, 7500);
    defer nl.deinit();
    nl.self_index = 1; // we are 10.0.0.2
    for (nl.nodes) |*node| node.state = .alive;
    const peer_list = try nl.peers(allocator);
    defer allocator.free(peer_list);
    try std.testing.expectEqual(@as(usize, 2), peer_list.len);
}
// Small clusters are clamped to the minimum threshold of 3.
test "node_list: threshold minimum is 3" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4", "10.0.0.5" };
    var nl = try fromStatic(allocator, &addrs, 7500);
    defer nl.deinit();
    // Only 5 alive -> 5/3 = 1, but minimum is 3
    for (nl.nodes) |*node| node.state = .alive;
    try std.testing.expectEqual(@as(usize, 3), nl.threshold());
}
// The MVP RQLite fetch is a stub returning an empty, deinit-safe list.
test "node_list: fetchFromRqlite returns empty (MVP)" {
    const allocator = std.testing.allocator;
    var nl = try fetchFromRqlite(allocator, "http://127.0.0.1:4001", 7500);
    defer nl.deinit();
    try std.testing.expectEqual(@as(usize, 0), nl.nodes.len);
}

112
src/membership/quorum.zig Normal file
View File

@ -0,0 +1,112 @@
/// Write quorum logic for multi-guardian operations.
///
/// W = ceil(2/3 * alive_nodes) — a supermajority is required for writes.
/// R = 1 (any single guardian can serve a read).
///
/// Push: fan out to all alive guardians, succeed if W respond with ACK.
/// Pull: contact guardians until K shares are collected.
const std = @import("std");
const node_list = @import("node_list.zig");
/// Calculate write quorum: ceil(2/3 * N).
/// Tiny clusters (N <= 2) require every node to acknowledge; an empty
/// cluster needs zero ACKs.
pub fn writeQuorum(alive_count: usize) usize {
    return switch (alive_count) {
        0 => 0,
        1, 2 => alive_count, // require all for tiny clusters
        // Denominator is the constant 3, so divCeil cannot fail here.
        else => std.math.divCeil(usize, 2 * alive_count, 3) catch unreachable,
    };
}
/// Check if we have enough ACKs for a successful write.
pub fn hasWriteQuorum(ack_count: usize, alive_count: usize) bool {
    const needed = writeQuorum(alive_count);
    return ack_count >= needed;
}
/// Calculate read quorum: how many shares needed to reconstruct.
/// This is the Shamir threshold K = max(3, floor(N/3)).
pub fn readQuorum(alive_count: usize) usize {
    // floor(N/3), clamped to a minimum of 3 (this also covers N == 0).
    return @max(@as(usize, 3), alive_count / 3);
}
/// Result of a multi-guardian push operation.
pub const PushResult = struct {
    /// Number of guardians that accepted the share
    ack_count: usize,
    /// Number of guardians that were contacted
    total_contacted: usize,
    /// Number of guardians that failed
    fail_count: usize,
    /// Whether write quorum was achieved
    quorum_met: bool,
    /// A push succeeds exactly when write quorum was met.
    pub fn isSuccess(self: PushResult) bool {
        return self.quorum_met;
    }
};
/// Result of a multi-guardian pull operation.
pub const PullResult = struct {
    /// Number of shares collected
    share_count: usize,
    /// Number of guardians contacted
    total_contacted: usize,
    /// Whether enough shares were collected for reconstruction
    threshold_met: bool,
    /// A pull succeeds exactly when the reconstruction threshold was met.
    pub fn isSuccess(self: PullResult) bool {
        return self.threshold_met;
    }
};
// Tests
// Quorum sizes for small and mid-size clusters, including the N<=2 special case.
test "writeQuorum: various cluster sizes" {
    try std.testing.expectEqual(@as(usize, 0), writeQuorum(0));
    try std.testing.expectEqual(@as(usize, 1), writeQuorum(1));
    try std.testing.expectEqual(@as(usize, 2), writeQuorum(2));
    try std.testing.expectEqual(@as(usize, 2), writeQuorum(3)); // ceil(6/3) = 2
    try std.testing.expectEqual(@as(usize, 3), writeQuorum(4)); // ceil(8/3) = 3
    try std.testing.expectEqual(@as(usize, 4), writeQuorum(5)); // ceil(10/3) = 4
    try std.testing.expectEqual(@as(usize, 10), writeQuorum(14)); // ceil(28/3) = 10
}
// hasWriteQuorum is just ack_count >= writeQuorum(alive_count).
test "hasWriteQuorum: basic checks" {
    // 5-node cluster: quorum = 4
    try std.testing.expect(hasWriteQuorum(4, 5));
    try std.testing.expect(hasWriteQuorum(5, 5));
    try std.testing.expect(!hasWriteQuorum(3, 5));
    // 3-node cluster: quorum = 2
    try std.testing.expect(hasWriteQuorum(2, 3));
    try std.testing.expect(!hasWriteQuorum(1, 3));
}
// Read quorum floors at 3 and grows as floor(N/3).
test "readQuorum: adaptive threshold" {
    try std.testing.expectEqual(@as(usize, 3), readQuorum(0));
    try std.testing.expectEqual(@as(usize, 3), readQuorum(3));
    try std.testing.expectEqual(@as(usize, 3), readQuorum(5)); // 5/3=1, min=3
    try std.testing.expectEqual(@as(usize, 3), readQuorum(9)); // 9/3=3
    try std.testing.expectEqual(@as(usize, 4), readQuorum(14)); // 14/3=4
    try std.testing.expectEqual(@as(usize, 33), readQuorum(100)); // 100/3=33
}
// isSuccess mirrors the quorum_met flag.
test "PushResult: success when quorum met" {
    const result = PushResult{
        .ack_count = 4,
        .total_contacted = 5,
        .fail_count = 1,
        .quorum_met = true,
    };
    try std.testing.expect(result.isSuccess());
}
// isSuccess is false when the quorum flag is false.
test "PushResult: failure when quorum not met" {
    const result = PushResult{
        .ack_count = 2,
        .total_contacted = 5,
        .fail_count = 3,
        .quorum_met = false,
    };
    try std.testing.expect(!result.isSuccess());
}

169
src/peer/heartbeat.zig Normal file
View File

@ -0,0 +1,169 @@
/// Heartbeat management for guardian-to-guardian protocol.
///
/// Timing: 5s heartbeat interval, 15s suspect, 60s dead.
/// Each guardian periodically sends heartbeats to all peers.
/// Peers that stop responding transition: alive -> suspect -> dead.
const std = @import("std");
const log = @import("../log.zig");
const node_list = @import("../membership/node_list.zig");
const protocol = @import("protocol.zig");
/// How often each guardian sends heartbeats to its peers.
pub const HEARTBEAT_INTERVAL_NS: i128 = 5 * std.time.ns_per_s;
/// Silence after which a peer is marked suspect.
pub const SUSPECT_TIMEOUT_NS: i128 = 15 * std.time.ns_per_s;
/// Silence after which a peer is marked dead.
pub const DEAD_TIMEOUT_NS: i128 = 60 * std.time.ns_per_s;
/// Check all nodes and update their states based on last_seen time.
/// Transitions: alive -> suspect (>15s silence) -> dead (>60s silence).
/// The local node (self_index) and never-seen nodes are skipped.
pub fn evaluateNodeStates(nodes: *node_list.NodeList) void {
    const now = std.time.nanoTimestamp();
    // Index capture replaces the old pointer-arithmetic index recovery
    // (@intFromPtr subtraction / @sizeOf division), which was fragile.
    for (nodes.nodes, 0..) |*node, idx| {
        if (nodes.self_index) |si| {
            if (idx == si) continue; // skip self
        }
        if (node.last_seen_ns == 0) continue; // never seen
        const age = now - node.last_seen_ns;
        if (age > DEAD_TIMEOUT_NS) {
            if (node.state != .dead) {
                log.warn("node {s}:{d} marked dead (no heartbeat for >60s)", .{ node.address, node.port });
                node.state = .dead;
            }
        } else if (age > SUSPECT_TIMEOUT_NS) {
            // Do not demote an already-dead node back to suspect.
            if (node.state != .suspect and node.state != .dead) {
                log.warn("node {s}:{d} suspect (no heartbeat for >15s)", .{ node.address, node.port });
                node.state = .suspect;
            }
        }
    }
}
/// Send a heartbeat to a specific peer.
/// Returns true if the heartbeat was sent successfully.
/// Any failure (unparseable address, refused connection, write error) is
/// reported as `false`; peer-state handling is left to the timeout logic.
/// NOTE(review): tcpConnectToAddress blocks with no explicit timeout — a
/// hung peer can stall the caller; confirm this is acceptable for the
/// heartbeat loop.
pub fn sendHeartbeat(
    peer_address: []const u8,
    peer_port: u16,
    self_ip: [4]u8,
    self_port: u16,
    share_count: u32,
) bool {
    const address = std.net.Address.parseIp4(peer_address, peer_port) catch {
        return false;
    };
    const stream = std.net.tcpConnectToAddress(address) catch {
        return false;
    };
    defer stream.close();
    const hb = protocol.Heartbeat{
        .sender_ip = self_ip,
        .sender_port = self_port,
        .share_count = share_count,
        // Wire timestamp is whole seconds since the epoch.
        .timestamp = @intCast(@divFloor(std.time.nanoTimestamp(), std.time.ns_per_s)),
    };
    const payload = protocol.encodeHeartbeat(hb);
    const header = protocol.encodeHeader(.{
        .version = protocol.PROTOCOL_VERSION,
        .msg_type = .heartbeat,
        .payload_len = payload.len,
    });
    stream.writeAll(&header) catch return false;
    stream.writeAll(&payload) catch return false;
    return true;
}
/// Count shares in the data directory (V1 + V2).
/// V1: counts directories under shares/
/// V2: counts directories under vaults/ (each identity with at least one secret)
/// Missing or unreadable directories contribute zero rather than erroring.
pub fn countShares(data_dir: []const u8) u32 {
    // The two listings were previously duplicated inline; one helper keeps
    // the path formatting and iteration logic in a single place.
    return countSubdirs(data_dir, "shares") + countSubdirs(data_dir, "vaults");
}
/// Count immediate subdirectories of `<data_dir>/<subdir>`; 0 on any error.
fn countSubdirs(data_dir: []const u8, subdir: []const u8) u32 {
    var path_buf: [4096]u8 = undefined;
    const path = std.fmt.bufPrint(&path_buf, "{s}/{s}", .{ data_dir, subdir }) catch return 0;
    var dir = std.fs.cwd().openDir(path, .{ .iterate = true }) catch return 0;
    defer dir.close();
    var count: u32 = 0;
    var it = dir.iterate();
    while (it.next() catch null) |entry| {
        if (entry.kind == .directory) count += 1;
    }
    return count;
}
// Tests
// A node silent for longer than DEAD_TIMEOUT_NS must flip to dead,
// while a recently-seen node stays alive.
test "evaluateNodeStates: marks dead after timeout" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    var nl = try node_list.fromStatic(allocator, &addrs, 7501);
    defer nl.deinit();
    // Set one node as alive but with old timestamp
    nl.nodes[0].state = .alive;
    nl.nodes[0].last_seen_ns = std.time.nanoTimestamp() - DEAD_TIMEOUT_NS - 1;
    // Set other as alive and recent
    nl.nodes[1].state = .alive;
    nl.nodes[1].last_seen_ns = std.time.nanoTimestamp();
    evaluateNodeStates(&nl);
    try std.testing.expectEqual(node_list.NodeState.dead, nl.nodes[0].state);
    try std.testing.expectEqual(node_list.NodeState.alive, nl.nodes[1].state);
}
// Silence between the suspect and dead timeouts yields the suspect state.
test "evaluateNodeStates: marks suspect" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{"10.0.0.1"};
    var nl = try node_list.fromStatic(allocator, &addrs, 7501);
    defer nl.deinit();
    nl.nodes[0].state = .alive;
    nl.nodes[0].last_seen_ns = std.time.nanoTimestamp() - SUSPECT_TIMEOUT_NS - 1;
    evaluateNodeStates(&nl);
    try std.testing.expectEqual(node_list.NodeState.suspect, nl.nodes[0].state);
}
// The local node never heartbeats itself, so it is exempt from aging.
test "evaluateNodeStates: skips self" {
    const allocator = std.testing.allocator;
    const addrs = [_][]const u8{ "10.0.0.1", "10.0.0.2" };
    var nl = try node_list.fromStatic(allocator, &addrs, 7501);
    defer nl.deinit();
    nl.self_index = 0;
    nl.nodes[0].state = .alive;
    nl.nodes[0].last_seen_ns = std.time.nanoTimestamp() - DEAD_TIMEOUT_NS - 1;
    evaluateNodeStates(&nl);
    // Self should NOT be marked dead
    try std.testing.expectEqual(node_list.NodeState.alive, nl.nodes[0].state);
}
// A missing data directory must count as zero shares, not an error.
test "countShares: returns 0 for nonexistent dir" {
    try std.testing.expectEqual(@as(u32, 0), countShares("/tmp/nonexistent-vault-dir-12345"));
}

126
src/peer/listener.zig Normal file
View File

@ -0,0 +1,126 @@
/// Guardian-to-guardian TCP listener (port 7501, WireGuard only).
///
/// Handles incoming heartbeats and verification requests from peers.
const std = @import("std");
const log = @import("../log.zig");
const protocol = @import("protocol.zig");
const node_list = @import("../membership/node_list.zig");
/// Shared state handed to every inbound peer connection: the membership
/// view to update on heartbeats, the data directory for share lookups,
/// and the allocator for payload buffers.
pub const PeerContext = struct {
    nodes: *node_list.NodeList,
    data_dir: []const u8,
    allocator: std.mem.Allocator,
};
/// Start the peer protocol listener. Blocks forever.
/// Connections are handled serially on this thread, so a slow peer delays
/// the next accept. Per-connection errors are logged and do not stop the loop.
pub fn serve(listen_address: []const u8, port: u16, ctx: PeerContext) !void {
    const address = std.net.Address.parseIp(listen_address, port) catch |err| {
        log.err("invalid peer listen address {s}:{d}: {}", .{ listen_address, port, err });
        return err;
    };
    // reuse_address avoids bind failures on quick restarts.
    var server = address.listen(.{
        .reuse_address = true,
    }) catch |err| {
        log.err("failed to bind peer listener {s}:{d}: {}", .{ listen_address, port, err });
        return err;
    };
    defer server.deinit();
    log.info("peer protocol listening on {s}:{d}", .{ listen_address, port });
    while (true) {
        const conn = server.accept() catch |err| {
            log.warn("peer accept error: {}", .{err});
            continue;
        };
        handlePeerConnection(conn, &ctx) catch |err| {
            log.warn("peer connection error: {}", .{err});
        };
    }
}
/// Handle one inbound peer connection: read the framed header and payload,
/// dispatch on message type, and reply where the protocol requires it.
/// Malformed, truncated, or oversized frames are dropped silently.
fn handlePeerConnection(conn: std.net.Server.Connection, ctx: *const PeerContext) !void {
    defer conn.stream.close();
    // Read header. readAll returns the byte count (!usize); the old code
    // discarded it, which both fails to compile and ignores short reads.
    var header_buf: [protocol.HEADER_SIZE]u8 = undefined;
    const header_read = conn.stream.readAll(&header_buf) catch return;
    if (header_read < header_buf.len) return; // peer closed mid-header
    const header = protocol.decodeHeader(header_buf) orelse return;
    // Defense in depth: decodeHeader already enforces this bound, but keep
    // the check local and use the shared constant instead of a magic 1MB.
    if (header.payload_len > protocol.MAX_PAYLOAD_SIZE) return;
    // Read payload, again rejecting truncated frames.
    const payload = ctx.allocator.alloc(u8, header.payload_len) catch return;
    defer ctx.allocator.free(payload);
    const payload_read = conn.stream.readAll(payload) catch return;
    if (payload_read < payload.len) return; // peer closed mid-payload
    switch (header.msg_type) {
        .heartbeat => {
            const hb = protocol.decodeHeartbeat(payload) orelse return;
            handleHeartbeat(hb, ctx);
            // Send ACK
            const ack_header = protocol.encodeHeader(.{
                .version = protocol.PROTOCOL_VERSION,
                .msg_type = .heartbeat_ack,
                .payload_len = 0,
            });
            conn.stream.writeAll(&ack_header) catch {};
        },
        .verify_request => {
            const req = protocol.decodeVerifyRequest(payload) orelse return;
            handleVerifyRequest(req, conn.stream, ctx);
        },
        else => {
            // Unknown/unhandled message type — ignore.
        },
    }
}
/// Record a received heartbeat: render the sender's IPv4 address as dotted
/// decimal and mark the matching node alive (stamps last_seen_ns).
fn handleHeartbeat(hb: protocol.Heartbeat, ctx: *const PeerContext) void {
    const ip = hb.sender_ip;
    // "255.255.255.255" is 15 bytes, so a 16-byte buffer always suffices.
    var buf: [16]u8 = undefined;
    const sender_addr = std.fmt.bufPrint(&buf, "{d}.{d}.{d}.{d}", .{ ip[0], ip[1], ip[2], ip[3] }) catch return;
    ctx.nodes.updateState(sender_addr, hb.sender_port, .alive);
}
/// Answer a share-verification request: report whether we hold a share for
/// the given identity and, if so, the SHA-256 of its contents as the
/// commitment. Invalid identities get a negative (has_share=false) reply.
fn handleVerifyRequest(req: protocol.VerifyRequest, stream: std.net.Stream, ctx: *const PeerContext) void {
    const identity = req.identity[0..req.identity_len];
    var resp = protocol.VerifyResponse{
        .identity = req.identity,
        .identity_len = req.identity_len,
        .has_share = false,
        .commitment_root = .{0} ** 32,
    };
    // SECURITY: the identity comes from an untrusted peer and is spliced
    // into a filesystem path below. Restrict it to hex digits (it is a hex
    // identity hash per the protocol docs) to block path traversal such as
    // "../../"; anything else is answered with has_share=false.
    const identity_ok = blk: {
        if (identity.len == 0) break :blk false;
        for (identity) |c| {
            if (!std.ascii.isHex(c)) break :blk false;
        }
        break :blk true;
    };
    if (identity_ok) {
        var path_buf: [4096]u8 = undefined;
        if (std.fmt.bufPrint(&path_buf, "{s}/shares/{s}/share.bin", .{ ctx.data_dir, identity })) |share_path| {
            // Try to read the share and compute its hash.
            if (std.fs.cwd().readFileAlloc(ctx.allocator, share_path, 1024 * 1024)) |data| {
                defer ctx.allocator.free(data);
                resp.has_share = true;
                // SHA-256 of the share contents serves as the commitment.
                std.crypto.hash.sha2.Sha256.hash(data, &resp.commitment_root, .{});
            } else |_| {}
        } else |_| {}
    }
    const payload = protocol.encodeVerifyResponse(resp);
    const header = protocol.encodeHeader(.{
        .version = protocol.PROTOCOL_VERSION,
        .msg_type = .verify_response,
        .payload_len = payload.len,
    });
    stream.writeAll(&header) catch {};
    stream.writeAll(&payload) catch {};
}

239
src/peer/protocol.zig Normal file
View File

@ -0,0 +1,239 @@
/// Guardian-to-guardian binary protocol messages.
///
/// All messages are prefixed with a 1-byte type tag + 4-byte big-endian length.
/// Messages are sent over TCP on port 7501 (WireGuard-only interface).
const std = @import("std");
/// Wire protocol version; decodeHeader rejects frames with any other value.
pub const PROTOCOL_VERSION: u8 = 1;
/// One-byte message tag carried in each frame header.
pub const MessageType = enum(u8) {
    heartbeat = 0x01,
    heartbeat_ack = 0x02,
    verify_request = 0x03,
    verify_response = 0x04,
    repair_offer = 0x05,
    repair_accept = 0x06,
    /// Convert a raw byte to a MessageType; null for unknown tags.
    pub fn fromByte(b: u8) ?MessageType {
        return std.meta.intToEnum(MessageType, b) catch null;
    }
};
/// Maximum payload size (1 MiB). Prevents memory exhaustion from malicious peers.
pub const MAX_PAYLOAD_SIZE: u32 = 1024 * 1024;
/// Wire header: [version:1][type:1][length:4] = 6 bytes
pub const HEADER_SIZE = 6;
/// Decoded frame header (see encodeHeader/decodeHeader).
pub const Header = struct {
    version: u8,
    msg_type: MessageType,
    /// Length of the payload (NOT including header)
    payload_len: u32,
};
/// Periodic liveness message; 18-byte wire form (see encodeHeartbeat).
pub const Heartbeat = struct {
    /// Sender's node ID (WireGuard IP as 4 bytes for IPv4)
    sender_ip: [4]u8,
    sender_port: u16,
    /// Number of shares this guardian stores
    share_count: u32,
    /// Unix timestamp (seconds)
    timestamp: u64,
};
/// Ask a peer whether it holds a share; 65-byte wire form.
pub const VerifyRequest = struct {
    /// Identity hash to verify (hex)
    identity: [64]u8,
    /// Number of meaningful bytes in `identity` (<= 64).
    identity_len: u8,
};
/// Reply to a VerifyRequest; 98-byte wire form.
pub const VerifyResponse = struct {
    /// Identity hash being verified
    identity: [64]u8,
    identity_len: u8,
    /// Whether this guardian has the share
    has_share: bool,
    /// Merkle root of the share data (SHA-256)
    commitment_root: [32]u8,
};
/// Serialize a frame header into its 6-byte wire form:
/// [version:1][type:1][payload_len:4 big-endian].
pub fn encodeHeader(header: Header) [HEADER_SIZE]u8 {
    var out: [HEADER_SIZE]u8 = undefined;
    out[0] = header.version;
    out[1] = @intFromEnum(header.msg_type);
    std.mem.writeInt(u32, out[2..6], header.payload_len, .big);
    return out;
}
/// Parse a 6-byte frame header.
/// Returns null on a version mismatch, an unknown message tag, or a
/// payload length above MAX_PAYLOAD_SIZE.
pub fn decodeHeader(buf: [HEADER_SIZE]u8) ?Header {
    const version = buf[0];
    if (version != PROTOCOL_VERSION) return null;
    const msg_type = MessageType.fromByte(buf[1]) orelse return null;
    const declared_len = std.mem.readInt(u32, buf[2..6], .big);
    if (declared_len > MAX_PAYLOAD_SIZE) return null;
    return .{
        .version = version,
        .msg_type = msg_type,
        .payload_len = declared_len,
    };
}
/// Serialize a heartbeat into its 18-byte wire form:
/// [ip:4][port:2][share_count:4][timestamp:8], integers big-endian.
pub fn encodeHeartbeat(hb: Heartbeat) [18]u8 {
    var out: [18]u8 = undefined;
    out[0..4].* = hb.sender_ip;
    std.mem.writeInt(u16, out[4..6], hb.sender_port, .big);
    std.mem.writeInt(u32, out[6..10], hb.share_count, .big);
    std.mem.writeInt(u64, out[10..18], hb.timestamp, .big);
    return out;
}
/// Parse a heartbeat; null when fewer than 18 bytes are available.
pub fn decodeHeartbeat(buf: []const u8) ?Heartbeat {
    if (buf.len < 18) return null;
    const port = std.mem.readInt(u16, buf[4..6], .big);
    const shares = std.mem.readInt(u32, buf[6..10], .big);
    const ts = std.mem.readInt(u64, buf[10..18], .big);
    return .{
        .sender_ip = buf[0..4].*,
        .sender_port = port,
        .share_count = shares,
        .timestamp = ts,
    };
}
/// Serialize a verify request: 64 identity bytes followed by the length byte.
pub fn encodeVerifyRequest(req: VerifyRequest) [65]u8 {
    var out: [65]u8 = undefined;
    out[0..64].* = req.identity;
    out[64] = req.identity_len;
    return out;
}
/// Parse a verify request.
/// Returns null when the buffer is shorter than 65 bytes or the declared
/// identity length exceeds the 64-byte identity field.
pub fn decodeVerifyRequest(buf: []const u8) ?VerifyRequest {
    if (buf.len < 65) return null;
    const declared_len = buf[64];
    if (declared_len > 64) return null;
    return .{
        .identity = buf[0..64].*,
        .identity_len = declared_len,
    };
}
/// Serialize a verify response into its 98-byte wire form:
/// [identity:64][identity_len:1][has_share:1][commitment_root:32].
pub fn encodeVerifyResponse(resp: VerifyResponse) [98]u8 {
    var out: [98]u8 = undefined;
    out[0..64].* = resp.identity;
    out[64] = resp.identity_len;
    out[65] = @intFromBool(resp.has_share);
    out[66..98].* = resp.commitment_root;
    return out;
}
/// Parse a verify response.
/// Returns null when the buffer is shorter than 98 bytes, the declared
/// identity length exceeds the 64-byte field, or the has_share byte is
/// anything other than 0 or 1.
pub fn decodeVerifyResponse(buf: []const u8) ?VerifyResponse {
    if (buf.len < 98) return null;
    const declared_len = buf[64];
    if (declared_len > 64) return null;
    const share_flag = buf[65];
    if (share_flag > 1) return null;
    return .{
        .identity = buf[0..64].*,
        .identity_len = declared_len,
        .has_share = share_flag == 1,
        .commitment_root = buf[66..98].*,
    };
}
// Tests
test "header: encode/decode round-trip" {
const header = Header{
.version = PROTOCOL_VERSION,
.msg_type = .heartbeat,
.payload_len = 1234,
};
const encoded = encodeHeader(header);
const decoded = decodeHeader(encoded).?;
try std.testing.expectEqual(header.version, decoded.version);
try std.testing.expectEqual(header.msg_type, decoded.msg_type);
try std.testing.expectEqual(header.payload_len, decoded.payload_len);
}
test "header: wrong version returns null" {
var encoded = encodeHeader(.{
.version = PROTOCOL_VERSION,
.msg_type = .heartbeat,
.payload_len = 0,
});
encoded[0] = 99; // wrong version
try std.testing.expect(decodeHeader(encoded) == null);
}
test "header: invalid message type returns null" {
var encoded = encodeHeader(.{
.version = PROTOCOL_VERSION,
.msg_type = .heartbeat,
.payload_len = 0,
});
encoded[1] = 0xFF; // invalid type
try std.testing.expect(decodeHeader(encoded) == null);
}
test "heartbeat: encode/decode round-trip" {
const hb = Heartbeat{
.sender_ip = .{ 10, 0, 0, 1 },
.sender_port = 7501,
.share_count = 42,
.timestamp = 1700000000,
};
const encoded = encodeHeartbeat(hb);
const decoded = decodeHeartbeat(&encoded).?;
try std.testing.expectEqualSlices(u8, &hb.sender_ip, &decoded.sender_ip);
try std.testing.expectEqual(hb.sender_port, decoded.sender_port);
try std.testing.expectEqual(hb.share_count, decoded.share_count);
try std.testing.expectEqual(hb.timestamp, decoded.timestamp);
}
// Verify-request identity bytes and length must round-trip intact.
test "verify_request: encode/decode round-trip" {
    var request = VerifyRequest{
        .identity = .{0} ** 64,
        .identity_len = 10,
    };
    @memcpy(request.identity[0..10], "abcdef1234");
    const wire = encodeVerifyRequest(request);
    const parsed = decodeVerifyRequest(&wire).?;
    try std.testing.expectEqualSlices(u8, &request.identity, &parsed.identity);
    try std.testing.expectEqual(request.identity_len, parsed.identity_len);
}
// All four verify-response fields must round-trip intact.
test "verify_response: encode/decode round-trip" {
    var original = VerifyResponse{
        .identity = .{0} ** 64,
        .identity_len = 8,
        .has_share = true,
        .commitment_root = .{0xAB} ** 32,
    };
    @memcpy(original.identity[0..8], "deadbeef");
    const wire = encodeVerifyResponse(original);
    const parsed = decodeVerifyResponse(&wire).?;
    try std.testing.expectEqualSlices(u8, &original.identity, &parsed.identity);
    try std.testing.expectEqual(original.identity_len, parsed.identity_len);
    try std.testing.expectEqual(original.has_share, parsed.has_share);
    try std.testing.expectEqualSlices(u8, &original.commitment_root, &parsed.commitment_root);
}

153
src/peer/repair.zig Normal file
View File

@ -0,0 +1,153 @@
/// Proactive repair protocol for guardian share refresh.
///
/// When the cluster detects that shares should be refreshed (periodic timer,
/// guardian join/leave, or manual trigger), the repair protocol coordinates
/// the Herzberg et al. re-sharing across all active guardians.
///
/// Flow:
/// 1. Leader initiates repair round (broadcasts REPAIR_OFFER)
/// 2. Each guardian generates re-sharing deltas and sends to peers
/// 3. Each guardian applies received deltas to update their share
/// 4. Guardians exchange new Merkle commitments to verify consistency
const std = @import("std");
const log = @import("../log.zig");
const node_list = @import("../membership/node_list.zig");
const protocol = @import("protocol.zig");
/// Status of a repair round.
/// Progresses roughly through the flow described at the top of this file:
/// initiated -> deltas_sent -> deltas_received -> applied -> verified,
/// with `failed` as the terminal error state.
pub const RepairStatus = enum {
    /// No repair round in progress.
    idle,
    /// A round has been started (leader broadcasts REPAIR_OFFER).
    initiated,
    /// Our re-sharing deltas have been sent to peers.
    deltas_sent,
    /// Peer deltas have been received.
    deltas_received,
    /// Received deltas have been applied to the local share.
    applied,
    /// New Merkle commitments exchanged and checked for consistency.
    verified,
    /// The round did not complete successfully.
    failed,
};
/// A repair round tracks the state of one re-sharing operation.
pub const RepairRound = struct {
    /// Unique round ID (timestamp-based)
    round_id: u64,
    /// Current status
    status: RepairStatus,
    /// Number of guardians participating
    participant_count: usize,
    /// Number of delta sets received
    deltas_received: usize,
    /// Number of verifications completed
    verifications_done: usize,
    /// Whether this round succeeded
    success: bool,
    /// Timestamp when round started
    started_ns: i128,

    /// Create a fresh round in the `idle` state.
    ///
    /// Reads the clock exactly once so `round_id` and `started_ns` always
    /// describe the same instant (the previous version sampled the clock
    /// twice and the two fields could disagree).
    pub fn init() RepairRound {
        const now_ns = std.time.nanoTimestamp();
        return RepairRound{
            // Low 64 bits of the signed nanosecond timestamp; IDs only
            // collide for rounds started in the same nanosecond.
            .round_id = @as(u64, @truncate(@as(u128, @bitCast(now_ns)))),
            .status = .idle,
            .participant_count = 0,
            .deltas_received = 0,
            .verifications_done = 0,
            .success = false,
            .started_ns = now_ns,
        };
    }

    /// Check if the round has timed out (60 seconds).
    pub fn isTimedOut(self: *const RepairRound) bool {
        const elapsed_ns = std.time.nanoTimestamp() - self.started_ns;
        return elapsed_ns > 60 * std.time.ns_per_s;
    }

    /// Check if enough deltas have been received to apply
    /// (one delta set from every participant).
    pub fn canApply(self: *const RepairRound) bool {
        return self.deltas_received >= self.participant_count;
    }
};
/// Determines if a repair round should be triggered.
///
/// Conditions:
/// - Fewer than 3 alive guardians: never repair (re-sharing cannot complete
///   without a working quorum)
/// - Guardian join/leave detected: always repair
/// - Periodic timer: repair every 24 hours
/// - Manual trigger via admin API (handled by the caller)
///
/// Note: the previous version checked `alive_count` *after* both triggering
/// returns (immediately before an unconditional `return false`), making the
/// guard dead code — topology-change and periodic repairs fired even below
/// quorum, contradicting the guard's own comment. The guard now runs first.
pub fn shouldRepair(
    alive_count: usize,
    last_repair_ns: i128,
    node_change_detected: bool,
) bool {
    // Don't repair if too few guardians (need at least 3)
    if (alive_count < 3) return false;
    // Always repair on node topology change
    if (node_change_detected) return true;
    // Periodic repair: every 24 hours
    const elapsed_ns = std.time.nanoTimestamp() - last_repair_ns;
    const twenty_four_hours_ns: i128 = 24 * 60 * 60 * std.time.ns_per_s;
    return elapsed_ns > twenty_four_hours_ns;
}
/// Determines the safety threshold — the minimum number of alive guardians
/// needed before we should consider repair unnecessary.
/// If alive_count drops below this, repair becomes critical.
pub fn safetyThreshold(total_count: usize) usize {
    // Read quorum K is total/3, floored at 3; safety margin is K + 1.
    const read_quorum = @max(total_count / 3, 3);
    return read_quorum + 1;
}
// Tests
// A freshly initialized round starts idle with no recorded progress.
test "repair: round init" {
    const fresh = RepairRound.init();
    try std.testing.expectEqual(RepairStatus.idle, fresh.status);
    try std.testing.expect(!fresh.success);
    try std.testing.expectEqual(@as(usize, 0), fresh.deltas_received);
}
// Timeout is only reported once more than 60 seconds have elapsed.
test "repair: round timeout check" {
    var round = RepairRound.init();
    try std.testing.expect(!round.isTimedOut());
    // Backdate the start time by 61 seconds to force the timeout.
    round.started_ns = std.time.nanoTimestamp() - (61 * std.time.ns_per_s);
    try std.testing.expect(round.isTimedOut());
}
// canApply flips to true once deltas from all participants are in.
test "repair: canApply when enough deltas" {
    var round = RepairRound.init();
    round.participant_count = 5;
    round.deltas_received = 3;
    try std.testing.expect(!round.canApply()); // still short two delta sets
    round.deltas_received = 5;
    try std.testing.expect(round.canApply());
}
// A detected topology change must trigger repair immediately.
test "repair: shouldRepair on node change" {
    const now = std.time.nanoTimestamp();
    try std.testing.expect(shouldRepair(5, now, true));
}
// A 25-hour-old last repair exceeds the 24h period and triggers repair.
test "repair: shouldRepair periodic" {
    const stale = std.time.nanoTimestamp() - (25 * 60 * 60 * std.time.ns_per_s);
    try std.testing.expect(shouldRepair(5, stale, false));
}
// A repair one hour ago with a stable topology needs no new round.
test "repair: shouldRepair recent" {
    const one_hour_ago = std.time.nanoTimestamp() - (1 * 60 * 60 * std.time.ns_per_s);
    try std.testing.expect(!shouldRepair(5, one_hour_ago, false));
}
// Threshold is max(total/3, 3) + 1 across small and large clusters.
test "repair: safetyThreshold" {
    const cases = [_]struct { total: usize, expected: usize }{
        .{ .total = 5, .expected = 4 }, // K floored at 3
        .{ .total = 9, .expected = 4 }, // K = 3
        .{ .total = 14, .expected = 5 }, // K = 4
        .{ .total = 100, .expected = 34 }, // K = 33
    };
    for (cases) |case| {
        try std.testing.expectEqual(case.expected, safetyThreshold(case.total));
    }
}

118
src/peer/verify.zig Normal file
View File

@ -0,0 +1,118 @@
/// Periodic verification of share integrity across guardians.
///
/// Randomly selects shares and asks peers to confirm they have the same
/// commitment hash. Detects tampering and data corruption.
const std = @import("std");
const log = @import("../log.zig");
const protocol = @import("protocol.zig");
const node_list = @import("../membership/node_list.zig");
/// Outcome of comparing a local share commitment against one peer's view.
pub const VerifyResult = struct {
    /// Identity the verification was performed for.
    identity: []const u8,
    /// Address of the answering peer; left empty by compareCommitments,
    /// filled in by the caller.
    peer_address: []const u8,
    /// True only when the peer holds the share AND its commitment root
    /// is byte-identical to ours.
    matches: bool,
    /// Whether the peer reported holding a share for this identity at all.
    peer_has_share: bool,
};
/// Send a verify request to a peer and wait for the response.
/// Returns null if the peer is unreachable, closes the connection before
/// sending a complete response, or returns invalid data.
pub fn verifyWithPeer(
    peer_address: []const u8,
    peer_port: u16,
    identity: []const u8,
) ?protocol.VerifyResponse {
    const address = std.net.Address.parseIp4(peer_address, peer_port) catch return null;
    const stream = std.net.tcpConnectToAddress(address) catch return null;
    defer stream.close();

    // Build the request; identities longer than the 64-byte field are truncated.
    var req = protocol.VerifyRequest{
        .identity = .{0} ** 64,
        .identity_len = @intCast(@min(identity.len, 64)),
    };
    @memcpy(req.identity[0..req.identity_len], identity[0..req.identity_len]);
    const payload = protocol.encodeVerifyRequest(req);
    const header = protocol.encodeHeader(.{
        .version = protocol.PROTOCOL_VERSION,
        .msg_type = .verify_request,
        .payload_len = payload.len,
    });
    stream.writeAll(&header) catch return null;
    stream.writeAll(&payload) catch return null;

    // Read the response header. readAll can legitimately return fewer bytes
    // when the peer closes early; the previous version ignored the count and
    // would decode a partially-undefined buffer, so treat short reads as failure.
    var resp_header_buf: [protocol.HEADER_SIZE]u8 = undefined;
    const header_read = stream.readAll(&resp_header_buf) catch return null;
    if (header_read != resp_header_buf.len) return null;
    const resp_header = protocol.decodeHeader(resp_header_buf) orelse return null;
    if (resp_header.msg_type != .verify_response) return null;
    if (resp_header.payload_len < 98) return null;

    // Read exactly the fixed-size response payload; reject short reads too.
    var resp_buf: [98]u8 = undefined;
    const payload_read = stream.readAll(&resp_buf) catch return null;
    if (payload_read != resp_buf.len) return null;
    return protocol.decodeVerifyResponse(&resp_buf);
}
/// Compare our local commitment with a peer's commitment.
/// `matches` is true only when the peer reports holding the share AND its
/// 32-byte commitment root equals `local_root`.
///
/// NOTE(review): the returned `identity` slice points into `peer_response`,
/// which is passed by value — the slice may dangle once the parameter's
/// storage is gone. Callers should copy it out (or this should take the
/// response by pointer); confirm intended lifetime.
pub fn compareCommitments(
    local_root: [32]u8,
    peer_response: protocol.VerifyResponse,
) VerifyResult {
    return VerifyResult{
        .identity = peer_response.identity[0..peer_response.identity_len],
        .peer_address = "", // caller fills in
        .peer_has_share = peer_response.has_share,
        .matches = peer_response.has_share and
            std.mem.eql(u8, &local_root, &peer_response.commitment_root),
    };
}
// Tests
// Identical roots on both sides must be reported as a match.
test "compareCommitments: matching roots" {
    const shared_root = [_]u8{0xAB} ** 32;
    const peer_view = protocol.VerifyResponse{
        .identity = .{0} ** 64,
        .identity_len = 4,
        .has_share = true,
        .commitment_root = shared_root,
    };
    const outcome = compareCommitments(shared_root, peer_view);
    try std.testing.expect(outcome.matches);
    try std.testing.expect(outcome.peer_has_share);
}
// A single flipped byte in the peer's root must break the match.
test "compareCommitments: mismatched roots" {
    const ours = [_]u8{0xAB} ** 32;
    var theirs = [_]u8{0xAB} ** 32;
    theirs[0] = 0xCD; // simulate tampering
    const peer_view = protocol.VerifyResponse{
        .identity = .{0} ** 64,
        .identity_len = 4,
        .has_share = true,
        .commitment_root = theirs,
    };
    try std.testing.expect(!compareCommitments(ours, peer_view).matches);
}
// A peer without the share can never match, regardless of root bytes.
test "compareCommitments: peer missing share" {
    const ours = [_]u8{0xAB} ** 32;
    const peer_view = protocol.VerifyResponse{
        .identity = .{0} ** 64,
        .identity_len = 4,
        .has_share = false,
        .commitment_root = .{0} ** 32,
    };
    const outcome = compareCommitments(ours, peer_view);
    try std.testing.expect(!outcome.matches);
    try std.testing.expect(!outcome.peer_has_share);
}

276
src/server/handler_auth.zig Normal file
View File

@ -0,0 +1,276 @@
/// POST /v1/vault/auth/challenge Request an authentication challenge.
/// POST /v1/vault/auth/session Exchange verified challenge for a session token.
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
const log = @import("../log.zig");
const challenge_mod = @import("../auth/challenge.zig");
const session_mod = @import("../auth/session.zig");
const guardian_mod = @import("../guardian.zig");
const IDENTITY_HEX_LEN = 64;
/// POST /v1/vault/auth/challenge
/// Request body: {"identity":"<64 hex chars>"}
/// Response: {"nonce":"<hex>","created_ns":<number>,"tag":"<hex>"}
pub fn handleChallenge(writer: anytype, body: []const u8, ctx: *const router.RouteContext) !void {
    if (body.len == 0) {
        return response.badRequest(writer, "empty body");
    }
    // Challenge generation needs the guardian's server secret.
    const guardian = ctx.guardian orelse {
        return response.internalError(writer);
    };

    // Parse the JSON body into just the field we need.
    const ChallengeReq = struct {
        identity: []const u8,
    };
    const parsed = std.json.parseFromSlice(ChallengeReq, ctx.allocator, body, .{}) catch {
        return response.badRequest(writer, "invalid JSON");
    };
    defer parsed.deinit();
    const identity = parsed.value.identity;

    // The identity must be a 64-character hex string.
    if (identity.len != IDENTITY_HEX_LEN) {
        return response.badRequest(writer, "identity must be exactly 64 hex characters");
    }
    for (identity) |char| {
        if (!std.ascii.isHex(char)) {
            return response.badRequest(writer, "identity must be hex");
        }
    }

    // Mint a challenge bound to this identity and our server secret.
    const challenge = challenge_mod.generateChallenge(identity, guardian.server_secret);

    // Serialize nonce/tag as lowercase hex and emit the JSON response.
    const nonce_hex = std.fmt.bytesToHex(challenge.nonce, .lower);
    const tag_hex = std.fmt.bytesToHex(challenge.tag, .lower);
    var resp_buf: [1024]u8 = undefined;
    const resp_body = std.fmt.bufPrint(&resp_buf,
        \\{{"nonce":"{s}","created_ns":{d},"tag":"{s}"}}
    , .{ &nonce_hex, challenge.created_ns, &tag_hex }) catch {
        return response.internalError(writer);
    };
    try response.jsonOk(writer, resp_body);
}
/// POST /v1/vault/auth/session
/// Request body: {"identity":"<hex>","nonce":"<hex>","created_ns":<number>,"tag":"<hex>"}
/// Response: {"identity":"<hex>","expiry_ns":<number>,"tag":"<hex>"}
/// (the previous doc claimed a {"token":...} response; the code below emits
/// the three token fields individually).
pub fn handleSession(writer: anytype, body: []const u8, ctx: *const router.RouteContext) !void {
    if (body.len == 0) {
        return response.badRequest(writer, "empty body");
    }
    // Challenge verification and token issuance need the server secret.
    const guardian = ctx.guardian orelse {
        return response.internalError(writer);
    };
    // Parse using manual extraction since we need hex-encoded byte arrays
    const identity = extractJsonString(body, "identity") orelse {
        return response.badRequest(writer, "missing identity field");
    };
    const nonce_hex = extractJsonString(body, "nonce") orelse {
        return response.badRequest(writer, "missing nonce field");
    };
    const tag_hex = extractJsonString(body, "tag") orelse {
        return response.badRequest(writer, "missing tag field");
    };
    const created_ns = extractJsonInt(body, "created_ns") orelse {
        return response.badRequest(writer, "missing created_ns field");
    };
    // Validate identity: must be a 64-char hex string.
    if (identity.len != IDENTITY_HEX_LEN) {
        return response.badRequest(writer, "identity must be exactly 64 hex characters");
    }
    // Decode nonce from hex
    if (nonce_hex.len != challenge_mod.CHALLENGE_SIZE * 2) {
        return response.badRequest(writer, "invalid nonce length");
    }
    var nonce: [challenge_mod.CHALLENGE_SIZE]u8 = undefined;
    _ = std.fmt.hexToBytes(&nonce, nonce_hex) catch {
        return response.badRequest(writer, "invalid nonce hex");
    };
    // Decode tag from hex
    const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
    if (tag_hex.len != HmacSha256.mac_length * 2) {
        return response.badRequest(writer, "invalid tag length");
    }
    var tag: [HmacSha256.mac_length]u8 = undefined;
    _ = std.fmt.hexToBytes(&tag, tag_hex) catch {
        return response.badRequest(writer, "invalid tag hex");
    };
    // Reconstruct the challenge exactly as issued by handleChallenge.
    const ch = challenge_mod.Challenge{
        .nonce = nonce,
        .created_ns = created_ns,
        .tag = tag,
    };
    // Verify the challenge: expired and invalid both map to 401.
    challenge_mod.verifyChallenge(ch, identity, guardian.server_secret) catch |err| {
        switch (err) {
            challenge_mod.AuthError.ChallengeExpired => return response.jsonError(writer, 401, "Unauthorized", "challenge expired"),
            challenge_mod.AuthError.InvalidChallenge => return response.jsonError(writer, 401, "Unauthorized", "invalid challenge"),
        }
    };
    // Issue session token
    const token = session_mod.issueToken(identity, guardian.server_secret);
    // Encode token fields as hex
    var resp_buf: [1024]u8 = undefined;
    const token_tag_hex = std.fmt.bytesToHex(token.tag, .lower);
    // For identity, we need to encode just the used portion.
    // bytesToHex requires a comptime-length array, hence the per-byte loop.
    // NOTE(review): this hex-encodes the identity bytes stored in the token,
    // which appear to already be the client's hex string — clients would
    // receive a double-encoded identity. Confirm against session_mod and
    // client expectations.
    var id_hex_buf: [128]u8 = undefined;
    const id_slice = token.identity[0..token.identity_len];
    var id_hex_len: usize = 0;
    for (id_slice) |b| {
        const hex_chars = std.fmt.bytesToHex([1]u8{b}, .lower);
        id_hex_buf[id_hex_len] = hex_chars[0];
        id_hex_buf[id_hex_len + 1] = hex_chars[1];
        id_hex_len += 2;
    }
    const resp_body = std.fmt.bufPrint(&resp_buf,
        \\{{"identity":"{s}","expiry_ns":{d},"tag":"{s}"}}
    , .{ id_hex_buf[0..id_hex_len], token.expiry_ns, &token_tag_hex }) catch {
        return response.internalError(writer);
    };
    try response.jsonOk(writer, resp_body);
}
/// Validate a session token from the X-Session-Token header.
/// Token wire format: "<identity_hex>:<expiry_ns>:<tag_hex>".
/// Returns the identity slice (pointing into `header_value`) on success.
/// Returns null when the header is absent, empty, malformed, or fails
/// verification — note this function returns null in all failure cases,
/// not an error (the previous doc comment claimed otherwise).
pub fn validateSessionToken(
    header_value: ?[]const u8,
    server_secret: [32]u8,
) ?[]const u8 {
    const token_str = header_value orelse return null;
    if (token_str.len == 0) return null;
    // Token format: <identity_hex>:<expiry_ns>:<tag_hex>
    var parts = std.mem.splitScalar(u8, token_str, ':');
    const id_hex = parts.next() orelse return null;
    const expiry_str = parts.next() orelse return null;
    const tag_hex_str = parts.next() orelse return null;
    if (id_hex.len == 0 or id_hex.len > 64) return null;
    const expiry_ns: i128 = std.fmt.parseInt(i128, expiry_str, 10) catch return null;
    const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;
    if (tag_hex_str.len != HmacSha256.mac_length * 2) return null;
    var tag: [HmacSha256.mac_length]u8 = undefined;
    _ = std.fmt.hexToBytes(&tag, tag_hex_str) catch return null;
    // Reconstruct the fixed-size token struct; unused identity bytes stay zero.
    var id_buf: [64]u8 = .{0} ** 64;
    const copy_len = @min(id_hex.len, 64);
    @memcpy(id_buf[0..copy_len], id_hex[0..copy_len]);
    const token = session_mod.SessionToken{
        .identity = id_buf,
        .identity_len = @intCast(copy_len),
        .expiry_ns = expiry_ns,
        .tag = tag,
    };
    // HMAC + expiry verification lives in session_mod; any failure maps to null.
    _ = session_mod.verifyToken(token, server_secret) catch return null;
    return id_hex;
}
// Manual JSON extraction (needed for hex-encoded byte arrays)
/// Locate `"key":"` (or `"key": "`) in `json` and return the raw string
/// value up to the next unescaped quote. Returns null if the key is absent
/// or the string is unterminated.
/// (Previous version scanned `json` up to three times — twice to find the
/// needle and once more to recompute which needle matched; this does one
/// search per needle form.)
fn extractJsonString(json: []const u8, key: []const u8) ?[]const u8 {
    var search_buf: [256]u8 = undefined;
    const search = std.fmt.bufPrint(&search_buf, "\"{s}\":\"", .{key}) catch return null;
    // Also accept a single space after the colon.
    var search_buf2: [256]u8 = undefined;
    const search2 = std.fmt.bufPrint(&search_buf2, "\"{s}\": \"", .{key}) catch return null;
    // Prefer the no-space form, same as before.
    const value_start = if (std.mem.indexOf(u8, json, search)) |idx|
        idx + search.len
    else if (std.mem.indexOf(u8, json, search2)) |idx|
        idx + search2.len
    else
        return null;
    if (value_start >= json.len) return null;
    // Scan to the closing quote, skipping backslash-escaped quotes.
    var i = value_start;
    while (i < json.len) : (i += 1) {
        if (json[i] == '"' and (i == value_start or json[i - 1] != '\\')) {
            return json[value_start..i];
        }
    }
    return null;
}
/// Extract an integer value for `key` from a flat JSON object.
/// Skips whitespace after the colon and handles a leading minus sign.
/// Returns null when the key is missing or no digits follow.
/// (Widened the whitespace skip to include '\n' and '\r', which are legal
/// JSON whitespace after a colon; the previous version only accepted
/// space and tab and rejected otherwise-valid bodies.)
fn extractJsonInt(json: []const u8, key: []const u8) ?i128 {
    var search_buf: [256]u8 = undefined;
    const search = std.fmt.bufPrint(&search_buf, "\"{s}\":", .{key}) catch return null;
    const start_idx = std.mem.indexOf(u8, json, search) orelse return null;
    var pos = start_idx + search.len;
    // Skip JSON whitespace (space, tab, newline, carriage return)
    while (pos < json.len and (json[pos] == ' ' or json[pos] == '\t' or json[pos] == '\n' or json[pos] == '\r')) : (pos += 1) {}
    if (pos >= json.len) return null;
    // Handle negative numbers
    var negative = false;
    if (json[pos] == '-') {
        negative = true;
        pos += 1;
    }
    // Collect the digit run and parse it (parseInt also rejects overflow).
    const digit_start = pos;
    while (pos < json.len and json[pos] >= '0' and json[pos] <= '9') : (pos += 1) {}
    if (pos == digit_start) return null;
    const value = std.fmt.parseInt(i128, json[digit_start..pos], 10) catch return null;
    return if (negative) -value else value;
}
// Tests
// Extracts a simple string value keyed by name.
test "extractJsonString: basic" {
    const doc = "{\"identity\":\"abcd1234\"}";
    const extracted = extractJsonString(doc, "identity");
    try std.testing.expect(extracted != null);
    try std.testing.expectEqualSlices(u8, "abcd1234", extracted.?);
}
// A key that is not present must yield null.
test "extractJsonString: missing key" {
    const doc = "{\"identity\":\"abcd1234\"}";
    try std.testing.expect(extractJsonString(doc, "nonce") == null);
}
// Plain positive integers are parsed.
test "extractJsonInt: basic" {
    const doc = "{\"created_ns\":12345}";
    const parsed = extractJsonInt(doc, "created_ns");
    try std.testing.expect(parsed != null);
    try std.testing.expectEqual(@as(i128, 12345), parsed.?);
}
// A leading minus sign must be honored.
test "extractJsonInt: negative" {
    const doc = "{\"value\":-42}";
    const parsed = extractJsonInt(doc, "value");
    try std.testing.expect(parsed != null);
    try std.testing.expectEqual(@as(i128, -42), parsed.?);
}

View File

@ -0,0 +1,61 @@
/// GET /v1/vault/guardians List active guardians.
///
/// Uses the real node list from the Guardian to report alive nodes,
/// threshold, and total count.
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
/// Render the guardians list as JSON into a fixed 4096-byte buffer and send
/// it. Falls back to a self-only single-node response when no guardian
/// state is attached to the route context.
/// NOTE(review): with enough alive nodes the 4096-byte buffer fills, the
/// buffered writes fail, and the handler returns 500 — confirm the buffer
/// is sized for the maximum expected cluster size.
pub fn handle(writer: anytype, ctx: *const router.RouteContext) !void {
    if (ctx.guardian) |guardian| {
        // Build guardians list from real node list
        // First pass: count alive nodes for sizing
        // NOTE(review): alive_count is computed but never read below —
        // dead code or a missing capacity check?
        var alive_count: usize = 0;
        for (guardian.nodes.nodes) |node| {
            if (node.state == .alive) alive_count += 1;
        }
        const threshold = guardian.nodes.threshold();
        const total = guardian.nodes.nodes.len;
        // Build JSON manually with a buffer
        var buf: [4096]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        const w = fbs.writer();
        w.writeAll("{\"guardians\":[") catch {
            return response.internalError(writer);
        };
        // Emit only alive nodes, comma-separating after the first entry.
        var first = true;
        for (guardian.nodes.nodes) |node| {
            if (node.state != .alive) continue;
            if (!first) {
                w.writeAll(",") catch {
                    return response.internalError(writer);
                };
            }
            first = false;
            std.fmt.format(w, "{{\"address\":\"{s}\",\"port\":{d}}}", .{ node.address, node.port }) catch {
                return response.internalError(writer);
            };
        }
        // Close the array and append cluster-wide threshold/total.
        std.fmt.format(w, "],\"threshold\":{d},\"total\":{d}}}", .{ threshold, total }) catch {
            return response.internalError(writer);
        };
        try response.jsonOk(writer, fbs.getWritten());
    } else {
        // Fallback: no guardian available, report self only
        var buf: [512]u8 = undefined;
        const body = std.fmt.bufPrint(&buf,
            \\{{"guardians":[{{"address":"{s}","port":{d}}}],"threshold":3,"total":1}}
        , .{ ctx.listen_address, ctx.client_port }) catch {
            return response.internalError(writer);
        };
        try response.jsonOk(writer, body);
    }
}

View File

@ -0,0 +1,50 @@
/// GET /v1/vault/health Liveness and readiness check.
///
/// Checks:
/// - Share count > 0 or data dir writable
/// - Peer connectivity (degraded if no peers)
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
/// GET /v1/vault/health handler: report status, share count, alive peer
/// count, and data-dir accessibility. Without guardian state, returns a
/// minimal static "ok" payload.
pub fn handle(writer: anytype, ctx: *const router.RouteContext) !void {
    const guardian = ctx.guardian orelse {
        // No guardian attached: minimal liveness response.
        try response.jsonOk(writer, "{\"status\":\"ok\",\"version\":\"0.1.0\"}");
        return;
    };
    const share_count = guardian.share_count;
    // Probe the data directory; an inaccessible dir marks us unhealthy.
    const data_dir_ok = blk: {
        std.fs.cwd().access(guardian.cfg.data_dir, .{}) catch break :blk false;
        break :blk true;
    };
    // Count alive peers, skipping our own entry in the node list.
    var alive_peers: usize = 0;
    for (guardian.nodes.nodes, 0..) |node, idx| {
        if (guardian.nodes.self_index != null and idx == guardian.nodes.self_index.?) continue;
        if (node.state == .alive) alive_peers += 1;
    }
    // unhealthy: data dir unreachable; degraded: no reachable peers; else ok.
    const status: []const u8 = if (!data_dir_ok)
        "unhealthy"
    else if (alive_peers == 0)
        "degraded"
    else
        "ok";
    var buf: [512]u8 = undefined;
    const body = std.fmt.bufPrint(&buf,
        \\{{"status":"{s}","version":"0.1.0","shares":{d},"peers":{d},"data_dir_ok":{s}}}
    , .{
        status,
        share_count,
        alive_peers,
        if (data_dir_ok) "true" else "false",
    }) catch {
        return response.internalError(writer);
    };
    try response.jsonOk(writer, body);
}

101
src/server/handler_pull.zig Normal file
View File

@ -0,0 +1,101 @@
/// POST /v1/vault/pull Retrieve a share for a user.
///
/// Expects JSON body: {"identity":"<hex>"}
/// Returns: {"share":"<base64>"} or 404 if not found.
/// Uses file_store.readShare() for HMAC integrity verification.
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
const log = @import("../log.zig");
const file_store = @import("../storage/file_store.zig");
const handler_auth = @import("handler_auth.zig");
/// Maximum request body size for pull requests. Only contains {"identity":"<64 hex>"}.
const MAX_BODY_SIZE = 4096;
/// Handle POST /v1/vault/pull: validate the request, load the share with
/// HMAC verification, and stream it back as {"share":"<base64>"} with an
/// exact Content-Length.
pub fn handle(writer: anytype, body: []const u8, ctx: *const router.RouteContext, session_token: ?[]const u8) !void {
    if (body.len == 0) {
        return response.badRequest(writer, "empty body");
    }
    if (body.len > MAX_BODY_SIZE) {
        return response.badRequest(writer, "request body too large");
    }
    // Optional session token validation (backwards compat: skip if not present)
    if (ctx.guardian) |guardian| {
        if (session_token) |tok| {
            if (handler_auth.validateSessionToken(tok, guardian.server_secret) == null) {
                return response.jsonError(writer, 401, "Unauthorized", "invalid session token");
            }
        }
    }
    // Parse JSON
    const PullBody = struct {
        identity: []const u8,
    };
    const parsed = std.json.parseFromSlice(PullBody, ctx.allocator, body, .{}) catch {
        return response.badRequest(writer, "invalid JSON");
    };
    defer parsed.deinit();
    const identity = parsed.value.identity;
    // Validate identity is exactly 64 hex chars (SHA-256 hash)
    if (identity.len != 64) {
        return response.badRequest(writer, "identity must be exactly 64 hex characters");
    }
    for (identity) |c| {
        if (!std.ascii.isHex(c)) {
            return response.badRequest(writer, "identity must be hex");
        }
    }
    // Derive integrity key from guardian server_secret (or use fallback)
    const integrity_key: []const u8 = if (ctx.guardian) |guardian|
        &guardian.server_secret
    else
        "vault-default-integrity-key!!!!!";
    // Read share from storage with HMAC integrity verification
    const share_data = file_store.readShare(ctx.data_dir, identity, integrity_key, ctx.allocator) catch |err| {
        if (err == file_store.StoreError.IoError) {
            // Distinguish "never stored" (404) from a genuine I/O failure (500).
            const exists = file_store.shareExists(ctx.data_dir, identity, ctx.allocator) catch false;
            if (!exists) {
                return response.jsonError(writer, 404, "Not Found", "share not found");
            }
        }
        if (err == file_store.StoreError.IntegrityCheckFailed) {
            log.err("integrity check failed for {s}", .{identity});
            return response.internalError(writer);
        }
        log.err("failed to read share for {s}: {}", .{ identity, err });
        return response.internalError(writer);
    };
    defer ctx.allocator.free(share_data);
    // Base64 encode the share for transport.
    const encoded_len = std.base64.standard.Encoder.calcSize(share_data.len);
    const encoded = ctx.allocator.alloc(u8, encoded_len) catch {
        return response.internalError(writer);
    };
    defer ctx.allocator.free(encoded);
    _ = std.base64.standard.Encoder.encode(encoded, share_data);
    // Stream the response in parts to avoid a huge stack buffer.
    // Body is: {"share":"<base64>"}. Content-Length is computed from the
    // actual prefix/suffix literals — the previous hand-counted `11 + n + 2`
    // was one byte too large (prefix is 10 bytes), leaving clients waiting
    // on a byte that never arrives.
    const body_prefix = "{\"share\":\"";
    const body_suffix = "\"}";
    const body_len = body_prefix.len + encoded.len + body_suffix.len;
    try writer.writeAll("HTTP/1.1 200 OK\r\n");
    try writer.writeAll("Content-Type: application/json\r\n");
    try std.fmt.format(writer, "Content-Length: {d}\r\n", .{body_len});
    try writer.writeAll("Connection: close\r\n");
    try writer.writeAll("\r\n");
    try writer.writeAll(body_prefix);
    try writer.writeAll(encoded);
    try writer.writeAll(body_suffix);
    log.info("served share for identity {s} ({d} bytes)", .{ identity, share_data.len });
}

148
src/server/handler_push.zig Normal file
View File

@ -0,0 +1,148 @@
/// POST /v1/vault/push Store a share for a user.
///
/// Expects JSON body: {"identity":"<hex>","share":"<base64>","index":<n>,"version":<n>}
/// Stores the share to disk with HMAC integrity via file_store.
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
const log = @import("../log.zig");
const file_store = @import("../storage/file_store.zig");
const handler_auth = @import("handler_auth.zig");
/// Maximum request body size (1 MiB). Prevents memory exhaustion from oversized payloads.
const MAX_BODY_SIZE = 1024 * 1024;
/// Maximum decoded share size (512 KiB). Encrypted Shamir shares should be small.
const MAX_SHARE_SIZE = 512 * 1024;
/// Identity hash must be exactly 64 hex chars (SHA-256 output).
const IDENTITY_HEX_LEN = 64;
/// Handle POST /v1/vault/push: validate, decode, anti-rollback check, then
/// persist the share with HMAC integrity plus a version marker file.
/// NOTE(review): the file header documents an "index" field in the request
/// body, but PushBody below has no such field — with default parse options
/// std.json rejects unknown fields, so a body containing "index" would be
/// answered with "invalid JSON". Confirm the intended schema.
pub fn handle(writer: anytype, body: []const u8, ctx: *const router.RouteContext, session_token: ?[]const u8) !void {
    if (body.len == 0) {
        return response.badRequest(writer, "empty body");
    }
    // Reject oversized request bodies before parsing
    if (body.len > MAX_BODY_SIZE) {
        return response.badRequest(writer, "request body too large");
    }
    // Optional session token validation (backwards compat: skip if not present)
    if (ctx.guardian) |guardian| {
        if (session_token) |tok| {
            if (handler_auth.validateSessionToken(tok, guardian.server_secret) == null) {
                return response.jsonError(writer, 401, "Unauthorized", "invalid session token");
            }
        }
    }
    // Parse JSON
    const PushBody = struct {
        identity: []const u8,
        share: []const u8,
        version: u64,
    };
    const parsed = std.json.parseFromSlice(PushBody, ctx.allocator, body, .{}) catch {
        return response.badRequest(writer, "invalid JSON");
    };
    defer parsed.deinit();
    const identity = parsed.value.identity;
    const share_b64 = parsed.value.share;
    const version = parsed.value.version;
    // Validate identity is exactly 64 hex chars (SHA-256 hash)
    if (identity.len != IDENTITY_HEX_LEN) {
        return response.badRequest(writer, "identity must be exactly 64 hex characters");
    }
    for (identity) |c| {
        if (!std.ascii.isHex(c)) {
            return response.badRequest(writer, "identity must be hex");
        }
    }
    // Decode base64 share
    const decoded_len = std.base64.standard.Decoder.calcSizeForSlice(share_b64) catch {
        return response.badRequest(writer, "invalid base64 in share");
    };
    // Reject oversized shares before allocating
    if (decoded_len > MAX_SHARE_SIZE) {
        return response.badRequest(writer, "share data too large");
    }
    const share_data = ctx.allocator.alloc(u8, decoded_len) catch {
        return response.internalError(writer);
    };
    defer ctx.allocator.free(share_data);
    std.base64.standard.Decoder.decode(share_data, share_b64) catch {
        return response.badRequest(writer, "invalid base64 in share");
    };
    if (share_data.len == 0) {
        return response.badRequest(writer, "share data is empty");
    }
    // Anti-rollback: reject shares with version <= current stored version
    const current_version = readCurrentVersion(ctx.data_dir, identity, ctx.allocator);
    if (current_version) |cur_ver| {
        if (version <= cur_ver) {
            log.warn("rejected rollback for {s}: version {d} <= current {d}", .{ identity, version, cur_ver });
            return response.badRequest(writer, "version must be greater than current stored version");
        }
    }
    // Derive integrity key from guardian server_secret (or use fallback)
    const integrity_key: []const u8 = if (ctx.guardian) |guardian|
        &guardian.server_secret
    else
        "vault-default-integrity-key!!!!!";
    // Write share data to storage with HMAC integrity
    file_store.writeShare(ctx.data_dir, identity, share_data, integrity_key, ctx.allocator) catch |err| {
        log.err("failed to write share for {s}: {}", .{ identity, err });
        return response.internalError(writer);
    };
    // Write version file
    writeVersionFile(ctx.data_dir, identity, version) catch |err| {
        log.err("failed to write version for {s}: {}", .{ identity, err });
        // Share was written but the version file wasn't — not fatal, but log it.
    };
    log.info("stored share for identity {s} ({d} bytes, version {d})", .{ identity, share_data.len, version });
    try response.jsonOk(writer, "{\"status\":\"stored\"}");
}
/// Read the current stored version for an identity from
/// <data_dir>/shares/<identity>/version.
/// Returns null when the file is missing, unreadable, oversized, or does
/// not parse as a u64.
fn readCurrentVersion(data_dir: []const u8, identity: []const u8, allocator: std.mem.Allocator) ?u64 {
    var path_buf: [4096]u8 = undefined;
    const version_path = std.fmt.bufPrint(&path_buf, "{s}/shares/{s}/version", .{ data_dir, identity }) catch return null;
    // The file only ever holds a decimal u64, so 32 bytes is plenty.
    const contents = std.fs.cwd().readFileAlloc(allocator, version_path, 32) catch return null;
    defer allocator.free(contents);
    if (std.fmt.parseInt(u64, contents, 10)) |stored_version| {
        return stored_version;
    } else |_| {
        return null;
    }
}
/// Write the version counter atomically to: <data_dir>/shares/<identity>/version
///
/// Writes to a ".tmp" sibling, flushes and closes it, then renames it over
/// the final path so readers never observe a partial write. The previous
/// version renamed (and, on failure, deleted) the tmp file while it was
/// still open — fine on POSIX but fails on Windows — and never fsynced,
/// so the "atomic" write was not durable. On any failure the tmp file is
/// cleaned up best-effort.
fn writeVersionFile(data_dir: []const u8, identity: []const u8, version: u64) !void {
    var path_buf: [4096]u8 = undefined;
    const tmp_path = std.fmt.bufPrint(&path_buf, "{s}/shares/{s}/version.tmp", .{ data_dir, identity }) catch return error.PathTooLong;
    var path_buf2: [4096]u8 = undefined;
    const final_path = std.fmt.bufPrint(&path_buf2, "{s}/shares/{s}/version", .{ data_dir, identity }) catch return error.PathTooLong;
    var ver_buf: [20]u8 = undefined; // max u64 is 20 digits
    const ver_str = std.fmt.bufPrint(&ver_buf, "{d}", .{version}) catch return error.PathTooLong;
    {
        const file = try std.fs.cwd().createFile(tmp_path, .{});
        // On error, remove the stale tmp file. Declared before the close
        // defer so (LIFO) the file is closed first, then deleted.
        errdefer std.fs.cwd().deleteFile(tmp_path) catch {};
        defer file.close();
        try file.writeAll(ver_str);
        // Flush to stable storage before the rename makes it visible.
        try file.sync();
    }
    std.fs.cwd().rename(tmp_path, final_path) catch |rename_err| {
        std.fs.cwd().deleteFile(tmp_path) catch {};
        return rename_err;
    };
}

View File

@ -0,0 +1,323 @@
/// V2 CRUD handlers for named secrets.
///
/// PUT /v2/vault/secrets/{name} Store a named secret
/// GET /v2/vault/secrets/{name} Retrieve a named secret
/// DELETE /v2/vault/secrets/{name} Delete a named secret
/// GET /v2/vault/secrets List all secrets for the identity
///
/// All endpoints require session auth (X-Session-Token header).
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
const log = @import("../log.zig");
const vault_store = @import("../storage/vault_store.zig");
const handler_auth = @import("handler_auth.zig");
/// Maximum request body size for PUT (1 MiB).
const MAX_BODY_SIZE = 1024 * 1024;
/// PUT /v2/vault/secrets/{name}
/// Body: {"share":"<base64>","version":<u64>}
/// Auth: X-Session-Token (mandatory, identity extracted from token)
///
/// Stores (or replaces) one named secret share for the authenticated
/// identity. The supplied version must be greater than the version on
/// disk; otherwise the store rejects the write (VersionConflict -> 400).
pub fn handlePut(
    writer: anytype,
    body: []const u8,
    name: []const u8,
    ctx: *const router.RouteContext,
    session_token: ?[]const u8,
) !void {
    // Auth first: requireAuth has already written a 401/500 response when
    // it returns null, so we just bail out.
    const identity = requireAuth(writer, ctx, session_token) orelse return;
    if (body.len == 0) {
        return response.badRequest(writer, "empty body");
    }
    if (body.len > MAX_BODY_SIZE) {
        return response.badRequest(writer, "request body too large");
    }
    // Validate secret name
    vault_store.validateSecretName(name) catch |err| {
        return switch (err) {
            vault_store.VaultStoreError.SecretNameRequired => response.badRequest(writer, "secret name required"),
            vault_store.VaultStoreError.SecretNameTooLong => response.badRequest(writer, "secret name too long"),
            vault_store.VaultStoreError.SecretNameInvalid => response.badRequest(writer, "secret name invalid: only alphanumeric, underscore, hyphen allowed"),
            else => response.badRequest(writer, "invalid secret name"),
        };
    };
    // Parse JSON body
    const PutBody = struct {
        share: []const u8,
        version: u64,
    };
    const parsed = std.json.parseFromSlice(PutBody, ctx.allocator, body, .{}) catch {
        return response.badRequest(writer, "invalid JSON: expected {\"share\":\"<base64>\",\"version\":<u64>}");
    };
    // `parsed` owns the memory behind share_b64; the deferred deinit runs at
    // scope exit, after the last use of share_b64 below.
    defer parsed.deinit();
    const share_b64 = parsed.value.share;
    const version = parsed.value.version;
    // Decode base64
    const decoded_len = std.base64.standard.Decoder.calcSizeForSlice(share_b64) catch {
        return response.badRequest(writer, "invalid base64 in share");
    };
    // Check the decoded size before allocating.
    if (decoded_len > vault_store.MAX_SECRET_SIZE) {
        return response.badRequest(writer, "share data too large");
    }
    const share_data = ctx.allocator.alloc(u8, decoded_len) catch {
        return response.internalError(writer);
    };
    defer ctx.allocator.free(share_data);
    std.base64.standard.Decoder.decode(share_data, share_b64) catch {
        return response.badRequest(writer, "invalid base64 in share");
    };
    if (share_data.len == 0) {
        return response.badRequest(writer, "share data is empty");
    }
    // Derive integrity key
    // NOTE(review): when no guardian is attached we fall back to a static,
    // publicly known key — presumably test-only; confirm this branch is
    // unreachable in production deployments.
    const integrity_key: []const u8 = if (ctx.guardian) |guardian|
        &guardian.server_secret
    else
        "vault-default-integrity-key!!!!!";
    // Write to vault store
    vault_store.writeSecret(ctx.data_dir, identity, name, share_data, version, integrity_key, ctx.allocator) catch |err| {
        return switch (err) {
            vault_store.VaultStoreError.VersionConflict => response.badRequest(writer, "version must be greater than current stored version"),
            vault_store.VaultStoreError.SecretLimitExceeded => response.badRequest(writer, "secret limit exceeded"),
            vault_store.VaultStoreError.SecretDataTooLarge => response.badRequest(writer, "share data too large"),
            else => {
                log.err("failed to write secret '{s}' for {s}: {}", .{ name, identity, err });
                return response.internalError(writer);
            },
        };
    };
    log.info("stored secret '{s}' for identity {s} ({d} bytes, version {d})", .{ name, identity, share_data.len, version });
    // Build response
    // Interpolating `name` into the JSON is safe here: validateSecretName
    // restricted it to alphanumerics, underscore and hyphen above.
    var resp_buf: [512]u8 = undefined;
    const resp_body = std.fmt.bufPrint(&resp_buf,
        \\{{"status":"stored","name":"{s}","version":{d}}}
    , .{ name, version }) catch {
        return response.internalError(writer);
    };
    try response.jsonOk(writer, resp_body);
}
/// GET /v2/vault/secrets/{name}
/// Auth: X-Session-Token (mandatory)
///
/// Returns the stored share (base64-encoded) plus its metadata. A missing
/// secret maps to 404; a failed integrity check is reported to the client
/// only as a generic 500 (details go to the log, never to the wire).
pub fn handleGet(
    writer: anytype,
    name: []const u8,
    ctx: *const router.RouteContext,
    session_token: ?[]const u8,
) !void {
    const identity = requireAuth(writer, ctx, session_token) orelse return;
    // Derive integrity key
    // NOTE(review): static fallback key when no guardian is attached —
    // same concern as in handlePut; confirm test-only.
    const integrity_key: []const u8 = if (ctx.guardian) |guardian|
        &guardian.server_secret
    else
        "vault-default-integrity-key!!!!!";
    // Read secret
    const share_data = vault_store.readSecret(ctx.data_dir, identity, name, integrity_key, ctx.allocator) catch |err| {
        return switch (err) {
            vault_store.VaultStoreError.NotFound => response.jsonError(writer, 404, "Not Found", "secret not found"),
            vault_store.VaultStoreError.IntegrityCheckFailed => {
                log.err("integrity check failed for secret '{s}' identity {s}", .{ name, identity });
                return response.internalError(writer);
            },
            else => {
                log.err("failed to read secret '{s}' for {s}: {}", .{ name, identity, err });
                return response.internalError(writer);
            },
        };
    };
    defer ctx.allocator.free(share_data);
    // Read metadata
    const meta = vault_store.readMeta(ctx.data_dir, identity, name, ctx.allocator) catch {
        // If we can read the share but not meta, still return what we have
        // (version and timestamps are reported as 0 in that case).
        return writeGetResponse(writer, share_data, name, 0, 0, 0, ctx.allocator);
    };
    return writeGetResponse(writer, share_data, name, meta.version, meta.created_ns, meta.updated_ns, ctx.allocator);
}
/// DELETE /v2/vault/secrets/{name}
/// Auth: X-Session-Token (mandatory)
///
/// Removes one named secret for the authenticated identity; a missing
/// secret maps to 404, any other store failure to a generic 500.
pub fn handleDelete(
    writer: anytype,
    name: []const u8,
    ctx: *const router.RouteContext,
    session_token: ?[]const u8,
) !void {
    // requireAuth has already written a 401/500 when it returns null.
    const identity = requireAuth(writer, ctx, session_token) orelse return;

    vault_store.deleteSecret(ctx.data_dir, identity, name, ctx.allocator) catch |err| {
        if (err == vault_store.VaultStoreError.NotFound) {
            return response.jsonError(writer, 404, "Not Found", "secret not found");
        }
        log.err("failed to delete secret '{s}' for {s}: {}", .{ name, identity, err });
        return response.internalError(writer);
    };
    log.info("deleted secret '{s}' for identity {s}", .{ name, identity });

    var scratch: [256]u8 = undefined;
    const payload = std.fmt.bufPrint(&scratch,
        \\{{"status":"deleted","name":"{s}"}}
    , .{name}) catch return response.internalError(writer);
    try response.jsonOk(writer, payload);
}
/// GET /v2/vault/secrets
/// Auth: X-Session-Token (mandatory)
///
/// Lists every secret stored for the authenticated identity, with
/// per-secret metadata. Metadata that cannot be read is reported as
/// zeros rather than failing the whole listing.
pub fn handleList(
    writer: anytype,
    ctx: *const router.RouteContext,
    session_token: ?[]const u8,
) !void {
    const identity = requireAuth(writer, ctx, session_token) orelse return;
    const names = vault_store.listSecrets(ctx.data_dir, identity, ctx.allocator) catch {
        return response.internalError(writer);
    };
    // listSecrets hands us owned name strings; free each plus the outer slice.
    defer {
        for (names) |n| ctx.allocator.free(n);
        ctx.allocator.free(names);
    }
    // Build JSON response by writing parts to the writer directly.
    // First, gather all metadata to calculate content length.
    const SecretInfo = struct {
        name: []const u8,
        version: u64,
        size: usize,
        created_ns: i128,
        updated_ns: i128,
    };
    var infos = ctx.allocator.alloc(SecretInfo, names.len) catch {
        return response.internalError(writer);
    };
    defer ctx.allocator.free(infos);
    for (names, 0..) |n, i| {
        // Best-effort metadata: a broken/missing meta file degrades to zeros.
        const meta = vault_store.readMeta(ctx.data_dir, identity, n, ctx.allocator) catch {
            infos[i] = .{ .name = n, .version = 0, .size = 0, .created_ns = 0, .updated_ns = 0 };
            continue;
        };
        infos[i] = .{
            .name = n,
            .version = meta.version,
            .size = meta.size,
            .created_ns = meta.created_ns,
            .updated_ns = meta.updated_ns,
        };
    }
    // Build JSON body in a dynamic buffer
    // NOTE(review): names come from the on-disk listing and are interpolated
    // into JSON unescaped — presumably they were validated on write
    // (validateSecretName); confirm listSecrets cannot surface other entries.
    var body_buf: std.ArrayListUnmanaged(u8) = .{};
    defer body_buf.deinit(ctx.allocator);
    body_buf.appendSlice(ctx.allocator, "{\"secrets\":[") catch return response.internalError(writer);
    for (infos, 0..) |info, i| {
        if (i > 0) {
            body_buf.append(ctx.allocator, ',') catch return response.internalError(writer);
        }
        var item_buf: [512]u8 = undefined;
        const item = std.fmt.bufPrint(&item_buf,
            \\{{"name":"{s}","version":{d},"size":{d},"created_ns":{d},"updated_ns":{d}}}
        , .{ info.name, info.version, info.size, info.created_ns, info.updated_ns }) catch {
            return response.internalError(writer);
        };
        body_buf.appendSlice(ctx.allocator, item) catch return response.internalError(writer);
    }
    body_buf.appendSlice(ctx.allocator, "]}") catch return response.internalError(writer);
    try response.jsonOk(writer, body_buf.items);
}
// Internal helpers
/// Validates the session token and extracts the caller's identity.
/// On any failure the appropriate error response (401, or 500 when no
/// guardian is attached) has already been written and null is returned;
/// callers simply `orelse return`.
fn requireAuth(
    writer: anytype,
    ctx: *const router.RouteContext,
    session_token: ?[]const u8,
) ?[]const u8 {
    if (ctx.guardian) |guardian| {
        if (session_token) |tok| {
            if (handler_auth.validateSessionToken(tok, guardian.server_secret)) |identity| {
                return identity;
            }
            response.jsonError(writer, 401, "Unauthorized", "invalid session token") catch {};
            return null;
        }
        response.jsonError(writer, 401, "Unauthorized", "session token required") catch {};
        return null;
    }
    // No guardian attached: auth is impossible, report a server-side error.
    response.internalError(writer) catch {};
    return null;
}
/// Write the GET response with base64-encoded share data and metadata.
/// Body shape:
///   {"share":"<b64>","name":"<name>","version":<v>,"created_ns":<ts>,"updated_ns":<ts>}
/// The body is streamed in three parts so the (potentially large) base64
/// payload never has to fit into a single stack buffer.
fn writeGetResponse(
    writer: anytype,
    share_data: []const u8,
    name: []const u8,
    version: u64,
    created_ns: i128,
    updated_ns: i128,
    allocator: std.mem.Allocator,
) !void {
    // Base64-encode the share payload into a heap buffer.
    const encoder = std.base64.standard.Encoder;
    const b64 = allocator.alloc(u8, encoder.calcSize(share_data.len)) catch {
        return response.internalError(writer);
    };
    defer allocator.free(b64);
    _ = encoder.encode(b64, share_data);

    // Fixed prefix plus a small formatted tail carrying the metadata.
    const head = "{\"share\":\"";
    var tail_buf: [512]u8 = undefined;
    const tail = std.fmt.bufPrint(&tail_buf,
        \\","name":"{s}","version":{d},"created_ns":{d},"updated_ns":{d}}}
    , .{ name, version, created_ns, updated_ns }) catch {
        return response.internalError(writer);
    };

    const total = head.len + b64.len + tail.len;
    try writer.writeAll("HTTP/1.1 200 OK\r\n");
    try writer.writeAll("Content-Type: application/json\r\n");
    try std.fmt.format(writer, "Content-Length: {d}\r\n", .{total});
    try writer.writeAll("Connection: close\r\n");
    try writer.writeAll("\r\n");
    try writer.writeAll(head);
    try writer.writeAll(b64);
    try writer.writeAll(tail);
}

View File

@ -0,0 +1,14 @@
/// GET /v1/vault/status Guardian status info.
const std = @import("std");
const response = @import("response.zig");
const router = @import("router.zig");
/// GET /v1/vault/status — report guardian build/config info as JSON.
/// NOTE(review): data_dir is interpolated into JSON unescaped; a path
/// containing quotes or backslashes would break the body — confirm paths
/// are constrained by configuration.
pub fn handle(writer: anytype, ctx: *const router.RouteContext) !void {
    var scratch: [512]u8 = undefined;
    const payload = std.fmt.bufPrint(
        &scratch,
        \\{{"status":"ok","version":"0.1.0","data_dir":"{s}","client_port":{d},"peer_port":{d}}}
    ,
        .{ ctx.data_dir, ctx.client_port, ctx.peer_port },
    ) catch return response.internalError(writer);
    try response.jsonOk(writer, payload);
}

206
src/server/listener.zig Normal file
View File

@ -0,0 +1,206 @@
/// TCP listener for client-facing HTTP server (port 7500).
///
/// Single-threaded accept loop. Each connection is handled synchronously.
/// Supports graceful shutdown via atomic flag and per-IP rate limiting.
const std = @import("std");
const log = @import("../log.zig");
const router = @import("router.zig");
const response = @import("response.zig");
const posix = std.posix;
const MAX_REQUEST_SIZE = 1024 * 1024; // 1 MB max request
const READ_BUF_SIZE = 64 * 1024; // 64 KB read buffer
/// Rate limit: max requests per IP per window
const RATE_LIMIT_MAX = 120;
/// Rate limit window in seconds
const RATE_LIMIT_WINDOW_S: i64 = 60;
/// Per-IP rate limit entry
const RateEntry = struct {
    /// Requests counted within the current window.
    count: u32,
    /// Unix timestamp (seconds) at which the current window began.
    window_start: i64,
};
/// Start the HTTP server. Blocks until shutdown flag is set.
///
/// Single-threaded: accepts one connection at a time and handles it
/// synchronously before accepting the next. The shutdown flag is polled
/// between accepts (SO_RCVTIMEO makes accept() return WouldBlock about
/// once per second).
pub fn serve(ctx: router.RouteContext, running: *std.atomic.Value(bool)) !void {
    const address = std.net.Address.parseIp(ctx.listen_address, ctx.client_port) catch |err| {
        log.err("invalid listen address {s}:{d}: {}", .{ ctx.listen_address, ctx.client_port, err });
        return err;
    };
    var server = address.listen(.{
        .reuse_address = true,
    }) catch |err| {
        log.err("failed to bind {s}:{d}: {}", .{ ctx.listen_address, ctx.client_port, err });
        return err;
    };
    defer server.deinit();
    // Set a receive timeout so accept() doesn't block forever.
    // This allows us to check the shutdown flag periodically.
    const timeout = posix.timeval{ .sec = 1, .usec = 0 };
    posix.setsockopt(server.stream.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, std.mem.asBytes(&timeout)) catch {
        log.warn("failed to set SO_RCVTIMEO, shutdown may be delayed", .{});
    };
    log.info("HTTP server listening on {s}:{d}", .{ ctx.listen_address, ctx.client_port });
    // Rate limiter: IP string -> RateEntry
    // Keys are heap-duplicated by isRateLimited; this defer frees whatever
    // keys remain when the server shuts down.
    var rate_map = std.StringHashMap(RateEntry).init(ctx.allocator);
    defer {
        var it = rate_map.iterator();
        while (it.next()) |entry| {
            ctx.allocator.free(entry.key_ptr.*);
        }
        rate_map.deinit();
    }
    var last_rate_cleanup: i64 = std.time.timestamp();
    while (running.load(.acquire)) {
        const conn = server.accept() catch |err| {
            // WouldBlock is the RCVTIMEO tick: loop around and re-check `running`.
            if (err == error.WouldBlock) continue;
            log.warn("accept error: {}", .{err});
            continue;
        };
        // Periodically clean up stale rate limit entries
        const now = std.time.timestamp();
        if (now - last_rate_cleanup > RATE_LIMIT_WINDOW_S) {
            cleanupRateMap(&rate_map, now, ctx.allocator);
            last_rate_cleanup = now;
        }
        // Extract peer IP for rate limiting
        // 45 bytes is enough for the longest textual IPv6 address.
        var ip_buf: [45]u8 = undefined;
        const peer_ip = formatIp(conn.address, &ip_buf);
        // Rate limit check
        if (isRateLimited(&rate_map, peer_ip, now, ctx.allocator)) {
            // Send a minimal 429 and drop the connection without routing.
            var resp_buf: [512]u8 = undefined;
            var fbs = std.io.fixedBufferStream(&resp_buf);
            const w = fbs.writer();
            response.jsonError(w, 429, "Too Many Requests", "rate limit exceeded") catch {};
            conn.stream.writeAll(fbs.getWritten()) catch {};
            conn.stream.close();
            continue;
        }
        handleConnection(conn, &ctx) catch |err| {
            log.warn("connection error: {}", .{err});
        };
    }
    log.info("HTTP server shutting down", .{});
}
/// Format an address to a string for use as rate-limit key.
/// Returns a slice into the provided buffer, or "unknown" if the buffer
/// is too small or the address family is unsupported.
///
/// IPv4 peers are rendered as a dotted quad; IPv6 peers as 32 lowercase
/// hex characters (fits the caller's 45-byte buffer). Previously the
/// function unconditionally read `addr.in`, so IPv6 peers were
/// misinterpreted as IPv4 and produced meaningless keys.
fn formatIp(addr: std.net.Address, buf: []u8) []const u8 {
    switch (addr.any.family) {
        std.posix.AF.INET => {
            const ip_bytes: [4]u8 = @bitCast(addr.in.sa.addr);
            return std.fmt.bufPrint(buf, "{d}.{d}.{d}.{d}", .{
                ip_bytes[0], ip_bytes[1], ip_bytes[2], ip_bytes[3],
            }) catch "unknown";
        },
        std.posix.AF.INET6 => {
            // Raw hex of the 16 address bytes: stable and unique per host,
            // which is all the rate limiter needs (not display-formatted).
            return std.fmt.bufPrint(buf, "{}", .{std.fmt.fmtSliceHexLower(&addr.in6.sa.addr)}) catch "unknown";
        },
        else => return "unknown",
    }
}
/// Count one request from `key_slice` against the sliding window and
/// report whether the caller should be rejected. Unknown IPs are inserted
/// with a freshly-duplicated key owned by the map; allocation failures
/// degrade to "not limited" (fail-open).
fn isRateLimited(
    rate_map: *std.StringHashMap(RateEntry),
    key_slice: []const u8,
    now: i64,
    allocator: std.mem.Allocator,
) bool {
    if (key_slice.len == 0) return false;

    const entry = rate_map.getPtr(key_slice) orelse {
        // First sighting of this IP: open a fresh window with one request.
        const owned_key = allocator.dupe(u8, key_slice) catch return false;
        rate_map.put(owned_key, .{ .count = 1, .window_start = now }) catch {
            allocator.free(owned_key);
        };
        return false;
    };

    const window_expired = now - entry.window_start >= RATE_LIMIT_WINDOW_S;
    if (window_expired) {
        entry.* = .{ .count = 1, .window_start = now };
        return false;
    }
    entry.count += 1;
    return entry.count > RATE_LIMIT_MAX;
}
/// Evict rate-limit entries whose window expired at least two windows ago,
/// freeing their owned keys. Removal is done in two phases (collect, then
/// remove) because mutating the map would invalidate the live iterator.
fn cleanupRateMap(rate_map: *std.StringHashMap(RateEntry), now: i64, allocator: std.mem.Allocator) void {
    // Collect keys to remove into a bounded stack buffer.
    // If there are more stale entries than fit, they'll be cleaned next cycle.
    var to_remove: [64][]const u8 = undefined;
    var remove_count: usize = 0;
    var it = rate_map.iterator();
    while (it.next()) |entry| {
        if (now - entry.value_ptr.window_start >= RATE_LIMIT_WINDOW_S * 2) {
            if (remove_count < to_remove.len) {
                to_remove[remove_count] = entry.key_ptr.*;
                remove_count += 1;
            }
        }
    }
    // The key slices stay valid after remove() because the map does not own
    // or free them — we free each one explicitly here.
    for (to_remove[0..remove_count]) |key| {
        _ = rate_map.remove(key);
        allocator.free(key);
    }
}
/// Read one HTTP request from the connection, route it, and write the
/// response. The connection is always closed on return.
///
/// Both the request and the response are capped at READ_BUF_SIZE (64 KiB).
/// NOTE(review): a response larger than 64 KiB will fail the fixed-buffer
/// write inside route() and be replaced by a 500 — confirm the largest
/// base64-encoded share plus headers fits, or responses for big secrets
/// will never succeed.
fn handleConnection(conn: std.net.Server.Connection, ctx: *const router.RouteContext) !void {
    defer conn.stream.close();
    // Read the full request into a buffer
    var buf: [READ_BUF_SIZE]u8 = undefined;
    var total: usize = 0;
    while (total < buf.len) {
        const n = conn.stream.read(buf[total..]) catch |err| {
            // Peer resets are routine; don't surface them as handler errors.
            if (err == error.ConnectionResetByPeer) return;
            return err;
        };
        if (n == 0) break;
        total += n;
        // Once the header terminator is visible, keep reading only until the
        // declared Content-Length worth of body bytes has arrived.
        if (std.mem.indexOf(u8, buf[0..total], "\r\n\r\n")) |headers_end| {
            const req = router.parseRequest(buf[0..total]) orelse break;
            const body_start = headers_end + 4;
            const body_received = total - body_start;
            if (body_received >= req.content_length) break;
        }
    }
    if (total == 0) return;
    // Re-parse the complete buffer; a malformed request gets a 400.
    const req = router.parseRequest(buf[0..total]) orelse {
        var resp_stream = conn.stream;
        var write_buf: [4096]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&write_buf);
        const writer = fbs.writer();
        try response.badRequest(writer, "malformed request");
        const written = fbs.getWritten();
        resp_stream.writeAll(written) catch {};
        return;
    };
    // Render the whole response into a fixed buffer, then write it in one go.
    var resp_buf: [READ_BUF_SIZE]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&resp_buf);
    const writer = fbs.writer();
    router.route(req, writer, ctx) catch |err| {
        // Handler failed (possibly mid-write): discard the partial response
        // and replace it with a clean 500.
        log.warn("handler error for {s}: {}", .{ req.path, err });
        var err_fbs = std.io.fixedBufferStream(&resp_buf);
        const err_writer = err_fbs.writer();
        response.internalError(err_writer) catch return;
        conn.stream.writeAll(err_fbs.getWritten()) catch {};
        return;
    };
    conn.stream.writeAll(fbs.getWritten()) catch {};
}

54
src/server/response.zig Normal file
View File

@ -0,0 +1,54 @@
/// HTTP JSON response helpers.
const std = @import("std");
/// Response content types known to this module.
/// NOTE(review): the helpers below hard-code "application/json" rather
/// than going through this enum — it appears unused within this file;
/// confirm external callers before removing.
pub const ContentType = enum {
    json,
    text,
    /// Returns the MIME string for the Content-Type header.
    pub fn header(self: ContentType) []const u8 {
        return switch (self) {
            .json => "application/json",
            .text => "text/plain",
        };
    }
};
/// Write a complete 200 response with a JSON body.
/// `body` is sent verbatim; the caller is responsible for it being valid JSON.
pub fn jsonOk(writer: anytype, body: []const u8) !void {
    try std.fmt.format(
        writer,
        "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: {d}\r\nConnection: close\r\n\r\n",
        .{body.len},
    );
    try writer.writeAll(body);
}
/// Write a JSON error response with given status code.
/// `message` is embedded verbatim inside a JSON string, so callers must
/// pass JSON-safe text (no quotes, backslashes or control characters) —
/// every current call site uses a static literal.
pub fn jsonError(writer: anytype, status_code: u16, status_text: []const u8, message: []const u8) !void {
    // Build JSON error body. If the message does not fit the buffer, fall
    // back to a generic but still-valid JSON body. (Previously the raw
    // message was sent as the body, which was not JSON even though the
    // Content-Type header claimed it was.)
    var buf: [1024]u8 = undefined;
    const body = std.fmt.bufPrint(&buf, "{{\"error\":\"{s}\"}}", .{message}) catch
        "{\"error\":\"error message too long\"}";
    try std.fmt.format(writer, "HTTP/1.1 {d} {s}\r\n", .{ status_code, status_text });
    try writer.writeAll("Content-Type: application/json\r\n");
    try std.fmt.format(writer, "Content-Length: {d}\r\n", .{body.len});
    try writer.writeAll("Connection: close\r\n");
    try writer.writeAll("\r\n");
    try writer.writeAll(body);
}
/// 404 with a generic JSON body.
pub fn notFound(writer: anytype) !void {
    try jsonError(writer, 404, "Not Found", "not found");
}
/// 405 for paths that exist but do not accept the request method.
pub fn methodNotAllowed(writer: anytype) !void {
    try jsonError(writer, 405, "Method Not Allowed", "method not allowed");
}
/// 400 carrying a caller-supplied (JSON-safe) message.
pub fn badRequest(writer: anytype, message: []const u8) !void {
    try jsonError(writer, 400, "Bad Request", message);
}
/// 500 with a fixed message; internals are never leaked to clients.
pub fn internalError(writer: anytype) !void {
    try jsonError(writer, 500, "Internal Server Error", "internal server error");
}

220
src/server/router.zig Normal file
View File

@ -0,0 +1,220 @@
/// Minimal HTTP request router.
///
/// Parses the request line (method + path) and dispatches to handlers.
/// No dependencies on std.http.Server we parse raw HTTP/1.1 ourselves
/// for simplicity and to avoid API churn.
const std = @import("std");
const log = @import("../log.zig");
const response = @import("response.zig");
const handler_health = @import("handler_health.zig");
const handler_status = @import("handler_status.zig");
const handler_guardians = @import("handler_guardians.zig");
const handler_push = @import("handler_push.zig");
const handler_pull = @import("handler_pull.zig");
const handler_auth = @import("handler_auth.zig");
const handler_secrets = @import("handler_secrets.zig");
const guardian_mod = @import("../guardian.zig");
/// A parsed HTTP/1.1 request, with slices pointing into the original
/// read buffer (no copies are made; lifetime is the buffer's).
pub const Request = struct {
    /// Parsed method; .UNKNOWN for unrecognized verbs.
    method: Method,
    /// Request path exactly as written on the request line (the query
    /// string, if any, is not split off).
    path: []const u8,
    /// All bytes after the blank line; may be shorter or longer than
    /// content_length — see parseRequest.
    body: []const u8,
    /// Content-Length header value; 0 when absent or unparsable.
    content_length: usize,
    /// Authorization header value (if present)
    authorization: ?[]const u8,
    /// X-Session-Token header value (if present)
    session_token: ?[]const u8,
};
/// Supported HTTP methods; anything unrecognized maps to .UNKNOWN.
pub const Method = enum {
    GET,
    POST,
    PUT,
    DELETE,
    OPTIONS,
    UNKNOWN,

    /// Map a request-line method token to a Method. Matching is exact
    /// (case-sensitive, like the original chain of comparisons), so
    /// lowercase verbs and unknown tokens yield .UNKNOWN.
    pub fn fromString(s: []const u8) Method {
        return std.meta.stringToEnum(Method, s) orelse .UNKNOWN;
    }
};
/// Parse HTTP request from raw bytes.
/// Returns null if the request is malformed (no request line, missing
/// method or path, or no "\r\n\r\n" header terminator).
///
/// All returned slices alias `buf`. Note that `body` is simply everything
/// after the header terminator — it is NOT clipped to content_length, so
/// extra (e.g. pipelined) bytes would be included; callers that care must
/// check content_length themselves.
pub fn parseRequest(buf: []const u8) ?Request {
    // Find end of request line
    const request_line_end = std.mem.indexOf(u8, buf, "\r\n") orelse return null;
    const request_line = buf[0..request_line_end];
    // Parse: METHOD /path HTTP/1.x
    var parts = std.mem.splitScalar(u8, request_line, ' ');
    const method_str = parts.next() orelse return null;
    const path = parts.next() orelse return null;
    // Skip HTTP version (parts.next())
    const method = Method.fromString(method_str);
    // Parse headers
    var content_length: usize = 0;
    var authorization: ?[]const u8 = null;
    var session_token: ?[]const u8 = null;
    const headers_end = std.mem.indexOf(u8, buf, "\r\n\r\n") orelse return null;
    const headers_section = buf[request_line_end + 2 .. headers_end];
    var header_iter = std.mem.splitSequence(u8, headers_section, "\r\n");
    while (header_iter.next()) |header_line| {
        if (header_line.len == 0) continue;
        // Header names are matched case-insensitively; values are trimmed
        // on the left only (trailing whitespace, if any, is kept).
        if (std.ascii.startsWithIgnoreCase(header_line, "content-length:")) {
            // An unparsable length silently degrades to 0.
            const value = std.mem.trimLeft(u8, header_line["content-length:".len..], " ");
            content_length = std.fmt.parseInt(usize, value, 10) catch 0;
        } else if (std.ascii.startsWithIgnoreCase(header_line, "authorization:")) {
            authorization = std.mem.trimLeft(u8, header_line["authorization:".len..], " ");
        } else if (std.ascii.startsWithIgnoreCase(header_line, "x-session-token:")) {
            session_token = std.mem.trimLeft(u8, header_line["x-session-token:".len..], " ");
        }
    }
    const body_start = headers_end + 4;
    const body = if (body_start < buf.len) buf[body_start..] else &[_]u8{};
    return Request{
        .method = method,
        .path = path,
        .body = body,
        .content_length = content_length,
        .authorization = authorization,
        .session_token = session_token,
    };
}
/// Route a parsed request to the appropriate handler.
///
/// Exact-match dispatch for fixed paths, plus a prefix match for the
/// /v2/vault/secrets subtree. Known path + wrong method -> 405; anything
/// unmatched (including OPTIONS on unknown paths) -> 404.
pub fn route(req: Request, writer: anytype, ctx: *const RouteContext) !void {
    // Health check (no auth required)
    if (std.mem.eql(u8, req.path, "/v1/vault/health")) {
        if (req.method != .GET) return response.methodNotAllowed(writer);
        return handler_health.handle(writer, ctx);
    }
    // Status
    if (std.mem.eql(u8, req.path, "/v1/vault/status")) {
        if (req.method != .GET) return response.methodNotAllowed(writer);
        return handler_status.handle(writer, ctx);
    }
    // Guardians list
    if (std.mem.eql(u8, req.path, "/v1/vault/guardians")) {
        if (req.method != .GET) return response.methodNotAllowed(writer);
        return handler_guardians.handle(writer, ctx);
    }
    // Auth: challenge
    if (std.mem.eql(u8, req.path, "/v1/vault/auth/challenge")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_auth.handleChallenge(writer, req.body, ctx);
    }
    // Auth: session
    if (std.mem.eql(u8, req.path, "/v1/vault/auth/session")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_auth.handleSession(writer, req.body, ctx);
    }
    // Push share
    if (std.mem.eql(u8, req.path, "/v1/vault/push")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_push.handle(writer, req.body, ctx, req.session_token);
    }
    // Pull share
    if (std.mem.eql(u8, req.path, "/v1/vault/pull")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_pull.handle(writer, req.body, ctx, req.session_token);
    }
    // V2: Auth endpoints (same handlers, new path prefix)
    if (std.mem.eql(u8, req.path, "/v2/vault/auth/challenge")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_auth.handleChallenge(writer, req.body, ctx);
    }
    if (std.mem.eql(u8, req.path, "/v2/vault/auth/session")) {
        if (req.method != .POST) return response.methodNotAllowed(writer);
        return handler_auth.handleSession(writer, req.body, ctx);
    }
    // V2: Named secrets CRUD
    if (std.mem.startsWith(u8, req.path, "/v2/vault/secrets")) {
        const prefix = "/v2/vault/secrets";
        const suffix = req.path[prefix.len..];
        if (suffix.len == 0) {
            // GET /v2/vault/secrets -> list
            if (req.method != .GET) return response.methodNotAllowed(writer);
            return handler_secrets.handleList(writer, ctx, req.session_token);
        }
        // "/{name}" — the raw remainder is the secret name (handlers
        // validate it); anything not starting with '/' falls through to 404.
        if (suffix[0] == '/') {
            const name = suffix[1..];
            if (name.len == 0) return response.badRequest(writer, "secret name required");
            return switch (req.method) {
                .PUT => handler_secrets.handlePut(writer, req.body, name, ctx, req.session_token),
                .GET => handler_secrets.handleGet(writer, name, ctx, req.session_token),
                .DELETE => handler_secrets.handleDelete(writer, name, ctx, req.session_token),
                else => response.methodNotAllowed(writer),
            };
        }
    }
    return response.notFound(writer);
}
/// Shared, read-only context handed to every request handler.
pub const RouteContext = struct {
    /// Root directory of the on-disk vault store.
    data_dir: []const u8,
    /// Address the client listener binds to.
    listen_address: []const u8,
    /// Client-facing HTTP port.
    client_port: u16,
    /// Guardian peer port (reported by the status endpoint).
    peer_port: u16,
    /// Allocator used for per-request work by the handlers.
    allocator: std.mem.Allocator,
    /// Guardian state; when null, handlers that require auth respond 500
    /// (see requireAuth in handler_secrets.zig).
    guardian: ?*guardian_mod.Guardian = null,
};
// Tests
// Coverage: request-line parsing, Content-Length handling, the two auth
// headers, and rejection of malformed input.
test "parseRequest: GET with no body" {
    const raw = "GET /v1/vault/health HTTP/1.1\r\nHost: localhost\r\n\r\n";
    const req = parseRequest(raw).?;
    try std.testing.expectEqual(Method.GET, req.method);
    try std.testing.expectEqualSlices(u8, "/v1/vault/health", req.path);
    try std.testing.expectEqual(@as(usize, 0), req.content_length);
}
test "parseRequest: POST with content-length" {
    const raw = "POST /v1/vault/push HTTP/1.1\r\nContent-Length: 5\r\n\r\nhello";
    const req = parseRequest(raw).?;
    try std.testing.expectEqual(Method.POST, req.method);
    try std.testing.expectEqualSlices(u8, "/v1/vault/push", req.path);
    try std.testing.expectEqual(@as(usize, 5), req.content_length);
    try std.testing.expectEqualSlices(u8, "hello", req.body);
}
test "parseRequest: authorization header" {
    const raw = "POST /v1/vault/push HTTP/1.1\r\nAuthorization: Bearer abc123\r\n\r\n";
    const req = parseRequest(raw).?;
    try std.testing.expectEqualSlices(u8, "Bearer abc123", req.authorization.?);
}
test "parseRequest: session token header" {
    const raw = "POST /v1/vault/push HTTP/1.1\r\nX-Session-Token: tok123\r\n\r\n";
    const req = parseRequest(raw).?;
    try std.testing.expectEqualSlices(u8, "tok123", req.session_token.?);
}
test "parseRequest: malformed returns null" {
    // No header terminator at all, and a request line missing its path.
    try std.testing.expect(parseRequest("garbage") == null);
    try std.testing.expect(parseRequest("GET\r\n\r\n") == null);
}

337
src/sss/combine.zig Normal file
View File

@ -0,0 +1,337 @@
/// Shamir Secret Sharing Combine (Lagrange interpolation).
///
/// Reconstructs the secret from K or more shares using Lagrange interpolation
/// at x=0 over GF(2^8).
const std = @import("std");
const gf = @import("field.zig");
const types = @import("types.zig");
/// Errors surfaced by combine().
pub const CombineError = error{
    /// Fewer than two shares were supplied.
    NotEnoughShares,
    /// Shares disagree on the secret length.
    MismatchedShareLengths,
    /// A share has x == 0 (x=0 is the secret itself and is never issued).
    ZeroShareIndex,
    /// Two shares carry the same x coordinate.
    DuplicateShareIndices,
    /// Division by zero in GF(2^8) — unreachable once duplicates are rejected.
    DivisionByZero,
    /// Allocation of the result buffer failed.
    OutOfMemory,
};
/// Reconstructs the secret from K or more shares.
///
/// shares: Slice of Share structs. Must have at least 2 shares, all with the
/// same y length, non-zero and pairwise-distinct x coordinates.
/// Returns the reconstructed secret. Caller must free and zero the result.
///
/// Lagrange interpolation is evaluated at x=0 over GF(2^8). The basis
/// coefficients L_i(0) depend only on the x coordinates, so they are
/// computed once up front instead of once per byte (previously this made
/// the loop O(len * k^2); it is now O(k^2 + len * k)). Computing them
/// before allocating `result` also removes the error path on which the
/// old code leaked `result` (the `try gf.div` inside the byte loop).
pub fn combine(
    allocator: std.mem.Allocator,
    shares: []const types.Share,
) CombineError![]u8 {
    if (shares.len < 2) return CombineError.NotEnoughShares;
    const secret_len = shares[0].y.len;
    // Validate shares
    for (shares) |share| {
        if (share.x == 0) return CombineError.ZeroShareIndex;
        if (share.y.len != secret_len) return CombineError.MismatchedShareLengths;
    }
    // Check for duplicate x values
    for (shares, 0..) |a, i| {
        for (shares[i + 1 ..]) |b| {
            if (a.x == b.x) return CombineError.DuplicateShareIndices;
        }
    }
    // Precompute the Lagrange basis L_i(0) for each share:
    //   L_i(0) = Product_{j!=i} (0 - x_j) / (x_i - x_j)
    //          = Product_{j!=i} x_j / (x_i XOR x_j)   (in GF(2^8), sub == XOR)
    // The basis holds no secret material (x coords only), so no zeroing needed.
    const basis = allocator.alloc(u8, shares.len) catch return CombineError.OutOfMemory;
    defer allocator.free(basis);
    for (shares, 0..) |share_i, i| {
        var acc: u8 = 1;
        for (shares, 0..) |share_j, j| {
            if (i == j) continue;
            const num = share_j.x;
            const den = gf.sub(share_i.x, share_j.x);
            acc = gf.mul(acc, try gf.div(num, den));
        }
        basis[i] = acc;
    }
    // Allocate result buffer. No fallible operation follows, so there is
    // no error path that could leak it.
    const result = allocator.alloc(u8, secret_len) catch return CombineError.OutOfMemory;
    @memset(result, 0);
    // Interpolate each byte position: value = Sum_i y_i[b] * L_i(0)
    for (0..secret_len) |byte_idx| {
        var value: u8 = 0;
        for (shares, 0..) |share_i, i| {
            value = gf.add(value, gf.mul(share_i.y[byte_idx], basis[i]));
        }
        result[byte_idx] = value;
    }
    return result;
}
// Tests
// Coverage: round-trips for several (N, K) configurations and secret
// shapes, the information-theoretic property that K-1 shares reveal
// nothing reliable, deterministic reconstruction from hand-built
// polynomials, and rejection of every invalid-input class.
const split_mod = @import("split.zig");
test "round-trip: 2-of-3 single byte" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 3, 2);
    defer share_set.deinit(allocator);
    // Any 2 of 3 shares should reconstruct
    const pairs = [_][2]usize{ .{ 0, 1 }, .{ 0, 2 }, .{ 1, 2 } };
    for (pairs) |pair| {
        const subset = [_]types.Share{ share_set.shares[pair[0]], share_set.shares[pair[1]] };
        const recovered = try combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, &secret, recovered);
    }
}
test "round-trip: 3-of-5 multi-byte" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    const subset = [_]types.Share{
        share_set.shares[0],
        share_set.shares[2],
        share_set.shares[4],
    };
    const recovered = try combine(allocator, &subset);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "round-trip: 2-of-2 minimum" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 0xFF, 0x00, 0x55, 0xAA };
    const share_set = try split_mod.split(allocator, &secret, 2, 2);
    defer share_set.deinit(allocator);
    const recovered = try combine(allocator, share_set.shares);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "round-trip: all C(5,3) = 10 subsets" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 42, 137, 255, 0 };
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    var count: usize = 0;
    for (0..5) |i| {
        for (i + 1..5) |j| {
            for (j + 1..5) |l| {
                const subset = [_]types.Share{
                    share_set.shares[i],
                    share_set.shares[j],
                    share_set.shares[l],
                };
                const recovered = try combine(allocator, &subset);
                defer {
                    @memset(recovered, 0);
                    allocator.free(recovered);
                }
                try std.testing.expectEqualSlices(u8, &secret, recovered);
                count += 1;
            }
        }
    }
    try std.testing.expectEqual(@as(usize, 10), count);
}
test "round-trip: large secret (256 bytes)" {
    const allocator = std.testing.allocator;
    var secret: [256]u8 = undefined;
    for (&secret, 0..) |*b, i| b.* = @truncate(i);
    const share_set = try split_mod.split(allocator, &secret, 10, 5);
    defer share_set.deinit(allocator);
    // Use first 5 shares
    const recovered = try combine(allocator, share_set.shares[0..5]);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "round-trip: all-zeros secret" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{0} ** 32;
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    const subset = [_]types.Share{
        share_set.shares[0],
        share_set.shares[2],
        share_set.shares[4],
    };
    const recovered = try combine(allocator, &subset);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "round-trip: all-0xFF secret" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{0xFF} ** 32;
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    const subset = [_]types.Share{
        share_set.shares[1],
        share_set.shares[3],
        share_set.shares[4],
    };
    const recovered = try combine(allocator, &subset);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "more than K shares also reconstructs" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 1, 2, 3 };
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    // Using 4 shares (more than K=3)
    const recovered = try combine(allocator, share_set.shares[0..4]);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "K-1 shares produce wrong result" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    // With only 2 shares (K-1), should NOT consistently give back 42
    var match_count: usize = 0;
    for (0..5) |i| {
        for (i + 1..5) |j| {
            const subset = [_]types.Share{ share_set.shares[i], share_set.shares[j] };
            const result = try combine(allocator, &subset);
            defer {
                @memset(result, 0);
                allocator.free(result);
            }
            if (result[0] == 42) match_count += 1;
        }
    }
    // All 10 pairs matching would be astronomically unlikely
    try std.testing.expect(match_count < 10);
}
test "deterministic: known polynomial, manual shares" {
    // p(x) = 42 + 5x + 7x^2 (K=3, secret=42)
    // Manually evaluate at x=1,2,3 using the polynomial module
    const allocator = std.testing.allocator;
    const poly_mod = @import("polynomial.zig");
    const coeffs = [_]u8{ 42, 5, 7 };
    const y1 = poly_mod.evaluate(&coeffs, 1); // p(1)
    const y2 = poly_mod.evaluate(&coeffs, 2); // p(2)
    const y3 = poly_mod.evaluate(&coeffs, 3); // p(3)
    const shares = [_]types.Share{
        .{ .x = 1, .y = &[_]u8{y1} },
        .{ .x = 2, .y = &[_]u8{y2} },
        .{ .x = 3, .y = &[_]u8{y3} },
    };
    const recovered = try combine(allocator, &shares);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqual(@as(u8, 42), recovered[0]);
}
test "deterministic: secret=0, known polynomial" {
    // p(x) = 0 + 0xAB*x + 0xCD*x^2 (secret=0)
    const allocator = std.testing.allocator;
    const poly_mod = @import("polynomial.zig");
    const coeffs = [_]u8{ 0, 0xAB, 0xCD };
    const y1 = poly_mod.evaluate(&coeffs, 1);
    const y3 = poly_mod.evaluate(&coeffs, 3);
    const y5 = poly_mod.evaluate(&coeffs, 5);
    const shares = [_]types.Share{
        .{ .x = 1, .y = &[_]u8{y1} },
        .{ .x = 3, .y = &[_]u8{y3} },
        .{ .x = 5, .y = &[_]u8{y5} },
    };
    const recovered = try combine(allocator, &shares);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqual(@as(u8, 0), recovered[0]);
}
// Invalid-input rejection cases below.
test "combine: rejects fewer than 2 shares" {
    const allocator = std.testing.allocator;
    const empty: []const types.Share = &.{};
    try std.testing.expectError(CombineError.NotEnoughShares, combine(allocator, empty));
    const single = [_]types.Share{.{ .x = 1, .y = &[_]u8{1} }};
    try std.testing.expectError(CombineError.NotEnoughShares, combine(allocator, &single));
}
test "combine: rejects mismatched share lengths" {
    const allocator = std.testing.allocator;
    const shares = [_]types.Share{
        .{ .x = 1, .y = &[_]u8{ 1, 2 } },
        .{ .x = 2, .y = &[_]u8{3} },
    };
    try std.testing.expectError(CombineError.MismatchedShareLengths, combine(allocator, &shares));
}
test "combine: rejects x=0" {
    const allocator = std.testing.allocator;
    const shares = [_]types.Share{
        .{ .x = 0, .y = &[_]u8{1} },
        .{ .x = 1, .y = &[_]u8{2} },
    };
    try std.testing.expectError(CombineError.ZeroShareIndex, combine(allocator, &shares));
}
test "combine: rejects duplicate x values" {
    const allocator = std.testing.allocator;
    const shares = [_]types.Share{
        .{ .x = 1, .y = &[_]u8{1} },
        .{ .x = 1, .y = &[_]u8{2} },
    };
    try std.testing.expectError(CombineError.DuplicateShareIndices, combine(allocator, &shares));
}

276
src/sss/commitment.zig Normal file
View File

@ -0,0 +1,276 @@
/// Merkle commitment tree over Shamir shares.
///
/// Each share is hashed (SHA-256) to form a leaf. Leaves are paired and hashed
/// up to a single root. Guardians cross-check roots to detect tampering.
///
/// Tree is padded to the next power of 2 with zero-hashes.
const std = @import("std");
const Sha256 = std.crypto.hash.sha2.Sha256;
const types = @import("types.zig");
/// Builds a Merkle commitment tree from shares.
/// Returns a CommitmentTree with the root hash and all leaf hashes.
/// Caller owns the returned tree and must deinit it.
pub fn buildTree(
    allocator: std.mem.Allocator,
    shares: []const types.Share,
) !types.CommitmentTree {
    if (shares.len == 0) return error.EmptyShares;

    // One leaf per share: H(x || y).
    const leaf_hashes = try allocator.alloc([32]u8, shares.len);
    errdefer allocator.free(leaf_hashes);
    for (leaf_hashes, shares) |*leaf, share| {
        var hasher = Sha256.init(.{});
        hasher.update(&[_]u8{share.x});
        hasher.update(share.y);
        leaf.* = hasher.finalResult();
    }

    return types.CommitmentTree{
        .root = try computeRoot(allocator, leaf_hashes),
        .leaves = leaf_hashes,
    };
}
/// Generates a Merkle proof for a specific share index.
/// Rebuilds the tree (via buildTree) to obtain the leaf hashes, then walks
/// the levels bottom-up recording, per level, the sibling hash and whether
/// the tracked node is the right child.
/// Caller owns the returned proof and must deinit it.
pub fn generateProof(
    allocator: std.mem.Allocator,
    shares: []const types.Share,
    index: usize,
) !types.MerkleProof {
    if (index >= shares.len) return error.IndexOutOfBounds;
    const tree = try buildTree(allocator, shares);
    defer tree.deinit(allocator);
    // Compute proof path
    // depth = number of levels above the (power-of-2 padded) leaf row,
    // which is exactly how many siblings the proof needs.
    const depth = ceilLog2(shares.len);
    var siblings = try allocator.alloc([32]u8, depth);
    errdefer allocator.free(siblings);
    var directions = try allocator.alloc(bool, depth);
    errdefer allocator.free(directions);
    // Pad leaves to next power of 2
    const padded_len = nextPow2(shares.len);
    var current_level = try allocator.alloc([32]u8, padded_len);
    defer allocator.free(current_level);
    // Copy leaf hashes, pad with zeros
    for (0..padded_len) |i| {
        if (i < tree.leaves.len) {
            current_level[i] = tree.leaves[i];
        } else {
            current_level[i] = [_]u8{0} ** 32;
        }
    }
    var current_index = index;
    var level_len = padded_len;
    var proof_idx: usize = 0;
    // Halve the level in place each round until only the root remains.
    while (level_len > 1) {
        const sibling_index = current_index ^ 1; // flip last bit
        siblings[proof_idx] = current_level[sibling_index];
        directions[proof_idx] = (current_index & 1) == 1; // true if we're on the right
        // Compute next level
        const next_len = level_len / 2;
        for (0..next_len) |i| {
            current_level[i] = hashPair(current_level[i * 2], current_level[i * 2 + 1]);
        }
        current_index /= 2;
        level_len = next_len;
        proof_idx += 1;
    }
    // The loop runs once per level, so proof_idx == depth here and the
    // slices below cover their full allocations (safe to free later).
    return types.MerkleProof{
        .leaf_index = index,
        .siblings = siblings[0..proof_idx],
        .directions = directions[0..proof_idx],
    };
}
/// Verifies a Merkle proof against an expected root.
/// Recomputes the leaf hash H(x || y) for `share`, folds in each sibling
/// along the proof path, and compares the result to `expected_root`.
pub fn verifyProof(
    share: types.Share,
    proof: types.MerkleProof,
    expected_root: [32]u8,
) bool {
    var hasher = Sha256.init(.{});
    hasher.update(&[_]u8{share.x});
    hasher.update(share.y);
    var node = hasher.finalResult();

    // Walk up the tree, level by level.
    for (proof.siblings, proof.directions) |sibling, on_right| {
        node = if (on_right)
            hashPair(sibling, node) // we are the right child
        else
            hashPair(node, sibling); // we are the left child
    }

    return std.mem.eql(u8, &node, &expected_root);
}
// Internal helpers
// Hashes the concatenation of two child nodes: H(left || right).
fn hashPair(left: [32]u8, right: [32]u8) [32]u8 {
    var hasher = Sha256.init(.{});
    hasher.update(left[0..]);
    hasher.update(right[0..]);
    return hasher.finalResult();
}
// Reduces a list of leaf hashes to a single Merkle root.
// Leaves are copied into a scratch buffer padded with zero-hashes up to the
// next power of two, then pairwise-hashed in place until one node remains.
fn computeRoot(allocator: std.mem.Allocator, leaves: []const [32]u8) ![32]u8 {
    const width = nextPow2(leaves.len);
    const nodes = try allocator.alloc([32]u8, width);
    defer allocator.free(nodes);

    @memcpy(nodes[0..leaves.len], leaves);
    @memset(nodes[leaves.len..], [_]u8{0} ** 32);

    var remaining = width;
    while (remaining > 1) : (remaining /= 2) {
        for (0..remaining / 2) |i| {
            nodes[i] = hashPair(nodes[2 * i], nodes[2 * i + 1]);
        }
    }
    return nodes[0];
}
// Smallest power of two >= n; n = 0 or 1 maps to 1.
//
// Implemented as a doubling loop instead of the bit-smearing trick because
// the old `v |= v >> 32;` fails to compile on targets where `usize` is
// 32 bits wide: the shift amount for a u32 must fit in u5, and the
// comptime-known 32 does not. The loop is portable to any usize width.
// Callers pass share counts (<= 255 here), so the doubling cannot overflow.
fn nextPow2(n: usize) usize {
    var result: usize = 1;
    while (result < n) {
        result <<= 1;
    }
    return result;
}
// ceil(log2(n)): the number of halving steps from nextPow2(n) down to a
// single node, i.e. the Merkle proof depth. Returns 0 for n <= 1.
fn ceilLog2(n: usize) usize {
    if (n <= 1) return 0;
    var bits: usize = 0;
    var remaining = n - 1;
    while (remaining > 0) : (remaining >>= 1) {
        bits += 1;
    }
    return bits;
}
// Tests
const split_mod = @import("split.zig");
// Commitment tree tests: build, determinism, proofs, and tamper detection.
test "commitment: build tree from shares" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    const tree = try buildTree(allocator, share_set.shares);
    defer tree.deinit(allocator);
    // Root should be 32 bytes (SHA-256)
    try std.testing.expectEqual(@as(usize, 32), tree.root.len);
    // Should have one leaf per share
    try std.testing.expectEqual(@as(usize, 5), tree.leaves.len);
}
test "commitment: same shares produce same root" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 3, 2);
    defer share_set.deinit(allocator);
    // Building twice from identical shares must be deterministic.
    const tree1 = try buildTree(allocator, share_set.shares);
    defer tree1.deinit(allocator);
    const tree2 = try buildTree(allocator, share_set.shares);
    defer tree2.deinit(allocator);
    try std.testing.expectEqualSlices(u8, &tree1.root, &tree2.root);
}
test "commitment: proof generation and verification" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 1, 2, 3, 4, 5 };
    const share_set = try split_mod.split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    const tree = try buildTree(allocator, share_set.shares);
    defer tree.deinit(allocator);
    // Generate and verify proof for each share
    for (share_set.shares, 0..) |share, i| {
        const proof = try generateProof(allocator, share_set.shares, i);
        defer proof.deinit(allocator);
        try std.testing.expect(verifyProof(share, proof, tree.root));
    }
}
test "commitment: tampered share fails verification" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 3, 2);
    defer share_set.deinit(allocator);
    const tree = try buildTree(allocator, share_set.shares);
    defer tree.deinit(allocator);
    const proof = try generateProof(allocator, share_set.shares, 0);
    defer proof.deinit(allocator);
    // Tamper with the share
    // Same x-coordinate but altered payload must change the leaf hash.
    const tampered = types.Share{
        .x = share_set.shares[0].x,
        .y = &[_]u8{0xFF}, // wrong data
    };
    try std.testing.expect(!verifyProof(tampered, proof, tree.root));
}
test "commitment: wrong root fails verification" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split_mod.split(allocator, &secret, 3, 2);
    defer share_set.deinit(allocator);
    const proof = try generateProof(allocator, share_set.shares, 0);
    defer proof.deinit(allocator);
    const wrong_root = [_]u8{0xAB} ** 32;
    try std.testing.expect(!verifyProof(share_set.shares[0], proof, wrong_root));
}
test "commitment: single share tree" {
    const allocator = std.testing.allocator;
    // Can't split into 1 share (need K>=2, N>=K), so manually build tree
    const share = types.Share{ .x = 1, .y = &[_]u8{42} };
    const shares = [_]types.Share{share};
    const tree = try buildTree(allocator, &shares);
    defer tree.deinit(allocator);
    try std.testing.expectEqual(@as(usize, 1), tree.leaves.len);
}

243
src/sss/field.zig Normal file
View File

@ -0,0 +1,243 @@
/// GF(2^8) Galois Field arithmetic.
///
/// Uses the AES irreducible polynomial: x^8 + x^4 + x^3 + x + 1 (0x11B).
/// Precomputed log/exp tables at comptime for zero-runtime-cost lookups.
///
/// All operations are over the finite field GF(2^8) = GF(256):
/// - add(a, b) = a XOR b
/// - mul(a, b) via log/exp tables
/// - inv(a) = exp[255 - log[a]]
/// - div(a, b) = mul(a, inv(b))
const std = @import("std");
// Inversion and division of/by zero are undefined in any field.
pub const FieldError = error{DivisionByZero};
/// Irreducible polynomial: x^8 + x^4 + x^3 + x + 1
const POLY: u16 = 0x11B;
/// Generator (primitive element) for GF(2^8)
/// Note: the exp_table builder below inlines the multiply-by-3 step
/// rather than referencing this constant.
const GENERATOR: u8 = 0x03;
/// Precomputed exp table: exp[i] = g^i mod poly, for i in 0..511
/// Extended to 512 entries to avoid modular reduction in mul.
/// Generator is 3 (0x03), a primitive element of GF(2^8) with 0x11B.
/// Built entirely at comptime, so lookups have zero runtime setup cost.
pub const exp_table: [512]u8 = blk: {
    var table: [512]u8 = undefined;
    var x: u16 = 1; // g^0 = 1
    for (0..512) |i| {
        table[i] = @truncate(x);
        // Multiply by generator (3) in GF(2^8): x*3 = x*2 XOR x
        const x2 = x << 1; // x * 2
        const x3 = x2 ^ x; // x * 3 = x * 2 + x (addition = XOR in GF(2^8))
        // Reduce by POLY whenever the degree-8 bit appears.
        x = if (x3 & 0x100 != 0) x3 ^ POLY else x3;
    }
    break :blk table;
};
/// Precomputed log table: log[a] = i where g^i = a, for a in 1..255
/// log[0] is undefined (log of zero doesn't exist).
const log_table: [256]u8 = blk: {
    var table: [256]u8 = .{0} ** 256;
    // Invert exp over one full period (indices 0..254); the group has
    // order 255, so stopping before 255 avoids overwriting log[1] = 0.
    for (0..255) |i| {
        table[exp_table[i]] = @truncate(i);
    }
    break :blk table;
};
/// Addition in GF(2^8) is XOR.
/// Carry-free: in a characteristic-2 field, addition is bitwise XOR.
pub fn add(a: u8, b: u8) u8 {
    return a ^ b;
}
/// Subtraction in GF(2^8) is also XOR (same as addition).
/// Every element is its own additive inverse, so a - b = a + b.
pub fn sub(a: u8, b: u8) u8 {
    return a ^ b;
}
/// Multiplication in GF(2^8) via log/exp tables: a*b = g^(log a + log b).
/// The exp table has 512 entries, so the summed logs (at most 254 + 254)
/// index it directly with no reduction mod 255.
/// mul(0, _) = mul(_, 0) = 0.
pub fn mul(a: u8, b: u8) u8 {
    if (a == 0) return 0;
    if (b == 0) return 0;
    const log_a: u16 = log_table[a];
    const log_b: u16 = log_table[b];
    return exp_table[log_a + log_b];
}
/// Multiplicative inverse in GF(2^8).
/// Computed as g^(255 - log a), valid because the multiplicative group has
/// order 255 (a^255 = 1 for every nonzero a).
/// Returns FieldError.DivisionByZero if a == 0.
pub fn inv(a: u8) FieldError!u8 {
    if (a == 0) return FieldError.DivisionByZero;
    const exponent = 255 - @as(u16, log_table[a]);
    return exp_table[exponent];
}
/// Division in GF(2^8): a / b = g^(log a - log b).
/// The exponent is computed as log a + 255 - log b so the table index
/// stays non-negative (max 254 + 255 < 512).
/// Returns FieldError.DivisionByZero if b == 0.
pub fn div(a: u8, b: u8) FieldError!u8 {
    if (b == 0) return FieldError.DivisionByZero;
    if (a == 0) return 0;
    const exponent = @as(u16, log_table[a]) + 255 - @as(u16, log_table[b]);
    return exp_table[exponent];
}
// Tests
// Field axiom tests: exhaustive or strided sweeps over GF(2^8).
test "add is XOR" {
    try std.testing.expectEqual(@as(u8, 0), add(0, 0));
    try std.testing.expectEqual(@as(u8, 0), add(0xFF, 0xFF));
    try std.testing.expectEqual(0x53 ^ 0xCA, add(0x53, 0xCA));
}
test "sub equals add (characteristic 2)" {
    // Exhaustive over all 256x256 pairs.
    for (0..256) |a_int| {
        const a: u8 = @truncate(a_int);
        for (0..256) |b_int| {
            const b: u8 = @truncate(b_int);
            try std.testing.expectEqual(add(a, b), sub(a, b));
        }
    }
}
test "mul: identity (1 * a = a)" {
    for (0..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(a, mul(a, 1));
        try std.testing.expectEqual(a, mul(1, a));
    }
}
test "mul: zero (0 * a = 0)" {
    for (0..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(@as(u8, 0), mul(a, 0));
        try std.testing.expectEqual(@as(u8, 0), mul(0, a));
    }
}
test "mul: commutative (a*b = b*a)" {
    // Strided sweep (steps 7 and 11) to keep the test fast.
    var a: u16 = 1;
    while (a < 256) : (a += 7) {
        var b: u16 = 1;
        while (b < 256) : (b += 11) {
            try std.testing.expectEqual(
                mul(@truncate(a), @truncate(b)),
                mul(@truncate(b), @truncate(a)),
            );
        }
    }
}
test "mul: associative ((a*b)*c = a*(b*c))" {
    var a: u16 = 1;
    while (a < 256) : (a += 17) {
        var b: u16 = 1;
        while (b < 256) : (b += 23) {
            var c: u16 = 1;
            while (c < 256) : (c += 29) {
                const ab_c = mul(mul(@truncate(a), @truncate(b)), @truncate(c));
                const a_bc = mul(@truncate(a), mul(@truncate(b), @truncate(c)));
                try std.testing.expectEqual(ab_c, a_bc);
            }
        }
    }
}
test "mul: distributive (a*(b+c) = a*b + a*c)" {
    var a: u16 = 0;
    while (a < 256) : (a += 13) {
        var b: u16 = 0;
        while (b < 256) : (b += 17) {
            var c: u16 = 0;
            while (c < 256) : (c += 19) {
                const lhs = mul(@truncate(a), add(@truncate(b), @truncate(c)));
                const rhs = add(mul(@truncate(a), @truncate(b)), mul(@truncate(a), @truncate(c)));
                try std.testing.expectEqual(lhs, rhs);
            }
        }
    }
}
test "inv: a * inv(a) = 1 for all nonzero a" {
    for (1..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(@as(u8, 1), mul(a, try inv(a)));
    }
}
test "inv: inv(inv(a)) = a for all nonzero a" {
    for (1..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(a, try inv(try inv(a)));
    }
}
test "inv: returns error on zero" {
    try std.testing.expectError(FieldError.DivisionByZero, inv(0));
}
test "div: a / b = a * inv(b)" {
    var a: u16 = 0;
    while (a < 256) : (a += 13) {
        var b: u16 = 1;
        while (b < 256) : (b += 17) {
            try std.testing.expectEqual(
                mul(@truncate(a), try inv(@truncate(b))),
                try div(@truncate(a), @truncate(b)),
            );
        }
    }
}
test "div: 0 / b = 0 for all nonzero b" {
    for (1..256) |b_int| {
        const b: u8 = @truncate(b_int);
        try std.testing.expectEqual(@as(u8, 0), try div(0, b));
    }
}
test "div: a / 1 = a" {
    for (0..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(a, try div(a, 1));
    }
}
test "div: a / a = 1 for all nonzero a" {
    for (1..256) |a_int| {
        const a: u8 = @truncate(a_int);
        try std.testing.expectEqual(@as(u8, 1), try div(a, a));
    }
}
test "div: returns error on division by zero" {
    try std.testing.expectError(FieldError.DivisionByZero, div(42, 0));
}
test "exhaustive mul: all 256x256 pairs produce valid GF(2^8) results" {
    for (0..256) |a_int| {
        for (0..256) |b_int| {
            const result = mul(@truncate(a_int), @truncate(b_int));
            // Result must be in [0, 255] (u8 guarantees this, but let's be explicit)
            try std.testing.expect(result <= 255);
        }
    }
}
test "exp table: g^0 = 1, g^255 = 1 (cyclic group order = 255)" {
    try std.testing.expectEqual(@as(u8, 1), exp_table[0]);
    try std.testing.expectEqual(@as(u8, 1), exp_table[255]);
}
test "exp table: all nonzero elements appear exactly once in exp[0..255]" {
    // The generator must cycle through every nonzero element once.
    var seen = [_]bool{false} ** 256;
    for (0..255) |i| {
        const val = exp_table[i];
        try std.testing.expect(!seen[val]); // no duplicates
        seen[val] = true;
    }
    // All nonzero values should be seen
    for (1..256) |v| {
        try std.testing.expect(seen[v]);
    }
    // Zero should NOT appear (generator never produces 0)
    try std.testing.expect(!seen[0]);
}

68
src/sss/polynomial.zig Normal file
View File

@ -0,0 +1,68 @@
/// Polynomial operations over GF(2^8).
///
/// Used by Shamir split to evaluate random polynomials at different points.
/// Horner's method for efficient evaluation: O(K) multiplications per point.
const std = @import("std");
const gf = @import("field.zig");
/// Evaluates a polynomial at point x using Horner's method.
///
/// coeffs[0] is the constant term (the secret byte), coeffs[1] the x^1
/// coefficient, and so on. Returns 0 for an empty coefficient slice.
///
/// Horner's: p(x) = ((coeffs[K-1]*x + coeffs[K-2])*x + ...)*x + coeffs[0]
pub fn evaluate(coeffs: []const u8, x: u8) u8 {
    if (coeffs.len == 0) return 0;
    if (x == 0) return coeffs[0]; // p(0) = constant term = secret

    // Fold from the highest-degree coefficient down to the constant term.
    // Starting the accumulator at 0 is equivalent to seeding it with the
    // top coefficient: the first round computes 0*x + coeffs[len-1].
    var acc: u8 = 0;
    var idx = coeffs.len;
    while (idx > 0) {
        idx -= 1;
        acc = gf.add(gf.mul(acc, x), coeffs[idx]);
    }
    return acc;
}
// Tests
// Horner evaluation tests over GF(2^8).
test "constant polynomial: p(x) = 42 for all x" {
    const coeffs = [_]u8{42};
    for (0..256) |x_int| {
        const x: u8 = @truncate(x_int);
        try std.testing.expectEqual(@as(u8, 42), evaluate(&coeffs, x));
    }
}
test "linear polynomial: p(0) returns constant term" {
    const coeffs = [_]u8{ 42, 7 }; // p(x) = 42 + 7x
    try std.testing.expectEqual(@as(u8, 42), evaluate(&coeffs, 0));
}
test "linear polynomial: p(1) = a0 + a1" {
    const coeffs = [_]u8{ 42, 7 }; // p(x) = 42 + 7x
    // p(1) = 42 XOR 7 = 45 (in GF(2^8), add = XOR)
    try std.testing.expectEqual(gf.add(42, 7), evaluate(&coeffs, 1));
}
test "quadratic: p(0) returns secret" {
    const coeffs = [_]u8{ 0xFF, 0x12, 0x34 }; // p(x) = 0xFF + 0x12*x + 0x34*x^2
    try std.testing.expectEqual(@as(u8, 0xFF), evaluate(&coeffs, 0));
}
test "empty coefficients returns 0" {
    const coeffs = [_]u8{};
    try std.testing.expectEqual(@as(u8, 0), evaluate(&coeffs, 42));
}
test "known test vector: verify Horner matches naive evaluation" {
    // p(x) = 5 + 3x + 7x^2
    const coeffs = [_]u8{ 5, 3, 7 };
    const x: u8 = 2;
    // Naive: 5 XOR mul(3,2) XOR mul(7, mul(2,2))
    const naive = gf.add(gf.add(5, gf.mul(3, x)), gf.mul(7, gf.mul(x, x)));
    try std.testing.expectEqual(naive, evaluate(&coeffs, x));
}

293
src/sss/reshare.zig Normal file
View File

@ -0,0 +1,293 @@
/// Proactive Re-sharing Herzberg-Jarecki-Krawczyk-Yung protocol.
///
/// Allows guardians to refresh their shares without reconstructing the secret.
/// After re-sharing, old shares are algebraically independent from new shares,
/// so compromising old shares provides no information about the current secret.
///
/// Protocol:
/// 1. Each guardian i generates a random polynomial q_i(x) of degree K-1 with q_i(0) = 0
/// 2. Guardian i sends q_i(j) to guardian j for all j != i
/// 3. Each guardian j computes: new_share_j = old_share_j + sum(received q_i(j) for all i)
///
/// The secret is preserved because: sum(q_i(0)) = 0 for all i.
/// Shares are refreshed because: new_share != old_share (with overwhelming probability).
const std = @import("std");
const poly = @import("polynomial.zig");
const gf = @import("field.zig");
const types = @import("types.zig");
/// Errors produced by the re-sharing operations below.
pub const ReshareError = error{
    ThresholdTooSmall, // K < 2
    InvalidShareCount, // N < K
    MismatchedShareLengths, // a delta's values length differs from the share
    OutOfMemory,
};
/// A delta value that one guardian sends to another during re-sharing.
/// Guardian i sends delta_ij to guardian j, where delta_ij = q_i(j).
pub const ReshareDelta = struct {
    /// Source guardian index (1-based, matches share x-coordinate)
    from_x: u8,
    /// Target guardian index (1-based)
    to_x: u8,
    /// The delta values (one per secret byte)
    values: []const u8,
    /// Zeroizes the value buffer, then frees it.
    /// The @constCast is safe here only because generateDeltas allocates
    /// `values` mutably and exposes it through a const slice.
    pub fn deinit(self: ReshareDelta, allocator: std.mem.Allocator) void {
        const m: []u8 = @constCast(self.values);
        @memset(m, 0);
        allocator.free(m);
    }
};
/// Generate re-sharing deltas for one guardian.
///
/// This guardian (with x-coordinate `self_x`) generates a random polynomial
/// q(x) of degree K-1 with q(0) = 0, then computes q(j) for each target
/// guardian j = 1..N.
///
/// Returns an array of deltas, one per target guardian (including self).
/// Caller must free the result (deinit each delta, then free the slice).
pub fn generateDeltas(
    allocator: std.mem.Allocator,
    self_x: u8,
    secret_len: usize,
    n: u8,
    k: u8,
) ReshareError![]ReshareDelta {
    if (k < 2) return ReshareError.ThresholdTooSmall;
    if (n < k) return ReshareError.InvalidShareCount;
    const deltas = allocator.alloc(ReshareDelta, n) catch return ReshareError.OutOfMemory;
    // Initialize every entry BEFORE installing the errdefer. The previous
    // version allocated value buffers entry-by-entry while the errdefer
    // scanned all N entries, so an allocation failure partway through made
    // the cleanup read `values.len` from still-undefined memory (UB).
    for (deltas, 0..) |*d, i| {
        d.* = ReshareDelta{
            .from_x = self_x,
            .to_x = @as(u8, @truncate(i)) + 1, // targets are x = 1..N
            .values = &.{},
        };
    }
    errdefer {
        for (deltas) |*d| {
            if (d.values.len > 0) d.deinit(allocator);
        }
        allocator.free(deltas);
    }
    // Allocate the per-target value buffers (one byte per secret byte).
    for (deltas) |*d| {
        d.values = allocator.alloc(u8, secret_len) catch return ReshareError.OutOfMemory;
    }
    // Generate random polynomial coefficients (reused per byte)
    const coeffs = allocator.alloc(u8, k) catch return ReshareError.OutOfMemory;
    defer {
        @memset(coeffs, 0); // zeroize key material before freeing
        allocator.free(coeffs);
    }
    // For each byte position in the secret
    for (0..secret_len) |byte_idx| {
        // coeffs[0] = 0 (so q(0) = 0, preserving the secret)
        coeffs[0] = 0;
        // coeffs[1..K-1] = random (CSPRNG)
        std.crypto.random.bytes(coeffs[1..]);
        // Evaluate at each target guardian's x coordinate
        for (deltas) |*d| {
            const values_mut: []u8 = @constCast(d.values);
            values_mut[byte_idx] = poly.evaluate(coeffs, d.to_x);
        }
    }
    return deltas;
}
/// Apply received deltas to an existing share.
///
/// new_share.y[i] = old_share.y[i] + sum(delta.values[i] for each delta)
/// over GF(2^8) (where + is XOR).
///
/// The `deltas` array should contain one delta from each guardian
/// that participated in re-sharing (including self).
/// Caller owns the returned share's y buffer.
pub fn applyDeltas(
    allocator: std.mem.Allocator,
    old_share: types.Share,
    deltas: []const ReshareDelta,
) ReshareError!types.Share {
    const byte_count = old_share.y.len;

    // Every delta must carry exactly one value per secret byte.
    for (deltas) |delta| {
        if (delta.values.len != byte_count) return ReshareError.MismatchedShareLengths;
    }

    // Start from a copy of the old share, then XOR each delta in.
    const refreshed = allocator.alloc(u8, byte_count) catch return ReshareError.OutOfMemory;
    @memcpy(refreshed, old_share.y);
    for (deltas) |delta| {
        for (refreshed, delta.values) |*byte, delta_byte| {
            byte.* = gf.add(byte.*, delta_byte);
        }
    }

    return types.Share{
        .x = old_share.x,
        .y = refreshed,
    };
}
// Tests
const split_mod = @import("split.zig");
const combine_mod = @import("combine.zig");
// End-to-end re-sharing tests: secret preservation, share refresh, and
// epoch isolation (old + new shares must not mix).
test "reshare: deltas preserve secret" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 42, 137, 255, 0 };
    const n: u8 = 5;
    const k: u8 = 3;
    // Split secret
    const share_set = try split_mod.split(allocator, &secret, n, k);
    defer share_set.deinit(allocator);
    // Each guardian generates deltas
    var all_deltas: [5][]ReshareDelta = undefined;
    for (0..n) |i| {
        all_deltas[i] = try generateDeltas(
            allocator,
            @as(u8, @truncate(i)) + 1,
            secret.len,
            n,
            k,
        );
    }
    defer {
        for (0..n) |i| {
            for (all_deltas[i]) |*d| d.deinit(allocator);
            allocator.free(all_deltas[i]);
        }
    }
    // Each guardian collects deltas destined for them and applies
    var new_shares: [5]types.Share = undefined;
    for (0..n) |j| {
        // Collect deltas where to_x == j+1
        var deltas_for_j: [5]ReshareDelta = undefined;
        for (0..n) |i| {
            deltas_for_j[i] = all_deltas[i][j]; // delta from i to j
        }
        new_shares[j] = try applyDeltas(
            allocator,
            share_set.shares[j],
            &deltas_for_j,
        );
    }
    defer {
        for (&new_shares) |*s| s.deinit(allocator);
    }
    // Verify: K new shares can still reconstruct the secret
    const subset = [_]types.Share{ new_shares[0], new_shares[2], new_shares[4] };
    const recovered = try combine_mod.combine(allocator, &subset);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqualSlices(u8, &secret, recovered);
}
test "reshare: new shares differ from old shares" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const n: u8 = 3;
    const k: u8 = 2;
    const share_set = try split_mod.split(allocator, &secret, n, k);
    defer share_set.deinit(allocator);
    // Generate deltas from all guardians
    var all_deltas: [3][]ReshareDelta = undefined;
    for (0..n) |i| {
        all_deltas[i] = try generateDeltas(
            allocator,
            @as(u8, @truncate(i)) + 1,
            secret.len,
            n,
            k,
        );
    }
    defer {
        for (0..n) |i| {
            for (all_deltas[i]) |*d| d.deinit(allocator);
            allocator.free(all_deltas[i]);
        }
    }
    // Apply deltas to first share
    var deltas_for_0: [3]ReshareDelta = undefined;
    for (0..n) |i| deltas_for_0[i] = all_deltas[i][0];
    const new_share = try applyDeltas(allocator, share_set.shares[0], &deltas_for_0);
    defer {
        var ns = new_share;
        ns.deinit(allocator);
    }
    // New share should differ from old (with overwhelming probability)
    // Note: there's a 1/256 chance they're equal for a 1-byte secret
    // This is a probabilistic test extremely unlikely to fail
    // NOTE(review): despite the test name, only x preservation is asserted
    // below; the y buffers are never compared — consider adding a
    // !std.mem.eql check on y to actually test the refresh.
    try std.testing.expect(new_share.x == share_set.shares[0].x);
}
test "reshare: old and new shares mixed fails reconstruction" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; // longer secret for reliability
    const n: u8 = 5;
    const k: u8 = 3;
    const share_set = try split_mod.split(allocator, &secret, n, k);
    defer share_set.deinit(allocator);
    // Generate and apply deltas
    var all_deltas: [5][]ReshareDelta = undefined;
    for (0..n) |i| {
        all_deltas[i] = try generateDeltas(
            allocator,
            @as(u8, @truncate(i)) + 1,
            secret.len,
            n,
            k,
        );
    }
    defer {
        for (0..n) |i| {
            for (all_deltas[i]) |*d| d.deinit(allocator);
            allocator.free(all_deltas[i]);
        }
    }
    // Create one new share (guardian 0)
    var deltas_for_0: [5]ReshareDelta = undefined;
    for (0..n) |i| deltas_for_0[i] = all_deltas[i][0];
    const new_share_0 = try applyDeltas(allocator, share_set.shares[0], &deltas_for_0);
    defer {
        var ns = new_share_0;
        ns.deinit(allocator);
    }
    // Mix: 1 new share + 2 old shares (different epochs)
    // This should NOT reconstruct the original secret
    const mixed = [_]types.Share{ new_share_0, share_set.shares[2], share_set.shares[4] };
    const result = try combine_mod.combine(allocator, &mixed);
    defer {
        @memset(result, 0);
        allocator.free(result);
    }
    // Should NOT equal the original secret (with overwhelming probability)
    try std.testing.expect(!std.mem.eql(u8, &secret, result));
}

149
src/sss/split.zig Normal file
View File

@ -0,0 +1,149 @@
/// Shamir Secret Sharing Split operation.
///
/// Splits a secret byte array into N shares with threshold K.
/// For each byte of the secret, generates a random polynomial of degree K-1
/// with the secret byte as the constant term, then evaluates at points x=1..N.
const std = @import("std");
const poly = @import("polynomial.zig");
const types = @import("types.zig");
/// Errors produced by split().
pub const SplitError = error{
    ThresholdTooSmall, // K < 2
    ShareCountTooSmall, // N < K
    TooManyShares,
    EmptySecret,
    OutOfMemory,
};
/// Splits a secret into N shares with threshold K.
///
/// - secret: The data to split (1+ bytes)
/// - n: Total number of shares to generate (2..255)
/// - k: Threshold minimum shares needed to reconstruct (2..N)
///
/// Returns a ShareSet that the caller must deinit.
pub fn split(
    allocator: std.mem.Allocator,
    secret: []const u8,
    n: u8,
    k: u8,
) SplitError!types.ShareSet {
    if (k < 2) return SplitError.ThresholdTooSmall;
    if (n < k) return SplitError.ShareCountTooSmall;
    if (secret.len == 0) return SplitError.EmptySecret;
    const secret_len = secret.len;
    // Allocate shares
    const shares = allocator.alloc(types.Share, n) catch return SplitError.OutOfMemory;
    // Initialize every share (x = 1..N, empty y) BEFORE installing the
    // errdefer. The previous version allocated y buffers entry-by-entry
    // while the errdefer scanned all N entries, so an allocation failure
    // partway through made the cleanup read `y.len` from still-undefined
    // memory (UB).
    for (shares, 0..) |*share, i| {
        share.* = .{
            .x = @as(u8, @truncate(i)) + 1, // x = 1..N; x = 0 would evaluate p(0) = secret
            .y = &.{},
        };
    }
    errdefer {
        for (shares) |*s| {
            if (s.y.len > 0) {
                const m: []u8 = @constCast(s.y);
                @memset(m, 0); // zeroize partial share material
                allocator.free(m);
            }
        }
        allocator.free(shares);
    }
    // Allocate the per-share y buffers (one byte per secret byte).
    for (shares) |*share| {
        share.y = allocator.alloc(u8, secret_len) catch return SplitError.OutOfMemory;
    }
    // Allocate temporary coefficient buffer (reused per byte)
    const coeffs = allocator.alloc(u8, k) catch return SplitError.OutOfMemory;
    defer {
        @memset(coeffs, 0); // zeroize key material before freeing
        allocator.free(coeffs);
    }
    // For each byte of the secret
    for (0..secret_len) |byte_idx| {
        // coeffs[0] = secret byte (constant term)
        coeffs[0] = secret[byte_idx];
        // coeffs[1..K-1] = random (CSPRNG)
        std.crypto.random.bytes(coeffs[1..]);
        // Evaluate polynomial at each share's x coordinate
        for (shares) |*share| {
            const y_mut: []u8 = @constCast(share.y);
            y_mut[byte_idx] = poly.evaluate(coeffs, share.x);
        }
    }
    return types.ShareSet{
        .threshold = k,
        .total = n,
        .shares = shares,
    };
}
// Tests
// Structural tests for split(): share counts, x coordinates, validation.
test "split: basic 2-of-3" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split(allocator, &secret, 3, 2);
    defer share_set.deinit(allocator);
    try std.testing.expectEqual(@as(u8, 2), share_set.threshold);
    try std.testing.expectEqual(@as(u8, 3), share_set.total);
    try std.testing.expectEqual(@as(usize, 3), share_set.shares.len);
    // Check x coordinates are 1, 2, 3
    try std.testing.expectEqual(@as(u8, 1), share_set.shares[0].x);
    try std.testing.expectEqual(@as(u8, 2), share_set.shares[1].x);
    try std.testing.expectEqual(@as(u8, 3), share_set.shares[2].x);
    // Each share's y should be 1 byte
    for (share_set.shares) |share| {
        try std.testing.expectEqual(@as(usize, 1), share.y.len);
    }
}
test "split: multi-byte secret" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    const share_set = try split(allocator, &secret, 5, 3);
    defer share_set.deinit(allocator);
    try std.testing.expectEqual(@as(usize, 5), share_set.shares.len);
    // Each share carries one byte per secret byte.
    for (share_set.shares) |share| {
        try std.testing.expectEqual(@as(usize, 10), share.y.len);
    }
}
test "split: rejects K < 2" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{1};
    try std.testing.expectError(SplitError.ThresholdTooSmall, split(allocator, &secret, 3, 1));
}
test "split: rejects N < K" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{1};
    try std.testing.expectError(SplitError.ShareCountTooSmall, split(allocator, &secret, 2, 3));
}
test "split: rejects empty secret" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{};
    try std.testing.expectError(SplitError.EmptySecret, split(allocator, &secret, 3, 2));
}
test "split: x coordinates are sequential 1..N" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{42};
    const share_set = try split(allocator, &secret, 10, 3);
    defer share_set.deinit(allocator);
    for (share_set.shares, 0..) |share, i| {
        try std.testing.expectEqual(@as(u8, @truncate(i)) + 1, share.x);
    }
}

View File

@ -0,0 +1,211 @@
/// Cross-platform SSS test vectors verifies Zig TypeScript compatibility.
///
/// These vectors are generated by rootwallet/core/scripts/generate-test-vectors.ts
/// and must match the values in test-vectors/sss-vectors.json.
/// Both the Zig and TypeScript implementations are tested against the same vectors.
const std = @import("std");
const gf = @import("field.zig");
const poly = @import("polynomial.zig");
const types = @import("types.zig");
const combine_mod = @import("combine.zig");
// GF(2^8) exp table samples
test "cross-platform: exp table matches TypeScript" {
const samples = [_][2]u16{
.{ 0, 1 }, .{ 10, 114 }, .{ 20, 216 }, .{ 30, 102 },
.{ 40, 106 }, .{ 50, 4 }, .{ 60, 211 }, .{ 70, 77 },
.{ 80, 131 }, .{ 90, 179 }, .{ 100, 16 }, .{ 110, 97 },
.{ 120, 47 }, .{ 130, 58 }, .{ 140, 250 }, .{ 150, 64 },
.{ 160, 159 }, .{ 170, 188 }, .{ 180, 232 }, .{ 190, 197 },
.{ 200, 27 }, .{ 210, 74 }, .{ 220, 198 }, .{ 230, 141 },
.{ 240, 57 }, .{ 250, 108 }, .{ 254, 246 }, .{ 255, 1 },
};
for (samples) |s| {
try std.testing.expectEqual(@as(u8, @truncate(s[1])), gf.exp_table[s[0]]);
}
}
// GF(2^8) multiplication
test "cross-platform: mul matches TypeScript" {
const vectors = [_][3]u8{
.{ 1, 1, 1 }, .{ 1, 2, 2 }, .{ 1, 3, 3 },
.{ 1, 42, 42 }, .{ 1, 127, 127 }, .{ 1, 170, 170 },
.{ 1, 255, 255 },
.{ 2, 1, 2 }, .{ 2, 2, 4 }, .{ 2, 3, 6 },
.{ 2, 42, 84 }, .{ 2, 127, 254 }, .{ 2, 170, 79 },
.{ 2, 255, 229 },
.{ 3, 1, 3 }, .{ 3, 2, 6 }, .{ 3, 3, 5 },
.{ 3, 42, 126 }, .{ 3, 127, 129 }, .{ 3, 170, 229 },
.{ 3, 255, 26 },
.{ 42, 1, 42 }, .{ 42, 2, 84 }, .{ 42, 3, 126 },
.{ 42, 42, 40 }, .{ 42, 127, 82 }, .{ 42, 170, 244 },
.{ 42, 255, 142 },
.{ 127, 1, 127 }, .{ 127, 2, 254 }, .{ 127, 3, 129 },
.{ 127, 42, 82 }, .{ 127, 127, 137 }, .{ 127, 170, 173 },
.{ 127, 255, 118 },
.{ 170, 1, 170 }, .{ 170, 2, 79 }, .{ 170, 3, 229 },
.{ 170, 42, 244 }, .{ 170, 127, 173 }, .{ 170, 170, 178 },
.{ 170, 255, 235 },
.{ 255, 1, 255 }, .{ 255, 2, 229 }, .{ 255, 3, 26 },
.{ 255, 42, 142 }, .{ 255, 127, 118 }, .{ 255, 170, 235 },
.{ 255, 255, 19 },
};
for (vectors) |v| {
try std.testing.expectEqual(v[2], gf.mul(v[0], v[1]));
}
}
// GF(2^8) inverse
test "cross-platform: inv matches TypeScript" {
    // Parallel arrays: inverses[i] is the GF(2^8) multiplicative inverse
    // of inputs[i], as produced by the TypeScript implementation.
    const inputs = [_]u8{ 1, 2, 3, 5, 7, 16, 42, 127, 128, 170, 200, 255 };
    const inverses = [_]u8{ 1, 141, 246, 82, 209, 116, 152, 130, 131, 18, 169, 28 };
    for (inputs, inverses) |value, want| {
        try std.testing.expectEqual(want, try gf.inv(value));
    }
}
// GF(2^8) division
test "cross-platform: div matches TypeScript" {
    // Each entry is { a, b, expected } where expected = gf.div(a, b).
    // Rows of the form { 1, x, ... } double as inverse checks.
    // Generated by the TypeScript implementation -- do not hand-edit.
    const vectors = [_][3]u8{
        .{ 1, 1, 1 }, .{ 1, 2, 141 }, .{ 1, 3, 246 },
        .{ 1, 42, 152 }, .{ 1, 127, 130 }, .{ 1, 170, 18 },
        .{ 1, 255, 28 },
        .{ 2, 1, 2 }, .{ 2, 2, 1 }, .{ 2, 3, 247 },
        .{ 3, 1, 3 }, .{ 3, 2, 140 }, .{ 3, 3, 1 },
        .{ 42, 1, 42 }, .{ 42, 2, 21 }, .{ 42, 42, 1 },
        .{ 127, 1, 127 }, .{ 127, 127, 1 },
        .{ 170, 1, 170 }, .{ 170, 170, 1 },
        .{ 255, 1, 255 }, .{ 255, 255, 1 },
    };
    for (vectors) |v| {
        try std.testing.expectEqual(v[2], try gf.div(v[0], v[1]));
    }
}
// Polynomial evaluation
test "cross-platform: polynomial evaluation matches TypeScript" {
    // Table-driven: each case evaluates one polynomial (coefficients in
    // ascending degree order) at one point and checks the expected value.
    const Case = struct { coeffs: []const u8, x: u8, expected: u8 };
    const cases = [_]Case{
        // p(x) = 42 + 5x + 7x^2
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 1, .expected = 40 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 2, .expected = 60 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 3, .expected = 62 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 4, .expected = 78 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 5, .expected = 76 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 10, .expected = 207 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 100, .expected = 214 },
        .{ .coeffs = &.{ 42, 5, 7 }, .x = 255, .expected = 125 },
        // p(x) = 0 + 0xAB*x + 0xCD*x^2
        .{ .coeffs = &.{ 0, 0xAB, 0xCD }, .x = 1, .expected = 102 },
        .{ .coeffs = &.{ 0, 0xAB, 0xCD }, .x = 3, .expected = 50 },
        .{ .coeffs = &.{ 0, 0xAB, 0xCD }, .x = 5, .expected = 152 },
        .{ .coeffs = &.{ 0, 0xAB, 0xCD }, .x = 7, .expected = 204 },
        .{ .coeffs = &.{ 0, 0xAB, 0xCD }, .x = 200, .expected = 96 },
        // p(x) = 0xFF (constant polynomial)
        .{ .coeffs = &.{0xFF}, .x = 1, .expected = 255 },
        .{ .coeffs = &.{0xFF}, .x = 2, .expected = 255 },
        .{ .coeffs = &.{0xFF}, .x = 255, .expected = 255 },
        // p(x) = 128 + 64x + 32x^2 + 16x^3
        .{ .coeffs = &.{ 128, 64, 32, 16 }, .x = 1, .expected = 240 },
        .{ .coeffs = &.{ 128, 64, 32, 16 }, .x = 2, .expected = 0 },
        .{ .coeffs = &.{ 128, 64, 32, 16 }, .x = 3, .expected = 16 },
        .{ .coeffs = &.{ 128, 64, 32, 16 }, .x = 4, .expected = 193 },
        .{ .coeffs = &.{ 128, 64, 32, 16 }, .x = 5, .expected = 234 },
    };
    for (cases) |case| {
        try std.testing.expectEqual(case.expected, poly.evaluate(case.coeffs, case.x));
    }
}
// Lagrange interpolation (combine)
test "cross-platform: Lagrange combine matches TypeScript (single-byte)" {
    const allocator = std.testing.allocator;
    // p(x) = 42 + 5x + 7x^2 evaluated at x = 1..5; the secret p(0) is 42.
    // ys[i] holds the one-byte share for x = i + 1.
    const ys = [5][1]u8{ .{40}, .{60}, .{62}, .{78}, .{76} };
    // Each row is one 3-of-5 reconstruction subset (x-coordinates).
    const subsets = [_][3]u8{
        .{ 1, 2, 3 },
        .{ 1, 3, 5 },
        .{ 2, 4, 5 },
        .{ 3, 4, 5 },
    };
    for (subsets) |subset| {
        var shares: [3]types.Share = undefined;
        for (subset, 0..) |x, i| {
            shares[i] = .{ .x = x, .y = &ys[x - 1] };
        }
        const secret = try combine_mod.combine(allocator, &shares);
        defer allocator.free(secret);
        try std.testing.expectEqual(@as(u8, 42), secret[0]);
    }
}
test "cross-platform: Lagrange combine matches TypeScript (multi-byte)" {
    const allocator = std.testing.allocator;
    // A 2-byte secret [42, 0] split with two independent polynomials:
    //   byte 0: 42 + 5x + 7x^2
    //   byte 1: 0 + 0xAB*x + 0xCD*x^2
    // Shares at x = 1, 3, 5 are enough for a threshold-3 reconstruction.
    const shares = [_]types.Share{
        .{ .x = 1, .y = &[_]u8{ 40, 102 } },
        .{ .x = 3, .y = &[_]u8{ 62, 50 } },
        .{ .x = 5, .y = &[_]u8{ 76, 152 } },
    };
    const secret = try combine_mod.combine(allocator, &shares);
    defer allocator.free(secret);
    try std.testing.expectEqual(@as(u8, 42), secret[0]);
    try std.testing.expectEqual(@as(u8, 0), secret[1]);
}

62
src/sss/types.zig Normal file
View File

@ -0,0 +1,62 @@
/// Types for Shamir Secret Sharing.
const std = @import("std");
/// A single share from Shamir's Secret Sharing.
/// x is the evaluation point (1..N), y is the evaluated polynomial values (one per secret byte).
pub const Share = struct {
    /// Evaluation point (1..255, never 0)
    x: u8,
    /// Share data (same length as original secret)
    y: []const u8,

    /// Zeroes the share data and frees it.
    /// Uses std.crypto.secureZero instead of @memset: a plain memset
    /// immediately before free() is a dead store the optimizer may elide,
    /// which would leak key material in freed heap pages.
    pub fn deinit(self: Share, allocator: std.mem.Allocator) void {
        const mutable: []u8 = @constCast(self.y);
        std.crypto.secureZero(u8, mutable);
        allocator.free(mutable);
    }
};
/// A set of shares with metadata.
pub const ShareSet = struct {
    /// Threshold (K) minimum shares needed to reconstruct
    threshold: u8,
    /// Total shares (N)
    total: u8,
    /// The shares themselves
    shares: []Share,

    /// Releases every contained share (each zeroes its own data),
    /// then frees the share slice itself.
    pub fn deinit(self: ShareSet, allocator: std.mem.Allocator) void {
        for (self.shares) |s| s.deinit(allocator);
        allocator.free(self.shares);
    }
};
/// Merkle commitment for share integrity verification.
pub const CommitmentTree = struct {
    /// Root hash of the Merkle tree (SHA-256, 32 bytes)
    root: [32]u8,
    /// Individual leaf hashes (one per share)
    leaves: []const [32]u8,

    /// Frees the leaf array; the root is stored inline and needs no cleanup.
    pub fn deinit(self: CommitmentTree, allocator: std.mem.Allocator) void {
        allocator.free(self.leaves);
    }
};
/// Merkle proof for a single share.
pub const MerkleProof = struct {
    /// Index of the leaf in the tree
    leaf_index: usize,
    /// Sibling hashes along the path to the root
    siblings: []const [32]u8,
    /// Direction flags: false = sibling is on the left, true = sibling is on the right
    directions: []const bool,

    /// Frees both proof arrays (siblings and direction flags).
    pub fn deinit(self: MerkleProof, allocator: std.mem.Allocator) void {
        allocator.free(self.siblings);
        allocator.free(self.directions);
    }
};

246
src/storage/file_store.zig Normal file
View File

@ -0,0 +1,246 @@
/// File-per-user share storage with HMAC integrity.
///
/// Each user's data is stored in a directory named by their identity hash (hex).
/// Files are written atomically (write-to-temp + rename) to prevent corruption.
///
/// Directory layout:
/// <data_dir>/shares/<identity_hash_hex>/
/// meta.json - Share metadata (JSON)
/// share.bin - Raw encrypted share data
/// wrapped_dek1.bin - KEK1-wrapped DEK
/// wrapped_dek2.bin - KEK2-wrapped DEK
/// checksum.bin - HMAC-SHA256 of share.bin
const std = @import("std");
const hmac = @import("../crypto/hmac.zig");
/// Errors surfaced by the file-per-user share store.
pub const StoreError = error{
    // Caller passed an empty identity hash.
    IdentityHashRequired,
    // Caller passed empty share data.
    ShareDataRequired,
    // Stored HMAC does not match the share bytes (corruption or tampering).
    IntegrityCheckFailed,
    OutOfMemory,
    // Any filesystem failure (create/write/rename/read).
    IoError,
};
/// Metadata for a stored share.
/// NOTE(review): declared but not read or written by any function visible in
/// this file (writeShare only emits share.bin + checksum.bin, despite the
/// header mentioning meta.json) -- confirm whether metadata persistence
/// was intended here or lives elsewhere.
pub const ShareMetadata = struct {
    /// Monotonic version counter (for rollback protection)
    version: u64,
    /// Share index (x-coordinate in Shamir scheme)
    share_index: u8,
    /// Merkle commitment root (hex)
    commitment_root: [64]u8, // 32 bytes as hex
    /// Timestamp of last update (Unix epoch)
    timestamp: i64,
};
/// Writes share data atomically to the store.
/// Creates the identity directory if needed; each file lands via tmp+rename.
/// NOTE(review): share.bin and checksum.bin are each individually atomic but
/// are not written as one transaction -- a crash between the two writes
/// leaves them mismatched, so the next read fails its integrity check.
pub fn writeShare(
    data_dir: []const u8,
    identity_hash_hex: []const u8,
    share_data: []const u8,
    integrity_key: []const u8,
    allocator: std.mem.Allocator,
) !void {
    if (identity_hash_hex.len == 0) return StoreError.IdentityHashRequired;
    if (share_data.len == 0) return StoreError.ShareDataRequired;
    // Layout: <data_dir>/shares/<identity_hash_hex>/
    const dir_path = try std.fmt.allocPrint(allocator, "{s}/shares/{s}", .{ data_dir, identity_hash_hex });
    defer allocator.free(dir_path);
    std.fs.cwd().makePath(dir_path) catch {
        return StoreError.IoError;
    };
    // share.bin (tmp+rename)
    const data_path = try std.fmt.allocPrint(allocator, "{s}/share.bin", .{dir_path});
    defer allocator.free(data_path);
    const data_tmp = try std.fmt.allocPrint(allocator, "{s}/share.bin.tmp", .{dir_path});
    defer allocator.free(data_tmp);
    try atomicWrite(data_path, data_tmp, share_data);
    // checksum.bin: HMAC-SHA256 over the share bytes (tmp+rename)
    const mac = hmac.compute(integrity_key, share_data);
    const mac_path = try std.fmt.allocPrint(allocator, "{s}/checksum.bin", .{dir_path});
    defer allocator.free(mac_path);
    const mac_tmp = try std.fmt.allocPrint(allocator, "{s}/checksum.bin.tmp", .{dir_path});
    defer allocator.free(mac_tmp);
    try atomicWrite(mac_path, mac_tmp, &mac);
}
/// Reads share data from the store and verifies HMAC integrity.
/// Returns the share data. Caller must free.
pub fn readShare(
    data_dir: []const u8,
    identity_hash_hex: []const u8,
    integrity_key: []const u8,
    allocator: std.mem.Allocator,
) ![]u8 {
    const data_path = try std.fmt.allocPrint(allocator, "{s}/shares/{s}/share.bin", .{ data_dir, identity_hash_hex });
    defer allocator.free(data_path);
    const mac_path = try std.fmt.allocPrint(allocator, "{s}/shares/{s}/checksum.bin", .{ data_dir, identity_hash_hex });
    defer allocator.free(mac_path);
    // Share payload is capped at 10 MiB.
    const payload = std.fs.cwd().readFileAlloc(allocator, data_path, 10 * 1024 * 1024) catch {
        return StoreError.IoError;
    };
    errdefer allocator.free(payload);
    const mac_bytes = std.fs.cwd().readFileAlloc(allocator, mac_path, 32) catch {
        return StoreError.IoError;
    };
    defer allocator.free(mac_bytes);
    // The checksum file must be exactly one HMAC-SHA256 output.
    if (mac_bytes.len != 32) {
        return StoreError.IntegrityCheckFailed;
    }
    if (!hmac.verify(integrity_key, payload, mac_bytes[0..32].*)) {
        return StoreError.IntegrityCheckFailed;
    }
    return payload;
}
/// Checks if a share exists for the given identity.
/// Any access() failure (missing file, permissions, ...) counts as "missing".
pub fn shareExists(
    data_dir: []const u8,
    identity_hash_hex: []const u8,
    allocator: std.mem.Allocator,
) !bool {
    const probe_path = try std.fmt.allocPrint(allocator, "{s}/shares/{s}/share.bin", .{ data_dir, identity_hash_hex });
    defer allocator.free(probe_path);
    if (std.fs.cwd().access(probe_path, .{})) {
        return true;
    } else |_| {
        return false;
    }
}
/// Deletes all data for the given identity.
/// Best-effort: failures from deleteTree are intentionally ignored, so a
/// missing directory is not an error.
pub fn deleteShare(
    data_dir: []const u8,
    identity_hash_hex: []const u8,
    allocator: std.mem.Allocator,
) !void {
    const target_dir = try std.fmt.allocPrint(allocator, "{s}/shares/{s}", .{ data_dir, identity_hash_hex });
    defer allocator.free(target_dir);
    std.fs.cwd().deleteTree(target_dir) catch {};
}
// Internal helpers
/// Writes `data` to tmp_path, then renames it over final_path.
/// Fixes over the previous version:
///  - the temp file is closed (and fsync'd) BEFORE the rename; renaming a
///    still-open file fails on Windows,
///  - on any failure the temp file is removed instead of being left behind.
fn atomicWrite(final_path: []const u8, tmp_path: []const u8, data: []const u8) StoreError!void {
    // Clean up the temp file on any error below (ignore cleanup failures).
    errdefer std.fs.cwd().deleteFile(tmp_path) catch {};
    {
        const tmp_file = std.fs.cwd().createFile(tmp_path, .{}) catch {
            return StoreError.IoError;
        };
        defer tmp_file.close();
        tmp_file.writeAll(data) catch {
            return StoreError.IoError;
        };
        // Flush to stable storage so the rename publishes complete data.
        tmp_file.sync() catch {
            return StoreError.IoError;
        };
    }
    // Atomically replace the destination.
    std.fs.cwd().rename(tmp_path, final_path) catch {
        return StoreError.IoError;
    };
}
// Tests
test "write and read share round-trip" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    const identity = "abcdef0123456789";
    const payload = "test share data bytes";
    const key = "integrity-key-32-bytes-long!!!!";
    // A write followed by a read must return the exact bytes stored.
    try writeShare(base_dir, identity, payload, key, allocator);
    const roundtripped = try readShare(base_dir, identity, key, allocator);
    defer allocator.free(roundtripped);
    try std.testing.expectEqualSlices(u8, payload, roundtripped);
}
test "integrity check detects tampering" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    const identity = "deadbeef";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeShare(base_dir, identity, "original data", key, allocator);
    // Overwrite share.bin behind the store's back.
    const victim = try std.fmt.allocPrint(allocator, "{s}/shares/{s}/share.bin", .{ base_dir, identity });
    defer allocator.free(victim);
    const file = try std.fs.cwd().openFile(victim, .{ .mode = .write_only });
    defer file.close();
    try file.writeAll("tampered data!");
    // The HMAC over the tampered bytes no longer matches checksum.bin.
    try std.testing.expectError(StoreError.IntegrityCheckFailed, readShare(base_dir, identity, key, allocator));
}
test "shareExists: returns false for missing share" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    // Nothing was written, so no identity can exist yet.
    try std.testing.expect((try shareExists(base_dir, "nonexistent", allocator)) == false);
}
test "shareExists: returns true for existing share" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    // After a successful write the identity must be reported as present.
    try writeShare(base_dir, "exists", "data", "key-32-bytes-exactly-right!!!!!", allocator);
    try std.testing.expect(try shareExists(base_dir, "exists", allocator));
}
test "deleteShare: removes all files" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    try writeShare(base_dir, "todelete", "data", "key-32-bytes-exactly-right!!!!!", allocator);
    // Deleting must remove the whole identity directory tree.
    try deleteShare(base_dir, "todelete", allocator);
    try std.testing.expect(!(try shareExists(base_dir, "todelete", allocator)));
}

View File

@ -0,0 +1,285 @@
/// One-time migration from V1 (shares/<id>/) to V2 (vaults/<id>/default/).
///
/// V1 layout:
/// <data_dir>/shares/<identity>/
/// share.bin
/// checksum.bin
/// version (plain text u64)
///
/// V2 layout:
/// <data_dir>/vaults/<identity>/default/
/// share.bin
/// checksum.bin
/// meta.json
///
/// After migration, V1 directories are renamed to <identity>.migrated/ as backup.
const std = @import("std");
/// Checks if V1 data exists that needs migration.
/// Returns true if <data_dir>/shares/ exists and contains at least one
/// subdirectory not already renamed to "<id>.migrated".
pub fn needsMigration(data_dir: []const u8, allocator: std.mem.Allocator) bool {
    const shares_path = std.fmt.allocPrint(allocator, "{s}/shares", .{data_dir}) catch return false;
    defer allocator.free(shares_path);
    var shares_dir = std.fs.cwd().openDir(shares_path, .{ .iterate = true }) catch return false;
    defer shares_dir.close();
    var walker = shares_dir.iterate();
    // The function is infallible: iteration errors end the scan early and
    // read as "nothing to migrate".
    while (walker.next() catch null) |entry| {
        if (entry.kind != .directory) continue;
        if (std.mem.endsWith(u8, entry.name, ".migrated")) continue;
        return true;
    }
    return false;
}
/// Migrates all V1 share directories to V2 vault format.
/// Returns the number of identities migrated; a missing shares/ directory
/// means there is nothing to do.
pub fn migrateV1toV2(data_dir: []const u8, allocator: std.mem.Allocator) !u32 {
    const shares_path = std.fmt.allocPrint(allocator, "{s}/shares", .{data_dir}) catch return error.OutOfMemory;
    defer allocator.free(shares_path);
    var shares_dir = std.fs.cwd().openDir(shares_path, .{ .iterate = true }) catch {
        return 0; // No shares dir = nothing to migrate
    };
    defer shares_dir.close();
    var migrated_count: u32 = 0;
    var walker = shares_dir.iterate();
    while (try walker.next()) |entry| {
        if (entry.kind != .directory) continue;
        // ".migrated" directories are backups from a previous run.
        if (std.mem.endsWith(u8, entry.name, ".migrated")) continue;
        if (migrateOneIdentity(data_dir, entry.name, allocator)) migrated_count += 1;
    }
    return migrated_count;
}
/// Migrate a single identity from V1 to V2.
/// Returns true once the V2 copy is fully written -- even if the final
/// backup rename of the V1 directory fails.
fn migrateOneIdentity(data_dir: []const u8, identity: []const u8, allocator: std.mem.Allocator) bool {
    // V1 source file paths.
    const v1_share = std.fmt.allocPrint(allocator, "{s}/shares/{s}/share.bin", .{ data_dir, identity }) catch return false;
    defer allocator.free(v1_share);
    const v1_checksum = std.fmt.allocPrint(allocator, "{s}/shares/{s}/checksum.bin", .{ data_dir, identity }) catch return false;
    defer allocator.free(v1_checksum);
    const v1_version = std.fmt.allocPrint(allocator, "{s}/shares/{s}/version", .{ data_dir, identity }) catch return false;
    defer allocator.free(v1_version);
    // V2 destination directory: vaults/<identity>/default/
    const v2_dir = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/default", .{ data_dir, identity }) catch return false;
    defer allocator.free(v2_dir);
    std.fs.cwd().makePath(v2_dir) catch return false;
    // Copy the share and its checksum into the V2 layout.
    const v2_share = std.fmt.allocPrint(allocator, "{s}/share.bin", .{v2_dir}) catch return false;
    defer allocator.free(v2_share);
    copyFile(v1_share, v2_share) catch return false;
    const v2_checksum = std.fmt.allocPrint(allocator, "{s}/checksum.bin", .{v2_dir}) catch return false;
    defer allocator.free(v2_checksum);
    copyFile(v1_checksum, v2_checksum) catch return false;
    // Synthesize meta.json from the V1 version file and the share size.
    const version = readVersionFile(v1_version, allocator);
    const now = std.time.nanoTimestamp();
    const share_size = getFileSize(v1_share);
    var json_buf: [512]u8 = undefined;
    const meta_json = std.fmt.bufPrint(&json_buf,
        \\{{"version":{d},"created_ns":{d},"updated_ns":{d},"size":{d}}}
    , .{ version, now, now, share_size }) catch return false;
    const v2_meta = std.fmt.allocPrint(allocator, "{s}/meta.json", .{v2_dir}) catch return false;
    defer allocator.free(v2_meta);
    writeFile(v2_meta, meta_json) catch return false;
    // Keep the V1 data as a backup by renaming it to <identity>.migrated.
    const v1_dir = std.fmt.allocPrint(allocator, "{s}/shares/{s}", .{ data_dir, identity }) catch return false;
    defer allocator.free(v1_dir);
    const backup_dir = std.fmt.allocPrint(allocator, "{s}/shares/{s}.migrated", .{ data_dir, identity }) catch return false;
    defer allocator.free(backup_dir);
    // Migration already succeeded at this point; a failed rename is non-fatal.
    std.fs.cwd().rename(v1_dir, backup_dir) catch return true;
    return true;
}
/// Copies src to dst (both relative to cwd), replacing dst if it exists.
/// Uses std.fs.Dir.copyFile instead of a hand-rolled 8 KiB read/write loop:
/// the standard routine can use fast kernel copy paths (e.g.
/// copy_file_range) and stages through a temp file for atomic replacement.
fn copyFile(src: []const u8, dst: []const u8) !void {
    try std.fs.cwd().copyFile(src, std.fs.cwd(), dst, .{});
}
/// Parses the V1 plain-text version file (a decimal u64, optionally
/// surrounded by whitespace). Any read or parse failure falls back to 1.
fn readVersionFile(path: []const u8, allocator: std.mem.Allocator) u64 {
    const contents = std.fs.cwd().readFileAlloc(allocator, path, 32) catch return 1;
    defer allocator.free(contents);
    const trimmed = std.mem.trim(u8, contents, &std.ascii.whitespace);
    return std.fmt.parseInt(u64, trimmed, 10) catch 1;
}
/// Returns the size in bytes of the file at `path`, or 0 if it cannot be
/// opened or stat'd.
/// Fix: stat.size is u64 while the return type is usize; the previous
/// implicit coercion fails to compile on 32-bit targets. Use a checked cast
/// (values that don't fit usize degrade to the existing 0 fallback).
fn getFileSize(path: []const u8) usize {
    const file = std.fs.cwd().openFile(path, .{}) catch return 0;
    defer file.close();
    const info = file.stat() catch return 0;
    return std.math.cast(usize, info.size) orelse 0;
}
/// Creates (or truncates) `path` and writes `data` to it. Not atomic;
/// used for freshly-created V2 files where tmp+rename is unnecessary.
fn writeFile(path: []const u8, data: []const u8) !void {
    const out = try std.fs.cwd().createFile(path, .{});
    defer out.close();
    try out.writeAll(data);
}
// Tests
test "needsMigration: false when no shares dir" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    // A data dir without a shares/ subdirectory has nothing to migrate.
    try std.testing.expect(needsMigration(base_dir, allocator) == false);
}
test "needsMigration: false when shares dir is empty" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    const shares_dir = try std.fmt.allocPrint(allocator, "{s}/shares", .{base_dir});
    defer allocator.free(shares_dir);
    try std.fs.cwd().makePath(shares_dir);
    // An empty shares/ directory holds no identities to migrate.
    try std.testing.expect(needsMigration(base_dir, allocator) == false);
}
test "needsMigration: true when shares has subdirectories" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    // One identity subdirectory is enough to require a migration pass.
    const identity_dir = try std.fmt.allocPrint(allocator, "{s}/shares/abcdef1234", .{base_dir});
    defer allocator.free(identity_dir);
    try std.fs.cwd().makePath(identity_dir);
    try std.testing.expect(needsMigration(base_dir, allocator));
}
test "migrateV1toV2: full migration" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    // Create V1 structure
    const identity = "aabbccdd11223344";
    const id_dir = try std.fmt.allocPrint(allocator, "{s}/shares/{s}", .{ tmp_path, identity });
    defer allocator.free(id_dir);
    try std.fs.cwd().makePath(id_dir);
    // Write V1 files: share.bin, checksum.bin, and the plain-text version file.
    const share_path = try std.fmt.allocPrint(allocator, "{s}/share.bin", .{id_dir});
    defer allocator.free(share_path);
    try writeFile(share_path, "share data here");
    const checksum_path = try std.fmt.allocPrint(allocator, "{s}/checksum.bin", .{id_dir});
    defer allocator.free(checksum_path);
    try writeFile(checksum_path, "checksum bytes here");
    const version_path = try std.fmt.allocPrint(allocator, "{s}/version", .{id_dir});
    defer allocator.free(version_path);
    try writeFile(version_path, "7");
    // Run migration -- exactly one identity should be converted.
    const count = try migrateV1toV2(tmp_path, allocator);
    try std.testing.expectEqual(@as(u32, 1), count);
    // Verify V2 files exist
    const v2_share = try std.fmt.allocPrint(allocator, "{s}/vaults/{s}/default/share.bin", .{ tmp_path, identity });
    defer allocator.free(v2_share);
    std.fs.cwd().access(v2_share, .{}) catch {
        return error.TestUnexpectedResult;
    };
    const v2_meta = try std.fmt.allocPrint(allocator, "{s}/vaults/{s}/default/meta.json", .{ tmp_path, identity });
    defer allocator.free(v2_meta);
    std.fs.cwd().access(v2_meta, .{}) catch {
        return error.TestUnexpectedResult;
    };
    // Verify V1 dir was renamed to .migrated
    const migrated_dir = try std.fmt.allocPrint(allocator, "{s}/shares/{s}.migrated", .{ tmp_path, identity });
    defer allocator.free(migrated_dir);
    std.fs.cwd().access(migrated_dir, .{}) catch {
        return error.TestUnexpectedResult;
    };
    // Verify meta.json has correct version
    const meta_data = try std.fs.cwd().readFileAlloc(allocator, v2_meta, 4096);
    defer allocator.free(meta_data);
    // Local mirror of the meta.json schema written by the migration.
    const MetaJson = struct {
        version: u64,
        created_ns: i128,
        updated_ns: i128,
        size: usize,
    };
    const parsed = try std.json.parseFromSlice(MetaJson, allocator, meta_data, .{});
    defer parsed.deinit();
    try std.testing.expectEqual(@as(u64, 7), parsed.value.version);
    try std.testing.expectEqual(@as(usize, 15), parsed.value.size); // "share data here" = 15 bytes
    // needsMigration should now be false (original dir was renamed)
    try std.testing.expect(!needsMigration(tmp_path, allocator));
}
test "migrateV1toV2: skips already migrated" {
    const allocator = std.testing.allocator;
    var path_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const base_dir = try tmp.dir.realpath(".", &path_buf);
    // A "<id>.migrated" backup directory must not be migrated again.
    const backup_dir = try std.fmt.allocPrint(allocator, "{s}/shares/already.migrated", .{base_dir});
    defer allocator.free(backup_dir);
    try std.fs.cwd().makePath(backup_dir);
    try std.testing.expectEqual(@as(u32, 0), try migrateV1toV2(base_dir, allocator));
}

673
src/storage/vault_store.zig Normal file
View File

@ -0,0 +1,673 @@
/// Multi-secret storage engine (V2).
///
/// Each identity can store up to MAX_SECRETS_PER_IDENTITY named secrets.
/// Secrets are stored with HMAC integrity, anti-rollback version checks,
/// and atomic writes (tmp+rename).
///
/// Directory layout:
/// <data_dir>/vaults/<identity_hex>/
/// <secret_name>/
/// share.bin - Encrypted share data
/// checksum.bin - HMAC-SHA256 integrity
/// meta.json - {"version":1,"created_ns":...,"updated_ns":...,"size":123}
const std = @import("std");
const hmac = @import("../crypto/hmac.zig");
// Constants
/// Maximum number of named secrets a single identity may store
/// (enforced in writeSecret for new secrets).
pub const MAX_SECRETS_PER_IDENTITY = 1000;
/// Maximum secret name length in bytes (enforced by validateSecretName).
pub const MAX_SECRET_NAME_LEN = 128;
/// Maximum size of one secret's payload (also the read cap in readSecret).
pub const MAX_SECRET_SIZE = 512 * 1024; // 512 KiB
/// Characters allowed in secret names: alphanumeric, underscore, hyphen.
fn isValidNameChar(c: u8) bool {
    return switch (c) {
        'a'...'z', 'A'...'Z', '0'...'9', '_', '-' => true,
        else => false,
    };
}
// Types
/// Parsed view of a secret's meta.json.
pub const SecretMeta = struct {
    // Caller-supplied monotonic version used for anti-rollback checks.
    version: u64,
    // Creation time (std.time.nanoTimestamp), preserved across updates.
    created_ns: i128,
    // Time of the most recent write (std.time.nanoTimestamp).
    updated_ns: i128,
    // Size of share.bin in bytes.
    size: usize,
};
/// Errors surfaced by the V2 vault store.
pub const VaultStoreError = error{
    IdentityRequired,
    SecretNameRequired,
    SecretNameTooLong,
    SecretNameInvalid,
    SecretDataRequired,
    SecretDataTooLarge,
    // Per-identity MAX_SECRETS_PER_IDENTITY limit reached.
    SecretLimitExceeded,
    // Stored HMAC does not match the share bytes (corruption or tampering).
    IntegrityCheckFailed,
    // Anti-rollback: submitted version <= stored version.
    VersionConflict,
    NotFound,
    OutOfMemory,
    IoError,
};
// Public API
/// Validates a secret name: non-empty, within length limit, alphanumeric + '_' + '-'.
pub fn validateSecretName(name: []const u8) VaultStoreError!void {
    switch (name.len) {
        0 => return VaultStoreError.SecretNameRequired,
        1...MAX_SECRET_NAME_LEN => {},
        else => return VaultStoreError.SecretNameTooLong,
    }
    for (name) |byte| {
        if (!isValidNameChar(byte)) return VaultStoreError.SecretNameInvalid;
    }
}
/// Writes a named secret atomically with HMAC integrity and anti-rollback protection.
///
/// For new secrets, checks the per-identity secret count limit.
/// For existing secrets, rejects if `version` <= the stored version.
/// Writes share.bin, checksum.bin, and meta.json atomically (tmp+rename).
///
/// Fix over the previous version: meta.json is now read ONCE and used for
/// both the anti-rollback check and created_ns preservation. Reading it
/// twice raced against concurrent writers between the two reads.
pub fn writeSecret(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    data: []const u8,
    version: u64,
    integrity_key: []const u8,
    allocator: std.mem.Allocator,
) VaultStoreError!void {
    if (identity.len == 0) return VaultStoreError.IdentityRequired;
    try validateSecretName(name);
    if (data.len == 0) return VaultStoreError.SecretDataRequired;
    if (data.len > MAX_SECRET_SIZE) return VaultStoreError.SecretDataTooLarge;
    // NOTE(review): `identity` is interpolated into a filesystem path;
    // callers are assumed to pass a validated hex hash -- confirm upstream
    // validation so path separators can never appear here.
    const secret_dir = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}", .{ data_dir, identity, name }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(secret_dir);
    if (!secretExistsInternal(data_dir, identity, name, allocator)) {
        // New secret: enforce the per-identity count limit, then write with
        // a fresh created_ns (null means "use now").
        const count = countSecrets(data_dir, identity, allocator) catch 0;
        if (count >= MAX_SECRETS_PER_IDENTITY) return VaultStoreError.SecretLimitExceeded;
        return writeSecretInner(secret_dir, data, version, integrity_key, null, allocator);
    }
    // Existing secret: one meta.json read serves both checks below.
    const existing = readMeta(data_dir, identity, name, allocator) catch |err| switch (err) {
        // share.bin exists but meta.json doesn't: allow the write, treating
        // the secret as freshly created.
        error.NotFound => return writeSecretInner(secret_dir, data, version, integrity_key, null, allocator),
        else => return err,
    };
    // Anti-rollback: the submitted version must strictly increase.
    if (version <= existing.version) return VaultStoreError.VersionConflict;
    return writeSecretInner(secret_dir, data, version, integrity_key, existing.created_ns, allocator);
}
/// Reads a named secret and verifies HMAC integrity.
/// Caller must free the returned slice.
pub fn readSecret(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    integrity_key: []const u8,
    allocator: std.mem.Allocator,
) VaultStoreError![]u8 {
    const data_path = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}/share.bin", .{ data_dir, identity, name }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(data_path);
    const mac_path = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}/checksum.bin", .{ data_dir, identity, name }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(mac_path);
    // A missing/unreadable share maps to NotFound; an unreadable checksum
    // for an existing share surfaces as IoError.
    const secret = std.fs.cwd().readFileAlloc(allocator, data_path, MAX_SECRET_SIZE) catch {
        return VaultStoreError.NotFound;
    };
    errdefer allocator.free(secret);
    const mac_bytes = std.fs.cwd().readFileAlloc(allocator, mac_path, 32) catch {
        return VaultStoreError.IoError;
    };
    defer allocator.free(mac_bytes);
    // The checksum file must be exactly one HMAC-SHA256 output.
    if (mac_bytes.len != 32) {
        return VaultStoreError.IntegrityCheckFailed;
    }
    if (!hmac.verify(integrity_key, secret, mac_bytes[0..32].*)) {
        return VaultStoreError.IntegrityCheckFailed;
    }
    return secret;
}
/// Reads and parses meta.json for a named secret.
/// Returns NotFound if the file is missing and IoError if it cannot be parsed.
pub fn readMeta(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    allocator: std.mem.Allocator,
) VaultStoreError!SecretMeta {
    const meta_path = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}/meta.json", .{ data_dir, identity, name }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(meta_path);
    const raw = std.fs.cwd().readFileAlloc(allocator, meta_path, 4096) catch {
        return VaultStoreError.NotFound;
    };
    defer allocator.free(raw);
    const parsed = std.json.parseFromSlice(MetaJson, allocator, raw, .{}) catch {
        return VaultStoreError.IoError;
    };
    defer parsed.deinit();
    // Scalar fields are copied out before parsed.deinit() runs at scope exit.
    return .{
        .version = parsed.value.version,
        .created_ns = parsed.value.created_ns,
        .updated_ns = parsed.value.updated_ns,
        .size = parsed.value.size,
    };
}
/// Deletes a named secret (removes the entire <name>/ directory).
/// Returns NotFound if the secret directory does not exist.
pub fn deleteSecret(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    allocator: std.mem.Allocator,
) VaultStoreError!void {
    const target_dir = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}", .{ data_dir, identity, name }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(target_dir);
    // Distinguish "never existed" from "failed to remove".
    std.fs.cwd().access(target_dir, .{}) catch {
        return VaultStoreError.NotFound;
    };
    std.fs.cwd().deleteTree(target_dir) catch {
        return VaultStoreError.IoError;
    };
}
/// Checks if a named secret exists for the given identity.
/// Thin public wrapper over the internal existence probe.
pub fn secretExists(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    allocator: std.mem.Allocator,
) !bool {
    return secretExistsInternal(data_dir, identity, name, allocator);
}
/// Lists all secret names for an identity.
/// Caller must free each name slice and the returned slice.
pub fn listSecrets(
    data_dir: []const u8,
    identity: []const u8,
    allocator: std.mem.Allocator,
) ![][]const u8 {
    const vault_dir = std.fmt.allocPrint(allocator, "{s}/vaults/{s}", .{ data_dir, identity }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(vault_dir);
    var dir = std.fs.cwd().openDir(vault_dir, .{ .iterate = true }) catch {
        // No vault dir = no secrets
        return allocator.alloc([]const u8, 0) catch return VaultStoreError.OutOfMemory;
    };
    defer dir.close();
    var names: std.ArrayListUnmanaged([]const u8) = .{};
    // On any error below, release every duped name plus the list's storage.
    errdefer {
        for (names.items) |n| allocator.free(n);
        names.deinit(allocator);
    }
    var it = dir.iterate();
    while (try it.next()) |entry| {
        if (entry.kind == .directory) {
            // Each subdirectory is one named secret; copy the name out of
            // the iterator's transient buffer before it is reused.
            const name_copy = allocator.dupe(u8, entry.name) catch return VaultStoreError.OutOfMemory;
            names.append(allocator, name_copy) catch {
                // The copy is not yet owned by the list; free it here
                // (the errdefer only covers items already appended).
                allocator.free(name_copy);
                return VaultStoreError.OutOfMemory;
            };
        }
    }
    // Success: ownership of the slice and every name passes to the caller.
    return names.toOwnedSlice(allocator) catch return VaultStoreError.OutOfMemory;
}
/// Counts the number of secrets for an identity.
/// A missing vault directory counts as zero secrets.
pub fn countSecrets(
    data_dir: []const u8,
    identity: []const u8,
    allocator: std.mem.Allocator,
) !usize {
    const vault_path = std.fmt.allocPrint(allocator, "{s}/vaults/{s}", .{ data_dir, identity }) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(vault_path);
    var vault = std.fs.cwd().openDir(vault_path, .{ .iterate = true }) catch return 0;
    defer vault.close();
    var total: usize = 0;
    var walker = vault.iterate();
    while (try walker.next()) |entry| {
        // Each secret occupies exactly one subdirectory.
        if (entry.kind == .directory) total += 1;
    }
    return total;
}
// Internal helpers
/// JSON shape for meta.json parsing.
/// NOTE(review): duplicates SecretMeta field-for-field; presumably kept
/// separate so the public type is decoupled from the on-disk JSON schema --
/// confirm before merging the two.
const MetaJson = struct {
    version: u64,
    created_ns: i128,
    updated_ns: i128,
    size: usize,
};
/// Existence probe: a secret exists iff its share.bin is accessible.
/// Any failure (missing file, permissions, OOM) reads as "missing".
fn secretExistsInternal(
    data_dir: []const u8,
    identity: []const u8,
    name: []const u8,
    allocator: std.mem.Allocator,
) bool {
    const probe_path = std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}/share.bin", .{ data_dir, identity, name }) catch
        return false;
    defer allocator.free(probe_path);
    if (std.fs.cwd().access(probe_path, .{})) {
        return true;
    } else |_| {
        return false;
    }
}
/// Persists one secret to `secret_dir` as three files, each written via
/// temp-file + rename: share.bin (payload), checksum.bin (HMAC over the
/// payload keyed by `integrity_key`), and meta.json (version, timestamps,
/// size). `existing_created_ns` preserves the original creation time on
/// updates; pass null for a brand-new secret.
fn writeSecretInner(
    secret_dir: []const u8,
    data: []const u8,
    version: u64,
    integrity_key: []const u8,
    existing_created_ns: ?i128,
    allocator: std.mem.Allocator,
) VaultStoreError!void {
    // Ensure the secret directory (and any missing parents) exist.
    std.fs.cwd().makePath(secret_dir) catch return VaultStoreError.IoError;
    // share.bin: the raw share payload.
    const share_final = std.fmt.allocPrint(allocator, "{s}/share.bin", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(share_final);
    const share_scratch = std.fmt.allocPrint(allocator, "{s}/share.bin.tmp", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(share_scratch);
    try atomicWrite(share_final, share_scratch, data);
    // checksum.bin: HMAC of the payload so tampering is detectable on read.
    const mac = hmac.compute(integrity_key, data);
    const mac_final = std.fmt.allocPrint(allocator, "{s}/checksum.bin", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(mac_final);
    const mac_scratch = std.fmt.allocPrint(allocator, "{s}/checksum.bin.tmp", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(mac_scratch);
    try atomicWrite(mac_final, mac_scratch, &mac);
    // meta.json: fixed-shape JSON rendered into a stack buffer.
    const now = std.time.nanoTimestamp();
    var meta_buf: [512]u8 = undefined;
    const meta_json = std.fmt.bufPrint(&meta_buf,
        \\{{"version":{d},"created_ns":{d},"updated_ns":{d},"size":{d}}}
    , .{ version, existing_created_ns orelse now, now, data.len }) catch
        return VaultStoreError.IoError;
    const meta_final = std.fmt.allocPrint(allocator, "{s}/meta.json", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(meta_final);
    const meta_scratch = std.fmt.allocPrint(allocator, "{s}/meta.json.tmp", .{secret_dir}) catch
        return VaultStoreError.OutOfMemory;
    defer allocator.free(meta_scratch);
    try atomicWrite(meta_final, meta_scratch, meta_json);
}
/// Writes `data` to `tmp_path`, flushes it, then renames it over
/// `final_path` so readers never observe a partially-written file.
/// On failure the temp file may be left behind; a later write with the
/// same paths truncates and reuses it.
fn atomicWrite(final_path: []const u8, tmp_path: []const u8, data: []const u8) VaultStoreError!void {
    {
        const tmp_file = std.fs.cwd().createFile(tmp_path, .{}) catch {
            return VaultStoreError.IoError;
        };
        // Close before the rename below: some platforms (notably Windows)
        // refuse to rename a file that still has an open handle.
        defer tmp_file.close();
        tmp_file.writeAll(data) catch {
            return VaultStoreError.IoError;
        };
        // Flush to stable storage so the rename cannot publish a file whose
        // contents are still only in the OS page cache.
        tmp_file.sync() catch {
            return VaultStoreError.IoError;
        };
    }
    // Rename is atomic within a filesystem: readers see old or new, never a mix.
    std.fs.cwd().rename(tmp_path, final_path) catch {
        return VaultStoreError.IoError;
    };
}
// Tests
test "validateSecretName: valid names" {
    // Names built from [A-Za-z0-9_-] within the length limit must pass.
    const accepted = [_][]const u8{ "default", "my-secret", "secret_123", "a", "ABC-xyz_09" };
    for (accepted) |name| try validateSecretName(name);
}
test "validateSecretName: empty name" {
    // A zero-length name gets its own dedicated error.
    const result = validateSecretName("");
    try std.testing.expectError(VaultStoreError.SecretNameRequired, result);
}
test "validateSecretName: too long" {
    // One byte past the limit must be rejected.
    const oversized = "a" ** (MAX_SECRET_NAME_LEN + 1);
    try std.testing.expectError(VaultStoreError.SecretNameTooLong, validateSecretName(oversized));
}
test "validateSecretName: invalid characters" {
    // Whitespace, path separators, dots, and punctuation are all rejected.
    const rejected = [_][]const u8{ "has space", "has/slash", "has.dot", "has@at" };
    for (rejected) |name|
        try std.testing.expectError(VaultStoreError.SecretNameInvalid, validateSecretName(name));
}
// Writes a secret into a temp data dir and reads it back, exercising the
// full write path (share + checksum + meta) and HMAC verification on read.
test "writeSecret and readSecret round-trip" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789";
    const name = "my-secret";
    const data = "encrypted share data here";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, name, data, 1, key, allocator);
    const read_data = try readSecret(tmp_path, identity, name, key, allocator);
    defer allocator.free(read_data);
    try std.testing.expectEqualSlices(u8, data, read_data);
}
// After a successful write, corrupting share.bin on disk must make
// readSecret fail with IntegrityCheckFailed (HMAC mismatch).
test "readSecret: integrity check detects tampering" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef";
    const name = "tamper-test";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, name, "original data", 1, key, allocator);
    // Tamper with the share file
    const share_path = try std.fmt.allocPrint(allocator, "{s}/vaults/{s}/{s}/share.bin", .{ tmp_path, identity, name });
    defer allocator.free(share_path);
    const file = try std.fs.cwd().openFile(share_path, .{ .mode = .write_only });
    defer file.close();
    try file.writeAll("tampered data!!");
    try std.testing.expectError(VaultStoreError.IntegrityCheckFailed, readSecret(tmp_path, identity, name, key, allocator));
}
// Reading a secret that was never written yields NotFound.
test "readSecret: not found" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    try std.testing.expectError(VaultStoreError.NotFound, readSecret(tmp_path, "nonexistent", "nope", "key!", allocator));
}
// An empty identity string is rejected before anything touches disk.
test "writeSecret: rejects empty identity" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    try std.testing.expectError(VaultStoreError.IdentityRequired, writeSecret(tmp_path, "", "name", "data", 1, "key!", allocator));
}
// An empty payload is rejected with SecretDataRequired.
test "writeSecret: rejects empty data" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    try std.testing.expectError(VaultStoreError.SecretDataRequired, writeSecret(tmp_path, "id123", "name", "", 1, "key!", allocator));
}
// Versions must strictly increase: writing with an equal or lower version
// is refused (anti-rollback), while a higher version replaces the data.
test "writeSecret: anti-rollback rejects lower version" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "rollback0123456789abcdef";
    const name = "versioned";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, name, "v1 data", 5, key, allocator);
    // Same version should fail
    try std.testing.expectError(VaultStoreError.VersionConflict, writeSecret(tmp_path, identity, name, "v1 again", 5, key, allocator));
    // Lower version should fail
    try std.testing.expectError(VaultStoreError.VersionConflict, writeSecret(tmp_path, identity, name, "rollback", 3, key, allocator));
    // Higher version should succeed
    try writeSecret(tmp_path, identity, name, "v2 data", 6, key, allocator);
    const read_data = try readSecret(tmp_path, identity, name, key, allocator);
    defer allocator.free(read_data);
    try std.testing.expectEqualSlices(u8, "v2 data", read_data);
}
// readMeta must report the version and payload size that were written,
// plus sane creation/update timestamps.
test "readMeta: returns correct metadata" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "meta0123456789abcdef";
    const name = "with-meta";
    const key = "integrity-key-32-bytes-long!!!!";
    const data = "some secret data";
    try writeSecret(tmp_path, identity, name, data, 42, key, allocator);
    const meta = try readMeta(tmp_path, identity, name, allocator);
    try std.testing.expectEqual(@as(u64, 42), meta.version);
    try std.testing.expectEqual(data.len, meta.size);
    try std.testing.expect(meta.created_ns > 0);
    try std.testing.expect(meta.updated_ns >= meta.created_ns);
}
// deleteSecret removes the stored secret; existence flips true -> false.
test "deleteSecret: removes secret" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "delete0123456789abcdef";
    const name = "to-delete";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, name, "doomed", 1, key, allocator);
    const exists_before = try secretExists(tmp_path, identity, name, allocator);
    try std.testing.expect(exists_before);
    try deleteSecret(tmp_path, identity, name, allocator);
    const exists_after = try secretExists(tmp_path, identity, name, allocator);
    try std.testing.expect(!exists_after);
}
// Deleting a secret that does not exist yields NotFound.
test "deleteSecret: not found" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    try std.testing.expectError(VaultStoreError.NotFound, deleteSecret(tmp_path, "ghost", "nope", allocator));
}
// An identity with no vault directory lists zero secrets (not an error).
test "listSecrets: empty for new identity" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const names = try listSecrets(tmp_path, "nobody", allocator);
    defer allocator.free(names);
    try std.testing.expectEqual(@as(usize, 0), names.len);
}
// After writing three secrets, listSecrets must return exactly those
// three names; iteration order is unspecified, so presence is checked.
test "listSecrets: returns all secret names" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "list0123456789abcdef";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, "alpha", "data-a", 1, key, allocator);
    try writeSecret(tmp_path, identity, "beta", "data-b", 1, key, allocator);
    try writeSecret(tmp_path, identity, "gamma", "data-c", 1, key, allocator);
    const names = try listSecrets(tmp_path, identity, allocator);
    defer {
        for (names) |n| allocator.free(n);
        allocator.free(names);
    }
    try std.testing.expectEqual(@as(usize, 3), names.len);
    // Check all three names are present (order is not guaranteed)
    var found_alpha = false;
    var found_beta = false;
    var found_gamma = false;
    for (names) |n| {
        if (std.mem.eql(u8, n, "alpha")) found_alpha = true;
        if (std.mem.eql(u8, n, "beta")) found_beta = true;
        if (std.mem.eql(u8, n, "gamma")) found_gamma = true;
    }
    try std.testing.expect(found_alpha);
    try std.testing.expect(found_beta);
    try std.testing.expect(found_gamma);
}
// The count starts at zero and increments by one per written secret.
test "countSecrets: counts correctly" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "count0123456789abcdef";
    const key = "integrity-key-32-bytes-long!!!!";
    try std.testing.expectEqual(@as(usize, 0), try countSecrets(tmp_path, identity, allocator));
    try writeSecret(tmp_path, identity, "first", "data-1", 1, key, allocator);
    try std.testing.expectEqual(@as(usize, 1), try countSecrets(tmp_path, identity, allocator));
    try writeSecret(tmp_path, identity, "second", "data-2", 1, key, allocator);
    try std.testing.expectEqual(@as(usize, 2), try countSecrets(tmp_path, identity, allocator));
}
// Updating a secret must keep its original creation timestamp while
// advancing the update timestamp and the version.
test "writeSecret: update preserves created_ns" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "preserve0123456789abcdef";
    const name = "preserve-ts";
    const key = "integrity-key-32-bytes-long!!!!";
    try writeSecret(tmp_path, identity, name, "v1", 1, key, allocator);
    const meta1 = try readMeta(tmp_path, identity, name, allocator);
    // Update with higher version
    try writeSecret(tmp_path, identity, name, "v2", 2, key, allocator);
    const meta2 = try readMeta(tmp_path, identity, name, allocator);
    // created_ns should be preserved
    try std.testing.expectEqual(meta1.created_ns, meta2.created_ns);
    // updated_ns should be >= the first
    try std.testing.expect(meta2.updated_ns >= meta1.updated_ns);
    // version should be updated
    try std.testing.expectEqual(@as(u64, 2), meta2.version);
}
// secretExists reports false before a write and true afterwards.
test "secretExists: false for missing, true for existing" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    var tmp_dir = std.testing.tmpDir(.{});
    defer tmp_dir.cleanup();
    const tmp_path = try tmp_dir.dir.realpath(".", &tmp_dir_buf);
    const identity = "exists0123456789abcdef";
    const key = "integrity-key-32-bytes-long!!!!";
    try std.testing.expect(!try secretExists(tmp_path, identity, "nope", allocator));
    try writeSecret(tmp_path, identity, "yes", "data", 1, key, allocator);
    try std.testing.expect(try secretExists(tmp_path, identity, "yes", allocator));
}

736
src/test_integration.zig Normal file
View File

@ -0,0 +1,736 @@
/// Integration tests verify the full system working together across modules.
///
/// These tests verify end-to-end flows that span multiple subsystems:
/// SSS split/combine, AES-256-GCM encryption, file_store with HMAC integrity,
/// Merkle commitment verification, proactive re-sharing, and quorum logic.
const std = @import("std");
// Module imports
const split_mod = @import("sss/split.zig");
const combine_mod = @import("sss/combine.zig");
const commitment_mod = @import("sss/commitment.zig");
const reshare_mod = @import("sss/reshare.zig");
const types = @import("sss/types.zig");
const aes = @import("crypto/aes.zig");
const hmac = @import("crypto/hmac.zig");
const hkdf = @import("crypto/hkdf.zig");
const file_store = @import("storage/file_store.zig");
const quorum = @import("membership/quorum.zig");
// Helpers
/// Creates a temporary directory and returns its real path.
/// Caller must call cleanup() on the returned tmpDir.
fn makeTmpDir(buf: []u8) struct { dir: std.testing.TmpDir, path: []const u8 } {
var tmp = std.testing.tmpDir(.{});
const path = tmp.dir.realpath(".", buf) catch @panic("failed to get tmp realpath");
return .{ .dir = tmp, .path = path };
}
/// Generates a random byte buffer of `len` bytes.
fn randomBytes(allocator: std.mem.Allocator, len: usize) ![]u8 {
const buf = try allocator.alloc(u8, len);
std.crypto.random.bytes(buf);
return buf;
}
/// Hex-encode a byte slice into a stack buffer suitable for identity hashes.
fn hexEncode(bytes: []const u8, out: []u8) []const u8 {
const charset = "0123456789abcdef";
var i: usize = 0;
for (bytes) |b| {
out[i] = charset[b >> 4];
out[i + 1] = charset[b & 0x0F];
i += 2;
}
return out[0..i];
}
//
// 1. Full vault lifecycle simulation
//
// End-to-end lifecycle: split a random 1KB secret into 7 shares (K=3),
// persist each share via file_store with HMAC integrity, read them back,
// then reconstruct the secret from three different K-sized subsets.
test "integration: full vault lifecycle — split, store, read, combine" {
    const allocator = std.testing.allocator;
    // Generate a random 1KB secret (simulating encrypted vault data)
    const secret = try randomBytes(allocator, 1024);
    defer allocator.free(secret);
    // Split with N=7, K=3 (7 guardian nodes)
    const share_set = try split_mod.split(allocator, secret, 7, 3);
    defer share_set.deinit(allocator);
    // Store each share to a separate temp directory using file_store
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    const tmp = makeTmpDir(&tmp_dir_buf);
    var tmp_dir = tmp.dir;
    defer tmp_dir.cleanup();
    const tmp_path = tmp.path;
    const integrity_key = "vault-integration-test-key-32b!!";
    // Write each share to disk with a unique identity
    for (share_set.shares, 0..) |share, i| {
        var id_buf: [8]u8 = undefined;
        const id_byte = [_]u8{ @as(u8, @truncate(i)), share.x, 0xAB, 0xCD };
        const identity = hexEncode(&id_byte, &id_buf);
        try file_store.writeShare(tmp_path, identity, share.y, integrity_key, allocator);
    }
    // Read shares back and verify HMAC integrity
    for (share_set.shares, 0..) |share, i| {
        var id_buf: [8]u8 = undefined;
        const id_byte = [_]u8{ @as(u8, @truncate(i)), share.x, 0xAB, 0xCD };
        const identity = hexEncode(&id_byte, &id_buf);
        const read_data = try file_store.readShare(tmp_path, identity, integrity_key, allocator);
        defer allocator.free(read_data);
        try std.testing.expectEqualSlices(u8, share.y, read_data);
    }
    // Combine using multiple different K-subsets and verify reconstruction
    // (recovered buffers are zeroized before free to avoid leaving secret
    // material in memory)
    // Subset 1: shares 0, 1, 2
    {
        const subset = [_]types.Share{ share_set.shares[0], share_set.shares[1], share_set.shares[2] };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // Subset 2: shares 2, 4, 6
    {
        const subset = [_]types.Share{ share_set.shares[2], share_set.shares[4], share_set.shares[6] };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // Subset 3: shares 1, 3, 5
    {
        const subset = [_]types.Share{ share_set.shares[1], share_set.shares[3], share_set.shares[5] };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
}
//
// 2. Multi-guardian failure tolerance
//
// With N=10, K=4: any K or more shares reconstruct the secret; K-1 shares
// must not (information hiding below the threshold).
test "integration: multi-guardian failure tolerance — N=10, K=4" {
    const allocator = std.testing.allocator;
    const secret = try randomBytes(allocator, 256);
    defer allocator.free(secret);
    const n: u8 = 10;
    const k: u8 = 4;
    const share_set = try split_mod.split(allocator, secret, n, k);
    defer share_set.deinit(allocator);
    // Recovery with exactly K shares (indices 0,3,5,9)
    {
        const subset = [_]types.Share{
            share_set.shares[0],
            share_set.shares[3],
            share_set.shares[5],
            share_set.shares[9],
        };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // Recovery with K+1 shares
    {
        const recovered = try combine_mod.combine(allocator, share_set.shares[0..5]);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // Recovery with K+2 shares
    {
        const recovered = try combine_mod.combine(allocator, share_set.shares[0..6]);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // Recovery with all N shares
    {
        const recovered = try combine_mod.combine(allocator, share_set.shares);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, secret, recovered);
    }
    // K-1 shares should NOT reconstruct the original secret
    // (with overwhelming probability for a 256-byte secret)
    {
        const subset = [_]types.Share{
            share_set.shares[0],
            share_set.shares[1],
            share_set.shares[2],
        };
        const result = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(result, 0);
            allocator.free(result);
        }
        try std.testing.expect(!std.mem.eql(u8, secret, result));
    }
}
//
// 3. Storage integrity under tampering
//
// Flipping a byte of share.bin on disk must surface as IntegrityCheckFailed
// on read; re-writing the original data restores a clean read.
test "integration: storage integrity under tampering" {
    const allocator = std.testing.allocator;
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    const tmp = makeTmpDir(&tmp_dir_buf);
    var tmp_dir = tmp.dir;
    defer tmp_dir.cleanup();
    const tmp_path = tmp.path;
    const identity = "tampertest01";
    const share_data = try randomBytes(allocator, 512);
    defer allocator.free(share_data);
    const integrity_key = "tamper-integrity-key-32-bytes!!!";
    // Write and read back successfully
    try file_store.writeShare(tmp_path, identity, share_data, integrity_key, allocator);
    {
        const read_data = try file_store.readShare(tmp_path, identity, integrity_key, allocator);
        defer allocator.free(read_data);
        try std.testing.expectEqualSlices(u8, share_data, read_data);
    }
    // Tamper with the share.bin file (flip a byte)
    const share_path = try std.fmt.allocPrint(allocator, "{s}/shares/{s}/share.bin", .{ tmp_path, identity });
    defer allocator.free(share_path);
    {
        const file = try std.fs.cwd().openFile(share_path, .{ .mode = .write_only });
        defer file.close();
        // Write a single modified byte at the beginning
        const tampered_byte = [_]u8{share_data[0] ^ 0xFF};
        try file.writeAll(&tampered_byte);
    }
    // readShare should now fail with IntegrityCheckFailed
    try std.testing.expectError(
        file_store.StoreError.IntegrityCheckFailed,
        file_store.readShare(tmp_path, identity, integrity_key, allocator),
    );
    // Re-write the original data and verify it works again
    try file_store.writeShare(tmp_path, identity, share_data, integrity_key, allocator);
    {
        const read_data = try file_store.readShare(tmp_path, identity, integrity_key, allocator);
        defer allocator.free(read_data);
        try std.testing.expectEqualSlices(u8, share_data, read_data);
    }
}
//
// 4. Shamir + AES-GCM full encryption round-trip
//
// Encrypt-then-split: the AES-GCM nonce||tag||ciphertext bundle is the
// Shamir secret; K recombined shares must decrypt back to the plaintext.
test "integration: Shamir + AES-256-GCM encryption round-trip" {
    const allocator = std.testing.allocator;
    // Generate a random AES key
    const key = aes.generateKey();
    // Encrypt a payload
    const plaintext = "This is sensitive vault data that must survive split/combine via Shamir + AES-GCM.";
    const encrypted = try aes.encrypt(allocator, plaintext, key);
    defer allocator.free(@constCast(encrypted.ciphertext));
    // Use the ciphertext + nonce + tag as the "secret" for Shamir split.
    // Concatenate: nonce (12) || tag (16) || ciphertext (N)
    const secret_len = aes.NONCE_SIZE + aes.TAG_SIZE + encrypted.ciphertext.len;
    const secret = try allocator.alloc(u8, secret_len);
    defer allocator.free(secret);
    @memcpy(secret[0..aes.NONCE_SIZE], &encrypted.nonce);
    @memcpy(secret[aes.NONCE_SIZE .. aes.NONCE_SIZE + aes.TAG_SIZE], &encrypted.tag);
    @memcpy(secret[aes.NONCE_SIZE + aes.TAG_SIZE ..], encrypted.ciphertext);
    // Split into 5 shares with K=3
    const share_set = try split_mod.split(allocator, secret, 5, 3);
    defer share_set.deinit(allocator);
    // Combine K=3 shares back
    const subset = [_]types.Share{ share_set.shares[0], share_set.shares[2], share_set.shares[4] };
    const recovered_secret = try combine_mod.combine(allocator, &subset);
    defer {
        @memset(recovered_secret, 0);
        allocator.free(recovered_secret);
    }
    // Extract nonce, tag, ciphertext from recovered secret
    var recovered_nonce: [aes.NONCE_SIZE]u8 = undefined;
    @memcpy(&recovered_nonce, recovered_secret[0..aes.NONCE_SIZE]);
    var recovered_tag: [aes.TAG_SIZE]u8 = undefined;
    @memcpy(&recovered_tag, recovered_secret[aes.NONCE_SIZE .. aes.NONCE_SIZE + aes.TAG_SIZE]);
    const recovered_ct = recovered_secret[aes.NONCE_SIZE + aes.TAG_SIZE ..];
    const recovered_encrypted = aes.EncryptedData{
        .ciphertext = recovered_ct,
        .nonce = recovered_nonce,
        .tag = recovered_tag,
    };
    // Decrypt with original key
    const decrypted = try aes.decrypt(allocator, recovered_encrypted, key);
    defer {
        @memset(decrypted, 0);
        allocator.free(decrypted);
    }
    try std.testing.expectEqualSlices(u8, plaintext, decrypted);
}
//
// 5. Adaptive threshold + quorum consistency
//
// Checks the quorum invariants documented below across a range of
// cluster sizes, from the 3-node minimum up to 100 nodes.
test "integration: adaptive threshold + quorum consistency" {
    // readQuorum is the Shamir threshold K = max(3, floor(N/3))
    // writeQuorum is ceil(2/3 * N)
    //
    // Invariants that always hold:
    // - threshold >= 3 (minimum security)
    // - writeQuorum > N/2 (majority) for N >= 3
    //
    // For production-sized clusters (N >= 9), an additional invariant holds:
    // - threshold <= writeQuorum (enough shares stored for reconstruction)
    // For small clusters (N < 9), the minimum threshold of 3 can exceed the
    // write quorum. This is a known tradeoff: small clusters require ALL nodes
    // for write+read to succeed, which is acceptable for tiny deployments.
    const test_values = [_]usize{ 3, 5, 7, 10, 14, 50, 100 };
    for (test_values) |n| {
        const threshold = quorum.readQuorum(n);
        const wq = quorum.writeQuorum(n);
        // threshold >= 3 (minimum security)
        try std.testing.expect(threshold >= 3);
        // writeQuorum > N/2 (majority) for N >= 3
        try std.testing.expect(wq > n / 2);
        // For production clusters (N >= 9), threshold must fit within write quorum
        if (n >= 9) {
            try std.testing.expect(threshold <= wq);
        }
        // writeQuorum + threshold >= N (read+write overlap guarantees consistency)
        // This is the fundamental quorum intersection property: any write set
        // and any read set must share at least one node.
        try std.testing.expect(wq + threshold >= n);
    }
}
//
// 6. Large payload round-trip
//
// Stress the pipeline with a 100KB payload: 14 shares, K=5, full
// store/read/combine round-trip with byte-exact recovery.
test "integration: large payload round-trip (100KB)" {
    const allocator = std.testing.allocator;
    // Generate a 100KB random payload
    const payload_size = 100 * 1024;
    const secret = try randomBytes(allocator, payload_size);
    defer allocator.free(secret);
    // Split into 14 shares with K=5
    const share_set = try split_mod.split(allocator, secret, 14, 5);
    defer share_set.deinit(allocator);
    // Store all shares to disk via file_store
    var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
    const tmp = makeTmpDir(&tmp_dir_buf);
    var tmp_dir = tmp.dir;
    defer tmp_dir.cleanup();
    const tmp_path = tmp.path;
    const integrity_key = "large-payload-integrity-key-32b!";
    for (share_set.shares, 0..) |share, i| {
        var id_buf: [8]u8 = undefined;
        const id_byte = [_]u8{ @as(u8, @truncate(i)), share.x, 0x11, 0x22 };
        const identity = hexEncode(&id_byte, &id_buf);
        try file_store.writeShare(tmp_path, identity, share.y, integrity_key, allocator);
    }
    // Read back any 5 shares (indices 2, 5, 7, 10, 13)
    const read_indices = [_]usize{ 2, 5, 7, 10, 13 };
    var read_shares: [5]types.Share = undefined;
    var read_bufs: [5][]u8 = undefined;
    for (read_indices, 0..) |idx, i| {
        var id_buf: [8]u8 = undefined;
        const id_byte = [_]u8{ @as(u8, @truncate(idx)), share_set.shares[idx].x, 0x11, 0x22 };
        const identity = hexEncode(&id_byte, &id_buf);
        const data = try file_store.readShare(tmp_path, identity, integrity_key, allocator);
        read_bufs[i] = data;
        read_shares[i] = types.Share{
            .x = share_set.shares[idx].x,
            .y = data,
        };
    }
    defer {
        for (&read_bufs) |buf| allocator.free(buf);
    }
    // Combine and verify exact match
    const recovered = try combine_mod.combine(allocator, &read_shares);
    defer {
        @memset(recovered, 0);
        allocator.free(recovered);
    }
    try std.testing.expectEqual(payload_size, recovered.len);
    try std.testing.expectEqualSlices(u8, secret, recovered);
}
//
// 7. Commitment tree cross-check
//
// Merkle commitments: every honest share's proof verifies against the
// tree root; a tampered share or a wrong root must fail verification.
test "integration: Merkle commitment tree — build, prove, verify, tamper" {
    const allocator = std.testing.allocator;
    // Create a set of shares via Shamir split
    const secret = try randomBytes(allocator, 64);
    defer allocator.free(secret);
    const share_set = try split_mod.split(allocator, secret, 7, 3);
    defer share_set.deinit(allocator);
    // Build the Merkle commitment tree
    const tree = try commitment_mod.buildTree(allocator, share_set.shares);
    defer tree.deinit(allocator);
    try std.testing.expectEqual(@as(usize, 7), tree.leaves.len);
    // Verify that each share's Merkle proof validates against the root
    for (share_set.shares, 0..) |share, i| {
        const proof = try commitment_mod.generateProof(allocator, share_set.shares, i);
        defer proof.deinit(allocator);
        try std.testing.expect(commitment_mod.verifyProof(share, proof, tree.root));
    }
    // Tamper with one share and verify the proof fails
    {
        const proof_0 = try commitment_mod.generateProof(allocator, share_set.shares, 0);
        defer proof_0.deinit(allocator);
        // Create a tampered share with modified y data
        const tampered_y = try allocator.alloc(u8, share_set.shares[0].y.len);
        defer allocator.free(tampered_y);
        @memcpy(tampered_y, share_set.shares[0].y);
        tampered_y[0] ^= 0xFF; // flip a byte
        const tampered_share = types.Share{
            .x = share_set.shares[0].x,
            .y = tampered_y,
        };
        // Proof should fail for the tampered share
        try std.testing.expect(!commitment_mod.verifyProof(tampered_share, proof_0, tree.root));
    }
    // Verify wrong root also fails
    {
        const proof_0 = try commitment_mod.generateProof(allocator, share_set.shares, 0);
        defer proof_0.deinit(allocator);
        const wrong_root = [_]u8{0xDE} ** 32;
        try std.testing.expect(!commitment_mod.verifyProof(share_set.shares[0], proof_0, wrong_root));
    }
}
//
// 8. Reshare round-trip
//
// Proactive resharing: applying all guardians' deltas yields a new share
// epoch that still reconstructs the secret, while mixing shares across
// epochs must not reconstruct it.
test "integration: proactive reshare — new shares reconstruct, old+new mixed fails" {
    const allocator = std.testing.allocator;
    const secret = [_]u8{ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160 };
    const n: u8 = 5;
    const k: u8 = 3;
    // Step 1: Split the secret into original shares
    const share_set = try split_mod.split(allocator, &secret, n, k);
    defer share_set.deinit(allocator);
    // Step 2: Each guardian generates reshare deltas
    var all_deltas: [5][]reshare_mod.ReshareDelta = undefined;
    for (0..n) |i| {
        all_deltas[i] = try reshare_mod.generateDeltas(
            allocator,
            @as(u8, @truncate(i)) + 1,
            secret.len,
            n,
            k,
        );
    }
    defer {
        for (0..n) |i| {
            for (all_deltas[i]) |*d| d.deinit(allocator);
            allocator.free(all_deltas[i]);
        }
    }
    // Step 3: Each guardian applies deltas to get new shares
    var new_shares: [5]types.Share = undefined;
    for (0..n) |j| {
        var deltas_for_j: [5]reshare_mod.ReshareDelta = undefined;
        for (0..n) |i| {
            deltas_for_j[i] = all_deltas[i][j];
        }
        new_shares[j] = try reshare_mod.applyDeltas(
            allocator,
            share_set.shares[j],
            &deltas_for_j,
        );
    }
    defer {
        for (&new_shares) |*s| s.deinit(allocator);
    }
    // Step 4: Verify new shares reconstruct the same secret
    // Test multiple subsets of new shares
    {
        const subset = [_]types.Share{ new_shares[0], new_shares[2], new_shares[4] };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, &secret, recovered);
    }
    {
        const subset = [_]types.Share{ new_shares[1], new_shares[3], new_shares[4] };
        const recovered = try combine_mod.combine(allocator, &subset);
        defer {
            @memset(recovered, 0);
            allocator.free(recovered);
        }
        try std.testing.expectEqualSlices(u8, &secret, recovered);
    }
    // Step 5: Verify old + new shares mixed does NOT reconstruct the secret
    // Mix: 1 new share + 2 old shares from different epoch
    {
        const mixed = [_]types.Share{ new_shares[0], share_set.shares[2], share_set.shares[4] };
        const result = try combine_mod.combine(allocator, &mixed);
        defer {
            @memset(result, 0);
            allocator.free(result);
        }
        // With a 16-byte secret, the probability of an accidental match is ~1/2^128
        try std.testing.expect(!std.mem.eql(u8, &secret, result));
    }
    // Mix: 2 new shares + 1 old share
    {
        const mixed = [_]types.Share{ new_shares[0], new_shares[1], share_set.shares[3] };
        const result = try combine_mod.combine(allocator, &mixed);
        defer {
            @memset(result, 0);
            allocator.free(result);
        }
        try std.testing.expect(!std.mem.eql(u8, &secret, result));
    }
}
//
// 9. HKDF-derived keys for domain separation in vault operations
//
// HKDF with distinct info strings must yield independent keys, and each
// derived key only works for its own purpose (AES vs HMAC).
test "integration: HKDF domain separation — different contexts yield different keys" {
    // Simulate deriving multiple keys from a single seed for different vault operations:
    // one for AES encryption, one for HMAC integrity, one for share commitment.
    const seed = "master-vault-seed-from-wallet-signature-1234567890";
    var aes_key: [32]u8 = undefined;
    var hmac_key: [32]u8 = undefined;
    var commit_key: [32]u8 = undefined;
    hkdf.deriveKey(&aes_key, seed, "orama-vault-v1", "aes-encryption-key");
    hkdf.deriveKey(&hmac_key, seed, "orama-vault-v1", "hmac-integrity-key");
    hkdf.deriveKey(&commit_key, seed, "orama-vault-v1", "commitment-key");
    // All three keys must be different
    try std.testing.expect(!std.mem.eql(u8, &aes_key, &hmac_key));
    try std.testing.expect(!std.mem.eql(u8, &aes_key, &commit_key));
    try std.testing.expect(!std.mem.eql(u8, &hmac_key, &commit_key));
    // Use AES key to encrypt, HMAC key for integrity check on the ciphertext
    const allocator = std.testing.allocator;
    const plaintext = "vault entry: private key material";
    const encrypted = try aes.encrypt(allocator, plaintext, aes_key);
    defer allocator.free(@constCast(encrypted.ciphertext));
    // Compute HMAC of ciphertext with the HMAC-specific key
    const mac = hmac.compute(&hmac_key, encrypted.ciphertext);
    // Verify HMAC passes with correct key
    try std.testing.expect(hmac.verify(&hmac_key, encrypted.ciphertext, mac));
    // Verify HMAC fails with wrong key
    try std.testing.expect(!hmac.verify(&aes_key, encrypted.ciphertext, mac));
    // Decrypt successfully with the right AES key
    const decrypted = try aes.decrypt(allocator, encrypted, aes_key);
    defer {
        @memset(decrypted, 0);
        allocator.free(decrypted);
    }
    try std.testing.expectEqualSlices(u8, plaintext, decrypted);
}
//
// 10. Full pipeline: HKDF -> AES -> Shamir -> Store -> Read -> Combine -> Decrypt
//
// End-to-end smoke test of the whole vault data path, in order:
// HKDF key derivation -> AES-GCM-style encryption -> Shamir split ->
// Merkle commitment -> on-disk storage -> recovery of K shares ->
// proof verification -> combine -> decrypt.
test "integration: full pipeline — derive keys, encrypt, split, store, reconstruct, decrypt" {
const allocator = std.testing.allocator;
// 1. Derive keys from a wallet seed
const wallet_seed = "user-wallet-signature-bytes-here-at-least-32-bytes!!";
var encryption_key: [aes.KEY_SIZE]u8 = undefined;
hkdf.deriveKey(&encryption_key, wallet_seed, "orama-vault-v1", "encryption");
var integrity_key_buf: [32]u8 = undefined;
hkdf.deriveKey(&integrity_key_buf, wallet_seed, "orama-vault-v1", "integrity");
// 2. Encrypt the vault payload
const vault_data = "{ \"privateKey\": \"0xDEADBEEF...\", \"mnemonic\": \"abandon abandon ...\" }";
const encrypted = try aes.encrypt(allocator, vault_data, encryption_key);
// ciphertext is presumably exposed as a const slice by aes.encrypt, hence the
// @constCast to hand it back to the allocator — TODO confirm against aes.zig.
defer allocator.free(@constCast(encrypted.ciphertext));
// 3. Package ciphertext + nonce + tag as the Shamir secret.
// Layout of the packaged secret: [ nonce | tag | ciphertext ].
const secret_len = aes.NONCE_SIZE + aes.TAG_SIZE + encrypted.ciphertext.len;
const shamir_secret = try allocator.alloc(u8, secret_len);
defer allocator.free(shamir_secret);
@memcpy(shamir_secret[0..aes.NONCE_SIZE], &encrypted.nonce);
@memcpy(shamir_secret[aes.NONCE_SIZE .. aes.NONCE_SIZE + aes.TAG_SIZE], &encrypted.tag);
@memcpy(shamir_secret[aes.NONCE_SIZE + aes.TAG_SIZE ..], encrypted.ciphertext);
// 4. Split into shares (7 total, any 3 reconstruct)
const n: u8 = 7;
const k: u8 = 3;
const share_set = try split_mod.split(allocator, shamir_secret, n, k);
defer share_set.deinit(allocator);
// 5. Build Merkle commitment tree (for cross-guardian verification)
const tree = try commitment_mod.buildTree(allocator, share_set.shares);
defer tree.deinit(allocator);
// 6. Store shares to disk under hex identities derived from
// (share index, x-coordinate, 0xFE, 0xED) — the same scheme is
// reproduced below in step 7 so the reads find the same files.
var tmp_dir_buf: [std.fs.max_path_bytes]u8 = undefined;
const tmp = makeTmpDir(&tmp_dir_buf);
var tmp_dir = tmp.dir;
defer tmp_dir.cleanup();
const tmp_path = tmp.path;
for (share_set.shares, 0..) |share, i| {
var id_buf: [8]u8 = undefined;
const id_byte = [_]u8{ @as(u8, @truncate(i)), share.x, 0xFE, 0xED };
const identity = hexEncode(&id_byte, &id_buf);
try file_store.writeShare(tmp_path, identity, share.y, &integrity_key_buf, allocator);
}
// 7. Simulate recovery: read K shares from disk, verify commitments, combine.
// Indices 1, 4, 6 are an arbitrary K-subset of the N stored shares.
const recovery_indices = [_]usize{ 1, 4, 6 };
var recovery_shares: [3]types.Share = undefined;
var recovery_bufs: [3][]u8 = undefined;
for (recovery_indices, 0..) |idx, i| {
var id_buf: [8]u8 = undefined;
const id_byte = [_]u8{ @as(u8, @truncate(idx)), share_set.shares[idx].x, 0xFE, 0xED };
const identity = hexEncode(&id_byte, &id_buf);
const data = try file_store.readShare(tmp_path, identity, &integrity_key_buf, allocator);
recovery_bufs[i] = data;
recovery_shares[i] = types.Share{
.x = share_set.shares[idx].x,
.y = data,
};
// Verify Merkle proof for each recovered share.
// defer inside the loop body: the proof is released at the end of
// each iteration, not after the loop.
const proof = try commitment_mod.generateProof(allocator, share_set.shares, idx);
defer proof.deinit(allocator);
try std.testing.expect(commitment_mod.verifyProof(recovery_shares[i], proof, tree.root));
}
// NOTE(review): this defer is registered only after the loop above completes;
// if readShare or generateProof fails mid-loop, the buffers already read leak
// (std.testing.allocator will then also report the leak). Acceptable in a test.
defer {
for (&recovery_bufs) |buf| allocator.free(buf);
}
// 8. Combine shares to recover the Shamir secret; zero it before freeing
// since it contains key material.
const recovered_secret = try combine_mod.combine(allocator, &recovery_shares);
defer {
@memset(recovered_secret, 0);
allocator.free(recovered_secret);
}
try std.testing.expectEqualSlices(u8, shamir_secret, recovered_secret);
// 9. Extract nonce/tag/ciphertext (reversing the step-3 layout) and decrypt
var rec_nonce: [aes.NONCE_SIZE]u8 = undefined;
@memcpy(&rec_nonce, recovered_secret[0..aes.NONCE_SIZE]);
var rec_tag: [aes.TAG_SIZE]u8 = undefined;
@memcpy(&rec_tag, recovered_secret[aes.NONCE_SIZE .. aes.NONCE_SIZE + aes.TAG_SIZE]);
const rec_ct = recovered_secret[aes.NONCE_SIZE + aes.TAG_SIZE ..];
const rec_encrypted = aes.EncryptedData{
.ciphertext = rec_ct,
.nonce = rec_nonce,
.tag = rec_tag,
};
const decrypted = try aes.decrypt(allocator, rec_encrypted, encryption_key);
defer {
@memset(decrypted, 0);
allocator.free(decrypted);
}
try std.testing.expectEqualSlices(u8, vault_data, decrypted);
}

87
src/tests.zig Normal file
View File

@ -0,0 +1,87 @@
//! Test entry point: references all test modules so `zig build test` runs everything.
//!
//! Each `comptime { _ = @import(...); }` block forces semantic analysis of the
//! imported file, which registers that file's `test` declarations with the
//! test runner. (A `///` doc comment here would wrongly attach to the `std`
//! declaration below; `//!` is the container-level doc comment.)
const std = @import("std");
// SSS core
comptime {
    _ = @import("sss/field.zig");
    _ = @import("sss/polynomial.zig");
    _ = @import("sss/split.zig");
    _ = @import("sss/combine.zig");
    _ = @import("sss/commitment.zig");
    _ = @import("sss/reshare.zig");
    _ = @import("sss/test_cross_platform.zig");
}
// Crypto wrappers
comptime {
    _ = @import("crypto/aes.zig");
    _ = @import("crypto/hmac.zig");
    _ = @import("crypto/hkdf.zig");
    _ = @import("crypto/secure_mem.zig");
    _ = @import("crypto/pq_kem.zig");
    _ = @import("crypto/pq_sig.zig");
    _ = @import("crypto/hybrid.zig");
}
// Storage
comptime {
    _ = @import("storage/file_store.zig");
    _ = @import("storage/vault_store.zig");
    _ = @import("storage/migrate_v1_v2.zig");
}
// Auth
comptime {
    _ = @import("auth/challenge.zig");
    _ = @import("auth/session.zig");
}
// Membership
comptime {
    _ = @import("membership/node_list.zig");
    _ = @import("membership/quorum.zig");
    _ = @import("membership/discovery.zig");
}
// Peer protocol
comptime {
    _ = @import("peer/protocol.zig");
    _ = @import("peer/heartbeat.zig");
    _ = @import("peer/verify.zig");
    _ = @import("peer/listener.zig");
    _ = @import("peer/repair.zig");
}
// Guardian
comptime {
    _ = @import("guardian.zig");
}
// Config
comptime {
    _ = @import("config.zig");
}
// Server
comptime {
    _ = @import("server/router.zig");
    _ = @import("server/response.zig");
    _ = @import("server/handler_health.zig");
    _ = @import("server/handler_status.zig");
    _ = @import("server/handler_guardians.zig");
    _ = @import("server/handler_push.zig");
    _ = @import("server/handler_pull.zig");
    _ = @import("server/handler_auth.zig");
    _ = @import("server/handler_secrets.zig");
    _ = @import("server/listener.zig");
}
// Integration tests
comptime {
    _ = @import("test_integration.zig");
}
test "all tests imported" {
    // This test exists solely to verify the test harness runs.
    try std.testing.expect(true);
}

View File

@ -0,0 +1,26 @@
[Unit]
Description=Orama Vault Guardian
Documentation=https://github.com/orama-network/debros
# Start only after basic networking is up.
After=network.target
# Stop/restart of orama-node.service propagates to this unit.
PartOf=orama-node.service
[Service]
Type=simple
ExecStart=/opt/orama/bin/vault-guardian --config /opt/orama/.orama/data/vault/vault.yaml
Restart=on-failure
RestartSec=5s
# Security hardening
PrivateTmp=yes
# strict = whole filesystem read-only for the service,
# except the paths listed in ReadWritePaths below.
ProtectSystem=strict
ReadWritePaths=/opt/orama/.orama/data/vault
NoNewPrivileges=yes
# Allow mlock for secure memory (67108864 bytes = 64 MiB)
LimitMEMLOCK=67108864
# Resource limits
MemoryMax=512M
[Install]
WantedBy=multi-user.target