mirror of
https://github.com/DeBrosOfficial/orama.git
synced 2026-03-27 09:24:12 +00:00
Compare commits
357 Commits
v0.52.20-n
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
82c477266d | ||
|
|
169be97026 | ||
|
|
4b7c342c77 | ||
|
|
7d5ccc0678 | ||
|
|
1ca779880b | ||
|
|
3b779cd5a0 | ||
|
|
b94fd1efcd | ||
|
|
abcc23c4f3 | ||
|
|
ebaf37e9d0 | ||
|
|
7c165b9579 | ||
|
|
c536e45d0f | ||
|
|
655bd92178 | ||
|
|
211c0275d3 | ||
|
|
5456d57aeb | ||
|
|
8ea4499052 | ||
|
|
6657c90e36 | ||
|
|
0764ac287e | ||
|
|
c4fd1878a7 | ||
|
|
3d70f92ed5 | ||
|
|
fa826f0d00 | ||
|
|
733b059681 | ||
|
|
78d876e71b | ||
|
|
6468019136 | ||
|
|
e2b6f7d721 | ||
|
|
fd87eec476 | ||
|
|
a0468461ab | ||
|
|
2f5718146a | ||
|
|
f26676db2c | ||
|
|
fade8f89ed | ||
|
|
ed4e490463 | ||
|
|
6898f47e2e | ||
|
|
f0d2621199 | ||
|
|
c6998b6ac2 | ||
|
|
45a8285ae8 | ||
|
|
80e26f33fb | ||
|
|
25495448ed | ||
|
|
1882876922 | ||
|
|
7227e5ceb9 | ||
|
|
7f1c592235 | ||
|
|
72fb5f1a5a | ||
|
|
2fecebc0c2 | ||
|
|
85eb98ed34 | ||
|
|
714a986a78 | ||
|
|
bcfdabb32d | ||
|
|
3597c61cfc | ||
|
|
552fde428e | ||
|
|
ca86becf85 | ||
|
|
bfff2a241b | ||
|
|
3e9ef5ac6c | ||
|
|
f1dc3014fc | ||
|
|
19463b8621 | ||
|
|
a79ae41dd5 | ||
|
|
e4d51676cc | ||
|
|
e6f828d6f1 | ||
|
|
8ee606bfb1 | ||
|
|
58ea896cb0 | ||
|
|
d256a83fb7 | ||
|
|
c731486454 | ||
|
|
8cabe48f7d | ||
|
|
c499b2d76e | ||
|
|
4ebf558719 | ||
|
|
2b0bfaaa12 | ||
|
|
a71b979036 | ||
|
|
106c2df4d2 | ||
|
|
40600c3557 | ||
|
|
aa2da83969 | ||
|
|
bb98418ac9 | ||
|
|
b58e1d80ee | ||
|
|
4f1709e136 | ||
|
|
83804422c4 | ||
|
|
8aef779fcd | ||
|
|
0b5b6e68e3 | ||
|
|
f889c2e358 | ||
|
|
1e38fc2861 | ||
|
|
88ba08fcba | ||
|
|
865a4f3434 | ||
|
|
7163aad850 | ||
|
|
25a167f9b4 | ||
|
|
bc9cbb3627 | ||
|
|
ef8002bf13 | ||
|
|
29d255676f | ||
|
|
ba4e2688e4 | ||
|
|
749d5ed5e7 | ||
|
|
afbb7d4ede | ||
|
|
2986e64162 | ||
|
|
1ab63857d3 | ||
|
|
61ccad952a | ||
|
|
85a556d0a0 | ||
|
|
ed82c8ca6b | ||
|
|
266507ef09 | ||
|
|
5fed8a6c88 | ||
|
|
1d186706f6 | ||
|
|
83bd495f0f | ||
|
|
5c73330be6 | ||
|
|
ee0f035948 | ||
|
|
b5dfcab1d6 | ||
|
|
4356f5544a | ||
|
|
ebdd08f71c | ||
|
|
35ad8bdb16 | ||
|
|
051c002ec8 | ||
|
|
fbeecb617a | ||
|
|
f3f0716715 | ||
|
|
7dc6fecac2 | ||
|
|
eddf0553b7 | ||
|
|
888df0385e | ||
|
|
b305f562d7 | ||
|
|
1fb6f9a13e | ||
|
|
490c4f66da | ||
|
|
91ac56c50a | ||
|
|
f7db698273 | ||
|
|
a78e09d2b9 | ||
|
|
359fb5ae04 | ||
|
|
21e82abb65 | ||
|
|
a02c63a7ee | ||
|
|
cbdde6ab66 | ||
|
|
af5250ccad | ||
|
|
7a7553f0eb | ||
|
|
d5cfb12435 | ||
|
|
a297a14b44 | ||
|
|
e2b38c409a | ||
|
|
5d543b2662 | ||
|
|
b382350f76 | ||
|
|
7690b22c0a | ||
|
|
3999253685 | ||
|
|
854523c3a9 | ||
|
|
02b5c095d0 | ||
|
|
a7f100038d | ||
|
|
c855b790f8 | ||
|
|
f972358e78 | ||
|
|
0c4af88388 | ||
|
|
d85ed032f8 | ||
|
|
156de7eb19 | ||
|
|
65ffd28151 | ||
|
|
11d5c1b19a | ||
|
|
859c30fcd9 | ||
|
|
79a489d650 | ||
|
|
e95ecfb12a | ||
|
|
b43e6d77b7 | ||
|
|
e3dd359e55 | ||
|
|
765ce46ea7 | ||
|
|
3343ade433 | ||
|
|
c7036cb931 | ||
|
|
9c52287af9 | ||
|
|
af5f5f9893 | ||
|
|
683ce50106 | ||
|
|
c401fdcd74 | ||
|
|
73dfe22438 | ||
|
|
4b3b7b3458 | ||
|
|
9282fe64ee | ||
|
|
b5109f1ee8 | ||
|
|
16eaf9a129 | ||
|
|
8c392194bb | ||
|
|
51371e199d | ||
|
|
04f345f9ee | ||
|
|
810094771d | ||
|
|
4acea72467 | ||
|
|
dcaf695fbc | ||
|
|
9a8fba3f47 | ||
|
|
46aa2f2869 | ||
|
|
7b12dde469 | ||
|
|
82963c960e | ||
|
|
d6106bcbb8 | ||
|
|
15ecf366d5 | ||
|
|
e706ed3397 | ||
|
|
42c0c61d19 | ||
|
|
cd4189f64b | ||
|
|
d8c93f6ee9 | ||
|
|
571f8babb4 | ||
|
|
4b24b0aa6c | ||
|
|
6397efde25 | ||
|
|
29581bec51 | ||
|
|
81414722cd | ||
|
|
c3d6500785 | ||
|
|
5ec292a4f2 | ||
|
|
d4f5f3b999 | ||
|
|
c3f87aede7 | ||
|
|
7ded21939b | ||
|
|
edd9c1f3dc | ||
|
|
468ca06398 | ||
|
|
c827651245 | ||
|
|
2c374b2156 | ||
|
|
039c246d47 | ||
|
|
380b10add3 | ||
|
|
1a717537e5 | ||
|
|
e94da3a639 | ||
|
|
6c3d16c332 | ||
|
|
ec664466c0 | ||
|
|
6101455f4a | ||
|
|
3d3b0d2ee6 | ||
|
|
2281899784 | ||
|
|
fb229af2a0 | ||
|
|
00c9792780 | ||
|
|
fc0b958b1e | ||
|
|
84c9b9ab9b | ||
|
|
da8c9822f4 | ||
|
|
b1011c29b5 | ||
|
|
ec66213e2e | ||
|
|
5547c8ccb5 | ||
|
|
1c2bde2d81 | ||
|
|
b33da4282b | ||
|
|
903bef14a3 | ||
|
|
0a7e3ba3c7 | ||
|
|
c2071586f8 | ||
|
|
1338b32a0e | ||
|
|
76bbf23f25 | ||
|
|
0dcde29f7c | ||
|
|
9fc9bbb8e5 | ||
|
|
ade6241357 | ||
|
|
d3d1bb98ba | ||
|
|
ccee66d525 | ||
|
|
acc38d584a | ||
|
|
c20f6e9a25 | ||
|
|
b0bc0a232e | ||
|
|
86f73a1d8e | ||
|
|
8c82124e05 | ||
|
|
6f4f55f669 | ||
|
|
fff665374f | ||
|
|
2b3e6874c8 | ||
|
|
cbbf72092d | ||
|
|
9ddbe945fd | ||
|
|
4f893e08d1 | ||
|
|
df5b11b175 | ||
|
|
a9844a1451 | ||
|
|
4ee76588ed | ||
|
|
b3b1905fb2 | ||
|
|
54aab4841d | ||
|
|
ee80be15d8 | ||
|
|
6740e67d40 | ||
|
|
670c3f99df | ||
|
|
9f43cea907 | ||
| 65286df31e | |||
|
|
b91b7c27ea | ||
|
|
432952ed69 | ||
|
|
9193f088a3 | ||
|
|
3505a6a0eb | ||
|
|
3ca4e1f43b | ||
|
|
2fb1d68fcb | ||
|
|
7126c4068b | ||
|
|
681cef999a | ||
|
|
5c7767b7c8 | ||
|
|
d8994b1e4f | ||
|
|
b983066016 | ||
|
|
660008b0aa | ||
|
|
775289a1a2 | ||
|
|
87059fb9c4 | ||
|
|
90a26295a4 | ||
|
|
4c1f842939 | ||
|
|
33ebf222ff | ||
|
|
2f1ccfa473 | ||
|
|
6f7b7606b0 | ||
|
|
adb180932b | ||
|
|
5d6de3b0b8 | ||
|
|
747be5863b | ||
|
|
358de8a8ad | ||
|
|
47ffe817b4 | ||
|
|
7f77836d73 | ||
|
|
1d060490a8 | ||
|
|
0421155594 | ||
|
|
32470052ba | ||
|
|
0ca211c983 | ||
|
|
2b17bcdaa2 | ||
|
|
c405be3e69 | ||
|
|
c2298e476e | ||
|
|
ee566d93b7 | ||
|
|
7c3378a8ec | ||
|
|
bd4542ef56 | ||
|
|
f88a28b3df | ||
|
|
b0ac58af3e | ||
|
|
52b3a99bb9 | ||
|
|
19bfaff943 | ||
|
|
b58b632be9 | ||
|
|
a33d03c6b2 | ||
|
|
6ba0a824e0 | ||
|
|
d5e28bb694 | ||
|
|
72ba75d16b | ||
|
|
b896e37e09 | ||
|
|
b1732b2cbe | ||
|
|
badaa920d9 | ||
|
|
ed80b5b023 | ||
|
|
e9bf94ba96 | ||
|
|
52a726ffd4 | ||
|
|
efa26e6ec8 | ||
|
|
239fb2084b | ||
|
|
5463df73d5 | ||
|
|
0ea58354ca | ||
|
|
263fbbb8b4 | ||
|
|
a72aebc1fe | ||
|
|
80ea58848b | ||
|
|
687316b8d6 | ||
|
|
170665bf02 | ||
|
|
17fc78975d | ||
|
|
6a86592cad | ||
| abcf9a42eb | |||
|
|
a9af0d2f2d | ||
|
|
0b24c66d56 | ||
|
|
f991d55676 | ||
|
|
0388c3a766 | ||
|
|
c726dfc401 | ||
|
|
a5c30d0141 | ||
|
|
93b25c42e4 | ||
|
|
50f7abf376 | ||
|
|
5b21774e04 | ||
|
|
05ca685eee | ||
|
|
a7d21d4217 | ||
|
|
fbdfa23c77 | ||
|
|
d00290d278 | ||
|
|
69d7ccf4c7 | ||
|
|
d6009bb33f | ||
|
|
cf26c1af2c | ||
|
|
3196e91e85 | ||
|
|
42131c0e75 | ||
|
|
5e7d59c7a1 | ||
|
|
11ce4f2a53 | ||
|
|
d3543ac3ab | ||
|
|
2b51859ea7 | ||
|
|
3ba7e88e4e | ||
|
|
952132de8e | ||
|
|
31e01df940 | ||
|
|
9093c8937e | ||
|
|
2088b6a0cf | ||
|
|
3d02663e27 | ||
|
|
a17255e6b4 | ||
|
|
09c903dd14 | ||
|
|
a895726cbd | ||
|
|
f1fcbf69cf | ||
|
|
c282cf57d6 | ||
|
|
4ec47fa7ef | ||
|
|
6abe43ddc6 | ||
|
|
7fe56f11d5 | ||
|
|
909be0f18f | ||
|
|
6e59b17c6a | ||
|
|
69fd6e32f1 | ||
|
|
30d18aca02 | ||
|
|
ed7f4ae3d9 | ||
|
|
f71ef8e60b | ||
|
|
6e80ff28b4 | ||
|
|
58224826d2 | ||
|
|
6f30514974 | ||
|
|
13e05609e0 | ||
|
|
8a7ae4ad6f | ||
|
|
f2d6254b7b | ||
|
|
5b05f52162 | ||
|
|
042e516b8c | ||
|
|
cc74a8f135 | ||
|
|
168808b007 | ||
|
|
c326711d7c | ||
|
|
685295551c | ||
|
|
ebe2706ad8 | ||
|
|
ca00561da1 | ||
|
|
7b7087e5eb | ||
|
|
c5d3dd1f6d | ||
|
|
2aead48045 | ||
|
|
8f82dc7ca3 | ||
|
|
ea5ef6bc1a | ||
|
|
f561bc5311 | ||
|
|
a4b4b8f0df | ||
|
|
fe05240362 |
@ -1,11 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
echo -e "\nRunning tests:"
|
|
||||||
go test ./... # Runs all tests in your repo
|
|
||||||
status=$?
|
|
||||||
if [ $status -ne 0 ]; then
|
|
||||||
echo "Push aborted: some tests failed."
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "All tests passed. Proceeding with push."
|
|
||||||
fi
|
|
||||||
91
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
91
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
name: Bug Report
|
||||||
|
description: Report a bug in Orama Network
|
||||||
|
labels: ["bug"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thanks for reporting a bug! Please fill out the sections below.
|
||||||
|
|
||||||
|
**Security issues:** If this is a security vulnerability, do NOT open an issue. Email security@orama.io instead.
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: version
|
||||||
|
attributes:
|
||||||
|
label: Orama version
|
||||||
|
description: "Run `orama version` to find this"
|
||||||
|
placeholder: "v0.18.0-beta"
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: component
|
||||||
|
attributes:
|
||||||
|
label: Component
|
||||||
|
options:
|
||||||
|
- Gateway / API
|
||||||
|
- CLI (orama command)
|
||||||
|
- WireGuard / Networking
|
||||||
|
- RQLite / Storage
|
||||||
|
- Olric / Caching
|
||||||
|
- IPFS / Pinning
|
||||||
|
- CoreDNS
|
||||||
|
- OramaOS
|
||||||
|
- Other
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: A clear description of the bug
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: steps
|
||||||
|
attributes:
|
||||||
|
label: Steps to reproduce
|
||||||
|
description: Minimal steps to reproduce the behavior
|
||||||
|
placeholder: |
|
||||||
|
1. Run `orama ...`
|
||||||
|
2. See error
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: expected
|
||||||
|
attributes:
|
||||||
|
label: Expected behavior
|
||||||
|
description: What you expected to happen
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: actual
|
||||||
|
attributes:
|
||||||
|
label: Actual behavior
|
||||||
|
description: What actually happened (include error messages and logs if any)
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: environment
|
||||||
|
attributes:
|
||||||
|
label: Environment
|
||||||
|
description: OS, Go version, deployment environment, etc.
|
||||||
|
placeholder: |
|
||||||
|
- OS: Ubuntu 22.04
|
||||||
|
- Go: 1.23
|
||||||
|
- Environment: sandbox
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: context
|
||||||
|
attributes:
|
||||||
|
label: Additional context
|
||||||
|
description: Logs, screenshots, monitor reports, or anything else that might help
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
49
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
49
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
name: Feature Request
|
||||||
|
description: Suggest a new feature or improvement
|
||||||
|
labels: ["enhancement"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
Thanks for the suggestion! Please describe what you'd like to see.
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: component
|
||||||
|
attributes:
|
||||||
|
label: Component
|
||||||
|
options:
|
||||||
|
- Gateway / API
|
||||||
|
- CLI (orama command)
|
||||||
|
- WireGuard / Networking
|
||||||
|
- RQLite / Storage
|
||||||
|
- Olric / Caching
|
||||||
|
- IPFS / Pinning
|
||||||
|
- CoreDNS
|
||||||
|
- OramaOS
|
||||||
|
- Other
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: problem
|
||||||
|
attributes:
|
||||||
|
label: Problem
|
||||||
|
description: What problem does this solve? Why do you need it?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: solution
|
||||||
|
attributes:
|
||||||
|
label: Proposed solution
|
||||||
|
description: How do you think this should work?
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: alternatives
|
||||||
|
attributes:
|
||||||
|
label: Alternatives considered
|
||||||
|
description: Any workarounds or alternative approaches you've thought of
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
31
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
31
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
## Summary
|
||||||
|
|
||||||
|
<!-- What does this PR do? Keep it to 1-3 bullet points. -->
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
<!-- Why is this change needed? Link to an issue if applicable. -->
|
||||||
|
|
||||||
|
## Test plan
|
||||||
|
|
||||||
|
<!-- How did you verify this works? -->
|
||||||
|
|
||||||
|
- [ ] `make test` passes
|
||||||
|
- [ ] Tested on sandbox/staging environment
|
||||||
|
|
||||||
|
## Distributed system impact
|
||||||
|
|
||||||
|
<!-- Does this change affect any of the following? If yes, explain. -->
|
||||||
|
|
||||||
|
- [ ] Raft quorum / RQLite
|
||||||
|
- [ ] WireGuard mesh / networking
|
||||||
|
- [ ] Olric gossip / caching
|
||||||
|
- [ ] Service startup ordering
|
||||||
|
- [ ] Rolling upgrade compatibility
|
||||||
|
|
||||||
|
## Checklist
|
||||||
|
|
||||||
|
- [ ] Tests added for new functionality or bug fix
|
||||||
|
- [ ] No debug code (`fmt.Println`, `log.Println`) left behind
|
||||||
|
- [ ] Docs updated (if user-facing behavior changed)
|
||||||
|
- [ ] Errors wrapped with context (`fmt.Errorf("...: %w", err)`)
|
||||||
80
.github/workflows/publish-sdk.yml
vendored
Normal file
80
.github/workflows/publish-sdk.yml
vendored
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
name: Publish SDK to npm
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
version:
|
||||||
|
description: "Version to publish (e.g., 1.0.0). Leave empty to use package.json version."
|
||||||
|
required: false
|
||||||
|
dry-run:
|
||||||
|
description: "Dry run (don't actually publish)"
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish:
|
||||||
|
name: Build & Publish @debros/orama
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: sdk
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: "20"
|
||||||
|
registry-url: "https://registry.npmjs.org"
|
||||||
|
|
||||||
|
- name: Install pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: pnpm install --frozen-lockfile
|
||||||
|
|
||||||
|
- name: Bump version
|
||||||
|
if: inputs.version != ''
|
||||||
|
run: npm version ${{ inputs.version }} --no-git-tag-version
|
||||||
|
|
||||||
|
- name: Typecheck
|
||||||
|
run: pnpm typecheck
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
run: pnpm build
|
||||||
|
|
||||||
|
- name: Run unit tests
|
||||||
|
run: pnpm vitest run tests/unit
|
||||||
|
|
||||||
|
- name: Publish (dry run)
|
||||||
|
if: inputs.dry-run == true
|
||||||
|
run: npm publish --access public --dry-run
|
||||||
|
env:
|
||||||
|
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||||
|
|
||||||
|
- name: Publish
|
||||||
|
if: inputs.dry-run == false
|
||||||
|
run: npm publish --access public
|
||||||
|
env:
|
||||||
|
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||||
|
|
||||||
|
- name: Get published version
|
||||||
|
if: inputs.dry-run == false
|
||||||
|
id: version
|
||||||
|
run: echo "version=$(node -p "require('./package.json').version")" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Create git tag
|
||||||
|
if: inputs.dry-run == false
|
||||||
|
working-directory: .
|
||||||
|
run: |
|
||||||
|
git config user.name "github-actions[bot]"
|
||||||
|
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||||
|
git tag "sdk/v${{ steps.version.outputs.version }}"
|
||||||
|
git push origin "sdk/v${{ steps.version.outputs.version }}"
|
||||||
200
.github/workflows/release-apt.yml
vendored
Normal file
200
.github/workflows/release-apt.yml
vendored
Normal file
@ -0,0 +1,200 @@
|
|||||||
|
name: Release APT Package
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
version:
|
||||||
|
description: "Version to release (e.g., 0.69.20)"
|
||||||
|
required: true
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-deb:
|
||||||
|
name: Build Debian Package
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
arch: [amd64, arm64]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: "1.24"
|
||||||
|
cache-dependency-path: core/go.sum
|
||||||
|
|
||||||
|
- name: Get version
|
||||||
|
id: version
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "release" ]; then
|
||||||
|
VERSION="${{ github.event.release.tag_name }}"
|
||||||
|
VERSION="${VERSION#v}" # Remove 'v' prefix if present
|
||||||
|
else
|
||||||
|
VERSION="${{ github.event.inputs.version }}"
|
||||||
|
fi
|
||||||
|
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Set up QEMU (for arm64)
|
||||||
|
if: matrix.arch == 'arm64'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build binary
|
||||||
|
working-directory: core
|
||||||
|
env:
|
||||||
|
GOARCH: ${{ matrix.arch }}
|
||||||
|
CGO_ENABLED: 0
|
||||||
|
run: |
|
||||||
|
VERSION="${{ steps.version.outputs.version }}"
|
||||||
|
COMMIT=$(git rev-parse --short HEAD)
|
||||||
|
DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||||
|
LDFLAGS="-X 'main.version=$VERSION' -X 'main.commit=$COMMIT' -X 'main.date=$DATE'"
|
||||||
|
|
||||||
|
mkdir -p build/usr/local/bin
|
||||||
|
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
|
||||||
|
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-node cmd/node/main.go
|
||||||
|
# Build the entire gateway package so helper files (e.g., config parsing) are included
|
||||||
|
go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama-gateway ./cmd/gateway
|
||||||
|
|
||||||
|
- name: Create Debian package structure
|
||||||
|
run: |
|
||||||
|
VERSION="${{ steps.version.outputs.version }}"
|
||||||
|
ARCH="${{ matrix.arch }}"
|
||||||
|
PKG_NAME="orama_${VERSION}_${ARCH}"
|
||||||
|
|
||||||
|
mkdir -p ${PKG_NAME}/DEBIAN
|
||||||
|
mkdir -p ${PKG_NAME}/usr/local/bin
|
||||||
|
|
||||||
|
# Copy binaries
|
||||||
|
cp core/build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
|
||||||
|
chmod 755 ${PKG_NAME}/usr/local/bin/*
|
||||||
|
|
||||||
|
# Create control file
|
||||||
|
cat > ${PKG_NAME}/DEBIAN/control << EOF
|
||||||
|
Package: orama
|
||||||
|
Version: ${VERSION}
|
||||||
|
Section: net
|
||||||
|
Priority: optional
|
||||||
|
Architecture: ${ARCH}
|
||||||
|
Depends: libc6
|
||||||
|
Maintainer: DeBros Team <team@orama.network>
|
||||||
|
Description: Orama Network - Distributed P2P Database System
|
||||||
|
Orama is a distributed peer-to-peer network that combines
|
||||||
|
RQLite for distributed SQL, IPFS for content-addressed storage,
|
||||||
|
and LibP2P for peer discovery and communication.
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create postinst script
|
||||||
|
cat > ${PKG_NAME}/DEBIAN/postinst << 'EOF'
|
||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
echo ""
|
||||||
|
echo "Orama installed successfully!"
|
||||||
|
echo ""
|
||||||
|
echo "To set up your node, run:"
|
||||||
|
echo " sudo orama install"
|
||||||
|
echo ""
|
||||||
|
EOF
|
||||||
|
chmod 755 ${PKG_NAME}/DEBIAN/postinst
|
||||||
|
|
||||||
|
- name: Build .deb package
|
||||||
|
run: |
|
||||||
|
VERSION="${{ steps.version.outputs.version }}"
|
||||||
|
ARCH="${{ matrix.arch }}"
|
||||||
|
PKG_NAME="orama_${VERSION}_${ARCH}"
|
||||||
|
|
||||||
|
dpkg-deb --build ${PKG_NAME}
|
||||||
|
mv ${PKG_NAME}.deb orama_${VERSION}_${ARCH}.deb
|
||||||
|
|
||||||
|
- name: Upload artifact
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: deb-${{ matrix.arch }}
|
||||||
|
path: "*.deb"
|
||||||
|
|
||||||
|
publish-apt:
|
||||||
|
name: Publish to APT Repository
|
||||||
|
needs: build-deb
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Download all artifacts
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
path: packages
|
||||||
|
|
||||||
|
- name: Get version
|
||||||
|
id: version
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "release" ]; then
|
||||||
|
VERSION="${{ github.event.release.tag_name }}"
|
||||||
|
VERSION="${VERSION#v}"
|
||||||
|
else
|
||||||
|
VERSION="${{ github.event.inputs.version }}"
|
||||||
|
fi
|
||||||
|
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Set up GPG
|
||||||
|
if: env.GPG_PRIVATE_KEY != ''
|
||||||
|
env:
|
||||||
|
GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
|
||||||
|
run: |
|
||||||
|
echo "$GPG_PRIVATE_KEY" | gpg --import
|
||||||
|
|
||||||
|
- name: Create APT repository structure
|
||||||
|
run: |
|
||||||
|
mkdir -p apt-repo/pool/main/o/orama
|
||||||
|
mkdir -p apt-repo/dists/stable/main/binary-amd64
|
||||||
|
mkdir -p apt-repo/dists/stable/main/binary-arm64
|
||||||
|
|
||||||
|
# Move packages
|
||||||
|
mv packages/deb-amd64/*.deb apt-repo/pool/main/o/orama/
|
||||||
|
mv packages/deb-arm64/*.deb apt-repo/pool/main/o/orama/
|
||||||
|
|
||||||
|
# Generate Packages files
|
||||||
|
cd apt-repo
|
||||||
|
dpkg-scanpackages --arch amd64 pool/ > dists/stable/main/binary-amd64/Packages
|
||||||
|
dpkg-scanpackages --arch arm64 pool/ > dists/stable/main/binary-arm64/Packages
|
||||||
|
|
||||||
|
gzip -k dists/stable/main/binary-amd64/Packages
|
||||||
|
gzip -k dists/stable/main/binary-arm64/Packages
|
||||||
|
|
||||||
|
# Generate Release file
|
||||||
|
cat > dists/stable/Release << EOF
|
||||||
|
Origin: Orama
|
||||||
|
Label: Orama
|
||||||
|
Suite: stable
|
||||||
|
Codename: stable
|
||||||
|
Architectures: amd64 arm64
|
||||||
|
Components: main
|
||||||
|
Description: Orama Network APT Repository
|
||||||
|
EOF
|
||||||
|
|
||||||
|
cd ..
|
||||||
|
|
||||||
|
- name: Upload to release
|
||||||
|
if: github.event_name == 'release'
|
||||||
|
uses: softprops/action-gh-release@v1
|
||||||
|
with:
|
||||||
|
files: |
|
||||||
|
apt-repo/pool/main/o/orama/*.deb
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Deploy APT repository to GitHub Pages
|
||||||
|
uses: peaceiris/actions-gh-pages@v4
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
publish_dir: ./apt-repo
|
||||||
|
destination_dir: apt
|
||||||
|
keep_files: true
|
||||||
33
.github/workflows/release.yaml
vendored
33
.github/workflows/release.yaml
vendored
@ -23,8 +23,8 @@ jobs:
|
|||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: '1.21'
|
go-version: '1.24'
|
||||||
cache: true
|
cache-dependency-path: core/go.sum
|
||||||
|
|
||||||
- name: Run GoReleaser
|
- name: Run GoReleaser
|
||||||
uses: goreleaser/goreleaser-action@v5
|
uses: goreleaser/goreleaser-action@v5
|
||||||
@ -34,6 +34,7 @@ jobs:
|
|||||||
args: release --clean
|
args: release --clean
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
HOMEBREW_TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }}
|
||||||
|
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
@ -42,32 +43,26 @@ jobs:
|
|||||||
path: dist/
|
path: dist/
|
||||||
retention-days: 5
|
retention-days: 5
|
||||||
|
|
||||||
# Optional: Publish to GitHub Packages (requires additional setup)
|
# Verify release artifacts
|
||||||
publish-packages:
|
verify-release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build-release
|
needs: build-release
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Download artifacts
|
- name: Download artifacts
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: release-artifacts
|
name: release-artifacts
|
||||||
path: dist/
|
path: dist/
|
||||||
|
|
||||||
- name: Publish to GitHub Packages
|
- name: List release artifacts
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run: |
|
run: |
|
||||||
echo "Publishing Debian packages to GitHub Packages..."
|
echo "=== Release Artifacts ==="
|
||||||
for deb in dist/*.deb; do
|
ls -la dist/
|
||||||
if [ -f "$deb" ]; then
|
echo ""
|
||||||
curl -H "Authorization: token $GITHUB_TOKEN" \
|
echo "=== .deb packages ==="
|
||||||
-H "Content-Type: application/octet-stream" \
|
ls -la dist/*.deb 2>/dev/null || echo "No .deb files found"
|
||||||
--data-binary @"$deb" \
|
echo ""
|
||||||
"https://uploads.github.com/repos/${{ github.repository }}/releases/upload?name=$(basename "$deb")"
|
echo "=== Archives ==="
|
||||||
fi
|
ls -la dist/*.tar.gz 2>/dev/null || echo "No .tar.gz files found"
|
||||||
done
|
|
||||||
|
|||||||
129
.gitignore
vendored
129
.gitignore
vendored
@ -1,34 +1,4 @@
|
|||||||
# Binaries for programs and plugins
|
# === Global ===
|
||||||
*.exe
|
|
||||||
*.exe~
|
|
||||||
*.dll
|
|
||||||
*.so
|
|
||||||
*.dylib
|
|
||||||
|
|
||||||
# Test binary, built with `go test -c`
|
|
||||||
*.test
|
|
||||||
|
|
||||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
|
||||||
*.out
|
|
||||||
|
|
||||||
# Dependency directories (remove the comment below to include it)
|
|
||||||
# vendor/
|
|
||||||
|
|
||||||
# Go workspace file
|
|
||||||
go.work
|
|
||||||
|
|
||||||
# Built binaries
|
|
||||||
bin/
|
|
||||||
dist/
|
|
||||||
|
|
||||||
# IDE and editor files
|
|
||||||
.vscode/
|
|
||||||
.idea/
|
|
||||||
*.swp
|
|
||||||
*.swo
|
|
||||||
*~
|
|
||||||
|
|
||||||
# OS generated files
|
|
||||||
.DS_Store
|
.DS_Store
|
||||||
.DS_Store?
|
.DS_Store?
|
||||||
._*
|
._*
|
||||||
@ -36,42 +6,85 @@ dist/
|
|||||||
.Trashes
|
.Trashes
|
||||||
ehthumbs.db
|
ehthumbs.db
|
||||||
Thumbs.db
|
Thumbs.db
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
*~
|
||||||
|
|
||||||
# Log files
|
# IDE
|
||||||
*.log
|
.vscode/
|
||||||
|
.idea/
|
||||||
|
.cursor/
|
||||||
|
|
||||||
# Environment variables
|
# Environment & credentials
|
||||||
.env
|
.env
|
||||||
.env.local
|
.env.*
|
||||||
.env.*.local
|
!.env.example
|
||||||
|
.mcp.json
|
||||||
|
.claude/
|
||||||
|
.codex/
|
||||||
|
|
||||||
# Temporary files
|
# === Core (Go) ===
|
||||||
tmp/
|
core/phantom-auth/
|
||||||
temp/
|
core/bin/
|
||||||
*.tmp
|
core/bin-linux/
|
||||||
|
core/dist/
|
||||||
|
core/orama-cli-linux
|
||||||
|
core/keys_backup/
|
||||||
|
core/.gocache/
|
||||||
|
core/configs/
|
||||||
|
core/data/*
|
||||||
|
core/tmp/
|
||||||
|
core/temp/
|
||||||
|
core/results/
|
||||||
|
core/rnd/
|
||||||
|
core/vps.txt
|
||||||
|
core/coverage.txt
|
||||||
|
core/coverage.html
|
||||||
|
core/profile.out
|
||||||
|
core/e2e/config.yaml
|
||||||
|
core/scripts/remote-nodes.conf
|
||||||
|
|
||||||
# Coverage reports
|
# Go build artifacts
|
||||||
coverage.txt
|
*.exe
|
||||||
coverage.html
|
*.exe~
|
||||||
profile.out
|
*.dll
|
||||||
|
*.so
|
||||||
# Build artifacts
|
*.dylib
|
||||||
|
*.test
|
||||||
|
*.out
|
||||||
*.deb
|
*.deb
|
||||||
*.rpm
|
*.rpm
|
||||||
*.tar.gz
|
*.tar.gz
|
||||||
*.zip
|
*.zip
|
||||||
|
go.work
|
||||||
|
|
||||||
# Local development files
|
# Logs
|
||||||
|
*.log
|
||||||
|
|
||||||
|
# Databases
|
||||||
|
*.db
|
||||||
|
|
||||||
|
# === Website ===
|
||||||
|
website/node_modules/
|
||||||
|
website/dist/
|
||||||
|
website/invest-api/invest-api
|
||||||
|
website/invest-api/*.db
|
||||||
|
website/invest-api/*.db-shm
|
||||||
|
website/invest-api/*.db-wal
|
||||||
|
|
||||||
|
# === SDK (TypeScript) ===
|
||||||
|
sdk/node_modules/
|
||||||
|
sdk/dist/
|
||||||
|
sdk/coverage/
|
||||||
|
|
||||||
|
# === Vault (Zig) ===
|
||||||
|
vault/.zig-cache/
|
||||||
|
vault/zig-out/
|
||||||
|
|
||||||
|
# === OS ===
|
||||||
|
os/output/
|
||||||
|
|
||||||
|
# === Local development ===
|
||||||
|
.dev/
|
||||||
.local/
|
.local/
|
||||||
local/
|
local/
|
||||||
|
|
||||||
data/*
|
|
||||||
./bootstrap
|
|
||||||
./node
|
|
||||||
data/bootstrap/rqlite/
|
|
||||||
|
|
||||||
.env.*
|
|
||||||
|
|
||||||
configs/
|
|
||||||
|
|
||||||
.dev/
|
|
||||||
131
.goreleaser.yaml
131
.goreleaser.yaml
@ -1,17 +1,23 @@
|
|||||||
# GoReleaser Configuration for DeBros Network
|
# GoReleaser Configuration for DeBros Network
|
||||||
# Builds and releases the network-cli binary for multiple platforms
|
# Builds and releases orama (CLI) and orama-node binaries
|
||||||
# Other binaries (node, gateway, identity) are installed via: network-cli setup
|
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
|
||||||
|
|
||||||
project_name: debros-network
|
project_name: orama-network
|
||||||
|
|
||||||
env:
|
env:
|
||||||
- GO111MODULE=on
|
- GO111MODULE=on
|
||||||
|
|
||||||
|
before:
|
||||||
|
hooks:
|
||||||
|
- cmd: go mod tidy
|
||||||
|
dir: core
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
# network-cli binary - only build the CLI
|
# orama CLI binary
|
||||||
- id: network-cli
|
- id: orama
|
||||||
|
dir: core
|
||||||
main: ./cmd/cli
|
main: ./cmd/cli
|
||||||
binary: network-cli
|
binary: orama
|
||||||
goos:
|
goos:
|
||||||
- linux
|
- linux
|
||||||
- darwin
|
- darwin
|
||||||
@ -23,20 +29,109 @@ builds:
|
|||||||
- -X main.version={{.Version}}
|
- -X main.version={{.Version}}
|
||||||
- -X main.commit={{.ShortCommit}}
|
- -X main.commit={{.ShortCommit}}
|
||||||
- -X main.date={{.Date}}
|
- -X main.date={{.Date}}
|
||||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||||
|
|
||||||
|
# orama-node binary (Linux only for apt)
|
||||||
|
- id: orama-node
|
||||||
|
dir: core
|
||||||
|
main: ./cmd/node
|
||||||
|
binary: orama-node
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
- arm64
|
||||||
|
ldflags:
|
||||||
|
- -s -w
|
||||||
|
- -X main.version={{.Version}}
|
||||||
|
- -X main.commit={{.ShortCommit}}
|
||||||
|
- -X main.date={{.Date}}
|
||||||
|
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||||
|
|
||||||
archives:
|
archives:
|
||||||
# Tar.gz archives for network-cli
|
# Tar.gz archives for orama CLI
|
||||||
- id: binaries
|
- id: orama-archives
|
||||||
|
builds:
|
||||||
|
- orama
|
||||||
format: tar.gz
|
format: tar.gz
|
||||||
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
name_template: "orama_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||||
files:
|
files:
|
||||||
- README.md
|
- README.md
|
||||||
- LICENSE
|
- LICENSE
|
||||||
- CHANGELOG.md
|
|
||||||
format_overrides:
|
# Tar.gz archives for orama-node
|
||||||
- goos: windows
|
- id: orama-node-archives
|
||||||
format: zip
|
builds:
|
||||||
|
- orama-node
|
||||||
|
format: tar.gz
|
||||||
|
name_template: "orama-node_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||||
|
files:
|
||||||
|
- README.md
|
||||||
|
- LICENSE
|
||||||
|
|
||||||
|
# Debian packages for apt
|
||||||
|
nfpms:
|
||||||
|
# orama CLI .deb package
|
||||||
|
- id: orama-deb
|
||||||
|
package_name: orama
|
||||||
|
builds:
|
||||||
|
- orama
|
||||||
|
vendor: DeBros
|
||||||
|
homepage: https://github.com/DeBrosOfficial/network
|
||||||
|
maintainer: DeBros <dev@debros.io>
|
||||||
|
description: CLI tool for the Orama decentralized network
|
||||||
|
license: MIT
|
||||||
|
formats:
|
||||||
|
- deb
|
||||||
|
bindir: /usr/bin
|
||||||
|
section: utils
|
||||||
|
priority: optional
|
||||||
|
contents:
|
||||||
|
- src: ./core/README.md
|
||||||
|
dst: /usr/share/doc/orama/README.md
|
||||||
|
deb:
|
||||||
|
lintian_overrides:
|
||||||
|
- statically-linked-binary
|
||||||
|
|
||||||
|
# orama-node .deb package
|
||||||
|
- id: orama-node-deb
|
||||||
|
package_name: orama-node
|
||||||
|
builds:
|
||||||
|
- orama-node
|
||||||
|
vendor: DeBros
|
||||||
|
homepage: https://github.com/DeBrosOfficial/network
|
||||||
|
maintainer: DeBros <dev@debros.io>
|
||||||
|
description: Node daemon for the Orama decentralized network
|
||||||
|
license: MIT
|
||||||
|
formats:
|
||||||
|
- deb
|
||||||
|
bindir: /usr/bin
|
||||||
|
section: net
|
||||||
|
priority: optional
|
||||||
|
contents:
|
||||||
|
- src: ./core/README.md
|
||||||
|
dst: /usr/share/doc/orama-node/README.md
|
||||||
|
deb:
|
||||||
|
lintian_overrides:
|
||||||
|
- statically-linked-binary
|
||||||
|
|
||||||
|
# Homebrew tap for macOS (orama CLI only)
|
||||||
|
brews:
|
||||||
|
- name: orama
|
||||||
|
ids:
|
||||||
|
- orama-archives
|
||||||
|
repository:
|
||||||
|
owner: DeBrosOfficial
|
||||||
|
name: homebrew-tap
|
||||||
|
token: "{{ .Env.HOMEBREW_TAP_TOKEN }}"
|
||||||
|
folder: Formula
|
||||||
|
homepage: https://github.com/DeBrosOfficial/network
|
||||||
|
description: CLI tool for the Orama decentralized network
|
||||||
|
license: MIT
|
||||||
|
install: |
|
||||||
|
bin.install "orama"
|
||||||
|
test: |
|
||||||
|
system "#{bin}/orama", "--version"
|
||||||
|
|
||||||
checksum:
|
checksum:
|
||||||
name_template: "checksums.txt"
|
name_template: "checksums.txt"
|
||||||
@ -50,10 +145,10 @@ changelog:
|
|||||||
abbrev: -1
|
abbrev: -1
|
||||||
filters:
|
filters:
|
||||||
exclude:
|
exclude:
|
||||||
- '^docs:'
|
- "^docs:"
|
||||||
- '^test:'
|
- "^test:"
|
||||||
- '^chore:'
|
- "^chore:"
|
||||||
- '^ci:'
|
- "^ci:"
|
||||||
- Merge pull request
|
- Merge pull request
|
||||||
- Merge branch
|
- Merge branch
|
||||||
|
|
||||||
|
|||||||
@ -1,68 +0,0 @@
|
|||||||
// Project-local debug tasks
|
|
||||||
//
|
|
||||||
// For more documentation on how to configure debug tasks,
|
|
||||||
// see: https://zed.dev/docs/debugger
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"label": "Gateway Go (Delve)",
|
|
||||||
"adapter": "Delve",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "debug",
|
|
||||||
"program": "./cmd/gateway",
|
|
||||||
"env": {
|
|
||||||
"GATEWAY_ADDR": ":6001",
|
|
||||||
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
|
|
||||||
"GATEWAY_NAMESPACE": "default",
|
|
||||||
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"label": "E2E Test Go (Delve)",
|
|
||||||
"adapter": "Delve",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "test",
|
|
||||||
"buildFlags": "-tags e2e",
|
|
||||||
"program": "./e2e",
|
|
||||||
"env": {
|
|
||||||
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
|
|
||||||
},
|
|
||||||
"args": ["-test.v"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"adapter": "Delve",
|
|
||||||
"label": "Gateway Go 6001 Port (Delve)",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "debug",
|
|
||||||
"program": "./cmd/gateway",
|
|
||||||
"env": {
|
|
||||||
"GATEWAY_ADDR": ":6001",
|
|
||||||
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
|
|
||||||
"GATEWAY_NAMESPACE": "default",
|
|
||||||
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"adapter": "Delve",
|
|
||||||
"label": "Network CLI - peers (Delve)",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "debug",
|
|
||||||
"program": "./cmd/cli",
|
|
||||||
"args": ["peers"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"adapter": "Delve",
|
|
||||||
"label": "Network CLI - PubSub Subscribe (Delve)",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "debug",
|
|
||||||
"program": "./cmd/cli",
|
|
||||||
"args": ["pubsub", "subscribe", "monitoring"]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"adapter": "Delve",
|
|
||||||
"label": "Node Go (Delve)",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "debug",
|
|
||||||
"program": "./cmd/node",
|
|
||||||
"args": ["--config", "configs/node.yaml"]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
299
CHANGELOG.md
299
CHANGELOG.md
@ -1,299 +0,0 @@
|
|||||||
# Changelog
|
|
||||||
|
|
||||||
All notable changes to this project will be documented in this file.
|
|
||||||
|
|
||||||
The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semantic Versioning][semver].
|
|
||||||
|
|
||||||
## [Unreleased]
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
## [0.52.15]
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Added Base64 encoding for the response body in the anonProxyHandler to prevent corruption of binary data when returned in JSON format.
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- **GoReleaser**: Updated to build only `network-cli` binary (v0.52.2+)
|
|
||||||
- Other binaries (node, gateway, identity) now installed via `network-cli setup`
|
|
||||||
- Cleaner, smaller release packages
|
|
||||||
- Resolves archive mismatch errors
|
|
||||||
- **GitHub Actions**: Updated artifact actions from v3 to v4 (deprecated versions)
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
- Fixed install script to be more clear and bug fixing
|
|
||||||
|
|
||||||
## [0.52.1] - 2025-10-26
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- **CLI Refactor**: Modularized monolithic CLI into `pkg/cli/` package structure for better maintainability
|
|
||||||
- New `environment.go`: Multi-environment management system (local, devnet, testnet)
|
|
||||||
- New `env_commands.go`: Environment switching commands (`env list`, `env switch`, `devnet enable`, `testnet enable`)
|
|
||||||
- New `setup.go`: Interactive VPS installation command (`network-cli setup`) that replaces bash install script
|
|
||||||
- New `service.go`: Systemd service management commands (`service start|stop|restart|status|logs`)
|
|
||||||
- New `auth_commands.go`, `config_commands.go`, `basic_commands.go`: Refactored commands into modular pkg/cli
|
|
||||||
- **Release Pipeline**: Complete automated release infrastructure via `.goreleaser.yaml` and GitHub Actions
|
|
||||||
- Multi-platform binary builds (Linux/macOS, amd64/arm64)
|
|
||||||
- Automatic GitHub Release creation with changelog and artifacts
|
|
||||||
- Semantic versioning support with pre-release handling
|
|
||||||
- **Environment Configuration**: Multi-environment switching system
|
|
||||||
- Default environments: local (http://localhost:6001), devnet (https://devnet.debros.network), testnet (https://testnet.debros.network)
|
|
||||||
- Stored in `~/.debros/environments.json`
|
|
||||||
- CLI auto-uses active environment for authentication and operations
|
|
||||||
- **Comprehensive Documentation**
|
|
||||||
- `.cursor/RELEASES.md`: Overview and quick start
|
|
||||||
- `.cursor/goreleaser-guide.md`: Detailed distribution guide
|
|
||||||
- `.cursor/release-checklist.md`: Quick reference
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- **CLI Refactoring**: `cmd/cli/main.go` reduced from 1340 → 180 lines (thin router pattern)
|
|
||||||
- All business logic moved to modular `pkg/cli/` functions
|
|
||||||
- Easier to test, maintain, and extend individual commands
|
|
||||||
- **Installation**: `scripts/install-debros-network.sh` now APT-ready with fallback to source build
|
|
||||||
- **Setup Process**: Consolidated all installation logic into `network-cli setup` command
|
|
||||||
- Single unified installation regardless of installation method
|
|
||||||
- Interactive user experience with clear progress indicators
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
## [0.51.9] - 2025-10-25
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- One-command `make dev` target to start full development stack (bootstrap + node2 + node3 + gateway in background)
|
|
||||||
- New `network-cli config init` (no --type) generates complete development stack with all configs and identities
|
|
||||||
- Full stack initialization with auto-generated peer identities for bootstrap and all nodes
|
|
||||||
- Explicit control over LibP2P listen addresses for better localhost/development support
|
|
||||||
- Production/development mode detection for NAT services (disabled for localhost, enabled for production)
|
|
||||||
- Process management with .dev/pids directory for background process tracking
|
|
||||||
- Centralized logging to ~/.debros/logs/ for all network services
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Simplified Makefile: removed legacy dev commands, replaced with unified `make dev` target
|
|
||||||
- Updated README with clearer getting started instructions (single `make dev` command)
|
|
||||||
- Simplified `network-cli config init` behavior: defaults to generating full stack instead of single node
|
|
||||||
- `network-cli config init` now handles bootstrap peer discovery and join addresses automatically
|
|
||||||
- LibP2P configuration: removed always-on NAT services for development environments
|
|
||||||
- Code formatting in pkg/node/node.go (indentation fixes in bootstrapPeerSource)
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
- Removed legacy Makefile targets: run-example, show-bootstrap, run-cli, cli-health, cli-peers, cli-status, cli-storage-test, cli-pubsub-test
|
|
||||||
- Removed verbose dev-setup, dev-cluster, and old dev workflow targets
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
- Fixed indentation in bootstrapPeerSource function for consistency
|
|
||||||
- Fixed gateway.yaml generation with correct YAML indentation for bootstrap_peers
|
|
||||||
- Fixed script for running and added gateway running as well
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.51.6] - 2025-10-24
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- LibP2P added support over NAT
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
## [0.51.5] - 2025-10-24
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Added validation for yaml files
|
|
||||||
- Added authenticaiton command on cli
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Updated readme
|
|
||||||
- Where we read .yaml files from and where data is saved to ~/.debros
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
- Regular nodes rqlite not starting
|
|
||||||
|
|
||||||
## [0.51.2] - 2025-09-26
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Enhance gateway configuration by adding RQLiteDSN support and updating default connection settings. Updated config parsing to include RQLiteDSN from YAML and environment variables. Changed default RQLite connection URL from port 4001 to 5001.
|
|
||||||
- Update CHANGELOG.md for version 0.51.2, enhance API key extraction to support query parameters, and implement internal auth context in status and storage handlers.
|
|
||||||
|
|
||||||
## [0.51.1] - 2025-09-26
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Changed the configuration file for run-node3 to use node3.yaml.
|
|
||||||
- Modified select_data_dir function to require a hasConfigFile parameter and added error handling for missing configuration.
|
|
||||||
- Updated main function to pass the config path to select_data_dir.
|
|
||||||
- Introduced a peer exchange protocol in the discovery package, allowing nodes to request and exchange peer information.
|
|
||||||
- Refactored peer discovery logic in the node package to utilize the new discovery manager for active peer exchange.
|
|
||||||
- Cleaned up unused code related to previous peer discovery methods.
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
## [0.50.0] - 2025-09-23
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
- Fixed wrong URL /v1/db to /v1/rqlite
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.50.0] - 2025-09-23
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Created new rqlite folder
|
|
||||||
- Created rqlite adapter, client, gateway, migrations and rqlite init
|
|
||||||
- Created namespace_helpers on gateway
|
|
||||||
- Created new rqlite implementation
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Updated node.go to support new rqlite architecture
|
|
||||||
- Updated readme
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
- Removed old storage folder
|
|
||||||
- Removed old pkg/gatway storage and migrated to new rqlite
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.44.0] - 2025-09-22
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Added gateway.yaml file for gateway default configurations
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Updated readme to include all options for .yaml files
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
- Removed unused command setup-production-security.sh
|
|
||||||
- Removed anyone proxy from libp2p proxy
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.43.6] - 2025-09-20
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Added Gateway port on install-debros-network.sh
|
|
||||||
- Added default bootstrap peers on config.go
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Updated Gateway port from 8080/8005 to 6001
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.43.4] - 2025-09-18
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- Added extra comments on main.go
|
|
||||||
- Remove backoff_test.go and associated backoff tests
|
|
||||||
- Created node_test, write tests for CalculateNextBackoff, AddJitter, GetPeerId, LoadOrCreateIdentity, hasBootstrapConnections
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- replaced git.debros.io with github.com
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
## [0.43.3] - 2025-09-15
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
- User authentication module with OAuth2 support.
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
|
|
||||||
- Make file version to 0.43.2
|
|
||||||
|
|
||||||
### Deprecated
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
- Removed cli, network-cli binaries from project
|
|
||||||
- Removed AI_CONTEXT.md
|
|
||||||
- Removed Network.md
|
|
||||||
- Removed unused log from monitoring.go
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
|
|
||||||
- Resolved race condition when saving settings.
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
_Initial release._
|
|
||||||
|
|
||||||
[keepachangelog]: https://keepachangelog.com/en/1.1.0/
|
|
||||||
[semver]: https://semver.org/spec/v2.0.0.html
|
|
||||||
@ -32,7 +32,7 @@ This Code applies within all project spaces and when an individual is officially
|
|||||||
|
|
||||||
## Enforcement
|
## Enforcement
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@debros.io
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the maintainers at: security@orama.io
|
||||||
|
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
|
|||||||
@ -1,47 +1,78 @@
|
|||||||
# Contributing to DeBros Network
|
# Contributing to Orama Network
|
||||||
|
|
||||||
Thanks for helping improve the network! This guide covers setup, local dev, tests, and PR guidelines.
|
Thanks for helping improve the network! This monorepo contains multiple projects — pick the one relevant to your contribution.
|
||||||
|
|
||||||
## Requirements
|
## Repository Structure
|
||||||
|
|
||||||
- Go 1.22+ (1.23 recommended)
|
| Package | Language | Build |
|
||||||
- RQLite (optional for local runs; the Makefile starts nodes with embedded setup)
|
|---------|----------|-------|
|
||||||
- Make (optional)
|
| `core/` | Go 1.24+ | `make core-build` |
|
||||||
|
| `website/` | TypeScript (pnpm) | `make website-build` |
|
||||||
|
| `vault/` | Zig 0.14+ | `make vault-build` |
|
||||||
|
| `os/` | Go + Buildroot | `make os-build` |
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/DeBrosOfficial/network.git
|
git clone https://github.com/DeBrosOfficial/network.git
|
||||||
cd network
|
cd network
|
||||||
make deps
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Build, Test, Lint
|
### Core (Go)
|
||||||
|
|
||||||
- Build: `make build`
|
|
||||||
- Test: `make test`
|
|
||||||
- Format/Vet: `make fmt vet` (or `make lint`)
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Useful CLI commands:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./bin/network-cli health
|
cd core
|
||||||
./bin/network-cli peers
|
make deps
|
||||||
./bin/network-cli status
|
make build
|
||||||
|
make test
|
||||||
```
|
```
|
||||||
|
|
||||||
## Versioning
|
### Website
|
||||||
|
|
||||||
- The CLI reports its version via `network-cli version`.
|
```bash
|
||||||
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
|
cd website
|
||||||
|
pnpm install
|
||||||
|
pnpm dev
|
||||||
|
```
|
||||||
|
|
||||||
|
### Vault (Zig)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd vault
|
||||||
|
zig build
|
||||||
|
zig build test
|
||||||
|
```
|
||||||
|
|
||||||
## Pull Requests
|
## Pull Requests
|
||||||
|
|
||||||
1. Fork and create a topic branch.
|
1. Fork and create a topic branch from `main`.
|
||||||
2. Ensure `make build test` passes; include tests for new functionality.
|
2. Ensure `make test` passes for affected packages.
|
||||||
3. Keep PRs focused and well-described (motivation, approach, testing).
|
3. Include tests for new functionality or bug fixes.
|
||||||
4. Update README/docs for behavior changes.
|
4. Keep PRs focused — one concern per PR.
|
||||||
|
5. Write a clear description: motivation, approach, and how you tested it.
|
||||||
|
6. Update docs if you're changing user-facing behavior.
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
|
||||||
|
### Go (core/, os/)
|
||||||
|
|
||||||
|
- Follow standard Go conventions
|
||||||
|
- Run `make lint` before submitting
|
||||||
|
- Wrap errors with context: `fmt.Errorf("failed to X: %w", err)`
|
||||||
|
- No magic values — use named constants
|
||||||
|
|
||||||
|
### TypeScript (website/)
|
||||||
|
|
||||||
|
- TypeScript strict mode
|
||||||
|
- Follow existing patterns in the codebase
|
||||||
|
|
||||||
|
### Zig (vault/)
|
||||||
|
|
||||||
|
- Follow standard Zig conventions
|
||||||
|
- Run `zig build test` before submitting
|
||||||
|
|
||||||
|
## Security
|
||||||
|
|
||||||
|
If you find a security vulnerability, **do not open a public issue**. Email security@debros.io instead.
|
||||||
|
|
||||||
Thank you for contributing!
|
Thank you for contributing!
|
||||||
|
|||||||
239
Makefile
239
Makefile
@ -1,205 +1,66 @@
|
|||||||
TEST?=./...
|
# Orama Monorepo
|
||||||
|
# Delegates to sub-project Makefiles
|
||||||
|
|
||||||
.PHONY: test
|
.PHONY: help build test clean
|
||||||
test:
|
|
||||||
@echo Running tests...
|
|
||||||
go test -v $(TEST)
|
|
||||||
|
|
||||||
# Gateway-focused E2E tests assume gateway and nodes are already running
|
# === Core (Go network) ===
|
||||||
# Configure via env:
|
.PHONY: core core-build core-test core-clean core-lint
|
||||||
# GATEWAY_BASE_URL (default http://127.0.0.1:6001)
|
core: core-build
|
||||||
# GATEWAY_API_KEY (required for auth-protected routes)
|
|
||||||
.PHONY: test-e2e
|
|
||||||
test-e2e:
|
|
||||||
@echo "Running gateway E2E tests (HTTP/WS only)..."
|
|
||||||
@echo "Base URL: $${GATEWAY_BASE_URL:-http://127.0.0.1:6001}"
|
|
||||||
@test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1)
|
|
||||||
go test -v -tags e2e ./e2e
|
|
||||||
|
|
||||||
# Network - Distributed P2P Database System
|
core-build:
|
||||||
# Makefile for development and build tasks
|
$(MAKE) -C core build
|
||||||
|
|
||||||
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports
|
core-test:
|
||||||
|
$(MAKE) -C core test
|
||||||
|
|
||||||
VERSION := 0.52.20
|
core-lint:
|
||||||
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
$(MAKE) -C core lint
|
||||||
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
|
||||||
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
|
|
||||||
|
|
||||||
# Build targets
|
core-clean:
|
||||||
build: deps
|
$(MAKE) -C core clean
|
||||||
@echo "Building network executables (version=$(VERSION))..."
|
|
||||||
@mkdir -p bin
|
|
||||||
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
|
|
||||||
go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
|
|
||||||
go build -ldflags "$(LDFLAGS)" -o bin/network-cli cmd/cli/main.go
|
|
||||||
# Inject gateway build metadata via pkg path variables
|
|
||||||
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
|
|
||||||
@echo "Build complete! Run ./bin/network-cli version"
|
|
||||||
|
|
||||||
# Clean build artifacts
|
# === Website ===
|
||||||
clean:
|
.PHONY: website website-dev website-build
|
||||||
@echo "Cleaning build artifacts..."
|
website-dev:
|
||||||
rm -rf bin/
|
cd website && pnpm dev
|
||||||
rm -rf data/
|
|
||||||
@echo "Clean complete!"
|
|
||||||
|
|
||||||
# Run bootstrap node (auto-selects identity and data dir)
|
website-build:
|
||||||
run-node:
|
cd website && pnpm build
|
||||||
@echo "Starting bootstrap node..."
|
|
||||||
@echo "Config: ~/.debros/bootstrap.yaml"
|
|
||||||
@echo "Generate it with: network-cli config init --type bootstrap"
|
|
||||||
go run ./cmd/node --config node.yaml
|
|
||||||
|
|
||||||
# Run second node (regular) - requires join address of bootstrap node
|
# === SDK (TypeScript) ===
|
||||||
# Usage: make run-node2 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
|
.PHONY: sdk sdk-build sdk-test
|
||||||
run-node2:
|
sdk: sdk-build
|
||||||
@echo "Starting regular node (node.yaml)..."
|
|
||||||
@echo "Config: ~/.debros/node.yaml"
|
|
||||||
@echo "Generate it with: network-cli config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
|
|
||||||
go run ./cmd/node --config node2.yaml
|
|
||||||
|
|
||||||
# Run third node (regular) - requires join address of bootstrap node
|
sdk-build:
|
||||||
# Usage: make run-node3 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
|
cd sdk && pnpm install && pnpm build
|
||||||
run-node3:
|
|
||||||
@echo "Starting regular node (node2.yaml)..."
|
|
||||||
@echo "Config: ~/.debros/node2.yaml"
|
|
||||||
@echo "Generate it with: network-cli config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
|
|
||||||
go run ./cmd/node --config node3.yaml
|
|
||||||
|
|
||||||
# Run gateway HTTP server
|
sdk-test:
|
||||||
# Usage examples:
|
cd sdk && pnpm test
|
||||||
# make run-gateway # uses ~/.debros/gateway.yaml
|
|
||||||
# Config generated with: network-cli config init --type gateway
|
|
||||||
run-gateway:
|
|
||||||
@echo "Starting gateway HTTP server..."
|
|
||||||
@echo "Note: Config must be in ~/.debros/gateway.yaml"
|
|
||||||
@echo "Generate it with: network-cli config init --type gateway"
|
|
||||||
go run ./cmd/gateway
|
|
||||||
|
|
||||||
# One-command dev: Start bootstrap, node2, node3, gateway, and anon in background
|
# === Vault (Zig) ===
|
||||||
# Requires: configs already exist in ~/.debros
|
.PHONY: vault vault-build vault-test
|
||||||
dev: build
|
vault-build:
|
||||||
@echo "🚀 Starting development network stack..."
|
cd vault && zig build
|
||||||
@mkdir -p .dev/pids
|
|
||||||
@mkdir -p $$HOME/.debros/logs
|
vault-test:
|
||||||
@echo "Starting Anyone client (anon proxy)..."
|
cd vault && zig build test
|
||||||
@if [ "$$(uname)" = "Darwin" ]; then \
|
|
||||||
echo " Detected macOS - using npx anyone-client"; \
|
# === OS ===
|
||||||
if command -v npx >/dev/null 2>&1; then \
|
.PHONY: os os-build
|
||||||
nohup npx anyone-client > $$HOME/.debros/logs/anon.log 2>&1 & echo $$! > .dev/pids/anon.pid; \
|
os-build:
|
||||||
echo " Anyone client started (PID: $$(cat .dev/pids/anon.pid))"; \
|
$(MAKE) -C os
|
||||||
else \
|
|
||||||
echo " ⚠️ npx not found - skipping Anyone client"; \
|
# === Aggregate ===
|
||||||
echo " Install with: npm install -g npm"; \
|
build: core-build
|
||||||
fi; \
|
test: core-test
|
||||||
elif [ "$$(uname)" = "Linux" ]; then \
|
clean: core-clean
|
||||||
echo " Detected Linux - checking systemctl"; \
|
|
||||||
if systemctl is-active --quiet anon 2>/dev/null; then \
|
|
||||||
echo " ✓ Anon service already running"; \
|
|
||||||
elif command -v systemctl >/dev/null 2>&1; then \
|
|
||||||
echo " Starting anon service..."; \
|
|
||||||
sudo systemctl start anon 2>/dev/null || echo " ⚠️ Failed to start anon service"; \
|
|
||||||
else \
|
|
||||||
echo " ⚠️ systemctl not found - skipping Anon"; \
|
|
||||||
fi; \
|
|
||||||
fi
|
|
||||||
@sleep 2
|
|
||||||
@echo "Starting bootstrap node..."
|
|
||||||
@nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid
|
|
||||||
@sleep 2
|
|
||||||
@echo "Starting node2..."
|
|
||||||
@nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid
|
|
||||||
@sleep 1
|
|
||||||
@echo "Starting node3..."
|
|
||||||
@nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid
|
|
||||||
@sleep 1
|
|
||||||
@echo "Starting gateway..."
|
|
||||||
@nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid
|
|
||||||
@echo ""
|
|
||||||
@echo "============================================================"
|
|
||||||
@echo "✅ Development stack started!"
|
|
||||||
@echo "============================================================"
|
|
||||||
@echo ""
|
|
||||||
@echo "Processes:"
|
|
||||||
@if [ -f .dev/pids/anon.pid ]; then \
|
|
||||||
echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \
|
|
||||||
fi
|
|
||||||
@echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)"
|
|
||||||
@echo " Node2: PID=$$(cat .dev/pids/node2.pid)"
|
|
||||||
@echo " Node3: PID=$$(cat .dev/pids/node3.pid)"
|
|
||||||
@echo " Gateway: PID=$$(cat .dev/pids/gateway.pid)"
|
|
||||||
@echo ""
|
|
||||||
@echo "Ports:"
|
|
||||||
@echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)"
|
|
||||||
@echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001"
|
|
||||||
@echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002"
|
|
||||||
@echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003"
|
|
||||||
@echo " Gateway: 6001"
|
|
||||||
@echo ""
|
|
||||||
@echo "Press Ctrl+C to stop all processes"
|
|
||||||
@echo "============================================================"
|
|
||||||
@echo ""
|
|
||||||
@if [ -f .dev/pids/anon.pid ]; then \
|
|
||||||
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
|
|
||||||
tail -f $$HOME/.debros/logs/anon.log $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \
|
|
||||||
else \
|
|
||||||
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
|
|
||||||
tail -f $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Help
|
|
||||||
help:
|
help:
|
||||||
@echo "Available targets:"
|
@echo "Orama Monorepo"
|
||||||
@echo " build - Build all executables"
|
|
||||||
@echo " clean - Clean build artifacts"
|
|
||||||
@echo " test - Run tests"
|
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "Development:"
|
@echo " Core (Go): make core-build | core-test | core-lint | core-clean"
|
||||||
@echo " dev - Start full dev stack (bootstrap + 2 nodes + gateway)"
|
@echo " Website: make website-dev | website-build"
|
||||||
@echo " Requires: configs in ~/.debros (run 'network-cli config init' first)"
|
@echo " Vault (Zig): make vault-build | vault-test"
|
||||||
|
@echo " OS: make os-build"
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "Configuration (NEW):"
|
@echo " Aggregate: make build | test | clean (delegates to core)"
|
||||||
@echo " First, generate config files in ~/.debros with:"
|
|
||||||
@echo " make build # Build CLI first"
|
|
||||||
@echo " ./bin/network-cli config init # Generate full stack"
|
|
||||||
@echo ""
|
|
||||||
@echo "Network Targets (requires config files in ~/.debros):"
|
|
||||||
@echo " run-node - Start bootstrap node"
|
|
||||||
@echo " run-node2 - Start second node"
|
|
||||||
@echo " run-node3 - Start third node"
|
|
||||||
@echo " run-gateway - Start HTTP gateway"
|
|
||||||
@echo " run-example - Run usage example"
|
|
||||||
@echo ""
|
|
||||||
@echo "Running Multiple Nodes:"
|
|
||||||
@echo " Nodes use --config flag to select which YAML file in ~/.debros to load:"
|
|
||||||
@echo " go run ./cmd/node --config bootstrap.yaml"
|
|
||||||
@echo " go run ./cmd/node --config node.yaml"
|
|
||||||
@echo " go run ./cmd/node --config node2.yaml"
|
|
||||||
@echo " Generate configs with: ./bin/network-cli config init --name <filename.yaml>"
|
|
||||||
@echo ""
|
|
||||||
@echo "CLI Commands:"
|
|
||||||
@echo " run-cli - Run network CLI help"
|
|
||||||
@echo " cli-health - Check network health"
|
|
||||||
@echo " cli-peers - List network peers"
|
|
||||||
@echo " cli-status - Get network status"
|
|
||||||
@echo " cli-storage-test - Test storage operations"
|
|
||||||
@echo " cli-pubsub-test - Test pub/sub operations"
|
|
||||||
@echo ""
|
|
||||||
@echo "Development:"
|
|
||||||
@echo " test-multinode - Full multi-node test with 1 bootstrap + 2 nodes"
|
|
||||||
@echo " test-peer-discovery - Test peer discovery (requires running nodes)"
|
|
||||||
@echo " test-replication - Test data replication (requires running nodes)"
|
|
||||||
@echo " test-consensus - Test database consensus (requires running nodes)"
|
|
||||||
@echo ""
|
|
||||||
@echo "Maintenance:"
|
|
||||||
@echo " deps - Download dependencies"
|
|
||||||
@echo " tidy - Tidy dependencies"
|
|
||||||
@echo " fmt - Format code"
|
|
||||||
@echo " vet - Vet code"
|
|
||||||
@echo " lint - Lint code (fmt + vet)"
|
|
||||||
@echo " clear-ports - Clear common dev ports"
|
|
||||||
@echo " dev-setup - Setup development environment"
|
|
||||||
@echo " dev-cluster - Show cluster startup commands"
|
|
||||||
@echo " dev - Full development workflow"
|
|
||||||
@echo " help - Show this help"
|
|
||||||
|
|||||||
203
cmd/cli/main.go
203
cmd/cli/main.go
@ -1,203 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/cli"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
timeout = 30 * time.Second
|
|
||||||
format = "table"
|
|
||||||
)
|
|
||||||
|
|
||||||
// version metadata populated via -ldflags at build time
|
|
||||||
var (
|
|
||||||
version = "dev"
|
|
||||||
commit = ""
|
|
||||||
date = ""
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
if len(os.Args) < 2 {
|
|
||||||
showHelp()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
command := os.Args[1]
|
|
||||||
args := os.Args[2:]
|
|
||||||
|
|
||||||
// Parse global flags
|
|
||||||
parseGlobalFlags(args)
|
|
||||||
|
|
||||||
switch command {
|
|
||||||
case "version":
|
|
||||||
fmt.Printf("network-cli %s", version)
|
|
||||||
if commit != "" {
|
|
||||||
fmt.Printf(" (commit %s)", commit)
|
|
||||||
}
|
|
||||||
if date != "" {
|
|
||||||
fmt.Printf(" built %s", date)
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
return
|
|
||||||
|
|
||||||
// Environment commands
|
|
||||||
case "env":
|
|
||||||
cli.HandleEnvCommand(args)
|
|
||||||
case "devnet", "testnet", "local":
|
|
||||||
// Shorthand for switching environments
|
|
||||||
if len(args) > 0 && (args[0] == "enable" || args[0] == "switch") {
|
|
||||||
if err := cli.SwitchEnvironment(command); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "❌ Failed to switch environment: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
env, _ := cli.GetActiveEnvironment()
|
|
||||||
fmt.Printf("✅ Switched to %s environment\n", command)
|
|
||||||
if env != nil {
|
|
||||||
fmt.Printf(" Gateway URL: %s\n", env.GatewayURL)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, "Usage: network-cli %s enable\n", command)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup and service commands
|
|
||||||
case "setup":
|
|
||||||
cli.HandleSetupCommand(args)
|
|
||||||
case "service":
|
|
||||||
cli.HandleServiceCommand(args)
|
|
||||||
|
|
||||||
// Authentication commands
|
|
||||||
case "auth":
|
|
||||||
cli.HandleAuthCommand(args)
|
|
||||||
|
|
||||||
// Config commands
|
|
||||||
case "config":
|
|
||||||
cli.HandleConfigCommand(args)
|
|
||||||
|
|
||||||
// Basic network commands
|
|
||||||
case "health":
|
|
||||||
cli.HandleHealthCommand(format, timeout)
|
|
||||||
case "peers":
|
|
||||||
cli.HandlePeersCommand(format, timeout)
|
|
||||||
case "status":
|
|
||||||
cli.HandleStatusCommand(format, timeout)
|
|
||||||
case "peer-id":
|
|
||||||
cli.HandlePeerIDCommand(format, timeout)
|
|
||||||
|
|
||||||
// Query command
|
|
||||||
case "query":
|
|
||||||
if len(args) == 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "Usage: network-cli query <sql>\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
cli.HandleQueryCommand(args[0], format, timeout)
|
|
||||||
|
|
||||||
// PubSub commands
|
|
||||||
case "pubsub":
|
|
||||||
cli.HandlePubSubCommand(args, format, timeout)
|
|
||||||
|
|
||||||
// Connect command
|
|
||||||
case "connect":
|
|
||||||
if len(args) == 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "Usage: network-cli connect <peer_address>\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
cli.HandleConnectCommand(args[0], timeout)
|
|
||||||
|
|
||||||
// Help
|
|
||||||
case "help", "--help", "-h":
|
|
||||||
showHelp()
|
|
||||||
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command)
|
|
||||||
showHelp()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseGlobalFlags(args []string) {
|
|
||||||
for i, arg := range args {
|
|
||||||
switch arg {
|
|
||||||
case "-f", "--format":
|
|
||||||
if i+1 < len(args) {
|
|
||||||
format = args[i+1]
|
|
||||||
}
|
|
||||||
case "-t", "--timeout":
|
|
||||||
if i+1 < len(args) {
|
|
||||||
if d, err := time.ParseDuration(args[i+1]); err == nil {
|
|
||||||
timeout = d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func showHelp() {
|
|
||||||
fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
|
|
||||||
fmt.Printf("Usage: network-cli <command> [args...]\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🌍 Environment Management:\n")
|
|
||||||
fmt.Printf(" env list - List available environments\n")
|
|
||||||
fmt.Printf(" env current - Show current environment\n")
|
|
||||||
fmt.Printf(" env switch <env> - Switch to environment (local, devnet, testnet)\n")
|
|
||||||
fmt.Printf(" devnet enable - Shorthand for switching to devnet\n")
|
|
||||||
fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🚀 Setup & Services:\n")
|
|
||||||
fmt.Printf(" setup [--force] - Interactive VPS setup (Linux only, requires root)\n")
|
|
||||||
fmt.Printf(" service start <target> - Start service (node, gateway, all)\n")
|
|
||||||
fmt.Printf(" service stop <target> - Stop service\n")
|
|
||||||
fmt.Printf(" service restart <target> - Restart service\n")
|
|
||||||
fmt.Printf(" service status [target] - Show service status\n")
|
|
||||||
fmt.Printf(" service logs <target> [opts] - View service logs (--follow, --since=1h)\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🔐 Authentication:\n")
|
|
||||||
fmt.Printf(" auth login - Authenticate with wallet\n")
|
|
||||||
fmt.Printf(" auth logout - Clear stored credentials\n")
|
|
||||||
fmt.Printf(" auth whoami - Show current authentication\n")
|
|
||||||
fmt.Printf(" auth status - Show detailed auth info\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("⚙️ Configuration:\n")
|
|
||||||
fmt.Printf(" config init [--type <type>] - Generate configs (full stack or single)\n")
|
|
||||||
fmt.Printf(" config validate --name <file> - Validate config file\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🌐 Network Commands:\n")
|
|
||||||
fmt.Printf(" health - Check network health\n")
|
|
||||||
fmt.Printf(" peers - List connected peers\n")
|
|
||||||
fmt.Printf(" status - Show network status\n")
|
|
||||||
fmt.Printf(" peer-id - Show this node's peer ID\n")
|
|
||||||
fmt.Printf(" connect <peer_address> - Connect to peer\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🗄️ Database:\n")
|
|
||||||
fmt.Printf(" query <sql> 🔐 Execute database query\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("📡 PubSub:\n")
|
|
||||||
fmt.Printf(" pubsub publish <topic> <msg> 🔐 Publish message\n")
|
|
||||||
fmt.Printf(" pubsub subscribe <topic> 🔐 Subscribe to topic\n")
|
|
||||||
fmt.Printf(" pubsub topics 🔐 List topics\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("Global Flags:\n")
|
|
||||||
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
|
|
||||||
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🔐 = Requires authentication (auto-prompts if needed)\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("Examples:\n")
|
|
||||||
fmt.Printf(" # Switch to devnet\n")
|
|
||||||
fmt.Printf(" network-cli devnet enable\n\n")
|
|
||||||
|
|
||||||
fmt.Printf(" # Authenticate and query\n")
|
|
||||||
fmt.Printf(" network-cli auth login\n")
|
|
||||||
fmt.Printf(" network-cli query \"SELECT * FROM users LIMIT 10\"\n\n")
|
|
||||||
|
|
||||||
fmt.Printf(" # Setup VPS (Linux only)\n")
|
|
||||||
fmt.Printf(" sudo network-cli setup\n\n")
|
|
||||||
|
|
||||||
fmt.Printf(" # Manage services\n")
|
|
||||||
fmt.Printf(" sudo network-cli service status all\n")
|
|
||||||
fmt.Printf(" sudo network-cli service logs node --follow\n")
|
|
||||||
}
|
|
||||||
@ -1,124 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/config"
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// For transition, alias main.GatewayConfig to pkg/gateway.Config
|
|
||||||
// server.go will be removed; this keeps compatibility until then.
|
|
||||||
type GatewayConfig = gateway.Config
|
|
||||||
|
|
||||||
func getEnvDefault(key, def string) string {
|
|
||||||
if v := os.Getenv(key); strings.TrimSpace(v) != "" {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEnvBoolDefault(key string, def bool) bool {
|
|
||||||
v := strings.TrimSpace(os.Getenv(key))
|
|
||||||
if v == "" {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
switch strings.ToLower(v) {
|
|
||||||
case "1", "true", "t", "yes", "y", "on":
|
|
||||||
return true
|
|
||||||
case "0", "false", "f", "no", "n", "off":
|
|
||||||
return false
|
|
||||||
default:
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
|
|
||||||
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
|
|
||||||
// Determine config path
|
|
||||||
configPath, err := config.DefaultPath("gateway.yaml")
|
|
||||||
if err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
|
|
||||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load YAML
|
|
||||||
type yamlCfg struct {
|
|
||||||
ListenAddr string `yaml:"listen_addr"`
|
|
||||||
ClientNamespace string `yaml:"client_namespace"`
|
|
||||||
RQLiteDSN string `yaml:"rqlite_dsn"`
|
|
||||||
BootstrapPeers []string `yaml:"bootstrap_peers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := os.ReadFile(configPath)
|
|
||||||
if err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "Config file not found",
|
|
||||||
zap.String("path", configPath),
|
|
||||||
zap.Error(err))
|
|
||||||
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
|
||||||
fmt.Fprintf(os.Stderr, "Generate it using: network-cli config init --type gateway\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
var y yamlCfg
|
|
||||||
// Use strict YAML decoding to reject unknown fields
|
|
||||||
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "Failed to parse gateway config", zap.Error(err))
|
|
||||||
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build config from YAML
|
|
||||||
cfg := &gateway.Config{
|
|
||||||
ListenAddr: ":6001",
|
|
||||||
ClientNamespace: "default",
|
|
||||||
BootstrapPeers: nil,
|
|
||||||
RQLiteDSN: "",
|
|
||||||
}
|
|
||||||
|
|
||||||
if v := strings.TrimSpace(y.ListenAddr); v != "" {
|
|
||||||
cfg.ListenAddr = v
|
|
||||||
}
|
|
||||||
if v := strings.TrimSpace(y.ClientNamespace); v != "" {
|
|
||||||
cfg.ClientNamespace = v
|
|
||||||
}
|
|
||||||
if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
|
|
||||||
cfg.RQLiteDSN = v
|
|
||||||
}
|
|
||||||
if len(y.BootstrapPeers) > 0 {
|
|
||||||
var bp []string
|
|
||||||
for _, p := range y.BootstrapPeers {
|
|
||||||
p = strings.TrimSpace(p)
|
|
||||||
if p != "" {
|
|
||||||
bp = append(bp, p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(bp) > 0 {
|
|
||||||
cfg.BootstrapPeers = bp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration
|
|
||||||
if errs := cfg.ValidateConfig(); len(errs) > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))
|
|
||||||
for _, err := range errs {
|
|
||||||
fmt.Fprintf(os.Stderr, " - %s\n", err)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Loaded gateway configuration from YAML",
|
|
||||||
zap.String("path", configPath),
|
|
||||||
zap.String("addr", cfg.ListenAddr),
|
|
||||||
zap.String("namespace", cfg.ClientNamespace),
|
|
||||||
zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
|
|
||||||
)
|
|
||||||
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
@ -1,102 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
func setupLogger() *logging.ColoredLogger {
|
|
||||||
logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
logger := setupLogger()
|
|
||||||
|
|
||||||
// Load gateway config (flags/env)
|
|
||||||
cfg := parseGatewayConfig(logger)
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Starting gateway initialization...")
|
|
||||||
|
|
||||||
// Initialize gateway (connect client, prepare routes)
|
|
||||||
gw, err := gateway.New(logger, cfg)
|
|
||||||
if err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "failed to initialize gateway", zap.Error(err))
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer gw.Close()
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Gateway initialization completed successfully")
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Creating HTTP server and routes...")
|
|
||||||
|
|
||||||
server := &http.Server{
|
|
||||||
Addr: cfg.ListenAddr,
|
|
||||||
Handler: gw.Routes(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to bind listener explicitly so binding failures are visible immediately.
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Gateway HTTP server starting",
|
|
||||||
zap.String("addr", cfg.ListenAddr),
|
|
||||||
zap.String("namespace", cfg.ClientNamespace),
|
|
||||||
zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Attempting to bind HTTP listener...")
|
|
||||||
|
|
||||||
ln, err := net.Listen("tcp", cfg.ListenAddr)
|
|
||||||
if err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "failed to bind HTTP listen address", zap.Error(err))
|
|
||||||
// exit because server cannot function without a listener
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "HTTP listener bound", zap.String("listen_addr", ln.Addr().String()))
|
|
||||||
|
|
||||||
// Serve in a goroutine so we can handle graceful shutdown on signals.
|
|
||||||
serveErrCh := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
|
|
||||||
serveErrCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
serveErrCh <- nil
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Wait for termination signal or server error
|
|
||||||
quit := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case sig := <-quit:
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "shutdown signal received", zap.String("signal", sig.String()))
|
|
||||||
case err := <-serveErrCh:
|
|
||||||
if err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
|
|
||||||
// continue to shutdown path so we close resources cleanly
|
|
||||||
} else {
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "HTTP server exited normally")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Shutting down gateway HTTP server...")
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
if err := server.Shutdown(ctx); err != nil {
|
|
||||||
logger.ComponentError(logging.ComponentGeneral, "HTTP server shutdown error", zap.Error(err))
|
|
||||||
} else {
|
|
||||||
logger.ComponentInfo(logging.ComponentGeneral, "Gateway shutdown complete")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
8
core/.env.example
Normal file
8
core/.env.example
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# OpenRouter API Key for changelog generation
|
||||||
|
# Get your API key from https://openrouter.ai/keys
|
||||||
|
OPENROUTER_API_KEY=your-api-key-here
|
||||||
|
|
||||||
|
# ZeroSSL API Key for TLS certificates (alternative to Let's Encrypt)
|
||||||
|
# Get your free API key from https://app.zerossl.com/developer
|
||||||
|
# If not set, Caddy will use Let's Encrypt as the default CA
|
||||||
|
ZEROSSL_API_KEY=
|
||||||
98
core/.githooks/pre-commit
Normal file
98
core/.githooks/pre-commit
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
CYAN='\033[0;36m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NOCOLOR='\033[0m'
|
||||||
|
|
||||||
|
# Get the directory where this hook is located
|
||||||
|
HOOK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
# Go up from .git/hooks/ to repo root
|
||||||
|
REPO_ROOT="$(cd "$HOOK_DIR/../.." && pwd)"
|
||||||
|
CHANGELOG_SCRIPT="$REPO_ROOT/scripts/update_changelog.sh"
|
||||||
|
PREVIEW_FILE="$REPO_ROOT/.changelog_preview.tmp"
|
||||||
|
VERSION_FILE="$REPO_ROOT/.changelog_version.tmp"
|
||||||
|
|
||||||
|
# Only run changelog update if there are actual code changes (not just changelog files)
|
||||||
|
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM)
|
||||||
|
if [ -z "$STAGED_FILES" ]; then
|
||||||
|
# No staged files, exit
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if only CHANGELOG.md and/or Makefile are being committed
|
||||||
|
OTHER_FILES=$(echo "$STAGED_FILES" | grep -v "^CHANGELOG.md$" | grep -v "^Makefile$")
|
||||||
|
if [ -z "$OTHER_FILES" ]; then
|
||||||
|
# Only changelog files are being committed, skip update
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check for skip flag
|
||||||
|
# To skip changelog generation, set SKIP_CHANGELOG=1 before committing:
|
||||||
|
# SKIP_CHANGELOG=1 git commit -m "your message"
|
||||||
|
# SKIP_CHANGELOG=1 git commit
|
||||||
|
if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then
|
||||||
|
echo -e "${YELLOW}Skipping changelog update (SKIP_CHANGELOG is set)${NOCOLOR}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Update changelog before commit
|
||||||
|
if [ -f "$CHANGELOG_SCRIPT" ]; then
|
||||||
|
echo -e "\n${CYAN}Updating changelog...${NOCOLOR}"
|
||||||
|
|
||||||
|
# Set environment variable to indicate we're running from pre-commit
|
||||||
|
export CHANGELOG_CONTEXT=pre-commit
|
||||||
|
|
||||||
|
bash "$CHANGELOG_SCRIPT"
|
||||||
|
changelog_status=$?
|
||||||
|
if [ $changelog_status -ne 0 ]; then
|
||||||
|
echo -e "${RED}Commit aborted: changelog update failed.${NOCOLOR}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Show preview if changelog was updated
|
||||||
|
if [ -f "$PREVIEW_FILE" ] && [ -f "$VERSION_FILE" ]; then
|
||||||
|
NEW_VERSION=$(cat "$VERSION_FILE")
|
||||||
|
PREVIEW_CONTENT=$(cat "$PREVIEW_FILE")
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo -e "${BLUE}========================================================================${NOCOLOR}"
|
||||||
|
echo -e "${CYAN} CHANGELOG PREVIEW${NOCOLOR}"
|
||||||
|
echo -e "${BLUE}========================================================================${NOCOLOR}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${GREEN}New Version: ${YELLOW}$NEW_VERSION${NOCOLOR}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${CYAN}Changelog Entry:${NOCOLOR}"
|
||||||
|
echo -e "${BLUE}────────────────────────────────────────────────────────────────────────${NOCOLOR}"
|
||||||
|
echo -e "$PREVIEW_CONTENT"
|
||||||
|
echo -e "${BLUE}────────────────────────────────────────────────────────────────────────${NOCOLOR}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${YELLOW}Do you want to proceed with the commit? (yes/no):${NOCOLOR} "
|
||||||
|
# Read from /dev/tty to ensure we can read from terminal even in git hook context
|
||||||
|
read -r confirmation < /dev/tty
|
||||||
|
|
||||||
|
if [ "$confirmation" != "yes" ]; then
|
||||||
|
echo -e "${RED}Commit aborted by user.${NOCOLOR}"
|
||||||
|
echo -e "${YELLOW}To revert changes, run:${NOCOLOR}"
|
||||||
|
echo -e " git checkout CHANGELOG.md Makefile"
|
||||||
|
# Clean up temp files
|
||||||
|
rm -f "$PREVIEW_FILE" "$VERSION_FILE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${GREEN}Proceeding with commit...${NOCOLOR}"
|
||||||
|
|
||||||
|
# Add the updated CHANGELOG.md and Makefile to the current commit
|
||||||
|
echo -e "${CYAN}Staging CHANGELOG.md and Makefile...${NOCOLOR}"
|
||||||
|
git add CHANGELOG.md Makefile
|
||||||
|
|
||||||
|
# Clean up temp files
|
||||||
|
rm -f "$PREVIEW_FILE" "$VERSION_FILE"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}Warning: changelog update script not found at $CHANGELOG_SCRIPT${NOCOLOR}"
|
||||||
|
fi
|
||||||
|
|
||||||
18
core/.githooks/pre-push
Normal file
18
core/.githooks/pre-push
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
CYAN='\033[0;36m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
NOCOLOR='\033[0m'
|
||||||
|
|
||||||
|
# Run tests before push
|
||||||
|
echo -e "\n${CYAN}Running tests...${NOCOLOR}"
|
||||||
|
cd "$(git rev-parse --show-toplevel)/core" && go test ./...
|
||||||
|
status=$?
|
||||||
|
if [ $status -ne 0 ]; then
|
||||||
|
echo -e "${RED}Push aborted: some tests failed.${NOCOLOR}"
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo -e "${GREEN}All tests passed. Proceeding with push.${NOCOLOR}"
|
||||||
|
fi
|
||||||
181
core/Makefile
Normal file
181
core/Makefile
Normal file
@ -0,0 +1,181 @@
|
|||||||
|
TEST?=./...
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
@echo Running tests...
|
||||||
|
go test -v $(TEST)
|
||||||
|
|
||||||
|
# Gateway-focused E2E tests assume gateway and nodes are already running
|
||||||
|
# Auto-discovers configuration from ~/.orama and queries database for API key
|
||||||
|
# No environment variables required
|
||||||
|
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production
|
||||||
|
|
||||||
|
# Production E2E tests - includes production-only tests
|
||||||
|
test-e2e-prod:
|
||||||
|
@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
|
||||||
|
echo "❌ ORAMA_GATEWAY_URL not set"; \
|
||||||
|
echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
|
||||||
|
go test -v -tags "e2e production" -timeout 30m ./e2e/...
|
||||||
|
|
||||||
|
# Generic e2e target
|
||||||
|
test-e2e:
|
||||||
|
@echo "Running comprehensive E2E tests..."
|
||||||
|
@echo "Auto-discovering configuration from ~/.orama..."
|
||||||
|
go test -v -tags e2e -timeout 30m ./e2e/...
|
||||||
|
|
||||||
|
test-e2e-deployments:
|
||||||
|
@echo "Running deployment E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 15m ./e2e/deployments/...
|
||||||
|
|
||||||
|
test-e2e-fullstack:
|
||||||
|
@echo "Running fullstack E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...
|
||||||
|
|
||||||
|
test-e2e-https:
|
||||||
|
@echo "Running HTTPS/external access E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...
|
||||||
|
|
||||||
|
test-e2e-shared:
|
||||||
|
@echo "Running shared E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 10m ./e2e/shared/...
|
||||||
|
|
||||||
|
test-e2e-cluster:
|
||||||
|
@echo "Running cluster E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 15m ./e2e/cluster/...
|
||||||
|
|
||||||
|
test-e2e-integration:
|
||||||
|
@echo "Running integration E2E tests..."
|
||||||
|
go test -v -tags e2e -timeout 20m ./e2e/integration/...
|
||||||
|
|
||||||
|
test-e2e-production:
|
||||||
|
@echo "Running production-only E2E tests..."
|
||||||
|
go test -v -tags "e2e production" -timeout 15m ./e2e/production/...
|
||||||
|
|
||||||
|
test-e2e-quick:
|
||||||
|
@echo "Running quick E2E smoke tests..."
|
||||||
|
go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...
|
||||||
|
|
||||||
|
# Network - Distributed P2P Database System
|
||||||
|
# Makefile for development and build tasks
|
||||||
|
|
||||||
|
.PHONY: build clean test deps tidy fmt vet lint install-hooks push-devnet push-testnet rollout-devnet rollout-testnet release
|
||||||
|
|
||||||
|
VERSION := 0.120.0
|
||||||
|
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
||||||
|
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||||
|
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
|
||||||
|
LDFLAGS_LINUX := -s -w $(LDFLAGS)
|
||||||
|
|
||||||
|
# Build targets
|
||||||
|
build: deps
|
||||||
|
@echo "Building network executables (version=$(VERSION))..."
|
||||||
|
@mkdir -p bin
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
|
||||||
|
# Inject gateway build metadata via pkg path variables
|
||||||
|
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/sfu ./cmd/sfu
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/turn ./cmd/turn
|
||||||
|
@echo "Build complete! Run ./bin/orama version"
|
||||||
|
|
||||||
|
# Cross-compile CLI for Linux (only binary needed locally; VPS builds everything else from source)
|
||||||
|
build-linux: deps
|
||||||
|
@echo "Cross-compiling CLI for linux/amd64 (version=$(VERSION))..."
|
||||||
|
@mkdir -p bin-linux
|
||||||
|
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama ./cmd/cli/
|
||||||
|
@echo "✓ CLI built at bin-linux/orama"
|
||||||
|
@echo ""
|
||||||
|
@echo "Prefer 'make build-archive' for full pre-built binary archive."
|
||||||
|
|
||||||
|
# Build pre-compiled binary archive for deployment (all binaries + deps)
|
||||||
|
build-archive: deps
|
||||||
|
@echo "Building binary archive (version=$(VERSION))..."
|
||||||
|
go build -ldflags "$(LDFLAGS)" -o bin/orama ./cmd/cli/
|
||||||
|
./bin/orama build --output /tmp/orama-$(VERSION)-linux-amd64.tar.gz
|
||||||
|
|
||||||
|
# Install git hooks
|
||||||
|
install-hooks:
|
||||||
|
@echo "Installing git hooks..."
|
||||||
|
@bash scripts/install-hooks.sh
|
||||||
|
|
||||||
|
# Install orama CLI to ~/.local/bin and configure PATH
|
||||||
|
install: build
|
||||||
|
@bash scripts/install.sh
|
||||||
|
|
||||||
|
# Clean build artifacts
|
||||||
|
clean:
|
||||||
|
@echo "Cleaning build artifacts..."
|
||||||
|
rm -rf bin/
|
||||||
|
rm -rf data/
|
||||||
|
@echo "Clean complete!"
|
||||||
|
|
||||||
|
# Push binary archive to devnet nodes (fanout distribution)
|
||||||
|
push-devnet:
|
||||||
|
./bin/orama node push --env devnet
|
||||||
|
|
||||||
|
# Push binary archive to testnet nodes (fanout distribution)
|
||||||
|
push-testnet:
|
||||||
|
./bin/orama node push --env testnet
|
||||||
|
|
||||||
|
# Full rollout to devnet (build + push + rolling upgrade)
|
||||||
|
rollout-devnet:
|
||||||
|
./bin/orama node rollout --env devnet --yes
|
||||||
|
|
||||||
|
# Full rollout to testnet (build + push + rolling upgrade)
|
||||||
|
rollout-testnet:
|
||||||
|
./bin/orama node rollout --env testnet --yes
|
||||||
|
|
||||||
|
# Interactive release workflow (tag + push)
|
||||||
|
release:
|
||||||
|
@bash scripts/release.sh
|
||||||
|
|
||||||
|
# Check health of all nodes in an environment
|
||||||
|
# Usage: make health ENV=devnet
|
||||||
|
health:
|
||||||
|
@if [ -z "$(ENV)" ]; then \
|
||||||
|
echo "Usage: make health ENV=devnet|testnet"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
./bin/orama monitor report --env $(ENV)
|
||||||
|
|
||||||
|
# Help
|
||||||
|
help:
|
||||||
|
@echo "Available targets:"
|
||||||
|
@echo " build - Build all executables"
|
||||||
|
@echo " install - Build and install 'orama' CLI to ~/.local/bin"
|
||||||
|
@echo " clean - Clean build artifacts"
|
||||||
|
@echo " test - Run unit tests"
|
||||||
|
@echo ""
|
||||||
|
@echo "E2E Testing:"
|
||||||
|
@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
|
||||||
|
@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
|
||||||
|
@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
|
||||||
|
@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
|
||||||
|
@echo " make test-e2e-deployments - Run deployment E2E tests"
|
||||||
|
@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
|
||||||
|
@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
|
||||||
|
@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
|
||||||
|
@echo ""
|
||||||
|
@echo " Example:"
|
||||||
|
@echo " ORAMA_GATEWAY_URL=https://orama-devnet.network make test-e2e-prod"
|
||||||
|
@echo ""
|
||||||
|
@echo "Deployment:"
|
||||||
|
@echo " make build-archive - Build pre-compiled binary archive for deployment"
|
||||||
|
@echo " make push-devnet - Push binary archive to devnet nodes"
|
||||||
|
@echo " make push-testnet - Push binary archive to testnet nodes"
|
||||||
|
@echo " make rollout-devnet - Full rollout: build + push + rolling upgrade (devnet)"
|
||||||
|
@echo " make rollout-testnet - Full rollout: build + push + rolling upgrade (testnet)"
|
||||||
|
@echo " make health ENV=devnet - Check health of all nodes in an environment"
|
||||||
|
@echo " make release - Interactive release workflow (tag + push)"
|
||||||
|
@echo ""
|
||||||
|
@echo "Maintenance:"
|
||||||
|
@echo " deps - Download dependencies"
|
||||||
|
@echo " tidy - Tidy dependencies"
|
||||||
|
@echo " fmt - Format code"
|
||||||
|
@echo " vet - Vet code"
|
||||||
|
@echo " lint - Lint code (fmt + vet)"
|
||||||
|
@echo " help - Show this help"
|
||||||
5
core/cmd/cli/main.go
Normal file
5
core/cmd/cli/main.go
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
runCLI()
|
||||||
|
}
|
||||||
103
core/cmd/cli/root.go
Normal file
103
core/cmd/cli/root.go
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
|
// Command groups
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/app"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/authcmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/buildcmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/dbcmd"
|
||||||
|
deploycmd "github.com/DeBrosOfficial/network/pkg/cli/cmd/deploy"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/envcmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/functioncmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/inspectcmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/monitorcmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/namespacecmd"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/node"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli/cmd/sandboxcmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// version metadata populated via -ldflags at build time
|
||||||
|
// Must match Makefile: -X 'main.version=...' -X 'main.commit=...' -X 'main.date=...'
|
||||||
|
var (
|
||||||
|
version = "dev"
|
||||||
|
commit = ""
|
||||||
|
date = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
func newRootCmd() *cobra.Command {
|
||||||
|
rootCmd := &cobra.Command{
|
||||||
|
Use: "orama",
|
||||||
|
Short: "Orama CLI - Distributed P2P Network Management Tool",
|
||||||
|
Long: `Orama CLI is a tool for managing nodes, deploying applications,
|
||||||
|
and interacting with the Orama distributed network.`,
|
||||||
|
SilenceUsage: true,
|
||||||
|
SilenceErrors: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version command
|
||||||
|
rootCmd.AddCommand(&cobra.Command{
|
||||||
|
Use: "version",
|
||||||
|
Short: "Show version information",
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Printf("orama %s", version)
|
||||||
|
if commit != "" {
|
||||||
|
fmt.Printf(" (commit %s)", commit)
|
||||||
|
}
|
||||||
|
if date != "" {
|
||||||
|
fmt.Printf(" built %s", date)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
// Node operator commands (was "prod")
|
||||||
|
rootCmd.AddCommand(node.Cmd)
|
||||||
|
|
||||||
|
// Deploy command (top-level, upsert)
|
||||||
|
rootCmd.AddCommand(deploycmd.Cmd)
|
||||||
|
|
||||||
|
// App management (was "deployments")
|
||||||
|
rootCmd.AddCommand(app.Cmd)
|
||||||
|
|
||||||
|
// Database commands
|
||||||
|
rootCmd.AddCommand(dbcmd.Cmd)
|
||||||
|
|
||||||
|
// Namespace commands
|
||||||
|
rootCmd.AddCommand(namespacecmd.Cmd)
|
||||||
|
|
||||||
|
// Environment commands
|
||||||
|
rootCmd.AddCommand(envcmd.Cmd)
|
||||||
|
|
||||||
|
// Auth commands
|
||||||
|
rootCmd.AddCommand(authcmd.Cmd)
|
||||||
|
|
||||||
|
// Inspect command
|
||||||
|
rootCmd.AddCommand(inspectcmd.Cmd)
|
||||||
|
|
||||||
|
// Monitor command
|
||||||
|
rootCmd.AddCommand(monitorcmd.Cmd)
|
||||||
|
|
||||||
|
// Serverless function commands
|
||||||
|
rootCmd.AddCommand(functioncmd.Cmd)
|
||||||
|
|
||||||
|
// Build command (cross-compile binary archive)
|
||||||
|
rootCmd.AddCommand(buildcmd.Cmd)
|
||||||
|
|
||||||
|
// Sandbox command (ephemeral Hetzner Cloud clusters)
|
||||||
|
rootCmd.AddCommand(sandboxcmd.Cmd)
|
||||||
|
|
||||||
|
return rootCmd
|
||||||
|
}
|
||||||
|
|
||||||
|
func runCLI() {
|
||||||
|
rootCmd := newRootCmd()
|
||||||
|
if err := rootCmd.Execute(); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
233
core/cmd/gateway/config.go
Normal file
233
core/cmd/gateway/config.go
Normal file
@ -0,0 +1,233 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/config"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getEnvDefault(key, def string) string {
|
||||||
|
if v := os.Getenv(key); strings.TrimSpace(v) != "" {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return def
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEnvBoolDefault(key string, def bool) bool {
|
||||||
|
v := strings.TrimSpace(os.Getenv(key))
|
||||||
|
if v == "" {
|
||||||
|
return def
|
||||||
|
}
|
||||||
|
switch strings.ToLower(v) {
|
||||||
|
case "1", "true", "t", "yes", "y", "on":
|
||||||
|
return true
|
||||||
|
case "0", "false", "f", "no", "n", "off":
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
return def
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseGatewayConfig loads gateway.yaml from ~/.orama exclusively.
|
||||||
|
// It accepts an optional --config flag for absolute paths (used by systemd services).
|
||||||
|
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
|
||||||
|
// Parse --config flag (optional, for systemd services that pass absolute paths)
|
||||||
|
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
// Determine config path
|
||||||
|
var configPath string
|
||||||
|
var err error
|
||||||
|
if *configFlag != "" {
|
||||||
|
// If --config flag is provided, use it (handles both absolute and relative paths)
|
||||||
|
if filepath.IsAbs(*configFlag) {
|
||||||
|
configPath = *configFlag
|
||||||
|
} else {
|
||||||
|
configPath, err = config.DefaultPath(*configFlag)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Default behavior: look for gateway.yaml in ~/.orama/data/, ~/.orama/configs/, or ~/.orama/
|
||||||
|
configPath, err = config.DefaultPath("gateway.yaml")
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load YAML
|
||||||
|
type yamlWebRTCCfg struct {
|
||||||
|
Enabled bool `yaml:"enabled"`
|
||||||
|
SFUPort int `yaml:"sfu_port"`
|
||||||
|
TURNDomain string `yaml:"turn_domain"`
|
||||||
|
TURNSecret string `yaml:"turn_secret"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type yamlCfg struct {
|
||||||
|
ListenAddr string `yaml:"listen_addr"`
|
||||||
|
ClientNamespace string `yaml:"client_namespace"`
|
||||||
|
RQLiteDSN string `yaml:"rqlite_dsn"`
|
||||||
|
GlobalRQLiteDSN string `yaml:"global_rqlite_dsn"`
|
||||||
|
Peers []string `yaml:"bootstrap_peers"`
|
||||||
|
EnableHTTPS bool `yaml:"enable_https"`
|
||||||
|
DomainName string `yaml:"domain_name"`
|
||||||
|
TLSCacheDir string `yaml:"tls_cache_dir"`
|
||||||
|
OlricServers []string `yaml:"olric_servers"`
|
||||||
|
OlricTimeout string `yaml:"olric_timeout"`
|
||||||
|
IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"`
|
||||||
|
IPFSAPIURL string `yaml:"ipfs_api_url"`
|
||||||
|
IPFSTimeout string `yaml:"ipfs_timeout"`
|
||||||
|
IPFSReplicationFactor int `yaml:"ipfs_replication_factor"`
|
||||||
|
WebRTC yamlWebRTCCfg `yaml:"webrtc"`
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := os.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "Config file not found",
|
||||||
|
zap.String("path", configPath),
|
||||||
|
zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
||||||
|
fmt.Fprintf(os.Stderr, "Generate it using: orama config init --type gateway\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var y yamlCfg
|
||||||
|
// Use strict YAML decoding to reject unknown fields
|
||||||
|
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "Failed to parse gateway config", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build config from YAML
|
||||||
|
cfg := &gateway.Config{
|
||||||
|
ListenAddr: ":6001",
|
||||||
|
ClientNamespace: "default",
|
||||||
|
BootstrapPeers: nil,
|
||||||
|
RQLiteDSN: "",
|
||||||
|
GlobalRQLiteDSN: "",
|
||||||
|
EnableHTTPS: false,
|
||||||
|
DomainName: "",
|
||||||
|
TLSCacheDir: "",
|
||||||
|
OlricServers: nil,
|
||||||
|
OlricTimeout: 0,
|
||||||
|
IPFSClusterAPIURL: "",
|
||||||
|
IPFSAPIURL: "",
|
||||||
|
IPFSTimeout: 0,
|
||||||
|
IPFSReplicationFactor: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := strings.TrimSpace(y.ListenAddr); v != "" {
|
||||||
|
cfg.ListenAddr = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.ClientNamespace); v != "" {
|
||||||
|
cfg.ClientNamespace = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
|
||||||
|
cfg.RQLiteDSN = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.GlobalRQLiteDSN); v != "" {
|
||||||
|
cfg.GlobalRQLiteDSN = v
|
||||||
|
}
|
||||||
|
if len(y.Peers) > 0 {
|
||||||
|
var peers []string
|
||||||
|
for _, p := range y.Peers {
|
||||||
|
p = strings.TrimSpace(p)
|
||||||
|
if p != "" {
|
||||||
|
peers = append(peers, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(peers) > 0 {
|
||||||
|
cfg.BootstrapPeers = peers
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPS configuration
|
||||||
|
cfg.EnableHTTPS = y.EnableHTTPS
|
||||||
|
if v := strings.TrimSpace(y.DomainName); v != "" {
|
||||||
|
cfg.DomainName = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.TLSCacheDir); v != "" {
|
||||||
|
cfg.TLSCacheDir = v
|
||||||
|
} else if cfg.EnableHTTPS {
|
||||||
|
// Default TLS cache directory if HTTPS is enabled but not specified
|
||||||
|
homeDir, err := os.UserHomeDir()
|
||||||
|
if err == nil {
|
||||||
|
cfg.TLSCacheDir = filepath.Join(homeDir, ".orama", "tls-cache")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Olric configuration
|
||||||
|
if len(y.OlricServers) > 0 {
|
||||||
|
cfg.OlricServers = y.OlricServers
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.OlricTimeout); v != "" {
|
||||||
|
if parsed, err := time.ParseDuration(v); err == nil {
|
||||||
|
cfg.OlricTimeout = parsed
|
||||||
|
} else {
|
||||||
|
logger.ComponentWarn(logging.ComponentGeneral, "invalid olric_timeout, using default", zap.String("value", v), zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPFS configuration
|
||||||
|
if v := strings.TrimSpace(y.IPFSClusterAPIURL); v != "" {
|
||||||
|
cfg.IPFSClusterAPIURL = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.IPFSAPIURL); v != "" {
|
||||||
|
cfg.IPFSAPIURL = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.IPFSTimeout); v != "" {
|
||||||
|
if parsed, err := time.ParseDuration(v); err == nil {
|
||||||
|
cfg.IPFSTimeout = parsed
|
||||||
|
} else {
|
||||||
|
logger.ComponentWarn(logging.ComponentGeneral, "invalid ipfs_timeout, using default", zap.String("value", v), zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if y.IPFSReplicationFactor > 0 {
|
||||||
|
cfg.IPFSReplicationFactor = y.IPFSReplicationFactor
|
||||||
|
}
|
||||||
|
|
||||||
|
// WebRTC configuration
|
||||||
|
cfg.WebRTCEnabled = y.WebRTC.Enabled
|
||||||
|
if y.WebRTC.SFUPort > 0 {
|
||||||
|
cfg.SFUPort = y.WebRTC.SFUPort
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.WebRTC.TURNDomain); v != "" {
|
||||||
|
cfg.TURNDomain = v
|
||||||
|
}
|
||||||
|
if v := strings.TrimSpace(y.WebRTC.TURNSecret); v != "" {
|
||||||
|
cfg.TURNSecret = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate configuration
|
||||||
|
if errs := cfg.ValidateConfig(); len(errs) > 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))
|
||||||
|
for _, err := range errs {
|
||||||
|
fmt.Fprintf(os.Stderr, " - %s\n", err)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Loaded gateway configuration from YAML",
|
||||||
|
zap.String("path", configPath),
|
||||||
|
zap.String("addr", cfg.ListenAddr),
|
||||||
|
zap.String("namespace", cfg.ClientNamespace),
|
||||||
|
zap.Int("peer_count", len(cfg.BootstrapPeers)),
|
||||||
|
)
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
235
core/cmd/gateway/main.go
Normal file
235
core/cmd/gateway/main.go
Normal file
@ -0,0 +1,235 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"golang.org/x/crypto/acme/autocert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func setupLogger() *logging.ColoredLogger {
|
||||||
|
logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
logger := setupLogger()
|
||||||
|
|
||||||
|
// Load gateway config (flags/env)
|
||||||
|
cfg := parseGatewayConfig(logger)
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Starting gateway initialization...")
|
||||||
|
|
||||||
|
// Initialize gateway (connect client, prepare routes)
|
||||||
|
gw, err := gateway.New(logger, cfg)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "failed to initialize gateway", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer gw.Close()
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Gateway initialization completed successfully")
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Creating HTTP server and routes...")
|
||||||
|
|
||||||
|
// Check if HTTPS is enabled
|
||||||
|
if cfg.EnableHTTPS && cfg.DomainName != "" {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTPS enabled with ACME",
|
||||||
|
zap.String("domain", cfg.DomainName),
|
||||||
|
zap.String("tls_cache_dir", cfg.TLSCacheDir),
|
||||||
|
)
|
||||||
|
|
||||||
|
// Set up ACME manager
|
||||||
|
manager := &autocert.Manager{
|
||||||
|
Prompt: autocert.AcceptTOS,
|
||||||
|
HostPolicy: autocert.HostWhitelist(cfg.DomainName),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set cache directory if specified
|
||||||
|
if cfg.TLSCacheDir != "" {
|
||||||
|
manager.Cache = autocert.DirCache(cfg.TLSCacheDir)
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Using TLS certificate cache",
|
||||||
|
zap.String("cache_dir", cfg.TLSCacheDir),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create HTTP server for ACME challenge (port 80)
|
||||||
|
httpServer := &http.Server{
|
||||||
|
Addr: ":80",
|
||||||
|
Handler: manager.HTTPHandler(nil), // Redirects all HTTP traffic to HTTPS except ACME challenge
|
||||||
|
ReadHeaderTimeout: 10 * time.Second,
|
||||||
|
ReadTimeout: 60 * time.Second,
|
||||||
|
WriteTimeout: 120 * time.Second,
|
||||||
|
IdleTimeout: 120 * time.Second,
|
||||||
|
MaxHeaderBytes: 1 << 20, // 1MB
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create HTTPS server (port 443)
|
||||||
|
httpsServer := &http.Server{
|
||||||
|
Addr: ":443",
|
||||||
|
Handler: gw.Routes(),
|
||||||
|
TLSConfig: manager.TLSConfig(),
|
||||||
|
ReadHeaderTimeout: 10 * time.Second,
|
||||||
|
ReadTimeout: 60 * time.Second,
|
||||||
|
WriteTimeout: 120 * time.Second,
|
||||||
|
IdleTimeout: 120 * time.Second,
|
||||||
|
MaxHeaderBytes: 1 << 20, // 1MB
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start HTTP server for ACME challenge
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Starting HTTP server for ACME challenge on port 80...")
|
||||||
|
httpLn, err := net.Listen("tcp", ":80")
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "failed to bind HTTP listen address (port 80)", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTP listener bound", zap.String("listen_addr", httpLn.Addr().String()))
|
||||||
|
|
||||||
|
// Start HTTPS server
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Starting HTTPS server on port 443...")
|
||||||
|
httpsLn, err := net.Listen("tcp", ":443")
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "failed to bind HTTPS listen address (port 443)", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTPS listener bound", zap.String("listen_addr", httpsLn.Addr().String()))
|
||||||
|
|
||||||
|
// Serve HTTP in a goroutine
|
||||||
|
httpServeErrCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
if err := httpServer.Serve(httpLn); err != nil && err != http.ErrServerClosed {
|
||||||
|
httpServeErrCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
httpServeErrCh <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Serve HTTPS in a goroutine
|
||||||
|
httpsServeErrCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
if err := httpsServer.ServeTLS(httpsLn, "", ""); err != nil && err != http.ErrServerClosed {
|
||||||
|
httpsServeErrCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
httpsServeErrCh <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for termination signal or server error
|
||||||
|
quit := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case sig := <-quit:
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "shutdown signal received", zap.String("signal", sig.String()))
|
||||||
|
case err := <-httpServeErrCh:
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTP server exited normally")
|
||||||
|
}
|
||||||
|
case err := <-httpsServeErrCh:
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTPS server error", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTPS server exited normally")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Shutting down gateway servers...")
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Shutdown HTTPS server
|
||||||
|
if err := httpsServer.Shutdown(ctx); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTPS server shutdown error", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTPS server shutdown complete")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown HTTP server
|
||||||
|
if err := httpServer.Shutdown(ctx); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTP server shutdown error", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTP server shutdown complete")
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Gateway shutdown complete")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard HTTP server (no HTTPS)
|
||||||
|
server := &http.Server{
|
||||||
|
Addr: cfg.ListenAddr,
|
||||||
|
Handler: gw.Routes(),
|
||||||
|
ReadHeaderTimeout: 10 * time.Second,
|
||||||
|
ReadTimeout: 60 * time.Second,
|
||||||
|
WriteTimeout: 120 * time.Second,
|
||||||
|
IdleTimeout: 120 * time.Second,
|
||||||
|
MaxHeaderBytes: 1 << 20, // 1MB
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to bind listener explicitly so binding failures are visible immediately.
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Gateway HTTP server starting",
|
||||||
|
zap.String("addr", cfg.ListenAddr),
|
||||||
|
zap.String("namespace", cfg.ClientNamespace),
|
||||||
|
zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Attempting to bind HTTP listener...")
|
||||||
|
|
||||||
|
ln, err := net.Listen("tcp", cfg.ListenAddr)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "failed to bind HTTP listen address", zap.Error(err))
|
||||||
|
// exit because server cannot function without a listener
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTP listener bound", zap.String("listen_addr", ln.Addr().String()))
|
||||||
|
|
||||||
|
// Serve in a goroutine so we can handle graceful shutdown on signals.
|
||||||
|
serveErrCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
|
||||||
|
serveErrCh <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
serveErrCh <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for termination signal or server error
|
||||||
|
quit := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case sig := <-quit:
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "shutdown signal received", zap.String("signal", sig.String()))
|
||||||
|
case err := <-serveErrCh:
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
|
||||||
|
// continue to shutdown path so we close resources cleanly
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "HTTP server exited normally")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Shutting down gateway HTTP server...")
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
if err := server.Shutdown(ctx); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentGeneral, "HTTP server shutdown error", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
logger.ComponentInfo(logging.ComponentGeneral, "Gateway shutdown complete")
|
||||||
|
}
|
||||||
|
}
|
||||||
11
core/cmd/inspector/main.go
Normal file
11
core/cmd/inspector/main.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cli.HandleInspectCommand(os.Args[1:])
|
||||||
|
}
|
||||||
@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@ -32,7 +33,7 @@ func setup_logger(component logging.Component) (logger *logging.ColoredLogger) {
|
|||||||
|
|
||||||
// parse_flags parses command-line flags and returns them.
|
// parse_flags parses command-line flags and returns them.
|
||||||
func parse_flags() (configName *string, help *bool) {
|
func parse_flags() (configName *string, help *bool) {
|
||||||
configName = flag.String("config", "node.yaml", "Config filename in ~/.debros (default: node.yaml)")
|
configName = flag.String("config", "node.yaml", "Config filename in ~/.orama (default: node.yaml)")
|
||||||
help = flag.Bool("help", false, "Show help")
|
help = flag.Bool("help", false, "Show help")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
@ -62,12 +63,20 @@ func check_if_should_open_help(help *bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// select_data_dir validates that we can load the config from ~/.debros
|
// select_data_dir validates that we can load the config from ~/.orama
|
||||||
func select_data_dir_check(configName *string) {
|
func select_data_dir_check(configName *string) {
|
||||||
logger := setup_logger(logging.ComponentNode)
|
logger := setup_logger(logging.ComponentNode)
|
||||||
|
|
||||||
|
var configPath string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Check if configName is an absolute path
|
||||||
|
if filepath.IsAbs(*configName) {
|
||||||
|
// Use absolute path directly
|
||||||
|
configPath = *configName
|
||||||
|
} else {
|
||||||
// Ensure config directory exists and is writable
|
// Ensure config directory exists and is writable
|
||||||
_, err := config.EnsureConfigDir()
|
_, err = config.EnsureConfigDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to ensure config directory", zap.Error(err))
|
logger.Error("Failed to ensure config directory", zap.Error(err))
|
||||||
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
|
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
|
||||||
@ -79,11 +88,12 @@ func select_data_dir_check(configName *string) {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
configPath, err := config.DefaultPath(*configName)
|
configPath, err = config.DefaultPath(*configName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to determine config path", zap.Error(err))
|
logger.Error("Failed to determine config path", zap.Error(err))
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if _, err := os.Stat(configPath); err != nil {
|
if _, err := os.Stat(configPath); err != nil {
|
||||||
logger.Error("Config file not found",
|
logger.Error("Config file not found",
|
||||||
@ -92,8 +102,8 @@ func select_data_dir_check(configName *string) {
|
|||||||
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
|
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
|
||||||
fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
|
fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
|
||||||
fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
|
fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
|
||||||
fmt.Fprintf(os.Stderr, " network-cli config init --type bootstrap\n")
|
fmt.Fprintf(os.Stderr, " orama config init --type node\n")
|
||||||
fmt.Fprintf(os.Stderr, " network-cli config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
|
fmt.Fprintf(os.Stderr, " orama config init --type node --peers '<peer_multiaddr>'\n")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -125,16 +135,35 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the peer ID to a file for CLI access (especially useful for bootstrap)
|
// Save the peer ID to a file for CLI access
|
||||||
peerID := n.GetPeerID()
|
peerID := n.GetPeerID()
|
||||||
peerInfoFile := filepath.Join(dataDir, "peer.info")
|
peerInfoFile := filepath.Join(dataDir, "peer.info")
|
||||||
peerMultiaddr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/p2p/%s", port, peerID)
|
|
||||||
|
// Extract advertise IP from config (prefer http_adv_address, fallback to raft_adv_address)
|
||||||
|
advertiseIP := "0.0.0.0" // Default fallback
|
||||||
|
if cfg.Discovery.HttpAdvAddress != "" {
|
||||||
|
if host, _, err := net.SplitHostPort(cfg.Discovery.HttpAdvAddress); err == nil && host != "" && host != "localhost" {
|
||||||
|
advertiseIP = host
|
||||||
|
}
|
||||||
|
} else if cfg.Discovery.RaftAdvAddress != "" {
|
||||||
|
if host, _, err := net.SplitHostPort(cfg.Discovery.RaftAdvAddress); err == nil && host != "" && host != "localhost" {
|
||||||
|
advertiseIP = host
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine IP protocol (IPv4 or IPv6) for multiaddr
|
||||||
|
ipProtocol := "ip4"
|
||||||
|
if ip := net.ParseIP(advertiseIP); ip != nil && ip.To4() == nil {
|
||||||
|
ipProtocol = "ip6"
|
||||||
|
}
|
||||||
|
|
||||||
|
peerMultiaddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, advertiseIP, port, peerID)
|
||||||
|
|
||||||
if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil {
|
if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil {
|
||||||
logger.Error("Failed to save peer info: %v", zap.Error(err))
|
logger.Error("Failed to save peer info: %v", zap.Error(err))
|
||||||
} else {
|
} else {
|
||||||
logger.Info("Peer info saved to: %s", zap.String("path", peerInfoFile))
|
logger.Info("Peer info saved to: %s", zap.String("path", peerInfoFile))
|
||||||
logger.Info("Bootstrap multiaddr: %s", zap.String("path", peerMultiaddr))
|
logger.Info("Peer multiaddr: %s", zap.String("path", peerMultiaddr))
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Info("Node started successfully")
|
logger.Info("Node started successfully")
|
||||||
@ -232,16 +261,25 @@ func main() {
|
|||||||
|
|
||||||
check_if_should_open_help(help)
|
check_if_should_open_help(help)
|
||||||
|
|
||||||
// Check if config file exists
|
// Check if config file exists and determine path
|
||||||
select_data_dir_check(configName)
|
select_data_dir_check(configName)
|
||||||
|
|
||||||
// Load configuration from ~/.debros/node.yaml
|
// Determine config path (handle both absolute and relative paths)
|
||||||
configPath, err := config.DefaultPath(*configName)
|
// Note: select_data_dir_check already validated the path exists, so we can safely determine it here
|
||||||
|
var configPath string
|
||||||
|
var err error
|
||||||
|
if filepath.IsAbs(*configName) {
|
||||||
|
// Absolute path passed directly (e.g., from systemd service)
|
||||||
|
configPath = *configName
|
||||||
|
} else {
|
||||||
|
// Relative path - use DefaultPath which checks both ~/.orama/configs/ and ~/.orama/
|
||||||
|
configPath, err = config.DefaultPath(*configName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to determine config path", zap.Error(err))
|
logger.Error("Failed to determine config path", zap.Error(err))
|
||||||
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var cfg *config.Config
|
var cfg *config.Config
|
||||||
var cfgErr error
|
var cfgErr error
|
||||||
@ -255,10 +293,10 @@ func main() {
|
|||||||
|
|
||||||
// Set default advertised addresses if empty
|
// Set default advertised addresses if empty
|
||||||
if cfg.Discovery.HttpAdvAddress == "" {
|
if cfg.Discovery.HttpAdvAddress == "" {
|
||||||
cfg.Discovery.HttpAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLitePort)
|
cfg.Discovery.HttpAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLitePort)
|
||||||
}
|
}
|
||||||
if cfg.Discovery.RaftAdvAddress == "" {
|
if cfg.Discovery.RaftAdvAddress == "" {
|
||||||
cfg.Discovery.RaftAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLiteRaftPort)
|
cfg.Discovery.RaftAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLiteRaftPort)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate configuration
|
// Validate configuration
|
||||||
@ -278,7 +316,7 @@ func main() {
|
|||||||
zap.Strings("listen_addresses", cfg.Node.ListenAddresses),
|
zap.Strings("listen_addresses", cfg.Node.ListenAddresses),
|
||||||
zap.Int("rqlite_http_port", cfg.Database.RQLitePort),
|
zap.Int("rqlite_http_port", cfg.Database.RQLitePort),
|
||||||
zap.Int("rqlite_raft_port", cfg.Database.RQLiteRaftPort),
|
zap.Int("rqlite_raft_port", cfg.Database.RQLiteRaftPort),
|
||||||
zap.Strings("bootstrap_peers", cfg.Discovery.BootstrapPeers),
|
zap.Strings("peers", cfg.Discovery.BootstrapPeers),
|
||||||
zap.String("rqlite_join_address", cfg.Database.RQLiteJoinAddress),
|
zap.String("rqlite_join_address", cfg.Database.RQLiteJoinAddress),
|
||||||
zap.String("data_directory", cfg.Node.DataDir))
|
zap.String("data_directory", cfg.Node.DataDir))
|
||||||
|
|
||||||
118
core/cmd/sfu/config.go
Normal file
118
core/cmd/sfu/config.go
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/config"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/sfu"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newSFUServer creates a new SFU server from config and logger.
|
||||||
|
// Wrapper to keep main.go clean and avoid importing sfu in main.
|
||||||
|
func newSFUServer(cfg *sfu.Config, logger *zap.Logger) (*sfu.Server, error) {
|
||||||
|
return sfu.NewServer(cfg, logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSFUConfig(logger *logging.ColoredLogger) *sfu.Config {
|
||||||
|
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
var configPath string
|
||||||
|
var err error
|
||||||
|
if *configFlag != "" {
|
||||||
|
if filepath.IsAbs(*configFlag) {
|
||||||
|
configPath = *configFlag
|
||||||
|
} else {
|
||||||
|
configPath, err = config.DefaultPath(*configFlag)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
configPath, err = config.DefaultPath("sfu.yaml")
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type yamlTURNServer struct {
|
||||||
|
Host string `yaml:"host"`
|
||||||
|
Port int `yaml:"port"`
|
||||||
|
Secure bool `yaml:"secure"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type yamlCfg struct {
|
||||||
|
ListenAddr string `yaml:"listen_addr"`
|
||||||
|
Namespace string `yaml:"namespace"`
|
||||||
|
MediaPortStart int `yaml:"media_port_start"`
|
||||||
|
MediaPortEnd int `yaml:"media_port_end"`
|
||||||
|
TURNServers []yamlTURNServer `yaml:"turn_servers"`
|
||||||
|
TURNSecret string `yaml:"turn_secret"`
|
||||||
|
TURNCredentialTTL int `yaml:"turn_credential_ttl"`
|
||||||
|
RQLiteDSN string `yaml:"rqlite_dsn"`
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := os.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Config file not found",
|
||||||
|
zap.String("path", configPath), zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var y yamlCfg
|
||||||
|
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Failed to parse SFU config", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var turnServers []sfu.TURNServerConfig
|
||||||
|
for _, ts := range y.TURNServers {
|
||||||
|
turnServers = append(turnServers, sfu.TURNServerConfig{
|
||||||
|
Host: ts.Host,
|
||||||
|
Port: ts.Port,
|
||||||
|
Secure: ts.Secure,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := &sfu.Config{
|
||||||
|
ListenAddr: y.ListenAddr,
|
||||||
|
Namespace: y.Namespace,
|
||||||
|
MediaPortStart: y.MediaPortStart,
|
||||||
|
MediaPortEnd: y.MediaPortEnd,
|
||||||
|
TURNServers: turnServers,
|
||||||
|
TURNSecret: y.TURNSecret,
|
||||||
|
TURNCredentialTTL: y.TURNCredentialTTL,
|
||||||
|
RQLiteDSN: y.RQLiteDSN,
|
||||||
|
}
|
||||||
|
|
||||||
|
if errs := cfg.Validate(); len(errs) > 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "\nSFU configuration errors (%d):\n", len(errs))
|
||||||
|
for _, e := range errs {
|
||||||
|
fmt.Fprintf(os.Stderr, " - %s\n", e)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentSFU, "Loaded SFU configuration",
|
||||||
|
zap.String("path", configPath),
|
||||||
|
zap.String("listen_addr", cfg.ListenAddr),
|
||||||
|
zap.String("namespace", cfg.Namespace),
|
||||||
|
zap.Int("media_ports", cfg.MediaPortEnd-cfg.MediaPortStart),
|
||||||
|
zap.Int("turn_servers", len(cfg.TURNServers)),
|
||||||
|
)
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
61
core/cmd/sfu/main.go
Normal file
61
core/cmd/sfu/main.go
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
version = "dev"
|
||||||
|
commit = "unknown"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
logger, err := logging.NewColoredLogger(logging.ComponentSFU, true)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentSFU, "Starting SFU server",
|
||||||
|
zap.String("version", version),
|
||||||
|
zap.String("commit", commit))
|
||||||
|
|
||||||
|
cfg := parseSFUConfig(logger)
|
||||||
|
|
||||||
|
server, err := newSFUServer(cfg, logger.Logger)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Failed to create SFU server", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start HTTP server in background
|
||||||
|
go func() {
|
||||||
|
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "SFU server error", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for termination signal
|
||||||
|
quit := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||||
|
sig := <-quit
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentSFU, "Shutdown signal received", zap.String("signal", sig.String()))
|
||||||
|
|
||||||
|
// Graceful drain: notify peers and wait
|
||||||
|
server.Drain(30 * time.Second)
|
||||||
|
|
||||||
|
if err := server.Close(); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentSFU, "Error during shutdown", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentSFU, "SFU server shutdown complete")
|
||||||
|
}
|
||||||
100
core/cmd/turn/config.go
Normal file
100
core/cmd/turn/config.go
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/config"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/turn"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func parseTURNConfig(logger *logging.ColoredLogger) *turn.Config {
|
||||||
|
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
var configPath string
|
||||||
|
var err error
|
||||||
|
if *configFlag != "" {
|
||||||
|
if filepath.IsAbs(*configFlag) {
|
||||||
|
configPath = *configFlag
|
||||||
|
} else {
|
||||||
|
configPath, err = config.DefaultPath(*configFlag)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
configPath, err = config.DefaultPath("turn.yaml")
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Failed to determine config path", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type yamlCfg struct {
|
||||||
|
ListenAddr string `yaml:"listen_addr"`
|
||||||
|
TURNSListenAddr string `yaml:"turns_listen_addr"`
|
||||||
|
PublicIP string `yaml:"public_ip"`
|
||||||
|
Realm string `yaml:"realm"`
|
||||||
|
AuthSecret string `yaml:"auth_secret"`
|
||||||
|
RelayPortStart int `yaml:"relay_port_start"`
|
||||||
|
RelayPortEnd int `yaml:"relay_port_end"`
|
||||||
|
Namespace string `yaml:"namespace"`
|
||||||
|
TLSCertPath string `yaml:"tls_cert_path"`
|
||||||
|
TLSKeyPath string `yaml:"tls_key_path"`
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := os.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Config file not found",
|
||||||
|
zap.String("path", configPath), zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var y yamlCfg
|
||||||
|
if err := config.DecodeStrict(strings.NewReader(string(data)), &y); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Failed to parse TURN config", zap.Error(err))
|
||||||
|
fmt.Fprintf(os.Stderr, "Configuration parse error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := &turn.Config{
|
||||||
|
ListenAddr: y.ListenAddr,
|
||||||
|
TURNSListenAddr: y.TURNSListenAddr,
|
||||||
|
PublicIP: y.PublicIP,
|
||||||
|
Realm: y.Realm,
|
||||||
|
AuthSecret: y.AuthSecret,
|
||||||
|
RelayPortStart: y.RelayPortStart,
|
||||||
|
RelayPortEnd: y.RelayPortEnd,
|
||||||
|
Namespace: y.Namespace,
|
||||||
|
TLSCertPath: y.TLSCertPath,
|
||||||
|
TLSKeyPath: y.TLSKeyPath,
|
||||||
|
}
|
||||||
|
|
||||||
|
if errs := cfg.Validate(); len(errs) > 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "\nTURN configuration errors (%d):\n", len(errs))
|
||||||
|
for _, e := range errs {
|
||||||
|
fmt.Fprintf(os.Stderr, " - %s\n", e)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "\nPlease fix the configuration and try again.\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentTURN, "Loaded TURN configuration",
|
||||||
|
zap.String("path", configPath),
|
||||||
|
zap.String("listen_addr", cfg.ListenAddr),
|
||||||
|
zap.String("namespace", cfg.Namespace),
|
||||||
|
zap.String("realm", cfg.Realm),
|
||||||
|
)
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
48
core/cmd/turn/main.go
Normal file
48
core/cmd/turn/main.go
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/turn"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
version = "dev"
|
||||||
|
commit = "unknown"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
logger, err := logging.NewColoredLogger(logging.ComponentTURN, true)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentTURN, "Starting TURN server",
|
||||||
|
zap.String("version", version),
|
||||||
|
zap.String("commit", commit))
|
||||||
|
|
||||||
|
cfg := parseTURNConfig(logger)
|
||||||
|
|
||||||
|
server, err := turn.NewServer(cfg, logger.Logger)
|
||||||
|
if err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Failed to start TURN server", zap.Error(err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for termination signal
|
||||||
|
quit := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
|
||||||
|
sig := <-quit
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentTURN, "Shutdown signal received", zap.String("signal", sig.String()))
|
||||||
|
|
||||||
|
if err := server.Close(); err != nil {
|
||||||
|
logger.ComponentError(logging.ComponentTURN, "Error during shutdown", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.ComponentInfo(logging.ComponentTURN, "TURN server shutdown complete")
|
||||||
|
}
|
||||||
19
core/debian/control
Normal file
19
core/debian/control
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
Package: orama
|
||||||
|
Version: 0.69.20
|
||||||
|
Section: net
|
||||||
|
Priority: optional
|
||||||
|
Architecture: amd64
|
||||||
|
Depends: libc6
|
||||||
|
Maintainer: DeBros Team <dev@orama.io>
|
||||||
|
Description: Orama Network - Distributed P2P Database System
|
||||||
|
Orama is a distributed peer-to-peer network that combines
|
||||||
|
RQLite for distributed SQL, IPFS for content-addressed storage,
|
||||||
|
and LibP2P for peer discovery and communication.
|
||||||
|
.
|
||||||
|
Features:
|
||||||
|
- Distributed SQLite database with Raft consensus
|
||||||
|
- IPFS-based file storage with encryption
|
||||||
|
- LibP2P peer-to-peer networking
|
||||||
|
- Olric distributed cache
|
||||||
|
- Unified HTTP/HTTPS gateway
|
||||||
|
|
||||||
18
core/debian/postinst
Normal file
18
core/debian/postinst
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Post-installation script for orama package
|
||||||
|
|
||||||
|
echo "Orama installed successfully!"
|
||||||
|
echo ""
|
||||||
|
echo "To set up your node, run:"
|
||||||
|
echo " sudo orama install"
|
||||||
|
echo ""
|
||||||
|
echo "This will launch the interactive installer."
|
||||||
|
echo ""
|
||||||
|
echo "For command-line installation:"
|
||||||
|
echo " sudo orama install --vps-ip <your-ip> --domain <your-domain>"
|
||||||
|
echo ""
|
||||||
|
echo "For help:"
|
||||||
|
echo " orama --help"
|
||||||
|
|
||||||
571
core/docs/ARCHITECTURE.md
Normal file
571
core/docs/ARCHITECTURE.md
Normal file
@ -0,0 +1,571 @@
|
|||||||
|
# Orama Network Architecture
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Orama Network is a high-performance API Gateway and Reverse Proxy designed for a decentralized ecosystem. It serves as a unified entry point that orchestrates traffic between clients and various backend services.
|
||||||
|
|
||||||
|
## Architecture Pattern
|
||||||
|
|
||||||
|
**Modular Gateway / Edge Proxy Architecture**
|
||||||
|
|
||||||
|
The system follows a clean, layered architecture with clear separation of concerns:
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ Clients │
|
||||||
|
│ (Web, Mobile, CLI, SDKs) │
|
||||||
|
└────────────────────────┬────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ HTTPS/WSS
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────────────────────────┐
|
||||||
|
│ API Gateway (Port 443) │
|
||||||
|
│ ┌──────────────────────────────────────────────────────┐ │
|
||||||
|
│ │ Handlers Layer (HTTP/WebSocket) │ │
|
||||||
|
│ │ - Auth handlers - Storage handlers │ │
|
||||||
|
│ │ - Cache handlers - PubSub handlers │ │
|
||||||
|
│ │ - Serverless - Database handlers │ │
|
||||||
|
│ └──────────────────────┬───────────────────────────────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ┌──────────────────────▼───────────────────────────────┐ │
|
||||||
|
│ │ Middleware (Security, Auth, Logging) │ │
|
||||||
|
│ └──────────────────────┬───────────────────────────────┘ │
|
||||||
|
│ │ │
|
||||||
|
│ ┌──────────────────────▼───────────────────────────────┐ │
|
||||||
|
│ │ Service Coordination (Gateway Core) │ │
|
||||||
|
│ └──────────────────────┬───────────────────────────────┘ │
|
||||||
|
└─────────────────────────┼────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
┌─────────────────┼─────────────────┐
|
||||||
|
│ │ │
|
||||||
|
▼ ▼ ▼
|
||||||
|
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
|
||||||
|
│ RQLite │ │ Olric │ │ IPFS │
|
||||||
|
│ (Database) │ │ (Cache) │ │ (Storage) │
|
||||||
|
│ │ │ │ │ │
|
||||||
|
│ Port 5001 │ │ Port 3320 │ │ Port 4501 │
|
||||||
|
└──────────────┘ └──────────────┘ └──────────────┘
|
||||||
|
|
||||||
|
┌─────────────────┐ ┌──────────────┐
|
||||||
|
│ IPFS Cluster │ │ Serverless │
|
||||||
|
│ (Pinning) │ │ (WASM) │
|
||||||
|
│ │ │ │
|
||||||
|
│ Port 9094 │ │ In-Process │
|
||||||
|
└─────────────────┘ └──────────────┘
|
||||||
|
|
||||||
|
┌─────────────────┐
|
||||||
|
│ Anyone │
|
||||||
|
│ (Anonymity) │
|
||||||
|
│ │
|
||||||
|
│ Port 9050 │
|
||||||
|
└─────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Core Components
|
||||||
|
|
||||||
|
### 1. API Gateway (`pkg/gateway/`)
|
||||||
|
|
||||||
|
The gateway is the main entry point for all client requests. It coordinates between various backend services.
|
||||||
|
|
||||||
|
**Key Files:**
|
||||||
|
- `gateway.go` - Core gateway struct and routing
|
||||||
|
- `dependencies.go` - Service initialization and dependency injection
|
||||||
|
- `lifecycle.go` - Start/stop/health lifecycle management
|
||||||
|
- `middleware.go` - Authentication, logging, error handling
|
||||||
|
- `routes.go` - HTTP route registration
|
||||||
|
|
||||||
|
**Handler Packages:**
|
||||||
|
- `handlers/auth/` - Authentication (JWT, API keys, wallet signatures)
|
||||||
|
- `handlers/storage/` - IPFS storage operations
|
||||||
|
- `handlers/cache/` - Distributed cache operations
|
||||||
|
- `handlers/pubsub/` - Pub/sub messaging
|
||||||
|
- `handlers/serverless/` - Serverless function deployment and execution
|
||||||
|
|
||||||
|
### 2. Client SDK (`pkg/client/`)
|
||||||
|
|
||||||
|
Provides a clean Go SDK for interacting with the Orama Network.
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
```go
|
||||||
|
// Main client interface
|
||||||
|
type NetworkClient interface {
|
||||||
|
Storage() StorageClient
|
||||||
|
Cache() CacheClient
|
||||||
|
Database() DatabaseClient
|
||||||
|
PubSub() PubSubClient
|
||||||
|
Serverless() ServerlessClient
|
||||||
|
Auth() AuthClient
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key Files:**
|
||||||
|
- `client.go` - Main client orchestration
|
||||||
|
- `config.go` - Client configuration
|
||||||
|
- `storage_client.go` - IPFS storage client
|
||||||
|
- `cache_client.go` - Olric cache client
|
||||||
|
- `database_client.go` - RQLite database client
|
||||||
|
- `pubsub_bridge.go` - Pub/sub messaging client
|
||||||
|
- `transport.go` - HTTP transport layer
|
||||||
|
- `errors.go` - Client-specific errors
|
||||||
|
|
||||||
|
**Usage Example:**
|
||||||
|
```go
|
||||||
|
import "github.com/DeBrosOfficial/network/pkg/client"
|
||||||
|
|
||||||
|
// Create client
|
||||||
|
cfg := client.DefaultClientConfig()
|
||||||
|
cfg.GatewayURL = "https://api.orama.network"
|
||||||
|
cfg.APIKey = "your-api-key"
|
||||||
|
|
||||||
|
c := client.NewNetworkClient(cfg)
|
||||||
|
|
||||||
|
// Use storage
|
||||||
|
resp, err := c.Storage().Upload(ctx, data, "file.txt")
|
||||||
|
|
||||||
|
// Use cache
|
||||||
|
err = c.Cache().Set(ctx, "key", value, 0)
|
||||||
|
|
||||||
|
// Query database
|
||||||
|
rows, err := c.Database().Query(ctx, "SELECT * FROM users")
|
||||||
|
|
||||||
|
// Publish message
|
||||||
|
err = c.PubSub().Publish(ctx, "chat", []byte("hello"))
|
||||||
|
|
||||||
|
// Deploy function
|
||||||
|
fn, err := c.Serverless().Deploy(ctx, def, wasmBytes)
|
||||||
|
|
||||||
|
// Invoke function
|
||||||
|
result, err := c.Serverless().Invoke(ctx, "function-name", input)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Database Layer (`pkg/rqlite/`)
|
||||||
|
|
||||||
|
ORM-like interface over RQLite distributed SQL database.
|
||||||
|
|
||||||
|
**Key Files:**
|
||||||
|
- `client.go` - Main ORM client
|
||||||
|
- `orm_types.go` - Interfaces (Client, Tx, Repository[T])
|
||||||
|
- `query_builder.go` - Fluent query builder
|
||||||
|
- `repository.go` - Generic repository pattern
|
||||||
|
- `scanner.go` - Reflection-based row scanning
|
||||||
|
- `transaction.go` - Transaction support
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
- Fluent query builder
|
||||||
|
- Generic repository pattern with type safety
|
||||||
|
- Automatic struct mapping
|
||||||
|
- Transaction support
|
||||||
|
- Connection pooling with retry
|
||||||
|
|
||||||
|
**Example:**
|
||||||
|
```go
|
||||||
|
// Query builder
|
||||||
|
users, err := client.CreateQueryBuilder("users").
|
||||||
|
Select("id", "name", "email").
|
||||||
|
Where("age > ?", 18).
|
||||||
|
OrderBy("name ASC").
|
||||||
|
Limit(10).
|
||||||
|
GetMany(ctx, &users)
|
||||||
|
|
||||||
|
// Repository pattern
|
||||||
|
type User struct {
|
||||||
|
ID int `db:"id"`
|
||||||
|
Name string `db:"name"`
|
||||||
|
Email string `db:"email"`
|
||||||
|
}
|
||||||
|
|
||||||
|
repo := client.Repository("users")
|
||||||
|
user := &User{Name: "Alice", Email: "alice@example.com"}
|
||||||
|
err := repo.Save(ctx, user)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Serverless Engine (`pkg/serverless/`)
|
||||||
|
|
||||||
|
WebAssembly (WASM) function execution engine with host functions.
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
```
|
||||||
|
pkg/serverless/
|
||||||
|
├── engine.go - Core WASM engine
|
||||||
|
├── execution/ - Function execution
|
||||||
|
│ ├── executor.go
|
||||||
|
│ └── lifecycle.go
|
||||||
|
├── cache/ - Module caching
|
||||||
|
│ └── module_cache.go
|
||||||
|
├── registry/ - Function metadata
|
||||||
|
│ ├── registry.go
|
||||||
|
│ ├── function_store.go
|
||||||
|
│ ├── ipfs_store.go
|
||||||
|
│ └── invocation_logger.go
|
||||||
|
└── hostfunctions/ - Host functions by domain
|
||||||
|
├── cache.go - Cache operations
|
||||||
|
├── storage.go - Storage operations
|
||||||
|
├── database.go - Database queries
|
||||||
|
├── pubsub.go - Messaging
|
||||||
|
├── http.go - HTTP requests
|
||||||
|
└── logging.go - Logging
|
||||||
|
```
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
- Secure WASM execution sandbox
|
||||||
|
- Memory and CPU limits
|
||||||
|
- Host function injection (cache, storage, DB, HTTP)
|
||||||
|
- Function versioning
|
||||||
|
- Invocation logging
|
||||||
|
- Hot module reloading
|
||||||
|
|
||||||
|
### 5. Configuration System (`pkg/config/`)
|
||||||
|
|
||||||
|
Domain-specific configuration with validation.
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
```
|
||||||
|
pkg/config/
|
||||||
|
├── config.go - Main config aggregator
|
||||||
|
├── loader.go - YAML loading
|
||||||
|
├── node_config.go - Node settings
|
||||||
|
├── database_config.go - Database settings
|
||||||
|
├── gateway_config.go - Gateway settings
|
||||||
|
└── validate/ - Validation
|
||||||
|
├── validators.go
|
||||||
|
├── node.go
|
||||||
|
├── database.go
|
||||||
|
└── gateway.go
|
||||||
|
```
|
||||||
|
|
||||||
|
### 6. Anyone Integration (`pkg/anyoneproxy/`)
|
||||||
|
|
||||||
|
Integration with the Anyone Protocol for anonymous routing.
|
||||||
|
|
||||||
|
**Modes:**
|
||||||
|
|
||||||
|
| Mode | Purpose | Port | Rewards |
|
||||||
|
|------|---------|------|---------|
|
||||||
|
| Client | Route traffic anonymously | 9050 (SOCKS5) | No |
|
||||||
|
| Relay | Provide bandwidth to network | 9001 (ORPort) + 9050 | Yes ($ANYONE) |
|
||||||
|
|
||||||
|
**Key Files:**
|
||||||
|
- `pkg/anyoneproxy/socks.go` - SOCKS5 proxy client interface
|
||||||
|
- `pkg/gateway/anon_proxy_handler.go` - Anonymous proxy API endpoint
|
||||||
|
- `pkg/environments/production/installers/anyone_relay.go` - Relay installation
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
- Smart routing (bypasses proxy for local/private addresses)
|
||||||
|
- Automatic detection of existing Anyone installations
|
||||||
|
- Migration support for existing relay operators
|
||||||
|
- Exit relay mode with legal warnings
|
||||||
|
|
||||||
|
**API Endpoint:**
|
||||||
|
- `POST /v1/proxy/anon` - Route HTTP requests through Anyone network
|
||||||
|
|
||||||
|
**Relay Requirements:**
|
||||||
|
- Linux OS (Debian/Ubuntu)
|
||||||
|
- 100 $ANYONE tokens in wallet
|
||||||
|
- ORPort accessible from internet
|
||||||
|
- Registration at dashboard.anyone.io
|
||||||
|
|
||||||
|
### 7. Shared Utilities
|
||||||
|
|
||||||
|
**HTTP Utilities (`pkg/httputil/`):**
|
||||||
|
- Request parsing and validation
|
||||||
|
- JSON response writers
|
||||||
|
- Error handling
|
||||||
|
- Authentication extraction
|
||||||
|
|
||||||
|
**Error Handling (`pkg/errors/`):**
|
||||||
|
- Typed errors (ValidationError, NotFoundError, etc.)
|
||||||
|
- HTTP status code mapping
|
||||||
|
- Error wrapping with context
|
||||||
|
- Stack traces
|
||||||
|
|
||||||
|
**Contracts (`pkg/contracts/`):**
|
||||||
|
- Interface definitions for all services
|
||||||
|
- Enables dependency injection
|
||||||
|
- Clean abstractions
|
||||||
|
|
||||||
|
## Data Flow
|
||||||
|
|
||||||
|
### 1. HTTP Request Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
Client Request
|
||||||
|
↓
|
||||||
|
[HTTPS Termination]
|
||||||
|
↓
|
||||||
|
[Authentication Middleware]
|
||||||
|
↓
|
||||||
|
[Route Handler]
|
||||||
|
↓
|
||||||
|
[Service Layer]
|
||||||
|
↓
|
||||||
|
[Backend Service] (RQLite/Olric/IPFS)
|
||||||
|
↓
|
||||||
|
[Response Formatting]
|
||||||
|
↓
|
||||||
|
Client Response
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. WebSocket Flow (Pub/Sub)
|
||||||
|
|
||||||
|
```
|
||||||
|
Client WebSocket Connect
|
||||||
|
↓
|
||||||
|
[Upgrade to WebSocket]
|
||||||
|
↓
|
||||||
|
[Authentication]
|
||||||
|
↓
|
||||||
|
[Subscribe to Topic]
|
||||||
|
↓
|
||||||
|
[LibP2P PubSub] ←→ [Local Subscribers]
|
||||||
|
↓
|
||||||
|
[Message Broadcasting]
|
||||||
|
↓
|
||||||
|
Client Receives Messages
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Serverless Invocation Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
Function Deployment:
|
||||||
|
Upload WASM → Store in IPFS → Save Metadata (RQLite) → Compile Module
|
||||||
|
|
||||||
|
Function Invocation:
|
||||||
|
Request → Load Metadata → Get WASM from IPFS →
|
||||||
|
Execute in Sandbox → Return Result → Log Invocation
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Architecture
|
||||||
|
|
||||||
|
### Authentication Methods
|
||||||
|
|
||||||
|
1. **Wallet Signatures** (Ethereum-style)
|
||||||
|
- Challenge/response flow
|
||||||
|
- Nonce-based to prevent replay attacks
|
||||||
|
- Issues JWT tokens after verification
|
||||||
|
|
||||||
|
2. **API Keys**
|
||||||
|
- Long-lived credentials
|
||||||
|
- Stored in RQLite
|
||||||
|
- Namespace-scoped
|
||||||
|
|
||||||
|
3. **JWT Tokens**
|
||||||
|
- Short-lived (15 min default)
|
||||||
|
- Refresh token support
|
||||||
|
- Claims-based authorization
|
||||||
|
|
||||||
|
### Network Security (WireGuard Mesh)
|
||||||
|
|
||||||
|
All inter-node communication is encrypted via a WireGuard VPN mesh:
|
||||||
|
|
||||||
|
- **WireGuard IPs:** Each node gets a private IP (10.0.0.x/24) used for all cluster traffic
|
||||||
|
- **UFW Firewall:** Only public ports are exposed: 22 (SSH), 53 (DNS, nameservers only), 80/443 (HTTP/HTTPS), 51820 (WireGuard UDP)
|
||||||
|
- **IPv6 disabled:** System-wide via sysctl to prevent bypass of IPv4 firewall rules
|
||||||
|
- **Internal services** (RQLite 5001/7001, IPFS 4001/4501, Olric 3320/3322, Gateway 6001) are only accessible via WireGuard or localhost
|
||||||
|
- **Invite tokens:** Single-use, time-limited tokens for secure node joining. No shared secrets on the CLI
|
||||||
|
- **Join flow:** New nodes authenticate via HTTPS (443) with TOFU certificate pinning, establish WireGuard tunnel, then join all services over the encrypted mesh
|
||||||
|
|
||||||
|
### Service Authentication
|
||||||
|
|
||||||
|
- **RQLite:** HTTP basic auth on all queries/executions — credentials generated at genesis, distributed via join response
|
||||||
|
- **Olric:** Memberlist gossip encrypted with a shared 32-byte key
|
||||||
|
- **IPFS Cluster:** TrustedPeers restricted to known cluster peer IDs (not `*`)
|
||||||
|
- **Internal endpoints:** `/v1/internal/wg/peers` and `/v1/internal/wg/peer/remove` require cluster secret
|
||||||
|
- **Vault:** V1 push/pull endpoints require session token authentication when guardian is configured
|
||||||
|
- **WebSockets:** Origin header validated against the node's configured domain
|
||||||
|
|
||||||
|
### Token & Key Security
|
||||||
|
|
||||||
|
- **Refresh tokens:** Stored as SHA-256 hashes (never plaintext)
|
||||||
|
- **API keys:** Stored as HMAC-SHA256 hashes with a server-side secret
|
||||||
|
- **TURN secrets:** Encrypted at rest with AES-256-GCM (key derived from cluster secret)
|
||||||
|
- **Binary signing:** Build archives signed with rootwallet EVM signature, verified on install
|
||||||
|
|
||||||
|
### Process Isolation
|
||||||
|
|
||||||
|
- **Dedicated user:** All services run as `orama` user (not root)
|
||||||
|
- **systemd hardening:** `ProtectSystem=strict`, `NoNewPrivileges=yes`, `PrivateDevices=yes`, etc.
|
||||||
|
- **Capabilities:** Caddy and CoreDNS get `CAP_NET_BIND_SERVICE` for privileged ports
|
||||||
|
|
||||||
|
See [SECURITY.md](SECURITY.md) for the full security hardening reference.
|
||||||
|
|
||||||
|
### TLS/HTTPS
|
||||||
|
|
||||||
|
- Automatic ACME (Let's Encrypt) certificate management via Caddy
|
||||||
|
- TLS 1.3 support
|
||||||
|
- HTTP/2 enabled
|
||||||
|
- On-demand TLS for deployment custom domains
|
||||||
|
|
||||||
|
### Middleware Stack
|
||||||
|
|
||||||
|
1. **Logger** - Request/response logging
|
||||||
|
2. **CORS** - Cross-origin resource sharing
|
||||||
|
3. **Authentication** - JWT/API key validation
|
||||||
|
4. **Authorization** - Namespace access control
|
||||||
|
5. **Rate Limiting** - Per-client rate limits
|
||||||
|
6. **Error Handling** - Consistent error responses
|
||||||
|
|
||||||
|
## Scalability
|
||||||
|
|
||||||
|
### Horizontal Scaling
|
||||||
|
|
||||||
|
- **Gateway:** Stateless, can run multiple instances behind load balancer
|
||||||
|
- **RQLite:** Multi-node cluster with Raft consensus
|
||||||
|
- **IPFS:** Distributed storage across nodes
|
||||||
|
- **Olric:** Distributed cache with consistent hashing
|
||||||
|
|
||||||
|
### Caching Strategy
|
||||||
|
|
||||||
|
1. **WASM Module Cache** - Compiled modules cached in memory
|
||||||
|
2. **Olric Distributed Cache** - Shared cache across nodes
|
||||||
|
3. **Local Cache** - Per-gateway request caching
|
||||||
|
|
||||||
|
### High Availability
|
||||||
|
|
||||||
|
- **Database:** RQLite cluster with automatic leader election
|
||||||
|
- **Storage:** IPFS replication factor configurable
|
||||||
|
- **Cache:** Olric replication and eventual consistency
|
||||||
|
- **Gateway:** Stateless, multiple replicas supported
|
||||||
|
|
||||||
|
## Monitoring & Observability
|
||||||
|
|
||||||
|
### Health Checks
|
||||||
|
|
||||||
|
- `/health` - Liveness probe
|
||||||
|
- `/v1/status` - Detailed status with service checks
|
||||||
|
|
||||||
|
### Metrics
|
||||||
|
|
||||||
|
- Prometheus-compatible metrics endpoint
|
||||||
|
- Request counts, latencies, error rates
|
||||||
|
- Service-specific metrics (cache hit ratio, DB query times)
|
||||||
|
|
||||||
|
### Logging
|
||||||
|
|
||||||
|
- Structured logging (JSON format)
|
||||||
|
- Log levels: DEBUG, INFO, WARN, ERROR
|
||||||
|
- Correlation IDs for request tracing
|
||||||
|
|
||||||
|
## Development Patterns
|
||||||
|
|
||||||
|
### SOLID Principles
|
||||||
|
|
||||||
|
- **Single Responsibility:** Each handler/service has one focus
|
||||||
|
- **Open/Closed:** Interface-based design for extensibility
|
||||||
|
- **Liskov Substitution:** All implementations conform to contracts
|
||||||
|
- **Interface Segregation:** Small, focused interfaces
|
||||||
|
- **Dependency Inversion:** Depend on abstractions, not implementations
|
||||||
|
|
||||||
|
### Code Organization
|
||||||
|
|
||||||
|
- **Average file size:** ~150 lines
|
||||||
|
- **Package structure:** Domain-driven, feature-focused
|
||||||
|
- **Testing:** Unit tests for logic, E2E tests for integration
|
||||||
|
- **Documentation:** Godoc comments on all public APIs
|
||||||
|
|
||||||
|
## Deployment
|
||||||
|
|
||||||
|
### Building & Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make build # Build all binaries
|
||||||
|
make test # Run unit tests
|
||||||
|
make test-e2e # Run E2E tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### Production
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# First node (genesis — creates cluster)
|
||||||
|
# Nameserver nodes use the base domain as --domain
|
||||||
|
sudo orama install --vps-ip <IP> --domain example.com --base-domain example.com --nameserver
|
||||||
|
|
||||||
|
# On the genesis node, generate an invite for a new node
|
||||||
|
orama invite
|
||||||
|
# Outputs: sudo orama install --join https://example.com --token <TOKEN> --vps-ip <NEW_IP>
|
||||||
|
|
||||||
|
# Additional nameserver nodes (join via invite token over HTTPS)
|
||||||
|
sudo orama install --join https://example.com --token <TOKEN> \
|
||||||
|
--vps-ip <IP> --domain example.com --base-domain example.com --nameserver
|
||||||
|
```
|
||||||
|
|
||||||
|
**Security:** Nodes join via single-use invite tokens over HTTPS. A WireGuard VPN tunnel
|
||||||
|
is established before any cluster services start. All inter-node traffic (RQLite, IPFS,
|
||||||
|
Olric, LibP2P) flows over the encrypted WireGuard mesh — no cluster ports are exposed
|
||||||
|
publicly. **Never use `http://<ip>:6001`** for joining — port 6001 is internal-only and
|
||||||
|
blocked by UFW. Use the domain (`https://node1.example.com`) or, if DNS is not yet
|
||||||
|
configured, use the IP over HTTP port 80 (`http://<ip>`) which goes through Caddy.
|
||||||
|
|
||||||
|
### Docker (Future)
|
||||||
|
|
||||||
|
Planned containerization with Docker Compose and Kubernetes support.
|
||||||
|
|
||||||
|
## WebRTC (Voice/Video/Data)
|
||||||
|
|
||||||
|
Namespaces can opt in to WebRTC support for real-time voice, video, and data channels.
|
||||||
|
|
||||||
|
### Components
|
||||||
|
|
||||||
|
- **SFU (Selective Forwarding Unit)** — Pion WebRTC server that handles signaling (WebSocket), SDP negotiation, and RTP forwarding. Runs on all 3 cluster nodes, binds only to WireGuard IPs.
|
||||||
|
- **TURN Server** — Pion TURN relay that provides NAT traversal. Runs on 2 of 3 nodes for redundancy. Public-facing (UDP 3478, 443, relay range 49152-65535).
|
||||||
|
|
||||||
|
### Security Model
|
||||||
|
|
||||||
|
- **TURN-shielded**: SFU binds only to WireGuard (10.0.0.x), never 0.0.0.0. All client media flows through TURN relay.
|
||||||
|
- **Forced relay**: `iceTransportPolicy: relay` enforced server-side — no direct peer connections.
|
||||||
|
- **HMAC credentials**: Per-namespace TURN shared secret with 10-minute TTL.
|
||||||
|
- **Namespace isolation**: Each namespace has its own TURN secret, port ranges, and rooms.
|
||||||
|
|
||||||
|
### Port Allocation
|
||||||
|
|
||||||
|
WebRTC uses a separate port allocation system from core namespace services:
|
||||||
|
|
||||||
|
| Service | Port Range |
|
||||||
|
|---------|-----------|
|
||||||
|
| SFU signaling | 30000-30099 |
|
||||||
|
| SFU media (RTP) | 20000-29999 |
|
||||||
|
| TURN listen | 3478/udp (standard) |
|
||||||
|
| TURN TLS | 443/udp |
|
||||||
|
| TURN relay | 49152-65535/udp |
|
||||||
|
|
||||||
|
See [docs/WEBRTC.md](WEBRTC.md) for full details including client integration, API reference, and debugging.
|
||||||
|
|
||||||
|
## OramaOS
|
||||||
|
|
||||||
|
For mainnet, devnet, and testnet environments, nodes run **OramaOS** — a custom minimal Linux image built with Buildroot.
|
||||||
|
|
||||||
|
**Key properties:**
|
||||||
|
- No SSH, no shell — operators cannot access the filesystem
|
||||||
|
- LUKS full-disk encryption with Shamir key distribution across peers
|
||||||
|
- Read-only rootfs (SquashFS + dm-verity)
|
||||||
|
- A/B partition updates with cryptographic signature verification
|
||||||
|
- Service sandboxing via Linux namespaces + seccomp
|
||||||
|
- Single root process: the **orama-agent**
|
||||||
|
|
||||||
|
**The orama-agent manages:**
|
||||||
|
- Boot sequence and LUKS key reconstruction
|
||||||
|
- WireGuard tunnel setup
|
||||||
|
- Service lifecycle in sandboxed namespaces
|
||||||
|
- Command reception from Gateway over WireGuard (port 9998)
|
||||||
|
- OS updates (download, verify, A/B swap, reboot with rollback)
|
||||||
|
|
||||||
|
**Node enrollment:** OramaOS nodes join via `orama node enroll` instead of `orama node install`. The enrollment flow uses a registration code + invite token + wallet verification.
|
||||||
|
|
||||||
|
See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for the full deployment guide.
|
||||||
|
|
||||||
|
Sandbox clusters remain on Ubuntu for development convenience.
|
||||||
|
|
||||||
|
## Future Enhancements
|
||||||
|
|
||||||
|
1. **GraphQL Support** - GraphQL gateway alongside REST
|
||||||
|
2. **gRPC Support** - gRPC protocol support
|
||||||
|
3. **Event Sourcing** - Event-driven architecture
|
||||||
|
4. **Kubernetes Operator** - Native K8s deployment
|
||||||
|
5. **Observability** - OpenTelemetry integration
|
||||||
|
6. **Multi-tenancy** - Enhanced namespace isolation
|
||||||
|
|
||||||
|
## Resources
|
||||||
|
|
||||||
|
- [RQLite Documentation](https://rqlite.io/docs/)
|
||||||
|
- [IPFS Documentation](https://docs.ipfs.tech/)
|
||||||
|
- [LibP2P Documentation](https://docs.libp2p.io/)
|
||||||
|
- [WebAssembly (WASM)](https://webassembly.org/)
|
||||||
151
core/docs/CLEAN_NODE.md
Normal file
151
core/docs/CLEAN_NODE.md
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
# Clean Node — Full Reset Guide
|
||||||
|
|
||||||
|
How to completely remove all Orama Network state from a VPS so it can be reinstalled fresh.
|
||||||
|
|
||||||
|
> **OramaOS nodes:** This guide applies to Ubuntu-based nodes only. OramaOS has no SSH or shell access. To remove an OramaOS node: use `POST /v1/node/leave` via the Gateway API for graceful departure, or reflash the OramaOS image via your VPS provider's dashboard for a factory reset. See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for details.
|
||||||
|
|
||||||
|
## Quick Clean (Copy-Paste)
|
||||||
|
|
||||||
|
Run this as root or with sudo on the target VPS:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Stop and disable all services
|
||||||
|
sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||||
|
sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||||
|
|
||||||
|
# 1b. Kill leftover processes (binaries may run outside systemd)
|
||||||
|
sudo pkill -f orama-node 2>/dev/null; sudo pkill -f ipfs-cluster-service 2>/dev/null
|
||||||
|
sudo pkill -f "ipfs daemon" 2>/dev/null; sudo pkill -f olric-server 2>/dev/null
|
||||||
|
sudo pkill -f rqlited 2>/dev/null; sudo pkill -f coredns 2>/dev/null
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
# 2. Remove systemd service files
|
||||||
|
sudo rm -f /etc/systemd/system/orama-*.service
|
||||||
|
sudo rm -f /etc/systemd/system/coredns.service
|
||||||
|
sudo rm -f /etc/systemd/system/caddy.service
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
# 3. Tear down WireGuard
|
||||||
|
# Must stop the systemd unit first — wg-quick@wg0 is a oneshot with
|
||||||
|
# RemainAfterExit=yes, so it stays "active (exited)" even after the
|
||||||
|
# interface is removed. Without "stop", a future "systemctl start" is a no-op.
|
||||||
|
sudo systemctl stop wg-quick@wg0 2>/dev/null
|
||||||
|
sudo wg-quick down wg0 2>/dev/null
|
||||||
|
sudo systemctl disable wg-quick@wg0 2>/dev/null
|
||||||
|
sudo rm -f /etc/wireguard/wg0.conf
|
||||||
|
|
||||||
|
# 4. Reset UFW firewall
|
||||||
|
sudo ufw --force reset
|
||||||
|
sudo ufw allow 22/tcp
|
||||||
|
sudo ufw --force enable
|
||||||
|
|
||||||
|
# 5. Remove orama data directory
|
||||||
|
sudo rm -rf /opt/orama
|
||||||
|
|
||||||
|
# 6. Remove legacy orama user (if exists from old installs)
|
||||||
|
sudo userdel -r orama 2>/dev/null
|
||||||
|
sudo rm -rf /home/orama
|
||||||
|
sudo rm -f /etc/sudoers.d/orama-access
|
||||||
|
sudo rm -f /etc/sudoers.d/orama-deployments
|
||||||
|
sudo rm -f /etc/sudoers.d/orama-wireguard
|
||||||
|
|
||||||
|
# 7. Remove CoreDNS config
|
||||||
|
sudo rm -rf /etc/coredns
|
||||||
|
|
||||||
|
# 8. Remove Caddy config and data
|
||||||
|
sudo rm -rf /etc/caddy
|
||||||
|
sudo rm -rf /var/lib/caddy
|
||||||
|
|
||||||
|
# 9. Remove deployment systemd services (dynamic)
|
||||||
|
sudo rm -f /etc/systemd/system/orama-deploy-*.service
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
# 10. Clean temp files
|
||||||
|
sudo rm -f /tmp/orama /tmp/network-source.tar.gz /tmp/network-source.zip
|
||||||
|
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build
|
||||||
|
|
||||||
|
echo "Node cleaned. Ready for fresh install."
|
||||||
|
```
|
||||||
|
|
||||||
|
## What This Removes
|
||||||
|
|
||||||
|
| Category | Paths |
|
||||||
|
|----------|-------|
|
||||||
|
| **App data** | `/opt/orama/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) |
|
||||||
|
| **Source code** | `/opt/orama/src/` |
|
||||||
|
| **Binaries** | `/opt/orama/bin/orama-node`, `/opt/orama/bin/gateway` |
|
||||||
|
| **Systemd** | `orama-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` |
|
||||||
|
| **WireGuard** | `/etc/wireguard/wg0.conf`, `wg-quick@wg0` systemd unit |
|
||||||
|
| **Firewall** | All UFW rules (reset to default + SSH only) |
|
||||||
|
| **Legacy** | `orama` user, `/etc/sudoers.d/orama-*` (old installs only) |
|
||||||
|
| **CoreDNS** | `/etc/coredns/Corefile` |
|
||||||
|
| **Caddy** | `/etc/caddy/Caddyfile`, `/var/lib/caddy/` (TLS certs) |
|
||||||
|
| **Anyone Relay** | `orama-anyone-relay.service`, `orama-anyone-client.service` |
|
||||||
|
| **Temp files** | `/tmp/orama`, `/tmp/network-source.*`, build dirs |
|
||||||
|
|
||||||
|
## What This Does NOT Remove
|
||||||
|
|
||||||
|
These are shared system tools that may be used by other software. Remove manually if desired:
|
||||||
|
|
||||||
|
| Binary | Path | Remove Command |
|
||||||
|
|--------|------|----------------|
|
||||||
|
| RQLite | `/usr/local/bin/rqlited` | `sudo rm /usr/local/bin/rqlited` |
|
||||||
|
| IPFS | `/usr/local/bin/ipfs` | `sudo rm /usr/local/bin/ipfs` |
|
||||||
|
| IPFS Cluster | `/usr/local/bin/ipfs-cluster-service` | `sudo rm /usr/local/bin/ipfs-cluster-service` |
|
||||||
|
| Olric | `/usr/local/bin/olric-server` | `sudo rm /usr/local/bin/olric-server` |
|
||||||
|
| CoreDNS | `/usr/local/bin/coredns` | `sudo rm /usr/local/bin/coredns` |
|
||||||
|
| Caddy | `/usr/bin/caddy` | `sudo rm /usr/bin/caddy` |
|
||||||
|
| xcaddy | `/usr/local/bin/xcaddy` | `sudo rm /usr/local/bin/xcaddy` |
|
||||||
|
| Go | `/usr/local/go/` | `sudo rm -rf /usr/local/go` |
|
||||||
|
| Orama CLI | `/usr/local/bin/orama` | `sudo rm /usr/local/bin/orama` |
|
||||||
|
|
||||||
|
## Nuclear Clean (Remove Everything Including Binaries)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run quick clean above first, then:
|
||||||
|
sudo rm -f /usr/local/bin/rqlited
|
||||||
|
sudo rm -f /usr/local/bin/ipfs
|
||||||
|
sudo rm -f /usr/local/bin/ipfs-cluster-service
|
||||||
|
sudo rm -f /usr/local/bin/olric-server
|
||||||
|
sudo rm -f /usr/local/bin/coredns
|
||||||
|
sudo rm -f /usr/local/bin/xcaddy
|
||||||
|
sudo rm -f /usr/bin/caddy
|
||||||
|
sudo rm -f /usr/local/bin/orama
|
||||||
|
```
|
||||||
|
|
||||||
|
## Multi-Node Clean
|
||||||
|
|
||||||
|
To clean all nodes at once from your local machine:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Define your nodes
|
||||||
|
NODES=(
|
||||||
|
"ubuntu@<node1-ip>:<node1-password>"
|
||||||
|
"ubuntu@<node2-ip>:<node2-password>"
|
||||||
|
"ubuntu@<node3-ip>:<node3-password>"
|
||||||
|
)
|
||||||
|
|
||||||
|
for entry in "${NODES[@]}"; do
|
||||||
|
IFS=: read -r userhost pass <<< "$entry"
|
||||||
|
echo "Cleaning $userhost..."
|
||||||
|
sshpass -p "$pass" ssh -o StrictHostKeyChecking=no "$userhost" 'bash -s' << 'CLEAN'
|
||||||
|
sudo systemctl stop orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||||
|
sudo systemctl disable orama-node orama-ipfs orama-ipfs-cluster orama-olric orama-anyone-relay orama-anyone-client coredns caddy 2>/dev/null
|
||||||
|
sudo rm -f /etc/systemd/system/orama-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
sudo systemctl stop wg-quick@wg0 2>/dev/null
|
||||||
|
sudo wg-quick down wg0 2>/dev/null
|
||||||
|
sudo systemctl disable wg-quick@wg0 2>/dev/null
|
||||||
|
sudo rm -f /etc/wireguard/wg0.conf
|
||||||
|
sudo ufw --force reset && sudo ufw allow 22/tcp && sudo ufw --force enable
|
||||||
|
sudo rm -rf /opt/orama
|
||||||
|
sudo userdel -r orama 2>/dev/null
|
||||||
|
sudo rm -rf /home/orama
|
||||||
|
sudo rm -f /etc/sudoers.d/orama-access /etc/sudoers.d/orama-deployments /etc/sudoers.d/orama-wireguard
|
||||||
|
sudo rm -rf /etc/coredns /etc/caddy /var/lib/caddy
|
||||||
|
sudo rm -f /tmp/orama /tmp/network-source.tar.gz /tmp/network-source.zip
|
||||||
|
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build
|
||||||
|
echo "Done"
|
||||||
|
CLEAN
|
||||||
|
done
|
||||||
|
```
|
||||||
546
core/docs/CLIENT_SDK.md
Normal file
546
core/docs/CLIENT_SDK.md
Normal file
@ -0,0 +1,546 @@
|
|||||||
|
# Orama Network Client SDK
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Orama Network Client SDK provides a clean, type-safe Go interface for interacting with the Orama Network. It abstracts away the complexity of HTTP requests, authentication, and error handling.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get github.com/DeBrosOfficial/network/pkg/client
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Create client configuration
|
||||||
|
cfg := client.DefaultClientConfig()
|
||||||
|
cfg.GatewayURL = "https://api.orama.network"
|
||||||
|
cfg.APIKey = "your-api-key-here"
|
||||||
|
|
||||||
|
// Create client
|
||||||
|
c := client.NewNetworkClient(cfg)
|
||||||
|
|
||||||
|
// Use the client
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Upload to storage
|
||||||
|
data := []byte("Hello, Orama!")
|
||||||
|
resp, err := c.Storage().Upload(ctx, data, "hello.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Uploaded: CID=%s\n", resp.CID)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### ClientConfig
|
||||||
|
|
||||||
|
```go
|
||||||
|
type ClientConfig struct {
|
||||||
|
// Gateway URL (e.g., "https://api.orama.network")
|
||||||
|
GatewayURL string
|
||||||
|
|
||||||
|
// Authentication (choose one)
|
||||||
|
APIKey string // API key authentication
|
||||||
|
JWTToken string // JWT token authentication
|
||||||
|
|
||||||
|
// Client options
|
||||||
|
Timeout time.Duration // Request timeout (default: 30s)
|
||||||
|
UserAgent string // Custom user agent
|
||||||
|
|
||||||
|
// Network client namespace
|
||||||
|
Namespace string // Default namespace for operations
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Creating a Client
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Default configuration
|
||||||
|
cfg := client.DefaultClientConfig()
|
||||||
|
cfg.GatewayURL = "https://api.orama.network"
|
||||||
|
cfg.APIKey = "your-api-key"
|
||||||
|
|
||||||
|
c := client.NewNetworkClient(cfg)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authentication
|
||||||
|
|
||||||
|
### API Key Authentication
|
||||||
|
|
||||||
|
```go
|
||||||
|
cfg := client.DefaultClientConfig()
|
||||||
|
cfg.APIKey = "your-api-key-here"
|
||||||
|
c := client.NewNetworkClient(cfg)
|
||||||
|
```
|
||||||
|
|
||||||
|
### JWT Token Authentication
|
||||||
|
|
||||||
|
```go
|
||||||
|
cfg := client.DefaultClientConfig()
|
||||||
|
cfg.JWTToken = "your-jwt-token-here"
|
||||||
|
c := client.NewNetworkClient(cfg)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Obtaining Credentials
|
||||||
|
|
||||||
|
```go
|
||||||
|
// 1. Login with wallet signature (not yet implemented in SDK)
|
||||||
|
// Use the gateway API directly: POST /v1/auth/challenge + /v1/auth/verify
|
||||||
|
|
||||||
|
// 2. Issue API key after authentication
|
||||||
|
// POST /v1/auth/apikey with JWT token
|
||||||
|
```
|
||||||
|
|
||||||
|
## Storage Client
|
||||||
|
|
||||||
|
Upload files to IPFS, and download, pin, and unpin them.
|
||||||
|
|
||||||
|
### Upload File
|
||||||
|
|
||||||
|
```go
|
||||||
|
data := []byte("Hello, World!")
|
||||||
|
resp, err := c.Storage().Upload(ctx, data, "hello.txt")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("CID: %s\n", resp.CID)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upload with Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
opts := &client.StorageUploadOptions{
|
||||||
|
Pin: true, // Pin after upload
|
||||||
|
Encrypt: true, // Encrypt before upload
|
||||||
|
ReplicationFactor: 3, // Number of replicas
|
||||||
|
}
|
||||||
|
resp, err := c.Storage().UploadWithOptions(ctx, data, "file.txt", opts)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get File
|
||||||
|
|
||||||
|
```go
|
||||||
|
cid := "QmXxx..."
|
||||||
|
data, err := c.Storage().Get(ctx, cid)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Downloaded %d bytes\n", len(data))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pin File
|
||||||
|
|
||||||
|
```go
|
||||||
|
cid := "QmXxx..."
|
||||||
|
resp, err := c.Storage().Pin(ctx, cid)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Pinned: %s\n", resp.CID)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Unpin File
|
||||||
|
|
||||||
|
```go
|
||||||
|
cid := "QmXxx..."
|
||||||
|
err := c.Storage().Unpin(ctx, cid)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println("Unpinned successfully")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Pin Status
|
||||||
|
|
||||||
|
```go
|
||||||
|
cid := "QmXxx..."
|
||||||
|
status, err := c.Storage().Status(ctx, cid)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Status: %s, Replicas: %d\n", status.Status, status.Replicas)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cache Client
|
||||||
|
|
||||||
|
Distributed key-value cache using Olric.
|
||||||
|
|
||||||
|
### Set Value
|
||||||
|
|
||||||
|
```go
|
||||||
|
key := "user:123"
|
||||||
|
value := map[string]interface{}{
|
||||||
|
"name": "Alice",
|
||||||
|
"email": "alice@example.com",
|
||||||
|
}
|
||||||
|
ttl := 5 * time.Minute
|
||||||
|
|
||||||
|
err := c.Cache().Set(ctx, key, value, ttl)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get Value
|
||||||
|
|
||||||
|
```go
|
||||||
|
key := "user:123"
|
||||||
|
var user map[string]interface{}
|
||||||
|
err := c.Cache().Get(ctx, key, &user)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("User: %+v\n", user)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Delete Value
|
||||||
|
|
||||||
|
```go
|
||||||
|
key := "user:123"
|
||||||
|
err := c.Cache().Delete(ctx, key)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multi-Get
|
||||||
|
|
||||||
|
```go
|
||||||
|
keys := []string{"user:1", "user:2", "user:3"}
|
||||||
|
results, err := c.Cache().MGet(ctx, keys)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
for key, value := range results {
|
||||||
|
fmt.Printf("%s: %v\n", key, value)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Client
|
||||||
|
|
||||||
|
Query RQLite distributed SQL database.
|
||||||
|
|
||||||
|
### Execute Query (Write)
|
||||||
|
|
||||||
|
```go
|
||||||
|
sql := "INSERT INTO users (name, email) VALUES (?, ?)"
|
||||||
|
args := []interface{}{"Alice", "alice@example.com"}
|
||||||
|
|
||||||
|
result, err := c.Database().Execute(ctx, sql, args...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Inserted %d rows\n", result.RowsAffected)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Query (Read)
|
||||||
|
|
||||||
|
```go
|
||||||
|
sql := "SELECT id, name, email FROM users WHERE id = ?"
|
||||||
|
args := []interface{}{123}
|
||||||
|
|
||||||
|
rows, err := c.Database().Query(ctx, sql, args...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
ID int `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Email string `json:"email"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var users []User
|
||||||
|
for _, row := range rows {
|
||||||
|
var user User
|
||||||
|
// Parse row into user struct
|
||||||
|
// (manual parsing required, or use ORM layer)
|
||||||
|
users = append(users, user)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create Table
|
||||||
|
|
||||||
|
```go
|
||||||
|
schema := `CREATE TABLE IF NOT EXISTS users (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
email TEXT UNIQUE NOT NULL,
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
|
)`
|
||||||
|
|
||||||
|
_, err := c.Database().Execute(ctx, schema)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Transaction
|
||||||
|
|
||||||
|
```go
|
||||||
|
tx, err := c.Database().Begin(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = tx.Execute(ctx, "INSERT INTO users (name) VALUES (?)", "Alice")
|
||||||
|
if err != nil {
|
||||||
|
tx.Rollback(ctx)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = tx.Execute(ctx, "INSERT INTO users (name) VALUES (?)", "Bob")
|
||||||
|
if err != nil {
|
||||||
|
tx.Rollback(ctx)
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Commit(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## PubSub Client
|
||||||
|
|
||||||
|
Publish and subscribe to topics.
|
||||||
|
|
||||||
|
### Publish Message
|
||||||
|
|
||||||
|
```go
|
||||||
|
topic := "chat"
|
||||||
|
message := []byte("Hello, everyone!")
|
||||||
|
|
||||||
|
err := c.PubSub().Publish(ctx, topic, message)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Subscribe to Topic
|
||||||
|
|
||||||
|
```go
|
||||||
|
topic := "chat"
|
||||||
|
handler := func(ctx context.Context, msg []byte) error {
|
||||||
|
fmt.Printf("Received: %s\n", string(msg))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
unsubscribe, err := c.PubSub().Subscribe(ctx, topic, handler)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Later: unsubscribe
|
||||||
|
defer unsubscribe()
|
||||||
|
```
|
||||||
|
|
||||||
|
### List Topics
|
||||||
|
|
||||||
|
```go
|
||||||
|
topics, err := c.PubSub().ListTopics(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Topics: %v\n", topics)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Serverless Client
|
||||||
|
|
||||||
|
Deploy and invoke WebAssembly functions.
|
||||||
|
|
||||||
|
### Deploy Function
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Read WASM file
|
||||||
|
wasmBytes, err := os.ReadFile("function.wasm")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Function definition
|
||||||
|
def := &client.FunctionDefinition{
|
||||||
|
Name: "hello-world",
|
||||||
|
Namespace: "default",
|
||||||
|
Description: "Hello world function",
|
||||||
|
MemoryLimit: 64, // MB
|
||||||
|
Timeout: 30, // seconds
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deploy
|
||||||
|
fn, err := c.Serverless().Deploy(ctx, def, wasmBytes)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Deployed: %s (CID: %s)\n", fn.Name, fn.WASMCID)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Invoke Function
|
||||||
|
|
||||||
|
```go
|
||||||
|
functionName := "hello-world"
|
||||||
|
input := map[string]interface{}{
|
||||||
|
"name": "Alice",
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := c.Serverless().Invoke(ctx, functionName, input)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Result: %s\n", output)
|
||||||
|
```
|
||||||
|
|
||||||
|
### List Functions
|
||||||
|
|
||||||
|
```go
|
||||||
|
functions, err := c.Serverless().List(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
for _, fn := range functions {
|
||||||
|
fmt.Printf("- %s: %s\n", fn.Name, fn.Description)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Delete Function
|
||||||
|
|
||||||
|
```go
|
||||||
|
functionName := "hello-world"
|
||||||
|
err := c.Serverless().Delete(ctx, functionName)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get Function Logs
|
||||||
|
|
||||||
|
```go
|
||||||
|
functionName := "hello-world"
|
||||||
|
logs, err := c.Serverless().GetLogs(ctx, functionName, 100)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
for _, entry := range logs {
|
||||||
|
fmt.Printf("[%s] %s: %s\n", entry.Timestamp, entry.Level, entry.Message)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
All client methods return typed errors that can be checked:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/DeBrosOfficial/network/pkg/errors"
|
||||||
|
|
||||||
|
resp, err := c.Storage().Upload(ctx, data, "file.txt")
|
||||||
|
if err != nil {
|
||||||
|
if errors.IsNotFound(err) {
|
||||||
|
fmt.Println("Resource not found")
|
||||||
|
} else if errors.IsUnauthorized(err) {
|
||||||
|
fmt.Println("Authentication failed")
|
||||||
|
} else if errors.IsValidation(err) {
|
||||||
|
fmt.Println("Validation error")
|
||||||
|
} else {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Advanced Usage
|
||||||
|
|
||||||
|
### Custom Timeout
|
||||||
|
|
||||||
|
```go
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resp, err := c.Storage().Upload(ctx, data, "file.txt")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Retry Logic
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/DeBrosOfficial/network/pkg/errors"
|
||||||
|
|
||||||
|
maxRetries := 3
|
||||||
|
for i := 0; i < maxRetries; i++ {
|
||||||
|
_, err := c.Storage().Upload(ctx, data, "file.txt")
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if !errors.ShouldRetry(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second * time.Duration(i+1))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multiple Namespaces
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Default namespace
|
||||||
|
c1 := client.NewNetworkClient(cfg)
|
||||||
|
c1.Storage().Upload(ctx, data, "file.txt") // Uses default namespace
|
||||||
|
|
||||||
|
// Override namespace per request
|
||||||
|
opts := &client.StorageUploadOptions{
|
||||||
|
Namespace: "custom-namespace",
|
||||||
|
}
|
||||||
|
c1.Storage().UploadWithOptions(ctx, data, "file.txt", opts)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
### Mock Client
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Create a mock client for testing
|
||||||
|
mockClient := &MockNetworkClient{
|
||||||
|
StorageClient: &MockStorageClient{
|
||||||
|
UploadFunc: func(ctx context.Context, data []byte, filename string) (*UploadResponse, error) {
|
||||||
|
return &UploadResponse{CID: "QmMock"}, nil
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use in tests
|
||||||
|
resp, err := mockClient.Storage().Upload(ctx, data, "test.txt")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "QmMock", resp.CID)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
See the `examples/` directory for complete examples:
|
||||||
|
|
||||||
|
- `examples/storage/` - Storage upload/download examples
|
||||||
|
- `examples/cache/` - Cache operations
|
||||||
|
- `examples/database/` - Database queries
|
||||||
|
- `examples/pubsub/` - Pub/sub messaging
|
||||||
|
- `examples/serverless/` - Serverless functions
|
||||||
|
|
||||||
|
## API Reference
|
||||||
|
|
||||||
|
Complete API documentation is available at:
|
||||||
|
- GoDoc: https://pkg.go.dev/github.com/DeBrosOfficial/network/pkg/client
|
||||||
|
- OpenAPI: `openapi/gateway.yaml`
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
- GitHub Issues: https://github.com/DeBrosOfficial/network/issues
|
||||||
|
- Documentation: https://github.com/DeBrosOfficial/network/tree/main/docs
|
||||||
217
core/docs/COMMON_PROBLEMS.md
Normal file
217
core/docs/COMMON_PROBLEMS.md
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
# Common Problems & Solutions
|
||||||
|
|
||||||
|
Troubleshooting guide for known issues in the Orama Network.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. Namespace Gateway: "Olric unavailable"
|
||||||
|
|
||||||
|
**Symptom:** `ns-<name>.orama-devnet.network/v1/health` returns `"olric": {"status": "unavailable"}`.
|
||||||
|
|
||||||
|
**Cause:** The Olric memberlist gossip between namespace nodes is broken. Olric uses UDP pings for health checks — if those fail, the cluster can't bootstrap and the gateway reports Olric as unavailable.
|
||||||
|
|
||||||
|
### Check 1: WireGuard packet loss between nodes
|
||||||
|
|
||||||
|
SSH into each node and ping the other namespace nodes over WireGuard:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ping -c 10 -W 2 10.0.0.X # replace with the WG IP of each peer
|
||||||
|
```
|
||||||
|
|
||||||
|
If you see packet loss over WireGuard but **not** over the public IP (`ping <public-ip>`), the WireGuard peer session is corrupted.
|
||||||
|
|
||||||
|
**Fix — Reset the WireGuard peer on both sides:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On Node A — replace <pubkey> and <endpoint> with Node B's values
|
||||||
|
wg set wg0 peer <NodeB-pubkey> remove
|
||||||
|
wg set wg0 peer <NodeB-pubkey> endpoint <NodeB-public-ip>:51820 allowed-ips <NodeB-wg-ip>/32 persistent-keepalive 25
|
||||||
|
|
||||||
|
# On Node B — same but with Node A's values
|
||||||
|
wg set wg0 peer <NodeA-pubkey> remove
|
||||||
|
wg set wg0 peer <NodeA-pubkey> endpoint <NodeA-public-ip>:51820 allowed-ips <NodeA-wg-ip>/32 persistent-keepalive 25
|
||||||
|
```
|
||||||
|
|
||||||
|
Then restart services: `sudo orama node restart`
|
||||||
|
|
||||||
|
You can find peer public keys with `wg show wg0`.
|
||||||
|
|
||||||
|
### Check 2: Olric bound to 0.0.0.0 instead of WireGuard IP
|
||||||
|
|
||||||
|
Check the Olric config on each node:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cat /opt/orama/.orama/data/namespaces/<name>/configs/olric-*.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
If `bindAddr` is `0.0.0.0`, the node will try to bind to IPv6 on dual-stack hosts, breaking memberlist gossip.
|
||||||
|
|
||||||
|
**Fix:** Edit the YAML to use the node's WireGuard IP (run `ip addr show wg0` to find it), then restart: `sudo orama node restart`
|
||||||
|
|
||||||
|
This was fixed in code (BindAddr validation in `SpawnOlric`), so new namespaces won't have this issue.
|
||||||
|
|
||||||
|
### Check 3: Olric logs show "Failed UDP ping" constantly
|
||||||
|
|
||||||
|
```bash
|
||||||
|
journalctl -u orama-namespace-olric@<name>.service --no-pager -n 30
|
||||||
|
```
|
||||||
|
|
||||||
|
If every UDP ping fails but TCP stream connections succeed, it's the WireGuard packet loss issue (see Check 1).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Namespace Gateway: Missing config fields
|
||||||
|
|
||||||
|
**Symptom:** Gateway config YAML is missing `global_rqlite_dsn`, has `olric_timeout: 0s`, or `olric_servers` only lists `localhost`.
|
||||||
|
|
||||||
|
**Cause:** Before the spawn handler fix, `spawnGatewayRemote()` didn't send `global_rqlite_dsn` or `olric_timeout` to remote nodes.
|
||||||
|
|
||||||
|
**Fix:** Edit the gateway config manually:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim /opt/orama/.orama/data/namespaces/<name>/configs/gateway-*.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
Add/fix:
|
||||||
|
```yaml
|
||||||
|
global_rqlite_dsn: "http://10.0.0.X:10001"
|
||||||
|
olric_timeout: 30s
|
||||||
|
olric_servers:
|
||||||
|
- "10.0.0.X:10002"
|
||||||
|
- "10.0.0.Y:10002"
|
||||||
|
- "10.0.0.Z:10002"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then: `sudo orama node restart`
|
||||||
|
|
||||||
|
This was fixed in code, so new namespaces get the correct config.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Namespace not restoring after restart (missing cluster-state.json)
|
||||||
|
|
||||||
|
**Symptom:** After `orama node restart`, the namespace services don't come back because `RestoreLocalClustersFromDisk` has no state file.
|
||||||
|
|
||||||
|
**Check:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ls /opt/orama/.orama/data/namespaces/<name>/cluster-state.json
|
||||||
|
```
|
||||||
|
|
||||||
|
If the file doesn't exist, the node can't restore the namespace.
|
||||||
|
|
||||||
|
**Fix:** Copy the file from another node that has it, or reconstruct it manually. The format is:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"namespace": "<name>",
|
||||||
|
"rqlite": { "http_port": 10001, "raft_port": 10000, ... },
|
||||||
|
"olric": { "http_port": 10002, "memberlist_port": 10003, ... },
|
||||||
|
"gateway": { "http_port": 10004, ... }
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This was fixed in code — `ProvisionCluster` now saves state to all nodes (including remote ones via the `save-cluster-state` spawn action).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Namespace gateway processes not restarting after upgrade
|
||||||
|
|
||||||
|
**Symptom:** After `orama upgrade --restart` or `orama node restart`, namespace gateway/olric/rqlite services don't start.
|
||||||
|
|
||||||
|
**Cause:** `orama node stop` stops **and disables** the systemd template services (`orama-namespace-gateway@<name>.service`). They have `PartOf=orama-node.service`, but `PartOf=` restart propagation does not bring back services that were left stopped and disabled — so they stay down after the parent restarts.
|
||||||
|
|
||||||
|
**Fix:** Re-enable the services before restarting:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl enable orama-namespace-rqlite@<name>.service
|
||||||
|
systemctl enable orama-namespace-olric@<name>.service
|
||||||
|
systemctl enable orama-namespace-gateway@<name>.service
|
||||||
|
sudo orama node restart
|
||||||
|
```
|
||||||
|
|
||||||
|
This was fixed in code — the upgrade orchestrator now re-enables `@` services before restarting.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. SSH commands eating stdin inside heredocs
|
||||||
|
|
||||||
|
**Symptom:** When running a script that SSHes into multiple nodes inside a heredoc (`<<'EOS'`), only the first SSH command runs — the rest are silently skipped.
|
||||||
|
|
||||||
|
**Cause:** `ssh` reads from stdin, consuming the rest of the heredoc.
|
||||||
|
|
||||||
|
**Fix:** Add `-n` flag to all `ssh` calls inside heredocs:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh -n user@host 'command'
|
||||||
|
```
|
||||||
|
|
||||||
|
`scp` is not affected (doesn't read stdin).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. RQLite returns 401 Unauthorized
|
||||||
|
|
||||||
|
**Symptom:** RQLite queries fail with HTTP 401 after security hardening.
|
||||||
|
|
||||||
|
**Cause:** RQLite now requires basic auth. The client isn't sending credentials.
|
||||||
|
|
||||||
|
**Fix:** Ensure the RQLite client is configured with the credentials from `/opt/orama/.orama/secrets/rqlite-auth.json`. The central RQLite client wrapper (`pkg/rqlite/client.go`) handles this automatically. If using a standalone client (e.g., CoreDNS plugin), ensure it's also configured.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Olric cluster split after upgrade
|
||||||
|
|
||||||
|
**Symptom:** Olric nodes can't gossip after enabling memberlist encryption.
|
||||||
|
|
||||||
|
**Cause:** Olric memberlist encryption is all-or-nothing. Nodes with encryption can't communicate with nodes without it.
|
||||||
|
|
||||||
|
**Fix:** All nodes must be restarted simultaneously when enabling Olric encryption. The cache will be lost (it rebuilds from DB). This is expected — Olric is a cache, not persistent storage.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. OramaOS: LUKS unlock fails
|
||||||
|
|
||||||
|
**Symptom:** OramaOS node can't reconstruct its LUKS key after reboot.
|
||||||
|
|
||||||
|
**Cause:** Not enough peer vault-guardians are online to meet the Shamir threshold (K = max(3, N/3)).
|
||||||
|
|
||||||
|
**Fix:** Ensure enough cluster nodes are online and reachable over WireGuard. The agent retries with exponential backoff. For genesis nodes before 5+ peers exist, use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama node unlock --genesis --node-ip <wg-ip>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9. OramaOS: Enrollment timeout
|
||||||
|
|
||||||
|
**Symptom:** `orama node enroll` hangs or times out.
|
||||||
|
|
||||||
|
**Cause:** The OramaOS node's port 9999 isn't reachable, or the Gateway can't reach the node's WebSocket.
|
||||||
|
|
||||||
|
**Fix:** Check that port 9999 is open in your VPS provider's external firewall (Hetzner firewall, AWS security groups, etc.). OramaOS opens it internally, but provider-level firewalls must be configured separately.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10. Binary signature verification fails
|
||||||
|
|
||||||
|
**Symptom:** `orama node install` rejects the binary archive with a signature error.
|
||||||
|
|
||||||
|
**Cause:** The archive was tampered with, or the manifest.sig file is missing/corrupted.
|
||||||
|
|
||||||
|
**Fix:** Rebuild the archive with `orama build` and re-sign with `make sign` (in the orama-os repo). Ensure you're using the rootwallet that matches the embedded signer address.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## General Debugging Tips
|
||||||
|
|
||||||
|
- **Always use `sudo orama node restart`** instead of raw `systemctl` commands
|
||||||
|
- **Namespace data lives at:** `/opt/orama/.orama/data/namespaces/<name>/`
|
||||||
|
- **Check service logs:** `journalctl -u orama-namespace-olric@<name>.service --no-pager -n 50`
|
||||||
|
- **Check WireGuard:** `wg show wg0` — look for recent handshakes and transfer bytes
|
||||||
|
- **Check gateway health:** `curl http://localhost:<port>/v1/health` from the node itself
|
||||||
|
- **Node IPs:** Check `scripts/remote-nodes.conf` for credentials, `wg show wg0` for WG IPs
|
||||||
|
- **OramaOS nodes:** No SSH access — use Gateway API endpoints (`/v1/node/status`, `/v1/node/logs`) for diagnostics
|
||||||
1041
core/docs/DEPLOYMENT_GUIDE.md
Normal file
1041
core/docs/DEPLOYMENT_GUIDE.md
Normal file
File diff suppressed because it is too large
Load Diff
152
core/docs/DEVNET_INSTALL.md
Normal file
152
core/docs/DEVNET_INSTALL.md
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
# Devnet Installation Commands
|
||||||
|
|
||||||
|
This document contains example installation commands for a multi-node devnet cluster.
|
||||||
|
|
||||||
|
**Wallet:** `<YOUR_WALLET_ADDRESS>`
|
||||||
|
**Contact:** `@anon: <YOUR_WALLET_ADDRESS>`
|
||||||
|
|
||||||
|
## Node Configuration
|
||||||
|
|
||||||
|
| Node | Role | Nameserver | Anyone Relay |
|
||||||
|
|------|------|------------|--------------|
|
||||||
|
| ns1 | Genesis | Yes | No |
|
||||||
|
| ns2 | Nameserver | Yes | Yes (relay-1) |
|
||||||
|
| ns3 | Nameserver | Yes | Yes (relay-2) |
|
||||||
|
| node4 | Worker | No | Yes (relay-3) |
|
||||||
|
| node5 | Worker | No | Yes (relay-4) |
|
||||||
|
| node6 | Worker | No | No |
|
||||||
|
|
||||||
|
**Note:** Store credentials securely (not in version control).
|
||||||
|
|
||||||
|
## MyFamily Fingerprints
|
||||||
|
|
||||||
|
If running multiple Anyone relays, configure MyFamily with all your relay fingerprints:
|
||||||
|
```
|
||||||
|
<FINGERPRINT_1>,<FINGERPRINT_2>,<FINGERPRINT_3>,...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation Order
|
||||||
|
|
||||||
|
Install nodes **one at a time**, waiting for each to complete before starting the next:
|
||||||
|
|
||||||
|
1. ns1 (genesis, no Anyone relay)
|
||||||
|
2. ns2 (nameserver + relay)
|
||||||
|
3. ns3 (nameserver + relay)
|
||||||
|
4. node4 (non-nameserver + relay)
|
||||||
|
5. node5 (non-nameserver + relay)
|
||||||
|
6. node6 (non-nameserver, no relay)
|
||||||
|
|
||||||
|
## ns1 - Genesis Node (No Anyone Relay)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<ns1-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--vps-ip <ns1-ip> \
|
||||||
|
--domain <your-domain.com> \
|
||||||
|
--base-domain <your-domain.com> \
|
||||||
|
--nameserver
|
||||||
|
```
|
||||||
|
|
||||||
|
After ns1 is installed, generate invite tokens:
|
||||||
|
```bash
|
||||||
|
sudo orama node invite --expiry 24h
|
||||||
|
```
|
||||||
|
|
||||||
|
## ns2 - Nameserver + Relay
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<ns2-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--join http://<ns1-ip> --token <TOKEN> \
|
||||||
|
--vps-ip <ns2-ip> \
|
||||||
|
--domain <your-domain.com> \
|
||||||
|
--base-domain <your-domain.com> \
|
||||||
|
--nameserver \
|
||||||
|
--anyone-relay --anyone-migrate \
|
||||||
|
--anyone-nickname <relay-name> \
|
||||||
|
--anyone-wallet <wallet-address> \
|
||||||
|
--anyone-contact "<contact-info>" \
|
||||||
|
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||||
|
```
|
||||||
|
|
||||||
|
## ns3 - Nameserver + Relay
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<ns3-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--join http://<ns1-ip> --token <TOKEN> \
|
||||||
|
--vps-ip <ns3-ip> \
|
||||||
|
--domain <your-domain.com> \
|
||||||
|
--base-domain <your-domain.com> \
|
||||||
|
--nameserver \
|
||||||
|
--anyone-relay --anyone-migrate \
|
||||||
|
--anyone-nickname <relay-name> \
|
||||||
|
--anyone-wallet <wallet-address> \
|
||||||
|
--anyone-contact "<contact-info>" \
|
||||||
|
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||||
|
```
|
||||||
|
|
||||||
|
## node4 - Non-Nameserver + Relay
|
||||||
|
|
||||||
|
Domain is auto-generated (e.g., `node-a3f8k2.<your-domain.com>`). No `--domain` flag needed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<node4-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--join http://<ns1-ip> --token <TOKEN> \
|
||||||
|
--vps-ip <node4-ip> \
|
||||||
|
--base-domain <your-domain.com> \
|
||||||
|
--anyone-relay --anyone-migrate \
|
||||||
|
--anyone-nickname <relay-name> \
|
||||||
|
--anyone-wallet <wallet-address> \
|
||||||
|
--anyone-contact "<contact-info>" \
|
||||||
|
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||||
|
```
|
||||||
|
|
||||||
|
## node5 - Non-Nameserver + Relay
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<node5-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--join http://<ns1-ip> --token <TOKEN> \
|
||||||
|
--vps-ip <node5-ip> \
|
||||||
|
--base-domain <your-domain.com> \
|
||||||
|
--anyone-relay --anyone-migrate \
|
||||||
|
--anyone-nickname <relay-name> \
|
||||||
|
--anyone-wallet <wallet-address> \
|
||||||
|
--anyone-contact "<contact-info>" \
|
||||||
|
--anyone-family "<fingerprint1>,<fingerprint2>,..."
|
||||||
|
```
|
||||||
|
|
||||||
|
## node6 - Non-Nameserver (No Anyone Relay)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH: <user>@<node6-ip>
|
||||||
|
|
||||||
|
sudo orama node install \
|
||||||
|
--join http://<ns1-ip> --token <TOKEN> \
|
||||||
|
--vps-ip <node6-ip> \
|
||||||
|
--base-domain <your-domain.com>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
After all nodes are installed, verify cluster health:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Full cluster report (from local machine)
|
||||||
|
./bin/orama monitor report --env devnet
|
||||||
|
|
||||||
|
# Single node health
|
||||||
|
./bin/orama monitor report --env devnet --node <ip>
|
||||||
|
|
||||||
|
# Or manually from any VPS:
|
||||||
|
curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num_peers'
|
||||||
|
curl -s http://localhost:6001/health
|
||||||
|
systemctl status orama-anyone-relay
|
||||||
|
```
|
||||||
469
core/docs/DEV_DEPLOY.md
Normal file
469
core/docs/DEV_DEPLOY.md
Normal file
@ -0,0 +1,469 @@
|
|||||||
|
# Development Guide
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Go 1.21+
|
||||||
|
- Node.js 18+ (for anyone-client in dev mode)
|
||||||
|
- macOS or Linux
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build all binaries
|
||||||
|
make build
|
||||||
|
|
||||||
|
# Outputs:
|
||||||
|
# bin/orama-node — the node binary
|
||||||
|
# bin/orama — the CLI
|
||||||
|
# bin/gateway — standalone gateway (optional)
|
||||||
|
# bin/identity — identity tool
|
||||||
|
```
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deploying to VPS
|
||||||
|
|
||||||
|
All binaries are pre-compiled locally and shipped as a binary archive. Zero compilation on the VPS.
|
||||||
|
|
||||||
|
### Deploy Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# One-command: build + push + rolling upgrade
|
||||||
|
orama node rollout --env testnet
|
||||||
|
|
||||||
|
# Or step by step:
|
||||||
|
|
||||||
|
# 1. Build binary archive (cross-compiles all binaries for linux/amd64)
|
||||||
|
orama build
|
||||||
|
# Creates: /tmp/orama-<version>-linux-amd64.tar.gz
|
||||||
|
|
||||||
|
# 2. Push archive to all nodes (fanout via hub node)
|
||||||
|
orama node push --env testnet
|
||||||
|
|
||||||
|
# 3. Rolling upgrade (one node at a time, followers first, leader last)
|
||||||
|
orama node upgrade --env testnet
|
||||||
|
```
|
||||||
|
|
||||||
|
### Fresh Node Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the archive first (if not already built)
|
||||||
|
orama build
|
||||||
|
|
||||||
|
# Install on a new VPS (auto-uploads binary archive, zero compilation)
|
||||||
|
orama node install --vps-ip <ip> --nameserver --domain <domain> --base-domain <domain>
|
||||||
|
```
|
||||||
|
|
||||||
|
The installer auto-detects the binary archive at `/opt/orama/manifest.json` and copies pre-built binaries instead of compiling from source.
|
||||||
|
|
||||||
|
### Upgrading a Multi-Node Cluster (CRITICAL)
|
||||||
|
|
||||||
|
**NEVER restart all nodes simultaneously.** RQLite uses Raft consensus and requires a majority (quorum) to function.
|
||||||
|
|
||||||
|
#### Safe Upgrade Procedure
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Full rollout (build + push + rolling upgrade, one command)
|
||||||
|
orama node rollout --env testnet
|
||||||
|
|
||||||
|
# Or with more control:
|
||||||
|
orama node push --env testnet # Push archive to all nodes
|
||||||
|
orama node upgrade --env testnet # Rolling upgrade (auto-detects leader)
|
||||||
|
orama node upgrade --env testnet --node 1.2.3.4 # Single node only
|
||||||
|
orama node upgrade --env testnet --delay 60 # 60s between nodes
|
||||||
|
```
|
||||||
|
|
||||||
|
The rolling upgrade automatically:
|
||||||
|
1. Upgrades **follower** nodes first
|
||||||
|
2. Upgrades the **leader** last
|
||||||
|
3. Waits a configurable delay between nodes (default: 30s)
|
||||||
|
|
||||||
|
After each node, verify health:
|
||||||
|
```bash
|
||||||
|
orama monitor report --env testnet
|
||||||
|
```
|
||||||
|
|
||||||
|
#### What NOT to Do
|
||||||
|
|
||||||
|
- **DON'T** stop all nodes, replace binaries, then start all nodes
|
||||||
|
- **DON'T** run `orama node upgrade --restart` on multiple nodes in parallel
|
||||||
|
- **DON'T** clear RQLite data directories unless doing a full cluster rebuild
|
||||||
|
- **DON'T** use `systemctl stop orama-node` on multiple nodes simultaneously
|
||||||
|
|
||||||
|
#### Recovery from Cluster Split
|
||||||
|
|
||||||
|
If nodes get stuck in "Candidate" state or show "leader not found" errors:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Recover the Raft cluster (specify the node with highest commit index as leader)
|
||||||
|
orama node recover-raft --env testnet --leader 1.2.3.4
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
1. Stop orama-node on ALL nodes
|
||||||
|
2. Backup + delete raft/ on non-leader nodes
|
||||||
|
3. Start the leader, wait for Leader state
|
||||||
|
4. Start remaining nodes in batches
|
||||||
|
5. Verify cluster health
|
||||||
|
|
||||||
|
### Cleaning Nodes for Reinstallation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Wipe all data and services (preserves Anyone relay keys)
|
||||||
|
orama node clean --env testnet --force
|
||||||
|
|
||||||
|
# Also remove shared binaries (rqlited, ipfs, caddy, etc.)
|
||||||
|
orama node clean --env testnet --nuclear --force
|
||||||
|
|
||||||
|
# Single node only
|
||||||
|
orama node clean --env testnet --node 1.2.3.4 --force
|
||||||
|
```
|
||||||
|
|
||||||
|
### Push Options
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama node push --env devnet # Fanout via hub (default, fastest)
|
||||||
|
orama node push --env testnet --node 1.2.3.4 # Single node
|
||||||
|
orama node push --env testnet --direct # Sequential, no fanout
|
||||||
|
```
|
||||||
|
|
||||||
|
### CLI Flags Reference
|
||||||
|
|
||||||
|
#### `orama node install`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--vps-ip <ip>` | VPS public IP address (required) |
|
||||||
|
| `--domain <domain>` | Domain for HTTPS certificates. Required for nameserver nodes (use the base domain, e.g., `example.com`). Auto-generated for non-nameserver nodes if omitted (e.g., `node-a3f8k2.example.com`) |
|
||||||
|
| `--base-domain <domain>` | Base domain for deployment routing (e.g., example.com) |
|
||||||
|
| `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) |
|
||||||
|
| `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) |
|
||||||
|
| `--token <token>` | Invite token for joining (from `orama node invite` on existing node) |
|
||||||
|
| `--force` | Force reconfiguration even if already installed |
|
||||||
|
| `--skip-firewall` | Skip UFW firewall setup |
|
||||||
|
| `--skip-checks` | Skip minimum resource checks (RAM/CPU) |
|
||||||
|
| `--anyone-relay` | Install and configure an Anyone relay on this node |
|
||||||
|
| `--anyone-migrate` | Migrate existing Anyone relay installation (preserves keys/fingerprint) |
|
||||||
|
| `--anyone-nickname <name>` | Relay nickname (required for relay mode) |
|
||||||
|
| `--anyone-wallet <addr>` | Ethereum wallet for relay rewards (required for relay mode) |
|
||||||
|
| `--anyone-contact <info>` | Contact info for relay (required for relay mode) |
|
||||||
|
| `--anyone-family <fps>` | Comma-separated fingerprints of related relays (MyFamily) |
|
||||||
|
| `--anyone-orport <port>` | ORPort for relay (default: 9001) |
|
||||||
|
| `--anyone-exit` | Configure as an exit relay (default: non-exit) |
|
||||||
|
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited). Runs a speedtest during install to measure available bandwidth |
|
||||||
|
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
|
||||||
|
|
||||||
|
#### `orama node invite`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--expiry <duration>` | Token expiry duration (default: 1h, e.g. `--expiry 24h`) |
|
||||||
|
|
||||||
|
**Important notes about invite tokens:**
|
||||||
|
|
||||||
|
- **Tokens are single-use.** Once a node consumes a token during the join handshake, it cannot be reused. Generate a separate token for each node you want to join.
|
||||||
|
- **Expiry is checked in UTC.** RQLite uses `datetime('now')` which is always UTC. If your local timezone differs, account for the offset when choosing expiry durations.
|
||||||
|
- **Use longer expiry for multi-node deployments.** When deploying multiple nodes, use `--expiry 24h` to avoid tokens expiring mid-deployment.
|
||||||
|
|
||||||
|
#### `orama node upgrade`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--restart` | Restart all services after upgrade (local mode) |
|
||||||
|
| `--env <env>` | Target environment for remote rolling upgrade |
|
||||||
|
| `--node <ip>` | Upgrade a single node only |
|
||||||
|
| `--delay <seconds>` | Delay between nodes during rolling upgrade (default: 30) |
|
||||||
|
| `--anyone-relay` | Enable Anyone relay (same flags as install) |
|
||||||
|
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited) |
|
||||||
|
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
|
||||||
|
|
||||||
|
#### `orama build`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--arch <arch>` | Target architecture (default: amd64) |
|
||||||
|
| `--output <path>` | Output archive path |
|
||||||
|
| `--verbose` | Verbose build output |
|
||||||
|
|
||||||
|
#### `orama node push`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--env <env>` | Target environment (required) |
|
||||||
|
| `--node <ip>` | Push to a single node only |
|
||||||
|
| `--direct` | Sequential upload (no hub fanout) |
|
||||||
|
|
||||||
|
#### `orama node rollout`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--env <env>` | Target environment (required) |
|
||||||
|
| `--no-build` | Skip the build step |
|
||||||
|
| `--yes` | Skip confirmation |
|
||||||
|
| `--delay <seconds>` | Delay between nodes (default: 30) |
|
||||||
|
|
||||||
|
#### `orama node clean`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--env <env>` | Target environment (required) |
|
||||||
|
| `--node <ip>` | Clean a single node only |
|
||||||
|
| `--nuclear` | Also remove shared binaries |
|
||||||
|
| `--force` | Skip confirmation (DESTRUCTIVE) |
|
||||||
|
|
||||||
|
#### `orama node recover-raft`
|
||||||
|
|
||||||
|
| Flag | Description |
|
||||||
|
|------|-------------|
|
||||||
|
| `--env <env>` | Target environment (required) |
|
||||||
|
| `--leader <ip>` | Leader node IP — highest commit index (required) |
|
||||||
|
| `--force` | Skip confirmation (DESTRUCTIVE) |
|
||||||
|
|
||||||
|
#### `orama node` (Service Management)
|
||||||
|
|
||||||
|
Use these commands to manage services on production nodes:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop all services (orama-node, coredns, caddy)
|
||||||
|
sudo orama node stop
|
||||||
|
|
||||||
|
# Start all services
|
||||||
|
sudo orama node start
|
||||||
|
|
||||||
|
# Restart all services
|
||||||
|
sudo orama node restart
|
||||||
|
|
||||||
|
# Check service status
|
||||||
|
sudo orama node status
|
||||||
|
|
||||||
|
# Diagnose common issues
|
||||||
|
sudo orama node doctor
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Always use `orama node stop` instead of manually running `systemctl stop`. The CLI ensures all related services (including CoreDNS and Caddy on nameserver nodes) are handled correctly.
|
||||||
|
|
||||||
|
#### `orama node report`
|
||||||
|
|
||||||
|
Outputs comprehensive health data as JSON. Used by `orama monitor` over SSH:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo orama node report --json
|
||||||
|
```
|
||||||
|
|
||||||
|
See [MONITORING.md](MONITORING.md) for full details.
|
||||||
|
|
||||||
|
#### `orama monitor`
|
||||||
|
|
||||||
|
Real-time cluster monitoring from your local machine:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Interactive TUI
|
||||||
|
orama monitor --env testnet
|
||||||
|
|
||||||
|
# Cluster overview
|
||||||
|
orama monitor cluster --env testnet
|
||||||
|
|
||||||
|
# Alerts only
|
||||||
|
orama monitor alerts --env testnet
|
||||||
|
|
||||||
|
# Full JSON for LLM analysis
|
||||||
|
orama monitor report --env testnet
|
||||||
|
```
|
||||||
|
|
||||||
|
See [MONITORING.md](MONITORING.md) for all subcommands and flags.
|
||||||
|
|
||||||
|
### Node Join Flow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Genesis node (first node, creates cluster)
|
||||||
|
# Nameserver nodes use the base domain as --domain
|
||||||
|
sudo orama node install --vps-ip 1.2.3.4 --domain example.com \
|
||||||
|
--base-domain example.com --nameserver
|
||||||
|
|
||||||
|
# 2. On genesis node, generate an invite
|
||||||
|
orama node invite --expiry 24h
|
||||||
|
# Output: sudo orama node install --join https://example.com --token <TOKEN> --vps-ip <IP>
|
||||||
|
|
||||||
|
# 3a. Join as nameserver (requires --domain set to base domain)
|
||||||
|
sudo orama node install --join http://1.2.3.4 --token abc123... \
|
||||||
|
--vps-ip 5.6.7.8 --domain example.com --base-domain example.com --nameserver
|
||||||
|
|
||||||
|
# 3b. Join as regular node (domain auto-generated, no --domain needed)
|
||||||
|
sudo orama node install --join http://1.2.3.4 --token abc123... \
|
||||||
|
--vps-ip 5.6.7.8 --base-domain example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
The join flow establishes a WireGuard VPN tunnel before starting cluster services.
|
||||||
|
All inter-node communication (RQLite, IPFS, Olric) uses WireGuard IPs (10.0.0.x).
|
||||||
|
No cluster ports are ever exposed publicly.
|
||||||
|
|
||||||
|
#### DNS Prerequisite
|
||||||
|
|
||||||
|
The `--join` URL should use the HTTPS domain of the genesis node (e.g., `https://node1.example.com`).
|
||||||
|
For this to work, the domain registrar for `example.com` must have NS records pointing to the genesis
|
||||||
|
node's IP so that `node1.example.com` resolves publicly.
|
||||||
|
|
||||||
|
**If DNS is not yet configured**, you can use the genesis node's public IP with HTTP as a fallback:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo orama node install --join http://1.2.3.4 --vps-ip 5.6.7.8 --token abc123... --nameserver
|
||||||
|
```
|
||||||
|
|
||||||
|
This works because Caddy's `:80` block proxies all HTTP traffic to the gateway. However, once DNS
|
||||||
|
is properly configured, always use the HTTPS domain URL.
|
||||||
|
|
||||||
|
**Important:** Never use `http://<ip>:6001` — port 6001 is the internal gateway and is blocked by
|
||||||
|
UFW from external access. The join request goes through Caddy on port 80 (HTTP) or 443 (HTTPS),
|
||||||
|
which proxies to the gateway internally.
|
||||||
|
|
||||||
|
## OramaOS Enrollment
|
||||||
|
|
||||||
|
For OramaOS nodes (mainnet, devnet, testnet), use the enrollment flow instead of `orama node install`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Flash OramaOS image to VPS (via provider dashboard)
|
||||||
|
# 2. Generate invite token on existing cluster node
|
||||||
|
orama node invite --expiry 24h
|
||||||
|
|
||||||
|
# 3. Enroll the OramaOS node
|
||||||
|
orama node enroll --node-ip <vps-public-ip> --token <invite-token> --gateway <gateway-url>
|
||||||
|
|
||||||
|
# 4. For genesis node reboots (before 5+ peers exist)
|
||||||
|
orama node unlock --genesis --node-ip <wg-ip>
|
||||||
|
```
|
||||||
|
|
||||||
|
OramaOS nodes have no SSH access. All management happens through the Gateway API:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Status, logs, commands — all via Gateway proxy
|
||||||
|
curl "https://gateway.example.com/v1/node/status?node_id=<id>"
|
||||||
|
curl "https://gateway.example.com/v1/node/logs?node_id=<id>&service=gateway"
|
||||||
|
```
|
||||||
|
|
||||||
|
See [ORAMAOS_DEPLOYMENT.md](ORAMAOS_DEPLOYMENT.md) for the full guide.
|
||||||
|
|
||||||
|
**Note:** `orama node clean` does not work on OramaOS nodes (no SSH). Use `orama node leave` for graceful departure, or reflash the image for a factory reset.
|
||||||
|
|
||||||
|
## Pre-Install Checklist (Ubuntu Only)
|
||||||
|
|
||||||
|
Before running `orama node install` on a VPS, ensure:
|
||||||
|
|
||||||
|
1. **Stop Docker if running.** Docker commonly binds ports 4001 and 8080 which conflict with IPFS. The installer checks for port conflicts and shows which process is using each port, but it's easier to stop Docker first:
|
||||||
|
```bash
|
||||||
|
sudo systemctl stop docker docker.socket
|
||||||
|
sudo systemctl disable docker docker.socket
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Stop any existing IPFS instance.**
|
||||||
|
```bash
|
||||||
|
sudo systemctl stop ipfs
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
|
||||||
|
|
||||||
|
## Recovering from Failed Joins
|
||||||
|
|
||||||
|
If a node partially joins the cluster (registers in RQLite's Raft but then fails or gets cleaned), the remaining cluster can lose quorum permanently. This happens because RQLite thinks there are N voters but only N-1 are reachable.
|
||||||
|
|
||||||
|
**Symptoms:** RQLite stuck in "Candidate" state, no leader elected, all writes fail.
|
||||||
|
|
||||||
|
**Solution:** Do a full clean reinstall of all affected nodes. Use [CLEAN_NODE.md](CLEAN_NODE.md) to reset each node, then reinstall starting from the genesis node.
|
||||||
|
|
||||||
|
**Prevention:** Always ensure a joining node can complete the full installation before it joins. The installer validates port availability upfront to catch conflicts early.
|
||||||
|
|
||||||
|
## Debugging Production Issues
|
||||||
|
|
||||||
|
Always follow the local-first approach:
|
||||||
|
|
||||||
|
1. **Reproduce locally** — set up the same conditions on your machine
|
||||||
|
2. **Find the root cause** — understand why it's happening
|
||||||
|
3. **Fix in the codebase** — make changes to the source code
|
||||||
|
4. **Test locally** — run `make test` and verify
|
||||||
|
5. **Deploy** — only then deploy the fix to production
|
||||||
|
|
||||||
|
Never fix issues directly on the server — those fixes are lost on next deployment.
|
||||||
|
|
||||||
|
## Trusting the Self-Signed TLS Certificate
|
||||||
|
|
||||||
|
When Let's Encrypt is rate-limited, Caddy falls back to its internal CA (self-signed certificates). Browsers will show security warnings unless you install the root CA certificate.
|
||||||
|
|
||||||
|
### Downloading the Root CA Certificate
|
||||||
|
|
||||||
|
From VPS 1 (or any node), copy the certificate:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy the cert to an accessible location on the VPS
|
||||||
|
ssh ubuntu@<VPS_IP> "sudo cp /var/lib/caddy/.local/share/caddy/pki/authorities/local/root.crt /tmp/caddy-root-ca.crt && sudo chmod 644 /tmp/caddy-root-ca.crt"
|
||||||
|
|
||||||
|
# Download to your local machine
|
||||||
|
scp ubuntu@<VPS_IP>:/tmp/caddy-root-ca.crt ~/Downloads/caddy-root-ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
### macOS
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ~/Downloads/caddy-root-ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
This adds the cert system-wide. All browsers (Safari, Chrome, Arc, etc.) will trust it immediately. Firefox uses its own certificate store — go to **Settings > Privacy & Security > Certificates > View Certificates > Import** and import the `.crt` file there.
|
||||||
|
|
||||||
|
To remove it later:
|
||||||
|
```bash
|
||||||
|
sudo security remove-trusted-cert -d ~/Downloads/caddy-root-ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
### iOS (iPhone/iPad)
|
||||||
|
|
||||||
|
1. Transfer `caddy-root-ca.crt` to your device (AirDrop, email attachment, or host it at a URL)
|
||||||
|
2. Open the file — iOS will show "Profile Downloaded"
|
||||||
|
3. Go to **Settings > General > VPN & Device Management** (or "Profiles" on older iOS)
|
||||||
|
4. Tap the "Caddy Local Authority" profile and tap **Install**
|
||||||
|
5. Go to **Settings > General > About > Certificate Trust Settings**
|
||||||
|
6. Enable **full trust** for "Caddy Local Authority - 2026 ECC Root"
|
||||||
|
|
||||||
|
### Android
|
||||||
|
|
||||||
|
1. Transfer `caddy-root-ca.crt` to your device
|
||||||
|
2. Go to **Settings > Security > Encryption & Credentials > Install a certificate > CA certificate**
|
||||||
|
3. Select the `caddy-root-ca.crt` file
|
||||||
|
4. Confirm the installation
|
||||||
|
|
||||||
|
Note: On Android 7+, user-installed CA certificates are only trusted by apps that explicitly opt in. Chrome will trust it, but some apps may not.
|
||||||
|
|
||||||
|
### Windows
|
||||||
|
|
||||||
|
```powershell
|
||||||
|
certutil -addstore -f "ROOT" caddy-root-ca.crt
|
||||||
|
```
|
||||||
|
|
||||||
|
Or double-click the `.crt` file > **Install Certificate** > **Local Machine** > **Place in "Trusted Root Certification Authorities"**.
|
||||||
|
|
||||||
|
### Linux
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo cp caddy-root-ca.crt /usr/local/share/ca-certificates/caddy-root-ca.crt
|
||||||
|
sudo update-ca-certificates
|
||||||
|
```
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
See [ARCHITECTURE.md](ARCHITECTURE.md) for the full architecture overview.
|
||||||
|
|
||||||
|
Key directories:
|
||||||
|
|
||||||
|
```
|
||||||
|
cmd/
|
||||||
|
cli/ — CLI entry point (orama command)
|
||||||
|
node/ — Node entry point (orama-node)
|
||||||
|
gateway/ — Standalone gateway entry point
|
||||||
|
pkg/
|
||||||
|
cli/ — CLI command implementations
|
||||||
|
gateway/ — HTTP gateway, routes, middleware
|
||||||
|
deployments/ — Deployment types, service, storage
|
||||||
|
environments/ — Production (systemd) and development (direct) modes
|
||||||
|
rqlite/ — Distributed SQLite via RQLite
|
||||||
|
```
|
||||||
213
core/docs/INSPECTOR.md
Normal file
213
core/docs/INSPECTOR.md
Normal file
@ -0,0 +1,213 @@
|
|||||||
|
# Inspector
|
||||||
|
|
||||||
|
The inspector is a cluster health check tool that SSHs into every node, collects subsystem data in parallel, runs deterministic checks, and optionally sends failures to an AI model for root-cause analysis.
|
||||||
|
|
||||||
|
## Pipeline
|
||||||
|
|
||||||
|
```
|
||||||
|
Collect (parallel SSH) → Check (deterministic Go) → Report (table/JSON) → Analyze (optional AI)
|
||||||
|
```
|
||||||
|
|
||||||
|
1. **Collect** — SSH into every node in parallel, run diagnostic commands, parse results into structured data.
|
||||||
|
2. **Check** — Run pure Go check functions against the collected data. Each check produces a pass/fail/warn/skip result with a severity level.
|
||||||
|
3. **Report** — Print results as a table (default) or JSON. Failures sort first, grouped by subsystem.
|
||||||
|
4. **Analyze** — If `--ai` is enabled and there are failures or warnings, send them to an LLM via OpenRouter for root-cause analysis.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Inspect all subsystems on devnet
|
||||||
|
orama inspect --env devnet
|
||||||
|
|
||||||
|
# Inspect only RQLite
|
||||||
|
orama inspect --env devnet --subsystem rqlite
|
||||||
|
|
||||||
|
# JSON output
|
||||||
|
orama inspect --env devnet --format json
|
||||||
|
|
||||||
|
# With AI analysis
|
||||||
|
orama inspect --env devnet --ai
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
orama inspect [flags]
|
||||||
|
```
|
||||||
|
|
||||||
|
| Flag | Default | Description |
|
||||||
|
|------|---------|-------------|
|
||||||
|
| `--config` | `scripts/remote-nodes.conf` | Path to node configuration file |
|
||||||
|
| `--env` | *(required)* | Environment to inspect (`devnet`, `testnet`) |
|
||||||
|
| `--subsystem` | `all` | Comma-separated subsystems to inspect |
|
||||||
|
| `--format` | `table` | Output format: `table` or `json` |
|
||||||
|
| `--timeout` | `30s` | SSH command timeout per node |
|
||||||
|
| `--verbose` | `false` | Print collection progress |
|
||||||
|
| `--ai` | `false` | Enable AI analysis of failures |
|
||||||
|
| `--model` | `moonshotai/kimi-k2.5` | OpenRouter model for AI analysis |
|
||||||
|
| `--api-key` | `$OPENROUTER_API_KEY` | OpenRouter API key |
|
||||||
|
|
||||||
|
### Subsystem Names
|
||||||
|
|
||||||
|
`rqlite`, `olric`, `ipfs`, `dns`, `wireguard` (alias: `wg`), `system`, `network`, `namespace`
|
||||||
|
|
||||||
|
Multiple subsystems can be combined: `--subsystem rqlite,olric,dns`
|
||||||
|
|
||||||
|
## Subsystems
|
||||||
|
|
||||||
|
| Subsystem | What It Checks |
|
||||||
|
|-----------|---------------|
|
||||||
|
| **rqlite** | Raft state, leader election, readyz, commit/applied gap, FSM pending, strong reads, debug vars (query errors, leader_not_found, snapshots), cross-node leader agreement, term consistency, applied index convergence, quorum, version match |
|
||||||
|
| **olric** | Service active, memberlist up, restart count, memory usage, log analysis (suspects, flapping, errors), cross-node memberlist consistency |
|
||||||
|
| **ipfs** | Daemon active, cluster active, swarm peer count, cluster peer count, cluster errors, repo usage %, swarm key present, bootstrap list empty, cross-node version consistency |
|
||||||
|
| **dns** | CoreDNS active, Caddy active, ports (53/80/443), memory, restart count, log errors, Corefile exists, SOA/NS/wildcard/base-A resolution, TLS cert expiry, cross-node nameserver availability |
|
||||||
|
| **wireguard** | Interface up, service active, correct 10.0.0.x IP, listen port 51820, peer count vs expected, MTU 1420, config exists + permissions 600, peer handshakes (fresh/stale/never), peer traffic, catch-all route detection, cross-node peer count + MTU consistency |
|
||||||
|
| **system** | Core services (orama-node, rqlite, olric, ipfs, ipfs-cluster, wg-quick), nameserver services (coredns, caddy), failed systemd units, memory/disk/inode usage, load average, OOM kills, swap, UFW active, process user (orama), panic count, expected ports |
|
||||||
|
| **network** | Internet reachability, default route, WireGuard route, TCP connection count, TIME_WAIT count, TCP retransmission rate, WireGuard mesh ping (all peers) |
|
||||||
|
| **namespace** | Per-namespace: RQLite up + raft state + readyz, Olric memberlist, Gateway HTTP health. Cross-namespace: all-healthy check, RQLite quorum per namespace |
|
||||||
|
|
||||||
|
## Severity Levels
|
||||||
|
|
||||||
|
| Level | When Used |
|
||||||
|
|-------|-----------|
|
||||||
|
| **CRITICAL** | Service completely down. Raft quorum lost, RQLite unresponsive, no leader. |
|
||||||
|
| **HIGH** | Service degraded. Olric down, gateway not responding, IPFS swarm key missing. |
|
||||||
|
| **MEDIUM** | Non-ideal but functional. Stale handshakes, elevated memory, log suspects. |
|
||||||
|
| **LOW** | Informational. Non-standard MTU, port mismatch, version skew. |
|
||||||
|
|
||||||
|
## Check Statuses
|
||||||
|
|
||||||
|
| Status | Meaning |
|
||||||
|
|--------|---------|
|
||||||
|
| **pass** | Check passed. |
|
||||||
|
| **fail** | Check failed — action needed. |
|
||||||
|
| **warn** | Degraded — monitor or investigate. |
|
||||||
|
| **skip** | Check could not run (insufficient data). |
|
||||||
|
|
||||||
|
## Output Formats
|
||||||
|
|
||||||
|
### Table (default)
|
||||||
|
|
||||||
|
```
|
||||||
|
Inspecting 14 devnet nodes...
|
||||||
|
|
||||||
|
## RQLITE
|
||||||
|
----------------------------------------------------------------------
|
||||||
|
OK [CRITICAL] RQLite responding (ubuntu@10.0.0.1)
|
||||||
|
responsive=true version=v8.36.16
|
||||||
|
FAIL [CRITICAL] Cluster has exactly one leader
|
||||||
|
leaders=0 (NO LEADER)
|
||||||
|
...
|
||||||
|
|
||||||
|
======================================================================
|
||||||
|
Summary: 800 passed, 12 failed, 31 warnings, 0 skipped (4.2s)
|
||||||
|
```
|
||||||
|
|
||||||
|
Failures sort first, then warnings, then passes. Within each group, higher severity checks appear first.
|
||||||
|
|
||||||
|
### JSON (`--format json`)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"summary": {
|
||||||
|
"passed": 800,
|
||||||
|
"failed": 12,
|
||||||
|
"warned": 31,
|
||||||
|
"skipped": 0,
|
||||||
|
"total": 843,
|
||||||
|
"duration_seconds": 4.2
|
||||||
|
},
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"id": "rqlite.responsive",
|
||||||
|
"name": "RQLite responding",
|
||||||
|
"subsystem": "rqlite",
|
||||||
|
"severity": 3,
|
||||||
|
"status": "pass",
|
||||||
|
"message": "responsive=true version=v8.36.16",
|
||||||
|
"node": "ubuntu@10.0.0.1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## AI Analysis
|
||||||
|
|
||||||
|
When `--ai` is enabled, failures and warnings are sent to an LLM via OpenRouter for root-cause analysis.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use default model (kimi-k2.5)
|
||||||
|
orama inspect --env devnet --ai
|
||||||
|
|
||||||
|
# Use a different model
|
||||||
|
orama inspect --env devnet --ai --model openai/gpt-4o
|
||||||
|
|
||||||
|
# Pass API key directly
|
||||||
|
orama inspect --env devnet --ai --api-key sk-or-...
|
||||||
|
```
|
||||||
|
|
||||||
|
The API key can be set via:
|
||||||
|
1. `--api-key` flag
|
||||||
|
2. `OPENROUTER_API_KEY` environment variable
|
||||||
|
3. `.env` file in the current directory
|
||||||
|
|
||||||
|
The AI receives the full check results plus cluster metadata and returns a structured analysis with likely root causes and suggested fixes.
|
||||||
|
|
||||||
|
## Exit Codes
|
||||||
|
|
||||||
|
| Code | Meaning |
|
||||||
|
|------|---------|
|
||||||
|
| `0` | All checks passed (or only warnings). |
|
||||||
|
| `1` | At least one check failed. |
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
The inspector reads node definitions from a pipe-delimited config file (default: `scripts/remote-nodes.conf`).
|
||||||
|
|
||||||
|
### Format
|
||||||
|
|
||||||
|
```
|
||||||
|
# environment|user@host|role
|
||||||
|
devnet|ubuntu@1.2.3.4|node
|
||||||
|
devnet|ubuntu@5.6.7.8|nameserver-ns1
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|-------|-------------|
|
||||||
|
| `environment` | Cluster name (`devnet`, `testnet`) |
|
||||||
|
| `user@host` | SSH credentials |
|
||||||
|
| `role` | `node` or `nameserver-ns1`, `nameserver-ns2`, etc. |
|
||||||
|
|
||||||
|
SSH keys are resolved from rootwallet (`rw vault ssh get <host>/<user> --priv`).
|
||||||
|
|
||||||
|
Blank lines and lines starting with `#` are ignored.
|
||||||
|
|
||||||
|
### Node Roles
|
||||||
|
|
||||||
|
- **`node`** — Regular cluster node. Runs RQLite, Olric, IPFS, WireGuard, namespaces.
|
||||||
|
- **`nameserver-*`** — DNS nameserver. Runs CoreDNS + Caddy in addition to base services. System checks verify nameserver-specific services.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Full cluster inspection
|
||||||
|
orama inspect --env devnet
|
||||||
|
|
||||||
|
# Check only networking
|
||||||
|
orama inspect --env devnet --subsystem wireguard,network
|
||||||
|
|
||||||
|
# Quick RQLite health check
|
||||||
|
orama inspect --env devnet --subsystem rqlite
|
||||||
|
|
||||||
|
# Verbose mode (shows collection progress)
|
||||||
|
orama inspect --env devnet --verbose
|
||||||
|
|
||||||
|
# JSON for scripting / piping
|
||||||
|
orama inspect --env devnet --format json | jq '.checks[] | select(.status == "fail")'
|
||||||
|
|
||||||
|
# AI-assisted debugging
|
||||||
|
orama inspect --env devnet --ai --model anthropic/claude-sonnet-4
|
||||||
|
|
||||||
|
# Custom config file
|
||||||
|
orama inspect --config /path/to/nodes.conf --env testnet
|
||||||
|
```
|
||||||
278
core/docs/MONITORING.md
Normal file
278
core/docs/MONITORING.md
Normal file
@ -0,0 +1,278 @@
|
|||||||
|
# Monitoring
|
||||||
|
|
||||||
|
Real-time cluster health monitoring via SSH. The system has two parts:
|
||||||
|
|
||||||
|
1. **`orama node report`** — Runs on each VPS node, collects all local health data, outputs JSON
|
||||||
|
2. **`orama monitor`** — Runs on your local machine, SSHes into nodes, aggregates results, displays via TUI or tables
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
Developer Machine VPS Nodes (via SSH)
|
||||||
|
┌──────────────────┐ ┌────────────────────┐
|
||||||
|
│ orama monitor │ ──SSH──────────>│ orama node report │
|
||||||
|
│ (TUI / tables) │ <──JSON─────── │ (local collector) │
|
||||||
|
│ │ └────────────────────┘
|
||||||
|
│ CollectOnce() │ ──SSH──────────>│ orama node report │
|
||||||
|
│ DeriveAlerts() │ <──JSON─────── │ (local collector) │
|
||||||
|
│ Render() │ └────────────────────┘
|
||||||
|
└──────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
Each node runs `orama node report --json` locally (no SSH to other nodes), collecting data via `os/exec` and `net/http` to localhost services. The monitor SSHes into all nodes in parallel, collects reports, then runs cross-node analysis to detect cluster-wide issues.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Interactive TUI (auto-refreshes every 30s)
|
||||||
|
orama monitor --env testnet
|
||||||
|
|
||||||
|
# Cluster overview table
|
||||||
|
orama monitor cluster --env testnet
|
||||||
|
|
||||||
|
# Alerts only
|
||||||
|
orama monitor alerts --env testnet
|
||||||
|
|
||||||
|
# Full JSON report (pipe to jq or feed to LLM)
|
||||||
|
orama monitor report --env testnet
|
||||||
|
```
|
||||||
|
|
||||||
|
## `orama monitor` — Local Orchestrator
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
orama monitor [subcommand] --env <environment> [flags]
|
||||||
|
```
|
||||||
|
|
||||||
|
Without a subcommand, launches the interactive TUI.
|
||||||
|
|
||||||
|
### Global Flags
|
||||||
|
|
||||||
|
| Flag | Default | Description |
|
||||||
|
|------|---------|-------------|
|
||||||
|
| `--env` | *(required)* | Environment: `devnet`, `testnet`, `mainnet` |
|
||||||
|
| `--json` | `false` | Machine-readable JSON output (for one-shot subcommands) |
|
||||||
|
| `--node` | | Filter to a specific node host/IP |
|
||||||
|
| `--config` | `scripts/remote-nodes.conf` | Path to node configuration file |
|
||||||
|
|
||||||
|
### Subcommands
|
||||||
|
|
||||||
|
| Subcommand | Description |
|
||||||
|
|------------|-------------|
|
||||||
|
| `live` | Interactive TUI monitor (default when no subcommand) |
|
||||||
|
| `cluster` | Cluster overview: all nodes, roles, RQLite state, WG peers |
|
||||||
|
| `node` | Per-node health details (system, services, WG, DNS) |
|
||||||
|
| `service` | Service status matrix across all nodes |
|
||||||
|
| `mesh` | WireGuard mesh connectivity and peer details |
|
||||||
|
| `dns` | DNS health: CoreDNS, Caddy, TLS cert expiry, resolution |
|
||||||
|
| `namespaces` | Namespace health across nodes |
|
||||||
|
| `alerts` | Active alerts and warnings sorted by severity |
|
||||||
|
| `report` | Full JSON dump optimized for LLM consumption |
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Cluster overview
|
||||||
|
orama monitor cluster --env testnet
|
||||||
|
|
||||||
|
# Cluster overview as JSON
|
||||||
|
orama monitor cluster --env testnet --json
|
||||||
|
|
||||||
|
# Alerts for all nodes
|
||||||
|
orama monitor alerts --env testnet
|
||||||
|
|
||||||
|
# Single-node deep dive
|
||||||
|
orama monitor node --env testnet --node 51.195.109.238
|
||||||
|
|
||||||
|
# Services for one node
|
||||||
|
orama monitor service --env testnet --node 51.195.109.238
|
||||||
|
|
||||||
|
# WireGuard mesh details
|
||||||
|
orama monitor mesh --env testnet
|
||||||
|
|
||||||
|
# DNS health
|
||||||
|
orama monitor dns --env testnet
|
||||||
|
|
||||||
|
# Namespace health
|
||||||
|
orama monitor namespaces --env testnet
|
||||||
|
|
||||||
|
# Full report for LLM analysis
|
||||||
|
orama monitor report --env testnet | jq .
|
||||||
|
|
||||||
|
# Single-node report
|
||||||
|
orama monitor report --env testnet --node 51.195.109.238
|
||||||
|
|
||||||
|
# Custom config file
|
||||||
|
orama monitor cluster --config /path/to/nodes.conf --env devnet
|
||||||
|
```
|
||||||
|
|
||||||
|
### Interactive TUI
|
||||||
|
|
||||||
|
The `live` subcommand (default) launches a full-screen terminal UI:
|
||||||
|
|
||||||
|
**Tabs:** Overview | Nodes | Services | WG Mesh | DNS | Namespaces | Alerts
|
||||||
|
|
||||||
|
**Key Bindings:**
|
||||||
|
|
||||||
|
| Key | Action |
|
||||||
|
|-----|--------|
|
||||||
|
| `Tab` / `Shift+Tab` | Switch tabs |
|
||||||
|
| `j` / `k` or `↑` / `↓` | Scroll content |
|
||||||
|
| `r` | Force refresh |
|
||||||
|
| `q` / `Ctrl+C` | Quit |
|
||||||
|
|
||||||
|
The TUI auto-refreshes every 30 seconds. A spinner is shown while data is being collected. Colors indicate health: green = healthy, red = critical, yellow = warning.
|
||||||
|
|
||||||
|
### LLM Report Format
|
||||||
|
|
||||||
|
`orama monitor report` outputs structured JSON designed for AI consumption:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"meta": {
|
||||||
|
"environment": "testnet",
|
||||||
|
"collected_at": "2026-02-16T12:00:00Z",
|
||||||
|
"duration_seconds": 3.2,
|
||||||
|
"node_count": 3,
|
||||||
|
"healthy_count": 3
|
||||||
|
},
|
||||||
|
"summary": {
|
||||||
|
"rqlite_leader": "10.0.0.1",
|
||||||
|
"rqlite_voters": "3/3",
|
||||||
|
"rqlite_raft_term": 42,
|
||||||
|
"wg_mesh_status": "all connected",
|
||||||
|
"service_health": "all nominal",
|
||||||
|
"critical_alerts": 0,
|
||||||
|
"warning_alerts": 1,
|
||||||
|
"info_alerts": 0
|
||||||
|
},
|
||||||
|
"alerts": [...],
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"host": "51.195.109.238",
|
||||||
|
"status": "healthy",
|
||||||
|
"collection_ms": 526,
|
||||||
|
"report": { ... }
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## `orama node report` — VPS-Side Collector
|
||||||
|
|
||||||
|
Runs locally on a VPS node. Collects all system and service data in parallel and outputs a single JSON blob. Requires root privileges.
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On a VPS node
|
||||||
|
sudo orama node report --json
|
||||||
|
```
|
||||||
|
|
||||||
|
### What It Collects
|
||||||
|
|
||||||
|
| Section | Data |
|
||||||
|
|---------|------|
|
||||||
|
| **system** | CPU count, load average, memory/disk/swap usage, OOM kills, kernel version, uptime, clock time |
|
||||||
|
| **services** | Systemd service states (active, restarts, memory, CPU, restart loop detection) for 10 core services |
|
||||||
|
| **rqlite** | Raft state, leader, term, applied/commit index, peers, strong read test, readyz, debug vars |
|
||||||
|
| **olric** | Service state, memberlist, member count, restarts, memory, log analysis |
|
||||||
|
| **ipfs** | Daemon/cluster state, swarm/cluster peers, repo size, versions, swarm key |
|
||||||
|
| **gateway** | HTTP health check, subsystem status |
|
||||||
|
| **wireguard** | Interface state, WG IP, peers, handshake ages, MTU, config permissions |
|
||||||
|
| **dns** | CoreDNS/Caddy state, port bindings, resolution tests, TLS cert expiry |
|
||||||
|
| **anyone** | Relay/client state, bootstrap progress, fingerprint |
|
||||||
|
| **network** | Internet reachability, TCP stats, retransmission rate, listening ports, UFW rules |
|
||||||
|
| **processes** | Zombie count, orphan orama processes, panic/fatal count in logs |
|
||||||
|
| **namespaces** | Per-namespace service probes (RQLite, Olric, Gateway) |
|
||||||
|
|
||||||
|
### Performance
|
||||||
|
|
||||||
|
All 12 collectors run in parallel with goroutines. Typical collection time is **< 1 second** per node. HTTP timeouts are 3 seconds, command timeouts are 4 seconds.
|
||||||
|
|
||||||
|
### Output Schema
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"timestamp": "2026-02-16T12:00:00Z",
|
||||||
|
"hostname": "ns1",
|
||||||
|
"version": "0.107.0",
|
||||||
|
"collect_ms": 526,
|
||||||
|
"errors": [],
|
||||||
|
"system": { "cpu_count": 4, "load_avg_1": 0.1, "mem_total_mb": 7937, ... },
|
||||||
|
"services": { "services": [...], "failed_units": [] },
|
||||||
|
"rqlite": { "responsive": true, "raft_state": "Leader", "term": 42, ... },
|
||||||
|
"olric": { "service_active": true, "memberlist_up": true, ... },
|
||||||
|
"ipfs": { "daemon_active": true, "swarm_peers": 2, ... },
|
||||||
|
"gateway": { "responsive": true, "http_status": 200, ... },
|
||||||
|
"wireguard": { "interface_up": true, "wg_ip": "10.0.0.1", "peers": [...], ... },
|
||||||
|
"dns": { "coredns_active": true, "caddy_active": true, "base_tls_days_left": 88, ... },
|
||||||
|
"anyone": { "relay_active": true, "bootstrapped": true, ... },
|
||||||
|
"network": { "internet_reachable": true, "ufw_active": true, ... },
|
||||||
|
"processes": { "zombie_count": 0, "orphan_count": 0, "panic_count": 0, ... },
|
||||||
|
"namespaces": []
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Alert Detection
|
||||||
|
|
||||||
|
Alerts are derived from cross-node analysis of all collected reports. Each alert has a severity level and identifies the affected subsystem and node.
|
||||||
|
|
||||||
|
### Alert Severities
|
||||||
|
|
||||||
|
| Severity | Examples |
|
||||||
|
|----------|----------|
|
||||||
|
| **critical** | SSH collection failed (node unreachable), no RQLite leader, split brain, RQLite unresponsive, WireGuard interface down, WG peer never handshaked, OOM kills, service failed, UFW inactive |
|
||||||
|
| **warning** | Strong read failed, memory > 90%, disk > 85%, stale WG handshake (> 3min), Raft term inconsistency, applied index lag > 100, restart loop detected, TLS cert < 14 days, DNS down, namespace gateway down, Anyone not bootstrapped, clock skew > 5s, binary version mismatch, internet unreachable, high TCP retransmission |
|
||||||
|
| **info** | Zombie processes, orphan orama processes, swap usage > 30% |
|
||||||
|
|
||||||
|
### Cross-Node Checks
|
||||||
|
|
||||||
|
These checks compare data across all nodes:
|
||||||
|
|
||||||
|
- **RQLite Leader**: Exactly one leader exists (no split brain)
|
||||||
|
- **Leader Agreement**: All nodes agree on the same leader address
|
||||||
|
- **Raft Term Consistency**: Term values within 1 of each other
|
||||||
|
- **Applied Index Lag**: Followers within 100 entries of the leader
|
||||||
|
- **WireGuard Peer Symmetry**: Each node has N-1 peers
|
||||||
|
- **Clock Skew**: Node clocks within 5 seconds of each other
|
||||||
|
- **Binary Version**: All nodes running the same version
|
||||||
|
- **WebRTC SFU Coverage**: SFU running on expected nodes (3/3) per namespace
|
||||||
|
- **WebRTC TURN Redundancy**: TURN running on expected nodes (2/3) per namespace
|
||||||
|
|
||||||
|
### Per-Node Checks
|
||||||
|
|
||||||
|
- **RQLite**: Responsive, ready, strong read
|
||||||
|
- **WireGuard**: Interface up, handshake freshness
|
||||||
|
- **System**: Memory, disk, load, OOM kills, swap
|
||||||
|
- **Services**: Systemd state, restart loops
|
||||||
|
- **DNS**: CoreDNS/Caddy up, TLS cert expiry, SOA resolution
|
||||||
|
- **Anyone**: Bootstrap progress
|
||||||
|
- **Processes**: Zombies, orphans, panics in logs
|
||||||
|
- **Namespaces**: Gateway and RQLite per namespace
|
||||||
|
- **WebRTC**: SFU and TURN service health (when provisioned)
|
||||||
|
- **Network**: UFW, internet reachability, TCP retransmission
|
||||||
|
|
||||||
|
## Monitor vs Inspector
|
||||||
|
|
||||||
|
Both tools check cluster health, but they serve different purposes:
|
||||||
|
|
||||||
|
| | `orama monitor` | `orama inspect` |
|
||||||
|
|---|---|---|
|
||||||
|
| **Data source** | `orama node report --json` (single SSH call per node) | 15+ SSH commands per node per subsystem |
|
||||||
|
| **Speed** | ~3-5s for full cluster | ~4-10s for full cluster |
|
||||||
|
| **Output** | TUI, tables, JSON | Tables, JSON |
|
||||||
|
| **Focus** | Real-time monitoring, alert detection | Deep diagnostic checks with pass/fail/warn |
|
||||||
|
| **AI support** | `report` subcommand for LLM input | `--ai` flag for inline analysis |
|
||||||
|
| **Use case** | "Is anything wrong right now?" | "What exactly is wrong and why?" |
|
||||||
|
|
||||||
|
Use `monitor` for day-to-day health checks and the interactive TUI. Use `inspect` for deep diagnostics when something is already known to be broken.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Uses the same `scripts/remote-nodes.conf` as the inspector. See [INSPECTOR.md](INSPECTOR.md#configuration) for format details.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Nodes must have the `orama` CLI installed (via `orama node install` or `upload-source.sh`). The monitor runs `sudo orama node report --json` over SSH, so the binary must be at `/usr/local/bin/orama` on each node.
|
||||||
248
core/docs/NAMESERVER_SETUP.md
Normal file
248
core/docs/NAMESERVER_SETUP.md
Normal file
@ -0,0 +1,248 @@
|
|||||||
|
# Nameserver Setup Guide
|
||||||
|
|
||||||
|
This guide explains how to configure your domain registrar to use Orama Network nodes as authoritative nameservers.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
When you install Orama with the `--nameserver` flag, the node runs CoreDNS to serve DNS records for your domain. This enables:
|
||||||
|
|
||||||
|
- Dynamic DNS for deployments (e.g., `myapp.node-abc123.dbrs.space`)
|
||||||
|
- Wildcard DNS support for all subdomains
|
||||||
|
- ACME DNS-01 challenges for automatic SSL certificates
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
Before setting up nameservers, you need:
|
||||||
|
|
||||||
|
1. **Domain ownership** - A domain you control (e.g., `dbrs.space`)
|
||||||
|
2. **3+ VPS nodes** - Recommended for redundancy
|
||||||
|
3. **Static IP addresses** - Each VPS must have a static public IP
|
||||||
|
4. **Access to registrar DNS settings** - Admin access to your domain registrar
|
||||||
|
|
||||||
|
## Understanding DNS Records
|
||||||
|
|
||||||
|
### NS Records (Nameserver Records)
|
||||||
|
NS records tell the internet which servers are authoritative for your domain:
|
||||||
|
```
|
||||||
|
dbrs.space. IN NS ns1.dbrs.space.
|
||||||
|
dbrs.space. IN NS ns2.dbrs.space.
|
||||||
|
dbrs.space. IN NS ns3.dbrs.space.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Glue Records
|
||||||
|
Glue records are A records that provide IP addresses for nameservers that are under the same domain. They're required because:
|
||||||
|
- `ns1.dbrs.space` is under `dbrs.space`
|
||||||
|
- To resolve `ns1.dbrs.space`, you need to query `dbrs.space` nameservers
|
||||||
|
- But those nameservers ARE `ns1.dbrs.space` - circular dependency!
|
||||||
|
- Glue records break this cycle by providing IPs at the registry level
|
||||||
|
|
||||||
|
```
|
||||||
|
ns1.dbrs.space. IN A 141.227.165.168
|
||||||
|
ns2.dbrs.space. IN A 141.227.165.154
|
||||||
|
ns3.dbrs.space. IN A 141.227.156.51
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
### Step 1: Install Orama on Each VPS
|
||||||
|
|
||||||
|
Install Orama with the `--nameserver` flag on each VPS that will serve as a nameserver:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On VPS 1 (ns1)
|
||||||
|
sudo orama install \
|
||||||
|
--nameserver \
|
||||||
|
--domain dbrs.space \
|
||||||
|
--vps-ip 141.227.165.168
|
||||||
|
|
||||||
|
# On VPS 2 (ns2)
|
||||||
|
sudo orama install \
|
||||||
|
--nameserver \
|
||||||
|
--domain dbrs.space \
|
||||||
|
--vps-ip 141.227.165.154
|
||||||
|
|
||||||
|
# On VPS 3 (ns3)
|
||||||
|
sudo orama install \
|
||||||
|
--nameserver \
|
||||||
|
--domain dbrs.space \
|
||||||
|
--vps-ip 141.227.156.51
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Configure Your Registrar
|
||||||
|
|
||||||
|
#### For Namecheap
|
||||||
|
|
||||||
|
1. **Log into Namecheap Dashboard**
|
||||||
|
- Go to https://www.namecheap.com
|
||||||
|
- Navigate to **Domain List** → **Manage** (next to your domain)
|
||||||
|
|
||||||
|
2. **Add Glue Records (Personal DNS Servers)**
|
||||||
|
- Go to **Advanced DNS** tab
|
||||||
|
- Scroll down to **Personal DNS Servers** section
|
||||||
|
- Click **Add Nameserver**
|
||||||
|
- Add each nameserver with its IP:
|
||||||
|
| Nameserver | IP Address |
|
||||||
|
|------------|------------|
|
||||||
|
| ns1.yourdomain.com | 141.227.165.168 |
|
||||||
|
| ns2.yourdomain.com | 141.227.165.154 |
|
||||||
|
| ns3.yourdomain.com | 141.227.156.51 |
|
||||||
|
|
||||||
|
3. **Set Custom Nameservers**
|
||||||
|
- Go back to the **Domain** tab
|
||||||
|
- Under **Nameservers**, select **Custom DNS**
|
||||||
|
- Add your nameserver hostnames:
|
||||||
|
- ns1.yourdomain.com
|
||||||
|
- ns2.yourdomain.com
|
||||||
|
- ns3.yourdomain.com
|
||||||
|
- Click the green checkmark to save
|
||||||
|
|
||||||
|
4. **Wait for Propagation**
|
||||||
|
- DNS changes can take 24-48 hours to propagate globally
|
||||||
|
- Most changes are visible within 1-4 hours
|
||||||
|
|
||||||
|
#### For GoDaddy
|
||||||
|
|
||||||
|
1. Log into GoDaddy account
|
||||||
|
2. Go to **My Products** → **DNS** for your domain
|
||||||
|
3. Under **Nameservers**, click **Change**
|
||||||
|
4. Select **Enter my own nameservers**
|
||||||
|
5. Add your nameserver hostnames
|
||||||
|
6. For glue records, go to **DNS Management** → **Host Names**
|
||||||
|
7. Add A records for ns1, ns2, ns3
|
||||||
|
|
||||||
|
#### For Cloudflare (as Registrar)
|
||||||
|
|
||||||
|
1. Log into Cloudflare Dashboard
|
||||||
|
2. Go to **Domain Registration** → your domain
|
||||||
|
3. Under **Nameservers**, change to custom
|
||||||
|
4. Note: Cloudflare Registrar may require contacting support for glue records
|
||||||
|
|
||||||
|
#### For Google Domains
|
||||||
|
|
||||||
|
1. Log into Google Domains
|
||||||
|
2. Select your domain → **DNS**
|
||||||
|
3. Under **Name servers**, select **Use custom name servers**
|
||||||
|
4. Add your nameserver hostnames
|
||||||
|
5. For glue records, click **Add** under **Glue records**
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
### Step 1: Verify NS Records
|
||||||
|
|
||||||
|
After propagation, check that NS records are visible:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check NS records from Google DNS
|
||||||
|
dig NS yourdomain.com @8.8.8.8
|
||||||
|
|
||||||
|
# Expected output should show:
|
||||||
|
# yourdomain.com. IN NS ns1.yourdomain.com.
|
||||||
|
# yourdomain.com. IN NS ns2.yourdomain.com.
|
||||||
|
# yourdomain.com. IN NS ns3.yourdomain.com.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: Verify Glue Records
|
||||||
|
|
||||||
|
Check that glue records resolve:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check glue records
|
||||||
|
dig A ns1.yourdomain.com @8.8.8.8
|
||||||
|
dig A ns2.yourdomain.com @8.8.8.8
|
||||||
|
dig A ns3.yourdomain.com @8.8.8.8
|
||||||
|
|
||||||
|
# Each should return the correct IP address
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3: Test CoreDNS
|
||||||
|
|
||||||
|
Query your nameservers directly:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test a query against ns1
|
||||||
|
dig @ns1.yourdomain.com test.yourdomain.com
|
||||||
|
|
||||||
|
# Test wildcard resolution
|
||||||
|
dig @ns1.yourdomain.com myapp.node-abc123.yourdomain.com
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4: Verify from Multiple Locations
|
||||||
|
|
||||||
|
Use online tools to verify global propagation:
|
||||||
|
- https://dnschecker.org
|
||||||
|
- https://www.whatsmydns.net
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### DNS Not Resolving
|
||||||
|
|
||||||
|
1. **Check CoreDNS is running:**
|
||||||
|
```bash
|
||||||
|
sudo systemctl status coredns
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Check CoreDNS logs:**
|
||||||
|
```bash
|
||||||
|
sudo journalctl -u coredns -f
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Verify port 53 is open:**
|
||||||
|
```bash
|
||||||
|
sudo ufw status
|
||||||
|
# Port 53 (TCP/UDP) should be allowed
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Test locally:**
|
||||||
|
```bash
|
||||||
|
dig @localhost yourdomain.com
|
||||||
|
```
|
||||||
|
|
||||||
|
### Glue Records Not Propagating
|
||||||
|
|
||||||
|
- Glue records are stored at the registry level, not DNS level
|
||||||
|
- They can take longer to propagate (up to 48 hours)
|
||||||
|
- Verify at your registrar that they were saved correctly
|
||||||
|
- Some registrars require the domain to be using their nameservers first
|
||||||
|
|
||||||
|
### SERVFAIL Errors
|
||||||
|
|
||||||
|
Usually indicates CoreDNS configuration issues:
|
||||||
|
|
||||||
|
1. Check Corefile syntax
|
||||||
|
2. Verify RQLite connectivity
|
||||||
|
3. Check firewall rules
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
### Firewall Rules
|
||||||
|
|
||||||
|
Only expose necessary ports:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Allow DNS from anywhere
|
||||||
|
sudo ufw allow 53/tcp
|
||||||
|
sudo ufw allow 53/udp
|
||||||
|
|
||||||
|
# Restrict admin ports to internal network
|
||||||
|
sudo ufw allow from 10.0.0.0/8 to any port 8080 # Health
|
||||||
|
sudo ufw allow from 10.0.0.0/8 to any port 9153 # Metrics
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rate Limiting
|
||||||
|
|
||||||
|
Consider adding rate limiting to prevent DNS amplification attacks.
|
||||||
|
This can be configured in the CoreDNS Corefile.
|
||||||
|
|
||||||
|
## Multi-Node Coordination
|
||||||
|
|
||||||
|
When running multiple nameservers:
|
||||||
|
|
||||||
|
1. **All nodes share the same RQLite cluster** - DNS records are automatically synchronized
|
||||||
|
2. **Install in order** - First node bootstraps, others join
|
||||||
|
3. **Same domain configuration** - All nodes must use the same `--domain` value
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- [CoreDNS RQLite Plugin](../pkg/coredns/README.md) - Technical details
|
||||||
|
- [Deployment Guide](./DEPLOYMENT_GUIDE.md) - Full deployment instructions
|
||||||
|
- [Architecture](./ARCHITECTURE.md) - System architecture overview
|
||||||
233
core/docs/ORAMAOS_DEPLOYMENT.md
Normal file
233
core/docs/ORAMAOS_DEPLOYMENT.md
Normal file
@ -0,0 +1,233 @@
|
|||||||
|
# OramaOS Deployment Guide
|
||||||
|
|
||||||
|
OramaOS is a custom minimal Linux image built with Buildroot. It replaces the standard Ubuntu-based node deployment for mainnet, devnet, and testnet environments. Sandbox clusters remain on Ubuntu for development convenience.
|
||||||
|
|
||||||
|
## What is OramaOS?
|
||||||
|
|
||||||
|
OramaOS is a locked-down operating system designed specifically for Orama node operators. Key properties:
|
||||||
|
|
||||||
|
- **No SSH, no shell** — operators cannot access the filesystem or run commands on the machine
|
||||||
|
- **LUKS full-disk encryption** — the data partition is encrypted; the key is split via Shamir's Secret Sharing across peer nodes
|
||||||
|
- **Read-only rootfs** — the OS image uses SquashFS with dm-verity integrity verification
|
||||||
|
- **A/B partition updates** — signed OS images are applied atomically with automatic rollback on failure
|
||||||
|
- **Service sandboxing** — each service runs in its own Linux namespace with seccomp syscall filtering
|
||||||
|
- **Signed binaries** — all updates are cryptographically signed with the Orama rootwallet
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
Partition Layout:
|
||||||
|
/dev/sda1 — ESP (EFI System Partition, systemd-boot)
|
||||||
|
/dev/sda2 — rootfs-A (SquashFS, read-only, dm-verity)
|
||||||
|
/dev/sda3 — rootfs-B (standby, for A/B updates)
|
||||||
|
/dev/sda4 — data (LUKS2 encrypted, ext4)
|
||||||
|
|
||||||
|
Boot Flow:
|
||||||
|
systemd-boot → dm-verity rootfs → orama-agent → WireGuard → services
|
||||||
|
```
|
||||||
|
|
||||||
|
The **orama-agent** is the only root process. It manages:
|
||||||
|
- Boot sequence and LUKS key reconstruction
|
||||||
|
- WireGuard tunnel setup
|
||||||
|
- Service lifecycle (start, stop, restart in sandboxed namespaces)
|
||||||
|
- Command reception from the Gateway over WireGuard
|
||||||
|
- OS updates (download, verify signature, A/B swap, reboot)
|
||||||
|
|
||||||
|
## Enrollment Flow
|
||||||
|
|
||||||
|
OramaOS nodes join the cluster through an enrollment process (different from the Ubuntu `orama node install` flow):
|
||||||
|
|
||||||
|
### Step 1: Flash OramaOS to VPS
|
||||||
|
|
||||||
|
Download the OramaOS image and flash it to your VPS:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Download image (URL provided upon acceptance)
|
||||||
|
wget https://releases.orama.network/oramaos-v1.0.0-amd64.qcow2
|
||||||
|
|
||||||
|
# Flash to VPS (provider-specific — Hetzner, Vultr, etc.)
|
||||||
|
# Most providers support uploading custom images via their dashboard
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 2: First Boot — Enrollment Mode
|
||||||
|
|
||||||
|
On first boot, the agent:
|
||||||
|
1. Generates a random 8-character registration code
|
||||||
|
2. Starts a temporary HTTP server on port 9999
|
||||||
|
3. Opens an outbound WebSocket to the Gateway
|
||||||
|
4. Waits for enrollment to complete
|
||||||
|
|
||||||
|
The registration code is displayed on the VPS console (if available) and served at `http://<vps-ip>:9999/`.
|
||||||
|
|
||||||
|
### Step 3: Run Enrollment from CLI
|
||||||
|
|
||||||
|
On your local machine (where you have the `orama` CLI and rootwallet):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate an invite token on any existing cluster node
|
||||||
|
orama node invite --expiry 24h
|
||||||
|
|
||||||
|
# Enroll the OramaOS node
|
||||||
|
orama node enroll --node-ip <vps-public-ip> --token <invite-token> --gateway <gateway-url>
|
||||||
|
```
|
||||||
|
|
||||||
|
The enrollment command:
|
||||||
|
1. Fetches the registration code from the node (port 9999)
|
||||||
|
2. Sends the code + invite token to the Gateway
|
||||||
|
3. Gateway validates everything, assigns a WireGuard IP, and pushes config to the node
|
||||||
|
4. Node configures WireGuard, formats the LUKS-encrypted data partition
|
||||||
|
5. LUKS key is split via Shamir and distributed to peer vault-guardians
|
||||||
|
6. Services start in sandboxed namespaces
|
||||||
|
7. Port 9999 closes permanently
|
||||||
|
|
||||||
|
### Step 4: Verify
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check the node is online and healthy
|
||||||
|
orama monitor report --env <env>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Genesis Node
|
||||||
|
|
||||||
|
The first OramaOS node in a cluster is the **genesis node**. It has a special boot path because there are no peers yet for Shamir key distribution:
|
||||||
|
|
||||||
|
1. Genesis generates a LUKS key and encrypts the data partition
|
||||||
|
2. The LUKS key is encrypted with a rootwallet-derived key and stored on the unencrypted rootfs
|
||||||
|
3. On reboot (before enough peers exist), the operator must manually unlock:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama node unlock --genesis --node-ip <wg-ip>
|
||||||
|
```
|
||||||
|
|
||||||
|
This command:
|
||||||
|
1. Fetches the encrypted genesis key from the node
|
||||||
|
2. Decrypts it using the rootwallet (`rw decrypt`)
|
||||||
|
3. Sends the decrypted LUKS key to the agent over WireGuard
|
||||||
|
|
||||||
|
Once 5+ peers have joined, the genesis node distributes Shamir shares to peers, deletes the local encrypted key, and transitions to normal Shamir-based unlock. After this transition, `orama node unlock` is no longer needed.
|
||||||
|
|
||||||
|
## Normal Reboot (Shamir Unlock)
|
||||||
|
|
||||||
|
When an enrolled OramaOS node reboots:
|
||||||
|
|
||||||
|
1. Agent starts, brings up WireGuard
|
||||||
|
2. Contacts peer vault-guardians over WireGuard
|
||||||
|
3. Fetches K Shamir shares (K = threshold, typically `max(3, N/3)`)
|
||||||
|
4. Reconstructs LUKS key via Lagrange interpolation over GF(256)
|
||||||
|
5. Decrypts and mounts data partition
|
||||||
|
6. Starts all services
|
||||||
|
7. Zeros key from memory
|
||||||
|
|
||||||
|
If not enough peers are available, the agent enters a degraded "waiting for peers" state and retries with exponential backoff (1s, 2s, 4s, 8s, 16s, max 5 retries per cycle).
|
||||||
|
|
||||||
|
## Node Management
|
||||||
|
|
||||||
|
Since OramaOS has no SSH, all management happens through the Gateway API:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check node status
|
||||||
|
curl "https://gateway.example.com/v1/node/status?node_id=<id>"
|
||||||
|
|
||||||
|
# Send a command (e.g., restart a service)
|
||||||
|
curl -X POST "https://gateway.example.com/v1/node/command?node_id=<id>" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"action":"restart","service":"rqlite"}'
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
curl "https://gateway.example.com/v1/node/logs?node_id=<id>&service=gateway&lines=100"
|
||||||
|
|
||||||
|
# Graceful node departure
|
||||||
|
curl -X POST "https://gateway.example.com/v1/node/leave" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{"node_id":"<id>"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
The Gateway proxies these requests to the agent over WireGuard (port 9998). The agent is never directly accessible from the public internet.
|
||||||
|
|
||||||
|
## OS Updates
|
||||||
|
|
||||||
|
OramaOS uses an A/B partition scheme for atomic, rollback-safe updates:
|
||||||
|
|
||||||
|
1. Agent periodically checks for new versions
|
||||||
|
2. Downloads the signed image (P2P over WireGuard between nodes)
|
||||||
|
3. Verifies the rootwallet EVM signature against the embedded public key
|
||||||
|
4. Writes to the standby partition (if running from A, writes to B)
|
||||||
|
5. Sets systemd-boot to boot from B with `tries_left=3`
|
||||||
|
6. Reboots
|
||||||
|
7. If B boots successfully (agent starts, WG connects, services healthy): marks B as "good"
|
||||||
|
8. If B fails 3 times: systemd-boot automatically falls back to A
|
||||||
|
|
||||||
|
No operator intervention is needed for updates. Failed updates are automatically rolled back.
|
||||||
|
|
||||||
|
## Service Sandboxing
|
||||||
|
|
||||||
|
Each service on OramaOS runs in an isolated environment:
|
||||||
|
|
||||||
|
- **Mount namespace** — each service only sees its own data directory as writable; everything else is read-only
|
||||||
|
- **UTS namespace** — isolated hostname
|
||||||
|
- **Dedicated UID/GID** — each service runs as a different user (not root)
|
||||||
|
- **Seccomp filtering** — per-service syscall allowlist (initially in audit mode, then enforce mode)
|
||||||
|
|
||||||
|
Services and their sandbox profiles:
|
||||||
|
| Service | Writable Path | Extra Syscalls |
|
||||||
|
|---------|--------------|----------------|
|
||||||
|
| RQLite | `/opt/orama/.orama/data/rqlite` | fsync, fdatasync (Raft + SQLite WAL) |
|
||||||
|
| Olric | `/opt/orama/.orama/data/olric` | sendmmsg, recvmmsg (gossip) |
|
||||||
|
| IPFS | `/opt/orama/.orama/data/ipfs` | sendfile, splice (data transfer) |
|
||||||
|
| Gateway | `/opt/orama/.orama/data/gateway` | sendfile, splice (HTTP) |
|
||||||
|
| CoreDNS | `/opt/orama/.orama/data/coredns` | sendmmsg, recvmmsg (DNS) |
|
||||||
|
|
||||||
|
## OramaOS vs Ubuntu Deployment
|
||||||
|
|
||||||
|
| Feature | Ubuntu | OramaOS |
|
||||||
|
|---------|--------|---------|
|
||||||
|
| SSH access | Yes | No |
|
||||||
|
| Shell access | Yes | No |
|
||||||
|
| Disk encryption | No | LUKS2 (Shamir) |
|
||||||
|
| OS updates | Manual (`orama node upgrade`) | Automatic (signed, A/B) |
|
||||||
|
| Service isolation | systemd only | Namespaces + seccomp |
|
||||||
|
| Rootfs integrity | None | dm-verity |
|
||||||
|
| Binary signing | Optional | Required |
|
||||||
|
| Operator data access | Full | None |
|
||||||
|
| Environments | All (including sandbox) | Mainnet, devnet, testnet |
|
||||||
|
|
||||||
|
## Cleaning / Factory Reset
|
||||||
|
|
||||||
|
OramaOS nodes cannot be cleaned with the standard `orama node clean` command (no SSH access). Instead:
|
||||||
|
|
||||||
|
- **Graceful departure:** `orama node leave` via the Gateway API — stops services, redistributes Shamir shares, removes WG peer
|
||||||
|
- **Factory reset:** Reflash the OramaOS image on the VPS via the hosting provider's dashboard
|
||||||
|
- **Data is unrecoverable:** Since the LUKS key is distributed across peers, reflashing destroys all data permanently
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Node stuck in enrollment mode
|
||||||
|
The node boots but enrollment never completes.
|
||||||
|
|
||||||
|
**Check:** Can you reach `http://<vps-ip>:9999/` from your machine? If not, the VPS firewall may be blocking port 9999.
|
||||||
|
|
||||||
|
**Fix:** Ensure port 9999 is open in the VPS provider's firewall. OramaOS opens it automatically via its internal firewall, but external provider firewalls (Hetzner, AWS security groups) must be configured separately.
|
||||||
|
|
||||||
|
### LUKS unlock fails (not enough peers)
|
||||||
|
After reboot, the node can't reconstruct its LUKS key.
|
||||||
|
|
||||||
|
**Check:** How many peer nodes are online? The node needs at least K peers (threshold) to be reachable over WireGuard.
|
||||||
|
|
||||||
|
**Fix:** Ensure enough cluster nodes are online. If this is the genesis node and fewer than 5 peers exist, use:
|
||||||
|
```bash
|
||||||
|
orama node unlock --genesis --node-ip <wg-ip>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Update failed, node rolled back
|
||||||
|
The node applied an update but reverted to the previous version.
|
||||||
|
|
||||||
|
**Check:** The agent logs will show why the new partition failed to boot (accessible via `GET /v1/node/logs?service=agent`).
|
||||||
|
|
||||||
|
**Common causes:** Corrupted download (signature verification should catch this), hardware issue, or incompatible configuration.
|
||||||
|
|
||||||
|
### Services not starting after reboot
|
||||||
|
The node rebooted and LUKS unlocked, but services are unhealthy.
|
||||||
|
|
||||||
|
**Check:** `GET /v1/node/status` — which services are down?
|
||||||
|
|
||||||
|
**Fix:** Try restarting the specific service via `POST /v1/node/command` with `{"action":"restart","service":"<name>"}`. If the issue persists, check service logs.
|
||||||
208
core/docs/SANDBOX.md
Normal file
208
core/docs/SANDBOX.md
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
# Sandbox: Ephemeral Hetzner Cloud Clusters
|
||||||
|
|
||||||
|
Spin up temporary 5-node Orama clusters on Hetzner Cloud for development and testing. Total cost: ~€0.04/hour.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# One-time setup (API key, domain, floating IPs, SSH key)
|
||||||
|
orama sandbox setup
|
||||||
|
|
||||||
|
# Create a cluster (~5 minutes)
|
||||||
|
orama sandbox create --name my-feature
|
||||||
|
|
||||||
|
# Check health
|
||||||
|
orama sandbox status
|
||||||
|
|
||||||
|
# SSH into a node
|
||||||
|
orama sandbox ssh 1
|
||||||
|
|
||||||
|
# Deploy code changes
|
||||||
|
orama sandbox rollout
|
||||||
|
|
||||||
|
# Tear it down
|
||||||
|
orama sandbox destroy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
### 1. Hetzner Cloud Account
|
||||||
|
|
||||||
|
Create a project at [console.hetzner.cloud](https://console.hetzner.cloud) and generate an API token with read/write permissions under **Security > API Tokens**.
|
||||||
|
|
||||||
|
### 2. Domain with Glue Records
|
||||||
|
|
||||||
|
You need a domain (or subdomain) that points to Hetzner Floating IPs. The `orama sandbox setup` wizard will guide you through this.
|
||||||
|
|
||||||
|
**Example:** Using `sbx.dbrs.space`
|
||||||
|
|
||||||
|
At your domain registrar:
|
||||||
|
1. Create glue records (Personal DNS Servers):
|
||||||
|
- `ns1.sbx.dbrs.space` → `<floating-ip-1>`
|
||||||
|
- `ns2.sbx.dbrs.space` → `<floating-ip-2>`
|
||||||
|
2. Set custom nameservers for `sbx.dbrs.space`:
|
||||||
|
- `ns1.sbx.dbrs.space`
|
||||||
|
- `ns2.sbx.dbrs.space`
|
||||||
|
|
||||||
|
DNS propagation can take up to 48 hours.
|
||||||
|
|
||||||
|
### 3. Binary Archive
|
||||||
|
|
||||||
|
Build the binary archive before creating a cluster:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama build
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates `/tmp/orama-<version>-linux-amd64.tar.gz` with all pre-compiled binaries.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
Run the interactive setup wizard:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama sandbox setup
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
1. Prompt for your Hetzner API token and validate it
|
||||||
|
2. Ask for your sandbox domain
|
||||||
|
3. Create or reuse 2 Hetzner Floating IPs (~€0.005/hr each)
|
||||||
|
4. Create a firewall with sandbox rules
|
||||||
|
5. Create a rootwallet SSH entry (`sandbox/root`) if it doesn't exist
|
||||||
|
6. Upload the wallet-derived public key to Hetzner
|
||||||
|
7. Display DNS configuration instructions
|
||||||
|
|
||||||
|
Config is saved to `~/.orama/sandbox.yaml`.
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
### `orama sandbox create [--name <name>]`
|
||||||
|
|
||||||
|
Creates a new 5-node cluster. If `--name` is omitted, a random name is generated (e.g., "swift-falcon").
|
||||||
|
|
||||||
|
**Cluster layout:**
|
||||||
|
- Nodes 1-2: Nameservers (CoreDNS + Caddy + all services)
|
||||||
|
- Nodes 3-5: Regular nodes (all services except CoreDNS)
|
||||||
|
|
||||||
|
**Phases:**
|
||||||
|
1. Provision 5 CX22 servers on Hetzner (parallel, ~90s)
|
||||||
|
2. Assign floating IPs to nameserver nodes (~10s)
|
||||||
|
3. Upload binary archive to all nodes (parallel, ~60s)
|
||||||
|
4. Install genesis node + generate invite tokens (~120s)
|
||||||
|
5. Join remaining 4 nodes (serial with health checks, ~180s)
|
||||||
|
6. Verify cluster health (~15s)
|
||||||
|
|
||||||
|
**One sandbox at a time.** Since the floating IPs are shared, only one sandbox can own the nameservers. Destroy the active sandbox before creating a new one.
|
||||||
|
|
||||||
|
### `orama sandbox destroy [--name <name>] [--force]`
|
||||||
|
|
||||||
|
Tears down a cluster:
|
||||||
|
1. Unassigns floating IPs
|
||||||
|
2. Deletes all 5 servers (parallel)
|
||||||
|
3. Removes state file
|
||||||
|
|
||||||
|
Use `--force` to skip confirmation.
|
||||||
|
|
||||||
|
### `orama sandbox list`
|
||||||
|
|
||||||
|
Lists all sandboxes with their status. Also checks Hetzner for orphaned servers that don't have a corresponding state file.
|
||||||
|
|
||||||
|
### `orama sandbox status [--name <name>]`
|
||||||
|
|
||||||
|
Shows per-node health including:
|
||||||
|
- Service status (active/inactive)
|
||||||
|
- RQLite role (Leader/Follower)
|
||||||
|
- Cluster summary (commit index, voter count)
|
||||||
|
|
||||||
|
### `orama sandbox rollout [--name <name>]`
|
||||||
|
|
||||||
|
Deploys code changes:
|
||||||
|
1. Uses the latest binary archive from `/tmp/` (run `orama build` first)
|
||||||
|
2. Pushes to all nodes
|
||||||
|
3. Rolling upgrade: followers first, leader last, 15s between nodes
|
||||||
|
|
||||||
|
### `orama sandbox ssh <node-number>`
|
||||||
|
|
||||||
|
Opens an interactive SSH session to a sandbox node (1-5).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama sandbox ssh 1 # SSH into node 1 (genesis/ns1)
|
||||||
|
orama sandbox ssh 3 # SSH into node 3 (regular node)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Floating IPs
|
||||||
|
|
||||||
|
Hetzner Floating IPs are persistent IPv4 addresses that can be reassigned between servers. They solve the DNS chicken-and-egg problem:
|
||||||
|
|
||||||
|
- Glue records at the registrar point to 2 Floating IPs (configured once)
|
||||||
|
- Each new sandbox assigns the Floating IPs to its nameserver nodes
|
||||||
|
- DNS works instantly — no propagation delay between clusters
|
||||||
|
|
||||||
|
### SSH Authentication
|
||||||
|
|
||||||
|
Sandbox uses a rootwallet-derived SSH key (`sandbox/root` vault entry), the same mechanism as production. The wallet must be unlocked (`rw unlock`) before running sandbox commands that use SSH. The public key is uploaded to Hetzner during setup and injected into every server at creation time.
|
||||||
|
|
||||||
|
### Server Naming
|
||||||
|
|
||||||
|
Servers: `sbx-<name>-<N>` (e.g., `sbx-swift-falcon-1` through `sbx-swift-falcon-5`)
|
||||||
|
|
||||||
|
### State Files
|
||||||
|
|
||||||
|
Sandbox state is stored at `~/.orama/sandboxes/<name>.yaml`. This tracks server IDs, IPs, roles, and cluster status.
|
||||||
|
|
||||||
|
## Cost
|
||||||
|
|
||||||
|
| Resource | Cost | Qty | Total |
|
||||||
|
|----------|------|-----|-------|
|
||||||
|
| CX22 (2 vCPU, 4GB) | €0.006/hr | 5 | €0.03/hr |
|
||||||
|
| Floating IPv4 | €0.005/hr | 2 | €0.01/hr |
|
||||||
|
| **Total** | | | **~€0.04/hr** |
|
||||||
|
|
||||||
|
Servers are billed per hour. Floating IPs are billed as long as they exist (even unassigned). Destroy the sandbox when not in use to save on server costs.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### "sandbox not configured"
|
||||||
|
|
||||||
|
Run `orama sandbox setup` first.
|
||||||
|
|
||||||
|
### "no binary archive found"
|
||||||
|
|
||||||
|
Run `orama build` to create the binary archive.
|
||||||
|
|
||||||
|
### "sandbox X is already active"
|
||||||
|
|
||||||
|
Only one sandbox can be active at a time. Destroy it first:
|
||||||
|
```bash
|
||||||
|
orama sandbox destroy --name <name>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Server creation fails
|
||||||
|
|
||||||
|
Check:
|
||||||
|
- Hetzner API token is valid and has read/write permissions
|
||||||
|
- You haven't hit Hetzner's server limit (default: 10 per project)
|
||||||
|
- The selected location has CX22 capacity
|
||||||
|
|
||||||
|
### Genesis install fails
|
||||||
|
|
||||||
|
SSH into the node to debug:
|
||||||
|
```bash
|
||||||
|
orama sandbox ssh 1
|
||||||
|
journalctl -u orama-node -f
|
||||||
|
```
|
||||||
|
|
||||||
|
The sandbox will be left in "error" state. You can destroy and recreate it.
|
||||||
|
|
||||||
|
### DNS not resolving
|
||||||
|
|
||||||
|
1. Verify glue records are configured at your registrar
|
||||||
|
2. Check propagation: `dig NS sbx.dbrs.space @8.8.8.8`
|
||||||
|
3. Propagation can take 24-48 hours for new domains
|
||||||
|
|
||||||
|
### Orphaned servers
|
||||||
|
|
||||||
|
If `orama sandbox list` shows orphaned servers, delete them manually at [console.hetzner.cloud](https://console.hetzner.cloud). Sandbox servers are labeled `orama-sandbox=<name>` for easy identification.
|
||||||
194
core/docs/SECURITY.md
Normal file
194
core/docs/SECURITY.md
Normal file
@ -0,0 +1,194 @@
|
|||||||
|
# Security Hardening
|
||||||
|
|
||||||
|
This document describes all security measures applied to the Orama Network, covering both Phase 1 (service hardening on existing Ubuntu nodes) and Phase 2 (OramaOS locked-down image).
|
||||||
|
|
||||||
|
## Phase 1: Service Hardening
|
||||||
|
|
||||||
|
These measures apply to all nodes (Ubuntu and OramaOS).
|
||||||
|
|
||||||
|
### Network Isolation
|
||||||
|
|
||||||
|
**CIDR Validation (Step 1.1)**
|
||||||
|
- WireGuard subnet restricted to `10.0.0.0/24` across all components: firewall rules, rate limiter, auth module, and WireGuard PostUp/PostDown iptables rules
|
||||||
|
- Prevents other tenants on shared VPS providers from bypassing the firewall via overlapping `10.x.x.x` ranges
|
||||||
|
|
||||||
|
**IPv6 Disabled (Step 1.2)**
|
||||||
|
- IPv6 disabled system-wide via sysctl: `net.ipv6.conf.all.disable_ipv6=1`
|
||||||
|
- Prevents services bound to `0.0.0.0` from being reachable via IPv6 (which had no firewall rules)
|
||||||
|
|
||||||
|
### Authentication
|
||||||
|
|
||||||
|
**Internal Endpoint Auth (Step 1.3)**
|
||||||
|
- `/v1/internal/wg/peers` and `/v1/internal/wg/peer/remove` now require cluster secret validation
|
||||||
|
- Peer removal additionally validates the request originates from a WireGuard subnet IP
|
||||||
|
|
||||||
|
**RQLite Authentication (Step 1.7)**
|
||||||
|
- RQLite runs with `-auth` flag pointing to a credentials file
|
||||||
|
- All RQLite HTTP requests include `Authorization: Basic <base64>` headers
|
||||||
|
- Credentials generated at cluster genesis, distributed to joining nodes via join response
|
||||||
|
- Both the central RQLite client wrapper and the standalone CoreDNS RQLite client send auth
|
||||||
|
|
||||||
|
**Olric Gossip Encryption (Step 1.8)**
|
||||||
|
- Olric memberlist uses a 32-byte encryption key for all gossip traffic
|
||||||
|
- Key generated at genesis, distributed via join response
|
||||||
|
- Prevents rogue nodes from joining the gossip ring and poisoning caches
|
||||||
|
- Note: encryption is all-or-nothing (coordinated restart required when enabling)
|
||||||
|
|
||||||
|
**IPFS Cluster TrustedPeers (Step 1.9)**
|
||||||
|
- IPFS Cluster `TrustedPeers` populated with actual cluster peer IDs (was `["*"]`)
|
||||||
|
- New peers added to TrustedPeers on all existing nodes during join
|
||||||
|
- Prevents unauthorized peers from controlling IPFS pinning
|
||||||
|
|
||||||
|
**Vault V1 Auth Enforcement (Step 1.14)**
|
||||||
|
- V1 push/pull endpoints require a valid session token when vault-guardian is configured
|
||||||
|
- Previously, auth was optional for backward compatibility, which meant any WireGuard peer could read or overwrite Shamir shares
|
||||||
|
|
||||||
|
### Token & Key Storage
|
||||||
|
|
||||||
|
**Refresh Token Hashing (Step 1.5)**
|
||||||
|
- Refresh tokens stored as SHA-256 hashes in RQLite (never plaintext)
|
||||||
|
- On lookup: hash the incoming token, query by hash
|
||||||
|
- On revocation: hash before revoking (both single-token and by-subject)
|
||||||
|
- Existing tokens invalidated on upgrade (users re-authenticate)
|
||||||
|
|
||||||
|
**API Key Hashing (Step 1.6)**
|
||||||
|
- API keys stored as HMAC-SHA256 hashes using a server-side secret
|
||||||
|
- HMAC secret generated at cluster genesis, stored in `~/.orama/secrets/api-key-hmac-secret`
|
||||||
|
- On lookup: compute HMAC, query by hash — fast enough for every request (unlike bcrypt)
|
||||||
|
- In-memory cache uses raw key as cache key (never persisted)
|
||||||
|
- During rolling upgrade: dual lookup (HMAC first, then raw as fallback) until all nodes upgraded
|
||||||
|
|
||||||
|
**TURN Secret Encryption (Step 1.15)**
|
||||||
|
- TURN shared secrets encrypted at rest in RQLite using AES-256-GCM
|
||||||
|
- Encryption key derived via HKDF from the cluster secret with purpose string `"turn-encryption"`
|
||||||
|
|
||||||
|
### TLS & Transport
|
||||||
|
|
||||||
|
**InsecureSkipVerify Fix (Step 1.10)**
|
||||||
|
- During node join, TLS verification uses TOFU (Trust On First Use)
|
||||||
|
- Invite token output includes the CA certificate fingerprint (SHA-256)
|
||||||
|
- Joining node verifies the server cert fingerprint matches before proceeding
|
||||||
|
- After join: CA cert stored locally for future connections
|
||||||
|
|
||||||
|
**WebSocket Origin Validation (Step 1.4)**
|
||||||
|
- All WebSocket upgraders validate the `Origin` header against the node's configured domain
|
||||||
|
- Non-browser clients (no Origin header) are still allowed
|
||||||
|
- Prevents cross-site WebSocket hijacking attacks
|
||||||
|
|
||||||
|
### Process Isolation
|
||||||
|
|
||||||
|
**Dedicated User (Step 1.11)**
|
||||||
|
- All services run as the `orama` user (not root)
|
||||||
|
- Caddy and CoreDNS get `AmbientCapabilities=CAP_NET_BIND_SERVICE` for ports 80/443 and 53
|
||||||
|
- WireGuard stays as root (kernel netlink requires it)
|
||||||
|
- vault-guardian already had proper hardening
|
||||||
|
|
||||||
|
**systemd Hardening (Step 1.12)**
|
||||||
|
- All service units include:
|
||||||
|
```ini
|
||||||
|
ProtectSystem=strict
|
||||||
|
ProtectHome=yes
|
||||||
|
NoNewPrivileges=yes
|
||||||
|
PrivateDevices=yes
|
||||||
|
ProtectKernelTunables=yes
|
||||||
|
ProtectKernelModules=yes
|
||||||
|
RestrictNamespaces=yes
|
||||||
|
ReadWritePaths=/opt/orama/.orama
|
||||||
|
```
|
||||||
|
- Applied to both template files (`pkg/environments/templates/`) and hardcoded unit generators (`pkg/environments/production/services.go`)
|
||||||
|
|
||||||
|
### Supply Chain
|
||||||
|
|
||||||
|
**Binary Signing (Step 1.13)**
|
||||||
|
- Build archives include `manifest.sig` — a rootwallet EVM signature of the manifest hash
|
||||||
|
- During install, the signature is verified against the embedded Orama public key
|
||||||
|
- Unsigned or tampered archives are rejected
|
||||||
|
|
||||||
|
## Phase 2: OramaOS
|
||||||
|
|
||||||
|
These measures apply only to OramaOS nodes (mainnet, devnet, testnet).
|
||||||
|
|
||||||
|
### Immutable OS
|
||||||
|
|
||||||
|
- **Read-only rootfs** — SquashFS with dm-verity integrity verification
|
||||||
|
- **No shell** — `/bin/sh` symlinked to `/bin/false`, no bash/ash/ssh
|
||||||
|
- **No SSH** — OpenSSH not included in the image
|
||||||
|
- **Minimal packages** — only what's needed for systemd, cryptsetup, and the agent
|
||||||
|
|
||||||
|
### Full-Disk Encryption
|
||||||
|
|
||||||
|
- **LUKS2** with AES-XTS-Plain64 on the data partition
|
||||||
|
- **Shamir's Secret Sharing** over GF(256) — LUKS key split across peer vault-guardians
|
||||||
|
- **Adaptive threshold** — K = max(3, N/3) where N is the number of peers
|
||||||
|
- **Key zeroing** — LUKS key wiped from memory immediately after use
|
||||||
|
- **Malicious share detection** — fetch K+1 shares when possible, verify consistency
|
||||||
|
|
||||||
|
### Service Sandboxing
|
||||||
|
|
||||||
|
Each service runs in isolated Linux namespaces:
|
||||||
|
- **CLONE_NEWNS** — mount namespace (filesystem isolation)
|
||||||
|
- **CLONE_NEWUTS** — hostname namespace
|
||||||
|
- **Dedicated UID/GID** — each service has its own user
|
||||||
|
- **Seccomp filtering** — per-service syscall allowlist
|
||||||
|
|
||||||
|
Note: CLONE_NEWPID is intentionally omitted — it makes services PID 1 in their namespace, which changes signal semantics (SIGTERM ignored by default for PID 1).
|
||||||
|
|
||||||
|
### Signed Updates
|
||||||
|
|
||||||
|
- A/B partition scheme with systemd-boot and boot counting (`tries_left=3`)
|
||||||
|
- All updates signed with rootwallet EVM signature (secp256k1 + keccak256)
|
||||||
|
- Signer address: `0xb5d8a496c8b2412990d7D467E17727fdF5954afC`
|
||||||
|
- P2P distribution over WireGuard between nodes
|
||||||
|
- Automatic rollback on 3 consecutive boot failures
|
||||||
|
|
||||||
|
### Zero Operator Access
|
||||||
|
|
||||||
|
- Operators cannot read data on the machine (LUKS encrypted, no shell)
|
||||||
|
- Management only through Gateway API → agent over WireGuard
|
||||||
|
- All commands are logged and auditable
|
||||||
|
- No root access, no console access, no file system access
|
||||||
|
|
||||||
|
## Rollout Strategy
|
||||||
|
|
||||||
|
### Phase 1 Batches
|
||||||
|
|
||||||
|
```
|
||||||
|
Batch 1 (zero-risk, no restart):
|
||||||
|
- CIDR fix
|
||||||
|
- IPv6 disable
|
||||||
|
- Internal endpoint auth
|
||||||
|
- WebSocket origin check
|
||||||
|
|
||||||
|
Batch 2 (medium-risk, restart needed):
|
||||||
|
- Hash refresh tokens
|
||||||
|
- Hash API keys
|
||||||
|
- Binary signing
|
||||||
|
- Vault V1 auth enforcement
|
||||||
|
- TURN secret encryption
|
||||||
|
|
||||||
|
Batch 3 (high-risk, coordinated rollout):
|
||||||
|
- RQLite auth (followers first, leader last)
|
||||||
|
- Olric encryption (simultaneous restart)
|
||||||
|
- IPFS Cluster TrustedPeers
|
||||||
|
|
||||||
|
Batch 4 (infrastructure changes):
|
||||||
|
- InsecureSkipVerify fix
|
||||||
|
- Dedicated user
|
||||||
|
- systemd hardening
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2
|
||||||
|
|
||||||
|
1. Build and test the OramaOS image in QEMU
|
||||||
|
2. Deploy to sandbox cluster alongside Ubuntu nodes
|
||||||
|
3. Verify interop and stability
|
||||||
|
4. Gradual migration: testnet → devnet → mainnet (one node at a time, maintaining Raft quorum)
|
||||||
|
|
||||||
|
## Verification
|
||||||
|
|
||||||
|
All changes verified on sandbox cluster before production deployment:
|
||||||
|
|
||||||
|
- `make test` — all unit tests pass
|
||||||
|
- `orama monitor report --env sandbox` — full cluster health
|
||||||
|
- Manual endpoint testing (e.g., curl without auth → 401)
|
||||||
|
- Security-specific checks (IPv6 listeners, RQLite auth, binary signatures)
|
||||||
374
core/docs/SERVERLESS.md
Normal file
374
core/docs/SERVERLESS.md
Normal file
@ -0,0 +1,374 @@
|
|||||||
|
# Serverless Functions
|
||||||
|
|
||||||
|
Orama Network runs serverless functions as sandboxed WebAssembly (WASM) modules. Functions are written in Go, compiled to WASM with TinyGo, and executed in an isolated wazero runtime with configurable memory limits and timeouts.
|
||||||
|
|
||||||
|
Functions receive input via **stdin** (JSON) and return output via **stdout** (JSON). They can also access Orama services — database, cache, storage, secrets, PubSub, and HTTP — through **host functions** injected by the runtime.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Scaffold a new function
|
||||||
|
orama function init my-function
|
||||||
|
|
||||||
|
# 2. Edit your handler
|
||||||
|
cd my-function
|
||||||
|
# edit function.go
|
||||||
|
|
||||||
|
# 3. Build to WASM
|
||||||
|
orama function build
|
||||||
|
|
||||||
|
# 4. Deploy
|
||||||
|
orama function deploy
|
||||||
|
|
||||||
|
# 5. Invoke
|
||||||
|
orama function invoke my-function --data '{"name": "World"}'
|
||||||
|
|
||||||
|
# 6. View logs
|
||||||
|
orama function logs my-function
|
||||||
|
```
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
my-function/
|
||||||
|
├── function.go # Handler code
|
||||||
|
└── function.yaml # Configuration
|
||||||
|
```
|
||||||
|
|
||||||
|
### function.yaml
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: my-function # Required. Letters, digits, hyphens, underscores.
|
||||||
|
public: false # Allow unauthenticated invocation (default: false)
|
||||||
|
memory: 64 # Memory limit in MB (1-256, default: 64)
|
||||||
|
timeout: 30 # Execution timeout in seconds (1-300, default: 30)
|
||||||
|
retry:
|
||||||
|
count: 0 # Retry attempts on failure (default: 0)
|
||||||
|
delay: 5 # Seconds between retries (default: 5)
|
||||||
|
env: # Environment variables (accessible via get_env)
|
||||||
|
MY_VAR: "value"
|
||||||
|
```
|
||||||
|
|
||||||
|
### function.go (minimal)
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Read JSON input from stdin
|
||||||
|
var input []byte
|
||||||
|
buf := make([]byte, 4096)
|
||||||
|
for {
|
||||||
|
n, err := os.Stdin.Read(buf)
|
||||||
|
if n > 0 {
|
||||||
|
input = append(input, buf[:n]...)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload map[string]interface{}
|
||||||
|
json.Unmarshal(input, &payload)
|
||||||
|
|
||||||
|
// Process and return JSON output via stdout
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"result": "Hello!",
|
||||||
|
}
|
||||||
|
output, _ := json.Marshal(response)
|
||||||
|
os.Stdout.Write(output)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Building
|
||||||
|
|
||||||
|
Functions are compiled to WASM using [TinyGo](https://tinygo.org/):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Using the CLI (recommended)
|
||||||
|
orama function build
|
||||||
|
|
||||||
|
# Or manually
|
||||||
|
tinygo build -o function.wasm -target wasi function.go
|
||||||
|
```
|
||||||
|
|
||||||
|
## Host Functions API
|
||||||
|
|
||||||
|
Host functions let your WASM code interact with Orama services. They are imported from the `"env"` or `"host"` module (both work) and use a pointer/length ABI for string parameters.
|
||||||
|
|
||||||
|
All host functions are registered at runtime by the engine. They are available to every function without additional configuration.
|
||||||
|
|
||||||
|
### Context
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `get_caller_wallet()` → string | Wallet address of the caller (from JWT) |
|
||||||
|
| `get_request_id()` → string | Unique invocation ID |
|
||||||
|
| `get_env(key)` → string | Environment variable from function.yaml |
|
||||||
|
| `get_secret(name)` → string | Decrypted secret value (see [Managing Secrets](#managing-secrets)) |
|
||||||
|
|
||||||
|
### Database (RQLite)
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `db_query(sql, argsJSON)` → JSON | Execute SELECT query. Args as JSON array. Returns JSON array of row objects. |
|
||||||
|
| `db_execute(sql, argsJSON)` → int | Execute INSERT/UPDATE/DELETE. Returns affected row count. |
|
||||||
|
|
||||||
|
Example query from WASM:
|
||||||
|
```
|
||||||
|
db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?", '["user123"]')
|
||||||
|
→ [{"push_token": "abc...", "device_type": "ios"}]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Cache (Olric Distributed Cache)
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `cache_get(key)` → bytes | Get cached value by key. Returns empty on miss. |
|
||||||
|
| `cache_set(key, value, ttl)` | Store value with TTL in seconds. |
|
||||||
|
| `cache_incr(key)` → int64 | Atomically increment by 1 (the key is initialized to 0 if missing). |
|
||||||
|
| `cache_incr_by(key, delta)` → int64 | Atomically increment by delta. |
|
||||||
|
|
||||||
|
### HTTP
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `http_fetch(method, url, headersJSON, body)` → JSON | Make outbound HTTP request. Headers as JSON object. Returns `{"status": 200, "headers": {...}, "body": "..."}`. Timeout: 30s. |
|
||||||
|
|
||||||
|
### PubSub
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `pubsub_publish(topic, dataJSON)` → bool | Publish message to a PubSub topic. Returns true on success. |
|
||||||
|
|
||||||
|
### Logging
|
||||||
|
|
||||||
|
| Function | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `log_info(message)` | Log info-level message (captured in invocation logs). |
|
||||||
|
| `log_error(message)` | Log error-level message. |
|
||||||
|
|
||||||
|
## Managing Secrets
|
||||||
|
|
||||||
|
Secrets are encrypted at rest (AES-256-GCM) and scoped to your namespace. Functions read them via `get_secret("name")` at runtime.
|
||||||
|
|
||||||
|
### CLI Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set a secret (inline value)
|
||||||
|
orama function secrets set APNS_KEY_ID "ABC123DEF"
|
||||||
|
|
||||||
|
# Set a secret from a file (useful for PEM keys, certificates)
|
||||||
|
orama function secrets set APNS_AUTH_KEY --from-file ./AuthKey_ABC123.p8
|
||||||
|
|
||||||
|
# List all secret names (values are never shown)
|
||||||
|
orama function secrets list
|
||||||
|
|
||||||
|
# Delete a secret
|
||||||
|
orama function secrets delete APNS_KEY_ID
|
||||||
|
|
||||||
|
# Delete without confirmation
|
||||||
|
orama function secrets delete APNS_KEY_ID --force
|
||||||
|
```
|
||||||
|
|
||||||
|
### How It Works
|
||||||
|
|
||||||
|
1. **You set secrets** via the CLI → encrypted and stored in the database
|
||||||
|
2. **Functions read secrets** at runtime via `get_secret("name")` → decrypted on demand
|
||||||
|
3. **Namespace isolation** → each namespace has its own secret store; functions in namespace A cannot read secrets from namespace B
|
||||||
|
|
||||||
|
## PubSub Triggers
|
||||||
|
|
||||||
|
Triggers let functions react to events automatically. When a message is published to a PubSub topic, all functions with a trigger on that topic are invoked asynchronously.
|
||||||
|
|
||||||
|
### CLI Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add a trigger: invoke "call-push-handler" when messages hit "calls:invite"
|
||||||
|
orama function triggers add call-push-handler --topic calls:invite
|
||||||
|
|
||||||
|
# List triggers for a function
|
||||||
|
orama function triggers list call-push-handler
|
||||||
|
|
||||||
|
# Delete a trigger
|
||||||
|
orama function triggers delete call-push-handler <trigger-id>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Trigger Event Payload
|
||||||
|
|
||||||
|
When triggered via PubSub, the function receives this JSON via stdin:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"topic": "calls:invite",
|
||||||
|
"data": { ... },
|
||||||
|
"namespace": "my-namespace",
|
||||||
|
"trigger_depth": 1,
|
||||||
|
"timestamp": 1708972800
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Depth Limiting
|
||||||
|
|
||||||
|
To prevent infinite loops (function A publishes to a topic → triggers function A again), trigger depth is tracked. Maximum depth is **5**. Each time a function publishes a message that triggers another function, `trigger_depth` increments; at depth 5, no further triggers fire.
|
||||||
|
|
||||||
|
## Function Lifecycle
|
||||||
|
|
||||||
|
### Versioning
|
||||||
|
|
||||||
|
Each deploy creates a new version. The WASM binary is stored in **IPFS** (content-addressed) and metadata is stored in **RQLite**.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List versions
|
||||||
|
orama function versions my-function
|
||||||
|
|
||||||
|
# Invoke a specific version
|
||||||
|
curl -X POST .../v1/functions/my-function@2/invoke
|
||||||
|
```
|
||||||
|
|
||||||
|
### Invocation Logging
|
||||||
|
|
||||||
|
Every invocation is logged with: request ID, duration, status (success/error/timeout), input/output size, and any `log_info`/`log_error` messages.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
orama function logs my-function
|
||||||
|
```
|
||||||
|
|
||||||
|
## CLI Reference
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `orama function init <name>` | Scaffold a new function project |
|
||||||
|
| `orama function build [dir]` | Compile Go to WASM |
|
||||||
|
| `orama function deploy [dir]` | Deploy WASM to the network |
|
||||||
|
| `orama function invoke <name> --data <json>` | Invoke a function |
|
||||||
|
| `orama function list` | List deployed functions |
|
||||||
|
| `orama function get <name>` | Get function details |
|
||||||
|
| `orama function delete <name>` | Delete a function |
|
||||||
|
| `orama function logs <name>` | View invocation logs |
|
||||||
|
| `orama function versions <name>` | List function versions |
|
||||||
|
| `orama function secrets set <name> <value>` | Set an encrypted secret |
|
||||||
|
| `orama function secrets list` | List secret names |
|
||||||
|
| `orama function secrets delete <name>` | Delete a secret |
|
||||||
|
| `orama function triggers add <fn> --topic <t>` | Add PubSub trigger |
|
||||||
|
| `orama function triggers list <fn>` | List triggers |
|
||||||
|
| `orama function triggers delete <fn> <id>` | Delete a trigger |
|
||||||
|
|
||||||
|
## HTTP API Reference
|
||||||
|
|
||||||
|
| Method | Endpoint | Description |
|
||||||
|
|--------|----------|-------------|
|
||||||
|
| POST | `/v1/functions` | Deploy function (multipart/form-data) |
|
||||||
|
| GET | `/v1/functions` | List functions |
|
||||||
|
| GET | `/v1/functions/{name}` | Get function info |
|
||||||
|
| DELETE | `/v1/functions/{name}` | Delete function |
|
||||||
|
| POST | `/v1/functions/{name}/invoke` | Invoke function |
|
||||||
|
| GET | `/v1/functions/{name}/versions` | List versions |
|
||||||
|
| GET | `/v1/functions/{name}/logs` | Get logs |
|
||||||
|
| WS | `/v1/functions/{name}/ws` | WebSocket invoke (streaming) |
|
||||||
|
| PUT | `/v1/functions/secrets` | Set a secret |
|
||||||
|
| GET | `/v1/functions/secrets` | List secret names |
|
||||||
|
| DELETE | `/v1/functions/secrets/{name}` | Delete a secret |
|
||||||
|
| POST | `/v1/functions/{name}/triggers` | Add PubSub trigger |
|
||||||
|
| GET | `/v1/functions/{name}/triggers` | List triggers |
|
||||||
|
| DELETE | `/v1/functions/{name}/triggers/{id}` | Delete trigger |
|
||||||
|
| POST | `/v1/invoke/{namespace}/{name}` | Direct invoke (alt endpoint) |
|
||||||
|
|
||||||
|
## Example: Call Push Handler
|
||||||
|
|
||||||
|
A real-world function that sends VoIP push notifications when a call invite is published to PubSub:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# function.yaml
|
||||||
|
name: call-push-handler
|
||||||
|
memory: 128
|
||||||
|
timeout: 30
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
// function.go — triggered by PubSub on "calls:invite"
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This function:
|
||||||
|
// 1. Receives a call invite event from PubSub trigger
|
||||||
|
// 2. Queries the database for the callee's device info
|
||||||
|
// 3. Reads push notification credentials from secrets
|
||||||
|
// 4. Sends a push notification via http_fetch
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Read PubSub trigger event from stdin
|
||||||
|
var input []byte
|
||||||
|
buf := make([]byte, 4096)
|
||||||
|
for {
|
||||||
|
n, err := os.Stdin.Read(buf)
|
||||||
|
if n > 0 {
|
||||||
|
input = append(input, buf[:n]...)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse the trigger event wrapper
|
||||||
|
var event struct {
|
||||||
|
Topic string `json:"topic"`
|
||||||
|
Data json.RawMessage `json:"data"`
|
||||||
|
}
|
||||||
|
json.Unmarshal(input, &event)
|
||||||
|
|
||||||
|
// Parse the actual call invite data
|
||||||
|
var invite struct {
|
||||||
|
CalleeID string `json:"calleeId"`
|
||||||
|
CallerName string `json:"callerName"`
|
||||||
|
CallType string `json:"callType"`
|
||||||
|
}
|
||||||
|
json.Unmarshal(event.Data, &invite)
|
||||||
|
|
||||||
|
// At this point, the function would use host functions:
|
||||||
|
//
|
||||||
|
// 1. db_query("SELECT push_token, device_type FROM devices WHERE user_id = ?",
|
||||||
|
// json.Marshal([]string{invite.CalleeID}))
|
||||||
|
//
|
||||||
|
// 2. get_secret("FCM_SERVER_KEY") for Android push
|
||||||
|
// get_secret("APNS_KEY_PEM") for iOS push
|
||||||
|
//
|
||||||
|
// 3. http_fetch("POST", "https://fcm.googleapis.com/v1/...", headers, body)
|
||||||
|
//
|
||||||
|
// 4. log_info("Push sent to " + invite.CalleeID)
|
||||||
|
//
|
||||||
|
// Note: Host functions use the WASM ABI (pointer/length).
|
||||||
|
// A Go SDK for ergonomic access is planned.
|
||||||
|
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"status": "sent",
|
||||||
|
"callee": invite.CalleeID,
|
||||||
|
}
|
||||||
|
output, _ := json.Marshal(response)
|
||||||
|
os.Stdout.Write(output)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Deploy and wire the trigger:
|
||||||
|
```bash
|
||||||
|
orama function build
|
||||||
|
orama function deploy
|
||||||
|
|
||||||
|
# Set push notification secrets
|
||||||
|
orama function secrets set FCM_SERVER_KEY "your-fcm-key"
|
||||||
|
orama function secrets set APNS_KEY_PEM --from-file ./AuthKey.p8
|
||||||
|
orama function secrets set APNS_KEY_ID "ABC123"
|
||||||
|
orama function secrets set APNS_TEAM_ID "TEAM456"
|
||||||
|
|
||||||
|
# Wire the PubSub trigger
|
||||||
|
orama function triggers add call-push-handler --topic calls:invite
|
||||||
|
```
|
||||||
291
core/docs/WEBRTC.md
Normal file
291
core/docs/WEBRTC.md
Normal file
@ -0,0 +1,291 @@
|
|||||||
|
# WebRTC Integration
|
||||||
|
|
||||||
|
Real-time voice, video, and data channels for Orama Network namespaces.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
Client A Client B
|
||||||
|
│ │
|
||||||
|
│ 1. Get TURN credentials (REST) │
|
||||||
|
│ 2. Connect WebSocket (signaling) │
|
||||||
|
│ 3. Exchange SDP/ICE via SFU │
|
||||||
|
│ │
|
||||||
|
▼ ▼
|
||||||
|
┌──────────┐ UDP relay ┌──────────┐
|
||||||
|
│ TURN │◄──────────────────►│ TURN │
|
||||||
|
│ Server │ (public IPs) │ Server │
|
||||||
|
│ Node 1 │ │ Node 2 │
|
||||||
|
└────┬─────┘ └────┬─────┘
|
||||||
|
│ WireGuard │ WireGuard
|
||||||
|
▼ ▼
|
||||||
|
┌──────────────────────────────────────────┐
|
||||||
|
│ SFU Servers (3 nodes) │
|
||||||
|
│ - WebSocket signaling (WireGuard only) │
|
||||||
|
│ - Pion WebRTC (RTP forwarding) │
|
||||||
|
│ - Room management │
|
||||||
|
│ - Track publish/subscribe │
|
||||||
|
└──────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key design decisions:**
|
||||||
|
- **TURN-shielded**: SFU binds only to WireGuard IPs. All client media flows through TURN relay.
|
||||||
|
- **`iceTransportPolicy: relay`** enforced server-side — no direct peer connections.
|
||||||
|
- **Opt-in per namespace** via `orama namespace enable webrtc`.
|
||||||
|
- **SFU on all 3 nodes**, **TURN on 2 of 3 nodes** (redundancy without over-provisioning).
|
||||||
|
- **Separate port allocation** from existing namespace services.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Namespace must be provisioned with a ready cluster (RQLite + Olric + Gateway running).
|
||||||
|
- Command must be run on a cluster node (uses internal gateway endpoint).
|
||||||
|
|
||||||
|
## Enable / Disable
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enable WebRTC for a namespace
|
||||||
|
orama namespace enable webrtc --namespace myapp
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
orama namespace webrtc-status --namespace myapp
|
||||||
|
|
||||||
|
# Disable WebRTC (stops services, deallocates ports, removes DNS)
|
||||||
|
orama namespace disable webrtc --namespace myapp
|
||||||
|
```
|
||||||
|
|
||||||
|
### What happens on enable:
|
||||||
|
1. Generates a per-namespace TURN shared secret (32 bytes, crypto/rand)
|
||||||
|
2. Inserts `namespace_webrtc_config` DB record
|
||||||
|
3. Allocates WebRTC port blocks on each node (SFU signaling + media range, TURN relay range)
|
||||||
|
4. Spawns TURN on 2 nodes (selected by capacity)
|
||||||
|
5. Spawns SFU on all 3 nodes
|
||||||
|
6. Creates DNS A records: `turn.ns-{name}.{baseDomain}` pointing to TURN node public IPs
|
||||||
|
7. Updates cluster state on all nodes (for cold-boot restoration)
|
||||||
|
|
||||||
|
### What happens on disable:
|
||||||
|
1. Stops SFU on all 3 nodes
|
||||||
|
2. Stops TURN on 2 nodes
|
||||||
|
3. Deallocates all WebRTC ports
|
||||||
|
4. Deletes TURN DNS records
|
||||||
|
5. Cleans up DB records (`namespace_webrtc_config`, `webrtc_rooms`)
|
||||||
|
6. Updates cluster state
|
||||||
|
|
||||||
|
## Client Integration (JavaScript)
|
||||||
|
|
||||||
|
### Authentication
|
||||||
|
|
||||||
|
All WebRTC endpoints require authentication. Use one of:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Option A: API Key via header (recommended)
|
||||||
|
X-API-Key: <your-namespace-api-key>
|
||||||
|
|
||||||
|
# Option B: API Key via Authorization header
|
||||||
|
Authorization: ApiKey <your-namespace-api-key>
|
||||||
|
|
||||||
|
# Option C: JWT Bearer token
|
||||||
|
Authorization: Bearer <jwt>
|
||||||
|
```
|
||||||
|
|
||||||
|
### 1. Get TURN Credentials
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const response = await fetch('https://ns-myapp.orama-devnet.network/v1/webrtc/turn/credentials', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'X-API-Key': apiKey }
|
||||||
|
});
|
||||||
|
|
||||||
|
const { uris, username, password, ttl } = await response.json();
|
||||||
|
// uris: [
|
||||||
|
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=udp",
|
||||||
|
// "turn:turn.ns-myapp.orama-devnet.network:3478?transport=tcp",
|
||||||
|
// "turns:turn.ns-myapp.orama-devnet.network:5349"
|
||||||
|
// ]
|
||||||
|
// username: "{expiry_unix}:{namespace}"
|
||||||
|
// password: HMAC-SHA1 derived (base64)
|
||||||
|
// ttl: 600 (seconds)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Create PeerConnection
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const pc = new RTCPeerConnection({
|
||||||
|
iceServers: [{ urls: uris, username, credential: password }],
|
||||||
|
iceTransportPolicy: 'relay' // enforced by SFU
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Connect Signaling WebSocket
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const ws = new WebSocket(
|
||||||
|
`wss://ns-myapp.orama-devnet.network/v1/webrtc/signal?room=${roomId}&api_key=${apiKey}`
|
||||||
|
);
|
||||||
|
|
||||||
|
ws.onmessage = (event) => {
|
||||||
|
const msg = JSON.parse(event.data);
|
||||||
|
switch (msg.type) {
|
||||||
|
case 'offer': handleOffer(msg); break;
|
||||||
|
case 'answer': handleAnswer(msg); break;
|
||||||
|
case 'ice-candidate': handleICE(msg); break;
|
||||||
|
case 'peer-joined': handleJoin(msg); break;
|
||||||
|
case 'peer-left': handleLeave(msg); break;
|
||||||
|
case 'turn-credentials':
|
||||||
|
case 'refresh-credentials':
|
||||||
|
updateTURN(msg); // SFU sends refreshed creds at 80% TTL
|
||||||
|
break;
|
||||||
|
case 'server-draining':
|
||||||
|
reconnect(); // SFU shutting down, reconnect to another node
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Room Management (REST)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const headers = { 'X-API-Key': apiKey, 'Content-Type': 'application/json' };
|
||||||
|
|
||||||
|
// Create room
|
||||||
|
await fetch('/v1/webrtc/rooms', {
|
||||||
|
method: 'POST',
|
||||||
|
headers,
|
||||||
|
body: JSON.stringify({ room_id: 'my-room' })
|
||||||
|
});
|
||||||
|
|
||||||
|
// List rooms
|
||||||
|
const rooms = await fetch('/v1/webrtc/rooms', { headers });
|
||||||
|
|
||||||
|
// Close room
|
||||||
|
await fetch('/v1/webrtc/rooms?room_id=my-room', {
|
||||||
|
method: 'DELETE',
|
||||||
|
headers
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
## API Reference
|
||||||
|
|
||||||
|
### REST Endpoints
|
||||||
|
|
||||||
|
| Method | Path | Auth | Description |
|
||||||
|
|--------|------|------|-------------|
|
||||||
|
| POST | `/v1/webrtc/turn/credentials` | JWT/API key | Get TURN relay credentials |
|
||||||
|
| GET/WS | `/v1/webrtc/signal` | JWT/API key | WebSocket signaling |
|
||||||
|
| GET | `/v1/webrtc/rooms` | JWT/API key | List rooms |
|
||||||
|
| POST | `/v1/webrtc/rooms` | JWT/API key (owner) | Create room |
|
||||||
|
| DELETE | `/v1/webrtc/rooms` | JWT/API key (owner) | Close room |
|
||||||
|
|
||||||
|
### Signaling Messages
|
||||||
|
|
||||||
|
| Type | Direction | Description |
|
||||||
|
|------|-----------|-------------|
|
||||||
|
| `join` | Client → SFU | Join room |
|
||||||
|
| `offer` | Client ↔ SFU | SDP offer |
|
||||||
|
| `answer` | Client ↔ SFU | SDP answer |
|
||||||
|
| `ice-candidate` | Client ↔ SFU | ICE candidate |
|
||||||
|
| `leave` | Client → SFU | Leave room |
|
||||||
|
| `peer-joined` | SFU → Client | New peer notification |
|
||||||
|
| `peer-left` | SFU → Client | Peer departure |
|
||||||
|
| `turn-credentials` | SFU → Client | Initial TURN credentials |
|
||||||
|
| `refresh-credentials` | SFU → Client | Refreshed credentials (at 80% TTL) |
|
||||||
|
| `server-draining` | SFU → Client | SFU shutting down |
|
||||||
|
|
||||||
|
## Port Allocation
|
||||||
|
|
||||||
|
WebRTC uses a **separate port allocation system** from the core namespace ports:
|
||||||
|
|
||||||
|
| Service | Port Range | Protocol | Per Namespace |
|
||||||
|
|---------|-----------|----------|---------------|
|
||||||
|
| SFU signaling | 30000-30099 | TCP (WireGuard only) | 1 port |
|
||||||
|
| SFU media (RTP) | 20000-29999 | UDP (WireGuard only) | 500 ports |
|
||||||
|
| TURN listen | 3478 | UDP + TCP | fixed |
|
||||||
|
| TURNS (TLS) | 5349 | TCP | fixed |
|
||||||
|
| TURN relay | 49152-65535 | UDP | 800 ports |
|
||||||
|
|
||||||
|
## TURN Credential Protocol
|
||||||
|
|
||||||
|
- Credentials use HMAC-SHA1 with a per-namespace shared secret
|
||||||
|
- Username format: `{expiry_unix}:{namespace}`
|
||||||
|
- Password: `base64(HMAC-SHA1(shared_secret, username))`
|
||||||
|
- Default TTL: 600 seconds (10 minutes)
|
||||||
|
- SFU proactively sends `refresh-credentials` at 80% of TTL (8 minutes)
|
||||||
|
- Clients should update ICE servers on receiving refresh
|
||||||
|
|
||||||
|
## TURNS TLS Certificate
|
||||||
|
|
||||||
|
TURNS (port 5349) uses TLS. Certificate provisioning:
|
||||||
|
|
||||||
|
1. **Let's Encrypt (primary)**: On TURN spawn, the TURN domain is added to the local Caddy instance's Caddyfile. Caddy provisions a Let's Encrypt cert via DNS-01 ACME challenge (using the orama DNS provider). TURN reads the cert from Caddy's storage.
|
||||||
|
2. **Self-signed (fallback)**: If Caddy cert provisioning fails (timeout, Caddy not running), a self-signed cert is generated with the node's public IP as SAN.
|
||||||
|
|
||||||
|
Caddy auto-renews Let's Encrypt certs at ~60 days. TURN picks up renewed certs on restart.
|
||||||
|
|
||||||
|
## Monitoring
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check WebRTC status
|
||||||
|
orama namespace webrtc-status --namespace myapp
|
||||||
|
|
||||||
|
# Monitor report includes SFU/TURN status
|
||||||
|
orama monitor report --env devnet
|
||||||
|
|
||||||
|
# Inspector checks WebRTC health
|
||||||
|
orama inspector --env devnet
|
||||||
|
```
|
||||||
|
|
||||||
|
The monitoring report includes per-namespace `sfu_up` and `turn_up` fields. The inspector runs cross-node checks to verify SFU coverage (3 nodes) and TURN redundancy (2 nodes).
|
||||||
|
|
||||||
|
## Debugging
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SFU logs
|
||||||
|
journalctl -u orama-namespace-sfu@myapp -f
|
||||||
|
|
||||||
|
# TURN logs
|
||||||
|
journalctl -u orama-namespace-turn@myapp -f
|
||||||
|
|
||||||
|
# Check service status
|
||||||
|
systemctl status orama-namespace-sfu@myapp
|
||||||
|
systemctl status orama-namespace-turn@myapp
|
||||||
|
```
|
||||||
|
|
||||||
|
## Security Model
|
||||||
|
|
||||||
|
- **Forced relay**: `iceTransportPolicy: relay` enforced server-side. Clients cannot bypass TURN.
|
||||||
|
- **HMAC credentials**: Per-namespace TURN shared secret. Credentials expire after 10 minutes.
|
||||||
|
- **Namespace isolation**: Each namespace has its own TURN secret, port ranges, and rooms.
|
||||||
|
- **Authentication required**: All WebRTC endpoints require API key or JWT (`X-API-Key` header, `Authorization: ApiKey`, or `Authorization: Bearer`).
|
||||||
|
- **Room management**: Creating/closing rooms requires namespace ownership.
|
||||||
|
- **SFU on WireGuard only**: SFU binds to 10.0.0.x, never 0.0.0.0. Only reachable via TURN relay.
|
||||||
|
- **Permissions-Policy**: `camera=(self), microphone=(self)` — only same-origin can access media devices.
|
||||||
|
|
||||||
|
## Firewall
|
||||||
|
|
||||||
|
When WebRTC is enabled, the following ports are opened via UFW on TURN nodes:
|
||||||
|
|
||||||
|
| Port | Protocol | Purpose |
|
||||||
|
|------|----------|---------|
|
||||||
|
| 3478 | UDP | TURN standard |
|
||||||
|
| 3478 | TCP | TURN TCP fallback (for clients behind UDP-blocking firewalls) |
|
||||||
|
| 5349 | TCP | TURNS — TURN over TLS (encrypted, works through strict firewalls/DPI) |
|
||||||
|
| 49152-65535 | UDP | TURN relay range (allocated per namespace) |
|
||||||
|
|
||||||
|
SFU ports are NOT opened in the firewall — they are WireGuard-internal only.
|
||||||
|
|
||||||
|
## Database Tables
|
||||||
|
|
||||||
|
| Table | Purpose |
|
||||||
|
|-------|---------|
|
||||||
|
| `namespace_webrtc_config` | Per-namespace WebRTC config (enabled, TURN secret, node counts) |
|
||||||
|
| `webrtc_rooms` | Room-to-SFU-node affinity |
|
||||||
|
| `webrtc_port_allocations` | SFU/TURN port tracking |
|
||||||
|
|
||||||
|
## Cold Boot Recovery
|
||||||
|
|
||||||
|
On node restart, the cluster state file (`cluster_state.json`) includes `has_sfu`, `has_turn`, and port allocation data. The restore process:
|
||||||
|
|
||||||
|
1. Core services restore first: RQLite → Olric → Gateway
|
||||||
|
2. If `has_turn` is set: fetches TURN shared secret from DB, spawns TURN
|
||||||
|
3. If `has_sfu` is set: fetches WebRTC config from DB, spawns SFU with TURN server list
|
||||||
|
|
||||||
|
If the DB is unavailable during restore, SFU/TURN restoration is skipped with a warning log. They will be restored on the next successful DB connection.
|
||||||
42
core/docs/examples/functions/build.sh
Executable file
42
core/docs/examples/functions/build.sh
Executable file
@ -0,0 +1,42 @@
|
|||||||
|
#!/bin/bash
# Build all example functions to WASM using TinyGo
#
# Prerequisites:
# - TinyGo installed: https://tinygo.org/getting-started/install/
# - On macOS: brew install tinygo
#
# Usage: ./build.sh

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
OUTPUT_DIR="$SCRIPT_DIR/bin"

# Check if TinyGo is installed
if ! command -v tinygo &> /dev/null; then
    echo "Error: TinyGo is not installed."
    echo "Install it with: brew install tinygo (macOS) or see https://tinygo.org/getting-started/install/"
    exit 1
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"

echo "Building example functions to WASM..."
echo

# Build each sibling directory that contains a main.go.
# Each build runs in a subshell so the script's own working directory is
# never mutated (previously the loop cd'd into each dir and stayed there,
# leaving the script in the last function's directory for any later code).
for dir in "$SCRIPT_DIR"/*/; do
    if [ -f "$dir/main.go" ]; then
        name=$(basename "$dir")
        echo "Building $name..."
        (
            cd "$dir"
            tinygo build -o "$OUTPUT_DIR/$name.wasm" -target wasi main.go
        )
        echo "  -> $OUTPUT_DIR/$name.wasm"
    fi
done

echo
echo "Done! WASM files are in $OUTPUT_DIR/"
ls -lh "$OUTPUT_DIR"/*.wasm 2>/dev/null || echo "No WASM files built."
|
||||||
|
|
||||||
66
core/docs/examples/functions/counter/main.go
Normal file
66
core/docs/examples/functions/counter/main.go
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// Example: Counter function with Olric cache
// This function demonstrates using the distributed cache to maintain state.
// Compile with: tinygo build -o counter.wasm -target wasi main.go
//
// Note: This example shows the CONCEPT. Actual host function integration
// requires the host function bindings to be exposed to the WASM module.
package main

import (
	"encoding/json"
	"io"
	"os"
)

// main reads a JSON invocation payload from stdin and writes a JSON
// response to stdout — the contract the Orama serverless engine uses.
func main() {
	// Read the entire payload from stdin. io.ReadAll replaces the original
	// hand-rolled 1 KiB read loop; as before, a read error simply leaves us
	// with whatever bytes arrived before it, so the error is ignored.
	input, _ := io.ReadAll(os.Stdin)

	// Parse input
	var payload struct {
		Action    string `json:"action"` // "increment", "decrement", "get", "reset"
		CounterID string `json:"counter_id"`
	}
	if err := json.Unmarshal(input, &payload); err != nil {
		// Malformed payload: report the error as JSON and exit cleanly.
		response := map[string]interface{}{
			"error": "Invalid JSON input",
		}
		output, _ := json.Marshal(response)
		os.Stdout.Write(output)
		return
	}

	// Fall back to a shared default counter when none is specified.
	if payload.CounterID == "" {
		payload.CounterID = "default"
	}

	// NOTE: In the real implementation, this would use host functions:
	// - cache_get(key) to read the counter
	// - cache_put(key, value, ttl) to write the counter
	//
	// For this example, we just simulate the logic:
	response := map[string]interface{}{
		"counter_id": payload.CounterID,
		"action":     payload.Action,
		"message":    "Counter operations require cache host functions",
		"example": map[string]interface{}{
			"increment": "cache_put('counter:' + counter_id, current + 1)",
			"decrement": "cache_put('counter:' + counter_id, current - 1)",
			"get":       "cache_get('counter:' + counter_id)",
			"reset":     "cache_put('counter:' + counter_id, 0)",
		},
	}

	output, _ := json.Marshal(response)
	os.Stdout.Write(output)
}
|
||||||
|
|
||||||
50
core/docs/examples/functions/echo/main.go
Normal file
50
core/docs/examples/functions/echo/main.go
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
// Example: Echo function
// This is a simple serverless function that echoes back the input.
// Compile with: tinygo build -o echo.wasm -target wasi main.go
package main

import (
	"encoding/json"
	"io"
	"os"
)

// Input is read from stdin, output is written to stdout.
// The Orama serverless engine passes the invocation payload via stdin
// and expects the response on stdout.

func main() {
	// Read all input from stdin. io.ReadAll replaces the original manual
	// 1 KiB read loop; a read error leaves us with the bytes received so
	// far, matching the original best-effort behavior.
	input, _ := io.ReadAll(os.Stdin)

	// Parse input as JSON (optional - could also just echo raw bytes)
	var payload map[string]interface{}
	if err := json.Unmarshal(input, &payload); err != nil {
		// Not JSON, just echo the raw input
		response := map[string]interface{}{
			"echo": string(input),
		}
		output, _ := json.Marshal(response)
		os.Stdout.Write(output)
		return
	}

	// Create response
	response := map[string]interface{}{
		"echo":    payload,
		"message": "Echo function received your input!",
	}

	output, _ := json.Marshal(response)
	os.Stdout.Write(output)
}
|
||||||
|
|
||||||
42
core/docs/examples/functions/hello/main.go
Normal file
42
core/docs/examples/functions/hello/main.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
// Example: Hello function
// This is a simple serverless function that returns a greeting.
// Compile with: tinygo build -o hello.wasm -target wasi main.go
package main

import (
	"encoding/json"
	"io"
	"os"
)

// main reads an optional {"name": ...} JSON payload from stdin and writes
// a greeting JSON object to stdout.
func main() {
	// Read the whole payload from stdin. io.ReadAll replaces the original
	// manual 1 KiB read loop; a read error leaves us with the bytes
	// received so far, matching the original behavior.
	input, _ := io.ReadAll(os.Stdin)

	// Parse input to get name; fall back to "World" on bad JSON or an
	// empty/missing name field.
	var payload struct {
		Name string `json:"name"`
	}
	if err := json.Unmarshal(input, &payload); err != nil || payload.Name == "" {
		payload.Name = "World"
	}

	// Create greeting response
	response := map[string]interface{}{
		"greeting": "Hello, " + payload.Name + "!",
		"message":  "This is a serverless function running on Orama Network",
	}

	output, _ := json.Marshal(response)
	os.Stdout.Write(output)
}
|
||||||
|
|
||||||
556
core/e2e/cluster/namespace_cluster_test.go
Normal file
556
core/e2e/cluster/namespace_cluster_test.go
Normal file
@ -0,0 +1,556 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package cluster_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// STRICT NAMESPACE CLUSTER TESTS
|
||||||
|
// These tests FAIL if things don't work. No t.Skip() for expected functionality.
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
// TestNamespaceCluster_FullProvisioning is a STRICT test that verifies the complete
// namespace cluster provisioning flow. This test FAILS if any component doesn't work.
//
// Flow: provision a fresh namespace, then (1) query its cluster status and
// (2) create + fetch a deployment on it. Relies on e2e test helpers
// (LoadTestEnvWithNamespace, CreateTestDeployment, DeleteDeployment) and a
// `min` helper defined elsewhere in this package.
func TestNamespaceCluster_FullProvisioning(t *testing.T) {
	// Generate unique namespace name (UnixNano keeps concurrent runs from colliding)
	newNamespace := fmt.Sprintf("e2e-cluster-%d", time.Now().UnixNano())

	env, err := e2e.LoadTestEnvWithNamespace(newNamespace)
	require.NoError(t, err, "FATAL: Failed to create test environment for namespace %s", newNamespace)
	require.NotEmpty(t, env.APIKey, "FATAL: No API key received - namespace provisioning failed")

	t.Logf("Created namespace: %s", newNamespace)
	// Only log a key prefix, never the full credential.
	t.Logf("API Key: %s...", env.APIKey[:min(20, len(env.APIKey))])

	// Get cluster status to verify provisioning
	t.Run("Cluster status shows ready", func(t *testing.T) {
		// Query the namespace cluster status
		req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/namespace/status?name="+newNamespace, nil)
		req.Header.Set("Authorization", "Bearer "+env.APIKey)

		resp, err := env.HTTPClient.Do(req)
		require.NoError(t, err, "Failed to query cluster status")
		defer resp.Body.Close()

		bodyBytes, _ := io.ReadAll(resp.Body)
		t.Logf("Cluster status response: %s", string(bodyBytes))

		// If status endpoint exists and returns cluster info, verify it.
		// Non-200 responses are tolerated here — presumably the endpoint may
		// not exist in all environments; TODO confirm against the gateway API.
		if resp.StatusCode == http.StatusOK {
			var result map[string]interface{}
			if err := json.Unmarshal(bodyBytes, &result); err == nil {
				status, _ := result["status"].(string)
				// "default" is also accepted — namespace may ride the default cluster.
				if status != "" && status != "ready" && status != "default" {
					t.Errorf("FAIL: Cluster status is '%s', expected 'ready'", status)
				}
			}
		}
	})

	// Verify we can use the namespace for deployments
	t.Run("Deployments work on namespace", func(t *testing.T) {
		tarballPath := filepath.Join("../../testdata/apps/react-app")
		if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
			t.Skip("Test tarball not found - skipping deployment test")
		}

		deploymentName := fmt.Sprintf("cluster-test-%d", time.Now().Unix())
		deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
		require.NotEmpty(t, deploymentID, "FAIL: Deployment creation failed on namespace cluster")

		t.Logf("Created deployment %s (ID: %s) on namespace %s", deploymentName, deploymentID, newNamespace)

		// Cleanup (skipped when SkipCleanup is set, e.g. for debugging)
		defer func() {
			if !env.SkipCleanup {
				e2e.DeleteDeployment(t, env, deploymentID)
			}
		}()

		// Verify deployment is accessible
		req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
		req.Header.Set("Authorization", "Bearer "+env.APIKey)

		resp, err := env.HTTPClient.Do(req)
		require.NoError(t, err, "Failed to get deployment")
		defer resp.Body.Close()

		require.Equal(t, http.StatusOK, resp.StatusCode, "FAIL: Cannot retrieve deployment from namespace cluster")
	})
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_RQLiteHealth verifies that namespace RQLite cluster is running
|
||||||
|
// and accepting connections. This test FAILS if RQLite is not accessible.
|
||||||
|
func TestNamespaceCluster_RQLiteHealth(t *testing.T) {
|
||||||
|
t.Run("Check namespace port range for RQLite", func(t *testing.T) {
|
||||||
|
foundRQLite := false
|
||||||
|
var healthyPorts []int
|
||||||
|
var unhealthyPorts []int
|
||||||
|
|
||||||
|
// Check first few port blocks
|
||||||
|
for portStart := 10000; portStart <= 10015; portStart += 5 {
|
||||||
|
rqlitePort := portStart // RQLite HTTP is first port in block
|
||||||
|
if isPortListening("localhost", rqlitePort) {
|
||||||
|
t.Logf("Found RQLite instance on port %d", rqlitePort)
|
||||||
|
foundRQLite = true
|
||||||
|
|
||||||
|
// Verify it responds to health check
|
||||||
|
healthURL := fmt.Sprintf("http://localhost:%d/status", rqlitePort)
|
||||||
|
healthResp, err := http.Get(healthURL)
|
||||||
|
if err == nil {
|
||||||
|
defer healthResp.Body.Close()
|
||||||
|
if healthResp.StatusCode == http.StatusOK {
|
||||||
|
healthyPorts = append(healthyPorts, rqlitePort)
|
||||||
|
t.Logf(" ✓ RQLite on port %d is healthy", rqlitePort)
|
||||||
|
} else {
|
||||||
|
unhealthyPorts = append(unhealthyPorts, rqlitePort)
|
||||||
|
t.Errorf("FAIL: RQLite on port %d returned status %d", rqlitePort, healthResp.StatusCode)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
unhealthyPorts = append(unhealthyPorts, rqlitePort)
|
||||||
|
t.Errorf("FAIL: RQLite on port %d health check failed: %v", rqlitePort, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !foundRQLite {
|
||||||
|
t.Log("No namespace RQLite instances found in port range 10000-10015")
|
||||||
|
t.Log("This is expected if no namespaces have been provisioned yet")
|
||||||
|
} else {
|
||||||
|
t.Logf("Summary: %d healthy, %d unhealthy RQLite instances", len(healthyPorts), len(unhealthyPorts))
|
||||||
|
require.Empty(t, unhealthyPorts, "FAIL: Some RQLite instances are unhealthy")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_OlricHealth verifies that namespace Olric cluster is running
|
||||||
|
// and accepting connections.
|
||||||
|
func TestNamespaceCluster_OlricHealth(t *testing.T) {
|
||||||
|
t.Run("Check namespace port range for Olric", func(t *testing.T) {
|
||||||
|
foundOlric := false
|
||||||
|
foundCount := 0
|
||||||
|
|
||||||
|
// Check first few port blocks - Olric memberlist is port_start + 3
|
||||||
|
for portStart := 10000; portStart <= 10015; portStart += 5 {
|
||||||
|
olricMemberlistPort := portStart + 3
|
||||||
|
if isPortListening("localhost", olricMemberlistPort) {
|
||||||
|
t.Logf("Found Olric memberlist on port %d", olricMemberlistPort)
|
||||||
|
foundOlric = true
|
||||||
|
foundCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !foundOlric {
|
||||||
|
t.Log("No namespace Olric instances found in port range 10003-10018")
|
||||||
|
t.Log("This is expected if no namespaces have been provisioned yet")
|
||||||
|
} else {
|
||||||
|
t.Logf("Found %d Olric memberlist ports accepting connections", foundCount)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_GatewayHealth verifies that namespace Gateway instances are running.
|
||||||
|
// This test FAILS if gateway binary exists but gateways don't spawn.
|
||||||
|
func TestNamespaceCluster_GatewayHealth(t *testing.T) {
|
||||||
|
// Check if gateway binary exists
|
||||||
|
gatewayBinaryPaths := []string{
|
||||||
|
"./bin/orama",
|
||||||
|
"../bin/orama",
|
||||||
|
"/usr/local/bin/orama",
|
||||||
|
}
|
||||||
|
|
||||||
|
var gatewayBinaryExists bool
|
||||||
|
var foundPath string
|
||||||
|
for _, path := range gatewayBinaryPaths {
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
gatewayBinaryExists = true
|
||||||
|
foundPath = path
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !gatewayBinaryExists {
|
||||||
|
t.Log("Gateway binary not found - namespace gateways will not spawn")
|
||||||
|
t.Log("Run 'make build' to build the gateway binary")
|
||||||
|
t.Log("Checked paths:", gatewayBinaryPaths)
|
||||||
|
// This is a FAILURE if we expect gateway to work
|
||||||
|
t.Error("FAIL: Gateway binary not found. Run 'make build' first.")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Gateway binary found at: %s", foundPath)
|
||||||
|
|
||||||
|
t.Run("Check namespace port range for Gateway", func(t *testing.T) {
|
||||||
|
foundGateway := false
|
||||||
|
var healthyPorts []int
|
||||||
|
var unhealthyPorts []int
|
||||||
|
|
||||||
|
// Check first few port blocks - Gateway HTTP is port_start + 4
|
||||||
|
for portStart := 10000; portStart <= 10015; portStart += 5 {
|
||||||
|
gatewayPort := portStart + 4
|
||||||
|
if isPortListening("localhost", gatewayPort) {
|
||||||
|
t.Logf("Found Gateway instance on port %d", gatewayPort)
|
||||||
|
foundGateway = true
|
||||||
|
|
||||||
|
// Verify it responds to health check
|
||||||
|
healthURL := fmt.Sprintf("http://localhost:%d/v1/health", gatewayPort)
|
||||||
|
healthResp, err := http.Get(healthURL)
|
||||||
|
if err == nil {
|
||||||
|
defer healthResp.Body.Close()
|
||||||
|
if healthResp.StatusCode == http.StatusOK {
|
||||||
|
healthyPorts = append(healthyPorts, gatewayPort)
|
||||||
|
t.Logf(" ✓ Gateway on port %d is healthy", gatewayPort)
|
||||||
|
} else {
|
||||||
|
unhealthyPorts = append(unhealthyPorts, gatewayPort)
|
||||||
|
t.Errorf("FAIL: Gateway on port %d returned status %d", gatewayPort, healthResp.StatusCode)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
unhealthyPorts = append(unhealthyPorts, gatewayPort)
|
||||||
|
t.Errorf("FAIL: Gateway on port %d health check failed: %v", gatewayPort, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !foundGateway {
|
||||||
|
t.Log("No namespace Gateway instances found in port range 10004-10019")
|
||||||
|
t.Log("This is expected if no namespaces have been provisioned yet")
|
||||||
|
} else {
|
||||||
|
t.Logf("Summary: %d healthy, %d unhealthy Gateway instances", len(healthyPorts), len(unhealthyPorts))
|
||||||
|
require.Empty(t, unhealthyPorts, "FAIL: Some Gateway instances are unhealthy")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_ProvisioningCreatesProcesses creates a new namespace and
// verifies that actual processes are spawned. This is the STRICTEST test.
//
// Strategy: snapshot listening ports in the namespace range (10000-10099)
// before and after provisioning, then classify each newly-opened port by its
// offset within a 5-port block (0=RQLite HTTP, 1=RQLite Raft, 2=Olric HTTP,
// 3=Olric memberlist, 4=Gateway HTTP) and health-check the RQLite/Olric ones.
// Uses helpers defined elsewhere in this package: getListeningPortsInRange,
// diffPorts, filterPortsByOffset.
func TestNamespaceCluster_ProvisioningCreatesProcesses(t *testing.T) {
	newNamespace := fmt.Sprintf("e2e-strict-%d", time.Now().UnixNano())

	// Record ports before provisioning
	portsBefore := getListeningPortsInRange(10000, 10099)
	t.Logf("Ports in use before provisioning: %v", portsBefore)

	// Create namespace
	env, err := e2e.LoadTestEnvWithNamespace(newNamespace)
	require.NoError(t, err, "FATAL: Failed to create namespace")
	require.NotEmpty(t, env.APIKey, "FATAL: No API key - provisioning failed")

	t.Logf("Namespace '%s' created successfully", newNamespace)

	// Wait a moment for processes to fully start
	// NOTE(review): fixed 3s sleep — could flake on slow hosts; a poll loop
	// would be more robust.
	time.Sleep(3 * time.Second)

	// Record ports after provisioning
	portsAfter := getListeningPortsInRange(10000, 10099)
	t.Logf("Ports in use after provisioning: %v", portsAfter)

	// Check if new ports were opened
	newPorts := diffPorts(portsBefore, portsAfter)
	sort.Ints(newPorts)
	t.Logf("New ports opened: %v", newPorts)

	t.Run("New ports allocated for namespace cluster", func(t *testing.T) {
		if len(newPorts) == 0 {
			// This might be OK for default namespace or if using global cluster
			t.Log("No new ports detected")
			t.Log("Possible reasons:")
			t.Log("  - Namespace uses default cluster (expected for 'default')")
			t.Log("  - Cluster already existed from previous test")
			t.Log("  - Provisioning is handled differently in this environment")
		} else {
			t.Logf("SUCCESS: %d new ports opened for namespace cluster", len(newPorts))

			// Verify the ports follow expected pattern
			for _, port := range newPorts {
				offset := (port - 10000) % 5
				switch offset {
				case 0:
					t.Logf("  Port %d: RQLite HTTP", port)
				case 1:
					t.Logf("  Port %d: RQLite Raft", port)
				case 2:
					t.Logf("  Port %d: Olric HTTP", port)
				case 3:
					t.Logf("  Port %d: Olric Memberlist", port)
				case 4:
					t.Logf("  Port %d: Gateway HTTP", port)
				}
			}
		}
	})

	t.Run("RQLite is accessible on allocated ports", func(t *testing.T) {
		rqlitePorts := filterPortsByOffset(newPorts, 0) // RQLite HTTP is offset 0
		if len(rqlitePorts) == 0 {
			t.Log("No new RQLite ports detected")
			return
		}

		for _, port := range rqlitePorts {
			healthURL := fmt.Sprintf("http://localhost:%d/status", port)
			resp, err := http.Get(healthURL)
			require.NoError(t, err, "FAIL: RQLite on port %d is not responding", port)
			// Body closed immediately — status code is still readable after close.
			resp.Body.Close()
			require.Equal(t, http.StatusOK, resp.StatusCode,
				"FAIL: RQLite on port %d returned status %d", port, resp.StatusCode)
			t.Logf("✓ RQLite on port %d is healthy", port)
		}
	})

	t.Run("Olric is accessible on allocated ports", func(t *testing.T) {
		olricPorts := filterPortsByOffset(newPorts, 3) // Olric Memberlist is offset 3
		if len(olricPorts) == 0 {
			t.Log("No new Olric ports detected")
			return
		}

		// Memberlist has no HTTP health endpoint; a successful TCP dial is
		// the liveness signal here.
		for _, port := range olricPorts {
			conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 2*time.Second)
			require.NoError(t, err, "FAIL: Olric memberlist on port %d is not responding", port)
			conn.Close()
			t.Logf("✓ Olric memberlist on port %d is accepting connections", port)
		}
	})
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_StatusEndpoint tests the /v1/namespace/status endpoint
|
||||||
|
func TestNamespaceCluster_StatusEndpoint(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
t.Run("Status endpoint returns 404 for non-existent cluster", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/namespace/status?id=non-existent-id", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Request should not fail")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
require.Equal(t, http.StatusNotFound, resp.StatusCode,
|
||||||
|
"FAIL: Should return 404 for non-existent cluster, got %d", resp.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_CrossNamespaceAccess verifies namespace isolation
//
// Provisions two namespaces and asserts (1) they receive distinct API keys
// and (2) namespace A's deployment listing never contains a deployment
// tagged with namespace B. Uses the package-level `min` helper.
func TestNamespaceCluster_CrossNamespaceAccess(t *testing.T) {
	// time.Unix() granularity is 1s — parallel runs within the same second
	// would collide on names; presumably acceptable for this suite.
	nsA := fmt.Sprintf("ns-a-%d", time.Now().Unix())
	nsB := fmt.Sprintf("ns-b-%d", time.Now().Unix())

	envA, err := e2e.LoadTestEnvWithNamespace(nsA)
	require.NoError(t, err, "FAIL: Cannot create namespace A")

	envB, err := e2e.LoadTestEnvWithNamespace(nsB)
	require.NoError(t, err, "FAIL: Cannot create namespace B")

	// Verify both namespaces have different API keys
	require.NotEqual(t, envA.APIKey, envB.APIKey, "FAIL: Namespaces should have different API keys")
	// Only key prefixes are logged, never the full credentials.
	t.Logf("Namespace A API key: %s...", envA.APIKey[:min(10, len(envA.APIKey))])
	t.Logf("Namespace B API key: %s...", envB.APIKey[:min(10, len(envB.APIKey))])

	t.Run("API keys are namespace-scoped", func(t *testing.T) {
		// Namespace A should not see namespace B's resources
		req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/list", nil)
		req.Header.Set("Authorization", "Bearer "+envA.APIKey)

		resp, err := envA.HTTPClient.Do(req)
		require.NoError(t, err, "Request failed")
		defer resp.Body.Close()

		require.Equal(t, http.StatusOK, resp.StatusCode, "Should list deployments")

		var result map[string]interface{}
		bodyBytes, _ := io.ReadAll(resp.Body)
		json.Unmarshal(bodyBytes, &result)

		// Scan A's visible deployments; any entry tagged with B's namespace
		// would mean isolation is broken.
		deployments, _ := result["deployments"].([]interface{})
		for _, d := range deployments {
			dep, ok := d.(map[string]interface{})
			if !ok {
				continue
			}
			ns, _ := dep["namespace"].(string)
			require.NotEqual(t, nsB, ns,
				"FAIL: Namespace A sees Namespace B deployments - isolation broken!")
		}
	})
}
|
||||||
|
|
||||||
|
// TestDeployment_SubdomainFormat tests deployment subdomain format
|
||||||
|
func TestDeployment_SubdomainFormat(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
|
||||||
|
t.Skip("Test tarball not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("subdomain-test-%d", time.Now().UnixNano())
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID, "FAIL: Deployment creation failed")
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Deployment has subdomain with random suffix", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Failed to get deployment")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode, "Should get deployment")
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
json.Unmarshal(bodyBytes, &result)
|
||||||
|
|
||||||
|
deployment, ok := result["deployment"].(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
deployment = result
|
||||||
|
}
|
||||||
|
|
||||||
|
subdomain, _ := deployment["subdomain"].(string)
|
||||||
|
if subdomain != "" {
|
||||||
|
require.True(t, strings.HasPrefix(subdomain, deploymentName),
|
||||||
|
"FAIL: Subdomain '%s' should start with deployment name '%s'", subdomain, deploymentName)
|
||||||
|
|
||||||
|
suffix := strings.TrimPrefix(subdomain, deploymentName+"-")
|
||||||
|
if suffix != subdomain { // There was a dash separator
|
||||||
|
require.Equal(t, 6, len(suffix),
|
||||||
|
"FAIL: Random suffix should be 6 characters, got %d (%s)", len(suffix), suffix)
|
||||||
|
}
|
||||||
|
t.Logf("Deployment subdomain: %s", subdomain)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_PortAllocation tests port allocation correctness
|
||||||
|
func TestNamespaceCluster_PortAllocation(t *testing.T) {
|
||||||
|
t.Run("Port range is 10000-10099", func(t *testing.T) {
|
||||||
|
const portRangeStart = 10000
|
||||||
|
const portRangeEnd = 10099
|
||||||
|
const portsPerNamespace = 5
|
||||||
|
const maxNamespacesPerNode = 20
|
||||||
|
|
||||||
|
totalPorts := portRangeEnd - portRangeStart + 1
|
||||||
|
require.Equal(t, 100, totalPorts, "Port range should be 100 ports")
|
||||||
|
|
||||||
|
expectedMax := totalPorts / portsPerNamespace
|
||||||
|
require.Equal(t, maxNamespacesPerNode, expectedMax,
|
||||||
|
"Max namespaces per node calculation mismatch")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Port assignments are sequential within block", func(t *testing.T) {
|
||||||
|
portStart := 10000
|
||||||
|
ports := map[string]int{
|
||||||
|
"rqlite_http": portStart + 0,
|
||||||
|
"rqlite_raft": portStart + 1,
|
||||||
|
"olric_http": portStart + 2,
|
||||||
|
"olric_memberlist": portStart + 3,
|
||||||
|
"gateway_http": portStart + 4,
|
||||||
|
}
|
||||||
|
|
||||||
|
seen := make(map[int]bool)
|
||||||
|
for name, port := range ports {
|
||||||
|
require.False(t, seen[port], "FAIL: Port %d for %s is duplicate", port, name)
|
||||||
|
seen[port] = true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// HELPER FUNCTIONS
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
func isPortListening(host string, port int) bool {
|
||||||
|
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), 1*time.Second)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
conn.Close()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func getListeningPortsInRange(start, end int) []int {
|
||||||
|
var ports []int
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Check ports concurrently for speed
|
||||||
|
results := make(chan int, end-start+1)
|
||||||
|
for port := start; port <= end; port++ {
|
||||||
|
go func(p int) {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
results <- 0
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
if isPortListening("localhost", p) {
|
||||||
|
results <- p
|
||||||
|
} else {
|
||||||
|
results <- 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(port)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i <= end-start; i++ {
|
||||||
|
if port := <-results; port > 0 {
|
||||||
|
ports = append(ports, port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ports
|
||||||
|
}
|
||||||
|
|
||||||
|
func diffPorts(before, after []int) []int {
|
||||||
|
beforeMap := make(map[int]bool)
|
||||||
|
for _, p := range before {
|
||||||
|
beforeMap[p] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
var newPorts []int
|
||||||
|
for _, p := range after {
|
||||||
|
if !beforeMap[p] {
|
||||||
|
newPorts = append(newPorts, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return newPorts
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterPortsByOffset(ports []int, offset int) []int {
|
||||||
|
var filtered []int
|
||||||
|
for _, p := range ports {
|
||||||
|
if (p-10000)%5 == offset {
|
||||||
|
filtered = append(filtered, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filtered
|
||||||
|
}
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
447
core/e2e/cluster/namespace_isolation_test.go
Normal file
447
core/e2e/cluster/namespace_isolation_test.go
Normal file
@ -0,0 +1,447 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package cluster_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestNamespaceIsolation creates two namespaces once and runs all isolation
|
||||||
|
// subtests against them. This keeps namespace usage to 2 regardless of how
|
||||||
|
// many isolation scenarios we test.
|
||||||
|
func TestNamespaceIsolation(t *testing.T) {
|
||||||
|
envA, err := e2e.LoadTestEnvWithNamespace("namespace-a-" + fmt.Sprintf("%d", time.Now().Unix()))
|
||||||
|
require.NoError(t, err, "Failed to create namespace A environment")
|
||||||
|
|
||||||
|
envB, err := e2e.LoadTestEnvWithNamespace("namespace-b-" + fmt.Sprintf("%d", time.Now().Unix()))
|
||||||
|
require.NoError(t, err, "Failed to create namespace B environment")
|
||||||
|
|
||||||
|
t.Run("Deployments", func(t *testing.T) {
|
||||||
|
testNamespaceIsolationDeployments(t, envA, envB)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("SQLiteDatabases", func(t *testing.T) {
|
||||||
|
testNamespaceIsolationSQLiteDatabases(t, envA, envB)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("IPFSContent", func(t *testing.T) {
|
||||||
|
testNamespaceIsolationIPFSContent(t, envA, envB)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("OlricCache", func(t *testing.T) {
|
||||||
|
testNamespaceIsolationOlricCache(t, envA, envB)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testNamespaceIsolationDeployments(t *testing.T, envA, envB *e2e.E2ETestEnv) {
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
// Create deployment in namespace-a
|
||||||
|
deploymentNameA := "test-app-ns-a"
|
||||||
|
deploymentIDA := e2e.CreateTestDeployment(t, envA, deploymentNameA, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !envA.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, envA, deploymentIDA)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create deployment in namespace-b
|
||||||
|
deploymentNameB := "test-app-ns-b"
|
||||||
|
deploymentIDB := e2e.CreateTestDeployment(t, envB, deploymentNameB, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !envB.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, envB, deploymentIDB)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot list Namespace-B deployments", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/list", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode JSON")
|
||||||
|
|
||||||
|
deployments, ok := result["deployments"].([]interface{})
|
||||||
|
require.True(t, ok, "Deployments should be an array")
|
||||||
|
|
||||||
|
// Should only see namespace-a deployments
|
||||||
|
for _, d := range deployments {
|
||||||
|
dep, ok := d.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
assert.NotEqual(t, deploymentNameB, dep["name"], "Should not see namespace-b deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot see Namespace B deployments")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot access Namespace-B deployment by ID", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/get?id="+deploymentIDB, nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should return 404 or 403
|
||||||
|
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
|
||||||
|
"Should block cross-namespace access")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot access Namespace B deployment (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot delete Namespace-B deployment", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("DELETE", envA.GatewayURL+"/v1/deployments/delete?id="+deploymentIDB, nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
|
||||||
|
"Should block cross-namespace deletion")
|
||||||
|
|
||||||
|
// Verify deployment still exists for namespace-b
|
||||||
|
req2, _ := http.NewRequest("GET", envB.GatewayURL+"/v1/deployments/get?id="+deploymentIDB, nil)
|
||||||
|
req2.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
|
||||||
|
resp2, err := envB.HTTPClient.Do(req2)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp2.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Deployment should still exist in namespace B")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot delete Namespace B deployment")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testNamespaceIsolationSQLiteDatabases(t *testing.T, envA, envB *e2e.E2ETestEnv) {
|
||||||
|
// Create database in namespace-a
|
||||||
|
dbNameA := "users-db-a"
|
||||||
|
e2e.CreateSQLiteDB(t, envA, dbNameA)
|
||||||
|
defer func() {
|
||||||
|
if !envA.SkipCleanup {
|
||||||
|
e2e.DeleteSQLiteDB(t, envA, dbNameA)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create database in namespace-b
|
||||||
|
dbNameB := "users-db-b"
|
||||||
|
e2e.CreateSQLiteDB(t, envB, dbNameB)
|
||||||
|
defer func() {
|
||||||
|
if !envB.SkipCleanup {
|
||||||
|
e2e.DeleteSQLiteDB(t, envB, dbNameB)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot list Namespace-B databases", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/db/sqlite/list", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode JSON")
|
||||||
|
|
||||||
|
databases, ok := result["databases"].([]interface{})
|
||||||
|
require.True(t, ok, "Databases should be an array")
|
||||||
|
|
||||||
|
for _, db := range databases {
|
||||||
|
database, ok := db.(map[string]interface{})
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
assert.NotEqual(t, dbNameB, database["database_name"], "Should not see namespace-b database")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot see Namespace B databases")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot query Namespace-B database", func(t *testing.T) {
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"database_name": dbNameB,
|
||||||
|
"query": "SELECT * FROM users",
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/db/sqlite/query", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should block cross-namespace query")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot query Namespace B database")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-A cannot backup Namespace-B database", func(t *testing.T) {
|
||||||
|
reqBody := map[string]string{"database_name": dbNameB}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/db/sqlite/backup", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should block cross-namespace backup")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A cannot backup Namespace B database")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testNamespaceIsolationIPFSContent(t *testing.T, envA, envB *e2e.E2ETestEnv) {
|
||||||
|
// Upload file in namespace-a
|
||||||
|
cidA := e2e.UploadTestFile(t, envA, "test-file-a.txt", "Content from namespace A")
|
||||||
|
defer func() {
|
||||||
|
if !envA.SkipCleanup {
|
||||||
|
e2e.UnpinFile(t, envA, cidA)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Namespace-B cannot GET Namespace-A IPFS content", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", envB.GatewayURL+"/v1/storage/get/"+cidA, nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
|
||||||
|
"Should block cross-namespace IPFS GET")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B cannot GET Namespace A IPFS content (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-B cannot PIN Namespace-A IPFS content", func(t *testing.T) {
|
||||||
|
reqBody := map[string]string{
|
||||||
|
"cid": cidA,
|
||||||
|
"name": "stolen-content",
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/storage/pin", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
|
||||||
|
"Should block cross-namespace PIN")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B cannot PIN Namespace A IPFS content (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-B cannot UNPIN Namespace-A IPFS content", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("DELETE", envB.GatewayURL+"/v1/storage/unpin/"+cidA, nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
|
||||||
|
"Should block cross-namespace UNPIN")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B cannot UNPIN Namespace A IPFS content (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-A can list only their own IPFS pins", func(t *testing.T) {
|
||||||
|
t.Skip("List pins endpoint not implemented yet - namespace isolation enforced at GET/PIN/UNPIN levels")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testNamespaceIsolationOlricCache(t *testing.T, envA, envB *e2e.E2ETestEnv) {
|
||||||
|
dmap := "test-cache"
|
||||||
|
keyA := "user-session-123"
|
||||||
|
valueA := `{"user_id": "alice", "token": "secret-token-a"}`
|
||||||
|
|
||||||
|
t.Run("Namespace-A sets cache key", func(t *testing.T) {
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
"value": valueA,
|
||||||
|
"ttl": "300s",
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/put", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envA.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should set cache key successfully")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace A set cache key")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-B cannot GET Namespace-A cache key", func(t *testing.T) {
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should return 404 (key doesn't exist in namespace-b)
|
||||||
|
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should not find key in different namespace")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B cannot GET Namespace A cache key")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-B cannot DELETE Namespace-A cache key", func(t *testing.T) {
|
||||||
|
reqBody := map[string]string{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/delete", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Contains(t, []int{http.StatusOK, http.StatusNotFound}, resp.StatusCode)
|
||||||
|
|
||||||
|
// Verify key still exists for namespace-a
|
||||||
|
reqBody2 := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
}
|
||||||
|
bodyBytes2, _ := json.Marshal(reqBody2)
|
||||||
|
|
||||||
|
req2, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes2))
|
||||||
|
req2.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
req2.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp2, err := envA.HTTPClient.Do(req2)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp2.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Key should still exist in namespace A")
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes3, _ := io.ReadAll(resp2.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes3, &result), "Should decode result")
|
||||||
|
|
||||||
|
// Parse expected JSON string for comparison
|
||||||
|
var expectedValue map[string]interface{}
|
||||||
|
json.Unmarshal([]byte(valueA), &expectedValue)
|
||||||
|
assert.Equal(t, expectedValue, result["value"], "Value should match")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B cannot DELETE Namespace A cache key")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Namespace-B can set same key name in their namespace", func(t *testing.T) {
|
||||||
|
// Same key name, different namespace should be allowed
|
||||||
|
valueB := `{"user_id": "bob", "token": "secret-token-b"}`
|
||||||
|
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA, // Same key name as namespace-a
|
||||||
|
"value": valueB,
|
||||||
|
"ttl": "300s",
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/put", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := envB.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should set key in namespace B")
|
||||||
|
|
||||||
|
// Verify namespace-a still has their value
|
||||||
|
reqBody2 := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
}
|
||||||
|
bodyBytes2, _ := json.Marshal(reqBody2)
|
||||||
|
|
||||||
|
req2, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes2))
|
||||||
|
req2.Header.Set("Authorization", "Bearer "+envA.APIKey)
|
||||||
|
req2.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp2, _ := envA.HTTPClient.Do(req2)
|
||||||
|
defer resp2.Body.Close()
|
||||||
|
|
||||||
|
var resultA map[string]interface{}
|
||||||
|
bodyBytesA, _ := io.ReadAll(resp2.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytesA, &resultA), "Should decode result A")
|
||||||
|
|
||||||
|
// Parse expected JSON string for comparison
|
||||||
|
var expectedValueA map[string]interface{}
|
||||||
|
json.Unmarshal([]byte(valueA), &expectedValueA)
|
||||||
|
assert.Equal(t, expectedValueA, resultA["value"], "Namespace A value should be unchanged")
|
||||||
|
|
||||||
|
// Verify namespace-b has their different value
|
||||||
|
reqBody3 := map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": keyA,
|
||||||
|
}
|
||||||
|
bodyBytes3, _ := json.Marshal(reqBody3)
|
||||||
|
|
||||||
|
req3, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes3))
|
||||||
|
req3.Header.Set("Authorization", "Bearer "+envB.APIKey)
|
||||||
|
req3.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp3, _ := envB.HTTPClient.Do(req3)
|
||||||
|
defer resp3.Body.Close()
|
||||||
|
|
||||||
|
var resultB map[string]interface{}
|
||||||
|
bodyBytesB, _ := io.ReadAll(resp3.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytesB, &resultB), "Should decode result B")
|
||||||
|
|
||||||
|
// Parse expected JSON string for comparison
|
||||||
|
var expectedValueB map[string]interface{}
|
||||||
|
json.Unmarshal([]byte(valueB), &expectedValueB)
|
||||||
|
assert.Equal(t, expectedValueB, resultB["value"], "Namespace B value should be different")
|
||||||
|
|
||||||
|
t.Logf("✓ Namespace B can set same key name independently")
|
||||||
|
t.Logf(" - Namespace A value: %s", valueA)
|
||||||
|
t.Logf(" - Namespace B value: %s", valueB)
|
||||||
|
})
|
||||||
|
}
|
||||||
177
core/e2e/cluster/rqlite_failover_test.go
Normal file
177
core/e2e/cluster/rqlite_failover_test.go
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package cluster
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRQLite_ReadConsistencyLevels tests that different consistency levels work.
|
||||||
|
func TestRQLite_ReadConsistencyLevels(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, val TEXT)", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, status == http.StatusOK || status == http.StatusCreated, "create table got %d", status)
|
||||||
|
|
||||||
|
// Insert data
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s(val) VALUES ('consistency-test')", table),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, status, err = insertReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, status)
|
||||||
|
|
||||||
|
t.Run("Default consistency read", func(t *testing.T) {
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT * FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, http.StatusOK, status)
|
||||||
|
t.Logf("Default read: %s", string(body))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Strong consistency read", func(t *testing.T) {
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/query?level=strong",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT * FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, http.StatusOK, status)
|
||||||
|
t.Logf("Strong read: %s", string(body))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Weak consistency read", func(t *testing.T) {
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/query?level=weak",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT * FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, http.StatusOK, status)
|
||||||
|
t.Logf("Weak read: %s", string(body))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRQLite_WriteAfterMultipleReads verifies write-read cycles stay consistent.
|
||||||
|
func TestRQLite_WriteAfterMultipleReads(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, counter INTEGER DEFAULT 0)", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.True(t, status == http.StatusOK || status == http.StatusCreated)
|
||||||
|
|
||||||
|
// Write-read cycle 10 times
|
||||||
|
for i := 1; i <= 10; i++ {
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s(counter) VALUES (%d)", table, i),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, status, err := insertReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "insert %d failed", i)
|
||||||
|
require.Equal(t, http.StatusOK, status, "insert %d got status %d", i, status)
|
||||||
|
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) as cnt FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
body, _, _ := queryReq.Do(ctx)
|
||||||
|
t.Logf("Iteration %d: %s", i, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final verification
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: gatewayURL + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) as cnt FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, status)
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
json.Unmarshal(body, &result)
|
||||||
|
t.Logf("Final count result: %s", string(body))
|
||||||
|
}
|
||||||
147
core/e2e/config.go
Normal file
147
core/e2e/config.go
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// E2EConfig holds the configuration for E2E tests, loaded from
// e2e/config.yaml via LoadE2EConfig (with defaults applied for
// missing fields).
type E2EConfig struct {
	// Mode can be "local" or "production"
	Mode string `yaml:"mode"`

	// BaseDomain is the domain used for deployment routing (e.g., "dbrs.space" or "orama.network")
	BaseDomain string `yaml:"base_domain"`

	// Servers is a list of production servers (only used when mode=production)
	Servers []ServerConfig `yaml:"servers"`

	// Nameservers is a list of nameserver hostnames (e.g., ["ns1.dbrs.space", "ns2.dbrs.space"])
	Nameservers []string `yaml:"nameservers"`

	// APIKey is the API key for production testing (auto-discovered if empty)
	APIKey string `yaml:"api_key"`
}
|
||||||
|
|
||||||
|
// ServerConfig holds configuration for a single production server.
// Credentials mirror the entries in config.yaml.example (VPS login
// details); IsNameserver marks hosts that answer DNS for the domain.
type ServerConfig struct {
	Name         string `yaml:"name"`          // human-readable server label
	IP           string `yaml:"ip"`            // server address used by tests
	User         string `yaml:"user"`          // login user for the VPS
	Password     string `yaml:"password"`      // login password for the VPS
	IsNameserver bool   `yaml:"is_nameserver"` // true when host serves DNS (see GetNameserverServers)
}
|
||||||
|
|
||||||
|
// DefaultConfig returns the default configuration
|
||||||
|
func DefaultConfig() *E2EConfig {
|
||||||
|
return &E2EConfig{
|
||||||
|
Mode: "production",
|
||||||
|
BaseDomain: "orama.network",
|
||||||
|
Servers: []ServerConfig{},
|
||||||
|
Nameservers: []string{},
|
||||||
|
APIKey: "",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadE2EConfig loads the E2E test configuration from e2e/config.yaml
|
||||||
|
// Falls back to defaults if the file doesn't exist
|
||||||
|
func LoadE2EConfig() (*E2EConfig, error) {
|
||||||
|
// Try multiple locations for the config file
|
||||||
|
configPaths := []string{
|
||||||
|
"config.yaml", // Relative to e2e directory (when running from e2e/)
|
||||||
|
"e2e/config.yaml", // Relative to project root
|
||||||
|
"../e2e/config.yaml", // From subdirectory within e2e/
|
||||||
|
}
|
||||||
|
|
||||||
|
// Also try absolute path based on working directory
|
||||||
|
if cwd, err := os.Getwd(); err == nil {
|
||||||
|
configPaths = append(configPaths, filepath.Join(cwd, "config.yaml"))
|
||||||
|
configPaths = append(configPaths, filepath.Join(cwd, "e2e", "config.yaml"))
|
||||||
|
// Go up one level if we're in a subdirectory
|
||||||
|
configPaths = append(configPaths, filepath.Join(cwd, "..", "config.yaml"))
|
||||||
|
}
|
||||||
|
|
||||||
|
var configData []byte
|
||||||
|
var readErr error
|
||||||
|
|
||||||
|
for _, path := range configPaths {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err == nil {
|
||||||
|
configData = data
|
||||||
|
break
|
||||||
|
}
|
||||||
|
readErr = err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no config file found, return defaults
|
||||||
|
if configData == nil {
|
||||||
|
// Check if running in production mode via environment variable
|
||||||
|
if os.Getenv("E2E_MODE") == "production" {
|
||||||
|
return nil, readErr // Config file required for production mode
|
||||||
|
}
|
||||||
|
return DefaultConfig(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cfg E2EConfig
|
||||||
|
if err := yaml.Unmarshal(configData, &cfg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply defaults for empty values
|
||||||
|
if cfg.Mode == "" {
|
||||||
|
cfg.Mode = "production"
|
||||||
|
}
|
||||||
|
if cfg.BaseDomain == "" {
|
||||||
|
cfg.BaseDomain = "orama.network"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsProductionMode returns true if running in production mode
|
||||||
|
func IsProductionMode() bool {
|
||||||
|
// Check environment variable first
|
||||||
|
if os.Getenv("E2E_MODE") == "production" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := LoadE2EConfig()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return cfg.Mode == "production"
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServerIPs returns a list of all server IP addresses from config
|
||||||
|
func GetServerIPs(cfg *E2EConfig) []string {
|
||||||
|
if cfg == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ips := make([]string, 0, len(cfg.Servers))
|
||||||
|
for _, server := range cfg.Servers {
|
||||||
|
if server.IP != "" {
|
||||||
|
ips = append(ips, server.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ips
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNameserverServers returns servers configured as nameservers
|
||||||
|
func GetNameserverServers(cfg *E2EConfig) []ServerConfig {
|
||||||
|
if cfg == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var nameservers []ServerConfig
|
||||||
|
for _, server := range cfg.Servers {
|
||||||
|
if server.IsNameserver {
|
||||||
|
nameservers = append(nameservers, server)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nameservers
|
||||||
|
}
|
||||||
45
core/e2e/config.yaml.example
Normal file
45
core/e2e/config.yaml.example
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
# E2E Test Configuration
|
||||||
|
#
|
||||||
|
# Copy this file to config.yaml and fill in your values.
|
||||||
|
# config.yaml is git-ignored and should contain your actual credentials.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# cp config.yaml.example config.yaml
|
||||||
|
# # Edit config.yaml with your server credentials
|
||||||
|
# go test -v -tags e2e ./e2e/...
|
||||||
|
|
||||||
|
# Test mode: "local" or "production"
|
||||||
|
# - local: Tests run against `make dev` cluster on localhost
|
||||||
|
# - production: Tests run against real VPS servers
|
||||||
|
mode: local
|
||||||
|
|
||||||
|
# Base domain for deployment routing
|
||||||
|
# - Local: orama.network (default)
|
||||||
|
# - Production: dbrs.space (or your custom domain)
|
||||||
|
base_domain: orama.network
|
||||||
|
|
||||||
|
# Production servers (only used when mode=production)
|
||||||
|
# Add your VPS servers here with their credentials
|
||||||
|
servers:
|
||||||
|
# Example:
|
||||||
|
# - name: vps-1
|
||||||
|
# ip: 1.2.3.4
|
||||||
|
# user: ubuntu
|
||||||
|
# password: "your-password-here"
|
||||||
|
# is_nameserver: true
|
||||||
|
# - name: vps-2
|
||||||
|
# ip: 5.6.7.8
|
||||||
|
# user: ubuntu
|
||||||
|
# password: "another-password"
|
||||||
|
# is_nameserver: false
|
||||||
|
|
||||||
|
# Nameserver hostnames (for DNS tests in production)
|
||||||
|
# These should match your NS records
|
||||||
|
nameservers:
|
||||||
|
# Example:
|
||||||
|
# - ns1.yourdomain.com
|
||||||
|
# - ns2.yourdomain.com
|
||||||
|
|
||||||
|
# API key for production testing
|
||||||
|
# Leave empty to auto-discover from RQLite or create fresh key
|
||||||
|
api_key: ""
|
||||||
223
core/e2e/deployments/edge_cases_test.go
Normal file
223
core/e2e/deployments/edge_cases_test.go
Normal file
@ -0,0 +1,223 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDeploy_InvalidTarball verifies that uploading an invalid/corrupt tarball
// returns a clean error (not a 500 or panic).
func TestDeploy_InvalidTarball(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err)

	// Unique name per run so repeated runs don't collide with old uploads.
	deploymentName := fmt.Sprintf("invalid-tar-%d", time.Now().Unix())

	// Hand-rolled multipart body with a fixed boundary: a "name" field plus
	// a "tarball" part whose payload is plain text rather than gzip data.
	body := &bytes.Buffer{}
	boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"

	body.WriteString("--" + boundary + "\r\n")
	body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
	body.WriteString(deploymentName + "\r\n")

	// Write invalid tarball data (random bytes, not a real gzip)
	body.WriteString("--" + boundary + "\r\n")
	body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
	body.WriteString("Content-Type: application/gzip\r\n\r\n")
	body.WriteString("this is not a valid tarball content at all!!!")
	body.WriteString("\r\n--" + boundary + "--\r\n")

	req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
	require.NoError(t, err)
	req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
	req.Header.Set("Authorization", "Bearer "+env.APIKey)

	resp, err := env.HTTPClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()

	respBody, _ := io.ReadAll(resp.Body)
	t.Logf("Status: %d, Body: %s", resp.StatusCode, string(respBody))

	// Should return an error, not 2xx (ideally 400, but server currently returns 500)
	assert.True(t, resp.StatusCode >= 400,
		"Invalid tarball should return error (got %d)", resp.StatusCode)
}
|
||||||
|
|
||||||
|
// TestDeploy_EmptyTarball verifies that uploading an empty file returns an error.
|
||||||
|
func TestDeploy_EmptyTarball(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("empty-tar-%d", time.Now().Unix())
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(deploymentName + "\r\n")
|
||||||
|
|
||||||
|
// Empty tarball
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
respBody, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Logf("Status: %d, Body: %s", resp.StatusCode, string(respBody))
|
||||||
|
|
||||||
|
assert.True(t, resp.StatusCode >= 400,
|
||||||
|
"Empty tarball should return error (got %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeploy_MissingName verifies that deploying without a name returns an error.
|
||||||
|
func TestDeploy_MissingName(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
// No name field
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
// Create tarball from directory for the "no name" test
|
||||||
|
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
|
||||||
|
if err != nil {
|
||||||
|
t.Skip("Failed to create tarball from test app")
|
||||||
|
}
|
||||||
|
body.Write(tarData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.True(t, resp.StatusCode >= 400,
|
||||||
|
"Missing name should return error (got %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeploy_ConcurrentSameName verifies that deploying two apps with the same
// name concurrently doesn't cause data corruption.
func TestDeploy_ConcurrentSameName(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err)

	deploymentName := fmt.Sprintf("concurrent-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")

	var wg sync.WaitGroup
	// Each goroutine writes only its own index, so these slices need no lock.
	results := make([]int, 2)
	ids := make([]string, 2)

	// Pre-create tarball once for both goroutines
	tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
	if err != nil {
		t.Skip("Failed to create tarball from test app")
	}

	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()

			// Each goroutine builds its own multipart body (bytes.Buffer is
			// not safe for concurrent use).
			body := &bytes.Buffer{}
			boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"

			body.WriteString("--" + boundary + "\r\n")
			body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
			body.WriteString(deploymentName + "\r\n")

			body.WriteString("--" + boundary + "\r\n")
			body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
			body.WriteString("Content-Type: application/gzip\r\n\r\n")
			body.Write(tarData)
			body.WriteString("\r\n--" + boundary + "--\r\n")

			req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
			req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
			req.Header.Set("Authorization", "Bearer "+env.APIKey)

			resp, err := env.HTTPClient.Do(req)
			if err != nil {
				// Transport failure leaves results[idx] == 0 (counted as failure).
				return
			}
			defer resp.Body.Close()

			results[idx] = resp.StatusCode

			// The response id field name varies; accept either key.
			var result map[string]interface{}
			json.NewDecoder(resp.Body).Decode(&result)
			if id, ok := result["deployment_id"].(string); ok {
				ids[idx] = id
			} else if id, ok := result["id"].(string); ok {
				ids[idx] = id
			}
		}(i)
	}

	wg.Wait()

	t.Logf("Concurrent deploy results: status1=%d status2=%d id1=%s id2=%s",
		results[0], results[1], ids[0], ids[1])

	// At least one should succeed
	successCount := 0
	for _, status := range results {
		if status == http.StatusCreated {
			successCount++
		}
	}
	assert.GreaterOrEqual(t, successCount, 1,
		"At least one concurrent deploy should succeed")

	// Cleanup
	for _, id := range ids {
		if id != "" {
			e2e.DeleteDeployment(t, env, id)
		}
	}
}
|
||||||
|
|
||||||
|
func readFileBytes(path string) ([]byte, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
return io.ReadAll(f)
|
||||||
|
}
|
||||||
308
core/e2e/deployments/go_sqlite_test.go
Normal file
308
core/e2e/deployments/go_sqlite_test.go
Normal file
@ -0,0 +1,308 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestGoBackendWithSQLite tests Go backend deployment with hosted SQLite connectivity
// 1. Create hosted SQLite database
// 2. Deploy Go backend with DATABASE_NAME env var
// 3. POST /api/notes → verify insert
// 4. GET /api/notes → verify read, DELETE /api/notes/{id} → verify delete
// 5. Cleanup (deployment and database)
func TestGoBackendWithSQLite(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	// Unique names per run so reruns don't collide with leftovers.
	deploymentName := fmt.Sprintf("go-sqlite-test-%d", time.Now().Unix())
	dbName := fmt.Sprintf("test-db-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/go-api")
	var deploymentID string

	// Cleanup after test
	defer func() {
		if !env.SkipCleanup {
			if deploymentID != "" {
				e2e.DeleteDeployment(t, env, deploymentID)
			}
			// Delete the test database
			deleteSQLiteDB(t, env, dbName)
		}
	}()

	t.Run("Create SQLite database", func(t *testing.T) {
		e2e.CreateSQLiteDB(t, env, dbName)
		t.Logf("Created database: %s", dbName)
	})

	t.Run("Deploy Go backend with DATABASE_NAME", func(t *testing.T) {
		// The deployed app reaches its database through the gateway using
		// these env vars.
		deploymentID = createGoDeployment(t, env, deploymentName, tarballPath, map[string]string{
			"DATABASE_NAME": dbName,
			"GATEWAY_URL":   env.GatewayURL,
			"API_KEY":       env.APIKey,
		})
		require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
		t.Logf("Created Go deployment: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Wait for deployment to become healthy", func(t *testing.T) {
		healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
		require.True(t, healthy, "Deployment should become healthy")
		t.Logf("Deployment is healthy")
	})

	t.Run("Test health endpoint", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		// Requests are routed by Host header through the gateway.
		domain := extractDomain(nodeURL)
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode, "Health check should return 200")

		body, _ := io.ReadAll(resp.Body)
		var health map[string]interface{}
		require.NoError(t, json.Unmarshal(body, &health))

		// Accept either spelling of a healthy status.
		assert.Contains(t, []string{"healthy", "ok"}, health["status"])
		t.Logf("Health response: %+v", health)
	})

	t.Run("POST /api/notes - create note", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		domain := extractDomain(nodeURL)

		noteData := map[string]string{
			"title":   "Test Note",
			"content": "This is a test note",
		}
		body, _ := json.Marshal(noteData)

		// Hit the gateway URL but set req.Host so the router dispatches
		// to this deployment.
		req, err := http.NewRequest("POST", env.GatewayURL+"/api/notes", bytes.NewBuffer(body))
		require.NoError(t, err)
		req.Header.Set("Content-Type", "application/json")
		req.Host = domain

		resp, err := env.HTTPClient.Do(req)
		require.NoError(t, err)
		defer resp.Body.Close()

		assert.Equal(t, http.StatusCreated, resp.StatusCode, "Should create note successfully")

		var note map[string]interface{}
		require.NoError(t, json.NewDecoder(resp.Body).Decode(&note))

		assert.Equal(t, "Test Note", note["title"])
		assert.Equal(t, "This is a test note", note["content"])
		t.Logf("Created note: %+v", note)
	})

	t.Run("GET /api/notes - list notes", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		domain := extractDomain(nodeURL)
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/notes")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode)

		var notes []map[string]interface{}
		require.NoError(t, json.NewDecoder(resp.Body).Decode(&notes))

		assert.GreaterOrEqual(t, len(notes), 1, "Should have at least one note")

		// The note inserted in the previous subtest must be present.
		found := false
		for _, note := range notes {
			if note["title"] == "Test Note" {
				found = true
				break
			}
		}
		assert.True(t, found, "Test note should be in the list")
		t.Logf("Notes count: %d", len(notes))
	})

	t.Run("DELETE /api/notes - delete note", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		domain := extractDomain(nodeURL)

		// First get the note ID
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/notes")
		defer resp.Body.Close()

		var notes []map[string]interface{}
		require.NoError(t, json.NewDecoder(resp.Body).Decode(&notes))

		// JSON numbers decode as float64; convert to int for the URL.
		var noteID int
		for _, note := range notes {
			if note["title"] == "Test Note" {
				noteID = int(note["id"].(float64))
				break
			}
		}
		require.NotZero(t, noteID, "Should find test note ID")

		req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/api/notes/%d", env.GatewayURL, noteID), nil)
		require.NoError(t, err)
		req.Host = domain

		deleteResp, err := env.HTTPClient.Do(req)
		require.NoError(t, err)
		defer deleteResp.Body.Close()

		assert.Equal(t, http.StatusOK, deleteResp.StatusCode, "Should delete note successfully")
		t.Logf("Deleted note ID: %d", noteID)
	})
}
|
||||||
|
|
||||||
|
// createGoDeployment creates a Go backend deployment with environment variables.
// If tarballPath is a directory, the app is cross-compiled for linux/amd64
// (static, CGO disabled) and tarred on the fly; otherwise the path is read
// as a prebuilt tarball. The multipart upload carries the deployment name,
// one "env_<KEY>" field per env var, and the tarball. Returns the new
// deployment ID; any failure aborts the calling test via t.Fatalf.
func createGoDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string, envVars map[string]string) string {
	t.Helper()

	var fileData []byte
	info, err := os.Stat(tarballPath)
	if err != nil {
		t.Fatalf("failed to stat tarball path: %v", err)
	}
	if info.IsDir() {
		// Build Go binary for linux/amd64, then tar it
		tmpDir, err := os.MkdirTemp("", "go-deploy-*")
		if err != nil {
			t.Fatalf("failed to create temp dir: %v", err)
		}
		defer os.RemoveAll(tmpDir)

		binaryPath := filepath.Join(tmpDir, "app")
		buildCmd := exec.Command("go", "build", "-o", binaryPath, ".")
		buildCmd.Dir = tarballPath
		// Target the deployment hosts; CGO_ENABLED=0 yields a static binary.
		buildCmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=amd64", "CGO_ENABLED=0")
		if out, err := buildCmd.CombinedOutput(); err != nil {
			t.Fatalf("failed to build Go app: %v\n%s", err, string(out))
		}

		// Tar the temp dir (containing only the binary) to stdout.
		fileData, err = exec.Command("tar", "-czf", "-", "-C", tmpDir, ".").Output()
		if err != nil {
			t.Fatalf("failed to create tarball: %v", err)
		}
	} else {
		file, err := os.Open(tarballPath)
		if err != nil {
			t.Fatalf("failed to open tarball: %v", err)
		}
		defer file.Close()
		fileData, _ = io.ReadAll(file)
	}

	// Create multipart form (hand-rolled with a fixed boundary)
	body := &bytes.Buffer{}
	boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"

	// Write name field
	body.WriteString("--" + boundary + "\r\n")
	body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
	body.WriteString(name + "\r\n")

	// Write environment variables as env_<KEY> form fields
	for key, value := range envVars {
		body.WriteString("--" + boundary + "\r\n")
		body.WriteString(fmt.Sprintf("Content-Disposition: form-data; name=\"env_%s\"\r\n\r\n", key))
		body.WriteString(value + "\r\n")
	}

	// Write tarball file
	body.WriteString("--" + boundary + "\r\n")
	body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
	body.WriteString("Content-Type: application/gzip\r\n\r\n")

	body.Write(fileData)
	body.WriteString("\r\n--" + boundary + "--\r\n")

	req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/go/upload", body)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
	req.Header.Set("Authorization", "Bearer "+env.APIKey)

	resp, err := env.HTTPClient.Do(req)
	if err != nil {
		t.Fatalf("failed to execute request: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		bodyBytes, _ := io.ReadAll(resp.Body)
		t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
	}

	var result map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	// The response id field name varies; accept either key.
	if id, ok := result["deployment_id"].(string); ok {
		return id
	}
	if id, ok := result["id"].(string); ok {
		return id
	}
	t.Fatalf("Deployment response missing id field: %+v", result)
	return ""
}
|
||||||
|
|
||||||
|
// deleteSQLiteDB deletes a SQLite database
|
||||||
|
func deleteSQLiteDB(t *testing.T, env *e2e.E2ETestEnv, dbName string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
req, err := http.NewRequest("DELETE", env.GatewayURL+"/v1/db/"+dbName, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("warning: failed to create delete request: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("warning: failed to delete database: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Logf("warning: delete database returned status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
264
core/e2e/deployments/nextjs_ssr_test.go
Normal file
264
core/e2e/deployments/nextjs_ssr_test.go
Normal file
@ -0,0 +1,264 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestNextJSDeployment_SSR tests Next.js deployment with SSR and API routes
|
||||||
|
// 1. Deploy Next.js app
|
||||||
|
// 2. Test SSR page (verify server-rendered HTML)
|
||||||
|
// 3. Test API routes (/api/hello, /api/data)
|
||||||
|
// 4. Test static assets
|
||||||
|
// 5. Cleanup
|
||||||
|
func TestNextJSDeployment_SSR(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("nextjs-ssr-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/nextjs-ssr.tar.gz")
|
||||||
|
var deploymentID string
|
||||||
|
|
||||||
|
// Check if tarball exists
|
||||||
|
if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
|
||||||
|
t.Skip("Next.js SSR tarball not found at " + tarballPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup after test
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup && deploymentID != "" {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Deploy Next.js SSR app", func(t *testing.T) {
|
||||||
|
deploymentID = createNextJSDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
|
||||||
|
t.Logf("Created Next.js deployment: %s (ID: %s)", deploymentName, deploymentID)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Wait for deployment to become healthy", func(t *testing.T) {
|
||||||
|
healthy := e2e.WaitForHealthy(t, env, deploymentID, 120*time.Second)
|
||||||
|
require.True(t, healthy, "Deployment should become healthy")
|
||||||
|
t.Logf("Deployment is healthy")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Verify deployment in database", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
|
||||||
|
assert.Equal(t, deploymentName, deployment["name"], "Deployment name should match")
|
||||||
|
|
||||||
|
deploymentType, ok := deployment["type"].(string)
|
||||||
|
require.True(t, ok, "Type should be a string")
|
||||||
|
assert.Contains(t, deploymentType, "nextjs", "Type should be nextjs")
|
||||||
|
|
||||||
|
t.Logf("Deployment type: %s", deploymentType)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test SSR page - verify server-rendered HTML", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := extractDomain(nodeURL)
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "SSR page should return 200")
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err, "Should read response body")
|
||||||
|
bodyStr := string(body)
|
||||||
|
|
||||||
|
// Verify HTML is server-rendered (contains actual content, not just loading state)
|
||||||
|
assert.Contains(t, bodyStr, "Orama Network Next.js Test", "Should contain app title")
|
||||||
|
assert.Contains(t, bodyStr, "Server-Side Rendering Test", "Should contain SSR test marker")
|
||||||
|
assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Should be HTML content")
|
||||||
|
|
||||||
|
t.Logf("SSR page loaded successfully")
|
||||||
|
t.Logf("Content-Type: %s", resp.Header.Get("Content-Type"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test API route - /api/hello", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := extractDomain(nodeURL)
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/hello")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "API route should return 200")
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Should decode JSON response")
|
||||||
|
|
||||||
|
assert.Contains(t, result["message"], "Hello", "Should contain hello message")
|
||||||
|
assert.NotEmpty(t, result["timestamp"], "Should have timestamp")
|
||||||
|
|
||||||
|
t.Logf("API /hello response: %+v", result)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test API route - /api/data", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := extractDomain(nodeURL)
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/data")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "API data route should return 200")
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Should decode JSON response")
|
||||||
|
|
||||||
|
// Just verify it returns valid JSON
|
||||||
|
t.Logf("API /data response: %+v", result)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test static asset - _next directory", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := extractDomain(nodeURL)
|
||||||
|
|
||||||
|
// First, get the main page to find the actual static asset path
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
bodyStr := string(body)
|
||||||
|
|
||||||
|
// Look for _next/static references in the HTML
|
||||||
|
if strings.Contains(bodyStr, "_next/static") {
|
||||||
|
t.Logf("Found _next/static references in HTML")
|
||||||
|
|
||||||
|
// Try to fetch a common static chunk
|
||||||
|
// The exact path depends on Next.js build output
|
||||||
|
// We'll just verify the _next directory structure is accessible
|
||||||
|
chunkResp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/_next/static/chunks/main.js")
|
||||||
|
defer chunkResp.Body.Close()
|
||||||
|
|
||||||
|
// It's OK if specific files don't exist (they have hashed names)
|
||||||
|
// Just verify we don't get a 500 error
|
||||||
|
assert.NotEqual(t, http.StatusInternalServerError, chunkResp.StatusCode,
|
||||||
|
"Static asset request should not cause server error")
|
||||||
|
|
||||||
|
t.Logf("Static asset request status: %d", chunkResp.StatusCode)
|
||||||
|
} else {
|
||||||
|
t.Logf("No _next/static references found (may be using different bundling)")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test 404 handling", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := extractDomain(nodeURL)
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/nonexistent-page-xyz")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Next.js should handle 404 gracefully
|
||||||
|
// Could be 404 or 200 depending on catch-all routes
|
||||||
|
assert.Contains(t, []int{200, 404}, resp.StatusCode,
|
||||||
|
"Should return either 200 (catch-all) or 404")
|
||||||
|
|
||||||
|
t.Logf("404 handling: status=%d", resp.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// createNextJSDeployment creates a Next.js deployment
|
||||||
|
func createNextJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
file, err := os.Open(tarballPath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to open tarball: %v", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
// Write name field
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(name + "\r\n")
|
||||||
|
|
||||||
|
// Write ssr field (enable SSR mode)
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"ssr\"\r\n\r\n")
|
||||||
|
body.WriteString("true\r\n")
|
||||||
|
|
||||||
|
// Write tarball file
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
fileData, _ := io.ReadAll(file)
|
||||||
|
body.Write(fileData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nextjs/upload", body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
// Use a longer timeout for large Next.js uploads (can be 50MB+)
|
||||||
|
uploadClient := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := uploadClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to execute request: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusCreated {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if id, ok := result["deployment_id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
if id, ok := result["id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
t.Fatalf("Deployment response missing id field: %+v", result)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
203
core/e2e/deployments/nodejs_deployment_test.go
Normal file
203
core/e2e/deployments/nodejs_deployment_test.go
Normal file
@ -0,0 +1,203 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestNodeJSDeployment_FullFlow exercises the complete lifecycle of a Node.js
// backend deployment: upload, health-gate wait, then HTTP checks through the
// gateway using Host-header routing. Subtests share deploymentID and must run
// in order; the deferred cleanup deletes the deployment unless SkipCleanup is set.
func TestNodeJSDeployment_FullFlow(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	// Unix-timestamp suffix keeps the deployment name unique across runs.
	deploymentName := fmt.Sprintf("test-nodejs-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/node-api")
	var deploymentID string

	// Cleanup after test
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Upload Node.js backend", func(t *testing.T) {
		deploymentID = createNodeJSDeployment(t, env, deploymentName, tarballPath)

		assert.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
		t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Wait for deployment to become healthy", func(t *testing.T) {
		healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
		assert.True(t, healthy, "Deployment should become healthy within timeout")
		t.Logf("Deployment is healthy")
	})

	t.Run("Test health endpoint", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		// Get the deployment URLs (can be array of strings or map)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		// Test via Host header (localhost testing)
		resp := e2e.TestDeploymentWithHostHeader(t, env, extractDomain(nodeURL), "/health")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode, "Health check should return 200")

		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)

		var health map[string]interface{}
		require.NoError(t, json.Unmarshal(body, &health))

		// Accept either spelling of a passing health status.
		assert.Contains(t, []string{"healthy", "ok"}, health["status"],
			"Health status should be 'healthy' or 'ok'")
		t.Logf("Health check passed: %v", health)
	})

	t.Run("Test API endpoint", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}

		domain := extractDomain(nodeURL)

		// Test health endpoint (node-api app serves /health)
		// NOTE(review): this subtest hits the same /health path as the
		// previous one but asserts on the "service" field of the payload.
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode)

		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err)

		var result map[string]interface{}
		require.NoError(t, json.Unmarshal(body, &result))

		assert.NotEmpty(t, result["service"])
		t.Logf("API endpoint response: %v", result)
	})
}
|
||||||
|
|
||||||
|
func createNodeJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var fileData []byte
|
||||||
|
|
||||||
|
info, err := os.Stat(tarballPath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to stat tarball path: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
// Create tarball from directory
|
||||||
|
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
|
||||||
|
require.NoError(t, err, "Failed to create tarball from %s", tarballPath)
|
||||||
|
fileData = tarData
|
||||||
|
} else {
|
||||||
|
file, err := os.Open(tarballPath)
|
||||||
|
require.NoError(t, err, "Failed to open tarball: %s", tarballPath)
|
||||||
|
defer file.Close()
|
||||||
|
fileData, _ = io.ReadAll(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(name + "\r\n")
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
body.Write(fileData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nodejs/upload", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusCreated {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
|
||||||
|
|
||||||
|
if id, ok := result["deployment_id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
if id, ok := result["id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
t.Fatalf("Deployment response missing id field: %+v", result)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractNodeURL pulls a node URL out of a deployment API response.
// The "urls" field has appeared in two shapes: a plain list of URL strings
// (current) and a map keyed by role with a "node" entry (legacy). Returns ""
// when neither shape yields a string URL.
func extractNodeURL(t *testing.T, deployment map[string]interface{}) string {
	t.Helper()

	switch urls := deployment["urls"].(type) {
	case []interface{}:
		// Current format: take the first entry if it is a string.
		if len(urls) > 0 {
			if first, ok := urls[0].(string); ok {
				return first
			}
		}
	case map[string]interface{}:
		// Legacy format: look up the "node" role.
		if nodeURL, ok := urls["node"].(string); ok {
			return nodeURL
		}
	}

	return ""
}
|
||||||
|
|
||||||
|
// extractDomain reduces a URL such as "https://myapp.node-xyz.dbrs.space/"
// to its bare domain by stripping a leading http(s) scheme and a single
// trailing slash. Inputs without a scheme pass through unchanged.
func extractDomain(url string) string {
	host := url

	// Strip the scheme prefix. The explicit length guards (> 8 / > 7) are
	// preserved from the original: a bare "https://" is deliberately left alone.
	switch {
	case len(url) > 8 && url[:8] == "https://":
		host = url[8:]
	case len(url) > 7 && url[:7] == "http://":
		host = url[7:]
	}

	// Drop one trailing slash, if present.
	if n := len(host); n > 0 && host[n-1] == '/' {
		host = host[:n-1]
	}

	return host
}
|
||||||
352
core/e2e/deployments/replica_test.go
Normal file
352
core/e2e/deployments/replica_test.go
Normal file
@ -0,0 +1,352 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStaticReplica_CreatedOnDeploy verifies that deploying a static app
// creates replica records on a second node. Replica presence is checked via
// the deployment API when it exposes a "replicas" field; otherwise the test
// falls back to verifying that the content is served through the gateway.
func TestStaticReplica_CreatedOnDeploy(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	deploymentName := fmt.Sprintf("replica-static-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")
	var deploymentID string

	// Delete the deployment on exit unless the environment asks to keep it.
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Deploy static app", func(t *testing.T) {
		deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
		require.NotEmpty(t, deploymentID)
		t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Wait for replica setup", func(t *testing.T) {
		// Static replicas should set up quickly (IPFS content)
		// NOTE(review): fixed sleep rather than polling — flaky if replica
		// setup ever exceeds 10s.
		time.Sleep(10 * time.Second)
	})

	t.Run("Deployment has replica records", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		// Check that replicas field exists and has entries
		replicas, ok := deployment["replicas"].([]interface{})
		if !ok {
			// Replicas might be in a nested structure or separate endpoint
			t.Logf("Deployment response: %+v", deployment)
			// Try querying replicas via the deployment details
			homeNodeID, _ := deployment["home_node_id"].(string)
			require.NotEmpty(t, homeNodeID, "Deployment should have a home_node_id")
			t.Logf("Home node: %s", homeNodeID)
			// If replicas aren't in the response, that's still okay — we verify
			// via DNS and cross-node serving below
			t.Log("Replica records not in deployment response; will verify via DNS/serving")
			return
		}

		assert.GreaterOrEqual(t, len(replicas), 1, "Should have at least 1 replica")
		t.Logf("Found %d replica records", len(replicas))
		for i, r := range replicas {
			if replica, ok := r.(map[string]interface{}); ok {
				t.Logf("  Replica %d: node=%s status=%s", i, replica["node_id"], replica["status"])
			}
		}
	})

	t.Run("Static content served via gateway", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}
		domain := extractDomain(nodeURL)

		// Route through the gateway using the deployment's domain as Host.
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
		defer resp.Body.Close()

		body, _ := io.ReadAll(resp.Body)
		assert.Equal(t, http.StatusOK, resp.StatusCode,
			"Static content should be served (got %d: %s)", resp.StatusCode, string(body))
		t.Logf("Served via gateway: status=%d", resp.StatusCode)
	})
}
|
||||||
|
|
||||||
|
// TestDynamicReplica_CreatedOnDeploy verifies that deploying a dynamic (Node.js) app
// creates a replica process on a second node. The cross-node check is skipped
// when the test environment has fewer than two servers configured.
func TestDynamicReplica_CreatedOnDeploy(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	deploymentName := fmt.Sprintf("replica-nodejs-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/node-api")
	var deploymentID string

	// Delete the deployment on exit unless the environment asks to keep it.
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Deploy Node.js backend", func(t *testing.T) {
		deploymentID = createNodeJSDeployment(t, env, deploymentName, tarballPath)
		require.NotEmpty(t, deploymentID)
		t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Wait for deployment and replica", func(t *testing.T) {
		healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
		assert.True(t, healthy, "Deployment should become healthy")
		// Extra wait for async replica setup
		time.Sleep(15 * time.Second)
	})

	t.Run("Dynamic app served from both nodes", func(t *testing.T) {
		if len(env.Config.Servers) < 2 {
			t.Skip("Requires at least 2 servers")
		}

		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		if nodeURL == "" {
			t.Skip("No node URL in deployment")
		}
		domain := extractDomain(nodeURL)

		// Route via the gateway with the deployment domain as Host header.
		resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
		defer resp.Body.Close()

		body, _ := io.ReadAll(resp.Body)
		assert.Equal(t, http.StatusOK, resp.StatusCode,
			"Dynamic app should be served via gateway (got %d: %s)", resp.StatusCode, string(body))
		t.Logf("Served via gateway: status=%d body=%s", resp.StatusCode, string(body))
	})
}
|
||||||
|
|
||||||
|
// TestReplica_UpdatePropagation verifies that updating a deployment propagates to replicas.
// Flow: deploy v1, record its content CID, push an update, then assert the
// deployment's version counter advanced to 2. Requires at least 2 servers.
func TestReplica_UpdatePropagation(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	if len(env.Config.Servers) < 2 {
		t.Skip("Requires at least 2 servers")
	}

	deploymentName := fmt.Sprintf("replica-update-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")
	var deploymentID string

	// Delete the deployment on exit unless the environment asks to keep it.
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Deploy v1", func(t *testing.T) {
		deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
		require.NotEmpty(t, deploymentID)
		time.Sleep(10 * time.Second) // Wait for replica
	})

	// v1CID is captured here so later subtests can compare against it.
	var v1CID string
	t.Run("Record v1 CID", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		v1CID, _ = deployment["content_cid"].(string)
		require.NotEmpty(t, v1CID)
		t.Logf("v1 CID: %s", v1CID)
	})

	t.Run("Update to v2", func(t *testing.T) {
		updateStaticDeployment(t, env, deploymentName, tarballPath)
		time.Sleep(10 * time.Second) // Wait for update + replica propagation
	})

	t.Run("All nodes serve updated version", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		v2CID, _ := deployment["content_cid"].(string)

		// v2 CID might be same (same tarball) but version should increment
		version, _ := deployment["version"].(float64)
		assert.Equal(t, float64(2), version, "Should be version 2")
		t.Logf("v2 CID: %s, version: %v", v2CID, version)

		// Verify via gateway
		// NOTE(review): this re-fetches the same deployment record and
		// compares its CID to itself, so the assertion below is vacuous —
		// it likely intended to query a second node or the versions API.
		dep := e2e.GetDeployment(t, env, deploymentID)
		depCID, _ := dep["content_cid"].(string)
		assert.Equal(t, v2CID, depCID, "CID should match after update")
	})
}
|
||||||
|
|
||||||
|
// TestReplica_RollbackPropagation verifies rollback propagates to replica nodes.
// Flow: deploy v1, update to v2, capture the v1 content CID (from the versions
// API with a fallback to the live record), roll back to version 1, then assert
// the deployment's CID matches v1 again. Requires at least 2 servers.
func TestReplica_RollbackPropagation(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	if len(env.Config.Servers) < 2 {
		t.Skip("Requires at least 2 servers")
	}

	deploymentName := fmt.Sprintf("replica-rollback-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")
	var deploymentID string

	// Delete the deployment on exit unless the environment asks to keep it.
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Deploy v1 and update to v2", func(t *testing.T) {
		deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
		require.NotEmpty(t, deploymentID)
		time.Sleep(10 * time.Second)

		updateStaticDeployment(t, env, deploymentName, tarballPath)
		time.Sleep(10 * time.Second)
	})

	var v1CID string
	t.Run("Get v1 CID from versions", func(t *testing.T) {
		// listVersions is defined elsewhere in this package; the first entry
		// is presumably the oldest version — TODO confirm ordering.
		versions := listVersions(t, env, deploymentName)
		if len(versions) > 0 {
			v1CID, _ = versions[0]["content_cid"].(string)
		}
		if v1CID == "" {
			// Fall back: v1 CID from current deployment
			deployment := e2e.GetDeployment(t, env, deploymentID)
			v1CID, _ = deployment["content_cid"].(string)
		}
		t.Logf("v1 CID for rollback comparison: %s", v1CID)
	})

	t.Run("Rollback to v1", func(t *testing.T) {
		rollbackDeployment(t, env, deploymentName, 1)
		time.Sleep(10 * time.Second) // Wait for rollback + replica propagation
	})

	t.Run("All nodes have rolled-back CID", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		currentCID, _ := deployment["content_cid"].(string)
		t.Logf("Post-rollback CID: %s", currentCID)

		assert.Equal(t, v1CID, currentCID, "CID should match v1 after rollback")
	})
}
|
||||||
|
|
||||||
|
// TestReplica_TeardownOnDelete verifies that deleting a deployment removes replicas.
// The deployment's domain is captured before deletion so we can probe the
// gateway afterwards and confirm the app is no longer served. Requires at
// least 2 servers.
func TestReplica_TeardownOnDelete(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	if len(env.Config.Servers) < 2 {
		t.Skip("Requires at least 2 servers")
	}

	deploymentName := fmt.Sprintf("replica-delete-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")

	deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
	require.NotEmpty(t, deploymentID)
	time.Sleep(10 * time.Second) // Wait for replica

	// Get the domain before deletion
	deployment := e2e.GetDeployment(t, env, deploymentID)
	nodeURL := extractNodeURL(t, deployment)
	domain := ""
	if nodeURL != "" {
		domain = extractDomain(nodeURL)
	}

	t.Run("Delete deployment", func(t *testing.T) {
		e2e.DeleteDeployment(t, env, deploymentID)
		time.Sleep(10 * time.Second) // Wait for teardown propagation
	})

	t.Run("Deployment no longer served on any node", func(t *testing.T) {
		if domain == "" {
			t.Skip("No domain to test")
		}

		// Probe the gateway directly with the deleted deployment's Host.
		req, err := http.NewRequest("GET", env.GatewayURL+"/", nil)
		require.NoError(t, err)
		req.Host = domain

		resp, err := env.HTTPClient.Do(req)
		if err != nil {
			// A refused/failed connection also proves the app is gone.
			t.Logf("Connection failed (expected after deletion)")
			return
		}
		defer resp.Body.Close()

		body, _ := io.ReadAll(resp.Body)
		if resp.StatusCode == http.StatusOK {
			// A 200 may be a gateway fallback page; only fail if it still
			// contains the react-app's root div.
			assert.NotContains(t, string(body), "<div id=\"root\">",
				"Deleted deployment should not be served")
		}
		t.Logf("status=%d (expected non-200)", resp.StatusCode)
	})
}
|
||||||
|
|
||||||
|
// updateStaticDeployment updates an existing static deployment.
|
||||||
|
func updateStaticDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var fileData []byte
|
||||||
|
info, err := os.Stat(tarballPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
if info.IsDir() {
|
||||||
|
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
|
||||||
|
require.NoError(t, err)
|
||||||
|
} else {
|
||||||
|
file, err := os.Open(tarballPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer file.Close()
|
||||||
|
fileData, _ = io.ReadAll(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(name + "\r\n")
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
body.Write(fileData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/update", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Update failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
}
|
||||||
232
core/e2e/deployments/rollback_test.go
Normal file
232
core/e2e/deployments/rollback_test.go
Normal file
@ -0,0 +1,232 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDeploymentRollback_FullFlow tests the complete rollback workflow:
// 1. Deploy v1
// 2. Update to v2
// 3. Verify v2 content
// 4. Rollback to v1
// 5. Verify v1 content is restored
//
// Subtests share state (deploymentID, v1CID) and must run in order; they
// rely on Go running t.Run subtests sequentially within this function.
func TestDeploymentRollback_FullFlow(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	// Unique name per run so reruns don't collide with stale deployments.
	deploymentName := fmt.Sprintf("rollback-test-%d", time.Now().Unix())
	tarballPathV1 := filepath.Join("../../testdata/apps/react-app")
	var deploymentID string

	// Cleanup after test (skipped when SkipCleanup is set, e.g. for debugging)
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Deploy v1", func(t *testing.T) {
		deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPathV1)
		require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
		t.Logf("Created deployment v1: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Verify v1 deployment", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		// JSON numbers decode to float64, hence the float64 assertions.
		version, ok := deployment["version"].(float64)
		require.True(t, ok, "Version should be a number")
		assert.Equal(t, float64(1), version, "Initial version should be 1")

		contentCID, ok := deployment["content_cid"].(string)
		require.True(t, ok, "Content CID should be a string")
		assert.NotEmpty(t, contentCID, "Content CID should not be empty")

		t.Logf("v1 version: %v, CID: %s", version, contentCID)
	})

	var v1CID string
	t.Run("Save v1 CID", func(t *testing.T) {
		// Captured so the final subtest can compare content after rollback.
		deployment := e2e.GetDeployment(t, env, deploymentID)
		v1CID = deployment["content_cid"].(string)
		t.Logf("Saved v1 CID: %s", v1CID)
	})

	t.Run("Update to v2", func(t *testing.T) {
		// Update the deployment with the same tarball (simulates a new version)
		updateDeployment(t, env, deploymentName, tarballPathV1)

		// Wait for update to complete
		time.Sleep(2 * time.Second)
	})

	t.Run("Verify v2 deployment", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		version, ok := deployment["version"].(float64)
		require.True(t, ok, "Version should be a number")
		assert.Equal(t, float64(2), version, "Version should be 2 after update")

		t.Logf("v2 version: %v", version)
	})

	t.Run("List deployment versions", func(t *testing.T) {
		versions := listVersions(t, env, deploymentName)
		t.Logf("Available versions: %+v", versions)

		// listVersions returns nil on any endpoint problem, so only a
		// minimal history (>= 1) is required here rather than a full
		// two-entry history.
		assert.GreaterOrEqual(t, len(versions), 1, "Should have version history")
	})

	t.Run("Rollback to v1", func(t *testing.T) {
		rollbackDeployment(t, env, deploymentName, 1)

		// Wait for rollback to complete
		time.Sleep(2 * time.Second)
	})

	t.Run("Verify rollback succeeded", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		version, ok := deployment["version"].(float64)
		require.True(t, ok, "Version should be a number")
		// Note: Version number increases even on rollback (it's a new deployment version)
		// But the content_cid should be the same as v1
		t.Logf("Post-rollback version: %v", version)

		contentCID, ok := deployment["content_cid"].(string)
		require.True(t, ok, "Content CID should be a string")
		assert.Equal(t, v1CID, contentCID, "Content CID should match v1 after rollback")

		t.Logf("Rollback verified - content CID matches v1: %s", contentCID)
	})
}
|
||||||
|
|
||||||
|
// updateDeployment updates an existing static deployment
|
||||||
|
func updateDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var fileData []byte
|
||||||
|
info, err := os.Stat(tarballPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
if info.IsDir() {
|
||||||
|
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
|
||||||
|
require.NoError(t, err)
|
||||||
|
} else {
|
||||||
|
file, err := os.Open(tarballPath)
|
||||||
|
require.NoError(t, err, "Failed to open tarball")
|
||||||
|
defer file.Close()
|
||||||
|
fileData, _ = io.ReadAll(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
// Write name field
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(name + "\r\n")
|
||||||
|
|
||||||
|
// Write tarball file
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
body.Write(fileData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/update", body)
|
||||||
|
require.NoError(t, err, "Failed to create request")
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Failed to execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Update failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Failed to decode response")
|
||||||
|
t.Logf("Update response: %+v", result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// listVersions lists available versions for a deployment
|
||||||
|
func listVersions(t *testing.T, env *e2e.E2ETestEnv, name string) []map[string]interface{} {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/versions?name="+name, nil)
|
||||||
|
require.NoError(t, err, "Failed to create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Failed to execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Logf("List versions returned status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
Versions []map[string]interface{} `json:"versions"`
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||||
|
t.Logf("Failed to decode versions: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.Versions
|
||||||
|
}
|
||||||
|
|
||||||
|
// rollbackDeployment triggers a rollback to a specific version
|
||||||
|
func rollbackDeployment(t *testing.T, env *e2e.E2ETestEnv, name string, targetVersion int) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"name": name,
|
||||||
|
"version": targetVersion,
|
||||||
|
}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/rollback", bytes.NewBuffer(bodyBytes))
|
||||||
|
require.NoError(t, err, "Failed to create request")
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Failed to execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Rollback failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Failed to decode response")
|
||||||
|
t.Logf("Rollback response: %+v", result)
|
||||||
|
}
|
||||||
210
core/e2e/deployments/static_deployment_test.go
Normal file
210
core/e2e/deployments/static_deployment_test.go
Normal file
@ -0,0 +1,210 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package deployments_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStaticDeployment_FullFlow exercises the full lifecycle of a static
// deployment: upload, database verification, domain routing, asset
// serving, SPA fallback, listing, and deletion. Subtests share state
// (deploymentID) and must run in order.
func TestStaticDeployment_FullFlow(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "Failed to load test environment")

	// Unique name per run so reruns don't collide with stale deployments.
	deploymentName := fmt.Sprintf("test-static-%d", time.Now().Unix())
	tarballPath := filepath.Join("../../testdata/apps/react-app")
	var deploymentID string

	// Cleanup after test (the final subtest clears deploymentID on its
	// own successful delete so this doesn't double-delete)
	defer func() {
		if !env.SkipCleanup && deploymentID != "" {
			e2e.DeleteDeployment(t, env, deploymentID)
		}
	}()

	t.Run("Upload static tarball", func(t *testing.T) {
		deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)

		assert.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
		t.Logf("✓ Created deployment: %s (ID: %s)", deploymentName, deploymentID)
	})

	t.Run("Verify deployment in database", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)

		assert.Equal(t, deploymentName, deployment["name"], "Deployment name should match")
		assert.NotEmpty(t, deployment["content_cid"], "Content CID should not be empty")

		// Status might be "deploying" or "active" depending on timing
		status, ok := deployment["status"].(string)
		require.True(t, ok, "Status should be a string")
		assert.Contains(t, []string{"deploying", "active"}, status, "Status should be deploying or active")

		t.Logf("✓ Deployment verified in database")
		t.Logf(" - Name: %s", deployment["name"])
		t.Logf(" - Status: %s", status)
		t.Logf(" - CID: %s", deployment["content_cid"])
	})

	t.Run("Verify DNS record creation", func(t *testing.T) {
		// Wait for deployment to become active
		time.Sleep(2 * time.Second)

		// Get the actual domain from deployment response
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		require.NotEmpty(t, nodeURL, "Deployment should have a URL")
		expectedDomain := extractDomain(nodeURL)

		// Make request with Host header (localhost testing)
		resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, "/")
		defer resp.Body.Close()

		// Should return 200 with React app HTML
		assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")

		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err, "Should read response body")

		bodyStr := string(body)

		// Verify React app content
		assert.Contains(t, bodyStr, "<div id=\"root\">", "Should contain React root div")
		assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Content-Type should be text/html")

		t.Logf("✓ Domain routing works")
		t.Logf(" - Domain: %s", expectedDomain)
		t.Logf(" - Status: %d", resp.StatusCode)
		t.Logf(" - Content-Type: %s", resp.Header.Get("Content-Type"))
	})

	t.Run("Verify static assets serve correctly", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		require.NotEmpty(t, nodeURL, "Deployment should have a URL")
		expectedDomain := extractDomain(nodeURL)

		// Test CSS file (exact path depends on Vite build output)
		// We'll just test a few common asset paths
		assetPaths := []struct {
			path        string
			contentType string
		}{
			{"/index.html", "text/html"},
			// Note: Asset paths with hashes change on each build
			// We'll test what we can
		}

		for _, asset := range assetPaths {
			resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, asset.path)
			// NOTE(review): defer inside a loop holds every response body
			// open until this subtest returns; fine for the current
			// one-element list, worth revisiting if assetPaths grows.
			defer resp.Body.Close()

			if resp.StatusCode == http.StatusOK {
				assert.Contains(t, resp.Header.Get("Content-Type"), asset.contentType,
					"Content-Type should be %s for %s", asset.contentType, asset.path)

				t.Logf("✓ Asset served correctly: %s (%s)", asset.path, asset.contentType)
			}
		}
	})

	t.Run("Verify SPA fallback routing", func(t *testing.T) {
		deployment := e2e.GetDeployment(t, env, deploymentID)
		nodeURL := extractNodeURL(t, deployment)
		require.NotEmpty(t, nodeURL, "Deployment should have a URL")
		expectedDomain := extractDomain(nodeURL)

		// Request unknown route (should return index.html for SPA)
		resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, "/about/team")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode, "SPA fallback should return 200")

		body, err := io.ReadAll(resp.Body)
		require.NoError(t, err, "Should read response body")

		assert.Contains(t, string(body), "<div id=\"root\">", "Should return index.html for unknown paths")

		t.Logf("✓ SPA fallback routing works")
	})

	t.Run("List deployments", func(t *testing.T) {
		req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
		require.NoError(t, err, "Should create request")

		req.Header.Set("Authorization", "Bearer "+env.APIKey)

		resp, err := env.HTTPClient.Do(req)
		require.NoError(t, err, "Should execute request")
		defer resp.Body.Close()

		assert.Equal(t, http.StatusOK, resp.StatusCode, "List deployments should return 200")

		var result map[string]interface{}
		require.NoError(t, e2e.DecodeJSON(mustReadAll(t, resp.Body), &result), "Should decode JSON")

		deployments, ok := result["deployments"].([]interface{})
		require.True(t, ok, "Deployments should be an array")

		assert.GreaterOrEqual(t, len(deployments), 1, "Should have at least one deployment")

		// Find our deployment
		found := false
		for _, d := range deployments {
			dep, ok := d.(map[string]interface{})
			if !ok {
				continue
			}
			if dep["name"] == deploymentName {
				found = true
				t.Logf("✓ Found deployment in list: %s", deploymentName)
				break
			}
		}

		assert.True(t, found, "Deployment should be in list")
	})

	t.Run("Delete deployment", func(t *testing.T) {
		e2e.DeleteDeployment(t, env, deploymentID)

		// Verify deletion - allow time for replication
		time.Sleep(3 * time.Second)

		req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
		req.Header.Set("Authorization", "Bearer "+env.APIKey)

		resp, err := env.HTTPClient.Do(req)
		require.NoError(t, err, "Should execute request")
		defer resp.Body.Close()

		body, _ := io.ReadAll(resp.Body)
		t.Logf("Delete verification response: status=%d body=%s", resp.StatusCode, string(body))

		// After deletion, either 404 (not found) or 200 with empty/error response is acceptable
		if resp.StatusCode == http.StatusOK {
			// If 200, check if the deployment is actually gone
			t.Logf("Got 200 - this may indicate soft delete or eventual consistency")
		}

		t.Logf("✓ Deployment deleted successfully")

		// Clear deploymentID so cleanup doesn't try to delete again
		deploymentID = ""
	})
}
|
||||||
|
|
||||||
|
func mustReadAll(t *testing.T, r io.Reader) []byte {
|
||||||
|
t.Helper()
|
||||||
|
data, err := io.ReadAll(r)
|
||||||
|
require.NoError(t, err, "Should read all data")
|
||||||
|
return data
|
||||||
|
}
|
||||||
1731
core/e2e/env.go
Normal file
1731
core/e2e/env.go
Normal file
File diff suppressed because it is too large
Load Diff
527
core/e2e/integration/concurrency_test.go
Normal file
527
core/e2e/integration/concurrency_test.go
Normal file
@ -0,0 +1,527 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCache_ConcurrentWrites tests concurrent cache writes
|
||||||
|
func TestCache_ConcurrentWrites(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
numGoroutines := 10
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
var errorCount int32
|
||||||
|
|
||||||
|
for i := 0; i < numGoroutines; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
key := fmt.Sprintf("key-%d", idx)
|
||||||
|
value := fmt.Sprintf("value-%d", idx)
|
||||||
|
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if errorCount > 0 {
|
||||||
|
t.Fatalf("expected no errors, got %d", errorCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify all values exist
|
||||||
|
scanReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := scanReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("scan failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var scanResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := scanResp["keys"].([]interface{})
|
||||||
|
if len(keys) < numGoroutines {
|
||||||
|
t.Fatalf("expected at least %d keys, got %d", numGoroutines, len(keys))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCache_ConcurrentReads tests concurrent cache reads
|
||||||
|
func TestCache_ConcurrentReads(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "shared-key"
|
||||||
|
value := "shared-value"
|
||||||
|
|
||||||
|
// Put value first
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read concurrently
|
||||||
|
numGoroutines := 10
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
var errorCount int32
|
||||||
|
|
||||||
|
for i := 0; i < numGoroutines; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := getReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var getResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &getResp); err != nil {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if getResp["value"] != value {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if errorCount > 0 {
|
||||||
|
t.Fatalf("expected no errors, got %d", errorCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCache_ConcurrentDeleteAndWrite tests concurrent delete and write
|
||||||
|
func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
var errorCount int32
|
||||||
|
|
||||||
|
numWrites := 5
|
||||||
|
numDeletes := 3
|
||||||
|
|
||||||
|
// Write keys
|
||||||
|
for i := 0; i < numWrites; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
key := fmt.Sprintf("key-%d", idx)
|
||||||
|
value := fmt.Sprintf("value-%d", idx)
|
||||||
|
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Delete some keys
|
||||||
|
for i := 0; i < numDeletes; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
key := fmt.Sprintf("key-%d", idx)
|
||||||
|
|
||||||
|
deleteReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := deleteReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if errorCount > 0 {
|
||||||
|
t.Fatalf("expected no errors, got %d", errorCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRQLite_ConcurrentInserts tests concurrent database inserts
|
||||||
|
func TestRQLite_ConcurrentInserts(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup table after test
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert concurrently
|
||||||
|
numInserts := 10
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
var errorCount int32
|
||||||
|
|
||||||
|
for i := 0; i < numInserts; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
txReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := txReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
atomic.AddInt32(&errorCount, 1)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if errorCount > 0 {
|
||||||
|
t.Logf("warning: %d concurrent inserts failed", errorCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify count
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var countResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &countResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
count := int(row[0].(float64))
|
||||||
|
if count < numInserts {
|
||||||
|
t.Logf("warning: expected %d inserts, got %d", numInserts, count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRQLite_LargeBatchTransaction tests a large transaction with many statements
|
||||||
|
func TestRQLite_LargeBatchTransaction(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup table after test
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create large batch (100 statements)
|
||||||
|
var ops []map[string]interface{}
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
ops = append(ops, map[string]interface{}{
|
||||||
|
"kind": "exec",
|
||||||
|
"sql": fmt.Sprintf("INSERT INTO %s(value) VALUES ('value-%d')", table, i),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
txReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"ops": ops,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = txReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("large batch transaction failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify count
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var countResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &countResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
if int(row[0].(float64)) != 100 {
|
||||||
|
t.Fatalf("expected 100 rows, got %v", row[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep
|
||||||
|
func TestCache_TTLExpiryWithSleep(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "ttl-expiry-key"
|
||||||
|
value := "ttl-expiry-value"
|
||||||
|
|
||||||
|
// Put value with 2 second TTL
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
"ttl": "2s",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put with TTL failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify exists immediately
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sleep for TTL duration + buffer
|
||||||
|
e2e.Delay(2500)
|
||||||
|
|
||||||
|
// Try to get after TTL expires
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
if status == http.StatusOK {
|
||||||
|
t.Logf("warning: TTL expiry may not be fully implemented; key still exists after TTL")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCache_ConcurrentWriteAndDelete tests concurrent writes and deletes on same key
|
||||||
|
func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "contested-key"
|
||||||
|
|
||||||
|
// Alternate between writes and deletes
|
||||||
|
numIterations := 5
|
||||||
|
for i := 0; i < numIterations; i++ {
|
||||||
|
// Write
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": fmt.Sprintf("value-%d", i),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed at iteration %d: status %d, err %v", i, status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("get failed at iteration %d: status %d, err %v", i, status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete
|
||||||
|
deleteReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = deleteReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Logf("warning: delete at iteration %d failed: status %d, err %v", i, status, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
462
core/e2e/integration/data_persistence_test.go
Normal file
462
core/e2e/integration/data_persistence_test.go
Normal file
@ -0,0 +1,462 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// STRICT DATA PERSISTENCE TESTS
|
||||||
|
// These tests verify that data is properly persisted and survives operations.
|
||||||
|
// Tests FAIL if data is lost or corrupted.
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
// TestRQLite_DataPersistence verifies that RQLite data is persisted through the gateway.
|
||||||
|
func TestRQLite_DataPersistence(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
tableName := fmt.Sprintf("persist_test_%d", time.Now().UnixNano())
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": tableName},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT, version INTEGER)",
|
||||||
|
tableName,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create table")
|
||||||
|
require.True(t, status == http.StatusCreated || status == http.StatusOK,
|
||||||
|
"FAIL: Create table returned status %d", status)
|
||||||
|
|
||||||
|
t.Run("Data_survives_multiple_writes", func(t *testing.T) {
|
||||||
|
// Insert initial data
|
||||||
|
var statements []string
|
||||||
|
for i := 1; i <= 10; i++ {
|
||||||
|
statements = append(statements,
|
||||||
|
fmt.Sprintf("INSERT INTO %s (value, version) VALUES ('item_%d', %d)", tableName, i, i))
|
||||||
|
}
|
||||||
|
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{"statements": statements},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := insertReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not insert rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
|
||||||
|
|
||||||
|
// Verify all data exists
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not count rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Count query returned status %d", status)
|
||||||
|
|
||||||
|
var queryResp map[string]interface{}
|
||||||
|
e2e.DecodeJSON(body, &queryResp)
|
||||||
|
|
||||||
|
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
count := int(row[0].(float64))
|
||||||
|
require.Equal(t, 10, count, "FAIL: Expected 10 rows, got %d", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update data
|
||||||
|
updateReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("UPDATE %s SET version = version + 100 WHERE version <= 5", tableName),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = updateReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not update rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Update returned status %d", status)
|
||||||
|
|
||||||
|
// Verify updates persisted
|
||||||
|
queryUpdatedReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE version > 100", tableName),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err = queryUpdatedReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not count updated rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Count updated query returned status %d", status)
|
||||||
|
|
||||||
|
e2e.DecodeJSON(body, &queryResp)
|
||||||
|
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
count := int(row[0].(float64))
|
||||||
|
require.Equal(t, 5, count, "FAIL: Expected 5 updated rows, got %d", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf(" ✓ Data persists through multiple write operations")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Deletes_are_persisted", func(t *testing.T) {
|
||||||
|
// Delete some rows
|
||||||
|
deleteReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("DELETE FROM %s WHERE version > 100", tableName),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := deleteReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not delete rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Delete returned status %d", status)
|
||||||
|
|
||||||
|
// Verify deletes persisted
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not count remaining rows")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Count query returned status %d", status)
|
||||||
|
|
||||||
|
var queryResp map[string]interface{}
|
||||||
|
e2e.DecodeJSON(body, &queryResp)
|
||||||
|
|
||||||
|
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
count := int(row[0].(float64))
|
||||||
|
require.Equal(t, 5, count, "FAIL: Expected 5 rows after delete, got %d", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf(" ✓ Deletes are properly persisted")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRQLite_DataFilesExist verifies RQLite data files are created on disk.
|
||||||
|
func TestRQLite_DataFilesExist(t *testing.T) {
|
||||||
|
homeDir, err := os.UserHomeDir()
|
||||||
|
require.NoError(t, err, "FAIL: Could not get home directory")
|
||||||
|
|
||||||
|
// Check for RQLite data directories
|
||||||
|
dataLocations := []string{
|
||||||
|
filepath.Join(homeDir, ".orama", "node-1", "rqlite"),
|
||||||
|
filepath.Join(homeDir, ".orama", "node-2", "rqlite"),
|
||||||
|
filepath.Join(homeDir, ".orama", "node-3", "rqlite"),
|
||||||
|
filepath.Join(homeDir, ".orama", "node-4", "rqlite"),
|
||||||
|
filepath.Join(homeDir, ".orama", "node-5", "rqlite"),
|
||||||
|
}
|
||||||
|
|
||||||
|
foundDataDirs := 0
|
||||||
|
for _, dataDir := range dataLocations {
|
||||||
|
if _, err := os.Stat(dataDir); err == nil {
|
||||||
|
foundDataDirs++
|
||||||
|
t.Logf(" ✓ Found RQLite data directory: %s", dataDir)
|
||||||
|
|
||||||
|
// Check for Raft log files
|
||||||
|
entries, _ := os.ReadDir(dataDir)
|
||||||
|
for _, entry := range entries {
|
||||||
|
t.Logf(" - %s", entry.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Greater(t, foundDataDirs, 0,
|
||||||
|
"FAIL: No RQLite data directories found - data may not be persisted")
|
||||||
|
t.Logf(" Found %d RQLite data directories", foundDataDirs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestOlric_DataPersistence verifies Olric cache data persistence.
|
||||||
|
// Note: Olric is an in-memory cache, so this tests data survival during runtime.
|
||||||
|
func TestOlric_DataPersistence(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "FAIL: Could not load test environment")
|
||||||
|
|
||||||
|
dmap := fmt.Sprintf("persist_cache_%d", time.Now().UnixNano())
|
||||||
|
|
||||||
|
t.Run("Cache_data_survives_multiple_operations", func(t *testing.T) {
|
||||||
|
// Put multiple keys
|
||||||
|
keys := make(map[string]string)
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
key := fmt.Sprintf("persist_key_%d", i)
|
||||||
|
value := fmt.Sprintf("persist_value_%d", i)
|
||||||
|
keys[key] = value
|
||||||
|
|
||||||
|
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
|
||||||
|
require.NoError(t, err, "FAIL: Could not put key %s", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform other operations
|
||||||
|
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, "other_key", "other_value")
|
||||||
|
require.NoError(t, err, "FAIL: Could not put other key")
|
||||||
|
|
||||||
|
// Verify original keys still exist
|
||||||
|
for key, expectedValue := range keys {
|
||||||
|
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
|
||||||
|
require.NoError(t, err, "FAIL: Key %s not found after other operations", key)
|
||||||
|
require.Equal(t, expectedValue, retrieved, "FAIL: Value mismatch for key %s", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf(" ✓ Cache data survives multiple operations")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNamespaceCluster_DataPersistence verifies namespace-specific data is isolated and persisted.
|
||||||
|
func TestNamespaceCluster_DataPersistence(t *testing.T) {
|
||||||
|
// Create namespace
|
||||||
|
namespace := fmt.Sprintf("persist-ns-%d", time.Now().UnixNano())
|
||||||
|
env, err := e2e.LoadTestEnvWithNamespace(namespace)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create namespace")
|
||||||
|
|
||||||
|
t.Logf("Created namespace: %s", namespace)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
t.Run("Namespace_data_is_isolated", func(t *testing.T) {
|
||||||
|
// Create data via gateway API
|
||||||
|
tableName := fmt.Sprintf("ns_data_%d", time.Now().UnixNano())
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/rqlite/create-table",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT)", tableName),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create table in namespace")
|
||||||
|
require.True(t, status == http.StatusOK || status == http.StatusCreated,
|
||||||
|
"FAIL: Create table returned status %d", status)
|
||||||
|
|
||||||
|
// Insert data
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/rqlite/transaction",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s (value) VALUES ('ns_test_value')", tableName),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = insertReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not insert into namespace table")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
|
||||||
|
|
||||||
|
// Verify data exists
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/rqlite/query",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT value FROM %s", tableName),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Could not query namespace table")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Query returned status %d", status)
|
||||||
|
|
||||||
|
var queryResp map[string]interface{}
|
||||||
|
json.Unmarshal(body, &queryResp)
|
||||||
|
count, _ := queryResp["count"].(float64)
|
||||||
|
require.Equal(t, float64(1), count, "FAIL: Expected 1 row in namespace table")
|
||||||
|
|
||||||
|
t.Logf(" ✓ Namespace data is isolated and persisted")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestIPFS_DataPersistence verifies IPFS content is persisted and pinned.
|
||||||
|
// Note: Detailed IPFS tests are in storage_http_test.go. This test uses the helper from env.go.
|
||||||
|
func TestIPFS_DataPersistence(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "FAIL: Could not load test environment")
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
t.Run("Uploaded_content_persists", func(t *testing.T) {
|
||||||
|
// Use helper function to upload content via multipart form
|
||||||
|
content := fmt.Sprintf("persistent content %d", time.Now().UnixNano())
|
||||||
|
cid := e2e.UploadTestFile(t, env, "persist_test.txt", content)
|
||||||
|
require.NotEmpty(t, cid, "FAIL: No CID returned from upload")
|
||||||
|
t.Logf(" Uploaded content with CID: %s", cid)
|
||||||
|
|
||||||
|
// Verify content can be retrieved
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: env.GatewayURL + "/v1/storage/get/" + cid,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
respBody, status, err := getReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Get content failed")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Get returned status %d", status)
|
||||||
|
require.Contains(t, string(respBody), "persistent content",
|
||||||
|
"FAIL: Retrieved content doesn't match uploaded content")
|
||||||
|
|
||||||
|
t.Logf(" ✓ IPFS content persists and is retrievable")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSQLite_DataPersistence verifies per-deployment SQLite databases persist.
|
||||||
|
func TestSQLite_DataPersistence(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "FAIL: Could not load test environment")
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dbName := fmt.Sprintf("persist_db_%d", time.Now().UnixNano())
|
||||||
|
|
||||||
|
t.Run("SQLite_database_persists", func(t *testing.T) {
|
||||||
|
// Create database
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/db/sqlite/create",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"database_name": dbName,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Create database failed")
|
||||||
|
require.True(t, status == http.StatusOK || status == http.StatusCreated,
|
||||||
|
"FAIL: Create returned status %d", status)
|
||||||
|
t.Logf(" Created SQLite database: %s", dbName)
|
||||||
|
|
||||||
|
// Create table and insert data
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/db/sqlite/query",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"database_name": dbName,
|
||||||
|
"query": "CREATE TABLE IF NOT EXISTS test_table (id INTEGER PRIMARY KEY, data TEXT)",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = queryReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Create table failed")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Create table returned status %d", status)
|
||||||
|
|
||||||
|
// Insert data
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/db/sqlite/query",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"database_name": dbName,
|
||||||
|
"query": "INSERT INTO test_table (data) VALUES ('persistent_data')",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = insertReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Insert failed")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
|
||||||
|
|
||||||
|
// Verify data persists
|
||||||
|
selectReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: env.GatewayURL + "/v1/db/sqlite/query",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"database_name": dbName,
|
||||||
|
"query": "SELECT data FROM test_table",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := selectReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Select failed")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: Select returned status %d", status)
|
||||||
|
require.Contains(t, string(body), "persistent_data",
|
||||||
|
"FAIL: Data not found in SQLite database")
|
||||||
|
|
||||||
|
t.Logf(" ✓ SQLite database data persists")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("SQLite_database_listed", func(t *testing.T) {
|
||||||
|
// List databases to verify it was persisted
|
||||||
|
listReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: env.GatewayURL + "/v1/db/sqlite/list",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + env.APIKey,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := listReq.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: List databases failed")
|
||||||
|
require.Equal(t, http.StatusOK, status, "FAIL: List returned status %d", status)
|
||||||
|
require.Contains(t, string(body), dbName,
|
||||||
|
"FAIL: Created database not found in list")
|
||||||
|
|
||||||
|
t.Logf(" ✓ SQLite database appears in list")
|
||||||
|
})
|
||||||
|
}
|
||||||
356
core/e2e/integration/domain_routing_test.go
Normal file
356
core/e2e/integration/domain_routing_test.go
Normal file
@ -0,0 +1,356 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDomainRouting_BasicRouting(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("test-routing-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for deployment to be active
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
// Get deployment details for debugging
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
t.Logf("Deployment created: ID=%s, CID=%s, Name=%s, Status=%s",
|
||||||
|
deploymentID, deployment["content_cid"], deployment["name"], deployment["status"])
|
||||||
|
|
||||||
|
t.Run("Standard domain resolves", func(t *testing.T) {
|
||||||
|
// Domain format: {deploymentName}.{baseDomain}
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err, "Should read response body")
|
||||||
|
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">", "Should serve React app")
|
||||||
|
assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Content-Type should be HTML")
|
||||||
|
|
||||||
|
t.Logf("✓ Standard domain routing works: %s", domain)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Non-orama domain passes through", func(t *testing.T) {
|
||||||
|
// Request with non-orama domain should not route to deployment
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, "example.com", "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should either return 404 or pass to default handler
|
||||||
|
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Non-orama domain should not route to deployment")
|
||||||
|
|
||||||
|
t.Logf("✓ Non-orama domains correctly pass through (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("API paths bypass domain routing", func(t *testing.T) {
|
||||||
|
// /v1/* paths should bypass domain routing and use API key auth
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
|
||||||
|
req.Host = domain
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should return API response, not deployment content
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "API endpoint should work")
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
err = json.Unmarshal(bodyBytes, &result)
|
||||||
|
|
||||||
|
// Should be JSON API response
|
||||||
|
assert.NoError(t, err, "Should decode JSON (API response)")
|
||||||
|
assert.NotNil(t, result["deployments"], "Should have deployments field")
|
||||||
|
|
||||||
|
t.Logf("✓ API paths correctly bypass domain routing")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Well-known paths bypass domain routing", func(t *testing.T) {
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
|
||||||
|
// /.well-known/ paths should bypass (used for ACME challenges, etc.)
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/.well-known/acme-challenge/test")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should not serve deployment content
|
||||||
|
// Exact status depends on implementation, but shouldn't be deployment content
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
bodyStr := string(body)
|
||||||
|
|
||||||
|
// Shouldn't contain React app content
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
assert.NotContains(t, bodyStr, "<div id=\"root\">",
|
||||||
|
"Well-known paths should not serve deployment content")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Well-known paths bypass routing (status: %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDomainRouting_MultipleDeployments(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
// Create multiple deployments
|
||||||
|
deployment1Name := fmt.Sprintf("test-multi-1-%d", time.Now().Unix())
|
||||||
|
deployment2Name := fmt.Sprintf("test-multi-2-%d", time.Now().Unix())
|
||||||
|
|
||||||
|
deployment1ID := e2e.CreateTestDeployment(t, env, deployment1Name, tarballPath)
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
deployment2ID := e2e.CreateTestDeployment(t, env, deployment2Name, tarballPath)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deployment1ID)
|
||||||
|
e2e.DeleteDeployment(t, env, deployment2ID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
t.Run("Each deployment routes independently", func(t *testing.T) {
|
||||||
|
domain1 := env.BuildDeploymentDomain(deployment1Name)
|
||||||
|
domain2 := env.BuildDeploymentDomain(deployment2Name)
|
||||||
|
|
||||||
|
// Test deployment 1
|
||||||
|
resp1 := e2e.TestDeploymentWithHostHeader(t, env, domain1, "/")
|
||||||
|
defer resp1.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp1.StatusCode, "Deployment 1 should serve")
|
||||||
|
|
||||||
|
// Test deployment 2
|
||||||
|
resp2 := e2e.TestDeploymentWithHostHeader(t, env, domain2, "/")
|
||||||
|
defer resp2.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Deployment 2 should serve")
|
||||||
|
|
||||||
|
t.Logf("✓ Multiple deployments route independently")
|
||||||
|
t.Logf(" - Domain 1: %s", domain1)
|
||||||
|
t.Logf(" - Domain 2: %s", domain2)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Wrong domain returns 404", func(t *testing.T) {
|
||||||
|
// Request with non-existent deployment subdomain
|
||||||
|
fakeDeploymentDomain := env.BuildDeploymentDomain(fmt.Sprintf("nonexistent-deployment-%d", time.Now().Unix()))
|
||||||
|
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, fakeDeploymentDomain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusNotFound, resp.StatusCode,
|
||||||
|
"Non-existent deployment should return 404")
|
||||||
|
|
||||||
|
t.Logf("✓ Non-existent deployment returns 404")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDomainRouting_ContentTypes(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("test-content-types-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
|
||||||
|
contentTypeTests := []struct {
|
||||||
|
path string
|
||||||
|
shouldHave string
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{"/", "text/html", "HTML root"},
|
||||||
|
{"/index.html", "text/html", "HTML file"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range contentTypeTests {
|
||||||
|
t.Run(test.description, func(t *testing.T) {
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, test.path)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
contentType := resp.Header.Get("Content-Type")
|
||||||
|
assert.Contains(t, contentType, test.shouldHave,
|
||||||
|
"Content-Type for %s should contain %s", test.path, test.shouldHave)
|
||||||
|
|
||||||
|
t.Logf("✓ %s: %s", test.description, contentType)
|
||||||
|
} else {
|
||||||
|
t.Logf("⚠ %s returned status %d", test.path, resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDomainRouting_SPAFallback(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("test-spa-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
|
||||||
|
t.Run("Unknown paths fall back to index.html", func(t *testing.T) {
|
||||||
|
unknownPaths := []string{
|
||||||
|
"/about",
|
||||||
|
"/users/123",
|
||||||
|
"/settings/profile",
|
||||||
|
"/some/deep/nested/path",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, path := range unknownPaths {
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, path)
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
// Should return index.html for SPA routing
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"SPA fallback should return 200 for %s", path)
|
||||||
|
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">",
|
||||||
|
"SPA fallback should return index.html for %s", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ SPA fallback routing verified for %d paths", len(unknownPaths))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeployment_DomainFormat verifies that deployment URLs use the correct format:
|
||||||
|
// - CORRECT: {name}-{random}.{baseDomain} (e.g., "myapp-f3o4if.dbrs.space")
|
||||||
|
// - WRONG: {name}.node-{shortID}.{baseDomain} (should NOT exist)
|
||||||
|
func TestDeployment_DomainFormat(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("format-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for deployment
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
t.Run("Deployment URL has correct format", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
|
||||||
|
// Get the deployment URLs
|
||||||
|
urls, ok := deployment["urls"].([]interface{})
|
||||||
|
if !ok || len(urls) == 0 {
|
||||||
|
// Fall back to single url field
|
||||||
|
if url, ok := deployment["url"].(string); ok && url != "" {
|
||||||
|
urls = []interface{}{url}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the subdomain from deployment response
|
||||||
|
subdomain, _ := deployment["subdomain"].(string)
|
||||||
|
t.Logf("Deployment subdomain: %s", subdomain)
|
||||||
|
t.Logf("Deployment URLs: %v", urls)
|
||||||
|
|
||||||
|
foundCorrectFormat := false
|
||||||
|
for _, u := range urls {
|
||||||
|
urlStr, ok := u.(string)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// URL should start with https://{name}-
|
||||||
|
expectedPrefix := fmt.Sprintf("https://%s-", deploymentName)
|
||||||
|
if strings.HasPrefix(urlStr, expectedPrefix) {
|
||||||
|
foundCorrectFormat = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// URL should contain base domain
|
||||||
|
assert.Contains(t, urlStr, env.BaseDomain,
|
||||||
|
"URL should contain base domain %s", env.BaseDomain)
|
||||||
|
|
||||||
|
// URL should NOT contain node identifier pattern
|
||||||
|
assert.NotContains(t, urlStr, ".node-",
|
||||||
|
"URL should NOT have node identifier (got: %s)", urlStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(urls) > 0 {
|
||||||
|
assert.True(t, foundCorrectFormat, "Should find URL with correct domain format (https://{name}-{random}.{baseDomain})")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Domain format verification passed")
|
||||||
|
t.Logf(" - Format: {name}-{random}.{baseDomain}")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Domain resolves via Host header", func(t *testing.T) {
|
||||||
|
// Get the actual subdomain from the deployment
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
subdomain, _ := deployment["subdomain"].(string)
|
||||||
|
if subdomain == "" {
|
||||||
|
t.Skip("No subdomain set, skipping host header test")
|
||||||
|
}
|
||||||
|
domain := subdomain + "." + env.BaseDomain
|
||||||
|
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Domain %s should resolve successfully", domain)
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">",
|
||||||
|
"Should serve deployment content")
|
||||||
|
|
||||||
|
t.Logf("✓ Domain %s resolves correctly", domain)
|
||||||
|
})
|
||||||
|
}
|
||||||
278
core/e2e/integration/fullstack_integration_test.go
Normal file
278
core/e2e/integration/fullstack_integration_test.go
Normal file
@ -0,0 +1,278 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFullStack_GoAPI_SQLite(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
appName := fmt.Sprintf("fullstack-app-%d", time.Now().Unix())
|
||||||
|
backendName := appName + "-backend"
|
||||||
|
dbName := appName + "-db"
|
||||||
|
|
||||||
|
var backendID string
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
if backendID != "" {
|
||||||
|
e2e.DeleteDeployment(t, env, backendID)
|
||||||
|
}
|
||||||
|
e2e.DeleteSQLiteDB(t, env, dbName)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Step 1: Create SQLite database
|
||||||
|
t.Run("Create SQLite database", func(t *testing.T) {
|
||||||
|
e2e.CreateSQLiteDB(t, env, dbName)
|
||||||
|
|
||||||
|
// Create users table
|
||||||
|
query := `CREATE TABLE users (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
email TEXT UNIQUE NOT NULL,
|
||||||
|
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||||
|
)`
|
||||||
|
e2e.ExecuteSQLQuery(t, env, dbName, query)
|
||||||
|
|
||||||
|
// Insert test data
|
||||||
|
insertQuery := `INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com')`
|
||||||
|
result := e2e.ExecuteSQLQuery(t, env, dbName, insertQuery)
|
||||||
|
|
||||||
|
assert.NotNil(t, result, "Should execute INSERT successfully")
|
||||||
|
t.Logf("✓ Database created with users table")
|
||||||
|
})
|
||||||
|
|
||||||
|
// Step 2: Deploy Go backend (this would normally connect to SQLite)
|
||||||
|
// Note: For now we test the Go backend deployment without actual DB connection
|
||||||
|
// as that requires environment variable injection during deployment
|
||||||
|
t.Run("Deploy Go backend", func(t *testing.T) {
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/go-api")
|
||||||
|
|
||||||
|
// Note: In a real implementation, we would pass DATABASE_NAME env var
|
||||||
|
// For now, we just test the deployment mechanism
|
||||||
|
backendID = e2e.CreateTestDeployment(t, env, backendName, tarballPath)
|
||||||
|
|
||||||
|
assert.NotEmpty(t, backendID, "Backend deployment ID should not be empty")
|
||||||
|
t.Logf("✓ Go backend deployed: %s", backendName)
|
||||||
|
|
||||||
|
// Wait for deployment to become active
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Step 3: Test database operations
|
||||||
|
t.Run("Test database CRUD operations", func(t *testing.T) {
|
||||||
|
// INSERT
|
||||||
|
insertQuery := `INSERT INTO users (name, email) VALUES ('Bob', 'bob@example.com')`
|
||||||
|
e2e.ExecuteSQLQuery(t, env, dbName, insertQuery)
|
||||||
|
|
||||||
|
// SELECT
|
||||||
|
users := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users ORDER BY id")
|
||||||
|
require.GreaterOrEqual(t, len(users), 2, "Should have at least 2 users")
|
||||||
|
|
||||||
|
assert.Equal(t, "Alice", users[0]["name"], "First user should be Alice")
|
||||||
|
assert.Equal(t, "Bob", users[1]["name"], "Second user should be Bob")
|
||||||
|
|
||||||
|
t.Logf("✓ Database CRUD operations work")
|
||||||
|
t.Logf(" - Found %d users", len(users))
|
||||||
|
|
||||||
|
// UPDATE
|
||||||
|
updateQuery := `UPDATE users SET email = 'alice.new@example.com' WHERE name = 'Alice'`
|
||||||
|
result := e2e.ExecuteSQLQuery(t, env, dbName, updateQuery)
|
||||||
|
|
||||||
|
rowsAffected, ok := result["rows_affected"].(float64)
|
||||||
|
require.True(t, ok, "Should have rows_affected")
|
||||||
|
assert.Equal(t, float64(1), rowsAffected, "Should update 1 row")
|
||||||
|
|
||||||
|
// Verify update
|
||||||
|
updated := e2e.QuerySQLite(t, env, dbName, "SELECT email FROM users WHERE name = 'Alice'")
|
||||||
|
require.Len(t, updated, 1, "Should find Alice")
|
||||||
|
assert.Equal(t, "alice.new@example.com", updated[0]["email"], "Email should be updated")
|
||||||
|
|
||||||
|
t.Logf("✓ UPDATE operation verified")
|
||||||
|
|
||||||
|
// DELETE
|
||||||
|
deleteQuery := `DELETE FROM users WHERE name = 'Bob'`
|
||||||
|
result = e2e.ExecuteSQLQuery(t, env, dbName, deleteQuery)
|
||||||
|
|
||||||
|
rowsAffected, ok = result["rows_affected"].(float64)
|
||||||
|
require.True(t, ok, "Should have rows_affected")
|
||||||
|
assert.Equal(t, float64(1), rowsAffected, "Should delete 1 row")
|
||||||
|
|
||||||
|
// Verify deletion
|
||||||
|
remaining := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users")
|
||||||
|
assert.Equal(t, 1, len(remaining), "Should have 1 user remaining")
|
||||||
|
|
||||||
|
t.Logf("✓ DELETE operation verified")
|
||||||
|
})
|
||||||
|
|
||||||
|
// Step 4: Test backend API endpoints (if deployment is active)
|
||||||
|
t.Run("Test backend API endpoints", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, backendID)
|
||||||
|
|
||||||
|
status, ok := deployment["status"].(string)
|
||||||
|
if !ok || status != "active" {
|
||||||
|
t.Skip("Backend deployment not active, skipping API tests")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
backendDomain := env.BuildDeploymentDomain(backendName)
|
||||||
|
|
||||||
|
// Test health endpoint
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, backendDomain, "/health")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
var health map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes, &health), "Should decode health response")
|
||||||
|
|
||||||
|
assert.Equal(t, "healthy", health["status"], "Status should be healthy")
|
||||||
|
assert.Equal(t, "go-backend-test", health["service"], "Service name should match")
|
||||||
|
|
||||||
|
t.Logf("✓ Backend health check passed")
|
||||||
|
} else {
|
||||||
|
t.Logf("⚠ Health check returned status %d (deployment may still be starting)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test users API endpoint
|
||||||
|
resp2 := e2e.TestDeploymentWithHostHeader(t, env, backendDomain, "/api/users")
|
||||||
|
defer resp2.Body.Close()
|
||||||
|
|
||||||
|
if resp2.StatusCode == http.StatusOK {
|
||||||
|
var usersResp map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp2.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes, &usersResp), "Should decode users response")
|
||||||
|
|
||||||
|
users, ok := usersResp["users"].([]interface{})
|
||||||
|
require.True(t, ok, "Should have users array")
|
||||||
|
assert.GreaterOrEqual(t, len(users), 3, "Should have test users")
|
||||||
|
|
||||||
|
t.Logf("✓ Backend API endpoint works")
|
||||||
|
t.Logf(" - Users endpoint returned %d users", len(users))
|
||||||
|
} else {
|
||||||
|
t.Logf("⚠ Users API returned status %d (deployment may still be starting)", resp2.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Step 5: Test database backup
|
||||||
|
t.Run("Test database backup", func(t *testing.T) {
|
||||||
|
reqBody := map[string]string{"database_name": dbName}
|
||||||
|
bodyBytes, _ := json.Marshal(reqBody)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/db/sqlite/backup", bytes.NewReader(bodyBytes))
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err, "Should execute backup request")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
var result map[string]interface{}
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode backup response")
|
||||||
|
|
||||||
|
backupCID, ok := result["backup_cid"].(string)
|
||||||
|
require.True(t, ok, "Should have backup CID")
|
||||||
|
assert.NotEmpty(t, backupCID, "Backup CID should not be empty")
|
||||||
|
|
||||||
|
t.Logf("✓ Database backup created")
|
||||||
|
t.Logf(" - CID: %s", backupCID)
|
||||||
|
} else {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Logf("⚠ Backup returned status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Step 6: Test concurrent database queries
|
||||||
|
t.Run("Test concurrent database reads", func(t *testing.T) {
|
||||||
|
// WAL mode should allow concurrent reads — run sequentially to avoid t.Fatal in goroutines
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
users := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users")
|
||||||
|
assert.GreaterOrEqual(t, len(users), 0, "Should query successfully")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Sequential reads successful")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFullStack_StaticSite_SQLite(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
appName := fmt.Sprintf("fullstack-static-%d", time.Now().Unix())
|
||||||
|
frontendName := appName + "-frontend"
|
||||||
|
dbName := appName + "-db"
|
||||||
|
|
||||||
|
var frontendID string
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
if frontendID != "" {
|
||||||
|
e2e.DeleteDeployment(t, env, frontendID)
|
||||||
|
}
|
||||||
|
e2e.DeleteSQLiteDB(t, env, dbName)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Deploy static site and create database", func(t *testing.T) {
|
||||||
|
// Create database
|
||||||
|
e2e.CreateSQLiteDB(t, env, dbName)
|
||||||
|
e2e.ExecuteSQLQuery(t, env, dbName, "CREATE TABLE page_views (id INTEGER PRIMARY KEY, page TEXT, count INTEGER)")
|
||||||
|
e2e.ExecuteSQLQuery(t, env, dbName, "INSERT INTO page_views (page, count) VALUES ('home', 0)")
|
||||||
|
|
||||||
|
// Deploy frontend
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
frontendID = e2e.CreateTestDeployment(t, env, frontendName, tarballPath)
|
||||||
|
|
||||||
|
assert.NotEmpty(t, frontendID, "Frontend deployment should succeed")
|
||||||
|
t.Logf("✓ Static site deployed with SQLite backend")
|
||||||
|
|
||||||
|
// Wait for deployment
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test frontend serving and database interaction", func(t *testing.T) {
|
||||||
|
frontendDomain := env.BuildDeploymentDomain(frontendName)
|
||||||
|
|
||||||
|
// Test frontend
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, frontendDomain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "Frontend should serve")
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">", "Should contain React app")
|
||||||
|
|
||||||
|
// Simulate page view tracking
|
||||||
|
e2e.ExecuteSQLQuery(t, env, dbName, "UPDATE page_views SET count = count + 1 WHERE page = 'home'")
|
||||||
|
|
||||||
|
// Verify count
|
||||||
|
views := e2e.QuerySQLite(t, env, dbName, "SELECT count FROM page_views WHERE page = 'home'")
|
||||||
|
require.Len(t, views, 1, "Should have page view record")
|
||||||
|
|
||||||
|
count, ok := views[0]["count"].(float64)
|
||||||
|
require.True(t, ok, "Count should be a number")
|
||||||
|
assert.Equal(t, float64(1), count, "Page view count should be incremented")
|
||||||
|
|
||||||
|
t.Logf("✓ Full stack integration verified")
|
||||||
|
t.Logf(" - Frontend: %s", frontendDomain)
|
||||||
|
t.Logf(" - Database: %s", dbName)
|
||||||
|
t.Logf(" - Page views tracked: %.0f", count)
|
||||||
|
})
|
||||||
|
}
|
||||||
125
core/e2e/integration/ipfs_replica_test.go
Normal file
125
core/e2e/integration/ipfs_replica_test.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIPFS_ContentPinnedOnMultipleNodes verifies that deploying a static app
|
||||||
|
// makes the IPFS content available across multiple nodes.
|
||||||
|
func TestIPFS_ContentPinnedOnMultipleNodes(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if len(env.Config.Servers) < 2 {
|
||||||
|
t.Skip("Requires at least 2 servers")
|
||||||
|
}
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("ipfs-pin-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(15 * time.Second) // Wait for IPFS content replication
|
||||||
|
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
contentCID, _ := deployment["content_cid"].(string)
|
||||||
|
require.NotEmpty(t, contentCID, "Deployment should have a content CID")
|
||||||
|
|
||||||
|
t.Run("Content served via gateway", func(t *testing.T) {
|
||||||
|
// Extract domain from deployment URLs
|
||||||
|
urls, _ := deployment["urls"].([]interface{})
|
||||||
|
require.NotEmpty(t, urls, "Deployment should have URLs")
|
||||||
|
urlStr, _ := urls[0].(string)
|
||||||
|
domain := urlStr
|
||||||
|
if len(urlStr) > 8 && urlStr[:8] == "https://" {
|
||||||
|
domain = urlStr[8:]
|
||||||
|
} else if len(urlStr) > 7 && urlStr[:7] == "http://" {
|
||||||
|
domain = urlStr[7:]
|
||||||
|
}
|
||||||
|
if len(domain) > 0 && domain[len(domain)-1] == '/' {
|
||||||
|
domain = domain[:len(domain)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Logf("status=%d, body=%d bytes", resp.StatusCode, len(body))
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"IPFS content should be served via gateway (CID: %s)", contentCID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestIPFS_LargeFileDeployment verifies that deploying an app with larger
|
||||||
|
// static assets works correctly.
|
||||||
|
func TestIPFS_LargeFileDeployment(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("ipfs-large-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
// The react-vite tarball is our largest test asset
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
|
||||||
|
t.Run("Deployment has valid CID", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
contentCID, _ := deployment["content_cid"].(string)
|
||||||
|
assert.NotEmpty(t, contentCID, "Should have a content CID")
|
||||||
|
assert.True(t, len(contentCID) > 10, "CID should be a valid IPFS hash")
|
||||||
|
t.Logf("Content CID: %s", contentCID)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Static content serves correctly", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
urls, ok := deployment["urls"].([]interface{})
|
||||||
|
if !ok || len(urls) == 0 {
|
||||||
|
t.Skip("No URLs in deployment")
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeURL, _ := urls[0].(string)
|
||||||
|
domain := nodeURL
|
||||||
|
if len(nodeURL) > 8 && nodeURL[:8] == "https://" {
|
||||||
|
domain = nodeURL[8:]
|
||||||
|
} else if len(nodeURL) > 7 && nodeURL[:7] == "http://" {
|
||||||
|
domain = nodeURL[7:]
|
||||||
|
}
|
||||||
|
if len(domain) > 0 && domain[len(domain)-1] == '/' {
|
||||||
|
domain = domain[:len(domain)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
assert.Greater(t, len(body), 100, "Response should have substantial content")
|
||||||
|
})
|
||||||
|
}
|
||||||
136
core/e2e/production/cross_node_proxy_test.go
Normal file
136
core/e2e/production/cross_node_proxy_test.go
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
//go:build e2e && production
|
||||||
|
|
||||||
|
package production
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCrossNode_ProxyRouting tests that requests routed through the gateway
|
||||||
|
// are served correctly for a deployment.
|
||||||
|
func TestCrossNode_ProxyRouting(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
if len(env.Config.Servers) < 2 {
|
||||||
|
t.Skip("Cross-node testing requires at least 2 servers in config")
|
||||||
|
}
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("proxy-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for deployment to be active
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
t.Logf("Testing routing for: %s", domain)
|
||||||
|
|
||||||
|
t.Run("Request via gateway succeeds", func(t *testing.T) {
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Request should return 200 (got %d: %s)", resp.StatusCode, string(body))
|
||||||
|
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">",
|
||||||
|
"Should serve deployment content")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCrossNode_APIConsistency tests that API responses are consistent
|
||||||
|
func TestCrossNode_APIConsistency(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("consistency-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for replication
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
|
||||||
|
t.Run("Deployment list contains our deployment", func(t *testing.T) {
|
||||||
|
req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
|
||||||
|
|
||||||
|
deployments, ok := result["deployments"].([]interface{})
|
||||||
|
require.True(t, ok, "Response should have deployments array")
|
||||||
|
t.Logf("Gateway reports %d deployments", len(deployments))
|
||||||
|
|
||||||
|
found := false
|
||||||
|
for _, d := range deployments {
|
||||||
|
dep, _ := d.(map[string]interface{})
|
||||||
|
if dep["name"] == deploymentName {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, found, "Our deployment should be in the list")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCrossNode_DeploymentGetConsistency tests that deployment details are correct
|
||||||
|
func TestCrossNode_DeploymentGetConsistency(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("get-consistency-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for replication
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
|
||||||
|
t.Run("Deployment details are correct", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
|
||||||
|
cid, _ := deployment["content_cid"].(string)
|
||||||
|
assert.NotEmpty(t, cid, "Should have a content CID")
|
||||||
|
|
||||||
|
name, _ := deployment["name"].(string)
|
||||||
|
assert.Equal(t, deploymentName, name, "Name should match")
|
||||||
|
|
||||||
|
t.Logf("Deployment: name=%s, cid=%s, status=%s", name, cid, deployment["status"])
|
||||||
|
})
|
||||||
|
}
|
||||||
228
core/e2e/production/failover_test.go
Normal file
228
core/e2e/production/failover_test.go
Normal file
@ -0,0 +1,228 @@
|
|||||||
|
//go:build e2e && production
|
||||||
|
|
||||||
|
package production
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestFailover_HomeNodeDown verifies that when the home node's deployment process
|
||||||
|
// is down, requests still succeed via the replica node.
|
||||||
|
func TestFailover_HomeNodeDown(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if len(env.Config.Servers) < 2 {
|
||||||
|
t.Skip("Failover testing requires at least 2 servers")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deploy a Node.js backend so we have a process to stop
|
||||||
|
deploymentName := fmt.Sprintf("failover-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/node-api")
|
||||||
|
|
||||||
|
deploymentID := createNodeJSDeploymentProd(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for deployment and replica
|
||||||
|
healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
|
||||||
|
require.True(t, healthy, "Deployment should become healthy")
|
||||||
|
time.Sleep(20 * time.Second) // Wait for async replica setup
|
||||||
|
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURLProd(t, deployment)
|
||||||
|
require.NotEmpty(t, nodeURL)
|
||||||
|
domain := extractDomainProd(nodeURL)
|
||||||
|
|
||||||
|
t.Run("Deployment serves via gateway", func(t *testing.T) {
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Deployment should be served via gateway (got %d: %s)", resp.StatusCode, string(body))
|
||||||
|
t.Logf("Gateway response: status=%d body=%s", resp.StatusCode, string(body))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFailover_5xxRetry verifies that if one node returns a gateway error,
|
||||||
|
// the middleware retries on the next replica.
|
||||||
|
func TestFailover_5xxRetry(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if len(env.Config.Servers) < 2 {
|
||||||
|
t.Skip("Requires at least 2 servers")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deploy a static app (always works via IPFS, no process to crash)
|
||||||
|
deploymentName := fmt.Sprintf("retry-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
nodeURL := extractNodeURLProd(t, deployment)
|
||||||
|
if nodeURL == "" {
|
||||||
|
t.Skip("No node URL")
|
||||||
|
}
|
||||||
|
domain := extractDomainProd(nodeURL)
|
||||||
|
|
||||||
|
t.Run("Deployment serves successfully", func(t *testing.T) {
|
||||||
|
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Static content should be served (got %d: %s)", resp.StatusCode, string(body))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFailover_CrossNodeProxyTimeout verifies that cross-node proxy fails fast
|
||||||
|
// (within a reasonable timeout) rather than hanging.
|
||||||
|
func TestFailover_CrossNodeProxyTimeout(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
if len(env.Config.Servers) < 2 {
|
||||||
|
t.Skip("Requires at least 2 servers")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a request to a non-existent deployment — should fail fast
|
||||||
|
domain := fmt.Sprintf("nonexistent-%d.%s", time.Now().Unix(), env.BaseDomain)
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", env.GatewayURL+"/", nil)
|
||||||
|
req.Host = domain
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("Request failed in %v: %v", elapsed, err)
|
||||||
|
} else {
|
||||||
|
resp.Body.Close()
|
||||||
|
t.Logf("Got status %d in %v", resp.StatusCode, elapsed)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should respond within 15 seconds (our proxy timeout is 5s)
|
||||||
|
assert.Less(t, elapsed.Seconds(), 15.0,
|
||||||
|
"Request to non-existent deployment should fail fast, took %v", elapsed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func createNodeJSDeploymentProd(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var fileData []byte
|
||||||
|
|
||||||
|
info, err := os.Stat(tarballPath)
|
||||||
|
require.NoError(t, err, "Failed to stat: %s", tarballPath)
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
|
||||||
|
require.NoError(t, err, "Failed to create tarball from %s", tarballPath)
|
||||||
|
fileData = tarData
|
||||||
|
} else {
|
||||||
|
file, err := os.Open(tarballPath)
|
||||||
|
require.NoError(t, err, "Failed to open tarball: %s", tarballPath)
|
||||||
|
defer file.Close()
|
||||||
|
fileData, _ = io.ReadAll(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
|
||||||
|
body.WriteString(name + "\r\n")
|
||||||
|
|
||||||
|
body.WriteString("--" + boundary + "\r\n")
|
||||||
|
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
|
||||||
|
body.WriteString("Content-Type: application/gzip\r\n\r\n")
|
||||||
|
|
||||||
|
body.Write(fileData)
|
||||||
|
body.WriteString("\r\n--" + boundary + "--\r\n")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nodejs/upload", body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusCreated {
|
||||||
|
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
|
||||||
|
|
||||||
|
if id, ok := result["deployment_id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
if id, ok := result["id"].(string); ok {
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
t.Fatalf("Deployment response missing id: %+v", result)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractNodeURLProd(t *testing.T, deployment map[string]interface{}) string {
|
||||||
|
t.Helper()
|
||||||
|
if urls, ok := deployment["urls"].([]interface{}); ok && len(urls) > 0 {
|
||||||
|
if url, ok := urls[0].(string); ok {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if urls, ok := deployment["urls"].(map[string]interface{}); ok {
|
||||||
|
if url, ok := urls["node"].(string); ok {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractDomainProd(url string) string {
|
||||||
|
domain := url
|
||||||
|
if len(url) > 8 && url[:8] == "https://" {
|
||||||
|
domain = url[8:]
|
||||||
|
} else if len(url) > 7 && url[:7] == "http://" {
|
||||||
|
domain = url[7:]
|
||||||
|
}
|
||||||
|
if len(domain) > 0 && domain[len(domain)-1] == '/' {
|
||||||
|
domain = domain[:len(domain)-1]
|
||||||
|
}
|
||||||
|
return domain
|
||||||
|
}
|
||||||
185
core/e2e/production/https_certificate_test.go
Normal file
185
core/e2e/production/https_certificate_test.go
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
//go:build e2e && production
|
||||||
|
|
||||||
|
package production
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestHTTPS_CertificateValid tests that HTTPS works with a valid certificate
|
||||||
|
func TestHTTPS_CertificateValid(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("https-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
|
||||||
|
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for deployment and certificate provisioning
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
|
||||||
|
domain := env.BuildDeploymentDomain(deploymentName)
|
||||||
|
httpsURL := fmt.Sprintf("https://%s", domain)
|
||||||
|
|
||||||
|
t.Run("HTTPS connection with certificate verification", func(t *testing.T) {
|
||||||
|
// Create client that DOES verify certificates
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
// Do NOT skip verification - we want to test real certs
|
||||||
|
InsecureSkipVerify: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", httpsURL+"/", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
// Certificate might not be ready yet, or domain might not resolve
|
||||||
|
t.Logf("⚠ HTTPS request failed (this may be expected if certs are still provisioning): %v", err)
|
||||||
|
t.Skip("HTTPS not available or certificate not ready")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Logf("HTTPS returned %d (deployment may not be routed yet): %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check TLS connection state
|
||||||
|
if resp.TLS != nil {
|
||||||
|
t.Logf("✓ HTTPS works with valid certificate")
|
||||||
|
t.Logf(" - Domain: %s", domain)
|
||||||
|
t.Logf(" - TLS Version: %x", resp.TLS.Version)
|
||||||
|
t.Logf(" - Cipher Suite: %x", resp.TLS.CipherSuite)
|
||||||
|
if len(resp.TLS.PeerCertificates) > 0 {
|
||||||
|
cert := resp.TLS.PeerCertificates[0]
|
||||||
|
t.Logf(" - Certificate Subject: %s", cert.Subject)
|
||||||
|
t.Logf(" - Certificate Issuer: %s", cert.Issuer)
|
||||||
|
t.Logf(" - Valid Until: %s", cert.NotAfter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHTTPS_CertificateDetails tests certificate properties
|
||||||
|
func TestHTTPS_CertificateDetails(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
t.Run("Base domain certificate", func(t *testing.T) {
|
||||||
|
httpsURL := fmt.Sprintf("https://%s", env.BaseDomain)
|
||||||
|
|
||||||
|
// Connect and get certificate info
|
||||||
|
conn, err := tls.Dial("tcp", env.BaseDomain+":443", &tls.Config{
|
||||||
|
InsecureSkipVerify: true, // We just want to inspect the cert
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("⚠ Could not connect to %s:443: %v", env.BaseDomain, err)
|
||||||
|
t.Skip("HTTPS not available on base domain")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
certs := conn.ConnectionState().PeerCertificates
|
||||||
|
require.NotEmpty(t, certs, "Should have certificates")
|
||||||
|
|
||||||
|
cert := certs[0]
|
||||||
|
t.Logf("Certificate for %s:", env.BaseDomain)
|
||||||
|
t.Logf(" - Subject: %s", cert.Subject)
|
||||||
|
t.Logf(" - DNS Names: %v", cert.DNSNames)
|
||||||
|
t.Logf(" - Valid From: %s", cert.NotBefore)
|
||||||
|
t.Logf(" - Valid Until: %s", cert.NotAfter)
|
||||||
|
t.Logf(" - Issuer: %s", cert.Issuer)
|
||||||
|
|
||||||
|
// Check that certificate covers our domain
|
||||||
|
coversDomain := false
|
||||||
|
for _, name := range cert.DNSNames {
|
||||||
|
if name == env.BaseDomain || name == "*."+env.BaseDomain {
|
||||||
|
coversDomain = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assert.True(t, coversDomain, "Certificate should cover %s", env.BaseDomain)
|
||||||
|
|
||||||
|
// Check certificate is not expired
|
||||||
|
assert.True(t, time.Now().Before(cert.NotAfter), "Certificate should not be expired")
|
||||||
|
assert.True(t, time.Now().After(cert.NotBefore), "Certificate should be valid now")
|
||||||
|
|
||||||
|
// Make actual HTTPS request to verify it works
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
InsecureSkipVerify: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := client.Get(httpsURL)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("⚠ HTTPS request failed: %v", err)
|
||||||
|
} else {
|
||||||
|
resp.Body.Close()
|
||||||
|
t.Logf("✓ HTTPS request succeeded with status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHTTPS_HTTPRedirect tests that HTTP requests are redirected to HTTPS
|
||||||
|
func TestHTTPS_HTTPRedirect(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
t.Run("HTTP redirects to HTTPS", func(t *testing.T) {
|
||||||
|
// Create client that doesn't follow redirects
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
httpURL := fmt.Sprintf("http://%s", env.BaseDomain)
|
||||||
|
|
||||||
|
resp, err := client.Get(httpURL)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("⚠ HTTP request failed: %v", err)
|
||||||
|
t.Skip("HTTP not available or redirects not configured")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Check for redirect
|
||||||
|
if resp.StatusCode >= 300 && resp.StatusCode < 400 {
|
||||||
|
location := resp.Header.Get("Location")
|
||||||
|
t.Logf("✓ HTTP redirects to: %s (status %d)", location, resp.StatusCode)
|
||||||
|
assert.Contains(t, location, "https://", "Should redirect to HTTPS")
|
||||||
|
} else if resp.StatusCode == http.StatusOK {
|
||||||
|
// HTTP might just serve content directly in some configurations
|
||||||
|
t.Logf("⚠ HTTP returned 200 instead of redirect (HTTPS redirect may not be configured)")
|
||||||
|
} else {
|
||||||
|
t.Logf("HTTP returned status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
204
core/e2e/production/https_external_test.go
Normal file
204
core/e2e/production/https_external_test.go
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
//go:build e2e && production
|
||||||
|
|
||||||
|
package production
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestHTTPS_ExternalAccess tests that deployed apps are accessible via HTTPS
|
||||||
|
// from the public internet with valid SSL certificates.
|
||||||
|
//
|
||||||
|
// This test requires:
|
||||||
|
// - Orama deployed on a VPS with a real domain
|
||||||
|
// - DNS properly configured
|
||||||
|
// - Run with: go test -v -tags "e2e production" -run TestHTTPS ./e2e/production/...
|
||||||
|
func TestHTTPS_ExternalAccess(t *testing.T) {
|
||||||
|
// Skip if not configured for external testing
|
||||||
|
externalURL := os.Getenv("ORAMA_EXTERNAL_URL")
|
||||||
|
if externalURL == "" {
|
||||||
|
t.Skip("ORAMA_EXTERNAL_URL not set - skipping external HTTPS test")
|
||||||
|
}
|
||||||
|
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("https-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
var deploymentID string
|
||||||
|
|
||||||
|
// Cleanup after test
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup && deploymentID != "" {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Deploy static app", func(t *testing.T) {
|
||||||
|
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
|
||||||
|
})
|
||||||
|
|
||||||
|
var deploymentDomain string
|
||||||
|
|
||||||
|
t.Run("Get deployment domain", func(t *testing.T) {
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
|
||||||
|
nodeURL := extractNodeURL(t, deployment)
|
||||||
|
require.NotEmpty(t, nodeURL, "Deployment should have node URL")
|
||||||
|
|
||||||
|
deploymentDomain = extractDomain(nodeURL)
|
||||||
|
t.Logf("Deployment domain: %s", deploymentDomain)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Wait for DNS propagation", func(t *testing.T) {
|
||||||
|
// Poll DNS until the domain resolves
|
||||||
|
deadline := time.Now().Add(2 * time.Minute)
|
||||||
|
|
||||||
|
for time.Now().Before(deadline) {
|
||||||
|
ips, err := net.LookupHost(deploymentDomain)
|
||||||
|
if err == nil && len(ips) > 0 {
|
||||||
|
t.Logf("DNS resolved: %s -> %v", deploymentDomain, ips)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Logf("DNS not yet resolved, waiting...")
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Fatalf("DNS did not resolve within timeout for %s", deploymentDomain)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Test HTTPS access with valid certificate", func(t *testing.T) {
|
||||||
|
// Create HTTP client that DOES verify certificates
|
||||||
|
// (no InsecureSkipVerify - we want to test real SSL)
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
// Use default verification (validates certificate)
|
||||||
|
InsecureSkipVerify: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
url := fmt.Sprintf("https://%s/", deploymentDomain)
|
||||||
|
t.Logf("Testing HTTPS: %s", url)
|
||||||
|
|
||||||
|
resp, err := client.Get(url)
|
||||||
|
require.NoError(t, err, "HTTPS request should succeed with valid certificate")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify it's our React app
|
||||||
|
assert.Contains(t, string(body), "<div id=\"root\">", "Should serve React app")
|
||||||
|
|
||||||
|
t.Logf("HTTPS test passed: %s returned %d", url, resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Verify SSL certificate details", func(t *testing.T) {
|
||||||
|
conn, err := tls.Dial("tcp", deploymentDomain+":443", nil)
|
||||||
|
require.NoError(t, err, "TLS dial should succeed")
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
state := conn.ConnectionState()
|
||||||
|
require.NotEmpty(t, state.PeerCertificates, "Should have peer certificates")
|
||||||
|
|
||||||
|
cert := state.PeerCertificates[0]
|
||||||
|
t.Logf("Certificate subject: %s", cert.Subject)
|
||||||
|
t.Logf("Certificate issuer: %s", cert.Issuer)
|
||||||
|
t.Logf("Certificate valid from: %s to %s", cert.NotBefore, cert.NotAfter)
|
||||||
|
|
||||||
|
// Verify certificate is not expired
|
||||||
|
assert.True(t, time.Now().After(cert.NotBefore), "Certificate should be valid (not before)")
|
||||||
|
assert.True(t, time.Now().Before(cert.NotAfter), "Certificate should be valid (not expired)")
|
||||||
|
|
||||||
|
// Verify domain matches
|
||||||
|
err = cert.VerifyHostname(deploymentDomain)
|
||||||
|
assert.NoError(t, err, "Certificate should be valid for domain %s", deploymentDomain)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHTTPS_DomainFormat verifies deployment URL format
|
||||||
|
func TestHTTPS_DomainFormat(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err, "Failed to load test environment")
|
||||||
|
|
||||||
|
deploymentName := fmt.Sprintf("domain-test-%d", time.Now().Unix())
|
||||||
|
tarballPath := filepath.Join("../../testdata/apps/react-app")
|
||||||
|
var deploymentID string
|
||||||
|
|
||||||
|
// Cleanup after test
|
||||||
|
defer func() {
|
||||||
|
if !env.SkipCleanup && deploymentID != "" {
|
||||||
|
e2e.DeleteDeployment(t, env, deploymentID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Run("Deploy app and verify domain format", func(t *testing.T) {
|
||||||
|
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
|
||||||
|
require.NotEmpty(t, deploymentID)
|
||||||
|
|
||||||
|
deployment := e2e.GetDeployment(t, env, deploymentID)
|
||||||
|
|
||||||
|
t.Logf("Deployment URLs: %+v", deployment["urls"])
|
||||||
|
|
||||||
|
// Get deployment URL (handles both array and map formats)
|
||||||
|
deploymentURL := extractNodeURL(t, deployment)
|
||||||
|
assert.NotEmpty(t, deploymentURL, "Should have deployment URL")
|
||||||
|
|
||||||
|
// URL should be simple format: {name}.{baseDomain} (NOT {name}.node-{shortID}.{baseDomain})
|
||||||
|
if deploymentURL != "" {
|
||||||
|
assert.NotContains(t, deploymentURL, ".node-", "URL should NOT contain node identifier (simplified format)")
|
||||||
|
assert.Contains(t, deploymentURL, deploymentName, "URL should contain deployment name")
|
||||||
|
t.Logf("Deployment URL: %s", deploymentURL)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractNodeURL(t *testing.T, deployment map[string]interface{}) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
if urls, ok := deployment["urls"].([]interface{}); ok && len(urls) > 0 {
|
||||||
|
if url, ok := urls[0].(string); ok {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if urls, ok := deployment["urls"].(map[string]interface{}); ok {
|
||||||
|
if url, ok := urls["node"].(string); ok {
|
||||||
|
return url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractDomain(url string) string {
|
||||||
|
domain := url
|
||||||
|
if len(url) > 8 && url[:8] == "https://" {
|
||||||
|
domain = url[8:]
|
||||||
|
} else if len(url) > 7 && url[:7] == "http://" {
|
||||||
|
domain = url[7:]
|
||||||
|
}
|
||||||
|
if len(domain) > 0 && domain[len(domain)-1] == '/' {
|
||||||
|
domain = domain[:len(domain)-1]
|
||||||
|
}
|
||||||
|
return domain
|
||||||
|
}
|
||||||
95
core/e2e/production/middleware_test.go
Normal file
95
core/e2e/production/middleware_test.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
//go:build e2e && production
|
||||||
|
|
||||||
|
package production
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestMiddleware_NonExistentDeployment verifies that requests to a non-existent
|
||||||
|
// deployment return 404 (not 502 or hang).
|
||||||
|
func TestMiddleware_NonExistentDeployment(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
domain := fmt.Sprintf("does-not-exist-%d.%s", time.Now().Unix(), env.BaseDomain)
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", env.GatewayURL+"/", nil)
|
||||||
|
req.Host = domain
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
elapsed := time.Since(start)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("Request failed in %v: %v", elapsed, err)
|
||||||
|
// Connection refused or timeout is acceptable
|
||||||
|
assert.Less(t, elapsed.Seconds(), 15.0, "Should fail fast")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Logf("Status: %d, elapsed: %v, body: %s", resp.StatusCode, elapsed, string(body))
|
||||||
|
|
||||||
|
// Should be 404 or 502, not 200
|
||||||
|
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Non-existent deployment should not return 200")
|
||||||
|
assert.Less(t, elapsed.Seconds(), 15.0, "Should respond fast")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestMiddleware_InternalAPIAuthRejection verifies that internal replica API
|
||||||
|
// endpoints reject requests without the proper internal auth header.
|
||||||
|
func TestMiddleware_InternalAPIAuthRejection(t *testing.T) {
|
||||||
|
env, err := e2e.LoadTestEnv()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("No auth header rejected", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("POST",
|
||||||
|
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Should be rejected (401 or 403)
|
||||||
|
assert.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"Internal API without auth should be rejected (got %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Wrong auth header rejected", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("POST",
|
||||||
|
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
|
||||||
|
req.Header.Set("X-Orama-Internal-Auth", "wrong-token")
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusBadRequest,
|
||||||
|
"Internal API with wrong auth should be rejected (got %d)", resp.StatusCode)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Regular API key does not grant internal access", func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("POST",
|
||||||
|
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env.APIKey)
|
||||||
|
|
||||||
|
resp, err := env.HTTPClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// The request may pass auth but fail on bad body — 400 is acceptable
|
||||||
|
// But it should NOT succeed with 200
|
||||||
|
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"Regular API key should not fully authenticate internal endpoints")
|
||||||
|
})
|
||||||
|
}
|
||||||
148
core/e2e/shared/auth_extended_test.go
Normal file
148
core/e2e/shared/auth_extended_test.go
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/DeBrosOfficial/network/e2e"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAuth_ExpiredOrInvalidJWT verifies that an expired/invalid JWT token is rejected.
|
||||||
|
func TestAuth_ExpiredOrInvalidJWT(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
|
||||||
|
// Craft an obviously invalid JWT
|
||||||
|
invalidJWT := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiZXhwIjoxfQ.invalid"
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+invalidJWT)
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
|
||||||
|
"Invalid JWT should return 401")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAuth_EmptyAPIKey verifies that an empty API key is rejected.
|
||||||
|
func TestAuth_EmptyAPIKey(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set("Authorization", "Bearer ")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
|
||||||
|
"Empty API key should return 401")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAuth_SQLInjectionInAPIKey verifies that SQL injection in the API key
|
||||||
|
// does not bypass authentication.
|
||||||
|
func TestAuth_SQLInjectionInAPIKey(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
|
||||||
|
injectionAttempts := []string{
|
||||||
|
"' OR '1'='1",
|
||||||
|
"'; DROP TABLE api_keys; --",
|
||||||
|
"\" OR \"1\"=\"1",
|
||||||
|
"admin'--",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attempt := range injectionAttempts {
|
||||||
|
t.Run(attempt, func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+attempt)
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
|
||||||
|
"SQL injection attempt should be rejected")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAuth_NamespaceScopedAccess verifies that an API key for one namespace
|
||||||
|
// cannot access another namespace's deployments.
|
||||||
|
func TestAuth_NamespaceScopedAccess(t *testing.T) {
|
||||||
|
// Create two environments with different namespaces
|
||||||
|
env1, err := e2e.LoadTestEnvWithNamespace("auth-test-ns1")
|
||||||
|
if err != nil {
|
||||||
|
t.Skip("Could not create namespace env1: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
env2, err := e2e.LoadTestEnvWithNamespace("auth-test-ns2")
|
||||||
|
if err != nil {
|
||||||
|
t.Skip("Could not create namespace env2: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Namespace 1 key cannot list namespace 2 deployments", func(t *testing.T) {
|
||||||
|
// Use env1's API key to query env2's gateway
|
||||||
|
// The namespace should be scoped to the API key
|
||||||
|
req, _ := http.NewRequest("GET", env2.GatewayURL+"/v1/deployments/list", nil)
|
||||||
|
req.Header.Set("Authorization", "Bearer "+env1.APIKey)
|
||||||
|
req.Header.Set("X-Namespace", "auth-test-ns2")
|
||||||
|
|
||||||
|
resp, err := env1.HTTPClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Skip("Gateway unreachable")
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// The API should either reject (403) or return only ns1's deployments
|
||||||
|
t.Logf("Cross-namespace access returned: %d", resp.StatusCode)
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
t.Log("API returned 200 — namespace isolation may be enforced at data level")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAuth_PublicEndpointsNoAuth verifies that health/status endpoints
|
||||||
|
// don't require authentication.
|
||||||
|
func TestAuth_PublicEndpointsNoAuth(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
publicPaths := []string{
|
||||||
|
"/v1/health",
|
||||||
|
"/v1/status",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, path := range publicPaths {
|
||||||
|
t.Run(path, func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", gatewayURL+path, nil)
|
||||||
|
// No auth header
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
assert.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"%s should be accessible without auth", path)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
333
core/e2e/shared/auth_negative_test.go
Normal file
333
core/e2e/shared/auth_negative_test.go
Normal file
@ -0,0 +1,333 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// =============================================================================
|
||||||
|
// STRICT AUTHENTICATION NEGATIVE TESTS
|
||||||
|
// These tests verify that authentication is properly enforced.
|
||||||
|
// Tests FAIL if unauthenticated/invalid requests are allowed through.
|
||||||
|
// =============================================================================
|
||||||
|
|
||||||
|
func TestAuth_MissingAPIKey(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request protected endpoint without auth headers
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: Must reject requests without authentication
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: Protected endpoint allowed request without auth - expected 401/403, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Missing API key correctly rejected with status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_InvalidAPIKey(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request with invalid API key
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "Bearer invalid-key-xyz-123456789")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: Must reject invalid API keys
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: Invalid API key was accepted - expected 401/403, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Invalid API key correctly rejected with status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_CacheWithoutAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request cache endpoint without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/health",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// STRICT: Cache endpoint must require authentication
|
||||||
|
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
|
||||||
|
"FAIL: Cache endpoint accessible without auth - expected 401/403, got %d", status)
|
||||||
|
t.Logf(" ✓ Cache endpoint correctly requires auth (status %d)", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_StorageWithoutAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request storage endpoint without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/storage/status/QmTest",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// STRICT: Storage endpoint must require authentication
|
||||||
|
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
|
||||||
|
"FAIL: Storage endpoint accessible without auth - expected 401/403, got %d", status)
|
||||||
|
t.Logf(" ✓ Storage endpoint correctly requires auth (status %d)", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_RQLiteWithoutAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request rqlite endpoint without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// STRICT: RQLite endpoint must require authentication
|
||||||
|
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
|
||||||
|
"FAIL: RQLite endpoint accessible without auth - expected 401/403, got %d", status)
|
||||||
|
t.Logf(" ✓ RQLite endpoint correctly requires auth (status %d)", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_MalformedBearerToken(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request with malformed bearer token (missing "Bearer " prefix)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "invalid-token-format-no-bearer")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: Must reject malformed authorization headers
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: Malformed auth header accepted - expected 401/403, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Malformed bearer token correctly rejected (status %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_ExpiredJWT(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Test with a clearly invalid JWT structure
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "Bearer expired.jwt.token.invalid")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: Must reject invalid/expired JWT tokens
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: Invalid JWT accepted - expected 401/403, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Invalid JWT correctly rejected (status %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_EmptyBearerToken(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request with empty bearer token
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "Bearer ")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: Must reject empty bearer tokens
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: Empty bearer token accepted - expected 401/403, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Empty bearer token correctly rejected (status %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_DuplicateAuthHeaders(t *testing.T) {
|
||||||
|
if e2e.GetAPIKey() == "" {
|
||||||
|
t.Skip("No API key configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request with both valid API key in Authorization header
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/health",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "Bearer " + e2e.GetAPIKey(),
|
||||||
|
"X-API-Key": e2e.GetAPIKey(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// Should succeed since we have a valid API key
|
||||||
|
require.Equal(t, http.StatusOK, status,
|
||||||
|
"FAIL: Valid API key rejected when multiple auth headers present - got %d", status)
|
||||||
|
t.Logf(" ✓ Duplicate auth headers with valid key succeeds (status %d)", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_CaseSensitiveAPIKey(t *testing.T) {
|
||||||
|
apiKey := e2e.GetAPIKey()
|
||||||
|
if apiKey == "" {
|
||||||
|
t.Skip("No API key configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Create incorrectly cased API key
|
||||||
|
incorrectKey := ""
|
||||||
|
for i, ch := range apiKey {
|
||||||
|
if i%2 == 0 && unicode.IsLetter(ch) {
|
||||||
|
if unicode.IsLower(ch) {
|
||||||
|
incorrectKey += string(unicode.ToUpper(ch))
|
||||||
|
} else {
|
||||||
|
incorrectKey += string(unicode.ToLower(ch))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
incorrectKey += string(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip if the key didn't change (no letters)
|
||||||
|
if incorrectKey == apiKey {
|
||||||
|
t.Skip("API key has no letters to change case")
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
req.Header.Set("Authorization", "Bearer "+incorrectKey)
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// STRICT: API keys MUST be case-sensitive
|
||||||
|
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
|
||||||
|
"FAIL: API key check is not case-sensitive - modified key accepted with status %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Case-modified API key correctly rejected (status %d)", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_HealthEndpointNoAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Health endpoint at /v1/health should NOT require auth
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/health", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Health endpoint should be publicly accessible
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"FAIL: Health endpoint should not require auth - expected 200, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Health endpoint correctly accessible without auth")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_StatusEndpointNoAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Status endpoint at /v1/status should NOT require auth
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/status", nil)
|
||||||
|
require.NoError(t, err, "FAIL: Could not create request")
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(30 * time.Second)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Status endpoint should be publicly accessible
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode,
|
||||||
|
"FAIL: Status endpoint should not require auth - expected 200, got %d", resp.StatusCode)
|
||||||
|
t.Logf(" ✓ Status endpoint correctly accessible without auth")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_DeploymentsWithoutAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request deployments endpoint without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/deployments/list",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// STRICT: Deployments endpoint must require authentication
|
||||||
|
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
|
||||||
|
"FAIL: Deployments endpoint accessible without auth - expected 401/403, got %d", status)
|
||||||
|
t.Logf(" ✓ Deployments endpoint correctly requires auth (status %d)", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuth_SQLiteWithoutAuth(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Request SQLite endpoint without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/db/sqlite/list",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
require.NoError(t, err, "FAIL: Request failed")
|
||||||
|
|
||||||
|
// STRICT: SQLite endpoint must require authentication
|
||||||
|
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
|
||||||
|
"FAIL: SQLite endpoint accessible without auth - expected 401/403, got %d", status)
|
||||||
|
t.Logf(" ✓ SQLite endpoint correctly requires auth (status %d)", status)
|
||||||
|
}
|
||||||
513
core/e2e/shared/cache_http_test.go
Normal file
513
core/e2e/shared/cache_http_test.go
Normal file
@ -0,0 +1,513 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCache_Health(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/health",
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("health check failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["status"] != "ok" {
|
||||||
|
t.Fatalf("expected status 'ok', got %v", resp["status"])
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["service"] != "olric" {
|
||||||
|
t.Fatalf("expected service 'olric', got %v", resp["service"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_PutGet(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "test-key"
|
||||||
|
value := "test-value"
|
||||||
|
|
||||||
|
// Put value
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("put failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get value
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err = getReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("get failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var getResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &getResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if getResp["value"] != value {
|
||||||
|
t.Fatalf("expected value %q, got %v", value, getResp["value"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_PutGetJSON(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "json-key"
|
||||||
|
jsonValue := map[string]interface{}{
|
||||||
|
"name": "John",
|
||||||
|
"age": 30,
|
||||||
|
"tags": []string{"developer", "golang"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put JSON value
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": jsonValue,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("put failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get JSON value
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := getReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("get failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var getResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &getResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
retrievedValue := getResp["value"].(map[string]interface{})
|
||||||
|
if retrievedValue["name"] != jsonValue["name"] {
|
||||||
|
t.Fatalf("expected name %q, got %v", jsonValue["name"], retrievedValue["name"])
|
||||||
|
}
|
||||||
|
if retrievedValue["age"] != float64(30) {
|
||||||
|
t.Fatalf("expected age 30, got %v", retrievedValue["age"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_Delete(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "delete-key"
|
||||||
|
value := "delete-value"
|
||||||
|
|
||||||
|
// Put value
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete value
|
||||||
|
deleteReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = deleteReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("delete failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify deletion
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
// Should get 404 for missing key
|
||||||
|
if status != http.StatusNotFound {
|
||||||
|
t.Fatalf("expected status 404 for deleted key, got %d", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_TTL(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
key := "ttl-key"
|
||||||
|
value := "ttl-value"
|
||||||
|
|
||||||
|
// Put value with TTL
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": value,
|
||||||
|
"ttl": "2s",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("put with TTL failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify value exists
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for TTL expiry (2 seconds + buffer)
|
||||||
|
e2e.Delay(2500)
|
||||||
|
|
||||||
|
// Verify value is expired
|
||||||
|
_, status, err = getReq.Do(ctx)
|
||||||
|
if status != http.StatusNotFound {
|
||||||
|
t.Logf("warning: TTL expiry may not be fully implemented; got status %d", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_Scan(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
|
||||||
|
// Put multiple keys
|
||||||
|
keys := []string{"user-1", "user-2", "session-1", "session-2"}
|
||||||
|
for _, key := range keys {
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": "value-" + key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan all keys
|
||||||
|
scanReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := scanReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("scan failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var scanResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
keysResp := scanResp["keys"].([]interface{})
|
||||||
|
if len(keysResp) < 4 {
|
||||||
|
t.Fatalf("expected at least 4 keys, got %d", len(keysResp))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_ScanWithRegex(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
|
||||||
|
// Put keys with different patterns
|
||||||
|
keys := []string{"user-1", "user-2", "session-1", "session-2"}
|
||||||
|
for _, key := range keys {
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": "value-" + key,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan with regex pattern
|
||||||
|
scanReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"pattern": "^user-",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := scanReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("scan with regex failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var scanResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
keysResp := scanResp["keys"].([]interface{})
|
||||||
|
if len(keysResp) < 2 {
|
||||||
|
t.Fatalf("expected at least 2 keys matching pattern, got %d", len(keysResp))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_MultiGet(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
keys := []string{"key-1", "key-2", "key-3"}
|
||||||
|
|
||||||
|
// Put values
|
||||||
|
for i, key := range keys {
|
||||||
|
putReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/put",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": key,
|
||||||
|
"value": fmt.Sprintf("value-%d", i),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := putReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multi-get
|
||||||
|
multiGetReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/mget",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"keys": keys,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := multiGetReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("mget failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var mgetResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &mgetResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
results := mgetResp["results"].([]interface{})
|
||||||
|
if len(results) != 3 {
|
||||||
|
t.Fatalf("expected 3 results, got %d", len(results))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_MissingDMap(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": "",
|
||||||
|
"key": "any-key",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := getReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusBadRequest {
|
||||||
|
t.Fatalf("expected status 400 for missing dmap, got %d", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCache_MissingKey(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dmap := e2e.GenerateDMapName()
|
||||||
|
|
||||||
|
getReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/cache/get",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"dmap": dmap,
|
||||||
|
"key": "non-existent-key",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := getReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusNotFound {
|
||||||
|
t.Fatalf("expected status 404 for missing key, got %d", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
225
core/e2e/shared/network_http_test.go
Normal file
225
core/e2e/shared/network_http_test.go
Normal file
@ -0,0 +1,225 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNetwork_Health(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/health",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("health check failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["status"] != "ok" {
|
||||||
|
t.Fatalf("expected status 'ok', got %v", resp["status"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_Status(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/network/status",
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("status check failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := resp["connected"]; !ok {
|
||||||
|
t.Fatalf("expected 'connected' field in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := resp["peer_count"]; !ok {
|
||||||
|
t.Fatalf("expected 'peer_count' field in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_Peers(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/network/peers",
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("peers check failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := resp["peers"]; !ok {
|
||||||
|
t.Fatalf("expected 'peers' field in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_ProxyAnonSuccess(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"url": "https://httpbin.org/get",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("proxy anon request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["status_code"] != float64(200) {
|
||||||
|
t.Fatalf("expected proxy status 200, got %v", resp["status_code"])
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := resp["body"]; !ok {
|
||||||
|
t.Fatalf("expected 'body' field in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_ProxyAnonBadURL(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"url": "http://localhost:1/nonexistent",
|
||||||
|
"method": "GET",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
if err == nil && status == http.StatusOK {
|
||||||
|
t.Fatalf("expected error for bad URL, got status 200")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"url": "https://httpbin.org/post",
|
||||||
|
"method": "POST",
|
||||||
|
"headers": map[string]string{"User-Agent": "Orama-E2E-Test/1.0"},
|
||||||
|
"body": "test_data",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("proxy anon POST failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["status_code"] != float64(200) {
|
||||||
|
t.Fatalf("expected proxy status 200, got %v", resp["status_code"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNetwork_Unauthorized(t *testing.T) {
|
||||||
|
// Test without API key
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Create request without auth
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/network/status",
|
||||||
|
SkipAuth: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusUnauthorized && status != http.StatusForbidden {
|
||||||
|
t.Logf("warning: expected 401/403, got %d (auth may not be enforced on this endpoint)", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
463
core/e2e/shared/pubsub_client_test.go
Normal file
463
core/e2e/shared/pubsub_client_test.go
Normal file
@ -0,0 +1,463 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestPubSub_SubscribePublish tests basic pub/sub functionality via WebSocket
|
||||||
|
func TestPubSub_SubscribePublish(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
message := "test-message-from-publisher"
|
||||||
|
|
||||||
|
// Create subscriber first
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish message
|
||||||
|
if err := publisher.Publish([]byte(message)); err != nil {
|
||||||
|
t.Fatalf("publish failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive message on subscriber
|
||||||
|
msg, err := subscriber.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("receive failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(msg) != message {
|
||||||
|
t.Fatalf("expected message %q, got %q", message, string(msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_MultipleSubscribers tests that multiple subscribers receive the same message
|
||||||
|
func TestPubSub_MultipleSubscribers(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
message1 := "message-1"
|
||||||
|
message2 := "message-2"
|
||||||
|
|
||||||
|
// Create two subscribers
|
||||||
|
sub1, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber1: %v", err)
|
||||||
|
}
|
||||||
|
defer sub1.Close()
|
||||||
|
|
||||||
|
sub2, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber2: %v", err)
|
||||||
|
}
|
||||||
|
defer sub2.Close()
|
||||||
|
|
||||||
|
// Give subscribers time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish first message
|
||||||
|
if err := publisher.Publish([]byte(message1)); err != nil {
|
||||||
|
t.Fatalf("publish1 failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Both subscribers should receive first message
|
||||||
|
msg1a, err := sub1.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub1 receive1 failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(msg1a) != message1 {
|
||||||
|
t.Fatalf("sub1: expected %q, got %q", message1, string(msg1a))
|
||||||
|
}
|
||||||
|
|
||||||
|
msg1b, err := sub2.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub2 receive1 failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(msg1b) != message1 {
|
||||||
|
t.Fatalf("sub2: expected %q, got %q", message1, string(msg1b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish second message
|
||||||
|
if err := publisher.Publish([]byte(message2)); err != nil {
|
||||||
|
t.Fatalf("publish2 failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Both subscribers should receive second message
|
||||||
|
msg2a, err := sub1.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub1 receive2 failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(msg2a) != message2 {
|
||||||
|
t.Fatalf("sub1: expected %q, got %q", message2, string(msg2a))
|
||||||
|
}
|
||||||
|
|
||||||
|
msg2b, err := sub2.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub2 receive2 failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(msg2b) != message2 {
|
||||||
|
t.Fatalf("sub2: expected %q, got %q", message2, string(msg2b))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_Deduplication tests that multiple identical messages are all received
|
||||||
|
func TestPubSub_Deduplication(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
message := "duplicate-test-message"
|
||||||
|
|
||||||
|
// Create subscriber
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish the same message multiple times
|
||||||
|
for i := 0; i < 3; i++ {
|
||||||
|
if err := publisher.Publish([]byte(message)); err != nil {
|
||||||
|
t.Fatalf("publish %d failed: %v", i, err)
|
||||||
|
}
|
||||||
|
// Small delay between publishes
|
||||||
|
e2e.Delay(50)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive messages - should get all (no dedup filter)
|
||||||
|
receivedCount := 0
|
||||||
|
for receivedCount < 3 {
|
||||||
|
_, err := subscriber.ReceiveWithTimeout(5 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
receivedCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
if receivedCount < 1 {
|
||||||
|
t.Fatalf("expected to receive at least 1 message, got %d", receivedCount)
|
||||||
|
}
|
||||||
|
t.Logf("received %d messages", receivedCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_ConcurrentPublish tests concurrent message publishing
|
||||||
|
func TestPubSub_ConcurrentPublish(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
numMessages := 10
|
||||||
|
|
||||||
|
// Create subscriber
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish multiple messages concurrently
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := 0; i < numMessages; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(idx int) {
|
||||||
|
defer wg.Done()
|
||||||
|
msg := fmt.Sprintf("concurrent-msg-%d", idx)
|
||||||
|
if err := publisher.Publish([]byte(msg)); err != nil {
|
||||||
|
t.Logf("publish %d failed: %v", idx, err)
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Receive messages
|
||||||
|
receivedCount := 0
|
||||||
|
for receivedCount < numMessages {
|
||||||
|
_, err := subscriber.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
receivedCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
if receivedCount < numMessages {
|
||||||
|
t.Logf("expected %d messages, got %d (some may have been dropped)", numMessages, receivedCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_TopicIsolation tests that messages are isolated to their topics
|
||||||
|
func TestPubSub_TopicIsolation(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic1 := e2e.GenerateTopic()
|
||||||
|
topic2 := e2e.GenerateTopic()
|
||||||
|
msg1 := "message-on-topic1"
|
||||||
|
msg2 := "message-on-topic2"
|
||||||
|
|
||||||
|
// Create subscriber for topic1
|
||||||
|
sub1, err := e2e.NewWSPubSubClient(t, topic1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber1: %v", err)
|
||||||
|
}
|
||||||
|
defer sub1.Close()
|
||||||
|
|
||||||
|
// Create subscriber for topic2
|
||||||
|
sub2, err := e2e.NewWSPubSubClient(t, topic2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber2: %v", err)
|
||||||
|
}
|
||||||
|
defer sub2.Close()
|
||||||
|
|
||||||
|
// Give subscribers time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publishers
|
||||||
|
pub1, err := e2e.NewWSPubSubClient(t, topic1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher1: %v", err)
|
||||||
|
}
|
||||||
|
defer pub1.Close()
|
||||||
|
|
||||||
|
pub2, err := e2e.NewWSPubSubClient(t, topic2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher2: %v", err)
|
||||||
|
}
|
||||||
|
defer pub2.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish to topic2 first
|
||||||
|
if err := pub2.Publish([]byte(msg2)); err != nil {
|
||||||
|
t.Fatalf("publish2 failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to topic1
|
||||||
|
if err := pub1.Publish([]byte(msg1)); err != nil {
|
||||||
|
t.Fatalf("publish1 failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sub1 should receive msg1 only
|
||||||
|
received1, err := sub1.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub1 receive failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(received1) != msg1 {
|
||||||
|
t.Fatalf("sub1: expected %q, got %q", msg1, string(received1))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sub2 should receive msg2 only
|
||||||
|
received2, err := sub2.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("sub2 receive failed: %v", err)
|
||||||
|
}
|
||||||
|
if string(received2) != msg2 {
|
||||||
|
t.Fatalf("sub2: expected %q, got %q", msg2, string(received2))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_EmptyMessage tests sending and receiving empty messages
|
||||||
|
func TestPubSub_EmptyMessage(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
|
||||||
|
// Create subscriber
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish empty message
|
||||||
|
if err := publisher.Publish([]byte("")); err != nil {
|
||||||
|
t.Fatalf("publish empty failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive on subscriber - should get empty message
|
||||||
|
msg, err := subscriber.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("receive failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msg) != 0 {
|
||||||
|
t.Fatalf("expected empty message, got %q", string(msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_LargeMessage tests sending and receiving large messages
|
||||||
|
func TestPubSub_LargeMessage(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
|
||||||
|
// Create a large message (100KB)
|
||||||
|
largeMessage := make([]byte, 100*1024)
|
||||||
|
for i := range largeMessage {
|
||||||
|
largeMessage[i] = byte(i % 256)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create subscriber
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish large message
|
||||||
|
if err := publisher.Publish(largeMessage); err != nil {
|
||||||
|
t.Fatalf("publish large message failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive on subscriber
|
||||||
|
msg, err := subscriber.ReceiveWithTimeout(30 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("receive failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msg) != len(largeMessage) {
|
||||||
|
t.Fatalf("expected message of length %d, got %d", len(largeMessage), len(msg))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify content
|
||||||
|
for i := range msg {
|
||||||
|
if msg[i] != largeMessage[i] {
|
||||||
|
t.Fatalf("message content mismatch at byte %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPubSub_RapidPublish tests rapid message publishing
|
||||||
|
func TestPubSub_RapidPublish(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
numMessages := 50
|
||||||
|
|
||||||
|
// Create subscriber
|
||||||
|
subscriber, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create subscriber: %v", err)
|
||||||
|
}
|
||||||
|
defer subscriber.Close()
|
||||||
|
|
||||||
|
// Give subscriber time to register
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Create publisher
|
||||||
|
publisher, err := e2e.NewWSPubSubClient(t, topic)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create publisher: %v", err)
|
||||||
|
}
|
||||||
|
defer publisher.Close()
|
||||||
|
|
||||||
|
// Give connections time to stabilize
|
||||||
|
e2e.Delay(200)
|
||||||
|
|
||||||
|
// Publish messages rapidly
|
||||||
|
for i := 0; i < numMessages; i++ {
|
||||||
|
msg := fmt.Sprintf("rapid-msg-%d", i)
|
||||||
|
if err := publisher.Publish([]byte(msg)); err != nil {
|
||||||
|
t.Fatalf("publish %d failed: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Receive messages
|
||||||
|
receivedCount := 0
|
||||||
|
for receivedCount < numMessages {
|
||||||
|
_, err := subscriber.ReceiveWithTimeout(10 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
receivedCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allow some message loss due to buffering
|
||||||
|
minExpected := numMessages * 80 / 100 // 80% minimum
|
||||||
|
if receivedCount < minExpected {
|
||||||
|
t.Fatalf("expected at least %d messages, got %d", minExpected, receivedCount)
|
||||||
|
}
|
||||||
|
t.Logf("received %d/%d messages (%.1f%%)", receivedCount, numMessages, float64(receivedCount)*100/float64(numMessages))
|
||||||
|
}
|
||||||
123
core/e2e/shared/pubsub_presence_test.go
Normal file
123
core/e2e/shared/pubsub_presence_test.go
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPubSub_Presence(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
topic := e2e.GenerateTopic()
|
||||||
|
memberID := "user123"
|
||||||
|
memberMeta := map[string]interface{}{"name": "Alice"}
|
||||||
|
|
||||||
|
// 1. Subscribe with presence
|
||||||
|
client1, err := e2e.NewWSPubSubPresenceClient(t, topic, memberID, memberMeta)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create presence client: %v", err)
|
||||||
|
}
|
||||||
|
defer client1.Close()
|
||||||
|
|
||||||
|
// Wait for join event
|
||||||
|
msg, err := client1.ReceiveWithTimeout(5 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("did not receive join event: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var event map[string]interface{}
|
||||||
|
if err := json.Unmarshal(msg, &event); err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal event: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if event["type"] != "presence.join" {
|
||||||
|
t.Fatalf("expected presence.join event, got %v", event["type"])
|
||||||
|
}
|
||||||
|
|
||||||
|
if event["member_id"] != memberID {
|
||||||
|
t.Fatalf("expected member_id %s, got %v", memberID, event["member_id"])
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Query presence endpoint
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("%s/v1/pubsub/presence?topic=%s", e2e.GetGatewayURL(), topic),
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("presence query failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp["count"] != float64(1) {
|
||||||
|
t.Fatalf("expected count 1, got %v", resp["count"])
|
||||||
|
}
|
||||||
|
|
||||||
|
members := resp["members"].([]interface{})
|
||||||
|
if len(members) != 1 {
|
||||||
|
t.Fatalf("expected 1 member, got %d", len(members))
|
||||||
|
}
|
||||||
|
|
||||||
|
member := members[0].(map[string]interface{})
|
||||||
|
if member["member_id"] != memberID {
|
||||||
|
t.Fatalf("expected member_id %s, got %v", memberID, member["member_id"])
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Subscribe second member
|
||||||
|
memberID2 := "user456"
|
||||||
|
client2, err := e2e.NewWSPubSubPresenceClient(t, topic, memberID2, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create second presence client: %v", err)
|
||||||
|
}
|
||||||
|
// We'll close client2 later to test leave event
|
||||||
|
|
||||||
|
// Client1 should receive join event for Client2
|
||||||
|
msg2, err := client1.ReceiveWithTimeout(5 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("client1 did not receive join event for client2: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(msg2, &event); err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal event: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if event["type"] != "presence.join" || event["member_id"] != memberID2 {
|
||||||
|
t.Fatalf("expected presence.join for %s, got %v for %v", memberID2, event["type"], event["member_id"])
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Disconnect client2 and verify leave event
|
||||||
|
client2.Close()
|
||||||
|
|
||||||
|
msg3, err := client1.ReceiveWithTimeout(5 * time.Second)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("client1 did not receive leave event for client2: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(msg3, &event); err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal event: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if event["type"] != "presence.leave" || event["member_id"] != memberID2 {
|
||||||
|
t.Fatalf("expected presence.leave for %s, got %v for %v", memberID2, event["type"], event["member_id"])
|
||||||
|
}
|
||||||
|
}
|
||||||
498
core/e2e/shared/rqlite_http_test.go
Normal file
498
core/e2e/shared/rqlite_http_test.go
Normal file
@ -0,0 +1,498 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRQLite_CreateTable(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup table after test
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("create table request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusCreated && status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 201 or 200, got %d: %s", status, string(body))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_InsertQuery(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup table after test
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert rows
|
||||||
|
insertReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table),
|
||||||
|
fmt.Sprintf("INSERT INTO %s(name) VALUES ('bob')", table),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = insertReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("insert failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query rows
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("query failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var queryResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &queryResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if queryResp["count"].(float64) < 2 {
|
||||||
|
t.Fatalf("expected at least 2 rows, got %v", queryResp["count"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_DropTable(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop table
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"table": table,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = dropReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("drop table request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify table doesn't exist via schema
|
||||||
|
schemaReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := schemaReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Logf("warning: failed to verify schema after drop: status %d, err %v", status, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var schemaResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &schemaResp); err != nil {
|
||||||
|
t.Logf("warning: failed to decode schema response: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if tables, ok := schemaResp["tables"].([]interface{}); ok {
|
||||||
|
for _, tbl := range tables {
|
||||||
|
tblMap := tbl.(map[string]interface{})
|
||||||
|
if tblMap["name"] == table {
|
||||||
|
t.Fatalf("table %s still present after drop", table)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_Schema(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("schema request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &resp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := resp["tables"]; !ok {
|
||||||
|
t.Fatalf("expected 'tables' field in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_MalformedSQL(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": "SELECT * FROM nonexistent_table WHERE invalid syntax",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := req.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should get an error response
|
||||||
|
if status == http.StatusOK {
|
||||||
|
t.Fatalf("expected error for malformed SQL, got status 200")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_LargeTransaction(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
table := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup table after test
|
||||||
|
defer func() {
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": table},
|
||||||
|
}
|
||||||
|
dropReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
schema := fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
|
||||||
|
table,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Create table
|
||||||
|
createReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": schema,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate large transaction (50 inserts)
|
||||||
|
var statements []string
|
||||||
|
for i := 0; i < 50; i++ {
|
||||||
|
statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i))
|
||||||
|
}
|
||||||
|
|
||||||
|
txReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": statements,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = txReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("large transaction failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify all rows were inserted
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var countResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &countResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract count from result
|
||||||
|
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||||
|
row := rows[0].([]interface{})
|
||||||
|
if row[0].(float64) != 50 {
|
||||||
|
t.Fatalf("expected 50 rows, got %v", row[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_ForeignKeyMigration(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
orgsTable := e2e.GenerateTableName()
|
||||||
|
usersTable := e2e.GenerateTableName()
|
||||||
|
|
||||||
|
// Cleanup tables after test
|
||||||
|
defer func() {
|
||||||
|
dropUsersReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": usersTable},
|
||||||
|
}
|
||||||
|
dropUsersReq.Do(context.Background())
|
||||||
|
|
||||||
|
dropOrgsReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{"table": orgsTable},
|
||||||
|
}
|
||||||
|
dropOrgsReq.Do(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create base tables
|
||||||
|
createOrgsReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)",
|
||||||
|
orgsTable,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := createOrgsReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create orgs table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
createUsersReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"schema": fmt.Sprintf(
|
||||||
|
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)",
|
||||||
|
usersTable,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = createUsersReq.Do(ctx)
|
||||||
|
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||||
|
t.Fatalf("create users table failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seed data
|
||||||
|
seedReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable),
|
||||||
|
fmt.Sprintf("INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')", usersTable),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = seedReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("seed transaction failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Migrate: change age type and add FK
|
||||||
|
migrationReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"statements": []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)",
|
||||||
|
usersTable, orgsTable,
|
||||||
|
),
|
||||||
|
fmt.Sprintf(
|
||||||
|
"INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s",
|
||||||
|
usersTable, usersTable,
|
||||||
|
),
|
||||||
|
fmt.Sprintf("DROP TABLE %s", usersTable),
|
||||||
|
fmt.Sprintf("ALTER TABLE %s_new RENAME TO %s", usersTable, usersTable),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err = migrationReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("migration transaction failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify data is intact
|
||||||
|
queryReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, status, err := queryReq.Do(ctx)
|
||||||
|
if err != nil || status != http.StatusOK {
|
||||||
|
t.Fatalf("query after migration failed: status %d, err %v", status, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var queryResp map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &queryResp); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if queryResp["count"].(float64) != 1 {
|
||||||
|
t.Fatalf("expected 1 row after migration, got %v", queryResp["count"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRQLite_DropNonexistentTable(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
dropReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"table": "nonexistent_table_xyz_" + fmt.Sprintf("%d", time.Now().UnixNano()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := dropReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("warning: drop nonexistent table request failed: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should get an error (400 or 404)
|
||||||
|
if status == http.StatusOK {
|
||||||
|
t.Logf("warning: expected error for dropping nonexistent table, got status 200")
|
||||||
|
}
|
||||||
|
}
|
||||||
130
core/e2e/shared/serverless_test.go
Normal file
130
core/e2e/shared/serverless_test.go
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestServerless_DeployAndInvoke(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
wasmPath := "../examples/functions/bin/hello.wasm"
|
||||||
|
if _, err := os.Stat(wasmPath); os.IsNotExist(err) {
|
||||||
|
t.Skip("hello.wasm not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
wasmBytes, err := os.ReadFile(wasmPath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read hello.wasm: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
funcName := "e2e-hello"
|
||||||
|
// Use namespace from environment or default to test namespace
|
||||||
|
namespace := os.Getenv("ORAMA_NAMESPACE")
|
||||||
|
if namespace == "" {
|
||||||
|
namespace = "default-test-ns" // Match the namespace from LoadTestEnv()
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Deploy function
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
// Add metadata
|
||||||
|
_ = writer.WriteField("name", funcName)
|
||||||
|
_ = writer.WriteField("namespace", namespace)
|
||||||
|
_ = writer.WriteField("is_public", "true") // Make function public for E2E test
|
||||||
|
|
||||||
|
// Add WASM file
|
||||||
|
part, err := writer.CreateFormFile("wasm", funcName+".wasm")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
part.Write(wasmBytes)
|
||||||
|
writer.Close()
|
||||||
|
|
||||||
|
deployReq, _ := http.NewRequestWithContext(ctx, "POST", e2e.GetGatewayURL()+"/v1/functions", &buf)
|
||||||
|
deployReq.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
deployReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(1 * time.Minute)
|
||||||
|
resp, err := client.Do(deployReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("deploy request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusCreated {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("deploy failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Invoke function
|
||||||
|
invokePayload := []byte(`{"name": "E2E Tester"}`)
|
||||||
|
invokeReq, _ := http.NewRequestWithContext(ctx, "POST", e2e.GetGatewayURL()+"/v1/functions/"+funcName+"/invoke?namespace="+namespace, bytes.NewReader(invokePayload))
|
||||||
|
invokeReq.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
invokeReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err = client.Do(invokeReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invoke request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("invoke failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
output, _ := io.ReadAll(resp.Body)
|
||||||
|
expected := "Hello, E2E Tester!"
|
||||||
|
if !bytes.Contains(output, []byte(expected)) {
|
||||||
|
t.Errorf("output %q does not contain %q", string(output), expected)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. List functions
|
||||||
|
listReq, _ := http.NewRequestWithContext(ctx, "GET", e2e.GetGatewayURL()+"/v1/functions?namespace="+namespace, nil)
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
listReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
resp, err = client.Do(listReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Errorf("list failed with status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Delete function
|
||||||
|
deleteReq, _ := http.NewRequestWithContext(ctx, "DELETE", e2e.GetGatewayURL()+"/v1/functions/"+funcName+"?namespace="+namespace, nil)
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
deleteReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
resp, err = client.Do(deleteReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("delete request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Errorf("delete failed with status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
551
core/e2e/shared/storage_http_test.go
Normal file
551
core/e2e/shared/storage_http_test.go
Normal file
@ -0,0 +1,551 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
// uploadFile is a helper to upload a file to storage
|
||||||
|
func uploadFile(t *testing.T, ctx context.Context, content []byte, filename string) string {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", filename)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
// Add auth headers
|
||||||
|
if jwt := e2e.GetJWT(); jwt != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+jwt)
|
||||||
|
} else if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read upload response: %v", err)
|
||||||
|
}
|
||||||
|
var result map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body, &result); err != nil {
|
||||||
|
t.Fatalf("failed to decode upload response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result["cid"].(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_UploadText(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
content := []byte("Hello, IPFS!")
|
||||||
|
filename := "test.txt"
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", filename)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &result); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if result["cid"] == nil {
|
||||||
|
t.Fatalf("expected cid in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
if result["name"] != filename {
|
||||||
|
t.Fatalf("expected name %q, got %v", filename, result["name"])
|
||||||
|
}
|
||||||
|
|
||||||
|
if result["size"] == nil || result["size"].(float64) <= 0 {
|
||||||
|
t.Fatalf("expected positive size")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_UploadBinary(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// PNG header
|
||||||
|
content := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a}
|
||||||
|
filename := "test.png"
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", filename)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &result); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if result["cid"] == nil {
|
||||||
|
t.Fatalf("expected cid in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_UploadLarge(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Create 1MB file
|
||||||
|
content := bytes.Repeat([]byte("x"), 1024*1024)
|
||||||
|
filename := "large.bin"
|
||||||
|
|
||||||
|
// Create multipart form
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", filename)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &result); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if result["size"] != float64(1024*1024) {
|
||||||
|
t.Fatalf("expected size %d, got %v", 1024*1024, result["size"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_PinUnpin(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
content := []byte("test content for pinning")
|
||||||
|
|
||||||
|
// Upload file first
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", "pin-test.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create upload request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var uploadResult map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
|
||||||
|
t.Fatalf("failed to decode upload response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||||
|
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
cid, ok := uploadResult["cid"].(string)
|
||||||
|
if !ok || cid == "" {
|
||||||
|
t.Fatalf("no CID in upload response: %v", uploadResult)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pin the file
|
||||||
|
pinReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/storage/pin",
|
||||||
|
Body: map[string]interface{}{
|
||||||
|
"cid": cid,
|
||||||
|
"name": "pinned-file",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
body2, status, err := pinReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("pin failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d: %s", status, string(body2))
|
||||||
|
}
|
||||||
|
|
||||||
|
var pinResult map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(body2, &pinResult); err != nil {
|
||||||
|
t.Fatalf("failed to decode pin response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pinResult["cid"] != cid {
|
||||||
|
t.Fatalf("expected cid %s, got %v", cid, pinResult["cid"])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unpin the file
|
||||||
|
unpinReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/storage/unpin/" + cid,
|
||||||
|
}
|
||||||
|
|
||||||
|
body3, status, err := unpinReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unpin failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d: %s", status, string(body3))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_Status(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
content := []byte("test content for status")
|
||||||
|
|
||||||
|
// Upload file first
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", "status-test.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create upload request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var uploadResult map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
|
||||||
|
t.Fatalf("failed to decode upload response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := uploadResult["cid"].(string)
|
||||||
|
|
||||||
|
// Get status
|
||||||
|
statusReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/storage/status/" + cid,
|
||||||
|
}
|
||||||
|
|
||||||
|
statusBody, status, err := statusReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("status request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var statusResult map[string]interface{}
|
||||||
|
if err := e2e.DecodeJSON(statusBody, &statusResult); err != nil {
|
||||||
|
t.Fatalf("failed to decode status response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if statusResult["cid"] != cid {
|
||||||
|
t.Fatalf("expected cid %s, got %v", cid, statusResult["cid"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_InvalidCID(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
statusReq := &e2e.HTTPRequest{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: e2e.GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, status, err := statusReq.Do(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("status request failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if status != http.StatusNotFound {
|
||||||
|
t.Logf("warning: expected status 404 for invalid CID, got %d", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage_GetByteRange(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
content := []byte("0123456789abcdefghijklmnopqrstuvwxyz")
|
||||||
|
|
||||||
|
// Upload file first
|
||||||
|
var buf bytes.Buffer
|
||||||
|
writer := multipart.NewWriter(&buf)
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("file", "range-test.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create form file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||||
|
t.Fatalf("failed to copy data: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := writer.Close(); err != nil {
|
||||||
|
t.Fatalf("failed to close writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create upload request
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := e2e.NewHTTPClient(5 * time.Minute)
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("upload failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var uploadResult map[string]interface{}
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
|
||||||
|
t.Fatalf("failed to decode upload response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := uploadResult["cid"].(string)
|
||||||
|
|
||||||
|
// Get full content
|
||||||
|
getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/storage/get/"+cid, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create get request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if apiKey := e2e.GetAPIKey(); apiKey != "" {
|
||||||
|
getReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err = client.Do(getReq)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("get request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Fatalf("expected status 200, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
retrievedContent, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read response body: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(retrievedContent, content) {
|
||||||
|
t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent))
|
||||||
|
}
|
||||||
|
}
|
||||||
241
core/e2e/shared/webrtc_test.go
Normal file
241
core/e2e/shared/webrtc_test.go
Normal file
@ -0,0 +1,241 @@
|
|||||||
|
//go:build e2e
|
||||||
|
|
||||||
|
package shared_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
e2e "github.com/DeBrosOfficial/network/e2e"
|
||||||
|
)
|
||||||
|
|
||||||
|
// turnCredentialsResponse is the expected response from the TURN credentials endpoint.
type turnCredentialsResponse struct {
	URLs       []string `json:"urls"`       // TURN/STUN server URLs handed to the client
	Username   string   `json:"username"`   // TURN username issued by the gateway
	Credential string   `json:"credential"` // TURN credential paired with Username
	TTL        int      `json:"ttl"`        // credential lifetime; presumably seconds — TODO confirm against the gateway
}
|
||||||
|
|
||||||
|
// TestWebRTC_TURNCredentials_RequiresAuth verifies that the TURN credentials endpoint
|
||||||
|
// rejects unauthenticated requests.
|
||||||
|
func TestWebRTC_TURNCredentials_RequiresAuth(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/turn/credentials", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusUnauthorized {
|
||||||
|
t.Fatalf("expected 401 Unauthorized, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWebRTC_TURNCredentials_ValidResponse verifies that authenticated requests to the
|
||||||
|
// TURN credentials endpoint return a valid credential structure.
|
||||||
|
func TestWebRTC_TURNCredentials_ValidResponse(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
apiKey := e2e.GetAPIKey()
|
||||||
|
if apiKey == "" {
|
||||||
|
t.Skip("no API key configured")
|
||||||
|
}
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/turn/credentials", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Fatalf("expected 200 OK, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
var creds turnCredentialsResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&creds); err != nil {
|
||||||
|
t.Fatalf("failed to decode response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(creds.URLs) == 0 {
|
||||||
|
t.Fatal("expected at least one TURN URL")
|
||||||
|
}
|
||||||
|
if creds.Username == "" {
|
||||||
|
t.Fatal("expected non-empty username")
|
||||||
|
}
|
||||||
|
if creds.Credential == "" {
|
||||||
|
t.Fatal("expected non-empty credential")
|
||||||
|
}
|
||||||
|
if creds.TTL <= 0 {
|
||||||
|
t.Fatalf("expected positive TTL, got %d", creds.TTL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWebRTC_Rooms_RequiresAuth verifies that the rooms endpoint rejects unauthenticated requests.
|
||||||
|
func TestWebRTC_Rooms_RequiresAuth(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusUnauthorized {
|
||||||
|
t.Fatalf("expected 401 Unauthorized, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWebRTC_Signal_RequiresAuth verifies that the signaling WebSocket rejects
|
||||||
|
// unauthenticated connections.
|
||||||
|
func TestWebRTC_Signal_RequiresAuth(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
// Use regular HTTP GET to the signal endpoint — without auth it should return 401
|
||||||
|
// before WebSocket upgrade
|
||||||
|
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/signal?room=test-room", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusUnauthorized {
|
||||||
|
t.Fatalf("expected 401, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWebRTC_Rooms_CreateAndList verifies room creation and listing with proper auth.
|
||||||
|
func TestWebRTC_Rooms_CreateAndList(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
apiKey := e2e.GetAPIKey()
|
||||||
|
if apiKey == "" {
|
||||||
|
t.Skip("no API key configured")
|
||||||
|
}
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
roomID := e2e.GenerateUniqueID("e2e-webrtc-room")
|
||||||
|
|
||||||
|
// Create room
|
||||||
|
createBody, _ := json.Marshal(map[string]string{"room_id": roomID})
|
||||||
|
req, err := http.NewRequest("POST", gatewayURL+"/v1/webrtc/rooms", bytes.NewReader(createBody))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("create room failed: %v", err)
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||||
|
t.Fatalf("expected 200/201, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// List rooms
|
||||||
|
req, err = http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
|
||||||
|
resp, err = client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list rooms failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
t.Fatalf("expected 200, got %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean up: delete room
|
||||||
|
req, err = http.NewRequest("DELETE", gatewayURL+"/v1/webrtc/rooms?room_id="+roomID, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
|
||||||
|
resp2, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("delete room failed: %v", err)
|
||||||
|
}
|
||||||
|
resp2.Body.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWebRTC_PermissionsPolicy verifies the Permissions-Policy header allows camera and microphone.
|
||||||
|
func TestWebRTC_PermissionsPolicy(t *testing.T) {
|
||||||
|
e2e.SkipIfMissingGateway(t)
|
||||||
|
|
||||||
|
gatewayURL := e2e.GetGatewayURL()
|
||||||
|
apiKey := e2e.GetAPIKey()
|
||||||
|
if apiKey == "" {
|
||||||
|
t.Skip("no API key configured")
|
||||||
|
}
|
||||||
|
client := e2e.NewHTTPClient(10 * time.Second)
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", gatewayURL+"/v1/webrtc/rooms", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create request: %v", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("request failed: %v", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
pp := resp.Header.Get("Permissions-Policy")
|
||||||
|
if pp == "" {
|
||||||
|
t.Skip("Permissions-Policy header not set")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.Contains(pp, "camera=(self)") {
|
||||||
|
t.Errorf("Permissions-Policy missing camera=(self), got: %s", pp)
|
||||||
|
}
|
||||||
|
if !strings.Contains(pp, "microphone=(self)") {
|
||||||
|
t.Errorf("Permissions-Policy missing microphone=(self), got: %s", pp)
|
||||||
|
}
|
||||||
|
}
|
||||||
182
core/go.mod
Normal file
182
core/go.mod
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
module github.com/DeBrosOfficial/network
|
||||||
|
|
||||||
|
go 1.24.6
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/charmbracelet/bubbles v0.20.0
|
||||||
|
github.com/charmbracelet/bubbletea v1.2.4
|
||||||
|
github.com/charmbracelet/lipgloss v1.0.0
|
||||||
|
github.com/coredns/caddy v1.1.4
|
||||||
|
github.com/coredns/coredns v1.12.1
|
||||||
|
github.com/ethereum/go-ethereum v1.13.14
|
||||||
|
github.com/go-chi/chi/v5 v5.2.3
|
||||||
|
github.com/google/uuid v1.6.0
|
||||||
|
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||||
|
github.com/libp2p/go-libp2p v0.41.1
|
||||||
|
github.com/libp2p/go-libp2p-pubsub v0.14.2
|
||||||
|
github.com/mackerelio/go-osstat v0.2.6
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.32
|
||||||
|
github.com/mdp/qrterminal/v3 v3.2.1
|
||||||
|
github.com/miekg/dns v1.1.70
|
||||||
|
github.com/multiformats/go-multiaddr v0.16.0
|
||||||
|
github.com/olric-data/olric v0.7.0
|
||||||
|
github.com/pion/interceptor v0.1.40
|
||||||
|
github.com/pion/rtcp v1.2.15
|
||||||
|
github.com/pion/turn/v4 v4.0.2
|
||||||
|
github.com/pion/webrtc/v4 v4.1.2
|
||||||
|
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
|
||||||
|
github.com/spf13/cobra v1.10.2
|
||||||
|
github.com/stretchr/testify v1.11.1
|
||||||
|
github.com/tetratelabs/wazero v1.11.0
|
||||||
|
go.uber.org/zap v1.27.0
|
||||||
|
golang.org/x/crypto v0.47.0
|
||||||
|
golang.org/x/net v0.49.0
|
||||||
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/RoaringBitmap/roaring v1.9.4 // indirect
|
||||||
|
github.com/apparentlymart/go-cidr v1.1.0 // indirect
|
||||||
|
github.com/armon/go-metrics v0.4.1 // indirect
|
||||||
|
github.com/atotto/clipboard v0.1.4 // indirect
|
||||||
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||||
|
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||||
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/bits-and-blooms/bitset v1.22.0 // indirect
|
||||||
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||||
|
github.com/buraksezer/consistent v0.10.0 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
|
github.com/charmbracelet/x/ansi v0.4.5 // indirect
|
||||||
|
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||||
|
github.com/containerd/cgroups v1.1.0 // indirect
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||||
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||||
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
|
github.com/elastic/gosigar v0.14.3 // indirect
|
||||||
|
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||||
|
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
|
||||||
|
github.com/flynn/noise v1.1.0 // indirect
|
||||||
|
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||||
|
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||||
|
github.com/godbus/dbus/v5 v5.1.0 // indirect
|
||||||
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
|
github.com/google/btree v1.1.3 // indirect
|
||||||
|
github.com/google/gopacket v1.1.19 // indirect
|
||||||
|
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
|
||||||
|
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||||
|
github.com/hashicorp/go-metrics v0.5.4 // indirect
|
||||||
|
github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
|
||||||
|
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||||
|
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||||
|
github.com/hashicorp/logutils v1.0.0 // indirect
|
||||||
|
github.com/hashicorp/memberlist v0.5.3 // indirect
|
||||||
|
github.com/holiman/uint256 v1.2.4 // indirect
|
||||||
|
github.com/huin/goupnp v1.3.0 // indirect
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
|
github.com/ipfs/go-cid v0.5.0 // indirect
|
||||||
|
github.com/ipfs/go-log/v2 v2.6.0 // indirect
|
||||||
|
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||||
|
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||||
|
github.com/klauspost/compress v1.18.0 // indirect
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||||
|
github.com/koron/go-ssdp v0.0.6 // indirect
|
||||||
|
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||||
|
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||||
|
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||||
|
github.com/libp2p/go-netroute v0.3.0 // indirect
|
||||||
|
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||||
|
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
|
||||||
|
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||||
|
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||||
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
|
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||||
|
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||||
|
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||||
|
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||||
|
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||||
|
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||||
|
github.com/mschoch/smat v0.2.0 // indirect
|
||||||
|
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||||
|
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||||
|
github.com/muesli/termenv v0.15.2 // indirect
|
||||||
|
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||||
|
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||||
|
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||||
|
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||||
|
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||||
|
github.com/multiformats/go-multicodec v0.9.1 // indirect
|
||||||
|
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||||
|
github.com/multiformats/go-multistream v0.6.1 // indirect
|
||||||
|
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
|
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
|
||||||
|
github.com/opencontainers/runtime-spec v1.2.0 // indirect
|
||||||
|
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||||
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||||
|
github.com/pion/datachannel v1.5.10 // indirect
|
||||||
|
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||||
|
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||||
|
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||||
|
github.com/pion/logging v0.2.3 // indirect
|
||||||
|
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||||
|
github.com/pion/randutil v0.1.0 // indirect
|
||||||
|
github.com/pion/rtp v1.8.19 // indirect
|
||||||
|
github.com/pion/sctp v1.8.39 // indirect
|
||||||
|
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||||
|
github.com/pion/srtp/v3 v3.0.6 // indirect
|
||||||
|
github.com/pion/stun v0.6.1 // indirect
|
||||||
|
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||||
|
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||||
|
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
|
github.com/prometheus/client_golang v1.23.0 // indirect
|
||||||
|
github.com/prometheus/client_model v0.6.2 // indirect
|
||||||
|
github.com/prometheus/common v0.67.5 // indirect
|
||||||
|
github.com/prometheus/procfs v0.16.1 // indirect
|
||||||
|
github.com/quic-go/qpack v0.5.1 // indirect
|
||||||
|
github.com/quic-go/quic-go v0.50.1 // indirect
|
||||||
|
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
|
||||||
|
github.com/raulk/go-watchdog v1.3.0 // indirect
|
||||||
|
github.com/redis/go-redis/v9 v9.8.0 // indirect
|
||||||
|
github.com/rivo/uniseg v0.4.7 // indirect
|
||||||
|
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||||
|
github.com/spf13/pflag v1.0.9 // indirect
|
||||||
|
github.com/tidwall/btree v1.7.0 // indirect
|
||||||
|
github.com/tidwall/match v1.1.1 // indirect
|
||||||
|
github.com/tidwall/redcon v1.6.2 // indirect
|
||||||
|
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
||||||
|
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||||
|
github.com/wlynxg/anet v0.0.5 // indirect
|
||||||
|
go.uber.org/dig v1.19.0 // indirect
|
||||||
|
go.uber.org/fx v1.24.0 // indirect
|
||||||
|
go.uber.org/mock v0.6.0 // indirect
|
||||||
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
|
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||||
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
|
||||||
|
golang.org/x/mod v0.31.0 // indirect
|
||||||
|
golang.org/x/sync v0.19.0 // indirect
|
||||||
|
golang.org/x/sys v0.40.0 // indirect
|
||||||
|
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect
|
||||||
|
golang.org/x/term v0.39.0 // indirect
|
||||||
|
golang.org/x/text v0.33.0 // indirect
|
||||||
|
golang.org/x/time v0.14.0 // indirect
|
||||||
|
golang.org/x/tools v0.40.0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
|
||||||
|
google.golang.org/grpc v1.78.0 // indirect
|
||||||
|
google.golang.org/protobuf v1.36.11 // indirect
|
||||||
|
lukechampine.com/blake3 v1.4.1 // indirect
|
||||||
|
rsc.io/qr v0.2.0 // indirect
|
||||||
|
)
|
||||||
@ -8,41 +8,88 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1
|
|||||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||||
|
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
|
||||||
|
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
|
||||||
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||||
|
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
|
||||||
|
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
|
||||||
|
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||||
|
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||||
|
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||||
|
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||||
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||||
|
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||||
|
github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
|
||||||
|
github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||||
|
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
|
||||||
|
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
|
||||||
|
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
|
||||||
|
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
|
||||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||||
|
github.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU=
|
||||||
|
github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
|
||||||
|
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
|
||||||
|
github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
|
||||||
|
github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM=
|
||||||
|
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
|
||||||
|
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
|
||||||
|
github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM=
|
||||||
|
github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
|
||||||
|
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||||
|
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||||
|
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||||
|
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||||
|
github.com/coredns/caddy v1.1.4 h1:+Lls5xASB0QsA2jpCroCOwpPlb5GjIGlxdjXxdX0XVo=
|
||||||
|
github.com/coredns/caddy v1.1.4/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
|
||||||
|
github.com/coredns/coredns v1.12.1 h1:haptbGscSbdWU46xrjdPj1vp3wvH1Z2FgCSQKEdgN5s=
|
||||||
|
github.com/coredns/coredns v1.12.1/go.mod h1:V26ngiKdNvAiEre5PTAvklrvTjnNjl6lakq1nbE/NbU=
|
||||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||||
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||||
|
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
@ -50,8 +97,11 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
|
|||||||
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||||
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
|
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
|
||||||
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||||
|
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||||
|
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||||
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
|
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
|
||||||
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
|
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
|
||||||
|
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
|
||||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
@ -60,9 +110,20 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
|
|||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||||
|
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
|
||||||
|
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
|
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||||
|
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
@ -79,34 +140,83 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
|||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||||
|
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
|
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
|
||||||
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||||
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||||
|
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
||||||
|
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||||
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||||
|
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
|
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||||
|
github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
|
||||||
|
github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
|
||||||
|
github.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c=
|
||||||
|
github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||||
|
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
|
||||||
|
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||||
|
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||||
|
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
|
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
|
||||||
|
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||||
|
github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
|
||||||
|
github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
|
||||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||||
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
||||||
@ -116,8 +226,14 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+
|
|||||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||||
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
|
||||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||||
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
@ -125,8 +241,11 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
|||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||||
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
|
||||||
|
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
|
||||||
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
@ -150,12 +269,14 @@ github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUI
|
|||||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||||
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
|
github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
|
||||||
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
|
github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
|
||||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||||
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
|
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
|
||||||
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||||
|
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||||
|
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||||
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
|
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
|
||||||
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
|
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
|
||||||
@ -164,10 +285,20 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
|
|||||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||||
|
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||||
|
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||||
|
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
||||||
|
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
|
github.com/mdp/qrterminal/v3 v3.2.1 h1:6+yQjiiOsSuXT5n9/m60E54vdgFsw0zhADHhHLrFet4=
|
||||||
|
github.com/mdp/qrterminal/v3 v3.2.1/go.mod h1:jOTmXvnBsMy5xqLniO0R++Jmjs2sTm9dFSuQ5kpz/SU=
|
||||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
|
||||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
|
||||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||||
@ -178,37 +309,51 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv
|
|||||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||||
|
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||||
|
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||||
|
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||||
|
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||||
|
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||||
|
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||||
|
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
|
||||||
|
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
|
||||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||||
github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
|
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||||
github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||||
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
||||||
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
||||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||||
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
|
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||||
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
|
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||||
github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
|
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
|
||||||
github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
|
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
|
||||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||||
|
github.com/olric-data/olric v0.7.0 h1:EKN2T6ZTtdu8Un0jV0KOWVxWm9odptJpefmDivfZdjE=
|
||||||
|
github.com/olric-data/olric v0.7.0/go.mod h1:+ZnPpgc8JkNkza8rETCKGn0P/QPF6HhZY0EbCKAOslo=
|
||||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||||
@ -216,7 +361,11 @@ github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlR
|
|||||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
|
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||||
|
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||||
|
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||||
|
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||||
@ -224,12 +373,12 @@ github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oL
|
|||||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||||
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
||||||
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||||
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
|
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
|
||||||
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
|
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
|
||||||
github.com/pion/ice/v4 v4.0.8 h1:ajNx0idNG+S+v9Phu4LSn2cs8JEfTsA1/tEjkkAVpFY=
|
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||||
github.com/pion/ice/v4 v4.0.8/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||||
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
|
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||||
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
|
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||||
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
||||||
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
||||||
@ -239,14 +388,14 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
|||||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||||
github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
|
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
|
||||||
github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
|
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||||
github.com/pion/sctp v1.8.37 h1:ZDmGPtRPX9mKCiVXtMbTWybFw3z/hVKAZgU81wcOrqs=
|
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||||
github.com/pion/sctp v1.8.37/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||||
github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
|
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
|
||||||
github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||||
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
|
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
|
||||||
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
|
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
|
||||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||||
@ -257,25 +406,43 @@ github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQp
|
|||||||
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
|
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
|
||||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||||
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
|
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
|
||||||
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
|
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
|
||||||
github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q=
|
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
|
||||||
github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck=
|
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
|
||||||
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
|
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||||
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
|
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
|
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
||||||
|
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||||
|
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||||
|
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||||
|
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
||||||
|
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
||||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||||
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||||
@ -286,12 +453,20 @@ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6
|
|||||||
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
|
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
|
||||||
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
|
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
|
||||||
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
||||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
|
||||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||||
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
|
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||||
|
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||||
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
|
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
|
||||||
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg=
|
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg=
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||||
@ -316,26 +491,50 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.
|
|||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||||
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
|
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||||
|
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||||
|
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||||
|
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||||
|
github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
|
||||||
|
github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU=
|
||||||
|
github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4=
|
||||||
|
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
|
||||||
|
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
|
||||||
|
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||||
|
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||||
|
github.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow=
|
||||||
|
github.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y=
|
||||||
|
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||||
|
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
|
||||||
|
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||||
|
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||||
|
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||||
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||||
@ -343,20 +542,36 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||||
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
|
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||||
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||||
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
|
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||||
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
|
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||||
|
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||||
|
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||||
|
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||||
|
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||||
|
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
|
||||||
|
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
|
||||||
|
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
|
||||||
|
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
||||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||||
|
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||||
|
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||||
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||||
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
@ -369,8 +584,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
||||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||||
@ -383,19 +598,22 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
||||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
@ -405,8 +623,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
|||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
@ -419,24 +637,36 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@ -445,8 +675,10 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
|
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA=
|
||||||
|
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
@ -454,20 +686,23 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
|||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||||
|
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||||
|
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@ -480,12 +715,14 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
|||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
||||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||||
|
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||||
@ -498,19 +735,37 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA
|
|||||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
|
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||||
|
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
@ -520,5 +775,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
|
|||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
|
||||||
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
|
||||||
|
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||||
|
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
||||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||||
@ -1,4 +1,4 @@
|
|||||||
-- DeBros Gateway - Initial database schema (SQLite/RQLite dialect)
|
-- Orama Gateway - Initial database schema (SQLite/RQLite dialect)
|
||||||
-- This file scaffolds core tables used by the HTTP gateway for auth, observability, and namespacing.
|
-- This file scaffolds core tables used by the HTTP gateway for auth, observability, and namespacing.
|
||||||
-- Apply via your migration tooling or manual execution in RQLite.
|
-- Apply via your migration tooling or manual execution in RQLite.
|
||||||
|
|
||||||
@ -1,4 +1,4 @@
|
|||||||
-- DeBros Gateway - Core schema (Phase 2)
|
-- Orama Gateway - Core schema (Phase 2)
|
||||||
-- Adds apps, nonces, subscriptions, refresh_tokens, audit_events, namespace_ownership
|
-- Adds apps, nonces, subscriptions, refresh_tokens, audit_events, namespace_ownership
|
||||||
-- SQLite/RQLite dialect
|
-- SQLite/RQLite dialect
|
||||||
|
|
||||||
@ -1,4 +1,4 @@
|
|||||||
-- DeBros Gateway - Wallet to API Key linkage (Phase 3)
|
-- Orama Gateway - Wallet to API Key linkage (Phase 3)
|
||||||
-- Ensures one API key per (namespace, wallet) and enables lookup
|
-- Ensures one API key per (namespace, wallet) and enables lookup
|
||||||
|
|
||||||
BEGIN;
|
BEGIN;
|
||||||
243
core/migrations/004_serverless_functions.sql
Normal file
243
core/migrations/004_serverless_functions.sql
Normal file
@ -0,0 +1,243 @@
|
|||||||
|
-- Orama Network - Serverless Functions Engine (Phase 4)
|
||||||
|
-- WASM-based serverless function execution with triggers, jobs, and secrets
|
||||||
|
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- FUNCTIONS TABLE
|
||||||
|
-- Core function registry with versioning support
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS functions (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
namespace TEXT NOT NULL,
|
||||||
|
version INTEGER NOT NULL DEFAULT 1,
|
||||||
|
wasm_cid TEXT NOT NULL,
|
||||||
|
source_cid TEXT,
|
||||||
|
memory_limit_mb INTEGER NOT NULL DEFAULT 64,
|
||||||
|
timeout_seconds INTEGER NOT NULL DEFAULT 30,
|
||||||
|
is_public BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
retry_count INTEGER NOT NULL DEFAULT 0,
|
||||||
|
retry_delay_seconds INTEGER NOT NULL DEFAULT 5,
|
||||||
|
dlq_topic TEXT,
|
||||||
|
status TEXT NOT NULL DEFAULT 'active',
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
created_by TEXT NOT NULL,
|
||||||
|
UNIQUE(namespace, name)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_functions_namespace ON functions(namespace);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_functions_name ON functions(namespace, name);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_functions_status ON functions(status);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- FUNCTION ENVIRONMENT VARIABLES
|
||||||
|
-- Non-sensitive configuration per function
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_env_vars (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
key TEXT NOT NULL,
|
||||||
|
value TEXT NOT NULL,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
UNIQUE(function_id, key),
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_env_vars_function ON function_env_vars(function_id);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- FUNCTION SECRETS
|
||||||
|
-- Encrypted secrets per namespace (shared across functions in namespace)
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_secrets (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
namespace TEXT NOT NULL,
|
||||||
|
name TEXT NOT NULL,
|
||||||
|
encrypted_value BLOB NOT NULL,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
UNIQUE(namespace, name)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_secrets_namespace ON function_secrets(namespace);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- CRON TRIGGERS
|
||||||
|
-- Scheduled function execution using cron expressions
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_cron_triggers (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
cron_expression TEXT NOT NULL,
|
||||||
|
next_run_at TIMESTAMP,
|
||||||
|
last_run_at TIMESTAMP,
|
||||||
|
last_status TEXT,
|
||||||
|
last_error TEXT,
|
||||||
|
enabled BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_cron_triggers_function ON function_cron_triggers(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_cron_triggers_next_run ON function_cron_triggers(next_run_at)
|
||||||
|
WHERE enabled = TRUE;
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- DATABASE TRIGGERS
|
||||||
|
-- Trigger functions on database changes (INSERT/UPDATE/DELETE)
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_db_triggers (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
table_name TEXT NOT NULL,
|
||||||
|
operation TEXT NOT NULL CHECK(operation IN ('INSERT', 'UPDATE', 'DELETE')),
|
||||||
|
condition TEXT,
|
||||||
|
enabled BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_db_triggers_function ON function_db_triggers(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_db_triggers_table ON function_db_triggers(table_name, operation)
|
||||||
|
WHERE enabled = TRUE;
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- PUBSUB TRIGGERS
|
||||||
|
-- Trigger functions on pubsub messages
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_pubsub_triggers (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
topic TEXT NOT NULL,
|
||||||
|
enabled BOOLEAN NOT NULL DEFAULT TRUE,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_pubsub_triggers_function ON function_pubsub_triggers(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_pubsub_triggers_topic ON function_pubsub_triggers(topic)
|
||||||
|
WHERE enabled = TRUE;
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- ONE-TIME TIMERS
|
||||||
|
-- Schedule functions to run once at a specific time
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_timers (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
run_at TIMESTAMP NOT NULL,
|
||||||
|
payload TEXT,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending' CHECK(status IN ('pending', 'running', 'completed', 'failed')),
|
||||||
|
error TEXT,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
completed_at TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_timers_function ON function_timers(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_timers_pending ON function_timers(run_at)
|
||||||
|
WHERE status = 'pending';
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- BACKGROUND JOBS
|
||||||
|
-- Long-running async function execution
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_jobs (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
payload TEXT,
|
||||||
|
status TEXT NOT NULL DEFAULT 'pending' CHECK(status IN ('pending', 'running', 'completed', 'failed', 'cancelled')),
|
||||||
|
progress INTEGER NOT NULL DEFAULT 0 CHECK(progress >= 0 AND progress <= 100),
|
||||||
|
result TEXT,
|
||||||
|
error TEXT,
|
||||||
|
started_at TIMESTAMP,
|
||||||
|
completed_at TIMESTAMP,
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_jobs_function ON function_jobs(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_jobs_status ON function_jobs(status);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_jobs_pending ON function_jobs(created_at)
|
||||||
|
WHERE status = 'pending';
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- INVOCATION LOGS
|
||||||
|
-- Record of all function invocations for debugging and metrics
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_invocations (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
request_id TEXT NOT NULL,
|
||||||
|
trigger_type TEXT NOT NULL,
|
||||||
|
caller_wallet TEXT,
|
||||||
|
input_size INTEGER,
|
||||||
|
output_size INTEGER,
|
||||||
|
started_at TIMESTAMP NOT NULL,
|
||||||
|
completed_at TIMESTAMP,
|
||||||
|
duration_ms INTEGER,
|
||||||
|
status TEXT CHECK(status IN ('success', 'error', 'timeout')),
|
||||||
|
error_message TEXT,
|
||||||
|
memory_used_mb REAL,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_invocations_function ON function_invocations(function_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_invocations_request ON function_invocations(request_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_invocations_time ON function_invocations(started_at);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_invocations_status ON function_invocations(function_id, status);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- FUNCTION LOGS
|
||||||
|
-- Captured log output from function execution
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_logs (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
function_id TEXT NOT NULL,
|
||||||
|
invocation_id TEXT NOT NULL,
|
||||||
|
level TEXT NOT NULL CHECK(level IN ('info', 'warn', 'error', 'debug')),
|
||||||
|
message TEXT NOT NULL,
|
||||||
|
timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (function_id) REFERENCES functions(id) ON DELETE CASCADE,
|
||||||
|
FOREIGN KEY (invocation_id) REFERENCES function_invocations(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_logs_invocation ON function_logs(invocation_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_logs_function ON function_logs(function_id, timestamp);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- DB CHANGE TRACKING
|
||||||
|
-- Track last processed row for database triggers (CDC-like)
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_db_change_tracking (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
trigger_id TEXT NOT NULL UNIQUE,
|
||||||
|
last_row_id INTEGER,
|
||||||
|
last_updated_at TIMESTAMP,
|
||||||
|
last_check_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
FOREIGN KEY (trigger_id) REFERENCES function_db_triggers(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- RATE LIMITING
|
||||||
|
-- Track request counts for rate limiting
|
||||||
|
-- =============================================================================
|
||||||
|
CREATE TABLE IF NOT EXISTS function_rate_limits (
|
||||||
|
id TEXT PRIMARY KEY,
|
||||||
|
window_key TEXT NOT NULL,
|
||||||
|
count INTEGER NOT NULL DEFAULT 0,
|
||||||
|
window_start TIMESTAMP NOT NULL,
|
||||||
|
UNIQUE(window_key, window_start)
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_function_rate_limits_window ON function_rate_limits(window_key, window_start);
|
||||||
|
|
||||||
|
-- =============================================================================
|
||||||
|
-- MIGRATION VERSION TRACKING
|
||||||
|
-- =============================================================================
|
||||||
|
INSERT OR IGNORE INTO schema_migrations(version) VALUES (4);
|
||||||
|
|
||||||
|
COMMIT;
|
||||||
|
|
||||||
77
core/migrations/005_dns_records.sql
Normal file
77
core/migrations/005_dns_records.sql
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
-- Migration 005: DNS Records for CoreDNS Integration
|
||||||
|
-- This migration creates tables for managing DNS records with RQLite backend for CoreDNS
|
||||||
|
|
||||||
|
BEGIN;
|
||||||
|
|
||||||
|
-- DNS records table for dynamic DNS management
|
||||||
|
CREATE TABLE IF NOT EXISTS dns_records (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
fqdn TEXT NOT NULL UNIQUE, -- Fully qualified domain name (e.g., myapp.node-7prvNa.orama.network)
|
||||||
|
record_type TEXT NOT NULL DEFAULT 'A', -- DNS record type: A, AAAA, CNAME, TXT
|
||||||
|
value TEXT NOT NULL, -- IP address or target value
|
||||||
|
ttl INTEGER NOT NULL DEFAULT 300, -- Time to live in seconds
|
||||||
|
namespace TEXT NOT NULL, -- Namespace that owns this record
|
||||||
|
deployment_id TEXT, -- Optional: deployment that created this record
|
||||||
|
node_id TEXT, -- Optional: specific node ID for node-specific routing
|
||||||
|
is_active BOOLEAN NOT NULL DEFAULT TRUE,-- Enable/disable without deleting
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
created_by TEXT NOT NULL -- Wallet address or 'system' for auto-created records
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes for fast DNS lookups
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn ON dns_records(fqdn);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_records_namespace ON dns_records(namespace);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_records_deployment ON dns_records(deployment_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_records_node_id ON dns_records(node_id);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_records_active ON dns_records(is_active);
|
||||||
|
|
||||||
|
-- DNS nodes registry for tracking active nodes
|
||||||
|
CREATE TABLE IF NOT EXISTS dns_nodes (
|
||||||
|
id TEXT PRIMARY KEY, -- Node ID (e.g., node-7prvNa)
|
||||||
|
ip_address TEXT NOT NULL, -- Public IP address
|
||||||
|
internal_ip TEXT, -- Private IP for cluster communication
|
||||||
|
region TEXT, -- Geographic region
|
||||||
|
status TEXT NOT NULL DEFAULT 'active', -- active, draining, offline
|
||||||
|
last_seen TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
capabilities TEXT, -- JSON: ["wasm", "ipfs", "cache"]
|
||||||
|
metadata TEXT, -- JSON: additional node info
|
||||||
|
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Indexes for node health monitoring
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_nodes_status ON dns_nodes(status);
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_dns_nodes_last_seen ON dns_nodes(last_seen);
|
||||||
|
|
||||||
|
-- Reserved domains table to prevent subdomain collisions
|
||||||
|
CREATE TABLE IF NOT EXISTS reserved_domains (
|
||||||
|
domain TEXT PRIMARY KEY,
|
||||||
|
reason TEXT NOT NULL,
|
||||||
|
reserved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Seed reserved domains
|
||||||
|
INSERT INTO reserved_domains (domain, reason) VALUES
|
||||||
|
('api.orama.network', 'API gateway endpoint'),
|
||||||
|
('www.orama.network', 'Marketing website'),
|
||||||
|
('admin.orama.network', 'Admin panel'),
|
||||||
|
('ns1.orama.network', 'Nameserver 1'),
|
||||||
|
('ns2.orama.network', 'Nameserver 2'),
|
||||||
|
('ns3.orama.network', 'Nameserver 3'),
|
||||||
|
('ns4.orama.network', 'Nameserver 4'),
|
||||||
|
('mail.orama.network', 'Email service'),
|
||||||
|
('cdn.orama.network', 'Content delivery'),
|
||||||
|
('docs.orama.network', 'Documentation'),
|
||||||
|
('status.orama.network', 'Status page')
|
||||||
|
ON CONFLICT(domain) DO NOTHING;
|
||||||
|
|
||||||
|
-- Mark migration as applied
|
||||||
|
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||||
|
version INTEGER PRIMARY KEY,
|
||||||
|
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT OR IGNORE INTO schema_migrations(version) VALUES (5);
|
||||||
|
|
||||||
|
COMMIT;
|
||||||
74
core/migrations/006_namespace_sqlite.sql
Normal file
74
core/migrations/006_namespace_sqlite.sql
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
-- Migration 006: Per-Namespace SQLite Databases
-- Creates infrastructure for isolated SQLite databases per namespace:
-- a registry of database files, a backup history, and resource quotas.

BEGIN;

-- Registry of per-namespace SQLite databases. Each database lives as a
-- file on exactly one "home" node and is backed up to IPFS.
CREATE TABLE IF NOT EXISTS namespace_sqlite_databases (
    id             TEXT PRIMARY KEY,      -- UUID
    namespace      TEXT NOT NULL,         -- Namespace that owns this database
    database_name  TEXT NOT NULL,         -- Database name (unique per namespace)
    home_node_id   TEXT NOT NULL,         -- Node ID where database file resides
    file_path      TEXT NOT NULL,         -- Absolute path on home node
    size_bytes     BIGINT DEFAULT 0,      -- Current database size
    backup_cid     TEXT,                  -- Latest backup CID in IPFS
    last_backup_at TIMESTAMP,             -- Last backup timestamp
    created_at     TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at     TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by     TEXT NOT NULL,         -- Wallet address that created the database

    UNIQUE(namespace, database_name)
);

-- Index for locating databases by home node.
-- NOTE: the UNIQUE(namespace, database_name) constraint already creates an
-- implicit index on (namespace, database_name); SQLite can also use that
-- index for namespace-only lookups (left prefix), so no separate
-- namespace or (namespace, database_name) index is created here.
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_home_node ON namespace_sqlite_databases(home_node_id);

-- Backup history: one row per backup taken of a namespace database.
CREATE TABLE IF NOT EXISTS namespace_sqlite_backups (
    id          TEXT PRIMARY KEY,         -- UUID
    database_id TEXT NOT NULL,            -- References namespace_sqlite_databases.id
    backup_cid  TEXT NOT NULL,            -- IPFS CID of backup file
    size_bytes  BIGINT NOT NULL,          -- Backup file size
    backup_type TEXT NOT NULL,            -- 'manual', 'scheduled', 'migration'
    created_at  TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by  TEXT NOT NULL,

    FOREIGN KEY (database_id) REFERENCES namespace_sqlite_databases(id) ON DELETE CASCADE
);

-- Backup history is queried newest-first per database.
CREATE INDEX IF NOT EXISTS idx_sqlite_backups_database ON namespace_sqlite_backups(database_id, created_at DESC);

-- Namespace quotas for resource management (future use).
-- "max_*" columns are limits; "current_*" columns are usage counters
-- refreshed periodically by the platform.
CREATE TABLE IF NOT EXISTS namespace_quotas (
    namespace TEXT PRIMARY KEY,

    -- Storage quotas
    max_sqlite_databases INTEGER DEFAULT 10,         -- Max SQLite databases per namespace
    max_storage_bytes    BIGINT DEFAULT 5368709120,  -- 5GB default
    max_ipfs_pins        INTEGER DEFAULT 1000,       -- Max pinned IPFS objects

    -- Compute quotas
    max_deployments INTEGER DEFAULT 20,              -- Max concurrent deployments
    max_cpu_percent INTEGER DEFAULT 200,             -- Total CPU quota (2 cores)
    max_memory_mb   INTEGER DEFAULT 2048,            -- Total memory quota

    -- Rate limits
    max_rqlite_queries_per_minute INTEGER DEFAULT 1000,
    max_olric_ops_per_minute      INTEGER DEFAULT 10000,

    -- Current usage (updated periodically)
    current_storage_bytes    BIGINT DEFAULT 0,
    current_deployments      INTEGER DEFAULT 0,
    current_sqlite_databases INTEGER DEFAULT 0,

    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (6);

COMMIT;
|
||||||
178
core/migrations/007_deployments.sql
Normal file
178
core/migrations/007_deployments.sql
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
-- Migration 007: Deployments System
-- Complete schema for managing custom deployments
-- (static sites, Next.js, Go backends, Node.js backends).

BEGIN;

-- Main deployments table: one row per deployment, versioned monotonically.
CREATE TABLE IF NOT EXISTS deployments (
    id        TEXT PRIMARY KEY,                -- UUID
    namespace TEXT NOT NULL,                   -- Owner namespace
    name      TEXT NOT NULL,                   -- Deployment name (unique per namespace)
    type      TEXT NOT NULL,                   -- 'static', 'nextjs', 'nextjs-static', 'go-backend', 'go-wasm', 'nodejs-backend'
    version   INTEGER NOT NULL DEFAULT 1,      -- Monotonic version counter
    status    TEXT NOT NULL DEFAULT 'deploying', -- 'deploying', 'active', 'failed', 'stopped', 'updating'

    -- Content storage
    content_cid TEXT,                          -- IPFS CID for static content or built assets
    build_cid   TEXT,                          -- IPFS CID for build artifacts (Next.js SSR, binaries)

    -- Runtime configuration
    home_node_id TEXT,                         -- Node ID hosting stateful data/processes
    port         INTEGER,                      -- Allocated port (NULL for static/WASM)
    subdomain    TEXT,                         -- Custom subdomain (e.g., myapp)
    environment  TEXT,                         -- JSON: {"KEY": "value", ...}

    -- Resource limits
    memory_limit_mb   INTEGER DEFAULT 256,
    cpu_limit_percent INTEGER DEFAULT 50,
    disk_limit_mb     INTEGER DEFAULT 1024,

    -- Health & monitoring
    health_check_path     TEXT DEFAULT '/health', -- HTTP path for health checks
    health_check_interval INTEGER DEFAULT 30,     -- Seconds between health checks
    restart_policy        TEXT DEFAULT 'always',  -- 'always', 'on-failure', 'never'
    max_restart_count     INTEGER DEFAULT 10,     -- Max restarts before marking as failed

    -- Metadata
    created_at  TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deployed_by TEXT NOT NULL,                 -- Wallet address or API key

    UNIQUE(namespace, name)
);

-- Indexes for deployment lookups. (namespace, name) lookups are served by
-- the implicit index of the UNIQUE constraint above.
CREATE INDEX IF NOT EXISTS idx_deployments_namespace ON deployments(namespace);
CREATE INDEX IF NOT EXISTS idx_deployments_status ON deployments(status);
CREATE INDEX IF NOT EXISTS idx_deployments_home_node ON deployments(home_node_id);
CREATE INDEX IF NOT EXISTS idx_deployments_type ON deployments(type);
CREATE INDEX IF NOT EXISTS idx_deployments_subdomain ON deployments(subdomain);

-- Port allocations table (prevents port conflicts on a node).
CREATE TABLE IF NOT EXISTS port_allocations (
    node_id       TEXT NOT NULL,
    port          INTEGER NOT NULL,
    deployment_id TEXT NOT NULL,
    allocated_at  TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    PRIMARY KEY (node_id, port),
    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- Lookups by node (and by node+port) are served by the composite
-- PRIMARY KEY's index, so no extra (node_id, port) index is needed.
CREATE INDEX IF NOT EXISTS idx_port_allocations_deployment ON port_allocations(deployment_id);

-- Home node assignments (namespace -> node mapping).
-- The count/usage columns are caches maintained for capacity planning.
CREATE TABLE IF NOT EXISTS home_node_assignments (
    namespace         TEXT PRIMARY KEY,
    home_node_id      TEXT NOT NULL,
    assigned_at       TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_heartbeat    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deployment_count  INTEGER DEFAULT 0,       -- Cached count for capacity planning
    total_memory_mb   INTEGER DEFAULT 0,       -- Cached total memory usage
    total_cpu_percent INTEGER DEFAULT 0        -- Cached total CPU usage
);

-- Index for querying assignments by node.
CREATE INDEX IF NOT EXISTS idx_home_node_by_node ON home_node_assignments(home_node_id);

-- Deployment domains (custom domain mapping).
CREATE TABLE IF NOT EXISTS deployment_domains (
    id                 TEXT PRIMARY KEY,       -- UUID
    deployment_id      TEXT NOT NULL,
    namespace          TEXT NOT NULL,
    domain             TEXT NOT NULL UNIQUE,   -- Full domain (e.g., myapp.orama.network or custom)
    routing_type       TEXT NOT NULL DEFAULT 'balanced', -- 'balanced' or 'node_specific'
    node_id            TEXT,                   -- For node_specific routing
    is_custom          BOOLEAN DEFAULT FALSE,  -- True for user's own domain
    tls_cert_cid       TEXT,                   -- IPFS CID for custom TLS certificate
    verified_at        TIMESTAMP,              -- When custom domain was verified
    verification_token TEXT,                   -- TXT record token for domain verification
    created_at         TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at         TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- Indexes for domain lookups. Lookups by domain are served by the implicit
-- index of the UNIQUE constraint on the domain column.
CREATE INDEX IF NOT EXISTS idx_deployment_domains_deployment ON deployment_domains(deployment_id);
CREATE INDEX IF NOT EXISTS idx_deployment_domains_namespace ON deployment_domains(namespace);

-- Deployment history (version tracking and rollback).
CREATE TABLE IF NOT EXISTS deployment_history (
    id                    TEXT PRIMARY KEY,    -- UUID
    deployment_id         TEXT NOT NULL,
    version               INTEGER NOT NULL,
    content_cid           TEXT,
    build_cid             TEXT,
    deployed_at           TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deployed_by           TEXT NOT NULL,
    status                TEXT NOT NULL DEFAULT 'success', -- 'success', 'failed', 'rolled_back'
    error_message         TEXT,
    rollback_from_version INTEGER,             -- If this is a rollback, original version

    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- History is queried per deployment, newest version first.
CREATE INDEX IF NOT EXISTS idx_deployment_history_deployment ON deployment_history(deployment_id, version DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_history_status ON deployment_history(status);

-- Deployment environment variables (kept separate for security).
CREATE TABLE IF NOT EXISTS deployment_env_vars (
    id            TEXT PRIMARY KEY,            -- UUID
    deployment_id TEXT NOT NULL,
    key           TEXT NOT NULL,
    value         TEXT NOT NULL,               -- Encrypted in production
    is_secret     BOOLEAN DEFAULT FALSE,       -- True for sensitive values
    created_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    UNIQUE(deployment_id, key),
    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- Per-deployment env-var lookups are served by the left prefix of the
-- implicit UNIQUE(deployment_id, key) index; no extra index needed.

-- Deployment events log (audit trail).
CREATE TABLE IF NOT EXISTS deployment_events (
    id            TEXT PRIMARY KEY,            -- UUID
    deployment_id TEXT NOT NULL,
    event_type    TEXT NOT NULL,               -- 'created', 'started', 'stopped', 'restarted', 'updated', 'deleted', 'health_check_failed'
    message       TEXT,
    metadata      TEXT,                        -- JSON: additional context
    created_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by    TEXT,                        -- Wallet address or 'system'

    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- Events are queried per deployment, newest first.
CREATE INDEX IF NOT EXISTS idx_deployment_events_deployment ON deployment_events(deployment_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_events_type ON deployment_events(event_type);

-- Process health checks (for dynamic deployments).
CREATE TABLE IF NOT EXISTS deployment_health_checks (
    id               TEXT PRIMARY KEY,         -- UUID
    deployment_id    TEXT NOT NULL,
    node_id          TEXT NOT NULL,
    status           TEXT NOT NULL,            -- 'healthy', 'unhealthy', 'unknown'
    response_time_ms INTEGER,
    status_code      INTEGER,
    error_message    TEXT,
    checked_at       TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);

-- Health checks are queried per deployment, newest first
-- (callers are expected to retain only recent checks).
CREATE INDEX IF NOT EXISTS idx_health_checks_deployment ON deployment_health_checks(deployment_id, checked_at DESC);

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (7);

COMMIT;
|
||||||
31
core/migrations/008_ipfs_namespace_tracking.sql
Normal file
31
core/migrations/008_ipfs_namespace_tracking.sql
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
-- Migration 008: IPFS Namespace Tracking
-- Adds namespace isolation for IPFS content by tracking CID ownership.
--
-- Table: ipfs_content_ownership
-- Tracks which namespace owns each CID uploaded to IPFS. This enables
-- namespace isolation so that:
--   - Namespace-A cannot GET/PIN/UNPIN Namespace-B's content
--   - The same CID can be uploaded by different namespaces (shared content)

-- Wrapped in a transaction and recorded in schema_migrations, consistent
-- with every other migration in this series (the original file omitted
-- both, so a version-checking runner would re-apply it on every start).
BEGIN;

CREATE TABLE IF NOT EXISTS ipfs_content_ownership (
    id          TEXT PRIMARY KEY,
    cid         TEXT NOT NULL,
    namespace   TEXT NOT NULL,
    name        TEXT,
    size_bytes  BIGINT DEFAULT 0,
    is_pinned   BOOLEAN DEFAULT FALSE,
    uploaded_at TIMESTAMP NOT NULL,
    uploaded_by TEXT NOT NULL,
    UNIQUE(cid, namespace)
);

-- Fast namespace + CID lookup; its left prefix also serves
-- namespace-only queries (list all content for a namespace).
CREATE INDEX IF NOT EXISTS idx_ipfs_ownership_namespace_cid
    ON ipfs_content_ownership(namespace, cid);

-- CID lookups across all namespaces are served by the left prefix of the
-- implicit UNIQUE(cid, namespace) index, so no dedicated cid index is
-- created here.

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (8);

COMMIT;
|
||||||
45
core/migrations/009_dns_records_multi.sql
Normal file
45
core/migrations/009_dns_records_multi.sql
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
-- Migration 009: Update DNS Records to Support Multiple Records per FQDN
-- Allows round-robin A records and multiple NS records for the same domain.

BEGIN;

-- SQLite doesn't support DROP CONSTRAINT, so we rebuild the table:
-- create the new structure, copy data, drop the old table, rename.
CREATE TABLE IF NOT EXISTS dns_records_new (
    id            INTEGER PRIMARY KEY AUTOINCREMENT,
    fqdn          TEXT NOT NULL,                   -- Fully qualified domain name (e.g., myapp.node-7prvNa.orama.network)
    record_type   TEXT NOT NULL DEFAULT 'A',      -- DNS record type: A, AAAA, CNAME, TXT, NS, SOA
    value         TEXT NOT NULL,                   -- IP address or target value
    ttl           INTEGER NOT NULL DEFAULT 300,    -- Time to live in seconds
    priority      INTEGER DEFAULT 0,               -- Priority for MX/SRV records, or weight for round-robin
    namespace     TEXT NOT NULL DEFAULT 'system',  -- Namespace that owns this record
    deployment_id TEXT,                            -- Optional: deployment that created this record
    node_id       TEXT,                            -- Optional: specific node ID for node-specific routing
    is_active     BOOLEAN NOT NULL DEFAULT TRUE,   -- Enable/disable without deleting
    created_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by    TEXT NOT NULL DEFAULT 'system',  -- Wallet address or 'system' for auto-created records
    UNIQUE(fqdn, record_type, value)               -- Allow multiple records of same type for same FQDN, but not duplicates
);

-- Copy existing data. `priority` is intentionally absent from the column
-- list: the old table did not have it, so copied rows take the default (0).
INSERT OR IGNORE INTO dns_records_new (id, fqdn, record_type, value, ttl, namespace, deployment_id, node_id, is_active, created_at, updated_at, created_by)
SELECT id, fqdn, record_type, value, ttl, namespace, deployment_id, node_id, is_active, created_at, updated_at, created_by
FROM dns_records;

-- Drop the old table and swap in the new one.
DROP TABLE IF EXISTS dns_records;
ALTER TABLE dns_records_new RENAME TO dns_records;

-- Recreate indexes. (fqdn-only lookups are also served by the left prefix
-- of idx_dns_records_fqdn_type, but the single-column index is kept for
-- parity with the pre-rebuild schema.)
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn ON dns_records(fqdn);
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn_type ON dns_records(fqdn, record_type);
CREATE INDEX IF NOT EXISTS idx_dns_records_namespace ON dns_records(namespace);
CREATE INDEX IF NOT EXISTS idx_dns_records_deployment ON dns_records(deployment_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_node_id ON dns_records(node_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_active ON dns_records(is_active);

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (9);

COMMIT;
|
||||||
190
core/migrations/010_namespace_clusters.sql
Normal file
190
core/migrations/010_namespace_clusters.sql
Normal file
@ -0,0 +1,190 @@
|
|||||||
|
-- Migration 010: Namespace Clusters for Physical Isolation
-- Creates tables to manage per-namespace RQLite and Olric clusters.
-- Each namespace gets its own 3-node cluster for complete isolation.

BEGIN;

-- Extend namespaces with cluster status tracking.
-- SQLite has no ADD COLUMN IF NOT EXISTS, so a replacement table is
-- created; cluster_status values: 'none', 'provisioning', 'ready',
-- 'degraded', 'failed', 'deprovisioning'.
CREATE TABLE IF NOT EXISTS namespaces_new (
    id                 INTEGER PRIMARY KEY AUTOINCREMENT,
    name               TEXT NOT NULL UNIQUE,
    created_at         TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    cluster_status     TEXT DEFAULT 'none',
    cluster_created_at TIMESTAMP,
    cluster_ready_at   TIMESTAMP
);

-- Copy data from the old table only when the new columns are absent there.
-- NOTE(review): namespaces_new is never renamed over namespaces below —
-- the original migration abandons the swap ("just ensure the new tables
-- exist"), leaving this table as a stray artifact. Preserved as-is to
-- avoid changing already-applied migration behavior; confirm intent.
INSERT OR IGNORE INTO namespaces_new (id, name, created_at, cluster_status)
SELECT id, name, created_at, 'none' FROM namespaces WHERE NOT EXISTS (
    SELECT 1 FROM pragma_table_info('namespaces') WHERE name = 'cluster_status'
);

-- Namespace clusters registry: one record per namespace that has a
-- dedicated cluster.
CREATE TABLE IF NOT EXISTS namespace_clusters (
    id             TEXT PRIMARY KEY,              -- UUID
    namespace_id   INTEGER NOT NULL UNIQUE,       -- FK to namespaces
    namespace_name TEXT NOT NULL,                 -- Cached for easier lookups
    status         TEXT NOT NULL DEFAULT 'provisioning', -- provisioning, ready, degraded, failed, deprovisioning

    -- Cluster configuration
    rqlite_node_count  INTEGER NOT NULL DEFAULT 3,
    olric_node_count   INTEGER NOT NULL DEFAULT 3,
    gateway_node_count INTEGER NOT NULL DEFAULT 3,

    -- Provisioning metadata
    provisioned_by    TEXT NOT NULL,              -- Wallet address that triggered provisioning
    provisioned_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    ready_at          TIMESTAMP,
    last_health_check TIMESTAMP,

    -- Error tracking
    error_message TEXT,
    retry_count   INTEGER DEFAULT 0,

    FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_namespace_clusters_status ON namespace_clusters(status);
CREATE INDEX IF NOT EXISTS idx_namespace_clusters_namespace ON namespace_clusters(namespace_id);
CREATE INDEX IF NOT EXISTS idx_namespace_clusters_name ON namespace_clusters(namespace_name);

-- Namespace cluster nodes: which physical nodes host services for each
-- namespace cluster. A node may carry several roles (rqlite + olric +
-- gateway), hence one row per (cluster, node, role).
CREATE TABLE IF NOT EXISTS namespace_cluster_nodes (
    id                   TEXT PRIMARY KEY,        -- UUID
    namespace_cluster_id TEXT NOT NULL,           -- FK to namespace_clusters
    node_id              TEXT NOT NULL,           -- FK to dns_nodes (physical node)
    role                 TEXT NOT NULL,           -- 'rqlite_leader', 'rqlite_follower', 'olric', 'gateway'

    -- Service ports (allocated from reserved range 10000-10099)
    rqlite_http_port      INTEGER,                -- Port for RQLite HTTP API
    rqlite_raft_port      INTEGER,                -- Port for RQLite Raft consensus
    olric_http_port       INTEGER,                -- Port for Olric HTTP API
    olric_memberlist_port INTEGER,                -- Port for Olric memberlist gossip
    gateway_http_port     INTEGER,                -- Port for Gateway HTTP

    -- Service status
    status         TEXT NOT NULL DEFAULT 'pending', -- pending, starting, running, stopped, failed
    process_pid    INTEGER,                       -- PID of running process (for local management)
    last_heartbeat TIMESTAMP,
    error_message  TEXT,

    -- Join addresses for cluster formation
    rqlite_join_address TEXT,                     -- Address to join RQLite cluster
    olric_peers         TEXT,                     -- JSON array of Olric peer addresses

    -- Metadata
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    UNIQUE(namespace_cluster_id, node_id, role),
    FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_cluster_nodes_cluster ON namespace_cluster_nodes(namespace_cluster_id);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_node ON namespace_cluster_nodes(node_id);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_status ON namespace_cluster_nodes(status);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_role ON namespace_cluster_nodes(role);

-- Namespace port allocations: manages the reserved range (10000-10099).
-- Each namespace instance on a node gets a block of 5 consecutive ports.
CREATE TABLE IF NOT EXISTS namespace_port_allocations (
    id                   TEXT PRIMARY KEY,        -- UUID
    node_id              TEXT NOT NULL,           -- Physical node ID
    namespace_cluster_id TEXT NOT NULL,           -- Namespace cluster this allocation belongs to

    -- Port block (5 consecutive ports)
    port_start INTEGER NOT NULL,                  -- Start of port block (e.g., 10000)
    port_end   INTEGER NOT NULL,                  -- End of port block (e.g., 10004)

    -- Individual port assignments within the block
    rqlite_http_port      INTEGER NOT NULL,       -- port_start + 0
    rqlite_raft_port      INTEGER NOT NULL,       -- port_start + 1
    olric_http_port       INTEGER NOT NULL,       -- port_start + 2
    olric_memberlist_port INTEGER NOT NULL,       -- port_start + 3
    gateway_http_port     INTEGER NOT NULL,       -- port_start + 4

    allocated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- Prevent overlapping allocations on same node
    UNIQUE(node_id, port_start),
    -- One allocation per namespace per node
    UNIQUE(namespace_cluster_id, node_id),
    FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_ns_port_alloc_node ON namespace_port_allocations(node_id);
CREATE INDEX IF NOT EXISTS idx_ns_port_alloc_cluster ON namespace_port_allocations(namespace_cluster_id);

-- Namespace cluster events: audit log for provisioning and lifecycle.
CREATE TABLE IF NOT EXISTS namespace_cluster_events (
    id                   TEXT PRIMARY KEY,        -- UUID
    namespace_cluster_id TEXT NOT NULL,
    event_type           TEXT NOT NULL,           -- See event types listed below
    node_id              TEXT,                    -- Optional: specific node this event relates to
    message              TEXT,
    metadata             TEXT,                    -- JSON for additional event data
    created_at           TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);

-- Event types:
--   'provisioning_started'    - Cluster provisioning began
--   'nodes_selected'          - 3 nodes were selected for the cluster
--   'ports_allocated'         - Ports allocated on a node
--   'rqlite_started'          - RQLite instance started on a node
--   'rqlite_joined'           - RQLite instance joined the cluster
--   'rqlite_leader_elected'   - RQLite leader election completed
--   'olric_started'           - Olric instance started on a node
--   'olric_joined'            - Olric instance joined memberlist
--   'gateway_started'         - Gateway instance started on a node
--   'dns_created'             - DNS records created for namespace
--   'cluster_ready'           - All services ready, cluster is operational
--   'cluster_degraded'        - One or more nodes are unhealthy
--   'cluster_failed'          - Cluster failed to provision or operate
--   'node_failed'             - Specific node became unhealthy
--   'node_recovered'          - Node recovered from failure
--   'deprovisioning_started'  - Cluster deprovisioning began
--   'deprovisioned'           - Cluster fully deprovisioned

CREATE INDEX IF NOT EXISTS idx_cluster_events_cluster ON namespace_cluster_events(namespace_cluster_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_cluster_events_type ON namespace_cluster_events(event_type);

-- Global deployment registry: prevents duplicate deployment subdomains
-- across all namespaces. Deployments use {name}-{random}.{domain}, so
-- used subdomains are tracked globally here.
CREATE TABLE IF NOT EXISTS global_deployment_subdomains (
    subdomain     TEXT PRIMARY KEY,               -- Full subdomain (e.g., 'myapp-f3o4if')
    namespace     TEXT NOT NULL,                  -- Owner namespace
    deployment_id TEXT NOT NULL,                  -- FK to deployments (in namespace cluster)
    created_at    TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- No FK to deployments since deployments are in namespace-specific clusters.
    -- NOTE(review): this UNIQUE duplicates the PRIMARY KEY on subdomain;
    -- kept verbatim to preserve the applied migration's exact schema.
    UNIQUE(subdomain)
);

CREATE INDEX IF NOT EXISTS idx_global_subdomains_namespace ON global_deployment_subdomains(namespace);
CREATE INDEX IF NOT EXISTS idx_global_subdomains_deployment ON global_deployment_subdomains(deployment_id);

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (10);

COMMIT;
|
||||||
19
core/migrations/011_dns_nameservers.sql
Normal file
19
core/migrations/011_dns_nameservers.sql
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
-- Migration 011: DNS Nameservers Table
-- Maps NS hostnames (ns1, ns2, ns3) to specific node IDs and IPs,
-- giving a stable NS assignment that survives restarts and re-seeding.

BEGIN;

CREATE TABLE IF NOT EXISTS dns_nameservers (
    hostname    TEXT PRIMARY KEY,  -- e.g., "ns1", "ns2", "ns3"
    node_id     TEXT NOT NULL,     -- Peer ID of the assigned node
    ip_address  TEXT NOT NULL,     -- IP address of the assigned node
    domain      TEXT NOT NULL,     -- Base domain (e.g., "dbrs.space")
    assigned_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at  TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    UNIQUE(node_id, domain)        -- A node can only hold one NS slot per domain
);

-- Record this migration as applied (idempotent).
INSERT OR IGNORE INTO schema_migrations(version) VALUES (11);

COMMIT;
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user