pushed more changes

This commit is contained in:
anonpenguin23 2026-01-24 16:00:28 +02:00
parent 00c9792780
commit fb229af2a0
16 changed files with 1192 additions and 837 deletions

View File

@ -34,6 +34,7 @@ jobs:
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HOMEBREW_TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -42,32 +43,26 @@ jobs:
path: dist/
retention-days: 5
# Optional: Publish to GitHub Packages (requires additional setup)
publish-packages:
# Verify release artifacts
verify-release:
runs-on: ubuntu-latest
needs: build-release
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: release-artifacts
path: dist/
- name: Publish to GitHub Packages
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: List release artifacts
run: |
echo "Publishing Debian packages to GitHub Packages..."
for deb in dist/*.deb; do
if [ -f "$deb" ]; then
curl -H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
--data-binary @"$deb" \
"https://uploads.github.com/repos/${{ github.repository }}/releases/upload?name=$(basename "$deb")"
fi
done
echo "=== Release Artifacts ==="
ls -la dist/
echo ""
echo "=== .deb packages ==="
ls -la dist/*.deb 2>/dev/null || echo "No .deb files found"
echo ""
echo "=== Archives ==="
ls -la dist/*.tar.gz 2>/dev/null || echo "No .tar.gz files found"

View File

@ -1,14 +1,18 @@
# GoReleaser Configuration for DeBros Network
# Builds and releases the orama binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: orama setup
# Builds and releases orama (CLI) and orama-node binaries
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
project_name: debros-network
env:
- GO111MODULE=on
before:
hooks:
- go mod tidy
builds:
# orama binary - only build the CLI
# orama CLI binary
- id: orama
main: ./cmd/cli
binary: orama
@ -25,18 +29,107 @@ builds:
- -X main.date={{.Date}}
mod_timestamp: "{{ .CommitTimestamp }}"
# orama-node binary (Linux only for apt)
- id: orama-node
main: ./cmd/node
binary: orama-node
goos:
- linux
goarch:
- amd64
- arm64
ldflags:
- -s -w
- -X main.version={{.Version}}
- -X main.commit={{.ShortCommit}}
- -X main.date={{.Date}}
mod_timestamp: "{{ .CommitTimestamp }}"
archives:
# Tar.gz archives for orama
- id: binaries
# Tar.gz archives for orama CLI
- id: orama-archives
builds:
- orama
format: tar.gz
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
name_template: "orama_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- README.md
- LICENSE
- CHANGELOG.md
format_overrides:
- goos: windows
format: zip
# Tar.gz archives for orama-node
- id: orama-node-archives
builds:
- orama-node
format: tar.gz
name_template: "orama-node_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- README.md
- LICENSE
# Debian packages for apt
nfpms:
# orama CLI .deb package
- id: orama-deb
package_name: orama
builds:
- orama
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io>
description: CLI tool for the Orama decentralized network
license: MIT
formats:
- deb
bindir: /usr/bin
section: utils
priority: optional
contents:
- src: ./README.md
dst: /usr/share/doc/orama/README.md
deb:
lintian_overrides:
- statically-linked-binary
# orama-node .deb package
- id: orama-node-deb
package_name: orama-node
builds:
- orama-node
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io>
description: Node daemon for the Orama decentralized network
license: MIT
formats:
- deb
bindir: /usr/bin
section: net
priority: optional
contents:
- src: ./README.md
dst: /usr/share/doc/orama-node/README.md
deb:
lintian_overrides:
- statically-linked-binary
# Homebrew tap for macOS (orama CLI only)
brews:
- name: orama
ids:
- orama-archives
repository:
owner: DeBrosOfficial
name: homebrew-tap
token: "{{ .Env.HOMEBREW_TAP_TOKEN }}"
folder: Formula
homepage: https://github.com/DeBrosOfficial/network
description: CLI tool for the Orama decentralized network
license: MIT
install: |
bin.install "orama"
test: |
system "#{bin}/orama", "--version"
checksum:
name_template: "checksums.txt"
@ -64,3 +157,5 @@ release:
draft: false
prerelease: auto
name_template: "Release {{.Version}}"
extra_files:
- glob: ./dist/*.deb

View File

@ -330,12 +330,29 @@ curl -X DELETE http://localhost:6001/v1/functions/hello-world?namespace=default
### Installation
**macOS (Homebrew):**
```bash
# Install via APT
echo "deb https://debrosofficial.github.io/network/apt stable main" | sudo tee /etc/apt/sources.list.d/debros.list
brew install DeBrosOfficial/tap/orama
```
sudo apt update && sudo apt install orama
**Linux (Debian/Ubuntu):**
```bash
# Download and install the latest .deb package
curl -sL https://github.com/DeBrosOfficial/network/releases/latest/download/orama_$(curl -s https://api.github.com/repos/DeBrosOfficial/network/releases/latest | grep tag_name | cut -d '"' -f 4 | tr -d 'v')_linux_amd64.deb -o orama.deb
sudo dpkg -i orama.deb
```
**From Source:**
```bash
go install github.com/DeBrosOfficial/network/cmd/cli@latest
```
**Setup (after installation):**
```bash
sudo orama install --interactive
```

BIN
orama-cli-linux Executable file

Binary file not shown.

View File

@ -12,6 +12,7 @@ import (
type BinaryInstaller struct {
arch string
logWriter io.Writer
oramaHome string
// Embedded installers
rqlite *installers.RQLiteInstaller
@ -19,18 +20,24 @@ type BinaryInstaller struct {
ipfsCluster *installers.IPFSClusterInstaller
olric *installers.OlricInstaller
gateway *installers.GatewayInstaller
coredns *installers.CoreDNSInstaller
caddy *installers.CaddyInstaller
}
// NewBinaryInstaller constructs a BinaryInstaller for the given target
// architecture, wiring up every embedded component installer. All installers
// share the same log writer; the CoreDNS and Caddy installers additionally
// receive the orama home directory so they can locate plugin sources.
func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
	const home = "/home/debros"
	bi := &BinaryInstaller{
		arch:      arch,
		logWriter: logWriter,
		oramaHome: home,
	}
	bi.rqlite = installers.NewRQLiteInstaller(arch, logWriter)
	bi.ipfs = installers.NewIPFSInstaller(arch, logWriter)
	bi.ipfsCluster = installers.NewIPFSClusterInstaller(arch, logWriter)
	bi.olric = installers.NewOlricInstaller(arch, logWriter)
	bi.gateway = installers.NewGatewayInstaller(arch, logWriter)
	bi.coredns = installers.NewCoreDNSInstaller(arch, logWriter, home)
	bi.caddy = installers.NewCaddyInstaller(arch, logWriter, home)
	return bi
}
@ -110,6 +117,26 @@ func (bi *BinaryInstaller) InstallAnyoneClient() error {
return bi.gateway.InstallAnyoneClient()
}
// InstallCoreDNS builds and installs CoreDNS with the custom RQLite plugin.
// Thin delegate to the embedded CoreDNSInstaller.
func (bi *BinaryInstaller) InstallCoreDNS() error {
	return bi.coredns.Install()
}

// ConfigureCoreDNS creates CoreDNS configuration files (Corefile plus static
// zone file) for domain. rqliteDSN points the rqlite plugin at the local
// RQLite HTTP API; ns1IP..ns3IP become the nameserver A records.
func (bi *BinaryInstaller) ConfigureCoreDNS(domain string, rqliteDSN string, ns1IP, ns2IP, ns3IP string) error {
	return bi.coredns.Configure(domain, rqliteDSN, ns1IP, ns2IP, ns3IP)
}

// InstallCaddy builds and installs Caddy with the custom orama DNS module.
// Thin delegate to the embedded CaddyInstaller.
func (bi *BinaryInstaller) InstallCaddy() error {
	return bi.caddy.Install()
}

// ConfigureCaddy creates Caddy configuration files (the Caddyfile) using the
// given domain, ACME account email, and internal ACME endpoint URL.
func (bi *BinaryInstaller) ConfigureCaddy(domain string, email string, acmeEndpoint string) error {
	return bi.caddy.Configure(domain, email, acmeEndpoint)
}

// Mock system commands for testing (if needed)
var execCommand = exec.Command

View File

@ -0,0 +1,395 @@
package installers
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
)
const (
	// caddyVersion pins the Caddy release that xcaddy builds.
	caddyVersion = "2.10.2"
	// xcaddyRepo is the go-install path for the xcaddy build tool.
	xcaddyRepo = "github.com/caddyserver/xcaddy/cmd/xcaddy@latest"
)

// CaddyInstaller handles Caddy installation with custom DNS module
type CaddyInstaller struct {
	*BaseInstaller
	version   string // Caddy release to build (caddyVersion)
	oramaHome string // root of the orama installation
	dnsModule string // Path to the orama DNS module source
}
// NewCaddyInstaller builds a CaddyInstaller for the given architecture.
// oramaHome is the root of the orama installation; the DNS module source is
// expected under <oramaHome>/src/pkg/caddy/dns/orama.
func NewCaddyInstaller(arch string, logWriter io.Writer, oramaHome string) *CaddyInstaller {
	installer := &CaddyInstaller{
		BaseInstaller: NewBaseInstaller(arch, logWriter),
		version:       caddyVersion,
		oramaHome:     oramaHome,
	}
	installer.dnsModule = filepath.Join(oramaHome, "src", "pkg", "caddy", "dns", "orama")
	return installer
}
// IsInstalled reports whether /usr/bin/caddy already exists AND exposes the
// orama DNS provider module (checked via `caddy list-modules`). Any failure
// to stat or run the binary is treated as "not installed".
func (ci *CaddyInstaller) IsInstalled() bool {
	const caddyPath = "/usr/bin/caddy"
	if _, err := os.Stat(caddyPath); os.IsNotExist(err) {
		return false
	}
	out, err := exec.Command(caddyPath, "list-modules").Output()
	if err != nil {
		return false
	}
	return containsLine(string(out), "dns.providers.orama")
}
// Install builds /usr/bin/caddy from source via xcaddy, embedding the custom
// orama DNS provider module so Caddy can solve DNS-01 ACME challenges through
// the local gateway. No-ops when a suitable binary is already installed.
//
// Requires a Go toolchain on PATH (or under /usr/local/go/bin). The module
// source is generated in-process (see generateProviderCode / generateGoMod),
// built under /tmp/caddy-build, verified with `list-modules`, then copied
// into place and granted CAP_NET_BIND_SERVICE for ports 80/443.
func (ci *CaddyInstaller) Install() error {
	// Idempotency: skip the (slow) source build when already present.
	if ci.IsInstalled() {
		fmt.Fprintf(ci.logWriter, " ✓ Caddy with orama DNS module already installed\n")
		return nil
	}

	fmt.Fprintf(ci.logWriter, " Building Caddy with orama DNS module...\n")

	// Check if Go is available
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("go not found - required to build Caddy. Please install Go first")
	}

	// Extend PATH so child processes also find a Go toolchain installed
	// under /usr/local/go/bin.
	goPath := os.Getenv("PATH") + ":/usr/local/go/bin"

	buildDir := "/tmp/caddy-build"
	// Clean up any previous build
	os.RemoveAll(buildDir)
	if err := os.MkdirAll(buildDir, 0755); err != nil {
		return fmt.Errorf("failed to create build directory: %w", err)
	}
	defer os.RemoveAll(buildDir)

	// Install xcaddy if not available (GOBIN pins it to /usr/local/bin).
	if _, err := exec.LookPath("xcaddy"); err != nil {
		fmt.Fprintf(ci.logWriter, " Installing xcaddy...\n")
		cmd := exec.Command("go", "install", xcaddyRepo)
		cmd.Env = append(os.Environ(), "PATH="+goPath, "GOBIN=/usr/local/bin")
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to install xcaddy: %w\n%s", err, string(output))
		}
	}

	// Create the orama DNS module in build directory
	fmt.Fprintf(ci.logWriter, " Creating orama DNS module...\n")
	moduleDir := filepath.Join(buildDir, "caddy-dns-orama")
	if err := os.MkdirAll(moduleDir, 0755); err != nil {
		return fmt.Errorf("failed to create module directory: %w", err)
	}

	// Write the provider.go file
	providerCode := ci.generateProviderCode()
	if err := os.WriteFile(filepath.Join(moduleDir, "provider.go"), []byte(providerCode), 0644); err != nil {
		return fmt.Errorf("failed to write provider.go: %w", err)
	}

	// Write go.mod
	goMod := ci.generateGoMod()
	if err := os.WriteFile(filepath.Join(moduleDir, "go.mod"), []byte(goMod), 0644); err != nil {
		return fmt.Errorf("failed to write go.mod: %w", err)
	}

	// Run go mod tidy so the module's go.sum is populated before xcaddy builds it.
	tidyCmd := exec.Command("go", "mod", "tidy")
	tidyCmd.Dir = moduleDir
	tidyCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := tidyCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output))
	}

	// Build Caddy with xcaddy (prefer the pinned install path, fall back to PATH).
	fmt.Fprintf(ci.logWriter, " Building Caddy binary...\n")
	xcaddyPath := "/usr/local/bin/xcaddy"
	if _, err := os.Stat(xcaddyPath); os.IsNotExist(err) {
		xcaddyPath = "xcaddy" // Try PATH
	}
	// --with maps the module import path onto the locally generated source dir.
	buildCmd := exec.Command(xcaddyPath, "build",
		"v"+ci.version,
		"--with", "github.com/DeBrosOfficial/caddy-dns-orama="+moduleDir,
		"--output", filepath.Join(buildDir, "caddy"))
	buildCmd.Dir = buildDir
	buildCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := buildCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to build Caddy: %w\n%s", err, string(output))
	}

	// Verify the binary has orama DNS module before installing it.
	verifyCmd := exec.Command(filepath.Join(buildDir, "caddy"), "list-modules")
	output, err := verifyCmd.Output()
	if err != nil {
		return fmt.Errorf("failed to verify Caddy binary: %w", err)
	}
	if !containsLine(string(output), "dns.providers.orama") {
		return fmt.Errorf("Caddy binary does not contain orama DNS module")
	}

	// Install the binary (copy rather than rename; the build dir is removed
	// by the deferred cleanup above).
	fmt.Fprintf(ci.logWriter, " Installing Caddy binary...\n")
	srcBinary := filepath.Join(buildDir, "caddy")
	dstBinary := "/usr/bin/caddy"
	data, err := os.ReadFile(srcBinary)
	if err != nil {
		return fmt.Errorf("failed to read built binary: %w", err)
	}
	if err := os.WriteFile(dstBinary, data, 0755); err != nil {
		return fmt.Errorf("failed to install binary: %w", err)
	}

	// Grant CAP_NET_BIND_SERVICE to allow binding to ports 80/443 without
	// running as root. Best-effort: failure only warns.
	if err := exec.Command("setcap", "cap_net_bind_service=+ep", dstBinary).Run(); err != nil {
		fmt.Fprintf(ci.logWriter, " ⚠️ Warning: failed to setcap on caddy: %v\n", err)
	}

	fmt.Fprintf(ci.logWriter, " ✓ Caddy with orama DNS module installed\n")
	return nil
}
// Configure writes /etc/caddy/Caddyfile for the given domain, ACME account
// email, and orama ACME endpoint URL. The config directory is created first
// if it does not exist.
func (ci *CaddyInstaller) Configure(domain string, email string, acmeEndpoint string) error {
	const configDir = "/etc/caddy"
	if err := os.MkdirAll(configDir, 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %w", err)
	}

	// Render and write the Caddyfile.
	content := ci.generateCaddyfile(domain, email, acmeEndpoint)
	if err := os.WriteFile(filepath.Join(configDir, "Caddyfile"), []byte(content), 0644); err != nil {
		return fmt.Errorf("failed to write Caddyfile: %w", err)
	}
	return nil
}
// generateProviderCode creates the orama DNS provider code.
// The returned Go source is written to provider.go of a throwaway module and
// compiled into Caddy by xcaddy (see Install); it is never compiled as part
// of this package, so it must be kept in sync by hand with the caddy/libdns
// APIs pinned in generateGoMod.
func (ci *CaddyInstaller) generateProviderCode() string {
	return `// Package orama implements a DNS provider for Caddy that uses the Orama Network
// gateway's internal ACME API for DNS-01 challenge validation.
package orama

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/libdns/libdns"
)

func init() {
	caddy.RegisterModule(Provider{})
}

// Provider wraps the Orama DNS provider for Caddy.
type Provider struct {
	// Endpoint is the URL of the Orama gateway's ACME API
	// Default: http://localhost:6001/v1/internal/acme
	Endpoint string ` + "`json:\"endpoint,omitempty\"`" + `
}

// CaddyModule returns the Caddy module information.
func (Provider) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "dns.providers.orama",
		New: func() caddy.Module { return new(Provider) },
	}
}

// Provision sets up the module.
func (p *Provider) Provision(ctx caddy.Context) error {
	if p.Endpoint == "" {
		p.Endpoint = "http://localhost:6001/v1/internal/acme"
	}
	return nil
}

// UnmarshalCaddyfile parses the Caddyfile configuration.
func (p *Provider) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.Next() {
		for d.NextBlock(0) {
			switch d.Val() {
			case "endpoint":
				if !d.NextArg() {
					return d.ArgErr()
				}
				p.Endpoint = d.Val()
			default:
				return d.Errf("unrecognized option: %s", d.Val())
			}
		}
	}
	return nil
}

// AppendRecords adds records to the zone. For ACME, this presents the challenge.
func (p *Provider) AppendRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
	var added []libdns.Record
	for _, rec := range records {
		rr := rec.RR()
		if rr.Type != "TXT" {
			continue
		}
		fqdn := rr.Name + "." + zone
		payload := map[string]string{
			"fqdn":  fqdn,
			"value": rr.Data,
		}
		body, err := json.Marshal(payload)
		if err != nil {
			return added, fmt.Errorf("failed to marshal request: %w", err)
		}
		req, err := http.NewRequestWithContext(ctx, "POST", p.Endpoint+"/present", bytes.NewReader(body))
		if err != nil {
			return added, fmt.Errorf("failed to create request: %w", err)
		}
		req.Header.Set("Content-Type", "application/json")
		client := &http.Client{Timeout: 30 * time.Second}
		resp, err := client.Do(req)
		if err != nil {
			return added, fmt.Errorf("failed to present challenge: %w", err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return added, fmt.Errorf("present failed with status %d", resp.StatusCode)
		}
		added = append(added, rec)
	}
	return added, nil
}

// DeleteRecords removes records from the zone. For ACME, this cleans up the challenge.
func (p *Provider) DeleteRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
	var deleted []libdns.Record
	for _, rec := range records {
		rr := rec.RR()
		if rr.Type != "TXT" {
			continue
		}
		fqdn := rr.Name + "." + zone
		payload := map[string]string{
			"fqdn":  fqdn,
			"value": rr.Data,
		}
		body, err := json.Marshal(payload)
		if err != nil {
			return deleted, fmt.Errorf("failed to marshal request: %w", err)
		}
		req, err := http.NewRequestWithContext(ctx, "POST", p.Endpoint+"/cleanup", bytes.NewReader(body))
		if err != nil {
			return deleted, fmt.Errorf("failed to create request: %w", err)
		}
		req.Header.Set("Content-Type", "application/json")
		client := &http.Client{Timeout: 30 * time.Second}
		resp, err := client.Do(req)
		if err != nil {
			return deleted, fmt.Errorf("failed to cleanup challenge: %w", err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return deleted, fmt.Errorf("cleanup failed with status %d", resp.StatusCode)
		}
		deleted = append(deleted, rec)
	}
	return deleted, nil
}

// GetRecords returns the records in the zone. Not used for ACME.
func (p *Provider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {
	return nil, nil
}

// SetRecords sets the records in the zone. Not used for ACME.
func (p *Provider) SetRecords(ctx context.Context, zone string, records []libdns.Record) ([]libdns.Record, error) {
	return nil, nil
}

// Interface guards
var (
	_ caddy.Module          = (*Provider)(nil)
	_ caddy.Provisioner     = (*Provider)(nil)
	_ caddyfile.Unmarshaler = (*Provider)(nil)
	_ libdns.RecordAppender = (*Provider)(nil)
	_ libdns.RecordDeleter  = (*Provider)(nil)
	_ libdns.RecordGetter   = (*Provider)(nil)
	_ libdns.RecordSetter   = (*Provider)(nil)
)
`
}
// generateGoMod creates the go.mod file for the throwaway provider module
// built by xcaddy (see Install).
//
// The caddy requirement is derived from ci.version ("2.10.2" -> "v2.10.2").
// The previous implementation spliced the version with caddyVersion[2:],
// which silently assumed the string starts with "2." and would panic on a
// version shorter than two characters; prefixing "v" directly is equivalent
// for the current value and robust for any future version.
// NOTE(review): the libdns pin (v1.1.0) must stay compatible with the pinned
// Caddy release — confirm when bumping either version.
func (ci *CaddyInstaller) generateGoMod() string {
	return `module github.com/DeBrosOfficial/caddy-dns-orama

go 1.22

require (
	github.com/caddyserver/caddy/v2 v` + ci.version + `
	github.com/libdns/libdns v1.1.0
)
`
}
// generateCaddyfile creates the Caddyfile configuration.
// All three sites reverse-proxy to the embedded gateway on localhost:6001.
// The wildcard *.<domain> site obtains certificates via the orama DNS-01
// provider pointed at acmeEndpoint; email is the ACME account address.
// NOTE(review): the bare :443 catch-all also declares the orama DNS challenge
// even though it has no hostname — confirm this is the intended behavior.
func (ci *CaddyInstaller) generateCaddyfile(domain, email, acmeEndpoint string) string {
	return fmt.Sprintf(`{
	email %s
}

*.%s {
	tls {
		dns orama {
			endpoint %s
		}
	}
	reverse_proxy localhost:6001
}

:443 {
	tls {
		dns orama {
			endpoint %s
		}
	}
	reverse_proxy localhost:6001
}

:80 {
	reverse_proxy localhost:6001
}
`, email, domain, acmeEndpoint, acmeEndpoint)
}

View File

@ -0,0 +1,362 @@
package installers
import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)
const (
	// coreDNSVersion pins the CoreDNS release tag that Install clones and builds.
	coreDNSVersion = "1.12.0"
	// coreDNSRepo is the upstream CoreDNS git repository URL.
	coreDNSRepo = "https://github.com/coredns/coredns.git"
)

// CoreDNSInstaller handles CoreDNS installation with RQLite plugin
type CoreDNSInstaller struct {
	*BaseInstaller
	version      string // CoreDNS release to build (coreDNSVersion)
	oramaHome    string // root of the orama installation
	rqlitePlugin string // Path to the RQLite plugin source
}
// NewCoreDNSInstaller builds a CoreDNSInstaller for the given architecture.
// oramaHome is the root of the orama installation; the RQLite plugin source
// is expected under <oramaHome>/src/pkg/coredns/rqlite.
func NewCoreDNSInstaller(arch string, logWriter io.Writer, oramaHome string) *CoreDNSInstaller {
	inst := &CoreDNSInstaller{
		BaseInstaller: NewBaseInstaller(arch, logWriter),
		version:       coreDNSVersion,
		oramaHome:     oramaHome,
	}
	inst.rqlitePlugin = filepath.Join(oramaHome, "src", "pkg", "coredns", "rqlite")
	return inst
}
// IsInstalled reports whether /usr/local/bin/coredns already exists AND its
// plugin listing (`coredns -plugins`) includes the custom rqlite plugin. Any
// failure to stat or run the binary is treated as "not installed".
func (ci *CoreDNSInstaller) IsInstalled() bool {
	const corednsPath = "/usr/local/bin/coredns"
	if _, err := os.Stat(corednsPath); os.IsNotExist(err) {
		return false
	}
	out, err := exec.Command(corednsPath, "-plugins").Output()
	if err != nil {
		return false
	}
	return containsLine(string(out), "rqlite")
}
// Install clones CoreDNS at the pinned release, injects the custom rqlite
// plugin source from the orama checkout, regenerates the plugin registry,
// and builds a static /usr/local/bin/coredns binary. No-ops when a suitable
// binary already exists.
//
// Requires a Go toolchain (on PATH or under /usr/local/go/bin) and git. The
// build happens in /tmp/coredns-build, which is removed again on return.
func (ci *CoreDNSInstaller) Install() error {
	// Idempotency: skip the (slow) source build when already present.
	if ci.IsInstalled() {
		fmt.Fprintf(ci.logWriter, " ✓ CoreDNS with RQLite plugin already installed\n")
		return nil
	}

	fmt.Fprintf(ci.logWriter, " Building CoreDNS with RQLite plugin...\n")

	// Check if Go is available
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("go not found - required to build CoreDNS. Please install Go first")
	}

	// Check if RQLite plugin source exists (shipped with the orama repo).
	if _, err := os.Stat(ci.rqlitePlugin); os.IsNotExist(err) {
		return fmt.Errorf("RQLite plugin source not found at %s - ensure the repository is cloned", ci.rqlitePlugin)
	}

	buildDir := "/tmp/coredns-build"
	// Clean up any previous build
	os.RemoveAll(buildDir)
	if err := os.MkdirAll(buildDir, 0755); err != nil {
		return fmt.Errorf("failed to create build directory: %w", err)
	}
	defer os.RemoveAll(buildDir)

	// Clone CoreDNS at the pinned release tag (shallow clone keeps it fast).
	fmt.Fprintf(ci.logWriter, " Cloning CoreDNS v%s...\n", ci.version)
	cmd := exec.Command("git", "clone", "--depth", "1", "--branch", "v"+ci.version, coreDNSRepo, buildDir)
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to clone CoreDNS: %w\n%s", err, string(output))
	}

	// Copy custom RQLite plugin into the CoreDNS source tree.
	fmt.Fprintf(ci.logWriter, " Copying RQLite plugin...\n")
	pluginDir := filepath.Join(buildDir, "plugin", "rqlite")
	if err := os.MkdirAll(pluginDir, 0755); err != nil {
		return fmt.Errorf("failed to create plugin directory: %w", err)
	}

	// Copy all .go files from the RQLite plugin (subdirectories and non-Go
	// files are intentionally skipped).
	files, err := os.ReadDir(ci.rqlitePlugin)
	if err != nil {
		return fmt.Errorf("failed to read plugin source: %w", err)
	}
	for _, file := range files {
		if file.IsDir() || filepath.Ext(file.Name()) != ".go" {
			continue
		}
		srcPath := filepath.Join(ci.rqlitePlugin, file.Name())
		dstPath := filepath.Join(pluginDir, file.Name())
		data, err := os.ReadFile(srcPath)
		if err != nil {
			return fmt.Errorf("failed to read %s: %w", file.Name(), err)
		}
		if err := os.WriteFile(dstPath, data, 0644); err != nil {
			return fmt.Errorf("failed to write %s: %w", file.Name(), err)
		}
	}

	// Create plugin.cfg with our custom RQLite plugin
	fmt.Fprintf(ci.logWriter, " Configuring plugins...\n")
	pluginCfg := ci.generatePluginConfig()
	pluginCfgPath := filepath.Join(buildDir, "plugin.cfg")
	if err := os.WriteFile(pluginCfgPath, []byte(pluginCfg), 0644); err != nil {
		return fmt.Errorf("failed to write plugin.cfg: %w", err)
	}

	// Add dependencies required by the injected plugin.
	fmt.Fprintf(ci.logWriter, " Adding dependencies...\n")
	// Extend PATH so child processes also find a toolchain under /usr/local/go.
	goPath := os.Getenv("PATH") + ":/usr/local/go/bin"
	getCmd := exec.Command("go", "get", "github.com/miekg/dns@latest")
	getCmd.Dir = buildDir
	getCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := getCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to get miekg/dns: %w\n%s", err, string(output))
	}
	getCmd = exec.Command("go", "get", "go.uber.org/zap@latest")
	getCmd.Dir = buildDir
	getCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := getCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to get zap: %w\n%s", err, string(output))
	}
	tidyCmd := exec.Command("go", "mod", "tidy")
	tidyCmd.Dir = buildDir
	tidyCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := tidyCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to run go mod tidy: %w\n%s", err, string(output))
	}

	// Generate plugin code — regenerates CoreDNS's plugin wiring from the
	// plugin.cfg written above so the rqlite plugin is compiled in.
	fmt.Fprintf(ci.logWriter, " Generating plugin code...\n")
	genCmd := exec.Command("go", "generate")
	genCmd.Dir = buildDir
	genCmd.Env = append(os.Environ(), "PATH="+goPath)
	if output, err := genCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to generate: %w\n%s", err, string(output))
	}

	// Build CoreDNS (CGO disabled for a static, portable binary).
	fmt.Fprintf(ci.logWriter, " Building CoreDNS binary...\n")
	buildCmd := exec.Command("go", "build", "-o", "coredns")
	buildCmd.Dir = buildDir
	buildCmd.Env = append(os.Environ(), "PATH="+goPath, "CGO_ENABLED=0")
	if output, err := buildCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to build CoreDNS: %w\n%s", err, string(output))
	}

	// Verify the binary has rqlite plugin before installing it.
	verifyCmd := exec.Command(filepath.Join(buildDir, "coredns"), "-plugins")
	output, err := verifyCmd.Output()
	if err != nil {
		return fmt.Errorf("failed to verify CoreDNS binary: %w", err)
	}
	if !containsLine(string(output), "rqlite") {
		return fmt.Errorf("CoreDNS binary does not contain rqlite plugin")
	}

	// Install the binary (copy into place; build dir is removed by the defer).
	fmt.Fprintf(ci.logWriter, " Installing CoreDNS binary...\n")
	srcBinary := filepath.Join(buildDir, "coredns")
	dstBinary := "/usr/local/bin/coredns"
	data, err := os.ReadFile(srcBinary)
	if err != nil {
		return fmt.Errorf("failed to read built binary: %w", err)
	}
	if err := os.WriteFile(dstBinary, data, 0755); err != nil {
		return fmt.Errorf("failed to install binary: %w", err)
	}

	fmt.Fprintf(ci.logWriter, " ✓ CoreDNS with RQLite plugin installed\n")
	return nil
}
// Configure writes /etc/coredns/Corefile plus the static zone file
// db.<domain>. rqliteDSN points the rqlite plugin at the local RQLite HTTP
// API; ns1IP..ns3IP become the nameserver A records in the zone file.
func (ci *CoreDNSInstaller) Configure(domain string, rqliteDSN string, ns1IP, ns2IP, ns3IP string) error {
	const configDir = "/etc/coredns"
	if err := os.MkdirAll(configDir, 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %w", err)
	}

	// Render and write the Corefile.
	corefileContent := ci.generateCorefile(domain, rqliteDSN, configDir)
	if err := os.WriteFile(filepath.Join(configDir, "Corefile"), []byte(corefileContent), 0644); err != nil {
		return fmt.Errorf("failed to write Corefile: %w", err)
	}

	// Render and write the static zone file.
	zoneContent := ci.generateZoneFile(domain, ns1IP, ns2IP, ns3IP)
	if err := os.WriteFile(filepath.Join(configDir, "db."+domain), []byte(zoneContent), 0644); err != nil {
		return fmt.Errorf("failed to write zone file: %w", err)
	}
	return nil
}
// generatePluginConfig creates the plugin.cfg used when building CoreDNS.
// It mirrors the upstream default plugin list with the custom rqlite plugin
// appended at the end.
// NOTE(review): plugin.cfg ordering determines the plugin chain order in
// CoreDNS — confirm that rqlite last is the intended position.
func (ci *CoreDNSInstaller) generatePluginConfig() string {
	return `# CoreDNS plugins with RQLite support for dynamic DNS records
metadata:metadata
cancel:cancel
tls:tls
reload:reload
nsid:nsid
bufsize:bufsize
root:root
bind:bind
debug:debug
trace:trace
ready:ready
health:health
pprof:pprof
prometheus:metrics
errors:errors
log:log
dnstap:dnstap
local:local
dns64:dns64
acl:acl
any:any
chaos:chaos
loadbalance:loadbalance
cache:cache
rewrite:rewrite
header:header
dnssec:dnssec
autopath:autopath
minimal:minimal
template:template
transfer:transfer
hosts:hosts
file:file
auto:auto
secondary:secondary
loop:loop
forward:forward
grpc:grpc
erratic:erratic
whoami:whoami
on:github.com/coredns/caddy/onevent
sign:sign
view:view
rqlite:rqlite
`
}
// generateCorefile creates the CoreDNS configuration.
// The zone block for domain consults the rqlite plugin first (dynamic
// records: deployment A records and ACME TXT challenges) and falls back to
// the static zone file written by Configure for SOA/NS records; all queries
// outside the zone are forwarded to public resolvers.
func (ci *CoreDNSInstaller) generateCorefile(domain, rqliteDSN, configDir string) string {
	return fmt.Sprintf(`# CoreDNS configuration for %s
# Uses RQLite for dynamic DNS records (deployments, ACME challenges)
# Falls back to static zone file for base records (SOA, NS)

%s {
	# First try RQLite for dynamic records (TXT for ACME, A for deployments)
	rqlite {
		dsn %s
		refresh 5s
		ttl 60
		cache_size 10000
	}

	# Fall back to static zone file for SOA/NS records
	file %s/db.%s

	# Enable logging and error reporting
	log
	errors
	cache 60
}

# Forward all other queries to upstream DNS
. {
	forward . 8.8.8.8 8.8.4.4 1.1.1.1
	cache 300
	errors
}
`, domain, domain, rqliteDSN, configDir, domain)
}
// generateZoneFile creates the static DNS zone file served by the `file`
// plugin: SOA, the three NS records with their A records, round-robin A
// records for the apex, and a wildcard fallback (rqlite-managed records take
// precedence for specific subdomains).
// NOTE(review): the SOA serial is hard-coded (2024012401); if secondaries
// ever transfer this zone they will not notice content changes — confirm
// this is acceptable or derive the serial from the current date.
func (ci *CoreDNSInstaller) generateZoneFile(domain, ns1IP, ns2IP, ns3IP string) string {
	return fmt.Sprintf(`$ORIGIN %s.
$TTL 300
@	IN	SOA	ns1.%s. admin.%s. (
		2024012401 ; Serial
		3600       ; Refresh
		1800       ; Retry
		604800     ; Expire
		300 )      ; Negative TTL

; Nameservers
@	IN	NS	ns1.%s.
@	IN	NS	ns2.%s.
@	IN	NS	ns3.%s.

; Nameserver A records
ns1	IN	A	%s
ns2	IN	A	%s
ns3	IN	A	%s

; Root domain points to all nodes (round-robin)
@	IN	A	%s
@	IN	A	%s
@	IN	A	%s

; Wildcard fallback (RQLite records take precedence for specific subdomains)
*	IN	A	%s
*	IN	A	%s
*	IN	A	%s
`, domain, domain, domain, domain, domain, domain,
		ns1IP, ns2IP, ns3IP,
		ns1IP, ns2IP, ns3IP,
		ns1IP, ns2IP, ns3IP)
}
// containsLine reports whether any line of text equals line, either exactly
// or prefixed with "dns." (both `coredns -plugins` and `caddy list-modules`
// print plugin identifiers one per line).
//
// Each line is whitespace-trimmed before comparison: plugin listings appear
// to indent their entries, which the previous exact-equality comparison
// never matched — causing IsInstalled to report false and trigger a full
// rebuild on every run. Trimming is harmless for unindented output.
func containsLine(text, line string) bool {
	for _, raw := range strings.Split(text, "\n") {
		trimmed := strings.TrimSpace(raw)
		if trimmed == line || trimmed == "dns."+line {
			return true
		}
	}
	return false
}
// splitLines splits a string into lines on '\n'.
//
// Behavior matches the original hand-rolled version exactly: a trailing
// newline does not produce a final empty element, and an empty input yields
// a nil slice. The previous implementation accumulated each line with
// `current += string(c)` (a fresh string allocation per rune, quadratic in
// line length); slicing the input by byte index is linear and allocation-free
// apart from the result. Byte iteration is safe because '\n' is ASCII and
// can never appear inside a multi-byte UTF-8 sequence.
func splitLines(text string) []string {
	var lines []string
	start := 0
	for i := 0; i < len(text); i++ {
		if text[i] == '\n' {
			lines = append(lines, text[start:i])
			start = i + 1
		}
	}
	if start < len(text) {
		lines = append(lines, text[start:])
	}
	return lines
}

View File

@ -269,11 +269,21 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
ps.logf(" ⚠️ anyone-client install warning: %v", err)
}
// Install DeBros binaries
// Install DeBros binaries (must be done before CoreDNS since we need the RQLite plugin source)
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.oramaHome, ps.skipRepoUpdate); err != nil {
return fmt.Errorf("failed to install DeBros binaries: %w", err)
}
// Install CoreDNS with RQLite plugin (for dynamic DNS records and ACME challenges)
if err := ps.binaryInstaller.InstallCoreDNS(); err != nil {
ps.logf(" ⚠️ CoreDNS install warning: %v", err)
}
// Install Caddy with orama DNS module (for SSL certificate management)
if err := ps.binaryInstaller.InstallCaddy(); err != nil {
ps.logf(" ⚠️ Caddy install warning: %v", err)
}
ps.logf(" ✓ All binaries installed")
return nil
}
@ -431,6 +441,39 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP s
exec.Command("chown", "debros:debros", olricConfigPath).Run()
ps.logf(" ✓ Olric config generated")
// Configure CoreDNS (if domain is provided)
if domain != "" {
// Get node IPs from peer addresses or use the VPS IP for all
ns1IP := vpsIP
ns2IP := vpsIP
ns3IP := vpsIP
if len(peerAddresses) >= 1 && peerAddresses[0] != "" {
ns1IP = peerAddresses[0]
}
if len(peerAddresses) >= 2 && peerAddresses[1] != "" {
ns2IP = peerAddresses[1]
}
if len(peerAddresses) >= 3 && peerAddresses[2] != "" {
ns3IP = peerAddresses[2]
}
rqliteDSN := "http://localhost:5001"
if err := ps.binaryInstaller.ConfigureCoreDNS(domain, rqliteDSN, ns1IP, ns2IP, ns3IP); err != nil {
ps.logf(" ⚠️ CoreDNS config warning: %v", err)
} else {
ps.logf(" ✓ CoreDNS config generated")
}
// Configure Caddy
email := "admin@" + domain
acmeEndpoint := "http://localhost:6001/v1/internal/acme"
if err := ps.binaryInstaller.ConfigureCaddy(domain, email, acmeEndpoint); err != nil {
ps.logf(" ⚠️ Caddy config warning: %v", err)
} else {
ps.logf(" ✓ Caddy config generated")
}
}
return nil
}
@ -490,6 +533,31 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
}
ps.logf(" ✓ Anyone Client service created")
// CoreDNS service (for dynamic DNS with RQLite)
if _, err := os.Stat("/usr/local/bin/coredns"); err == nil {
corednsUnit := ps.serviceGenerator.GenerateCoreDNSService()
if err := ps.serviceController.WriteServiceUnit("coredns.service", corednsUnit); err != nil {
ps.logf(" ⚠️ Failed to write CoreDNS service: %v", err)
} else {
ps.logf(" ✓ CoreDNS service created")
}
}
// Caddy service (for SSL/TLS with DNS-01 ACME challenges)
if _, err := os.Stat("/usr/bin/caddy"); err == nil {
// Create caddy user if it doesn't exist
exec.Command("useradd", "-r", "-s", "/sbin/nologin", "caddy").Run()
exec.Command("mkdir", "-p", "/var/lib/caddy").Run()
exec.Command("chown", "caddy:caddy", "/var/lib/caddy").Run()
caddyUnit := ps.serviceGenerator.GenerateCaddyService()
if err := ps.serviceController.WriteServiceUnit("caddy.service", caddyUnit); err != nil {
ps.logf(" ⚠️ Failed to write Caddy service: %v", err)
} else {
ps.logf(" ✓ Caddy service created")
}
}
// Reload systemd daemon
if err := ps.serviceController.DaemonReload(); err != nil {
return fmt.Errorf("failed to reload systemd: %w", err)
@ -500,6 +568,14 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
// Note: debros-gateway.service is no longer needed - each node has an embedded gateway
// Note: debros-rqlite.service is NOT created - RQLite is managed by each node internally
services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service", "debros-anyone-client.service"}
// Add CoreDNS and Caddy if installed
if _, err := os.Stat("/usr/local/bin/coredns"); err == nil {
services = append(services, "coredns.service")
}
if _, err := os.Stat("/usr/bin/caddy"); err == nil {
services = append(services, "caddy.service")
}
for _, svc := range services {
if err := ps.serviceController.EnableService(svc); err != nil {
ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)

View File

@ -324,6 +324,61 @@ WantedBy=multi-user.target
`, ssg.oramaHome, logFile, ssg.oramaDir)
}
// GenerateCoreDNSService generates the CoreDNS systemd unit.
// Runs as root (CoreDNS binds the privileged DNS port 53) and is ordered
// after debros-node.service, whose embedded RQLite instance backs the
// rqlite plugin configured in the Corefile.
func (ssg *SystemdServiceGenerator) GenerateCoreDNSService() string {
	return `[Unit]
Description=CoreDNS DNS Server with RQLite backend
Documentation=https://coredns.io
After=network-online.target debros-node.service
Wants=network-online.target debros-node.service

[Service]
Type=simple
User=root
ExecStart=/usr/local/bin/coredns -conf /etc/coredns/Corefile
Restart=on-failure
RestartSec=5
SyslogIdentifier=coredns
NoNewPrivileges=true
ProtectSystem=full
ProtectHome=true

[Install]
WantedBy=multi-user.target
`
}
// GenerateCaddyService returns the systemd unit file contents for Caddy,
// which provides SSL/TLS termination for the node.
//
// Ordering: starts after the network, debros-node.service and
// coredns.service. Requires=debros-node.service ties Caddy's lifetime to the
// node service (if the node stops, Caddy is stopped too).
//
// Unlike the CoreDNS unit, this one runs as the unprivileged caddy user;
// AmbientCapabilities=CAP_NET_BIND_SERVICE lets it bind privileged ports
// (80/443) without root. ExecReload enables config reloads without downtime,
// and the NOFILE/NPROC limits are raised for high connection counts.
// The caddy user and /var/lib/caddy home are created by the setup phase
// before this unit is written.
func (ssg *SystemdServiceGenerator) GenerateCaddyService() string {
	return `[Unit]
Description=Caddy HTTP/2 Server
Documentation=https://caddyserver.com/docs/
After=network-online.target debros-node.service coredns.service
Wants=network-online.target
Requires=debros-node.service
[Service]
Type=simple
User=caddy
Group=caddy
ExecStart=/usr/bin/caddy run --environ --config /etc/caddy/Caddyfile
ExecReload=/usr/bin/caddy reload --config /etc/caddy/Caddyfile
TimeoutStopSec=5s
LimitNOFILE=1048576
LimitNPROC=512
PrivateTmp=true
ProtectSystem=full
AmbientCapabilities=CAP_NET_BIND_SERVICE
Restart=on-failure
RestartSec=5
SyslogIdentifier=caddy
[Install]
WantedBy=multi-user.target
`
}
// SystemdController manages systemd service operations
type SystemdController struct {
systemdDir string

133
pkg/gateway/acme_handler.go Normal file
View File

@ -0,0 +1,133 @@
package gateway
import (
"encoding/json"
"net/http"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"go.uber.org/zap"
)
// ACMERequest represents the request body for ACME DNS-01 challenges
// from the lego httpreq provider.
//
// The provider POSTs this JSON shape to both the present and cleanup
// endpoints. The cleanup handler only requires FQDN; Value may be empty
// there.
type ACMERequest struct {
	FQDN  string `json:"fqdn"`  // challenge record name, e.g. "_acme-challenge.example.com."
	Value string `json:"value"` // the challenge token to publish as a TXT record
}
// acmePresentHandler handles DNS-01 challenge presentation.
// POST /v1/internal/acme/present
//
// It writes a TXT record for the challenge FQDN into the dns_records table
// (namespace 'acme') so CoreDNS — which serves records from RQLite — can
// answer the ACME validator's query.
//
// NOTE(review): this path is registered as public (unauthenticated), so
// anyone who can reach the gateway can create TXT records in the 'acme'
// namespace. Consider restricting it to localhost or requiring a shared
// secret from Caddy.
func (g *Gateway) acmePresentHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// ACME challenge payloads are tiny; cap the body so this public endpoint
	// cannot be fed arbitrarily large requests.
	r.Body = http.MaxBytesReader(w, r.Body, 64<<10)
	var req ACMERequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		g.logger.Error("Failed to decode ACME present request", zap.Error(err))
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.FQDN == "" || req.Value == "" {
		http.Error(w, "fqdn and value are required", http.StatusBadRequest)
		return
	}
	// Normalize FQDN: lowercase with exactly one trailing dot (DNS format).
	fqdn := strings.TrimSuffix(req.FQDN, ".")
	fqdn = strings.ToLower(fqdn) + "." // Add trailing dot for DNS format
	g.logger.Info("ACME DNS-01 challenge: presenting TXT record",
		zap.String("fqdn", fqdn),
		zap.String("value_prefix", req.Value[:min(10, len(req.Value))]+"..."),
	)
	// Insert TXT record into dns_records
	db := g.client.Database()
	ctx := client.WithInternalAuth(r.Context())
	// First, delete any existing ACME challenge for this FQDN (in case of
	// retry). Best-effort: a failure here only means a stale record lingers.
	deleteQuery := `DELETE FROM dns_records WHERE fqdn = ? AND record_type = 'TXT' AND namespace = 'acme'`
	_, _ = db.Query(ctx, deleteQuery, fqdn)
	// Insert new TXT record with a short (60s) TTL so validators see fresh
	// data. NOTE(review): DML is issued through db.Query — confirm the client
	// API executes writes this way (vs. a dedicated Exec method).
	insertQuery := `INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, is_active, created_at, updated_at, created_by)
		VALUES (?, 'TXT', ?, 60, 'acme', TRUE, datetime('now'), datetime('now'), 'system')`
	_, err := db.Query(ctx, insertQuery, fqdn, req.Value)
	if err != nil {
		g.logger.Error("Failed to insert ACME TXT record", zap.Error(err))
		http.Error(w, "Failed to create DNS record", http.StatusInternalServerError)
		return
	}
	g.logger.Info("ACME TXT record created",
		zap.String("fqdn", fqdn),
	)
	// Give DNS a moment to propagate (CoreDNS reads from RQLite)
	time.Sleep(100 * time.Millisecond)
	w.WriteHeader(http.StatusOK)
}
// acmeCleanupHandler handles DNS-01 challenge cleanup.
// POST /v1/internal/acme/cleanup
//
// Removes the ACME TXT record(s) for the given FQDN from the dns_records
// table after validation completes. Only fqdn is required in the body.
//
// NOTE(review): like the present endpoint, this path is registered as
// public (unauthenticated); consider restricting access.
func (g *Gateway) acmeCleanupHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// ACME challenge payloads are tiny; cap the body on this public endpoint.
	r.Body = http.MaxBytesReader(w, r.Body, 64<<10)
	var req ACMERequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		g.logger.Error("Failed to decode ACME cleanup request", zap.Error(err))
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.FQDN == "" {
		http.Error(w, "fqdn is required", http.StatusBadRequest)
		return
	}
	// Normalize FQDN: lowercase with exactly one trailing dot (DNS format).
	fqdn := strings.TrimSuffix(req.FQDN, ".")
	fqdn = strings.ToLower(fqdn) + "." // Add trailing dot for DNS format
	g.logger.Info("ACME DNS-01 challenge: cleaning up TXT record",
		zap.String("fqdn", fqdn),
	)
	// Delete TXT record from dns_records.
	// NOTE(review): DML issued via db.Query — confirm the client API executes
	// writes this way (vs. a dedicated Exec method).
	db := g.client.Database()
	ctx := client.WithInternalAuth(r.Context())
	deleteQuery := `DELETE FROM dns_records WHERE fqdn = ? AND record_type = 'TXT' AND namespace = 'acme'`
	_, err := db.Query(ctx, deleteQuery, fqdn)
	if err != nil {
		g.logger.Error("Failed to delete ACME TXT record", zap.Error(err))
		http.Error(w, "Failed to delete DNS record", http.StatusInternalServerError)
		return
	}
	g.logger.Info("ACME TXT record deleted",
		zap.String("fqdn", fqdn),
	)
	w.WriteHeader(http.StatusOK)
}
// min reports the smaller of its two integer arguments.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

View File

@ -199,7 +199,7 @@ func isPublicPath(p string) bool {
}
switch p {
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key", "/v1/network/status", "/v1/network/peers", "/v1/internal/tls/check":
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key", "/v1/network/status", "/v1/network/peers", "/v1/internal/tls/check", "/v1/internal/acme/present", "/v1/internal/acme/cleanup":
return true
default:
return false

View File

@ -18,6 +18,10 @@ func (g *Gateway) Routes() http.Handler {
// TLS check endpoint for Caddy on-demand TLS
mux.HandleFunc("/v1/internal/tls/check", g.tlsCheckHandler)
// ACME DNS-01 challenge endpoints (for Caddy httpreq DNS provider)
mux.HandleFunc("/v1/internal/acme/present", g.acmePresentHandler)
mux.HandleFunc("/v1/internal/acme/cleanup", g.acmeCleanupHandler)
// auth endpoints
mux.HandleFunc("/v1/auth/jwks", g.authService.JWKSHandler)
mux.HandleFunc("/.well-known/jwks.json", g.authService.JWKSHandler)

View File

@ -1,84 +0,0 @@
#!/bin/bash
set -e
# Deploy CoreDNS to nameserver nodes
# Usage: ./deploy-coredns.sh <node1_ip> <node2_ip> <node3_ip> <node4_ip>
#
# For each of the four nodes, copies the locally built CoreDNS binary, the
# Corefile and the systemd unit over SSH (as user 'debros', escalating with
# sudo on the remote side), then enables/restarts the coredns service and
# reports whether it came up. With `set -e`, a failing scp/ssh step aborts
# the whole deploy.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Exactly four nameserver IPs are used; extra arguments are ignored.
if [ $# -lt 4 ]; then
    echo "Usage: $0 <node1_ip> <node2_ip> <node3_ip> <node4_ip>"
    echo "Example: $0 1.2.3.4 1.2.3.5 1.2.3.6 1.2.3.7"
    exit 1
fi
NODES=("$1" "$2" "$3" "$4")
# Artifacts: binary produced by build-coredns.sh plus config from the repo.
BINARY="$PROJECT_ROOT/bin/coredns-custom"
COREFILE="$PROJECT_ROOT/configs/coredns/Corefile"
SYSTEMD_SERVICE="$PROJECT_ROOT/configs/coredns/coredns.service"
# Check if binary exists
if [ ! -f "$BINARY" ]; then
    echo "❌ CoreDNS binary not found at $BINARY"
    echo "Run ./build-coredns.sh first"
    exit 1
fi
echo "🚀 Deploying CoreDNS to ${#NODES[@]} nodes..."
echo ""
for i in "${!NODES[@]}"; do
    node="${NODES[$i]}"
    node_num=$((i + 1))
    echo "[$node_num/4] Deploying to ns${node_num}.orama.network ($node)..."
    # Copy binary (staged in /tmp, then moved into place with sudo)
    echo " → Copying binary..."
    scp "$BINARY" "debros@$node:/tmp/coredns"
    ssh "debros@$node" "sudo mv /tmp/coredns /usr/local/bin/coredns && sudo chmod +x /usr/local/bin/coredns"
    # Copy Corefile
    echo " → Copying configuration..."
    ssh "debros@$node" "sudo mkdir -p /etc/coredns"
    scp "$COREFILE" "debros@$node:/tmp/Corefile"
    ssh "debros@$node" "sudo mv /tmp/Corefile /etc/coredns/Corefile"
    # Copy systemd service
    echo " → Installing systemd service..."
    scp "$SYSTEMD_SERVICE" "debros@$node:/tmp/coredns.service"
    ssh "debros@$node" "sudo mv /tmp/coredns.service /etc/systemd/system/coredns.service"
    # Start service
    echo " → Starting CoreDNS..."
    ssh "debros@$node" "sudo systemctl daemon-reload"
    ssh "debros@$node" "sudo systemctl enable coredns"
    ssh "debros@$node" "sudo systemctl restart coredns"
    # Check status (guarded by `if`, so a down service does not abort the
    # loop under `set -e` — remaining nodes are still deployed)
    echo " → Checking status..."
    if ssh "debros@$node" "sudo systemctl is-active --quiet coredns"; then
        echo " ✅ CoreDNS running on ns${node_num}.orama.network"
    else
        echo " ❌ CoreDNS failed to start on ns${node_num}.orama.network"
        echo " Check logs: ssh debros@$node sudo journalctl -u coredns -n 50"
    fi
    echo ""
done
echo "✅ Deployment complete!"
echo ""
echo "Next steps:"
echo " 1. Test DNS resolution: dig @${NODES[0]} test.orama.network"
echo " 2. Update registrar NS records (ONLY after testing):"
echo " NS orama.network. ns1.orama.network."
echo " NS orama.network. ns2.orama.network."
echo " NS orama.network. ns3.orama.network."
echo " NS orama.network. ns4.orama.network."
echo " A ns1.orama.network. ${NODES[0]}"
echo " A ns2.orama.network. ${NODES[1]}"
echo " A ns3.orama.network. ${NODES[2]}"
echo " A ns4.orama.network. ${NODES[3]}"
echo ""

View File

@ -1,240 +0,0 @@
#!/bin/bash
# install-coredns.sh - Install and configure CoreDNS for DeBros Network nodes
# This script sets up a simple wildcard DNS server for deployment subdomains
#
# It downloads a CoreDNS release, writes a Corefile plus a wildcard zone for
# $DOMAIN pointing at this node, installs a systemd unit, redirects port 80
# to the gateway (6001) via iptables, opens the firewall, and verifies that
# the server starts and resolves a test name. Must run as root.
set -euo pipefail
COREDNS_VERSION="${COREDNS_VERSION:-1.11.1}"
ARCH="linux_amd64"
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="/etc/coredns"
DATA_DIR="/var/lib/coredns"
USER="debros"
# Configuration - Override these with environment variables
DOMAIN="${DOMAIN:-dbrs.space}"
NODE_IP="${NODE_IP:-}" # Auto-detected if not provided
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}
# Check if running as root
if [ "$EUID" -ne 0 ]; then
    log_error "This script must be run as root"
    exit 1
fi
# Check if debros user exists
if ! id -u "$USER" >/dev/null 2>&1; then
    log_warn "User '$USER' does not exist. Creating..."
    useradd -r -m -s /bin/bash "$USER" || true
fi
# Auto-detect node IP if not provided
if [ -z "$NODE_IP" ]; then
    NODE_IP=$(hostname -I | awk '{print $1}')
    log_info "Auto-detected node IP: $NODE_IP"
fi
if [ -z "$NODE_IP" ]; then
    log_error "Could not detect node IP. Please set NODE_IP environment variable."
    exit 1
fi
log_info "Installing CoreDNS $COREDNS_VERSION for domain $DOMAIN..."
# Disable systemd-resolved stub listener to free port 53
log_info "Configuring systemd-resolved..."
mkdir -p /etc/systemd/resolved.conf.d/
cat > /etc/systemd/resolved.conf.d/disable-stub.conf << 'EOF'
[Resolve]
DNSStubListener=no
EOF
systemctl restart systemd-resolved || true
# Download CoreDNS
cd /tmp
DOWNLOAD_URL="https://github.com/coredns/coredns/releases/download/v${COREDNS_VERSION}/coredns_${COREDNS_VERSION}_${ARCH}.tgz"
log_info "Downloading from $DOWNLOAD_URL"
# FIX: under `set -e` a failed curl aborts the script before a separate
# `[ $? -ne 0 ]` check can run (the old check was dead code), so test the
# command directly.
if ! curl -sSL "$DOWNLOAD_URL" -o coredns.tgz; then
    log_error "Failed to download CoreDNS"
    exit 1
fi
# Extract and install
log_info "Extracting CoreDNS..."
tar -xzf coredns.tgz
chmod +x coredns
mv coredns "$INSTALL_DIR/"
log_info "CoreDNS installed to $INSTALL_DIR/coredns"
# Create directories
log_info "Creating directories..."
mkdir -p "$CONFIG_DIR"
mkdir -p "$DATA_DIR"
chown -R "$USER:$USER" "$DATA_DIR"
# Create Corefile for simple wildcard DNS
log_info "Creating Corefile..."
cat > "$CONFIG_DIR/Corefile" << EOF
# CoreDNS configuration for $DOMAIN
# Serves wildcard DNS for deployment subdomains
$DOMAIN {
    file $CONFIG_DIR/db.$DOMAIN
    log
    errors
}
# Forward all other queries to upstream DNS
. {
    forward . 8.8.8.8 8.8.4.4 1.1.1.1
    cache 300
    errors
}
EOF
# Create zone file
log_info "Creating zone file for $DOMAIN..."
SERIAL=$(date +%Y%m%d%H)
cat > "$CONFIG_DIR/db.$DOMAIN" << EOF
\$ORIGIN $DOMAIN.
\$TTL 300
@ IN SOA ns1.$DOMAIN. admin.$DOMAIN. (
    $SERIAL ; Serial
    3600 ; Refresh
    1800 ; Retry
    604800 ; Expire
    300 ) ; Negative TTL
; Nameservers
@ IN NS ns1.$DOMAIN.
@ IN NS ns2.$DOMAIN.
@ IN NS ns3.$DOMAIN.
; Glue records - update these with actual nameserver IPs
ns1 IN A $NODE_IP
ns2 IN A $NODE_IP
ns3 IN A $NODE_IP
; Root domain
@ IN A $NODE_IP
; Wildcard for all subdomains (deployments)
* IN A $NODE_IP
EOF
# Create systemd service
log_info "Creating systemd service..."
cat > /etc/systemd/system/coredns.service << EOF
[Unit]
Description=CoreDNS DNS Server
Documentation=https://coredns.io
After=network.target
[Service]
Type=simple
User=root
ExecStart=$INSTALL_DIR/coredns -conf $CONFIG_DIR/Corefile
Restart=on-failure
RestartSec=5
# Security hardening
NoNewPrivileges=true
ProtectSystem=full
ProtectHome=true
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Set up iptables redirect for port 80 -> gateway port 6001
log_info "Setting up port 80 redirect to gateway port 6001..."
# -C checks whether the rule already exists; only append (-A) when it doesn't.
iptables -t nat -C PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 6001 2>/dev/null || \
    iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 6001
# Make iptables rules persistent
mkdir -p /etc/network/if-pre-up.d/
cat > /etc/network/if-pre-up.d/iptables-redirect << 'EOF'
#!/bin/sh
iptables -t nat -C PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 6001 2>/dev/null || \
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 6001
EOF
chmod +x /etc/network/if-pre-up.d/iptables-redirect
# Configure firewall
log_info "Configuring firewall..."
if command -v ufw >/dev/null 2>&1; then
    ufw allow 53/tcp >/dev/null 2>&1 || true
    ufw allow 53/udp >/dev/null 2>&1 || true
    ufw allow 80/tcp >/dev/null 2>&1 || true
    log_info "Firewall rules added for ports 53 (DNS) and 80 (HTTP)"
else
    log_warn "UFW not found. Please manually configure firewall for ports 53 and 80"
fi
# Enable and start CoreDNS
log_info "Starting CoreDNS..."
systemctl enable coredns
systemctl start coredns
# Verify installation
sleep 2
if systemctl is-active --quiet coredns; then
    log_info "CoreDNS is running"
else
    log_error "CoreDNS failed to start. Check: journalctl -u coredns"
    exit 1
fi
# Test DNS resolution (guarded by `if`, so a failure only warns)
log_info "Testing DNS resolution..."
if dig @localhost test.$DOMAIN +short | grep -q "$NODE_IP"; then
    log_info "DNS test passed: test.$DOMAIN resolves to $NODE_IP"
else
    log_warn "DNS test failed or returned unexpected result"
fi
# Cleanup
rm -f /tmp/coredns.tgz
echo
log_info "============================================"
log_info "CoreDNS installation complete!"
log_info "============================================"
echo
log_info "Configuration:"
log_info " Domain: $DOMAIN"
log_info " Node IP: $NODE_IP"
log_info " Corefile: $CONFIG_DIR/Corefile"
log_info " Zone file: $CONFIG_DIR/db.$DOMAIN"
echo
log_info "Commands:"
log_info " Status: sudo systemctl status coredns"
log_info " Logs: sudo journalctl -u coredns -f"
log_info " Test: dig @localhost anything.$DOMAIN"
echo
log_info "Note: Update the zone file with other nameserver IPs for redundancy:"
log_info " sudo vi $CONFIG_DIR/db.$DOMAIN"
echo
log_info "Done!"

View File

@ -1,45 +0,0 @@
#!/bin/bash
# Install git hooks from .githooks/ to .git/hooks/
# This ensures the pre-push hook runs automatically
#
# Copies every regular file in .githooks/ into .git/hooks/, marks it
# executable, then lists the hooks that are now active.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
GITHOOKS_DIR="$REPO_ROOT/.githooks"
GIT_HOOKS_DIR="$REPO_ROOT/.git/hooks"
if [ ! -d "$GITHOOKS_DIR" ]; then
    echo "Error: .githooks directory not found at $GITHOOKS_DIR"
    exit 1
fi
if [ ! -d "$GIT_HOOKS_DIR" ]; then
    echo "Error: .git/hooks directory not found at $GIT_HOOKS_DIR"
    echo "Are you in a git repository?"
    exit 1
fi
echo "Installing git hooks..."
# Copy all hooks from .githooks/ to .git/hooks/
for hook in "$GITHOOKS_DIR"/*; do
    if [ -f "$hook" ]; then
        hook_name=$(basename "$hook")
        dest="$GIT_HOOKS_DIR/$hook_name"
        echo " Installing $hook_name..."
        cp "$hook" "$dest"
        chmod +x "$dest"
        # Make sure the hook can find the repo root
        # The hooks already use relative paths, so this should work
    fi
done
echo "✓ Git hooks installed successfully!"
echo ""
echo "The following hooks are now active:"
# FIX: exclude git's bundled *.sample hooks — they are inert and were
# previously listed as "active". grep exits non-zero when nothing matches,
# which triggers the "(none)" fallback.
ls -1 "$GIT_HOOKS_DIR" 2>/dev/null | grep -v '\.sample$' || echo " (none)"

View File

@ -1,435 +0,0 @@
#!/bin/bash
set -e
# update-changelog - generate a CHANGELOG.md entry and version bump from
# pending git diffs using the OpenRouter API.
#
# The script collects unstaged/staged (and, outside pre-commit, unpushed)
# diffs, asks an LLM to summarize them as a JSON changelog entry, bumps
# VERSION in the Makefile, and inserts the entry into CHANGELOG.md after the
# [Unreleased] section. Set SKIP_CHANGELOG=1 to bypass.
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NOCOLOR='\033[0m'
log() { echo -e "${CYAN}[update-changelog]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1"; }
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1"; }
# File paths
CHANGELOG_FILE="CHANGELOG.md"
MAKEFILE="Makefile"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$REPO_ROOT"
# Load environment variables from .env file if it exists
if [ -f "$REPO_ROOT/.env" ]; then
    # Export variables from .env file (more portable than source <())
    set -a
    while IFS='=' read -r key value; do
        # Skip comments and empty lines
        [[ "$key" =~ ^#.*$ ]] && continue
        [[ -z "$key" ]] && continue
        # Remove quotes if present
        value=$(echo "$value" | sed -e 's/^"//' -e 's/"$//' -e "s/^'//" -e "s/'$//")
        export "$key=$value"
    done < "$REPO_ROOT/.env"
    set +a
fi
# OpenRouter API key
# Priority: 1. Environment variable, 2. .env file, 3. Exit with error
if [ -z "$OPENROUTER_API_KEY" ]; then
    error "OPENROUTER_API_KEY not found!"
    echo ""
    echo "Please set the API key in one of these ways:"
    echo " 1. Create a .env file in the repo root with:"
    echo " OPENROUTER_API_KEY=your-api-key-here"
    echo ""
    echo " 2. Set it as an environment variable:"
    echo " export OPENROUTER_API_KEY=your-api-key-here"
    echo ""
    echo " 3. Copy .env.example to .env and fill in your key:"
    echo " cp .env.example .env"
    echo ""
    echo "Get your API key from: https://openrouter.ai/keys"
    exit 1
fi
# Check dependencies
if ! command -v jq > /dev/null 2>&1; then
    error "jq is required but not installed. Install it with: brew install jq (macOS) or apt-get install jq (Linux)"
    exit 1
fi
if ! command -v curl > /dev/null 2>&1; then
    error "curl is required but not installed"
    exit 1
fi
# Check for skip flag
# To skip changelog generation, set SKIP_CHANGELOG=1 before committing:
# SKIP_CHANGELOG=1 git commit -m "your message"
# SKIP_CHANGELOG=1 git commit
if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then
    log "Skipping changelog update (SKIP_CHANGELOG is set)"
    exit 0
fi
# Check if we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
    error "Not in a git repository"
    exit 1
fi
# Get current branch
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
REMOTE_BRANCH="origin/$CURRENT_BRANCH"
# Check if remote branch exists; fall back to main/master, then HEAD
if ! git rev-parse --verify "$REMOTE_BRANCH" > /dev/null 2>&1; then
    warning "Remote branch $REMOTE_BRANCH does not exist. Using main/master as baseline."
    if git rev-parse --verify "origin/main" > /dev/null 2>&1; then
        REMOTE_BRANCH="origin/main"
    elif git rev-parse --verify "origin/master" > /dev/null 2>&1; then
        REMOTE_BRANCH="origin/master"
    else
        warning "No remote branch found. Using HEAD as baseline."
        REMOTE_BRANCH="HEAD"
    fi
fi
# Gather all git diffs
log "Collecting git diffs..."
# Check if running from pre-commit context
if [ "$CHANGELOG_CONTEXT" = "pre-commit" ]; then
    log "Running in pre-commit context - analyzing staged changes only"
    # Unstaged changes (usually none in pre-commit, but check anyway)
    UNSTAGED_DIFF=$(git diff 2>/dev/null || echo "")
    UNSTAGED_COUNT=$(echo "$UNSTAGED_DIFF" | grep -c "^diff\|^index" 2>/dev/null || echo "0")
    [ -z "$UNSTAGED_COUNT" ] && UNSTAGED_COUNT="0"
    # Staged changes (these are what we're committing)
    STAGED_DIFF=$(git diff --cached 2>/dev/null || echo "")
    STAGED_COUNT=$(echo "$STAGED_DIFF" | grep -c "^diff\|^index" 2>/dev/null || echo "0")
    [ -z "$STAGED_COUNT" ] && STAGED_COUNT="0"
    # No unpushed commits analysis in pre-commit context
    UNPUSHED_DIFF=""
    UNPUSHED_COMMITS="0"
    log "Found: $UNSTAGED_COUNT unstaged file(s), $STAGED_COUNT staged file(s)"
else
    # Pre-push context - analyze everything
    # Unstaged changes
    UNSTAGED_DIFF=$(git diff 2>/dev/null || echo "")
    UNSTAGED_COUNT=$(echo "$UNSTAGED_DIFF" | grep -c "^diff\|^index" 2>/dev/null || echo "0")
    [ -z "$UNSTAGED_COUNT" ] && UNSTAGED_COUNT="0"
    # Staged changes
    STAGED_DIFF=$(git diff --cached 2>/dev/null || echo "")
    STAGED_COUNT=$(echo "$STAGED_DIFF" | grep -c "^diff\|^index" 2>/dev/null || echo "0")
    [ -z "$STAGED_COUNT" ] && STAGED_COUNT="0"
    # Unpushed commits
    UNPUSHED_DIFF=$(git diff "$REMOTE_BRANCH"..HEAD 2>/dev/null || echo "")
    UNPUSHED_COMMITS=$(git rev-list --count "$REMOTE_BRANCH"..HEAD 2>/dev/null || echo "0")
    [ -z "$UNPUSHED_COMMITS" ] && UNPUSHED_COMMITS="0"
    # Check if the only unpushed commit is a changelog update commit
    # If so, exclude it from the diff to avoid infinite loops
    if [ "$UNPUSHED_COMMITS" -gt 0 ]; then
        LATEST_COMMIT_MSG=$(git log -1 --pretty=%B HEAD 2>/dev/null || echo "")
        if echo "$LATEST_COMMIT_MSG" | grep -q "chore: update changelog and version"; then
            # If the latest commit is a changelog commit, check if there are other commits
            if [ "$UNPUSHED_COMMITS" -eq 1 ]; then
                log "Latest commit is a changelog update. No other changes detected. Skipping changelog update."
                # Clean up any old preview files
                rm -f "$REPO_ROOT/.changelog_preview.tmp" "$REPO_ROOT/.changelog_version.tmp"
                exit 0
            else
                # Multiple commits, exclude the latest changelog commit from diff
                log "Multiple unpushed commits detected. Excluding latest changelog commit from analysis."
                # Get all commits except the latest one
                UNPUSHED_DIFF=$(git diff "$REMOTE_BRANCH"..HEAD~1 2>/dev/null || echo "")
                UNPUSHED_COMMITS=$(git rev-list --count "$REMOTE_BRANCH"..HEAD~1 2>/dev/null || echo "0")
                [ -z "$UNPUSHED_COMMITS" ] && UNPUSHED_COMMITS="0"
            fi
        fi
    fi
    log "Found: $UNSTAGED_COUNT unstaged file(s), $STAGED_COUNT staged file(s), $UNPUSHED_COMMITS unpushed commit(s)"
fi
# Combine all diffs
if [ "$CHANGELOG_CONTEXT" = "pre-commit" ]; then
    ALL_DIFFS="${UNSTAGED_DIFF}
---
STAGED CHANGES:
---
${STAGED_DIFF}"
else
    ALL_DIFFS="${UNSTAGED_DIFF}
---
STAGED CHANGES:
---
${STAGED_DIFF}
---
UNPUSHED COMMITS:
---
${UNPUSHED_DIFF}"
fi
# Check if there are any changes
if [ "$CHANGELOG_CONTEXT" = "pre-commit" ]; then
    # In pre-commit, only check staged changes
    if [ -z "$(echo "$STAGED_DIFF" | tr -d '[:space:]')" ]; then
        log "No staged changes detected. Skipping changelog update."
        rm -f "$REPO_ROOT/.changelog_preview.tmp" "$REPO_ROOT/.changelog_version.tmp"
        exit 0
    fi
else
    # In pre-push, check all changes
    if [ -z "$(echo "$UNSTAGED_DIFF$STAGED_DIFF$UNPUSHED_DIFF" | tr -d '[:space:]')" ]; then
        log "No changes detected (unstaged, staged, or unpushed). Skipping changelog update."
        rm -f "$REPO_ROOT/.changelog_preview.tmp" "$REPO_ROOT/.changelog_version.tmp"
        exit 0
    fi
fi
# Get current version from Makefile
CURRENT_VERSION=$(grep "^VERSION :=" "$MAKEFILE" | sed 's/.*:= *//' | tr -d ' ')
if [ -z "$CURRENT_VERSION" ]; then
    error "Could not find VERSION in Makefile"
    exit 1
fi
log "Current version: $CURRENT_VERSION"
# Get today's date programmatically (YYYY-MM-DD format)
TODAY_DATE=$(date +%Y-%m-%d)
log "Using date: $TODAY_DATE"
# Prepare prompt for OpenRouter
PROMPT="You are analyzing git diffs to create a changelog entry. Based on the following git diffs, create a simple, easy-to-understand changelog entry.
Current version: $CURRENT_VERSION
Git diffs:
\`\`\`
$ALL_DIFFS
\`\`\`
Please respond with ONLY a valid JSON object in this exact format:
{
\"version\": \"x.y.z\",
\"bump_type\": \"minor\" or \"patch\",
\"added\": [\"item1\", \"item2\"],
\"changed\": [\"item1\", \"item2\"],
\"fixed\": [\"item1\", \"item2\"]
}
Rules:
- Bump version based on changes: use \"minor\" for new features, \"patch\" for bug fixes and small changes
- Never bump major version (keep major version the same)
- Keep descriptions simple and easy to understand (1-2 sentences max per item)
- Only include items that actually changed
- If a category is empty, use an empty array []
- Do NOT include a date field - the date will be set programmatically"
# Call OpenRouter API
log "Calling OpenRouter API to generate changelog..."
# Prepare the JSON payload properly (jq -Rs JSON-escapes the whole prompt)
PROMPT_ESCAPED=$(echo "$PROMPT" | jq -Rs .)
REQUEST_BODY=$(cat <<EOF
{
"model": "google/gemini-2.5-flash-preview-09-2025",
"messages": [
{
"role": "user",
"content": $PROMPT_ESCAPED
}
],
"temperature": 0.3
}
EOF
)
# Debug: Check API key format (first 10 chars only)
API_KEY_PREFIX="${OPENROUTER_API_KEY:0:10}..."
log "Using API key: $API_KEY_PREFIX (length: ${#OPENROUTER_API_KEY})"
set +e # Temporarily disable exit on error to check curl response
RESPONSE=$(curl -s -w "\nHTTP_CODE:%{http_code}" -X POST "https://openrouter.ai/api/v1/chat/completions" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $OPENROUTER_API_KEY" \
    -d "$REQUEST_BODY")
CURL_EXIT_CODE=$?
# Extract HTTP code and response body
HTTP_CODE=$(echo "$RESPONSE" | grep -o "HTTP_CODE:[0-9]*" | cut -d: -f2)
RESPONSE_BODY=$(echo "$RESPONSE" | sed '/HTTP_CODE:/d')
set -e # Re-enable exit on error
log "HTTP Status Code: $HTTP_CODE"
# Check if API call succeeded
if [ $CURL_EXIT_CODE -ne 0 ] || [ -z "$RESPONSE_BODY" ]; then
    error "Failed to call OpenRouter API"
    if [ $CURL_EXIT_CODE -ne 0 ]; then
        echo "Network error (curl exit code: $CURL_EXIT_CODE)"
    else
        echo "Empty response from API"
    fi
    exit 1
fi
# Check for API errors in response
if echo "$RESPONSE_BODY" | jq -e '.error' > /dev/null 2>&1; then
    error "OpenRouter API error:"
    ERROR_MESSAGE=$(echo "$RESPONSE_BODY" | jq -r '.error.message // .error' 2>/dev/null || echo "$RESPONSE_BODY")
    echo "$ERROR_MESSAGE"
    echo ""
    error "Full API response:"
    echo "$RESPONSE_BODY" | jq '.' 2>/dev/null || echo "$RESPONSE_BODY"
    echo ""
    error "The API key may be invalid or expired. Please verify your OpenRouter API key at https://openrouter.ai/keys"
    echo ""
    error "To test your API key manually, run:"
    echo " curl https://openrouter.ai/api/v1/chat/completions \\"
    echo " -H \"Content-Type: application/json\" \\"
    echo " -H \"Authorization: Bearer YOUR_API_KEY\" \\"
    echo " -d '{\"model\": \"google/gemini-2.5-flash-preview-09-2025\", \"messages\": [{\"role\": \"user\", \"content\": \"test\"}]}'"
    exit 1
fi
# Extract JSON from response
JSON_CONTENT=$(echo "$RESPONSE_BODY" | jq -r '.choices[0].message.content' 2>/dev/null)
# Check if content was extracted
if [ -z "$JSON_CONTENT" ] || [ "$JSON_CONTENT" = "null" ]; then
    error "Failed to extract content from API response"
    echo "Response: $RESPONSE_BODY"
    exit 1
fi
# Try to extract JSON if it's wrapped in markdown code blocks
if echo "$JSON_CONTENT" | grep -q '```json'; then
    JSON_CONTENT=$(echo "$JSON_CONTENT" | sed -n '/```json/,/```/p' | sed '1d;$d')
elif echo "$JSON_CONTENT" | grep -q '```'; then
    JSON_CONTENT=$(echo "$JSON_CONTENT" | sed -n '/```/,/```/p' | sed '1d;$d')
fi
# Validate JSON
if ! echo "$JSON_CONTENT" | jq . > /dev/null 2>&1; then
    error "Invalid JSON response from API:"
    echo "$JSON_CONTENT"
    exit 1
fi
# Parse JSON
NEW_VERSION=$(echo "$JSON_CONTENT" | jq -r '.version')
BUMP_TYPE=$(echo "$JSON_CONTENT" | jq -r '.bump_type')
ADDED=$(echo "$JSON_CONTENT" | jq -r '.added[]?' | sed 's/^/- /')
CHANGED=$(echo "$JSON_CONTENT" | jq -r '.changed[]?' | sed 's/^/- /')
FIXED=$(echo "$JSON_CONTENT" | jq -r '.fixed[]?' | sed 's/^/- /')
log "Generated version: $NEW_VERSION ($BUMP_TYPE bump)"
log "Date: $TODAY_DATE"
# Validate version format
if ! echo "$NEW_VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
    error "Invalid version format: $NEW_VERSION"
    exit 1
fi
# Validate bump type
if [ "$BUMP_TYPE" != "minor" ] && [ "$BUMP_TYPE" != "patch" ]; then
    error "Invalid bump type: $BUMP_TYPE (must be 'minor' or 'patch')"
    exit 1
fi
# Update Makefile
log "Updating Makefile..."
if [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS sed requires backup extension
    sed -i '' "s/^VERSION := .*/VERSION := $NEW_VERSION/" "$MAKEFILE"
else
    # Linux sed
    sed -i "s/^VERSION := .*/VERSION := $NEW_VERSION/" "$MAKEFILE"
fi
success "Makefile updated to version $NEW_VERSION"
# Update CHANGELOG.md
log "Updating CHANGELOG.md..."
# Create changelog entry
CHANGELOG_ENTRY="## [$NEW_VERSION] - $TODAY_DATE
### Added
"
if [ -n "$ADDED" ]; then
    CHANGELOG_ENTRY+="$ADDED"$'\n'
else
    # FIX: use ANSI-C quoting — "\n" inside double quotes is a literal
    # backslash-n and leaked into CHANGELOG.md for empty categories.
    CHANGELOG_ENTRY+=$'\n'
fi
CHANGELOG_ENTRY+="
### Changed
"
if [ -n "$CHANGED" ]; then
    CHANGELOG_ENTRY+="$CHANGED"$'\n'
else
    CHANGELOG_ENTRY+=$'\n' # FIX: was "\n" (literal backslash-n)
fi
CHANGELOG_ENTRY+="
### Deprecated
### Removed
### Fixed
"
if [ -n "$FIXED" ]; then
    CHANGELOG_ENTRY+="$FIXED"$'\n'
else
    CHANGELOG_ENTRY+=$'\n' # FIX: was "\n" (literal backslash-n)
fi
CHANGELOG_ENTRY+="
"
# Save preview to temp file for pre-push hook
PREVIEW_FILE="$REPO_ROOT/.changelog_preview.tmp"
echo "$CHANGELOG_ENTRY" > "$PREVIEW_FILE"
echo "$NEW_VERSION" > "$REPO_ROOT/.changelog_version.tmp"
# Insert after [Unreleased] section using awk (more portable)
# Find the line number after [Unreleased] section (after the "### Fixed" line)
INSERT_LINE=$(awk '/^## \[Unreleased\]/{found=1} found && /^### Fixed$/{print NR+1; exit}' "$CHANGELOG_FILE")
if [ -z "$INSERT_LINE" ]; then
    # Fallback: insert after line 16 (after [Unreleased] section)
    INSERT_LINE=16
fi
# Use a temp file approach to insert multiline content
TMP_FILE=$(mktemp)
{
    head -n $((INSERT_LINE - 1)) "$CHANGELOG_FILE"
    printf '%s' "$CHANGELOG_ENTRY"
    tail -n +$INSERT_LINE "$CHANGELOG_FILE"
} > "$TMP_FILE"
mv "$TMP_FILE" "$CHANGELOG_FILE"
success "CHANGELOG.md updated with version $NEW_VERSION"
log "Changelog update complete!"
log "New version: $NEW_VERSION"
log "Bump type: $BUMP_TYPE"