Service registry & discovery system that aggregates infrastructure metadata from Incus, K8s, APISIX, and BunnyCDN into NocoDB. Includes FastAPI HTTP API, systemd timer for 15-min auto-sync, and dual-mode collectors (REST API for container deployment, CLI/SSH fallback for local use). Deployed to jp1:infra-tool with Tailscale socket proxy for host network visibility. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
102 lines
3.3 KiB
Python
"""Orchestrate collectors → NocoDB sync."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import click
|
|
|
|
import nocodb_client
|
|
from collectors import incus, apisix, bunnycdn, k8s
|
|
|
|
|
|
def _sync_table(table: str, records: list[dict], *, dry_run: bool = False, source: str = "") -> dict:
    """Upsert *records* into the NocoDB *table* and retire rows that vanished.

    Records without a "Title" field are ignored. In dry-run mode nothing is
    written; each would-be upsert is echoed and counted under "skipped".
    Returns a stats dict with created/updated/skipped tallies.
    """
    tally = {"created": 0, "updated": 0, "skipped": 0}
    titles_seen: set[str] = set()

    for record in records:
        name = record.get("Title", "")
        if not name:
            continue
        titles_seen.add(name)
        if dry_run:
            click.echo(f" [DRY] {table}: {name}")
            tally["skipped"] += 1
        else:
            outcome = nocodb_client.upsert(table, name, record)
            # upsert may report actions beyond created/updated — count them too
            tally[outcome] = tally.get(outcome, 0) + 1

    # mark rows not seen in this scan as unknown (scoped by source)
    # infra_containers has no 'source' column — all rows are from incus
    if not dry_run:
        if table == "infra_services" and source:
            nocodb_client.mark_unseen(table, titles_seen, source_filter=source)
        elif table == "infra_containers":
            nocodb_client.mark_unseen(table, titles_seen)

    return tally
def sync_incus(*, dry_run: bool = False) -> dict:
    """Collect Incus containers and sync them into infra_containers."""
    click.echo("Collecting Incus containers...")
    containers = incus.collect()
    click.echo(f" Found {len(containers)} containers")
    return _sync_table("infra_containers", containers, dry_run=dry_run, source="incus")
def sync_k8s(*, dry_run: bool = False) -> dict:
    """Collect Kubernetes services and sync them into infra_services."""
    click.echo("Collecting K8s services...")
    svc_records = k8s.collect_services()
    click.echo(f" Found {len(svc_records)} services")
    return _sync_table("infra_services", svc_records, dry_run=dry_run, source="k8s")
def sync_apisix(*, dry_run: bool = False) -> dict:
    """Sync APISIX routes, then the gateway services derived from them.

    Returns {"routes": <stats>, "services": <stats>}.
    """
    click.echo("Collecting APISIX routes...")
    route_records = apisix.collect_routes()
    click.echo(f" Found {len(route_records)} routes")
    route_stats = _sync_table("infra_routes", route_records, dry_run=dry_run)

    click.echo("Deriving APISIX gateway services...")
    service_records = apisix.collect_services()
    service_stats = _sync_table("infra_services", service_records, dry_run=dry_run, source="apisix")

    return {"routes": route_stats, "services": service_stats}
def sync_bunnycdn(*, dry_run: bool = False) -> dict:
    """Sync BunnyCDN pull zones, then the CDN services derived from them.

    Returns {"zones": <stats>, "services": <stats>}.
    """
    click.echo("Collecting BunnyCDN zones...")
    zone_records = bunnycdn.collect_zones()
    click.echo(f" Found {len(zone_records)} zones")
    zone_stats = _sync_table("infra_cdn_zones", zone_records, dry_run=dry_run)

    click.echo("Deriving CDN services...")
    service_records = bunnycdn.collect_services()
    service_stats = _sync_table("infra_services", service_records, dry_run=dry_run, source="bunnycdn")

    return {"zones": zone_stats, "services": service_stats}
# Dispatch table: source name (as given on the CLI / API) → its sync function.
SYNC_MAP = {
    "incus": sync_incus,
    "k8s": sync_k8s,
    "apisix": sync_apisix,
    "bunnycdn": sync_bunnycdn,
}

# collection order: incus → k8s → apisix → bunnycdn
SYNC_ORDER = ["incus", "k8s", "apisix", "bunnycdn"]
def sync_all(*, dry_run: bool = False, source: str | None = None) -> dict:
    """Run every collector in SYNC_ORDER (or just *source*) and gather stats.

    A failing collector is logged and recorded as {"error": ...} so the
    remaining sources still run; an unrecognized source name is reported
    on stderr and skipped.
    """
    outcome: dict = {}
    selected = [source] if source else SYNC_ORDER
    for name in selected:
        if name not in SYNC_MAP:
            click.echo(f"Unknown source: {name}", err=True)
            continue
        try:
            outcome[name] = SYNC_MAP[name](dry_run=dry_run)
        except Exception as e:
            click.echo(f" ERROR ({name}): {e}", err=True)
            outcome[name] = {"error": str(e)}
    return outcome