stargazer/main.rs
//! Stargazer: D-STAR network observatory.
//!
//! A headless Kubernetes-deployed service that discovers active D-STAR
//! reflectors, monitors real-time activity, captures voice transmissions with
//! metadata, decodes AMBE audio to MP3, and uploads completed streams to an
//! `SDRTrunk`-compatible Rdio API server for transcription.
//!
//! # Architecture
//!
//! Stargazer operates in three tiers:
//!
//! - **Tier 1** (Discovery): polls Pi-Star, XLX API, and ircDDB to build a
//!   reflector registry.
//! - **Tier 2** (Monitoring): connects to active XLX reflectors via UDP JSON
//!   monitor protocol for real-time activity events.
//! - **Tier 3** (Capture): establishes full D-STAR protocol connections to
//!   capture and decode voice streams.
//!
//! All tiers run as independent tokio tasks. A background upload processor
//! sends completed streams to the Rdio API server. An HTTP API provides
//! operational visibility and manual session control.
//!
//! # Usage
//!
//! ```text
//! stargazer --config stargazer.toml
//! ```
28
29// Dependencies used by submodules but not yet referenced from main.rs directly.
30// Each stub module will use these once implemented; the `use as _` suppresses
31// the unused_crate_dependencies lint per file (not a blanket allow).
32use dstar_gateway as _;
33use dstar_gateway_core as _;
34use mbelib_rs as _;
35use mp3lame_encoder as _;
36use quick_xml as _;
37use reqwest as _;
38use scraper as _;
39use thiserror as _;
40
41mod api;
42mod config;
43mod db;
44mod tier1;
45mod tier2;
46mod tier3;
47mod upload;
48
49use std::path::PathBuf;
50
51use clap::Parser;
52
/// D-STAR network observatory — reflector monitoring and voice capture service.
///
/// Command-line arguments, parsed by clap's derive API. The `version` and
/// `about` metadata are taken from `Cargo.toml` / the doc comment above.
#[derive(Debug, Parser)]
#[command(name = "stargazer", version, about)]
struct Cli {
    /// Path to the TOML configuration file.
    ///
    /// Defaults to `stargazer.toml` in the current working directory;
    /// override with `--config <PATH>`.
    #[arg(long, default_value = "stargazer.toml")]
    config: PathBuf,
}
61
62fn main() {
63 let cli = Cli::parse();
64
65 // Initialize structured JSON logging with env-filter support.
66 // Use `RUST_LOG=stargazer=debug` to control verbosity.
67 tracing_subscriber::fmt()
68 .json()
69 .with_env_filter(
70 tracing_subscriber::EnvFilter::try_from_default_env()
71 .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("stargazer=info")),
72 )
73 .init();
74
75 let config = match config::load(&cli.config) {
76 Ok(c) => c,
77 Err(e) => {
78 tracing::error!(path = %cli.config.display(), error = %e, "failed to load config");
79 std::process::exit(1);
80 }
81 };
82
83 let runtime = match tokio::runtime::Builder::new_multi_thread()
84 .enable_all()
85 .build()
86 {
87 Ok(rt) => rt,
88 Err(e) => {
89 tracing::error!(error = %e, "failed to build tokio runtime");
90 std::process::exit(1);
91 }
92 };
93
94 runtime.block_on(run(config));
95}
96
97/// Top-level async entry point.
98///
99/// Connects to Postgres, runs migrations, spawns all tier orchestrators
100/// and the upload processor, starts the HTTP API, then waits for a
101/// shutdown signal (SIGTERM / ctrl-c).
102///
103/// Each tier runs as an independent tokio task — a crash in Tier 2
104/// does not affect Tier 3. On shutdown, all tasks are aborted
105/// (graceful drain will be refined later).
106async fn run(config: config::Config) {
107 tracing::info!(
108 postgres_url = %config.postgres.url,
109 tier1_pistar = config.tier1.pistar,
110 tier2_max_monitors = config.tier2.max_concurrent_monitors,
111 tier3_max_connections = config.tier3.max_concurrent_connections,
112 server_listen = %config.server.listen,
113 "stargazer starting"
114 );
115
116 // Connect to Postgres and run schema migrations.
117 let pool = match db::connect(&config.postgres).await {
118 Ok(p) => p,
119 Err(e) => {
120 tracing::error!(error = %e, "failed to connect to postgres");
121 return;
122 }
123 };
124 if let Err(e) = db::migrate(&pool).await {
125 tracing::error!(error = %e, "failed to run migrations");
126 return;
127 }
128 tracing::info!("database connected and migrated");
129
130 // Spawn each tier as an independent tokio task so that a failure
131 // in one does not take down the others.
132 let api_handle = tokio::spawn(api::serve(config.server.listen, pool.clone()));
133 let t1_handle = tokio::spawn(tier1::run(config.tier1, pool.clone()));
134 let t2_handle = tokio::spawn(tier2::run(config.tier2, pool.clone()));
135 let t3_handle = tokio::spawn(tier3::run(config.tier3, config.audio, pool.clone()));
136 let upload_handle = tokio::spawn(upload::run(config.rdio, pool.clone()));
137
138 tracing::info!("all tiers started");
139
140 // Wait for shutdown signal.
141 match tokio::signal::ctrl_c().await {
142 Ok(()) => tracing::info!("received ctrl-c, shutting down"),
143 Err(e) => tracing::error!(error = %e, "failed to listen for ctrl-c"),
144 }
145
146 // Abort all tasks. Graceful shutdown (flush pending writes,
147 // disconnect sessions, drain upload queue) will be added later.
148 api_handle.abort();
149 t1_handle.abort();
150 t2_handle.abort();
151 t3_handle.abort();
152 upload_handle.abort();
153}