// stargazer/tier1/mod.rs
1//! Tier 1: discovery and sweep.
2//!
3//! Lightweight polling of public data sources to build a continuously updated
4//! picture of which D-STAR reflectors exist and which are currently active.
5//!
6//! Three data sources are polled on independent intervals:
7//!
8//! - **Pi-Star hosts** (`DStar_Hosts.json`) — canonical list of reflector
9//! addresses, polled daily.
10//! - **XLX API** (`xlxapi.rlx.lu`) — XML feed of XLX reflector status and
11//! connected nodes, polled every 10 minutes.
12//! - **ircDDB last-heard** (`status.ircddb.net`) — HTML page of recent D-STAR
13//! activity across the network, scraped every 60 seconds.
14//!
15//! Discovered reflectors and activity observations are written to the
16//! `reflectors` and `activity_log` `PostgreSQL` tables, which drive Tier 2
17//! monitoring decisions.
18//!
19//! # Error handling
20//!
21//! Individual fetch failures are logged at `warn` level and retried on the next
22//! interval tick. A single source going down does not affect the other two —
23//! `tokio::select!` fires whichever timer expires next, regardless of prior
24//! failures.
25
26mod error;
27mod ircddb;
28mod pistar;
29mod xlx_api;
30
31use std::time::Duration;
32
33use crate::config::Tier1Config;
34
35/// Runs the Tier 1 discovery sweep loop.
36///
37/// Spawns three concurrent polling timers — one per data source — and runs
38/// until the task is cancelled. Each timer fires independently at the interval
39/// specified in `config`. When a timer fires, the corresponding fetcher runs
40/// to completion; errors are logged but never propagated, so a transient
41/// failure in one source does not block the others.
42///
43/// A shared `reqwest::Client` is used across all fetchers to benefit from
44/// connection pooling and keep-alive.
45///
46/// # Errors
47///
48/// Returns an error only if all three fetchers encounter a non-retryable
49/// condition simultaneously (currently unreachable — the function runs
50/// indefinitely until cancelled).
51pub(crate) async fn run(
52 config: Tier1Config,
53 pool: sqlx::PgPool,
54) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
55 tracing::info!(
56 pistar_interval_secs = config.pistar,
57 xlx_api_interval_secs = config.xlx_api,
58 ircddb_interval_secs = config.ircddb,
59 "tier1 discovery sweep starting"
60 );
61
62 // Shared HTTP client with reasonable timeouts for public API polling.
63 let client = reqwest::Client::builder()
64 .timeout(Duration::from_secs(30))
65 .user_agent("stargazer/0.1 (D-STAR observatory)")
66 .build()
67 .map_err(|e| -> Box<dyn std::error::Error + Send + Sync> { Box::new(e) })?;
68
69 // Independent interval timers for each data source. `tick()` fires
70 // immediately on the first call, so each source is polled once at startup
71 // before settling into the configured cadence.
72 let mut pistar_interval = tokio::time::interval(Duration::from_secs(config.pistar));
73 let mut xlx_interval = tokio::time::interval(Duration::from_secs(config.xlx_api));
74 let mut ircddb_interval = tokio::time::interval(Duration::from_secs(config.ircddb));
75
76 loop {
77 tokio::select! {
78 _ = pistar_interval.tick() => {
79 match pistar::fetch_and_store(&client, &pool).await {
80 Ok(count) => {
81 tracing::debug!(count, "pi-star fetch completed");
82 }
83 Err(e) => {
84 tracing::warn!(error = %e, "pi-star fetch failed");
85 }
86 }
87 }
88 _ = xlx_interval.tick() => {
89 match xlx_api::fetch_and_store(&client, &pool).await {
90 Ok(count) => {
91 tracing::debug!(count, "xlx-api fetch completed");
92 }
93 Err(e) => {
94 tracing::warn!(error = %e, "xlx-api fetch failed");
95 }
96 }
97 }
98 _ = ircddb_interval.tick() => {
99 match ircddb::fetch_and_store(&client, &pool).await {
100 Ok(count) => {
101 tracing::debug!(count, "ircddb scrape completed");
102 }
103 Err(e) => {
104 tracing::warn!(error = %e, "ircddb scrape failed");
105 }
106 }
107 }
108 }
109 }
110}