mirror of
https://github.com/craigerl/aprsd.git
synced 2026-03-31 04:05:32 -04:00
Add stats_store_interval config option to control how frequently the statsstore.json file is written to disk. Default remains 10 seconds for backward compatibility. This allows reducing disk I/O in production deployments and can help avoid potential file corruption issues when external processes read the stats file.
204 lines · 7.0 KiB · Python
import datetime
|
|
import logging
|
|
import threading
|
|
import time
|
|
|
|
import requests
|
|
from loguru import logger
|
|
from oslo_config import cfg
|
|
|
|
from aprsd.packets import seen_list
|
|
from aprsd.stats import collector
|
|
from aprsd.threads import APRSDThread
|
|
from aprsd.utils import objectstore
|
|
|
|
CONF = cfg.CONF
|
|
LOG = logging.getLogger('APRSD')
|
|
LOGU = logger
|
|
|
|
|
|
class StatsStore(objectstore.ObjectStoreMixin):
|
|
"""Container to save the stats from the collector."""
|
|
|
|
def __init__(self):
|
|
self.lock = threading.RLock()
|
|
|
|
def add(self, stats: dict):
|
|
with self.lock:
|
|
self.data = stats
|
|
|
|
|
|
class APRSDStatsStoreThread(APRSDThread):
|
|
"""Save APRSD Stats to disk periodically."""
|
|
|
|
daemon = False
|
|
|
|
def __init__(self):
|
|
super().__init__('StatsStore')
|
|
# Use config value for period, default to 10 seconds
|
|
self.period = CONF.stats_store_interval
|
|
|
|
def loop(self):
|
|
stats = collector.Collector().collect()
|
|
ss = StatsStore()
|
|
ss.add(stats)
|
|
ss.save()
|
|
|
|
self.wait()
|
|
return True
|
|
|
|
|
|
class APRSDPushStatsThread(APRSDThread):
|
|
"""Push the local stats to a remote API."""
|
|
|
|
def __init__(
|
|
self, push_url=None, frequency_seconds=None, send_packetlist: bool = False
|
|
):
|
|
super().__init__('PushStats')
|
|
self.push_url = push_url if push_url else CONF.push_stats.push_url
|
|
self.period = (
|
|
frequency_seconds
|
|
if frequency_seconds
|
|
else CONF.push_stats.frequency_seconds
|
|
)
|
|
self.send_packetlist = send_packetlist
|
|
|
|
def loop(self):
|
|
stats_json = collector.Collector().collect(serializable=True)
|
|
url = f'{self.push_url}/stats'
|
|
headers = {'Content-Type': 'application/json'}
|
|
# Remove the PacketList section to reduce payload size
|
|
if not self.send_packetlist:
|
|
if 'PacketList' in stats_json:
|
|
del stats_json['PacketList']['packets']
|
|
|
|
now = datetime.datetime.now()
|
|
time_format = '%m-%d-%Y %H:%M:%S'
|
|
stats = {
|
|
'time': now.strftime(time_format),
|
|
'stats': stats_json,
|
|
}
|
|
|
|
try:
|
|
response = requests.post(url, json=stats, headers=headers, timeout=5)
|
|
response.raise_for_status()
|
|
|
|
if response.status_code == 200:
|
|
LOGU.info(f'Successfully pushed stats to {self.push_url}')
|
|
else:
|
|
LOGU.warning(
|
|
f'Failed to push stats to {self.push_url}: HTTP {response.status_code}'
|
|
)
|
|
|
|
except requests.exceptions.RequestException as e:
|
|
LOGU.error(f'Error pushing stats to {self.push_url}: {e}')
|
|
except Exception as e:
|
|
LOGU.error(f'Unexpected error in stats push: {e}')
|
|
|
|
self.wait()
|
|
return True
|
|
|
|
|
|
class StatsLogThread(APRSDThread):
|
|
"""Log the stats from the PacketList."""
|
|
|
|
period = 10
|
|
|
|
def __init__(self):
|
|
super().__init__('PacketStatsLog')
|
|
self._last_total_rx = 0
|
|
self.start_time = time.time()
|
|
|
|
def loop(self):
|
|
# log the stats every 10 seconds
|
|
stats_json = collector.Collector().collect(serializable=True)
|
|
stats = stats_json['PacketList']
|
|
total_rx = stats['rx']
|
|
rx_delta = total_rx - self._last_total_rx
|
|
rate = rx_delta / self.period
|
|
|
|
# Get unique callsigns count from SeenList stats
|
|
seen_list_instance = seen_list.SeenList()
|
|
# stats() returns data while holding lock internally, so copy it immediately
|
|
seen_list_stats = seen_list_instance.stats()
|
|
seen_list_instance.save()
|
|
# Copy the stats to avoid holding references to locked data
|
|
seen_list_stats = seen_list_stats.copy()
|
|
unique_callsigns_count = len(seen_list_stats)
|
|
|
|
# Calculate uptime
|
|
elapsed = time.time() - self.start_time
|
|
elapsed_minutes = elapsed / 60
|
|
elapsed_hours = elapsed / 3600
|
|
|
|
# Log summary stats
|
|
LOGU.opt(colors=True).info(
|
|
f'<green>RX Rate: {rate:.2f} pps</green> '
|
|
f'<yellow>Total RX: {total_rx}</yellow> '
|
|
f'<red>RX Last {self.period} secs: {rx_delta}</red> '
|
|
)
|
|
LOGU.opt(colors=True).info(
|
|
f'<cyan>Uptime: {elapsed:.0f}s ({elapsed_minutes:.1f}m / {elapsed_hours:.2f}h)</cyan> '
|
|
f'<magenta>Unique Callsigns: {unique_callsigns_count}</magenta>',
|
|
)
|
|
self._last_total_rx = total_rx
|
|
|
|
# Log individual type stats, sorted by RX count (descending)
|
|
sorted_types = sorted(
|
|
stats['types'].items(), key=lambda x: x[1]['rx'], reverse=True
|
|
)
|
|
for k, v in sorted_types:
|
|
# Calculate percentage of this packet type compared to total RX
|
|
percentage = (v['rx'] / total_rx * 100) if total_rx > 0 else 0.0
|
|
# Format values first, then apply colors
|
|
packet_type_str = f'{k:<15}'
|
|
rx_count_str = f'{v["rx"]:6d}'
|
|
tx_count_str = f'{v["tx"]:6d}'
|
|
percentage_str = f'{percentage:5.1f}%'
|
|
# Use different colors for RX count based on threshold (matching mqtt_injest.py)
|
|
rx_color_tag = (
|
|
'green' if v['rx'] > 100 else 'yellow' if v['rx'] > 10 else 'red'
|
|
)
|
|
LOGU.opt(colors=True).info(
|
|
f' <cyan>{packet_type_str}</cyan>: '
|
|
f'<{rx_color_tag}>RX: {rx_count_str}</{rx_color_tag}> '
|
|
f'<red>TX: {tx_count_str}</red> '
|
|
f'<magenta>({percentage_str})</magenta>',
|
|
)
|
|
|
|
# Extract callsign counts from seen_list stats
|
|
callsign_counts = {}
|
|
for callsign, data in seen_list_stats.items():
|
|
if isinstance(data, dict) and 'count' in data:
|
|
callsign_counts[callsign] = data['count']
|
|
|
|
# Sort callsigns by packet count (descending) and get top 10
|
|
sorted_callsigns = sorted(
|
|
callsign_counts.items(), key=lambda x: x[1], reverse=True
|
|
)[:10]
|
|
|
|
# Log top 10 callsigns
|
|
if sorted_callsigns:
|
|
LOGU.opt(colors=True).info('<cyan>Top 10 Callsigns by Packet Count:</cyan>')
|
|
total_ranks = len(sorted_callsigns)
|
|
for rank, (callsign, count) in enumerate(sorted_callsigns, 1):
|
|
# Calculate percentage of this callsign compared to total RX
|
|
percentage = (count / total_rx * 100) if total_rx > 0 else 0.0
|
|
# Use different colors based on rank: most packets (rank 1) = red,
|
|
# least packets (last rank) = green, middle = yellow
|
|
if rank == 1:
|
|
count_color_tag = 'red'
|
|
elif rank == total_ranks:
|
|
count_color_tag = 'green'
|
|
else:
|
|
count_color_tag = 'yellow'
|
|
LOGU.opt(colors=True).info(
|
|
f' <cyan>{rank:2d}.</cyan> '
|
|
f'<white>{callsign:<12}</white>: '
|
|
f'<{count_color_tag}>{count:6d} packets</{count_color_tag}> '
|
|
f'<magenta>({percentage:5.1f}%)</magenta>',
|
|
)
|
|
|
|
self.wait()
|
|
return True
|