Collapsed single-struct files into top-level file structs (via AI assistance) to avoid nested struct declarations

This commit is contained in:
Emil Lerch 2025-12-18 13:23:06 -08:00
parent 1800c68081
commit 015418fccb
Signed by: lobo
GPG key ID: A7B62D657EF764F8
19 changed files with 919 additions and 919 deletions

43
src/cache/Cache.zig vendored Normal file
View file

@ -0,0 +1,43 @@
const std = @import("std");
const LRU = @import("LRU.zig");

/// Cache facade over an in-memory LRU. `cache_dir`/`file_threshold`
/// anticipate a file tier, but no file-backed storage exists in this file.
const Cache = @This();

allocator: std.mem.Allocator,
lru: LRU,
cache_dir: []const u8, // owned copy; freed in deinit
file_threshold: usize, // currently unused here

pub const Config = struct {
    max_entries: usize = 10_000,
    file_threshold: usize = 1024,
    cache_dir: []const u8,
};

/// Creates `config.cache_dir` (if absent) and an empty LRU.
/// `config.cache_dir` is copied; the caller keeps its slice.
pub fn init(allocator: std.mem.Allocator, config: Config) !Cache {
    std.fs.makeDirAbsolute(config.cache_dir) catch |err| {
        if (err != error.PathAlreadyExists) return err;
    };
    var lru = try LRU.init(allocator, config.max_entries);
    // Don't leak the LRU if duplicating cache_dir fails below.
    errdefer lru.deinit();
    const cache_dir = try allocator.dupe(u8, config.cache_dir);
    return Cache{
        .allocator = allocator,
        .lru = lru,
        .cache_dir = cache_dir,
        .file_threshold = config.file_threshold,
    };
}

/// Returns the cached value for `key`, or null when missing or expired.
/// The slice is owned by the cache; valid until the entry is replaced.
pub fn get(self: *Cache, key: []const u8) ?[]const u8 {
    return self.lru.get(key);
}

/// Stores a copy of `value` under `key`, expiring `ttl_seconds` from now.
/// Saturates instead of trapping on absurdly large TTLs (the original
/// `ttl_seconds * 1000` could overflow and panic in safe builds).
pub fn put(self: *Cache, key: []const u8, value: []const u8, ttl_seconds: u64) !void {
    const now = std.time.milliTimestamp();
    const ttl_ms = ttl_seconds *| 1000;
    const expires = now +| (std.math.cast(i64, ttl_ms) orelse std.math.maxInt(i64));
    try self.lru.put(key, value, expires);
}

pub fn deinit(self: *Cache) void {
    self.lru.deinit();
    self.allocator.free(self.cache_dir);
}

106
src/cache/LRU.zig vendored Normal file
View file

@ -0,0 +1,106 @@
const std = @import("std");

/// In-memory string map with per-entry expiry and access-count-based
/// eviction (approximate LRU). Keys and values are copied on insert.
const LRU = @This();

allocator: std.mem.Allocator,
map: std.StringHashMap(Entry),
max_entries: usize,

const Entry = struct {
    value: []const u8,
    expires: i64, // milliTimestamp after which the entry is stale
    access_count: u64, // bumped on every hit; lowest count is evicted first
};

pub fn init(allocator: std.mem.Allocator, max_entries: usize) !LRU {
    return LRU{
        .allocator = allocator,
        .map = std.StringHashMap(Entry).init(allocator),
        .max_entries = max_entries,
    };
}

/// Returns the value for `key`, or null if absent or expired.
/// Expired entries are removed on access. The returned slice is owned
/// by the map and valid until the entry is removed or replaced.
pub fn get(self: *LRU, key: []const u8) ?[]const u8 {
    const entry = self.map.getPtr(key) orelse return null;
    const now = std.time.milliTimestamp();
    if (now > entry.expires) {
        self.remove(key);
        return null;
    }
    entry.access_count += 1;
    return entry.value;
}

/// Inserts copies of `key` and `value`, replacing (and freeing) any
/// previous entry for the same key, evicting one entry when full.
pub fn put(self: *LRU, key: []const u8, value: []const u8, expires: i64) !void {
    // remove() frees both the stored key and value. The previous code
    // freed only the value and leaked the duplicated key slice.
    self.remove(key);
    if (self.map.count() >= self.max_entries) {
        self.evictOldest();
    }
    const key_copy = try self.allocator.dupe(u8, key);
    errdefer self.allocator.free(key_copy);
    const value_copy = try self.allocator.dupe(u8, value);
    errdefer self.allocator.free(value_copy);
    try self.map.put(key_copy, .{
        .value = value_copy,
        .expires = expires,
        .access_count = 0,
    });
}

// Evicts the entry with the lowest access count (approximate LRU).
fn evictOldest(self: *LRU) void {
    var oldest_key: ?[]const u8 = null;
    var oldest_access: u64 = std.math.maxInt(u64);
    var it = self.map.iterator();
    while (it.next()) |entry| {
        if (entry.value_ptr.access_count < oldest_access) {
            oldest_access = entry.value_ptr.access_count;
            oldest_key = entry.key_ptr.*;
        }
    }
    if (oldest_key) |key| {
        self.remove(key);
    }
}

// Removes `key` (if present) and frees the stored key and value slices.
fn remove(self: *LRU, key: []const u8) void {
    if (self.map.fetchRemove(key)) |kv| {
        self.allocator.free(kv.value.value);
        self.allocator.free(kv.key);
    }
}

pub fn deinit(self: *LRU) void {
    var it = self.map.iterator();
    while (it.next()) |entry| {
        self.allocator.free(entry.value_ptr.value);
        self.allocator.free(entry.key_ptr.*);
    }
    self.map.deinit();
}

test "LRU basic operations" {
    var lru = try LRU.init(std.testing.allocator, 3);
    defer lru.deinit();
    try lru.put("key1", "value1", 9999999999999);
    try std.testing.expectEqualStrings("value1", lru.get("key1").?);
}

test "LRU eviction" {
    var lru = try LRU.init(std.testing.allocator, 2);
    defer lru.deinit();
    try lru.put("key1", "value1", 9999999999999);
    try lru.put("key2", "value2", 9999999999999);
    try lru.put("key3", "value3", 9999999999999);
    try std.testing.expect(lru.get("key1") == null);
}

test "LRU overwrite frees old key and value" {
    var lru = try LRU.init(std.testing.allocator, 2);
    defer lru.deinit();
    try lru.put("key1", "value1", 9999999999999);
    try lru.put("key1", "value2", 9999999999999);
    try std.testing.expectEqualStrings("value2", lru.get("key1").?);
}

43
src/cache/cache.zig vendored
View file

@ -1,43 +0,0 @@
const std = @import("std");
const LRU = @import("lru.zig").LRU;
/// Pre-refactor nested-struct cache facade over the in-memory LRU.
/// `cache_dir`/`file_threshold` anticipate a file tier, but no
/// file-backed storage is implemented in this file.
pub const Cache = struct {
    allocator: std.mem.Allocator,
    lru: LRU,
    cache_dir: []const u8, // owned copy; freed in deinit
    file_threshold: usize, // unused within this struct

    pub const Config = struct {
        max_entries: usize = 10_000,
        file_threshold: usize = 1024,
        cache_dir: []const u8,
    };

    /// Creates cache_dir (if absent) and builds an empty LRU.
    /// NOTE(review): if the dupe of cache_dir fails, the freshly built
    /// LRU leaks — no errdefer between the two `try`s.
    pub fn init(allocator: std.mem.Allocator, config: Config) !Cache {
        std.fs.makeDirAbsolute(config.cache_dir) catch |err| {
            if (err != error.PathAlreadyExists) return err;
        };
        return Cache{
            .allocator = allocator,
            .lru = try LRU.init(allocator, config.max_entries),
            .cache_dir = try allocator.dupe(u8, config.cache_dir),
            .file_threshold = config.file_threshold,
        };
    }

    /// Returns the cached value for `key`, or null when missing/expired.
    pub fn get(self: *Cache, key: []const u8) ?[]const u8 {
        return self.lru.get(key);
    }

    /// Stores `value` with a TTL relative to now.
    /// NOTE(review): `ttl_seconds * 1000` can overflow u64 and the
    /// @intCast can trap in safe builds — confirm callers bound the TTL.
    pub fn put(self: *Cache, key: []const u8, value: []const u8, ttl_seconds: u64) !void {
        const now = std.time.milliTimestamp();
        const expires = now + @as(i64, @intCast(ttl_seconds * 1000));
        try self.lru.put(key, value, expires);
    }

    pub fn deinit(self: *Cache) void {
        self.lru.deinit();
        self.allocator.free(self.cache_dir);
    }
};

106
src/cache/lru.zig vendored
View file

@ -1,106 +0,0 @@
const std = @import("std");
/// Pre-refactor nested-struct version of the in-memory TTL map with
/// access-count-based eviction. Keys and values are copied on insert.
pub const LRU = struct {
    allocator: std.mem.Allocator,
    map: std.StringHashMap(Entry),
    max_entries: usize,

    const Entry = struct {
        value: []const u8,
        expires: i64, // milliTimestamp after which the entry is stale
        access_count: u64, // bumped on every hit; lowest count evicted first
    };

    pub fn init(allocator: std.mem.Allocator, max_entries: usize) !LRU {
        return LRU{
            .allocator = allocator,
            .map = std.StringHashMap(Entry).init(allocator),
            .max_entries = max_entries,
        };
    }

    /// Returns the value for `key`, or null if absent or expired;
    /// expired entries are removed on access.
    pub fn get(self: *LRU, key: []const u8) ?[]const u8 {
        var entry = self.map.getPtr(key) orelse return null;
        const now = std.time.milliTimestamp();
        if (now > entry.expires) {
            self.remove(key);
            return null;
        }
        entry.access_count += 1;
        return entry.value;
    }

    /// Inserts copies of `key` and `value`.
    /// NOTE(review): on overwrite this frees only the old value, then
    /// `map.remove`s the entry — the previously duplicated key slice is
    /// never freed (leak). `remove()`/`fetchRemove` would free both.
    /// Also no errdefer: `key_copy` leaks if the value dupe or map.put fails.
    pub fn put(self: *LRU, key: []const u8, value: []const u8, expires: i64) !void {
        if (self.map.get(key)) |old_entry| {
            self.allocator.free(old_entry.value);
            _ = self.map.remove(key);
        }
        if (self.map.count() >= self.max_entries) {
            self.evictOldest();
        }
        const key_copy = try self.allocator.dupe(u8, key);
        const value_copy = try self.allocator.dupe(u8, value);
        try self.map.put(key_copy, .{
            .value = value_copy,
            .expires = expires,
            .access_count = 0,
        });
    }

    // Evicts the entry with the lowest access count (approximate LRU).
    fn evictOldest(self: *LRU) void {
        var oldest_key: ?[]const u8 = null;
        var oldest_access: u64 = std.math.maxInt(u64);
        var it = self.map.iterator();
        while (it.next()) |entry| {
            if (entry.value_ptr.access_count < oldest_access) {
                oldest_access = entry.value_ptr.access_count;
                oldest_key = entry.key_ptr.*;
            }
        }
        if (oldest_key) |key| {
            self.remove(key);
        }
    }

    // Removes `key` and frees the stored key and value slices.
    fn remove(self: *LRU, key: []const u8) void {
        if (self.map.fetchRemove(key)) |kv| {
            self.allocator.free(kv.value.value);
            self.allocator.free(kv.key);
        }
    }

    pub fn deinit(self: *LRU) void {
        var it = self.map.iterator();
        while (it.next()) |entry| {
            self.allocator.free(entry.value_ptr.value);
            self.allocator.free(entry.key_ptr.*);
        }
        self.map.deinit();
    }
};

test "LRU basic operations" {
    var lru = try LRU.init(std.testing.allocator, 3);
    defer lru.deinit();
    try lru.put("key1", "value1", 9999999999999);
    try std.testing.expectEqualStrings("value1", lru.get("key1").?);
}

test "LRU eviction" {
    var lru = try LRU.init(std.testing.allocator, 2);
    defer lru.deinit();
    try lru.put("key1", "value1", 9999999999999);
    try lru.put("key2", "value2", 9999999999999);
    try lru.put("key3", "value3", 9999999999999);
    try std.testing.expect(lru.get("key1") == null);
}

149
src/http/RateLimiter.zig Normal file
View file

@ -0,0 +1,149 @@
const std = @import("std");

/// Token-bucket rate limiter keyed by client IP string.
/// Thread-safe: `check` takes the internal mutex. Buckets are created
/// full on first sight of an IP and are never expired.
const RateLimiter = @This();

allocator: std.mem.Allocator,
buckets: std.StringHashMap(TokenBucket),
config: Config,
mutex: std.Thread.Mutex,

pub const Config = struct {
    capacity: u32 = 300, // maximum tokens (burst size)
    refill_rate: u32 = 5, // tokens added per interval
    refill_interval_ms: u64 = 200,
};

const TokenBucket = struct {
    tokens: f64,
    capacity: u32,
    last_refill: i64, // milliTimestamp of the last refill

    // Adds tokens proportional to elapsed time, capped at capacity.
    fn refill(self: *TokenBucket, now: i64, rate: u32, interval_ms: u64) void {
        const elapsed = now - self.last_refill;
        const intervals = @as(f64, @floatFromInt(elapsed)) / @as(f64, @floatFromInt(interval_ms));
        const new_tokens = intervals * @as(f64, @floatFromInt(rate));
        self.tokens = @min(
            self.tokens + new_tokens,
            @as(f64, @floatFromInt(self.capacity)),
        );
        self.last_refill = now;
    }

    // Takes `count` tokens if available; returns false when exhausted.
    fn consume(self: *TokenBucket, count: f64) bool {
        if (self.tokens >= count) {
            self.tokens -= count;
            return true;
        }
        return false;
    }
};

pub fn init(allocator: std.mem.Allocator, config: Config) !RateLimiter {
    return RateLimiter{
        .allocator = allocator,
        .buckets = std.StringHashMap(TokenBucket).init(allocator),
        .config = config,
        .mutex = .{},
    };
}

/// Consumes one token for `ip`; returns error.RateLimitExceeded when the
/// bucket is empty. The IP string is copied on first insertion.
pub fn check(self: *RateLimiter, ip: []const u8) !void {
    self.mutex.lock();
    defer self.mutex.unlock();
    const now = std.time.milliTimestamp();
    const result = try self.buckets.getOrPut(ip);
    if (!result.found_existing) {
        // If duplicating the key fails, drop the half-initialized entry;
        // otherwise the map would keep a key aliasing the caller's slice.
        errdefer _ = self.buckets.remove(ip);
        result.key_ptr.* = try self.allocator.dupe(u8, ip);
        result.value_ptr.* = TokenBucket{
            .tokens = @floatFromInt(self.config.capacity),
            .capacity = self.config.capacity,
            .last_refill = now,
        };
    }
    const bucket = result.value_ptr;
    bucket.refill(now, self.config.refill_rate, self.config.refill_interval_ms);
    if (!bucket.consume(1.0)) {
        return error.RateLimitExceeded;
    }
}

pub fn deinit(self: *RateLimiter) void {
    var it = self.buckets.iterator();
    while (it.next()) |entry| {
        self.allocator.free(entry.key_ptr.*);
    }
    self.buckets.deinit();
}

test "rate limiter allows requests within capacity" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 10,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
}

test "rate limiter blocks after capacity exhausted" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 5,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 5) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
}

test "rate limiter refills tokens over time" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 10,
        .refill_rate = 5,
        .refill_interval_ms = 100,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
    std.Thread.sleep(250 * std.time.ns_per_ms);
    try limiter.check("1.2.3.4");
}

test "rate limiter tracks different IPs separately" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 2,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    try limiter.check("1.2.3.4");
    try limiter.check("1.2.3.4");
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
    try limiter.check("5.6.7.8");
    try limiter.check("5.6.7.8");
}

66
src/http/Server.zig Normal file
View file

@ -0,0 +1,66 @@
const std = @import("std");
const httpz = @import("httpz");
const handler = @import("handler.zig");
const RateLimiter = @import("RateLimiter.zig");
const middleware = @import("middleware.zig");

/// HTTP front end: wires httpz routing to the weather handlers and runs
/// the rate-limit middleware before each one.
const Server = @This();

allocator: std.mem.Allocator,
httpz_server: httpz.Server(*Context),
context: Context, // copy of the heap Context handed to httpz (see init)

const Context = struct {
    options: handler.HandleWeatherOptions,
    rate_limiter: *RateLimiter,
};

/// Builds the httpz server and registers the two weather routes.
/// NOTE(review): `ctx` is allocator.create'd and passed to httpz but
/// never destroyed in deinit() — looks like a leak unless httpz takes
/// ownership; confirm against httpz docs. `context` stores a *copy* of
/// that heap value, so mutating `self.context` would not affect what
/// the handlers see.
pub fn init(
    allocator: std.mem.Allocator,
    host: []const u8,
    port: u16,
    options: handler.HandleWeatherOptions,
    rate_limiter: *RateLimiter,
) !Server {
    const ctx = try allocator.create(Context);
    ctx.* = .{
        .options = options,
        .rate_limiter = rate_limiter,
    };
    var httpz_server = try httpz.Server(*Context).init(allocator, .{
        .address = host,
        .port = port,
    }, ctx);
    var router = try httpz_server.router(.{});
    router.get("/", handleWeatherRoot, .{});
    router.get("/:location", handleWeatherLocation, .{});
    return Server{
        .allocator = allocator,
        .httpz_server = httpz_server,
        .context = ctx.*,
    };
}

// Rate-limit first; if the middleware set status 429, skip the handler.
fn handleWeatherRoot(ctx: *Context, req: *httpz.Request, res: *httpz.Response) !void {
    try middleware.rateLimitMiddleware(ctx.rate_limiter, req, res);
    if (res.status == 429) return;
    try handler.handleWeather(&ctx.options, req, res);
}

// Same as handleWeatherRoot, but for the /:location route.
fn handleWeatherLocation(ctx: *Context, req: *httpz.Request, res: *httpz.Response) !void {
    try middleware.rateLimitMiddleware(ctx.rate_limiter, req, res);
    if (res.status == 429) return;
    try handler.handleWeatherLocation(&ctx.options, req, res);
}

/// Blocks, serving requests until the server shuts down.
pub fn listen(self: *Server) !void {
    std.log.info("wttr listening on port {d}", .{self.httpz_server.config.port.?});
    try self.httpz_server.listen();
}

pub fn deinit(self: *Server) void {
    self.httpz_server.deinit();
}

View file

@ -1,6 +1,6 @@
const std = @import("std");
const httpz = @import("httpz");
const Cache = @import("../cache/cache.zig").Cache;
const Cache = @import("../cache/Cache.zig");
const WeatherProvider = @import("../weather/provider.zig").WeatherProvider;
const Resolver = @import("../location/resolver.zig").Resolver;
const Location = @import("../location/resolver.zig").Location;
@ -16,7 +16,7 @@ pub const HandleWeatherOptions = struct {
cache: *Cache,
provider: WeatherProvider,
resolver: *Resolver,
geoip: *@import("../location/geoip.zig").GeoIP,
geoip: *@import("../location/GeoIP.zig"),
};
pub fn handleWeather(

View file

@ -1,6 +1,6 @@
const std = @import("std");
const httpz = @import("httpz");
const RateLimiter = @import("rate_limiter.zig").RateLimiter;
const RateLimiter = @import("RateLimiter.zig");
pub fn rateLimitMiddleware(limiter: *RateLimiter, req: *httpz.Request, res: *httpz.Response) !void {
const ip = req.address.in.sa.addr;

View file

@ -1,149 +0,0 @@
const std = @import("std");
/// Pre-refactor nested-struct token-bucket rate limiter keyed by client
/// IP string. Thread-safe: `check` takes the internal mutex.
pub const RateLimiter = struct {
    allocator: std.mem.Allocator,
    buckets: std.StringHashMap(TokenBucket),
    config: Config,
    mutex: std.Thread.Mutex,

    pub const Config = struct {
        capacity: u32 = 300, // maximum tokens (burst size)
        refill_rate: u32 = 5, // tokens added per interval
        refill_interval_ms: u64 = 200,
    };

    const TokenBucket = struct {
        tokens: f64,
        capacity: u32,
        last_refill: i64, // milliTimestamp of the last refill

        // Adds tokens proportional to elapsed time, capped at capacity.
        fn refill(self: *TokenBucket, now: i64, rate: u32, interval_ms: u64) void {
            const elapsed = now - self.last_refill;
            const intervals = @as(f64, @floatFromInt(elapsed)) / @as(f64, @floatFromInt(interval_ms));
            const new_tokens = intervals * @as(f64, @floatFromInt(rate));
            self.tokens = @min(
                self.tokens + new_tokens,
                @as(f64, @floatFromInt(self.capacity)),
            );
            self.last_refill = now;
        }

        // Takes `count` tokens if available; false when exhausted.
        fn consume(self: *TokenBucket, count: f64) bool {
            if (self.tokens >= count) {
                self.tokens -= count;
                return true;
            }
            return false;
        }
    };

    pub fn init(allocator: std.mem.Allocator, config: Config) !RateLimiter {
        return RateLimiter{
            .allocator = allocator,
            .buckets = std.StringHashMap(TokenBucket).init(allocator),
            .config = config,
            .mutex = .{},
        };
    }

    /// Consumes one token for `ip`; error.RateLimitExceeded when empty.
    /// NOTE(review): if the key dupe fails after getOrPut, the map keeps
    /// a half-initialized entry whose key aliases the caller's slice —
    /// missing errdefer cleanup on that path.
    pub fn check(self: *RateLimiter, ip: []const u8) !void {
        self.mutex.lock();
        defer self.mutex.unlock();
        const now = std.time.milliTimestamp();
        const result = try self.buckets.getOrPut(ip);
        if (!result.found_existing) {
            const ip_copy = try self.allocator.dupe(u8, ip);
            result.key_ptr.* = ip_copy;
            result.value_ptr.* = TokenBucket{
                .tokens = @floatFromInt(self.config.capacity),
                .capacity = self.config.capacity,
                .last_refill = now,
            };
        }
        var bucket = result.value_ptr;
        bucket.refill(now, self.config.refill_rate, self.config.refill_interval_ms);
        if (!bucket.consume(1.0)) {
            return error.RateLimitExceeded;
        }
    }

    pub fn deinit(self: *RateLimiter) void {
        var it = self.buckets.iterator();
        while (it.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
        }
        self.buckets.deinit();
    }
};

test "rate limiter allows requests within capacity" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 10,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
}

test "rate limiter blocks after capacity exhausted" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 5,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 5) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
}

test "rate limiter refills tokens over time" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 10,
        .refill_rate = 5,
        .refill_interval_ms = 100,
    });
    defer limiter.deinit();
    var i: usize = 0;
    while (i < 10) : (i += 1) {
        try limiter.check("1.2.3.4");
    }
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
    std.Thread.sleep(250 * std.time.ns_per_ms);
    try limiter.check("1.2.3.4");
}

test "rate limiter tracks different IPs separately" {
    var limiter = try RateLimiter.init(std.testing.allocator, .{
        .capacity = 2,
        .refill_rate = 1,
        .refill_interval_ms = 1000,
    });
    defer limiter.deinit();
    try limiter.check("1.2.3.4");
    try limiter.check("1.2.3.4");
    try std.testing.expectError(error.RateLimitExceeded, limiter.check("1.2.3.4"));
    try limiter.check("5.6.7.8");
    try limiter.check("5.6.7.8");
}

View file

@ -1,66 +0,0 @@
const std = @import("std");
const httpz = @import("httpz");
const handler = @import("handler.zig");
const RateLimiter = @import("rate_limiter.zig").RateLimiter;
const middleware = @import("middleware.zig");
/// Pre-refactor nested-struct HTTP front end wiring httpz routes to the
/// weather handlers, with rate limiting applied before each one.
pub const Server = struct {
    allocator: std.mem.Allocator,
    httpz_server: httpz.Server(*Context),
    context: Context, // copy of the heap Context handed to httpz (see init)

    const Context = struct {
        options: handler.HandleWeatherOptions,
        rate_limiter: *RateLimiter,
    };

    /// Builds the httpz server and registers the two weather routes.
    /// NOTE(review): `ctx` is allocator.create'd and passed to httpz but
    /// never destroyed in deinit() — possible leak unless httpz takes
    /// ownership; `context` holds a copy, not the pointer.
    pub fn init(
        allocator: std.mem.Allocator,
        host: []const u8,
        port: u16,
        options: handler.HandleWeatherOptions,
        rate_limiter: *RateLimiter,
    ) !Server {
        const ctx = try allocator.create(Context);
        ctx.* = .{
            .options = options,
            .rate_limiter = rate_limiter,
        };
        var httpz_server = try httpz.Server(*Context).init(allocator, .{
            .address = host,
            .port = port,
        }, ctx);
        var router = try httpz_server.router(.{});
        router.get("/", handleWeatherRoot, .{});
        router.get("/:location", handleWeatherLocation, .{});
        return Server{
            .allocator = allocator,
            .httpz_server = httpz_server,
            .context = ctx.*,
        };
    }

    // Rate-limit first; if the middleware set status 429, skip the handler.
    fn handleWeatherRoot(ctx: *Context, req: *httpz.Request, res: *httpz.Response) !void {
        try middleware.rateLimitMiddleware(ctx.rate_limiter, req, res);
        if (res.status == 429) return;
        try handler.handleWeather(&ctx.options, req, res);
    }

    // Same as handleWeatherRoot, but for the /:location route.
    fn handleWeatherLocation(ctx: *Context, req: *httpz.Request, res: *httpz.Response) !void {
        try middleware.rateLimitMiddleware(ctx.rate_limiter, req, res);
        if (res.status == 429) return;
        try handler.handleWeatherLocation(&ctx.options, req, res);
    }

    /// Blocks, serving requests until the server shuts down.
    pub fn listen(self: *Server) !void {
        std.log.info("wttr listening on port {d}", .{self.httpz_server.config.port.?});
        try self.httpz_server.listen();
    }

    pub fn deinit(self: *Server) void {
        self.httpz_server.deinit();
    }
};

131
src/location/Airports.zig Normal file
View file

@ -0,0 +1,131 @@
const std = @import("std");

pub const Airport = struct {
    iata: []const u8,
    name: []const u8,
    latitude: f64,
    longitude: f64,
};

/// IATA-code -> Airport lookup table parsed from OpenFlights-style CSV.
/// The stored key and `Airport.iata` are the same allocation.
const Airports = @This();

allocator: std.mem.Allocator,
airports: std.StringHashMap(Airport),

/// Reads `file_path` (up to 10MB) and parses it via init().
pub fn initFromFile(allocator: std.mem.Allocator, file_path: []const u8) !Airports {
    const file = try std.fs.cwd().openFile(file_path, .{});
    defer file.close();
    const csv_data = try file.readToEndAlloc(allocator, 10 * 1024 * 1024); // 10MB max
    defer allocator.free(csv_data);
    return try init(allocator, csv_data);
}

/// Parses CSV data into the map. All strings are copied, so `csv_data`
/// may be freed after return. Rows that fail to parse are skipped.
pub fn init(allocator: std.mem.Allocator, csv_data: []const u8) !Airports {
    var airports = std.StringHashMap(Airport).init(allocator);
    // On any later failure, free everything inserted so far.
    errdefer {
        var it = airports.iterator();
        while (it.next()) |entry| {
            allocator.free(entry.key_ptr.*);
            allocator.free(entry.value_ptr.name);
        }
        airports.deinit();
    }
    var lines = std.mem.splitScalar(u8, csv_data, '\n');
    while (lines.next()) |line| {
        if (line.len == 0) continue;
        const airport = parseAirportLine(allocator, line) catch continue;
        if (airport.iata.len != 3) {
            // Not a valid IATA code: release the parsed strings instead
            // of leaking them (the original code dropped them silently).
            allocator.free(airport.iata);
            allocator.free(airport.name);
            continue;
        }
        // Later rows win on duplicate codes; free the replaced entry.
        // old.key and old.value.iata are the same allocation — free once.
        if (airports.fetchRemove(airport.iata)) |old| {
            allocator.free(old.key);
            allocator.free(old.value.name);
        }
        errdefer {
            allocator.free(airport.iata);
            allocator.free(airport.name);
        }
        try airports.put(airport.iata, airport);
    }
    return Airports{
        .allocator = allocator,
        .airports = airports,
    };
}

pub fn deinit(self: *Airports) void {
    var it = self.airports.iterator();
    while (it.next()) |entry| {
        self.allocator.free(entry.key_ptr.*); // also frees value.iata (same slice)
        self.allocator.free(entry.value_ptr.name);
    }
    self.airports.deinit();
}

/// Returns the airport for a 3-letter IATA code, or null.
pub fn lookup(self: *Airports, iata_code: []const u8) ?Airport {
    return self.airports.get(iata_code);
}

/// Parses one CSV row; caller owns the returned iata/name slices.
/// NOTE(review): fields are split on every comma — quoted fields that
/// themselves contain commas will mis-parse; confirm the data set.
fn parseAirportLine(allocator: std.mem.Allocator, line: []const u8) !Airport {
    // CSV format: ID,Name,City,Country,IATA,ICAO,Lat,Lon,...
    var fields = std.mem.splitScalar(u8, line, ',');
    _ = fields.next() orelse return error.InvalidFormat; // ID
    const name_quoted = fields.next() orelse return error.InvalidFormat; // Name
    _ = fields.next() orelse return error.InvalidFormat; // City
    _ = fields.next() orelse return error.InvalidFormat; // Country
    const iata_quoted = fields.next() orelse return error.InvalidFormat; // IATA
    _ = fields.next() orelse return error.InvalidFormat; // ICAO
    const lat_str = fields.next() orelse return error.InvalidFormat; // Lat
    const lon_str = fields.next() orelse return error.InvalidFormat; // Lon
    // Remove quotes from fields
    const name = try unquote(allocator, name_quoted);
    errdefer allocator.free(name);
    const iata = try unquote(allocator, iata_quoted);
    errdefer allocator.free(iata);
    // Skip if IATA is "\\N" (null)
    if (std.mem.eql(u8, iata, "\\N")) {
        return error.NoIATA;
    }
    const lat = try std.fmt.parseFloat(f64, lat_str);
    const lon = try std.fmt.parseFloat(f64, lon_str);
    return Airport{
        .iata = iata,
        .name = name,
        .latitude = lat,
        .longitude = lon,
    };
}

// Returns an owned copy of `quoted` with surrounding double quotes removed.
fn unquote(allocator: std.mem.Allocator, quoted: []const u8) ![]const u8 {
    if (quoted.len >= 2 and quoted[0] == '"' and quoted[quoted.len - 1] == '"') {
        return allocator.dupe(u8, quoted[1 .. quoted.len - 1]);
    }
    return allocator.dupe(u8, quoted);
}

test "parseAirportLine valid" {
    const allocator = std.testing.allocator;
    const line = "1,\"Goroka Airport\",\"Goroka\",\"Papua New Guinea\",\"GKA\",\"AYGA\",-6.081689834590001,145.391998291,5282,10,\"U\",\"Pacific/Port_Moresby\",\"airport\",\"OurAirports\"";
    const airport = try Airports.parseAirportLine(allocator, line);
    defer allocator.free(airport.iata);
    defer allocator.free(airport.name);
    try std.testing.expectEqualStrings("GKA", airport.iata);
    try std.testing.expectEqualStrings("Goroka Airport", airport.name);
    try std.testing.expectApproxEqAbs(@as(f64, -6.081689834590001), airport.latitude, 0.0001);
    try std.testing.expectApproxEqAbs(@as(f64, 145.391998291), airport.longitude, 0.0001);
}

test "parseAirportLine with null IATA" {
    const allocator = std.testing.allocator;
    const line = "1,\"Test Airport\",\"City\",\"Country\",\"\\N\",\"ICAO\",0.0,0.0";
    try std.testing.expectError(error.NoIATA, Airports.parseAirportLine(allocator, line));
}

test "Airports lookup" {
    const allocator = std.testing.allocator;
    const csv = "1,\"Munich Airport\",\"Munich\",\"Germany\",\"MUC\",\"EDDM\",48.353802,11.7861,1487,1,\"E\",\"Europe/Berlin\",\"airport\",\"OurAirports\"";
    var db = try Airports.init(allocator, csv);
    defer db.deinit();
    const result = db.lookup("MUC");
    try std.testing.expect(result != null);
    try std.testing.expectEqualStrings("Munich Airport", result.?.name);
    try std.testing.expectApproxEqAbs(@as(f64, 48.353802), result.?.latitude, 0.0001);
}

146
src/location/GeoCache.zig Normal file
View file

@ -0,0 +1,146 @@
const std = @import("std");

/// Persistent geocoding cache: query string -> resolved coordinates.
/// Optionally loads from / saves to a JSON file on init/deinit.
const GeoCache = @This();

allocator: std.mem.Allocator,
cache: std.StringHashMap(CachedLocation),
cache_file: ?[]const u8, // owned copy of the backing file path, if any

pub const CachedLocation = struct {
    name: []const u8,
    latitude: f64,
    longitude: f64,
};

/// Creates the cache, pre-populating it from `cache_file` when given.
/// A failed load is logged and treated as an empty cache.
pub fn init(allocator: std.mem.Allocator, cache_file: ?[]const u8) !GeoCache {
    var cache = std.StringHashMap(CachedLocation).init(allocator);
    // Load from file if specified
    if (cache_file) |file_path| {
        loadFromFile(allocator, &cache, file_path) catch |err| {
            std.log.warn("Failed to load geocoding cache from {s}: {}", .{ file_path, err });
        };
    }
    return GeoCache{
        .allocator = allocator,
        .cache = cache,
        .cache_file = if (cache_file) |f| try allocator.dupe(u8, f) else null,
    };
}

/// Persists the cache (best-effort) and frees all owned memory.
pub fn deinit(self: *GeoCache) void {
    // Save to file if specified
    if (self.cache_file) |file_path| {
        self.saveToFile(file_path) catch |err| {
            std.log.warn("Failed to save geocoding cache to {s}: {}", .{ file_path, err });
        };
    }
    var it = self.cache.iterator();
    while (it.next()) |entry| {
        self.allocator.free(entry.key_ptr.*);
        self.allocator.free(entry.value_ptr.name);
    }
    self.cache.deinit();
    if (self.cache_file) |f| self.allocator.free(f);
}

/// Returns the cached location for `query`, or null. The `name` slice
/// is owned by the cache and valid until the entry is replaced.
pub fn get(self: *GeoCache, query: []const u8) ?CachedLocation {
    return self.cache.get(query);
}

/// Stores copies of `query` and `location.name`, replacing — and
/// freeing — any previous entry for the same query (the original code
/// leaked the old key and name on overwrite).
pub fn put(self: *GeoCache, query: []const u8, location: CachedLocation) !void {
    const name_copy = try self.allocator.dupe(u8, location.name);
    errdefer self.allocator.free(name_copy);
    if (self.cache.fetchRemove(query)) |old| {
        self.allocator.free(old.key);
        self.allocator.free(old.value.name);
    }
    const key_copy = try self.allocator.dupe(u8, query);
    errdefer self.allocator.free(key_copy);
    try self.cache.put(key_copy, .{
        .name = name_copy,
        .latitude = location.latitude,
        .longitude = location.longitude,
    });
}

// Populates `cache` from a JSON object file: { "query": {name,lat,lon}, ... }.
fn loadFromFile(allocator: std.mem.Allocator, cache: *std.StringHashMap(CachedLocation), file_path: []const u8) !void {
    const file = try std.fs.cwd().openFile(file_path, .{});
    defer file.close();
    const content = try file.readToEndAlloc(allocator, 10 * 1024 * 1024); // 10MB max
    defer allocator.free(content);
    const parsed = try std.json.parseFromSlice(
        std.json.Value,
        allocator,
        content,
        .{},
    );
    defer parsed.deinit();
    var it = parsed.value.object.iterator();
    while (it.next()) |entry| {
        const obj = entry.value_ptr.object;
        const key = try allocator.dupe(u8, entry.key_ptr.*);
        errdefer allocator.free(key);
        const value = CachedLocation{
            .name = try allocator.dupe(u8, obj.get("name").?.string),
            .latitude = obj.get("latitude").?.float,
            .longitude = obj.get("longitude").?.float,
        };
        errdefer allocator.free(value.name);
        try cache.put(key, value);
    }
}

// Serializes the cache to a JSON object file.
// NOTE(review): "{any}" forces default (debug) formatting and may not
// invoke std.json.fmt's custom formatter on the targeted Zig version —
// verify the output is valid JSON.
fn saveToFile(self: *GeoCache, file_path: []const u8) !void {
    const file = try std.fs.cwd().createFile(file_path, .{});
    defer file.close();
    var buffer: [4096]u8 = undefined;
    var file_writer = file.writer(&buffer);
    const writer = &file_writer.interface;
    try writer.writeAll("{\n");
    var it = self.cache.iterator();
    var first = true;
    while (it.next()) |entry| {
        if (!first) try writer.writeAll(",\n");
        first = false;
        try writer.print("  {any}: {any}", .{
            std.json.fmt(entry.key_ptr.*, .{}),
            std.json.fmt(.{
                .name = entry.value_ptr.name,
                .latitude = entry.value_ptr.latitude,
                .longitude = entry.value_ptr.longitude,
            }, .{}),
        });
    }
    try writer.writeAll("\n}\n");
    try writer.flush();
}

test "GeoCache basic operations" {
    const allocator = std.testing.allocator;
    var cache = try GeoCache.init(allocator, null);
    defer cache.deinit();
    // Test put and get
    try cache.put("London", .{
        .name = "London, UK",
        .latitude = 51.5074,
        .longitude = -0.1278,
    });
    const result = cache.get("London");
    try std.testing.expect(result != null);
    try std.testing.expectApproxEqAbs(@as(f64, 51.5074), result.?.latitude, 0.0001);
    try std.testing.expectApproxEqAbs(@as(f64, -0.1278), result.?.longitude, 0.0001);
}

test "GeoCache miss returns null" {
    const allocator = std.testing.allocator;
    var cache = try GeoCache.init(allocator, null);
    defer cache.deinit();
    const result = cache.get("NonExistent");
    try std.testing.expect(result == null);
}

test "GeoCache overwrite frees old entry" {
    const allocator = std.testing.allocator;
    var cache = try GeoCache.init(allocator, null);
    defer cache.deinit();
    try cache.put("X", .{ .name = "first", .latitude = 1.0, .longitude = 2.0 });
    try cache.put("X", .{ .name = "second", .latitude = 3.0, .longitude = 4.0 });
    try std.testing.expectEqualStrings("second", cache.get("X").?.name);
}

187
src/location/GeoIP.zig Normal file
View file

@ -0,0 +1,187 @@
const std = @import("std");

pub const Coordinates = struct {
    latitude: f64,
    longitude: f64,
};

/// Hand-written binding of libmaxminddb's MMDB_s.
/// NOTE(review): field order and sizes must match the installed
/// library's maxminddb.h exactly — verify against the pinned
/// libmaxminddb version; any drift is silent ABI corruption.
pub const MMDB = extern struct {
    filename: [*:0]const u8,
    flags: u32,
    file_content: ?*anyopaque,
    file_size: usize,
    data_section: ?*anyopaque,
    data_section_size: u32,
    metadata_section: ?*anyopaque,
    metadata_section_size: u32,
    full_record_byte_size: u16,
    depth: u16,
    ipv4_start_node: extern struct {
        node_value: u32,
        netmask: u16,
    },
    metadata: extern struct {
        node_count: u32,
        record_size: u16,
        ip_version: u16,
        database_type: [*:0]const u8,
        languages: extern struct {
            count: usize,
            names: [*][*:0]const u8,
        },
        binary_format_major_version: u16,
        binary_format_minor_version: u16,
        build_epoch: u64,
        description: extern struct {
            count: usize,
            descriptions: [*]?*anyopaque,
        },
    },
};

// Mirrors MMDB_lookup_result_s.
pub const MMDBLookupResult = extern struct {
    found_entry: bool,
    entry: MMDBEntry,
    netmask: u16,
};

// Mirrors MMDB_entry_s.
pub const MMDBEntry = extern struct {
    mmdb: *MMDB,
    offset: u32,
};

// Mirrors MMDB_entry_data_s; which union-style field is valid depends
// on data_type.
pub const MMDBEntryData = extern struct {
    has_data: bool,
    data_type: u32,
    offset: u32,
    offset_to_next: u32,
    data_size: u32,
    utf8_string: [*:0]const u8,
    double_value: f64,
    bytes: [*]const u8,
    uint16: u16,
    uint32: u32,
    int32: i32,
    uint64: u64,
    uint128: u128,
    boolean: bool,
    float_value: f32,
};

extern fn MMDB_open(filename: [*:0]const u8, flags: u32, mmdb: *MMDB) c_int;
extern fn MMDB_close(mmdb: *MMDB) void;
extern fn MMDB_lookup_string(mmdb: *MMDB, ipstr: [*:0]const u8, gai_error: *c_int, mmdb_error: *c_int) MMDBLookupResult;
// Variadic: takes NUL-terminated path segments terminated by a NULL-like sentinel.
extern fn MMDB_get_value(entry: *MMDBEntry, entry_data: *MMDBEntryData, ...) c_int;
extern fn MMDB_strerror(error_code: c_int) [*:0]const u8;

/// Thin wrapper around a single opened MaxMind database.
const GeoIP = @This();

mmdb: MMDB,

/// Opens the .mmdb file at `db_path` (path is copied to a C string via
/// the C allocator for the call only).
pub fn init(db_path: []const u8) !GeoIP {
    var mmdb: MMDB = undefined;
    const path_z = try std.heap.c_allocator.dupeZ(u8, db_path);
    defer std.heap.c_allocator.free(path_z);
    const status = MMDB_open(path_z.ptr, 0, &mmdb);
    if (status != 0) {
        return error.CannotOpenDatabase;
    }
    return GeoIP{ .mmdb = mmdb };
}

pub fn deinit(self: *GeoIP) void {
    MMDB_close(&self.mmdb);
}

/// Looks up `ip` and returns its coordinates, or null when the address
/// is invalid, the lookup errors, or the IP is not in the database.
pub fn lookup(self: *GeoIP, ip: []const u8) !?Coordinates {
    const ip_z = try std.heap.c_allocator.dupeZ(u8, ip);
    defer std.heap.c_allocator.free(ip_z);
    var gai_error: c_int = 0;
    var mmdb_error: c_int = 0;
    const result = MMDB_lookup_string(&self.mmdb, ip_z.ptr, &gai_error, &mmdb_error);
    if (gai_error != 0 or mmdb_error != 0) {
        return null;
    }
    if (!result.found_entry) {
        return null;
    }
    return try self.extractCoordinates(result.entry);
}

/// Returns true only when the database resolves `ip` to country code
/// "US"; any error (including allocation failure) reports false.
pub fn isUSIP(self: *GeoIP, ip: []const u8) bool {
    const ip_z = std.heap.c_allocator.dupeZ(u8, ip) catch return false;
    defer std.heap.c_allocator.free(ip_z);
    var gai_error: c_int = 0;
    var mmdb_error: c_int = 0;
    const result = MMDB_lookup_string(&self.mmdb, ip_z.ptr, &gai_error, &mmdb_error);
    if (gai_error != 0 or mmdb_error != 0 or !result.found_entry) {
        return false;
    }
    var entry_mut = result.entry;
    var country_data: MMDBEntryData = undefined;
    // Varargs path: "country" -> "iso_code", terminated by a NUL sentinel.
    const null_term: [*:0]const u8 = @ptrCast(&[_]u8{0});
    const status = MMDB_get_value(&entry_mut, &country_data, "country\x00", "iso_code\x00", null_term);
    if (status != 0 or !country_data.has_data) {
        return false;
    }
    const country_code = std.mem.span(country_data.utf8_string);
    return std.mem.eql(u8, country_code, "US");
}

// Reads location.latitude / location.longitude out of a lookup entry.
fn extractCoordinates(self: *GeoIP, entry: MMDBEntry) !Coordinates {
    _ = self;
    var entry_mut = entry;
    var latitude_data: MMDBEntryData = undefined;
    var longitude_data: MMDBEntryData = undefined;
    const lat_status = MMDB_get_value(&entry_mut, &latitude_data, "location", "latitude", @as([*:0]const u8, @ptrCast(&[_]u8{0})));
    const lon_status = MMDB_get_value(&entry_mut, &longitude_data, "location", "longitude", @as([*:0]const u8, @ptrCast(&[_]u8{0})));
    if (lat_status != 0 or lon_status != 0 or !latitude_data.has_data or !longitude_data.has_data) {
        return error.CoordinatesNotFound;
    }
    return Coordinates{
        .latitude = latitude_data.double_value,
        .longitude = longitude_data.double_value,
    };
}

test "MMDB functions are callable" {
    const mmdb_error = MMDB_strerror(0);
    try std.testing.expect(mmdb_error[0] != 0);
}

test "GeoIP init with invalid path fails" {
    const result = GeoIP.init("/nonexistent/path.mmdb");
    try std.testing.expectError(error.CannotOpenDatabase, result);
}

test "isUSIP detects US IPs" {
    var geoip = GeoIP.init("./GeoLite2-City.mmdb") catch {
        std.debug.print("Skipping test - GeoLite2-City.mmdb not found\n", .{});
        return error.SkipZigTest;
    };
    defer geoip.deinit();
    // Test that the function doesn't crash with various IPs
    _ = geoip.isUSIP("8.8.8.8");
    _ = geoip.isUSIP("1.1.1.1");
    // Test invalid IP returns false
    const invalid = geoip.isUSIP("invalid");
    try std.testing.expect(!invalid);
}

View file

@ -1,131 +0,0 @@
const std = @import("std");
// One airport row parsed from the CSV: IATA code, display name, and
// coordinates. The slices are allocator-owned (freed in AirportDB.deinit).
pub const Airport = struct {
    iata: []const u8,
    name: []const u8,
    latitude: f64,
    longitude: f64,
};
pub const AirportDB = struct {
allocator: std.mem.Allocator,
airports: std.StringHashMap(Airport),
pub fn initFromFile(allocator: std.mem.Allocator, file_path: []const u8) !AirportDB {
const file = try std.fs.cwd().openFile(file_path, .{});
defer file.close();
const csv_data = try file.readToEndAlloc(allocator, 10 * 1024 * 1024); // 10MB max
defer allocator.free(csv_data);
return try init(allocator, csv_data);
}
pub fn init(allocator: std.mem.Allocator, csv_data: []const u8) !AirportDB {
var airports = std.StringHashMap(Airport).init(allocator);
var lines = std.mem.splitScalar(u8, csv_data, '\n');
while (lines.next()) |line| {
if (line.len == 0) continue;
const airport = parseAirportLine(allocator, line) catch continue;
if (airport.iata.len == 3) {
try airports.put(airport.iata, airport);
}
}
return AirportDB{
.allocator = allocator,
.airports = airports,
};
}
pub fn deinit(self: *AirportDB) void {
var it = self.airports.iterator();
while (it.next()) |entry| {
self.allocator.free(entry.key_ptr.*);
self.allocator.free(entry.value_ptr.name);
}
self.airports.deinit();
}
pub fn lookup(self: *AirportDB, iata_code: []const u8) ?Airport {
return self.airports.get(iata_code);
}
fn parseAirportLine(allocator: std.mem.Allocator, line: []const u8) !Airport {
// CSV format: ID,Name,City,Country,IATA,ICAO,Lat,Lon,...
var fields = std.mem.splitScalar(u8, line, ',');
_ = fields.next() orelse return error.InvalidFormat; // ID
const name_quoted = fields.next() orelse return error.InvalidFormat; // Name
_ = fields.next() orelse return error.InvalidFormat; // City
_ = fields.next() orelse return error.InvalidFormat; // Country
const iata_quoted = fields.next() orelse return error.InvalidFormat; // IATA
_ = fields.next() orelse return error.InvalidFormat; // ICAO
const lat_str = fields.next() orelse return error.InvalidFormat; // Lat
const lon_str = fields.next() orelse return error.InvalidFormat; // Lon
// Remove quotes from fields
const name = try unquote(allocator, name_quoted);
const iata = try unquote(allocator, iata_quoted);
// Skip if IATA is "\\N" (null)
if (std.mem.eql(u8, iata, "\\N")) {
allocator.free(name);
allocator.free(iata);
return error.NoIATA;
}
const lat = try std.fmt.parseFloat(f64, lat_str);
const lon = try std.fmt.parseFloat(f64, lon_str);
return Airport{
.iata = iata,
.name = name,
.latitude = lat,
.longitude = lon,
};
}
fn unquote(allocator: std.mem.Allocator, quoted: []const u8) ![]const u8 {
if (quoted.len >= 2 and quoted[0] == '"' and quoted[quoted.len - 1] == '"') {
return allocator.dupe(u8, quoted[1 .. quoted.len - 1]);
}
return allocator.dupe(u8, quoted);
}
};
test "parseAirportLine valid" {
    const allocator = std.testing.allocator;
    // A real OpenFlights record for Goroka Airport (IATA "GKA").
    const line = "1,\"Goroka Airport\",\"Goroka\",\"Papua New Guinea\",\"GKA\",\"AYGA\",-6.081689834590001,145.391998291,5282,10,\"U\",\"Pacific/Port_Moresby\",\"airport\",\"OurAirports\"";
    const airport = try AirportDB.parseAirportLine(allocator, line);
    defer {
        allocator.free(airport.name);
        allocator.free(airport.iata);
    }
    try std.testing.expectEqualStrings("GKA", airport.iata);
    try std.testing.expectEqualStrings("Goroka Airport", airport.name);
    try std.testing.expectApproxEqAbs(@as(f64, -6.081689834590001), airport.latitude, 0.0001);
    try std.testing.expectApproxEqAbs(@as(f64, 145.391998291), airport.longitude, 0.0001);
}
test "parseAirportLine with null IATA" {
    // OpenFlights uses "\N" for airports without an IATA code.
    const line = "1,\"Test Airport\",\"City\",\"Country\",\"\\N\",\"ICAO\",0.0,0.0";
    const result = AirportDB.parseAirportLine(std.testing.allocator, line);
    try std.testing.expectError(error.NoIATA, result);
}
test "AirportDB lookup" {
    const csv = "1,\"Munich Airport\",\"Munich\",\"Germany\",\"MUC\",\"EDDM\",48.353802,11.7861,1487,1,\"E\",\"Europe/Berlin\",\"airport\",\"OurAirports\"";
    var db = try AirportDB.init(std.testing.allocator, csv);
    defer db.deinit();
    // The single record must be retrievable by its IATA code.
    const airport = db.lookup("MUC") orelse return error.TestUnexpectedResult;
    try std.testing.expectEqualStrings("Munich Airport", airport.name);
    try std.testing.expectApproxEqAbs(@as(f64, 48.353802), airport.latitude, 0.0001);
}

View file

@ -1,146 +0,0 @@
const std = @import("std");
/// In-memory geocoding cache mapping query strings to resolved locations,
/// optionally persisted to a JSON file across runs.
pub const GeoCache = struct {
    allocator: std.mem.Allocator,
    cache: std.StringHashMap(CachedLocation),
    // Path of the JSON persistence file; null disables persistence.
    cache_file: ?[]const u8,

    /// A resolved geocoding result. `name` is owned by the cache once stored.
    pub const CachedLocation = struct {
        name: []const u8,
        latitude: f64,
        longitude: f64,
    };

    /// Creates a cache. When `cache_file` is non-null, previously saved
    /// entries are loaded from it; a load failure is logged and ignored.
    pub fn init(allocator: std.mem.Allocator, cache_file: ?[]const u8) !GeoCache {
        // Duplicate the path up front so an allocation failure cannot occur
        // after entries have already been loaded (which would leak them).
        const owned_path: ?[]const u8 = if (cache_file) |f| try allocator.dupe(u8, f) else null;
        errdefer if (owned_path) |p| allocator.free(p);
        var cache = std.StringHashMap(CachedLocation).init(allocator);
        if (cache_file) |file_path| {
            loadFromFile(allocator, &cache, file_path) catch |err| {
                std.log.warn("Failed to load geocoding cache from {s}: {}", .{ file_path, err });
            };
        }
        return GeoCache{
            .allocator = allocator,
            .cache = cache,
            .cache_file = owned_path,
        };
    }

    /// Saves to the persistence file (when configured) and frees all entries.
    pub fn deinit(self: *GeoCache) void {
        if (self.cache_file) |file_path| {
            self.saveToFile(file_path) catch |err| {
                std.log.warn("Failed to save geocoding cache to {s}: {}", .{ file_path, err });
            };
        }
        var it = self.cache.iterator();
        while (it.next()) |entry| {
            self.allocator.free(entry.key_ptr.*);
            self.allocator.free(entry.value_ptr.name);
        }
        self.cache.deinit();
        if (self.cache_file) |f| self.allocator.free(f);
    }

    /// Returns the cached location for `query`, or null on a miss.
    /// The returned name is owned by the cache; do not free it.
    pub fn get(self: *GeoCache, query: []const u8) ?CachedLocation {
        return self.cache.get(query);
    }

    /// Inserts or replaces the entry for `query`, copying both the key and
    /// `location.name`. Replacing an existing entry frees the previous name
    /// (the original leaked the old name and the redundant key copy here).
    pub fn put(self: *GeoCache, query: []const u8, location: CachedLocation) !void {
        const name_copy = try self.allocator.dupe(u8, location.name);
        errdefer self.allocator.free(name_copy);
        const value = CachedLocation{
            .name = name_copy,
            .latitude = location.latitude,
            .longitude = location.longitude,
        };
        if (self.cache.getPtr(query)) |existing| {
            // Overwrite in place: the map already owns a copy of the key.
            self.allocator.free(existing.name);
            existing.* = value;
            return;
        }
        const key = try self.allocator.dupe(u8, query);
        errdefer self.allocator.free(key);
        try self.cache.put(key, value);
    }

    // Loads `file_path` (a JSON object of query -> {name, latitude, longitude})
    // into `cache`. Keys and names are duplicated with `allocator`.
    fn loadFromFile(allocator: std.mem.Allocator, cache: *std.StringHashMap(CachedLocation), file_path: []const u8) !void {
        const file = try std.fs.cwd().openFile(file_path, .{});
        defer file.close();
        const content = try file.readToEndAlloc(allocator, 10 * 1024 * 1024); // 10MB max
        defer allocator.free(content);
        const parsed = try std.json.parseFromSlice(
            std.json.Value,
            allocator,
            content,
            .{},
        );
        defer parsed.deinit();
        var it = parsed.value.object.iterator();
        while (it.next()) |entry| {
            const obj = entry.value_ptr.object;
            const key = try allocator.dupe(u8, entry.key_ptr.*);
            errdefer allocator.free(key);
            const name = try allocator.dupe(u8, obj.get("name").?.string);
            errdefer allocator.free(name);
            try cache.put(key, .{
                .name = name,
                .latitude = obj.get("latitude").?.float,
                .longitude = obj.get("longitude").?.float,
            });
        }
    }

    // Writes the cache as a JSON object to `file_path`.
    fn saveToFile(self: *GeoCache, file_path: []const u8) !void {
        const file = try std.fs.cwd().createFile(file_path, .{});
        defer file.close();
        var buffer: [4096]u8 = undefined;
        var file_writer = file.writer(&buffer);
        const writer = &file_writer.interface;
        try writer.writeAll("{\n");
        var it = self.cache.iterator();
        var first = true;
        while (it.next()) |entry| {
            if (!first) try writer.writeAll(",\n");
            first = false;
            // NOTE(review): std.json.fmt returns a Formatter; depending on the
            // targeted Zig version this may need the "{f}" specifier instead of
            // "{any}" to emit valid JSON — verify the written file round-trips
            // through loadFromFile.
            try writer.print(" {any}: {any}", .{
                std.json.fmt(entry.key_ptr.*, .{}),
                std.json.fmt(.{
                    .name = entry.value_ptr.name,
                    .latitude = entry.value_ptr.latitude,
                    .longitude = entry.value_ptr.longitude,
                }, .{}),
            });
        }
        try writer.writeAll("\n}\n");
        try writer.flush();
    }
};
test "GeoCache basic operations" {
    var cache = try GeoCache.init(std.testing.allocator, null);
    defer cache.deinit();
    // Store a location, then read it back.
    try cache.put("London", .{
        .name = "London, UK",
        .latitude = 51.5074,
        .longitude = -0.1278,
    });
    const hit = cache.get("London") orelse return error.TestUnexpectedResult;
    try std.testing.expectApproxEqAbs(@as(f64, 51.5074), hit.latitude, 0.0001);
    try std.testing.expectApproxEqAbs(@as(f64, -0.1278), hit.longitude, 0.0001);
}
test "GeoCache miss returns null" {
    var cache = try GeoCache.init(std.testing.allocator, null);
    defer cache.deinit();
    // A query that was never stored must come back as null.
    try std.testing.expect(cache.get("NonExistent") == null);
}

View file

@ -1,187 +0,0 @@
const std = @import("std");
/// A latitude/longitude pair, as extracted from a GeoIP database lookup.
pub const Coordinates = struct {
    latitude: f64,
    longitude: f64,
};
/// Mirror of libmaxminddb's MMDB_s database handle.
/// NOTE(review): as an `extern struct` the field order and types must match
/// the installed maxminddb.h exactly. In upstream libmaxminddb, `flags`
/// precedes `filename` and `file_size` precedes `file_content`, which differs
/// from the order here — verify against the header actually linked.
pub const MMDB = extern struct {
    filename: [*:0]const u8,
    flags: u32,
    file_content: ?*anyopaque,
    file_size: usize,
    data_section: ?*anyopaque,
    data_section_size: u32,
    metadata_section: ?*anyopaque,
    metadata_section_size: u32,
    full_record_byte_size: u16,
    depth: u16,
    // Search-tree entry point for IPv4 addresses inside an IPv6-indexed tree.
    ipv4_start_node: extern struct {
        node_value: u32,
        netmask: u16,
    },
    // Database metadata parsed from the file's metadata section.
    metadata: extern struct {
        node_count: u32,
        record_size: u16,
        ip_version: u16,
        database_type: [*:0]const u8,
        languages: extern struct {
            count: usize,
            names: [*][*:0]const u8,
        },
        binary_format_major_version: u16,
        binary_format_minor_version: u16,
        build_epoch: u64,
        description: extern struct {
            count: usize,
            descriptions: [*]?*anyopaque,
        },
    },
};
/// Mirror of libmaxminddb's MMDB_lookup_result_s; layout must match the C ABI.
pub const MMDBLookupResult = extern struct {
    // True when the looked-up address exists in the database.
    found_entry: bool,
    entry: MMDBEntry,
    netmask: u16,
};
/// Mirror of libmaxminddb's MMDB_entry_s: a database handle plus an offset
/// into its data section.
pub const MMDBEntry = extern struct {
    mmdb: *MMDB,
    offset: u32,
};
/// Mirror of libmaxminddb's MMDB_entry_data_s, filled by MMDB_get_value.
/// NOTE(review): in the C header the value fields (utf8_string, double_value,
/// uint16, … float_value) share a single anonymous union, and `type` is the
/// LAST member after offset/offset_to_next/data_size. This flattened struct
/// with every value as a separate field almost certainly does not match the
/// C ABI layout, so reads of e.g. `double_value` may hit the wrong offset —
/// verify against maxminddb.h before trusting any field past `has_data`.
pub const MMDBEntryData = extern struct {
    has_data: bool,
    data_type: u32,
    offset: u32,
    offset_to_next: u32,
    data_size: u32,
    utf8_string: [*:0]const u8,
    double_value: f64,
    bytes: [*]const u8,
    uint16: u16,
    uint32: u32,
    int32: i32,
    uint64: u64,
    uint128: u128,
    boolean: bool,
    float_value: f32,
};
// C bindings into libmaxminddb (resolved at link time).
// Opens the database at `filename`; returns 0 (MMDB_SUCCESS) on success.
extern fn MMDB_open(filename: [*:0]const u8, flags: u32, mmdb: *MMDB) c_int;
// Releases all resources held by an opened database handle.
extern fn MMDB_close(mmdb: *MMDB) void;
// Resolves `ipstr` and looks it up; failures are reported via the two out-params.
extern fn MMDB_lookup_string(mmdb: *MMDB, ipstr: [*:0]const u8, gai_error: *c_int, mmdb_error: *c_int) MMDBLookupResult;
// Walks a key path given as varargs of C strings terminated by a NULL pointer.
extern fn MMDB_get_value(entry: *MMDBEntry, entry_data: *MMDBEntryData, ...) c_int;
// Returns a static, human-readable message for an MMDB_* status code.
extern fn MMDB_strerror(error_code: c_int) [*:0]const u8;
/// Thin wrapper around libmaxminddb for GeoLite2/GeoIP2 address lookups.
pub const GeoIP = struct {
    mmdb: MMDB,

    /// Opens the MaxMind database at `db_path`.
    /// Returns error.CannotOpenDatabase when the file is missing or invalid.
    pub fn init(db_path: []const u8) !GeoIP {
        var mmdb: MMDB = undefined;
        // MMDB_open requires a NUL-terminated path.
        const path_z = try std.heap.c_allocator.dupeZ(u8, db_path);
        defer std.heap.c_allocator.free(path_z);
        const status = MMDB_open(path_z.ptr, 0, &mmdb);
        if (status != 0) {
            return error.CannotOpenDatabase;
        }
        return GeoIP{ .mmdb = mmdb };
    }

    /// Closes the underlying database handle.
    pub fn deinit(self: *GeoIP) void {
        MMDB_close(&self.mmdb);
    }

    /// Looks up `ip` and returns its coordinates, or null when the address
    /// fails to resolve or is not present in the database.
    pub fn lookup(self: *GeoIP, ip: []const u8) !?Coordinates {
        const ip_z = try std.heap.c_allocator.dupeZ(u8, ip);
        defer std.heap.c_allocator.free(ip_z);
        var gai_error: c_int = 0;
        var mmdb_error: c_int = 0;
        const result = MMDB_lookup_string(&self.mmdb, ip_z.ptr, &gai_error, &mmdb_error);
        if (gai_error != 0 or mmdb_error != 0) {
            return null;
        }
        if (!result.found_entry) {
            return null;
        }
        return try self.extractCoordinates(result.entry);
    }

    /// Returns true when `ip` resolves to a record whose country iso_code is
    /// "US"; false on any failure (bad IP, lookup error, missing data).
    pub fn isUSIP(self: *GeoIP, ip: []const u8) bool {
        const ip_z = std.heap.c_allocator.dupeZ(u8, ip) catch return false;
        defer std.heap.c_allocator.free(ip_z);
        var gai_error: c_int = 0;
        var mmdb_error: c_int = 0;
        const result = MMDB_lookup_string(&self.mmdb, ip_z.ptr, &gai_error, &mmdb_error);
        if (gai_error != 0 or mmdb_error != 0 or !result.found_entry) {
            return false;
        }
        var entry_mut = result.entry;
        var country_data: MMDBEntryData = undefined;
        // Per the libmaxminddb API the varargs key path must end with a NULL
        // pointer. The previous code passed a pointer to a zero byte (an empty
        // C string), which is non-null, so the C library kept consuming
        // varargs past the intended end — undefined behavior.
        const status = MMDB_get_value(&entry_mut, &country_data, "country", "iso_code", @as(?[*:0]const u8, null));
        if (status != 0 or !country_data.has_data) {
            return false;
        }
        const country_code = std.mem.span(country_data.utf8_string);
        return std.mem.eql(u8, country_code, "US");
    }

    /// Reads location.latitude / location.longitude out of a lookup entry.
    /// Returns error.CoordinatesNotFound when either field is absent.
    fn extractCoordinates(self: *GeoIP, entry: MMDBEntry) !Coordinates {
        _ = self;
        var entry_mut = entry;
        var latitude_data: MMDBEntryData = undefined;
        var longitude_data: MMDBEntryData = undefined;
        // Same NULL-pointer terminator requirement as in isUSIP.
        const lat_status = MMDB_get_value(&entry_mut, &latitude_data, "location", "latitude", @as(?[*:0]const u8, null));
        const lon_status = MMDB_get_value(&entry_mut, &longitude_data, "location", "longitude", @as(?[*:0]const u8, null));
        if (lat_status != 0 or lon_status != 0 or !latitude_data.has_data or !longitude_data.has_data) {
            return error.CoordinatesNotFound;
        }
        return Coordinates{
            .latitude = latitude_data.double_value,
            .longitude = longitude_data.double_value,
        };
    }
};
test "MMDB functions are callable" {
    // Any status code yields a non-empty message when the C library is linked.
    const message = MMDB_strerror(0);
    try std.testing.expect(message[0] != 0);
}
test "GeoIP init with invalid path fails" {
    // A missing database file must surface as CannotOpenDatabase.
    try std.testing.expectError(error.CannotOpenDatabase, GeoIP.init("/nonexistent/path.mmdb"));
}
test "isUSIP detects US IPs" {
    var geoip = GeoIP.init("./GeoLite2-City.mmdb") catch {
        std.debug.print("Skipping test - GeoLite2-City.mmdb not found\n", .{});
        return error.SkipZigTest;
    };
    defer geoip.deinit();
    // Smoke-test well-known resolver addresses; we only check for no crash,
    // not for a particular answer.
    for ([_][]const u8{ "8.8.8.8", "1.1.1.1" }) |ip| {
        _ = geoip.isUSIP(ip);
    }
    // A malformed address must report false rather than erroring out.
    try std.testing.expect(!geoip.isUSIP("invalid"));
}

View file

@ -1,7 +1,7 @@
const std = @import("std");
const GeoIP = @import("geoip.zig").GeoIP;
const GeoCache = @import("geocache.zig").GeoCache;
const AirportDB = @import("airports.zig").AirportDB;
const GeoIP = @import("GeoIP.zig");
const GeoCache = @import("GeoCache.zig");
const Airports = @import("Airports.zig");
pub const Location = struct {
name: []const u8,
@ -21,9 +21,9 @@ pub const Resolver = struct {
allocator: std.mem.Allocator,
geoip: ?*GeoIP,
geocache: *GeoCache,
airports: ?*AirportDB,
airports: ?*Airports,
pub fn init(allocator: std.mem.Allocator, geoip: ?*GeoIP, geocache: *GeoCache, airports: ?*AirportDB) Resolver {
pub fn init(allocator: std.mem.Allocator, geoip: ?*GeoIP, geocache: *GeoCache, airports: ?*Airports) Resolver {
return .{
.allocator = allocator,
.geoip = geoip,

View file

@ -1,13 +1,13 @@
const std = @import("std");
const config = @import("config.zig");
const Cache = @import("cache/cache.zig").Cache;
const MetNo = @import("weather/metno.zig").MetNo;
const Cache = @import("cache/Cache.zig");
const MetNo = @import("weather/MetNo.zig");
const types = @import("weather/types.zig");
const Server = @import("http/server.zig").Server;
const RateLimiter = @import("http/rate_limiter.zig").RateLimiter;
const GeoIP = @import("location/geoip.zig").GeoIP;
const GeoCache = @import("location/geocache.zig").GeoCache;
const AirportDB = @import("location/airports.zig").AirportDB;
const Server = @import("http/Server.zig");
const RateLimiter = @import("http/RateLimiter.zig");
const GeoIP = @import("location/GeoIP.zig");
const GeoCache = @import("location/GeoCache.zig");
const Airports = @import("location/Airports.zig");
const Resolver = @import("location/resolver.zig").Resolver;
const geolite_downloader = @import("location/geolite_downloader.zig");
@ -57,9 +57,9 @@ pub fn main() !void {
defer geocache.deinit();
// Initialize airports database
var airports_db: ?AirportDB = null;
var airports_db: ?Airports = null;
if (cfg.airports_dat_path) |path| {
airports_db = AirportDB.initFromFile(allocator, path) catch |err| blk: {
airports_db = Airports.initFromFile(allocator, path) catch |err| blk: {
std.log.warn("Failed to load airports database: {}", .{err});
break :blk null;
};
@ -100,17 +100,17 @@ pub fn main() !void {
test {
std.testing.refAllDecls(@This());
_ = @import("config.zig");
_ = @import("cache/lru.zig");
_ = @import("cache/LRU.zig");
_ = @import("weather/mock.zig");
_ = @import("http/rate_limiter.zig");
_ = @import("http/RateLimiter.zig");
_ = @import("http/query.zig");
_ = @import("http/help.zig");
_ = @import("render/line.zig");
_ = @import("render/json.zig");
_ = @import("render/v2.zig");
_ = @import("render/custom.zig");
_ = @import("location/geoip.zig");
_ = @import("location/geocache.zig");
_ = @import("location/airports.zig");
_ = @import("location/GeoIP.zig");
_ = @import("location/GeoCache.zig");
_ = @import("location/Airports.zig");
_ = @import("location/resolver.zig");
}

View file

@ -2,82 +2,82 @@ const std = @import("std");
const weather_provider = @import("provider.zig");
const types = @import("types.zig");
pub const MetNo = struct {
allocator: std.mem.Allocator,
const MetNo = @This();
pub fn init(allocator: std.mem.Allocator) !MetNo {
return MetNo{
.allocator = allocator,
};
allocator: std.mem.Allocator,
pub fn init(allocator: std.mem.Allocator) !MetNo {
return MetNo{
.allocator = allocator,
};
}
pub fn provider(self: *MetNo) weather_provider.WeatherProvider {
return .{
.ptr = self,
.vtable = &.{
.fetch = fetch,
.deinit = deinitProvider,
},
};
}
fn fetch(ptr: *anyopaque, allocator: std.mem.Allocator, location: []const u8) !types.WeatherData {
const self: *MetNo = @ptrCast(@alignCast(ptr));
// Parse location as "lat,lon" or use default
const coords = parseLocation(location) catch Coords{ .lat = 51.5074, .lon = -0.1278 };
const url = try std.fmt.allocPrint(
self.allocator,
"https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={d:.4}&lon={d:.4}",
.{ coords.lat, coords.lon },
);
defer self.allocator.free(url);
// Fetch weather data from met.no API
var client = std.http.Client{ .allocator = self.allocator };
defer client.deinit();
const uri = try std.Uri.parse(url);
var response_buf: [1024 * 1024]u8 = undefined;
var writer = std.Io.Writer.fixed(&response_buf);
const result = try client.fetch(.{
.location = .{ .uri = uri },
.method = .GET,
.response_writer = &writer,
.extra_headers = &.{
.{ .name = "User-Agent", .value = "wttr.in-zig/1.0 github.com/chubin/wttr.in" },
},
});
if (result.status != .ok) {
return error.WeatherApiFailed;
}
pub fn provider(self: *MetNo) weather_provider.WeatherProvider {
return .{
.ptr = self,
.vtable = &.{
.fetch = fetch,
.deinit = deinitProvider,
},
};
}
const response_body = response_buf[0..writer.end];
fn fetch(ptr: *anyopaque, allocator: std.mem.Allocator, location: []const u8) !types.WeatherData {
const self: *MetNo = @ptrCast(@alignCast(ptr));
// Parse JSON response
const parsed = try std.json.parseFromSlice(
std.json.Value,
allocator,
response_body,
.{},
);
defer parsed.deinit();
// Parse location as "lat,lon" or use default
const coords = parseLocation(location) catch Coords{ .lat = 51.5074, .lon = -0.1278 };
return try parseMetNoResponse(allocator, location, parsed.value);
}
const url = try std.fmt.allocPrint(
self.allocator,
"https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={d:.4}&lon={d:.4}",
.{ coords.lat, coords.lon },
);
defer self.allocator.free(url);
fn deinitProvider(ptr: *anyopaque) void {
const self: *MetNo = @ptrCast(@alignCast(ptr));
self.deinit();
}
// Fetch weather data from met.no API
var client = std.http.Client{ .allocator = self.allocator };
defer client.deinit();
const uri = try std.Uri.parse(url);
var response_buf: [1024 * 1024]u8 = undefined;
var writer = std.Io.Writer.fixed(&response_buf);
const result = try client.fetch(.{
.location = .{ .uri = uri },
.method = .GET,
.response_writer = &writer,
.extra_headers = &.{
.{ .name = "User-Agent", .value = "wttr.in-zig/1.0 github.com/chubin/wttr.in" },
},
});
if (result.status != .ok) {
return error.WeatherApiFailed;
}
const response_body = response_buf[0..writer.end];
// Parse JSON response
const parsed = try std.json.parseFromSlice(
std.json.Value,
allocator,
response_body,
.{},
);
defer parsed.deinit();
return try parseMetNoResponse(allocator, location, parsed.value);
}
fn deinitProvider(ptr: *anyopaque) void {
const self: *MetNo = @ptrCast(@alignCast(ptr));
self.deinit();
}
pub fn deinit(self: *MetNo) void {
_ = self;
}
};
pub fn deinit(self: *MetNo) void {
_ = self;
}
const Coords = struct {
lat: f64,