Avoid mtime for cache decisions
This commit is contained in:
parent
d6007e4305
commit
2b62827bdb
2 changed files with 63 additions and 33 deletions
68
src/cache/store.zig
vendored
68
src/cache/store.zig
vendored
|
|
@ -95,7 +95,32 @@ pub const Store = struct {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write raw SRF data for a symbol and data type with an embedded expiry timestamp.
///
/// Inserts a `# good_until::{unix_seconds}` comment after the `#!srfv1` header so
/// freshness can be determined from the file content rather than filesystem mtime.
/// `ttl_seconds` is added to the current wall-clock time to produce the expiry.
/// Data that does not begin with the SRF header is written unchanged to avoid
/// data loss.
pub fn writeWithExpiry(self: *Store, symbol: []const u8, data_type: DataType, data: []const u8, ttl_seconds: i64) !void {
    try self.ensureSymbolDir(symbol);

    const path = try self.symbolPath(symbol, data_type.fileName());
    defer self.allocator.free(path);

    const file = try std.fs.cwd().createFile(path, .{});
    defer file.close();

    const header = "#!srfv1\n";
    if (std.mem.startsWith(u8, data, header)) {
        try file.writeAll(header);
        // Saturating add: a huge TTL must not overflow-trap the write path.
        const expiry = std.time.timestamp() +| ttl_seconds;
        var expiry_buf: [48]u8 = undefined;
        // BUG FIX: the original `catch return` swallowed the format error and
        // returned success after writing only the header, leaving a truncated
        // cache file (silent data loss). Propagate the error instead.
        const expiry_line = try std.fmt.bufPrint(&expiry_buf, "# good_until::{d}\n", .{expiry});
        try file.writeAll(expiry_line);
        try file.writeAll(data[header.len..]);
    } else {
        // Unexpected format -- write as-is to avoid data loss.
        try file.writeAll(data);
    }
}
/// Write raw SRF data for a symbol and data type (no expiry metadata).
|
||||||
pub fn writeRaw(self: *Store, symbol: []const u8, data_type: DataType, data: []const u8) !void {
|
pub fn writeRaw(self: *Store, symbol: []const u8, data_type: DataType, data: []const u8) !void {
|
||||||
try self.ensureSymbolDir(symbol);
|
try self.ensureSymbolDir(symbol);
|
||||||
const path = try self.symbolPath(symbol, data_type.fileName());
|
const path = try self.symbolPath(symbol, data_type.fileName());
|
||||||
|
|
@ -106,30 +131,35 @@ pub const Store = struct {
|
||||||
try file.writeAll(data);
|
try file.writeAll(data);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if a cached data file is fresh by reading the embedded expiry timestamp.
///
/// - Negative cache entries (`# fetch_failed`) are always fresh; they are
///   cleared only by --refresh / invalidate.
/// - Files with `# good_until::{timestamp}` are fresh while now < timestamp.
/// - Files without expiry metadata (legacy files) are stale, which triggers a
///   re-fetch and rewrite with the new embedded-expiry format.
pub fn isFresh(self: *Store, symbol: []const u8, data_type: DataType) !bool {
    const path = try self.symbolPath(symbol, data_type.fileName());
    defer self.allocator.free(path);

    const file = std.fs.cwd().openFile(path, .{}) catch return false;
    defer file.close();

    // Metadata comments are written immediately after the 8-byte `#!srfv1`
    // header (see writeWithExpiry), so the head of the file suffices.
    var buf: [256]u8 = undefined;
    const n = file.readAll(&buf) catch return false;
    const content = buf[0..n];

    // Negative cache entry -- always fresh.
    if (std.mem.indexOf(u8, content, "# fetch_failed") != null) return true;

    // Look for the embedded expiry.
    const marker = "# good_until::";
    if (std.mem.indexOf(u8, content, marker)) |idx| {
        const after = content[idx + marker.len ..];
        // BUG FIX: if the line's newline fell outside the read buffer, the
        // original parsed the *partial* digit run as the expiry, producing a
        // bogus timestamp. Without a terminating newline we cannot trust the
        // number -- treat the entry as stale (safe: worst case is a re-fetch).
        const end = std.mem.indexOfScalar(u8, after, '\n') orelse return false;
        const expiry = std.fmt.parseInt(i64, after[0..end], 10) catch return false;
        return std.time.timestamp() < expiry;
    }

    // No expiry info (legacy file or missing metadata) -- stale.
    return false;
}
|
|
||||||
/// Get the modification time (unix seconds) of a cached data file.
|
/// Get the modification time (unix seconds) of a cached data file.
|
||||||
|
|
|
||||||
|
|
@ -125,7 +125,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .candles_daily) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .candles_daily) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .candles_daily, cache.Ttl.candles_latest) catch false;
|
const fresh = s.isFresh(symbol, .candles_daily) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const candles = cache.Store.deserializeCandles(self.allocator, data) catch null;
|
const candles = cache.Store.deserializeCandles(self.allocator, data) catch null;
|
||||||
if (candles) |c| return .{ .data = c, .source = .cached, .timestamp = s.getMtime(symbol, .candles_daily) orelse std.time.timestamp() };
|
if (candles) |c| return .{ .data = c, .source = .cached, .timestamp = s.getMtime(symbol, .candles_daily) orelse std.time.timestamp() };
|
||||||
|
|
@ -147,7 +147,7 @@ pub const DataService = struct {
|
||||||
if (fetched.len > 0) {
|
if (fetched.len > 0) {
|
||||||
if (cache.Store.serializeCandles(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeCandles(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .candles_daily, srf_data) catch {};
|
s.writeWithExpiry(symbol, .candles_daily, srf_data, cache.Ttl.candles_latest) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -162,7 +162,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .dividends) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .dividends) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .dividends, cache.Ttl.dividends) catch false;
|
const fresh = s.isFresh(symbol, .dividends) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const divs = cache.Store.deserializeDividends(self.allocator, data) catch null;
|
const divs = cache.Store.deserializeDividends(self.allocator, data) catch null;
|
||||||
if (divs) |d| return .{ .data = d, .source = .cached, .timestamp = s.getMtime(symbol, .dividends) orelse std.time.timestamp() };
|
if (divs) |d| return .{ .data = d, .source = .cached, .timestamp = s.getMtime(symbol, .dividends) orelse std.time.timestamp() };
|
||||||
|
|
@ -178,7 +178,7 @@ pub const DataService = struct {
|
||||||
if (fetched.len > 0) {
|
if (fetched.len > 0) {
|
||||||
if (cache.Store.serializeDividends(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeDividends(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .dividends, srf_data) catch {};
|
s.writeWithExpiry(symbol, .dividends, srf_data, cache.Ttl.dividends) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -193,7 +193,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .splits) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .splits) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .splits, cache.Ttl.splits) catch false;
|
const fresh = s.isFresh(symbol, .splits) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const splits = cache.Store.deserializeSplits(self.allocator, data) catch null;
|
const splits = cache.Store.deserializeSplits(self.allocator, data) catch null;
|
||||||
if (splits) |sp| return .{ .data = sp, .source = .cached, .timestamp = s.getMtime(symbol, .splits) orelse std.time.timestamp() };
|
if (splits) |sp| return .{ .data = sp, .source = .cached, .timestamp = s.getMtime(symbol, .splits) orelse std.time.timestamp() };
|
||||||
|
|
@ -208,7 +208,7 @@ pub const DataService = struct {
|
||||||
|
|
||||||
if (cache.Store.serializeSplits(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeSplits(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .splits, srf_data) catch {};
|
s.writeWithExpiry(symbol, .splits, srf_data, cache.Ttl.splits) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
|
|
||||||
return .{ .data = fetched, .source = .fetched, .timestamp = std.time.timestamp() };
|
return .{ .data = fetched, .source = .fetched, .timestamp = std.time.timestamp() };
|
||||||
|
|
@ -222,7 +222,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .options) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .options) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .options, cache.Ttl.options) catch false;
|
const fresh = s.isFresh(symbol, .options) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const chains = cache.Store.deserializeOptions(self.allocator, data) catch null;
|
const chains = cache.Store.deserializeOptions(self.allocator, data) catch null;
|
||||||
if (chains) |c| return .{ .data = c, .source = .cached, .timestamp = s.getMtime(symbol, .options) orelse std.time.timestamp() };
|
if (chains) |c| return .{ .data = c, .source = .cached, .timestamp = s.getMtime(symbol, .options) orelse std.time.timestamp() };
|
||||||
|
|
@ -238,7 +238,7 @@ pub const DataService = struct {
|
||||||
if (fetched.len > 0) {
|
if (fetched.len > 0) {
|
||||||
if (cache.Store.serializeOptions(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeOptions(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .options, srf_data) catch {};
|
s.writeWithExpiry(symbol, .options, srf_data, cache.Ttl.options) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -253,7 +253,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .earnings) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .earnings) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .earnings, cache.Ttl.earnings) catch false;
|
const fresh = s.isFresh(symbol, .earnings) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const events = cache.Store.deserializeEarnings(self.allocator, data) catch null;
|
const events = cache.Store.deserializeEarnings(self.allocator, data) catch null;
|
||||||
if (events) |e| return .{ .data = e, .source = .cached, .timestamp = s.getMtime(symbol, .earnings) orelse std.time.timestamp() };
|
if (events) |e| return .{ .data = e, .source = .cached, .timestamp = s.getMtime(symbol, .earnings) orelse std.time.timestamp() };
|
||||||
|
|
@ -273,7 +273,7 @@ pub const DataService = struct {
|
||||||
if (fetched.len > 0) {
|
if (fetched.len > 0) {
|
||||||
if (cache.Store.serializeEarnings(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeEarnings(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .earnings, srf_data) catch {};
|
s.writeWithExpiry(symbol, .earnings, srf_data, cache.Ttl.earnings) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -288,7 +288,7 @@ pub const DataService = struct {
|
||||||
const cached_raw = s.readRaw(symbol, .etf_profile) catch return DataError.CacheError;
|
const cached_raw = s.readRaw(symbol, .etf_profile) catch return DataError.CacheError;
|
||||||
if (cached_raw) |data| {
|
if (cached_raw) |data| {
|
||||||
defer self.allocator.free(data);
|
defer self.allocator.free(data);
|
||||||
const fresh = s.isFresh(symbol, .etf_profile, cache.Ttl.etf_profile) catch false;
|
const fresh = s.isFresh(symbol, .etf_profile) catch false;
|
||||||
if (fresh) {
|
if (fresh) {
|
||||||
const profile = cache.Store.deserializeEtfProfile(self.allocator, data) catch null;
|
const profile = cache.Store.deserializeEtfProfile(self.allocator, data) catch null;
|
||||||
if (profile) |p| return .{ .data = p, .source = .cached, .timestamp = s.getMtime(symbol, .etf_profile) orelse std.time.timestamp() };
|
if (profile) |p| return .{ .data = p, .source = .cached, .timestamp = s.getMtime(symbol, .etf_profile) orelse std.time.timestamp() };
|
||||||
|
|
@ -303,7 +303,7 @@ pub const DataService = struct {
|
||||||
|
|
||||||
if (cache.Store.serializeEtfProfile(self.allocator, fetched)) |srf_data| {
|
if (cache.Store.serializeEtfProfile(self.allocator, fetched)) |srf_data| {
|
||||||
defer self.allocator.free(srf_data);
|
defer self.allocator.free(srf_data);
|
||||||
s.writeRaw(symbol, .etf_profile, srf_data) catch {};
|
s.writeWithExpiry(symbol, .etf_profile, srf_data, cache.Ttl.etf_profile) catch {};
|
||||||
} else |_| {}
|
} else |_| {}
|
||||||
|
|
||||||
return .{ .data = fetched, .source = .fetched, .timestamp = std.time.timestamp() };
|
return .{ .data = fetched, .source = .fetched, .timestamp = std.time.timestamp() };
|
||||||
|
|
@ -388,10 +388,10 @@ pub const DataService = struct {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if candle data is fresh in cache (within TTL) without reading/deserializing.
|
/// Check if candle data is fresh in cache without reading/deserializing.
|
||||||
pub fn isCandleCacheFresh(self: *DataService, symbol: []const u8) bool {
|
pub fn isCandleCacheFresh(self: *DataService, symbol: []const u8) bool {
|
||||||
var s = self.store();
|
var s = self.store();
|
||||||
return s.isFresh(symbol, .candles_daily, cache.Ttl.candles_latest) catch false;
|
return s.isFresh(symbol, .candles_daily) catch false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Estimate wait time (in seconds) before the next TwelveData API call can proceed.
|
/// Estimate wait time (in seconds) before the next TwelveData API call can proceed.
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue