zfin/src/cache/store.zig
Emil Lerch 1cd775c27e
All checks were successful
Generic zig build / build (push) Successful in 33s
refactor risk module/better sharpe ratios/adjust valuation for covered calls
2026-03-17 09:45:30 -07:00

933 lines
38 KiB
Zig

const std = @import("std");
const srf = @import("srf");
const Date = @import("../models/date.zig").Date;
const Candle = @import("../models/candle.zig").Candle;
const Dividend = @import("../models/dividend.zig").Dividend;
const DividendType = @import("../models/dividend.zig").DividendType;
const Split = @import("../models/split.zig").Split;
const EarningsEvent = @import("../models/earnings.zig").EarningsEvent;
const ReportTime = @import("../models/earnings.zig").ReportTime;
const EtfProfile = @import("../models/etf_profile.zig").EtfProfile;
const Holding = @import("../models/etf_profile.zig").Holding;
const SectorWeight = @import("../models/etf_profile.zig").SectorWeight;
const Lot = @import("../models/portfolio.zig").Lot;
const LotType = @import("../models/portfolio.zig").LotType;
const Portfolio = @import("../models/portfolio.zig").Portfolio;
const OptionsChain = @import("../models/option.zig").OptionsChain;
const OptionContract = @import("../models/option.zig").OptionContract;
/// TTL durations in seconds for cache expiry.
pub const Ttl = struct {
    const s_per_day = std.time.s_per_day;
    /// Two-week window shared by corporate-action data (dividends, splits).
    const two_weeks: i64 = 14 * s_per_day;
    /// One-month window shared by slow-moving reference data.
    const one_month: i64 = 30 * s_per_day;

    /// Historical candles older than 1 day never expire
    pub const candles_historical: i64 = -1; // infinite
    /// Latest day's candle refreshes every 23h45m (15-min buffer for cron jitter)
    pub const candles_latest: i64 = s_per_day - 15 * std.time.s_per_min;
    /// Dividend data refreshes biweekly
    pub const dividends: i64 = two_weeks;
    /// Split data refreshes biweekly
    pub const splits: i64 = two_weeks;
    /// Options chains refresh hourly
    pub const options: i64 = std.time.s_per_hour;
    /// Earnings refresh monthly, with smart refresh after announcements
    pub const earnings: i64 = one_month;
    /// ETF profiles refresh monthly
    pub const etf_profile: i64 = one_month;
};
/// Identifies one cached file kind within a symbol's cache directory.
pub const DataType = enum {
    candles_daily,
    candles_meta,
    dividends,
    splits,
    options,
    earnings,
    etf_profile,
    meta,

    /// On-disk file name for this data type. Every name is exactly the
    /// enum tag plus ".srf", so derive it at comptime per variant.
    pub fn fileName(self: DataType) []const u8 {
        return switch (self) {
            inline else => |tag| @tagName(tag) ++ ".srf",
        };
    }

    /// Cache TTL in seconds for this data type. Candle data and generic
    /// meta use bespoke freshness logic elsewhere, so they report 0 here.
    pub fn ttl(self: DataType) i64 {
        return switch (self) {
            .candles_daily, .candles_meta, .meta => 0,
            .dividends => Ttl.dividends,
            .splits => Ttl.splits,
            .options => Ttl.options,
            .earnings => Ttl.earnings,
            .etf_profile => Ttl.etf_profile,
        };
    }
};
/// Persistent SRF-backed cache with per-symbol, per-data-type files.
///
/// Layout:
/// {cache_dir}/{SYMBOL}/candles_daily.srf
/// {cache_dir}/{SYMBOL}/dividends.srf
/// {cache_dir}/{SYMBOL}/meta.srf
/// ...
pub const Store = struct {
// Root directory under which per-symbol subdirectories are created.
cache_dir: []const u8,
// Allocator used for path building and (de)serialization buffers.
allocator: std.mem.Allocator,
/// Optional post-processing callback applied to each record during deserialization.
/// Used to dupe strings that outlive the SRF iterator, or apply domain-specific transforms.
pub const PostProcessFn = fn (*anyopaque, std.mem.Allocator) anyerror!void;
/// Build a Store rooted at `cache_dir`. No I/O happens here; directories
/// are created lazily (see ensureSymbolDir / writeRaw).
pub fn init(allocator: std.mem.Allocator, cache_dir: []const u8) Store {
return .{
.cache_dir = cache_dir,
.allocator = allocator,
};
}
// ── Generic typed API ────────────────────────────────────────
/// Map a model type to its cache DataType.
/// Compile error for any type the cache does not know about.
pub fn dataTypeFor(comptime T: type) DataType {
return switch (T) {
Candle => .candles_daily,
Dividend => .dividends,
Split => .splits,
EarningsEvent => .earnings,
OptionsChain => .options,
EtfProfile => .etf_profile,
else => @compileError("unsupported type for Store"),
};
}
/// The data payload for a given type: single struct for EtfProfile, slice for everything else.
pub fn DataFor(comptime T: type) type {
return if (T == EtfProfile) EtfProfile else []T;
}
/// Payload plus the file's `#!created=` timestamp (or "now" when absent).
pub fn CacheResult(comptime T: type) type {
return struct { data: DataFor(T), timestamp: i64 };
}
/// Freshness policy for reads: `.fresh_only` rejects expired files, `.any` ignores expiry.
pub const Freshness = enum { fresh_only, any };
/// Read and deserialize cached data. With `.fresh_only`, returns null if stale.
/// With `.any`, returns data regardless of freshness.
/// NOTE(review): for slice types parsed with `.alloc_strings = false`, string
/// fields reference the file buffer freed before returning — `postProcess` is
/// expected to dupe them; confirm per call site.
pub fn read(
self: *Store,
comptime T: type,
symbol: []const u8,
comptime postProcess: ?*const fn (*T, std.mem.Allocator) anyerror!void,
comptime freshness: Freshness,
) ?CacheResult(T) {
// Missing file and I/O errors both surface as "no cached data".
const raw = self.readRaw(symbol, dataTypeFor(T)) catch return null;
const data = raw orelse return null;
defer self.allocator.free(data);
// Bespoke multi-record formats (ETF profile, options) are handled inline;
// every other type goes through the generic readSlice path at the bottom.
if (T == EtfProfile or T == OptionsChain) {
const is_negative = std.mem.eql(u8, data, negative_cache_content);
if (is_negative) {
if (freshness == .fresh_only) {
// Negative entries are always fresh — return empty data
if (T == EtfProfile)
return .{ .data = EtfProfile{ .symbol = "" }, .timestamp = std.time.timestamp() };
if (T == OptionsChain)
return .{ .data = &.{}, .timestamp = std.time.timestamp() };
}
// With `.any`, a negative entry reads as "nothing cached".
return null;
}
var reader = std.Io.Reader.fixed(data);
var it = srf.iterator(&reader, self.allocator, .{ .alloc_strings = false }) catch return null;
defer it.deinit();
if (freshness == .fresh_only) {
// A file without an expiry directive is treated as stale.
if (it.expires == null) return null;
if (!it.isFresh()) return null;
}
const timestamp = it.created orelse std.time.timestamp();
if (T == EtfProfile) {
const profile = deserializeEtfProfile(self.allocator, &it) catch return null;
return .{ .data = profile, .timestamp = timestamp };
}
if (T == OptionsChain) {
const items = deserializeOptions(self.allocator, &it) catch return null;
return .{ .data = items, .timestamp = timestamp };
}
}
return readSlice(T, self.allocator, data, postProcess, freshness);
}
/// Serialize data and write to cache with the given TTL.
/// Accepts a slice for most types, or a single struct for EtfProfile.
/// Best-effort: serialization or I/O failures are silently dropped.
pub fn write(
    self: *Store,
    comptime T: type,
    symbol: []const u8,
    items: DataFor(T),
    ttl: i64,
) void {
    const expires = std.time.timestamp() + ttl;
    // Each payload shape has its own serializer; select it at comptime.
    const srf_data = blk: {
        if (T == EtfProfile)
            break :blk serializeEtfProfile(self.allocator, items, .{ .expires = expires }) catch return;
        if (T == OptionsChain)
            break :blk serializeOptions(self.allocator, items, .{ .expires = expires }) catch return;
        break :blk serializeWithMeta(T, self.allocator, items, .{ .expires = expires }) catch return;
    };
    defer self.allocator.free(srf_data);
    self.writeRaw(symbol, dataTypeFor(T), srf_data) catch {};
}
// ── Candle-specific API ──────────────────────────────────────
/// Write a full set of candles to cache (no expiry — historical facts don't expire).
/// Also refreshes candle metadata from the newest candle, when one exists.
pub fn cacheCandles(self: *Store, symbol: []const u8, candles: []const Candle) void {
    if (serializeCandles(self.allocator, candles, .{})) |payload| {
        defer self.allocator.free(payload);
        self.writeRaw(symbol, .candles_daily, payload) catch {};
    } else |_| {}
    if (candles.len == 0) return;
    const newest = candles[candles.len - 1];
    self.updateCandleMeta(symbol, newest.close, newest.date);
}
/// Append new candle records to the existing cache file.
/// Falls back to a full load + merge + rewrite if append fails (e.g. file doesn't exist).
/// Also updates candle metadata from the newest candle.
pub fn appendCandles(self: *Store, symbol: []const u8, new_candles: []const Candle) void {
if (new_candles.len == 0) return;
// Serialize only the new records without header directives so the bytes
// can be appended verbatim to an existing SRF file.
if (serializeCandles(self.allocator, new_candles, .{ .emit_directives = false })) |srf_data| {
defer self.allocator.free(srf_data);
self.appendRaw(symbol, .candles_daily, srf_data) catch {
// Append failed (file missing?) — fall back to full load + rewrite
if (self.read(Candle, symbol, null, .any)) |existing| {
defer self.allocator.free(existing.data);
const merged = self.allocator.alloc(Candle, existing.data.len + new_candles.len) catch return;
defer self.allocator.free(merged);
// Existing candles first, then the new ones — order preserved.
@memcpy(merged[0..existing.data.len], existing.data);
@memcpy(merged[existing.data.len..], new_candles);
if (serializeCandles(self.allocator, merged, .{})) |full_data| {
defer self.allocator.free(full_data);
self.writeRaw(symbol, .candles_daily, full_data) catch {};
} else |_| {}
}
};
} else |_| {}
// Meta is refreshed regardless of whether the data write succeeded.
const last = new_candles[new_candles.len - 1];
self.updateCandleMeta(symbol, last.close, last.date);
}
/// Write (or refresh) candle metadata without touching the candle data file.
/// Convenience wrapper that records `.twelvedata` as the provider.
pub fn updateCandleMeta(self: *Store, symbol: []const u8, last_close: f64, last_date: Date) void {
self.updateCandleMetaWithProvider(symbol, last_close, last_date, .twelvedata);
}
/// Write candle metadata with a specific provider source.
/// Best-effort: serialization or I/O failures are silently dropped.
pub fn updateCandleMetaWithProvider(self: *Store, symbol: []const u8, last_close: f64, last_date: Date, provider: CandleProvider) void {
    const meta: CandleMeta = .{
        .last_close = last_close,
        .last_date = last_date,
        .provider = provider,
    };
    const expires = std.time.timestamp() + Ttl.candles_latest;
    const payload = serializeCandleMeta(self.allocator, meta, .{ .expires = expires }) catch return;
    defer self.allocator.free(payload);
    self.writeRaw(symbol, .candles_meta, payload) catch {};
}
// ── Cache management ─────────────────────────────────────────
/// Ensure the cache directory for a symbol exists.
/// "Already exists" is fine; other errors (permissions, bad path) propagate.
pub fn ensureSymbolDir(self: *Store, symbol: []const u8) !void {
    const dir_path = try self.symbolPath(symbol, "");
    defer self.allocator.free(dir_path);
    std.fs.cwd().makePath(dir_path) catch |err| {
        if (err != error.PathAlreadyExists) return err;
    };
}
/// Clear all cached data for a symbol by removing its directory tree.
/// Deletion failures are ignored; only path allocation can error.
pub fn clearSymbol(self: *Store, symbol: []const u8) !void {
    const dir_path = try self.symbolPath(symbol, "");
    defer self.allocator.free(dir_path);
    std.fs.cwd().deleteTree(dir_path) catch {};
}
/// Content of a negative cache entry (fetch failed, don't retry until --refresh).
/// Compared byte-for-byte by isNegative/read/readSlice — keep in sync.
pub const negative_cache_content = "#!srfv1\n# fetch_failed\n";
/// Write a negative cache entry for a symbol + data type.
/// This records that a fetch was attempted and failed, preventing repeated
/// network requests for symbols that will never resolve.
/// Cleared by --refresh (which calls clearData/invalidate).
pub fn writeNegative(self: *Store, symbol: []const u8, data_type: DataType) void {
// Best-effort: a failed write just means the fetch may be retried later.
self.writeRaw(symbol, data_type, negative_cache_content) catch {};
}
/// Check if a cached data file is a negative entry (fetch_failed marker).
/// Negative entries are always considered "fresh" -- they never expire.
///
/// Fix: the previous version read exactly `negative_cache_content.len` bytes,
/// so any larger file that merely *started* with the marker bytes was
/// misclassified as negative. Reading one byte past the marker lets us
/// require the file content to be exactly the marker (EOF right after it).
pub fn isNegative(self: *Store, symbol: []const u8, data_type: DataType) bool {
    const path = self.symbolPath(symbol, data_type.fileName()) catch return false;
    defer self.allocator.free(path);
    const file = std.fs.cwd().openFile(path, .{}) catch return false;
    defer file.close();
    // One extra byte: a true negative file hits EOF at marker length.
    var buf: [negative_cache_content.len + 1]u8 = undefined;
    const n = file.readAll(&buf) catch return false;
    return n == negative_cache_content.len and
        std.mem.eql(u8, buf[0..n], negative_cache_content);
}
/// Clear a specific data type for a symbol. Best-effort: all failures ignored.
pub fn clearData(self: *Store, symbol: []const u8, data_type: DataType) void {
    const file_path = self.symbolPath(symbol, data_type.fileName()) catch return;
    defer self.allocator.free(file_path);
    std.fs.cwd().deleteFile(file_path) catch {};
}
/// Read the close price from the candle metadata file.
/// Returns null if no metadata exists or it cannot be parsed.
pub fn readLastClose(self: *Store, symbol: []const u8) ?f64 {
    const maybe_data = self.readRaw(symbol, .candles_meta) catch return null;
    const bytes = maybe_data orelse return null;
    defer self.allocator.free(bytes);
    const meta = deserializeCandleMeta(self.allocator, bytes) catch return null;
    return meta.last_close;
}
/// Read the full candle metadata (last_close, last_date) plus the `#!created=` timestamp.
/// Falls back to "now" when the file carries no created directive.
/// Returns null if no metadata exists or it cannot be parsed.
pub fn readCandleMeta(self: *Store, symbol: []const u8) ?struct { meta: CandleMeta, created: i64 } {
    const maybe_data = self.readRaw(symbol, .candles_meta) catch return null;
    const bytes = maybe_data orelse return null;
    defer self.allocator.free(bytes);
    var reader = std.Io.Reader.fixed(bytes);
    var it = srf.iterator(&reader, self.allocator, .{ .alloc_strings = false }) catch return null;
    defer it.deinit();
    const created_at = it.created orelse std.time.timestamp();
    const record = (it.next() catch return null) orelse return null;
    const parsed = record.to(CandleMeta) catch return null;
    return .{ .meta = parsed, .created = created_at };
}
/// Check if candle metadata is fresh using the embedded `#!expires=` directive.
/// Negative entries ("# fetch_failed") always count as fresh so known-bad
/// symbols are not re-fetched. Files without an expiry are treated as stale.
pub fn isCandleMetaFresh(self: *Store, symbol: []const u8) bool {
const raw = self.readRaw(symbol, .candles_meta) catch return false;
const data = raw orelse return false;
defer self.allocator.free(data);
if (std.mem.indexOf(u8, data, "# fetch_failed")) |_| return true;
var reader = std.Io.Reader.fixed(data);
// NOTE(review): `const it` here vs `var it` everywhere else — only valid if
// srf's deinit/isFresh don't require a mutable pointer; confirm srf's API.
const it = srf.iterator(&reader, self.allocator, .{}) catch return false;
defer it.deinit();
if (it.expires == null) return false;
return it.isFresh();
}
/// Clear all cached data by deleting the entire cache directory tree.
/// Deletion failures are ignored; the error union is kept for API symmetry.
pub fn clearAll(self: *Store) !void {
std.fs.cwd().deleteTree(self.cache_dir) catch {};
}
// ── Public types ─────────────────────────────────────────────
/// Metadata stored in the separate candles_meta.srf file.
/// Allows fast price lookups and freshness checks without parsing the full candle file.
/// The `#!created=` directive tracks when this metadata was written (replaces fetched_at).
pub const CandleMeta = struct {
// Closing price of the most recent cached candle.
last_close: f64,
// Date of the most recent cached candle.
last_date: Date,
/// Which provider sourced the candle data. Used during incremental refresh
/// to go directly to the right provider instead of trying TwelveData first.
provider: CandleProvider = .tiingo,
};
/// Known candle data providers.
pub const CandleProvider = enum {
    twelvedata,
    yahoo,
    tiingo,

    /// Parse a provider name. Tag names map to themselves; anything
    /// unrecognized defaults to `.twelvedata` (same as before).
    pub fn fromString(s: []const u8) CandleProvider {
        return std.meta.stringToEnum(CandleProvider, s) orelse .twelvedata;
    }
};
// ── Private I/O ──────────────────────────────────────────────
/// Read an entire cache file into an allocator-owned buffer (caller frees).
/// Returns null when the file does not exist; other I/O errors propagate.
fn readRaw(self: *Store, symbol: []const u8, data_type: DataType) !?[]const u8 {
    const max_file_bytes = 50 * 1024 * 1024; // sanity cap on cache file size
    const file_path = try self.symbolPath(symbol, data_type.fileName());
    defer self.allocator.free(file_path);
    const bytes = std.fs.cwd().readFileAlloc(self.allocator, file_path, max_file_bytes) catch |err| {
        if (err == error.FileNotFound) return null;
        return err;
    };
    return bytes;
}
/// Write raw bytes to a cache file. Used by server sync to write
/// pre-serialized SRF data directly to the cache. Creates the symbol
/// directory on demand and truncates any existing file.
pub fn writeRaw(self: *Store, symbol: []const u8, data_type: DataType, data: []const u8) !void {
    try self.ensureSymbolDir(symbol);
    const file_path = try self.symbolPath(symbol, data_type.fileName());
    defer self.allocator.free(file_path);
    const out = try std.fs.cwd().createFile(file_path, .{});
    defer out.close();
    try out.writeAll(data);
}
/// Append raw bytes to the end of an existing cache file.
///
/// Fix: the previous version collapsed every open failure (AccessDenied,
/// IsDir, ...) into error.FileNotFound, masking the real cause. Propagating
/// the actual error keeps diagnostics honest; the only caller (appendCandles)
/// treats any error as "fall back to full rewrite", so behavior for callers
/// is unchanged.
fn appendRaw(self: *Store, symbol: []const u8, data_type: DataType, data: []const u8) !void {
    const file_path = try self.symbolPath(symbol, data_type.fileName());
    defer self.allocator.free(file_path);
    const file = try std.fs.cwd().openFile(file_path, .{ .mode = .write_only });
    defer file.close();
    try file.seekFromEnd(0);
    try file.writeAll(data);
}
/// Join cache_dir/symbol[/file_name] into an allocator-owned path (caller frees).
/// An empty file_name yields the symbol's directory path itself.
fn symbolPath(self: *Store, symbol: []const u8, file_name: []const u8) ![]const u8 {
    return if (file_name.len == 0)
        std.fs.path.join(self.allocator, &.{ self.cache_dir, symbol })
    else
        std.fs.path.join(self.allocator, &.{ self.cache_dir, symbol, file_name });
}
// ── Private serialization: generic ───────────────────────────
/// Generic SRF deserializer with optional freshness check.
/// Single-pass: creates one iterator, optionally checks freshness, extracts
/// `#!created=` timestamp, and deserializes all records.
/// Caller owns the returned slice; with `.alloc_strings = false`, string
/// fields point into `data` unless `postProcess` dupes them.
fn readSlice(
comptime T: type,
allocator: std.mem.Allocator,
data: []const u8,
comptime postProcess: ?*const fn (*T, std.mem.Allocator) anyerror!void,
comptime freshness: Freshness,
) ?CacheResult(T) {
var reader = std.Io.Reader.fixed(data);
var it = srf.iterator(&reader, allocator, .{ .alloc_strings = false }) catch return null;
defer it.deinit();
if (freshness == .fresh_only) {
// Negative cache entries are always "fresh" — they match exactly
const is_negative = std.mem.eql(u8, data, negative_cache_content);
if (!is_negative) {
if (it.expires == null) return null;
if (!it.isFresh()) return null;
}
}
const timestamp: i64 = it.created orelse std.time.timestamp();
var items: std.ArrayList(T) = .empty;
// Cleanup runs only on error paths: after toOwnedSlice succeeds the list
// is reset to .empty below, making this defer a no-op.
defer {
if (items.items.len != 0) {
if (comptime @hasDecl(T, "deinit")) {
for (items.items) |item| item.deinit(allocator);
}
items.deinit(allocator);
}
}
while (it.next() catch return null) |fields| {
// Records that fail to parse are skipped, not fatal.
var item = fields.to(T) catch continue;
if (comptime postProcess) |pp| {
pp(&item, allocator) catch {
// Item was never appended — release it before bailing.
if (comptime @hasDecl(T, "deinit")) item.deinit(allocator);
return null;
};
}
items.append(allocator, item) catch {
if (comptime @hasDecl(T, "deinit")) item.deinit(allocator);
return null;
};
}
const result = items.toOwnedSlice(allocator) catch return null;
items = .empty; // prevent defer from freeing the returned slice
return .{ .data = result, .timestamp = timestamp };
}
/// Generic SRF serializer: emit directives (including `#!created=`) then data records.
/// Caller owns the returned buffer.
fn serializeWithMeta(
    comptime T: type,
    allocator: std.mem.Allocator,
    items: []const T,
    options: srf.FormatOptions,
) ![]const u8 {
    var opts = options;
    opts.created = std.time.timestamp();
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(T, allocator, items, opts)});
    return out.toOwnedSlice();
}
// ── Private serialization: candles ───────────────────────────
/// Serialize candles to SRF text. Caller owns the returned buffer.
/// Unlike serializeWithMeta, no `#!created=` directive is injected here —
/// the caller's FormatOptions pass through untouched.
fn serializeCandles(allocator: std.mem.Allocator, candles: []const Candle, options: srf.FormatOptions) ![]const u8 {
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(Candle, allocator, candles, options)});
    return out.toOwnedSlice();
}
/// Serialize a single CandleMeta record, stamping `#!created=` with "now".
/// Caller owns the returned buffer.
fn serializeCandleMeta(allocator: std.mem.Allocator, meta: CandleMeta, options: srf.FormatOptions) ![]const u8 {
    var opts = options;
    opts.created = std.time.timestamp();
    const records = [_]CandleMeta{meta};
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(CandleMeta, allocator, &records, opts)});
    return out.toOwnedSlice();
}
/// Parse the first record of a candles_meta file into a CandleMeta.
/// Iterator-construction and record-conversion failures map to
/// error.InvalidData; iteration errors propagate as-is.
fn deserializeCandleMeta(allocator: std.mem.Allocator, data: []const u8) !CandleMeta {
    var reader = std.Io.Reader.fixed(data);
    var it = srf.iterator(&reader, allocator, .{ .alloc_strings = false }) catch return error.InvalidData;
    defer it.deinit();
    const maybe_record = try it.next();
    const record = maybe_record orelse return error.InvalidData;
    return record.to(CandleMeta) catch error.InvalidData;
}
// ── Private serialization: options (bespoke) ─────────────────
/// Per-expiration header record emitted before that chain's contracts.
const ChainHeader = struct {
expiration: Date,
symbol: []const u8,
price: ?f64 = null,
};
/// Tagged record type for the flattened options file: one `chain` header
/// followed by its `call`/`put` contract records, discriminated by a
/// `type` column (srf_tag_field).
const OptionsRecord = union(enum) {
pub const srf_tag_field = "type";
chain: ChainHeader,
call: OptionContract,
put: OptionContract,
};
/// Flatten chains into tagged records (one `chain` header followed by that
/// chain's `call`/`put` contracts) and serialize to SRF, stamping
/// `#!created=` with "now". Caller owns the returned buffer.
fn serializeOptions(allocator: std.mem.Allocator, chains: []const OptionsChain, options: srf.FormatOptions) ![]const u8 {
    var flat: std.ArrayList(OptionsRecord) = .empty;
    defer flat.deinit(allocator);
    for (chains) |chain| {
        try flat.append(allocator, .{ .chain = .{
            .expiration = chain.expiration,
            .symbol = chain.underlying_symbol,
            .price = chain.underlying_price,
        } });
        for (chain.calls) |contract| try flat.append(allocator, .{ .call = contract });
        for (chain.puts) |contract| try flat.append(allocator, .{ .put = contract });
    }
    var opts = options;
    opts.created = std.time.timestamp();
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(OptionsRecord, allocator, flat.items, opts)});
    return out.toOwnedSlice();
}
/// Rebuild OptionsChain values from the flattened record stream.
/// Contracts are matched to their chain via expiration date, which assumes
/// at most one `chain` header per expiration (later headers overwrite the
/// mapping). Caller owns the returned slice, its duped symbol strings, and
/// the contract slices.
fn deserializeOptions(allocator: std.mem.Allocator, it: anytype) ![]OptionsChain {
var chains: std.ArrayList(OptionsChain) = .empty;
errdefer {
for (chains.items) |*ch| {
allocator.free(ch.underlying_symbol);
allocator.free(ch.calls);
allocator.free(ch.puts);
}
chains.deinit(allocator);
}
// expiration (day count) -> index into `chains`
var exp_map = std.AutoHashMap(i32, usize).init(allocator);
defer exp_map.deinit();
// chain index -> accumulated call contracts
var calls_map = std.AutoHashMap(usize, std.ArrayList(OptionContract)).init(allocator);
defer {
var iter = calls_map.valueIterator();
while (iter.next()) |v| v.deinit(allocator);
calls_map.deinit();
}
// chain index -> accumulated put contracts
var puts_map = std.AutoHashMap(usize, std.ArrayList(OptionContract)).init(allocator);
defer {
var iter = puts_map.valueIterator();
while (iter.next()) |v| v.deinit(allocator);
puts_map.deinit();
}
while (try it.next()) |fields| {
// Unparseable records are skipped, not fatal.
const opt_rec = fields.to(OptionsRecord) catch continue;
switch (opt_rec) {
.chain => |ch| {
const idx = chains.items.len;
try chains.append(allocator, .{
// Dupe: the iterator's backing buffer is freed by the caller.
.underlying_symbol = try allocator.dupe(u8, ch.symbol),
.underlying_price = ch.price,
.expiration = ch.expiration,
.calls = &.{},
.puts = &.{},
});
try exp_map.put(ch.expiration.days, idx);
},
.call => |c| {
// Contracts whose expiration has no chain header are dropped.
if (exp_map.get(c.expiration.days)) |idx| {
const entry = try calls_map.getOrPut(idx);
if (!entry.found_existing) entry.value_ptr.* = .empty;
try entry.value_ptr.append(allocator, c);
}
},
.put => |c| {
if (exp_map.get(c.expiration.days)) |idx| {
const entry = try puts_map.getOrPut(idx);
if (!entry.found_existing) entry.value_ptr.* = .empty;
try entry.value_ptr.append(allocator, c);
}
},
}
}
// Hand each chain ownership of its accumulated contract lists.
for (chains.items, 0..) |*chain, idx| {
if (calls_map.getPtr(idx)) |cl| {
chain.calls = try cl.toOwnedSlice(allocator);
}
if (puts_map.getPtr(idx)) |pl| {
chain.puts = try pl.toOwnedSlice(allocator);
}
}
return chains.toOwnedSlice(allocator);
}
// ── Private serialization: ETF profile (bespoke) ─────────────
/// Tagged record type for the ETF profile file: one `meta` record followed
/// by `sector` and `holding` records, discriminated by a `type` column.
const EtfRecord = union(enum) {
pub const srf_tag_field = "type";
meta: EtfProfile,
sector: SectorWeight,
holding: Holding,
};
/// Flatten an EtfProfile into tagged records (meta, then sectors, then
/// holdings) and serialize to SRF, stamping `#!created=` with "now".
/// Caller owns the returned buffer.
fn serializeEtfProfile(allocator: std.mem.Allocator, profile: EtfProfile, options: srf.FormatOptions) ![]const u8 {
    var flat: std.ArrayList(EtfRecord) = .empty;
    defer flat.deinit(allocator);
    try flat.append(allocator, .{ .meta = profile });
    if (profile.sectors) |sectors| {
        for (sectors) |s| try flat.append(allocator, .{ .sector = s });
    }
    if (profile.holdings) |holdings| {
        for (holdings) |h| try flat.append(allocator, .{ .holding = h });
    }
    var opts = options;
    opts.created = std.time.timestamp();
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(EtfRecord, allocator, flat.items, opts)});
    return out.toOwnedSlice();
}
/// Rebuild an EtfProfile from the flattened record stream. Sector and
/// holding strings are duped into caller-owned memory; empty lists leave
/// the corresponding optional field null.
/// NOTE(review): strings inside the `meta` record (e.g. profile.symbol) are
/// NOT duped here and may reference the iterator's backing buffer — confirm
/// callers dupe or stop using them before that buffer is freed.
fn deserializeEtfProfile(allocator: std.mem.Allocator, it: anytype) !EtfProfile {
var profile = EtfProfile{ .symbol = "" };
var sectors: std.ArrayList(SectorWeight) = .empty;
errdefer {
for (sectors.items) |s| allocator.free(s.name);
sectors.deinit(allocator);
}
var holdings: std.ArrayList(Holding) = .empty;
errdefer {
for (holdings.items) |h| {
if (h.symbol) |s| allocator.free(s);
allocator.free(h.name);
}
holdings.deinit(allocator);
}
while (try it.next()) |fields| {
// Unparseable records are skipped, not fatal.
const etf_rec = fields.to(EtfRecord) catch continue;
switch (etf_rec) {
.meta => |m| {
// Last meta record wins if the file contains more than one.
profile = m;
},
.sector => |s| {
const duped = try allocator.dupe(u8, s.name);
try sectors.append(allocator, .{ .name = duped, .weight = s.weight });
},
.holding => |h| {
const duped_sym = if (h.symbol) |s| try allocator.dupe(u8, s) else null;
const duped_name = try allocator.dupe(u8, h.name);
try holdings.append(allocator, .{ .symbol = duped_sym, .name = duped_name, .weight = h.weight });
},
}
}
if (sectors.items.len > 0) {
profile.sectors = try sectors.toOwnedSlice(allocator);
} else {
sectors.deinit(allocator);
}
if (holdings.items.len > 0) {
profile.holdings = try holdings.toOwnedSlice(allocator);
} else {
holdings.deinit(allocator);
}
return profile;
}
};
/// Serialize a portfolio (list of lots) to SRF format. Caller owns the buffer.
pub fn serializePortfolio(allocator: std.mem.Allocator, lots: []const Lot) ![]const u8 {
    var out: std.Io.Writer.Allocating = .init(allocator);
    errdefer out.deinit();
    try out.writer.print("{f}", .{srf.fmtFrom(Lot, allocator, lots, .{})});
    return out.toOwnedSlice();
}
/// Deserialize a portfolio from SRF data. Caller owns the returned Portfolio.
/// Unparseable records are skipped with a warning rather than failing the
/// whole load. NOTE(review): if a dupe fails mid-lot below, strings already
/// duped for that in-flight lot are not freed by the errdefer (it only walks
/// appended lots) — minor leak on the OOM path; confirm acceptable.
pub fn deserializePortfolio(allocator: std.mem.Allocator, data: []const u8) !Portfolio {
var lots: std.ArrayList(Lot) = .empty;
errdefer {
for (lots.items) |lot| {
allocator.free(lot.symbol);
if (lot.note) |n| allocator.free(n);
if (lot.account) |a| allocator.free(a);
if (lot.ticker) |t| allocator.free(t);
if (lot.underlying) |u| allocator.free(u);
}
lots.deinit(allocator);
}
var reader = std.Io.Reader.fixed(data);
var it = srf.iterator(&reader, allocator, .{ .alloc_strings = false }) catch return error.InvalidData;
defer it.deinit();
var skipped: usize = 0;
while (try it.next()) |fields| {
// Capture the line number up front so warnings point at the record.
const line = it.state.line;
var lot = fields.to(Lot) catch {
std.log.warn("portfolio: could not parse record at line {d}", .{line});
skipped += 1;
continue;
};
// Dupe owned strings before iterator.deinit() frees the backing buffer
lot.symbol = try allocator.dupe(u8, lot.symbol);
if (lot.note) |n| lot.note = try allocator.dupe(u8, n);
if (lot.account) |a| lot.account = try allocator.dupe(u8, a);
if (lot.ticker) |t| lot.ticker = try allocator.dupe(u8, t);
if (lot.underlying) |u| lot.underlying = try allocator.dupe(u8, u);
// Cash lots without a symbol get a placeholder
if (lot.symbol.len == 0) {
allocator.free(lot.symbol);
lot.symbol = switch (lot.security_type) {
.cash => try allocator.dupe(u8, "CASH"),
.illiquid => try allocator.dupe(u8, "ILLIQUID"),
else => {
// Non-cash lot with no symbol is unusable: free its strings and skip.
std.log.warn("portfolio: record at line {d} has no symbol, skipping", .{line});
if (lot.note) |n| allocator.free(n);
if (lot.account) |a| allocator.free(a);
if (lot.ticker) |t| allocator.free(t);
if (lot.underlying) |u| allocator.free(u);
skipped += 1;
continue;
},
};
}
try lots.append(allocator, lot);
}
if (skipped > 0) {
std.log.warn("portfolio: {d} record(s) could not be parsed and were skipped", .{skipped});
}
return .{
.lots = try lots.toOwnedSlice(allocator),
.allocator = allocator,
};
}
// Round-trip a mix of regular/special dividends through the generic
// serializer and verify every field survives.
test "dividend serialize/deserialize round-trip" {
const allocator = std.testing.allocator;
const divs = [_]Dividend{
.{ .ex_date = Date.fromYmd(2024, 3, 15), .amount = 0.8325, .pay_date = Date.fromYmd(2024, 3, 28), .frequency = 4, .type = .regular },
.{ .ex_date = Date.fromYmd(2024, 6, 14), .amount = 0.9148, .type = .special },
};
const data = try Store.serializeWithMeta(Dividend, allocator, &divs, .{});
defer allocator.free(data);
// No postProcess needed — test data has no currency strings to dupe
const result = Store.readSlice(Dividend, allocator, data, null, .any) orelse return error.TestUnexpectedResult;
const parsed = result.data;
defer allocator.free(parsed);
try std.testing.expectEqual(@as(usize, 2), parsed.len);
try std.testing.expect(parsed[0].ex_date.eql(Date.fromYmd(2024, 3, 15)));
try std.testing.expectApproxEqAbs(@as(f64, 0.8325), parsed[0].amount, 0.0001);
try std.testing.expect(parsed[0].pay_date != null);
try std.testing.expect(parsed[0].pay_date.?.eql(Date.fromYmd(2024, 3, 28)));
try std.testing.expectEqual(@as(?u8, 4), parsed[0].frequency);
try std.testing.expectEqual(DividendType.regular, parsed[0].type);
try std.testing.expect(parsed[1].ex_date.eql(Date.fromYmd(2024, 6, 14)));
try std.testing.expectApproxEqAbs(@as(f64, 0.9148), parsed[1].amount, 0.0001);
try std.testing.expect(parsed[1].pay_date == null);
try std.testing.expectEqual(DividendType.special, parsed[1].type);
}
// Round-trip stock splits through the generic serializer and verify
// dates and split ratios survive.
test "split serialize/deserialize round-trip" {
const allocator = std.testing.allocator;
const splits = [_]Split{
.{ .date = Date.fromYmd(2020, 8, 31), .numerator = 4, .denominator = 1 },
.{ .date = Date.fromYmd(2014, 6, 9), .numerator = 7, .denominator = 1 },
};
const data = try Store.serializeWithMeta(Split, allocator, &splits, .{});
defer allocator.free(data);
const result = Store.readSlice(Split, allocator, data, null, .any) orelse return error.TestUnexpectedResult;
const parsed = result.data;
defer allocator.free(parsed);
try std.testing.expectEqual(@as(usize, 2), parsed.len);
try std.testing.expect(parsed[0].date.eql(Date.fromYmd(2020, 8, 31)));
try std.testing.expectApproxEqAbs(@as(f64, 4), parsed[0].numerator, 0.001);
try std.testing.expectApproxEqAbs(@as(f64, 1), parsed[0].denominator, 0.001);
try std.testing.expect(parsed[1].date.eql(Date.fromYmd(2014, 6, 9)));
try std.testing.expectApproxEqAbs(@as(f64, 7), parsed[1].numerator, 0.001);
}
// Round-trip open and closed lots through the portfolio serializer and
// verify symbols, share counts, and open/closed state survive.
test "portfolio serialize/deserialize round-trip" {
const allocator = std.testing.allocator;
const lots = [_]Lot{
.{ .symbol = "AMZN", .shares = 10, .open_date = Date.fromYmd(2022, 3, 15), .open_price = 150.25 },
.{ .symbol = "AMZN", .shares = 5, .open_date = Date.fromYmd(2023, 6, 1), .open_price = 125.00, .close_date = Date.fromYmd(2024, 1, 15), .close_price = 185.50 },
.{ .symbol = "VTI", .shares = 100, .open_date = Date.fromYmd(2022, 1, 10), .open_price = 220.00 },
};
const data = try serializePortfolio(allocator, &lots);
defer allocator.free(data);
var portfolio = try deserializePortfolio(allocator, data);
defer portfolio.deinit();
try std.testing.expectEqual(@as(usize, 3), portfolio.lots.len);
try std.testing.expectEqualStrings("AMZN", portfolio.lots[0].symbol);
try std.testing.expectApproxEqAbs(@as(f64, 10), portfolio.lots[0].shares, 0.01);
try std.testing.expect(portfolio.lots[0].isOpen());
try std.testing.expectEqualStrings("AMZN", portfolio.lots[1].symbol);
try std.testing.expectApproxEqAbs(@as(f64, 5), portfolio.lots[1].shares, 0.01);
try std.testing.expect(!portfolio.lots[1].isOpen());
try std.testing.expect(portfolio.lots[1].close_date.?.eql(Date.fromYmd(2024, 1, 15)));
try std.testing.expectApproxEqAbs(@as(f64, 185.50), portfolio.lots[1].close_price.?, 0.01);
try std.testing.expectEqualStrings("VTI", portfolio.lots[2].symbol);
}
// Verifies the symbol-placeholder fallback in deserializePortfolio:
// a cash lot with no symbol field gets "CASH" substituted.
test "portfolio: cash lots without symbol get CASH placeholder" {
const allocator = std.testing.allocator;
// Raw SRF with a cash lot that has no symbol field
const data =
\\#!srfv1
\\security_type::cash,shares:num:598.66,open_date::2026-02-25,open_price:num:1.00,account::Savings
\\symbol::AAPL,shares:num:10,open_date::2024-01-15,open_price:num:150.00
\\
;
var portfolio = try deserializePortfolio(allocator, data);
defer portfolio.deinit();
try std.testing.expectEqual(@as(usize, 2), portfolio.lots.len);
// Cash lot: no symbol in data -> gets "CASH" placeholder
try std.testing.expectEqualStrings("CASH", portfolio.lots[0].symbol);
try std.testing.expectEqual(LotType.cash, portfolio.lots[0].security_type);
try std.testing.expectApproxEqAbs(@as(f64, 598.66), portfolio.lots[0].shares, 0.01);
try std.testing.expectEqualStrings("Savings", portfolio.lots[0].account.?);
// Stock lot: symbol present
try std.testing.expectEqualStrings("AAPL", portfolio.lots[1].symbol);
}
// Verifies that price_ratio/ticker fields survive a full
// deserialize -> serialize -> deserialize cycle, and that lots without
// them keep the defaults (ratio 1.0, null ticker).
test "portfolio: price_ratio round-trip" {
const allocator = std.testing.allocator;
const data =
\\#!srfv1
\\symbol::02315N600,shares:num:100,open_date::2024-01-15,open_price:num:140.00,ticker::VTTHX,price_ratio:num:5.185,note::VANGUARD TARGET 2035
\\symbol::AAPL,shares:num:10,open_date::2024-03-01,open_price:num:150.00
\\
;
var portfolio = try deserializePortfolio(allocator, data);
defer portfolio.deinit();
try std.testing.expectEqual(@as(usize, 2), portfolio.lots.len);
// CUSIP lot with price_ratio and ticker
try std.testing.expectEqualStrings("02315N600", portfolio.lots[0].symbol);
try std.testing.expectEqualStrings("VTTHX", portfolio.lots[0].ticker.?);
try std.testing.expectEqualStrings("VTTHX", portfolio.lots[0].priceSymbol());
try std.testing.expectApproxEqAbs(@as(f64, 5.185), portfolio.lots[0].price_ratio, 0.001);
try std.testing.expectEqualStrings("VANGUARD TARGET 2035", portfolio.lots[0].note.?);
// Regular lot — no price_ratio (default 1.0)
try std.testing.expectEqualStrings("AAPL", portfolio.lots[1].symbol);
try std.testing.expectApproxEqAbs(@as(f64, 1.0), portfolio.lots[1].price_ratio, 0.001);
try std.testing.expect(portfolio.lots[1].ticker == null);
// Round-trip: serialize and deserialize again
const reserialized = try serializePortfolio(allocator, portfolio.lots);
defer allocator.free(reserialized);
var portfolio2 = try deserializePortfolio(allocator, reserialized);
defer portfolio2.deinit();
try std.testing.expectEqual(@as(usize, 2), portfolio2.lots.len);
try std.testing.expectApproxEqAbs(@as(f64, 5.185), portfolio2.lots[0].price_ratio, 0.001);
try std.testing.expectEqualStrings("VTTHX", portfolio2.lots[0].ticker.?);
try std.testing.expectApproxEqAbs(@as(f64, 1.0), portfolio2.lots[1].price_ratio, 0.001);
}