remove load of releases.xml and other perf/stability improvements

Loading the output file and trying to do a diff was a bad idea, and added a lot
of unnecessary code. It was also broken after adding the markdown/html support,
as releases were being escaped a second time after loading originally. It was
my idea, not the AI's. :(

After removing this, the filtering logic was wonky (this was the AI), as
what was generated had multiple copies of identical functions, so the
sorting was ineffective, and there were multiple copies of the release
arraylist being copied everywhere. Now, releases are created once, by
the provider (before it was 1: via XML, 2: by the provider, 3: copied
again into all the releases, and then a compaction occurred prior to a
sort).

For all the sorting, and all the filtering, the release date was being
parsed (and allocated). That parsing now occurs once by the provider,
and is stored as an i64 that is used until the final atom generation.

Finally, GitHub results were being sorted by each page, which was then
thrown into the full GitHub results without regard for where they landed
again. This sort has been moved to occur after all threads have filled
the ArrayList.

Benchmark 1 (3 runs): ./before config.json
  measurement          mean ± σ            min … max           outliers         delta
  wall_time          12.4s  ±  800ms    11.7s  … 13.3s           0 ( 0%)        0%
  peak_rss            105MB ± 9.85MB    93.8MB …  112MB          0 ( 0%)        0%
  cpu_cycles         6.56G  ±  145M     6.40G  … 6.69G           0 ( 0%)        0%
  instructions       23.5G  ±  613M     22.9G  … 24.1G           0 ( 0%)        0%
  cache_references   11.2M  ±  273K     10.9M  … 11.4M           0 ( 0%)        0%
  cache_misses       3.30M  ±  139K     3.16M  … 3.44M           0 ( 0%)        0%
  branch_misses      10.7M  ±  724K     10.2M  … 11.5M           0 ( 0%)        0%
Benchmark 2 (3 runs): ./after config.json
  measurement          mean ± σ            min … max           outliers         delta
  wall_time          11.8s  ±  531ms    11.2s  … 12.1s           0 ( 0%)          -  4.8% ± 12.4%
  peak_rss           94.8MB ± 3.81MB    92.3MB … 99.2MB          0 ( 0%)          -  9.7% ± 16.1%
  cpu_cycles         6.55G  ± 54.1M     6.49G  … 6.59G           0 ( 0%)          -  0.1% ±  3.8%
  instructions       23.2G  ±  225M     22.9G  … 23.3G           0 ( 0%)          -  1.5% ±  4.4%
  cache_references   10.8M  ±  232K     10.6M  … 11.1M           0 ( 0%)          -  3.3% ±  5.2%
  cache_misses       2.96M  ± 59.0K     2.89M  … 3.01M           0 ( 0%)        - 10.3% ±  7.3%
  branch_misses      10.5M  ± 23.1K     10.5M  … 10.5M           0 ( 0%)          -  1.7% ± 10.9%
This commit is contained in:
Emil Lerch 2025-07-15 19:48:06 -07:00
parent 50a0b44340
commit ea08ac7497
Signed by: lobo
GPG key ID: A7B62D657EF764F8
10 changed files with 182 additions and 1332 deletions

View file

@ -201,7 +201,13 @@ pub fn generateFeed(allocator: Allocator, releases: []const Release) ![]u8 {
try writer.writeAll("</id>\n");
try writer.writeAll(" <updated>");
try escapeXml(writer, release.published_at);
const published = zeit.Instant{
.timestamp = release.published_at * std.time.ns_per_s,
.timezone = &zeit.utc,
};
// try escapeXml(writer, release.published_at);
// try std.testing.expect(std.mem.indexOf(u8, atom_content, "<updated>2024-01-01T00:00:00Z</updated>") != null);
try published.time().strftime(writer, "%Y-%m-%dT%H:%M:%SZ");
try writer.writeAll("</updated>\n");
try writer.writeAll(" <author><name>");
@ -276,7 +282,10 @@ test "Atom feed generation with markdown" {
Release{
.repo_name = "test/repo",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/repo/releases/tag/v1.0.0",
.description = "## What's Changed\n* Fixed bug\n* Added feature",
.provider = "github",
@ -301,7 +310,10 @@ test "Atom feed with fallback markdown" {
Release{
.repo_name = "test/repo",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/repo/releases/tag/v1.0.0",
.description = "```javascript\nconst x = 1;\n```",
.provider = "github",
@ -323,7 +335,10 @@ test "Atom feed with special characters" {
Release{
.repo_name = "test/repo<script>",
.tag_name = "v1.0.0 & more",
.published_at = "2024-01-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/repo/releases/tag/v1.0.0",
.description = "Test \"release\" with <special> chars & symbols",
.provider = "github",

View file

@ -10,8 +10,8 @@ const Codeberg = @import("providers/Codeberg.zig");
const SourceHut = @import("providers/SourceHut.zig");
const atom = @import("atom.zig");
const config = @import("config.zig");
const xml_parser = @import("xml_parser.zig");
const zeit = @import("zeit");
const utils = @import("utils.zig");
const Provider = @import("Provider.zig");
@ -56,13 +56,13 @@ fn printInfo(comptime fmt: []const u8, args: anytype) void {
stderr.print(fmt, args) catch {};
}
// Configuration: Only include releases from the last 6 months
const RELEASE_AGE_LIMIT_SECONDS: i64 = 365 * 24 * 60 * 60 / 2; // 6 months in seconds
// Configuration: Only include releases from the last 365 days
const RELEASE_AGE_LIMIT_SECONDS: i64 = 365 * std.time.s_per_day; // Get from last 365 days
pub const Release = struct {
repo_name: []const u8,
tag_name: []const u8,
published_at: []const u8,
published_at: i64,
html_url: []const u8,
description: []const u8,
provider: []const u8,
@ -70,7 +70,6 @@ pub const Release = struct {
pub fn deinit(self: Release, allocator: Allocator) void {
allocator.free(self.repo_name);
allocator.free(self.tag_name);
allocator.free(self.published_at);
allocator.free(self.html_url);
allocator.free(self.description);
allocator.free(self.provider);
@ -86,7 +85,6 @@ const ProviderResult = struct {
const ThreadContext = struct {
provider: Provider,
latest_release_date: i64,
result: *ProviderResult,
allocator: Allocator,
};
@ -123,22 +121,8 @@ pub fn main() !u8 {
};
defer app_config.deinit();
// Load existing releases to determine last check time per provider
var existing_releases = loadExistingReleases(allocator, output_file) catch ArrayList(Release).init(allocator);
defer {
for (existing_releases.items) |release| {
release.deinit(allocator);
}
existing_releases.deinit();
}
var new_releases = ArrayList(Release).init(allocator);
defer {
for (new_releases.items) |release| {
release.deinit(allocator);
}
new_releases.deinit();
}
var all_releases = ArrayList(Release).init(allocator);
defer all_releases.deinit();
printInfo("Fetching releases from all providers concurrently...\n", .{});
@ -170,20 +154,22 @@ pub fn main() !u8 {
};
// Fetch releases from all providers concurrently using thread pool
const provider_results = try fetchReleasesFromAllProviders(allocator, providers.items, existing_releases.items);
const provider_results = try fetchReleasesFromAllProviders(allocator, providers.items);
defer {
for (provider_results) |*result| {
// Don't free the releases here - they're transferred to new_releases
result.releases.deinit();
// Free error messages if they exist
if (result.error_msg) |error_msg| {
if (result.error_msg) |error_msg|
allocator.free(error_msg);
}
for (result.releases.items) |release|
release.deinit(allocator);
result.releases.deinit();
}
allocator.free(provider_results);
}
// Check for provider errors and report them
const now = std.time.timestamp();
const cutoff_time = now - RELEASE_AGE_LIMIT_SECONDS;
var has_errors = false;
for (provider_results) |result| {
if (result.error_msg) |error_msg| {
@ -198,41 +184,26 @@ pub fn main() !u8 {
return 1;
}
// Combine all new releases from threaded providers
var original_count: usize = 0;
// Combine all releases from threaded providers
for (provider_results) |result| {
try new_releases.appendSlice(result.releases.items);
original_count += result.releases.items.len;
// Results should be sorted already...we will find the oldest applicable release,
// then copy into all_releases
var last_index: usize = 0;
for (result.releases.items) |release| {
if (release.published_at >= cutoff_time) {
last_index += 1;
} else break;
}
// Total releases in feed: 1170 of 3591 total in last 365 days
std.log.debug("last_index: {} : {s}", .{ last_index, result.provider_name });
try all_releases.appendSlice(result.releases.items[0..last_index]);
}
// Combine all releases (existing and new)
var all_releases = ArrayList(Release).init(allocator);
defer all_releases.deinit();
// Add new releases
try all_releases.appendSlice(new_releases.items);
// Add all existing releases
try all_releases.appendSlice(existing_releases.items);
// Sort all releases by published date (most recent first)
std.mem.sort(Release, all_releases.items, {}, compareReleasesByDate);
// Filter releases by age in-place - zero extra allocations
const now = std.time.timestamp();
const cutoff_time = now - RELEASE_AGE_LIMIT_SECONDS;
var write_index: usize = 0;
const original_count = all_releases.items.len;
for (all_releases.items) |release| {
const release_time = parseReleaseTimestamp(release.published_at) catch 0;
if (release_time >= cutoff_time) {
all_releases.items[write_index] = release;
write_index += 1;
}
}
// Shrink the array to only include filtered items
all_releases.shrinkRetainingCapacity(write_index);
std.mem.sort(Release, all_releases.items, {}, utils.compareReleasesByDate);
// Generate Atom feed from filtered releases
const atom_content = try atom.generateFeed(allocator, all_releases.items);
@ -247,88 +218,12 @@ pub fn main() !u8 {
_ = checkFileSizeAndWarn(atom_content.len);
// Log to stderr for user feedback
printInfo("Found {} new releases\n", .{new_releases.items.len});
printInfo("Total releases in feed: {} (filtered from {} total, showing last {} days)\n", .{ all_releases.items.len, original_count, @divTrunc(RELEASE_AGE_LIMIT_SECONDS, 24 * 60 * 60) });
printInfo("Total releases in feed: {} of {} total in last {} days\n", .{ all_releases.items.len, original_count, @divTrunc(RELEASE_AGE_LIMIT_SECONDS, std.time.s_per_day) });
printInfo("Updated feed written to: {s}\n", .{output_file});
return 0;
}
fn loadExistingReleases(allocator: Allocator, filename: []const u8) !ArrayList(Release) {
const file = std.fs.cwd().openFile(filename, .{}) catch |err| switch (err) {
error.FileNotFound => {
printInfo("No existing releases file found, starting fresh\n", .{});
return ArrayList(Release).init(allocator);
},
else => return err,
};
defer file.close();
const content = try file.readToEndAlloc(allocator, 10 * 1024 * 1024);
defer allocator.free(content);
printInfo("Loading existing releases from {s}...\n", .{filename});
const releases = try parseReleasesFromXml(allocator, content);
printInfo("Loaded {} existing releases\n", .{releases.items.len});
return releases;
}
fn parseReleasesFromXml(allocator: Allocator, xml_content: []const u8) !ArrayList(Release) {
const releases = xml_parser.parseAtomFeed(allocator, xml_content) catch |err| {
printError("Warning: Failed to parse XML content: {}\n", .{err});
printInfo("Starting fresh with no existing releases\n", .{});
return ArrayList(Release).init(allocator);
};
return releases;
}
pub fn filterNewReleases(allocator: Allocator, all_releases: []const Release, since_timestamp: i64) !ArrayList(Release) {
var new_releases = ArrayList(Release).init(allocator);
for (all_releases) |release| {
// Parse the published_at timestamp
const release_time = parseReleaseTimestamp(release.published_at) catch continue;
if (release_time > since_timestamp) {
// This is a new release, duplicate it for our list
const new_release = Release{
.repo_name = try allocator.dupe(u8, release.repo_name),
.tag_name = try allocator.dupe(u8, release.tag_name),
.published_at = try allocator.dupe(u8, release.published_at),
.html_url = try allocator.dupe(u8, release.html_url),
.description = try allocator.dupe(u8, release.description),
.provider = try allocator.dupe(u8, release.provider),
};
try new_releases.append(new_release);
}
}
return new_releases;
}
pub fn parseReleaseTimestamp(date_str: []const u8) !i64 {
// Try parsing as direct timestamp first
if (std.fmt.parseInt(i64, date_str, 10)) |timestamp| {
return timestamp;
} else |_| {
// Try parsing as ISO 8601 format using Zeit
const instant = zeit.instant(.{
.source = .{ .iso8601 = date_str },
}) catch return 0;
// Zeit returns nanoseconds, convert to seconds
const seconds = @divTrunc(instant.timestamp, 1_000_000_000);
return @intCast(seconds);
}
}
pub fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
_ = context;
const timestamp_a = parseReleaseTimestamp(a.published_at) catch 0;
const timestamp_b = parseReleaseTimestamp(b.published_at) catch 0;
return timestamp_a > timestamp_b; // Most recent first
}
fn formatTimestampForDisplay(allocator: Allocator, timestamp: i64) ![]const u8 {
if (timestamp == 0) {
return try allocator.dupe(u8, "beginning of time");
@ -359,7 +254,6 @@ fn formatTimestampForDisplay(allocator: Allocator, timestamp: i64) ![]const u8 {
fn fetchReleasesFromAllProviders(
allocator: Allocator,
providers: []const Provider,
existing_releases: []const Release,
) ![]ProviderResult {
var results = try allocator.alloc(ProviderResult, providers.len);
@ -382,20 +276,8 @@ fn fetchReleasesFromAllProviders(
// Calculate the latest release date for each provider from existing releases
for (providers, 0..) |provider, i| {
// Find the latest release date for this provider
var latest_date: i64 = 0;
for (existing_releases) |release| {
if (std.mem.eql(u8, release.provider, provider.getName())) {
const release_time = parseReleaseTimestamp(release.published_at) catch 0;
if (release_time > latest_date) {
latest_date = release_time;
}
}
}
contexts[i] = ThreadContext{
.provider = provider,
.latest_release_date = latest_date,
.result = &results[i],
.allocator = allocator,
};
@ -413,45 +295,23 @@ fn fetchReleasesFromAllProviders(
fn fetchProviderReleases(context: *const ThreadContext) void {
const provider = context.provider;
const latest_release_date = context.latest_release_date;
const result = context.result;
const allocator = context.allocator;
const since_str = formatTimestampForDisplay(allocator, latest_release_date) catch "unknown";
defer if (!std.mem.eql(u8, since_str, "unknown")) allocator.free(since_str);
printInfo("Fetching releases from {s} (since: {s})...\n", .{ provider.getName(), since_str });
printInfo("Fetching releases from {s}...\n", .{provider.getName()});
// Start timing
const start_time = std.time.milliTimestamp();
if (provider.fetchReleases(allocator)) |all_releases| {
defer {
for (all_releases.items) |release| {
release.deinit(allocator);
}
all_releases.deinit();
}
const releases_or_err = provider.fetchReleases(allocator);
const end_time = std.time.milliTimestamp();
const duration_ms: u64 = @intCast(end_time - start_time);
result.duration_ms = duration_ms;
// Filter releases newer than latest known release
const filtered = filterNewReleases(allocator, all_releases.items, latest_release_date) catch |err| {
const error_msg = std.fmt.allocPrint(allocator, "Error filtering releases: {}", .{err}) catch "Unknown filter error";
result.error_msg = error_msg;
return;
};
// Calculate duration
const end_time = std.time.milliTimestamp();
const duration_ms: u64 = @intCast(end_time - start_time);
result.duration_ms = duration_ms;
result.releases = filtered;
printInfo("✓ {s}: Found {} new releases in {d}ms\n", .{ provider.getName(), filtered.items.len, duration_ms });
if (releases_or_err) |all_releases| {
result.releases = all_releases;
printInfo("✓ {s}: Found {} releases in {d}ms\n", .{ provider.getName(), result.releases.items.len, duration_ms });
} else |err| {
// Calculate duration even for errors
const end_time = std.time.milliTimestamp();
const duration_ms: u64 = @intCast(end_time - start_time);
result.duration_ms = duration_ms;
const error_msg = std.fmt.allocPrint(allocator, "Error fetching releases: {}", .{err}) catch "Unknown fetch error";
result.error_msg = error_msg;
// Don't print error here - it will be handled in main function
@ -498,7 +358,10 @@ test "atom feed generation" {
Release{
.repo_name = "test/repo",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/repo/releases/tag/v1.0.0",
.description = "Test release",
.provider = "github",
@ -535,207 +398,6 @@ test "atom feed generation" {
try std.testing.expect(std.mem.indexOf(u8, atom_content, "<category term=\"github\"/>") != null);
}
test "loadExistingReleases with valid XML" {
const allocator = std.testing.allocator;
// Test XML content
const test_xml =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<feed xmlns="http://www.w3.org/2005/Atom">
\\<title>Repository Releases</title>
\\<entry>
\\ <title>test/repo - v1.0.0</title>
\\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
\\ <updated>2024-01-01T00:00:00Z</updated>
\\ <summary>Test release</summary>
\\ <category term="github"/>
\\</entry>
\\</feed>
;
// Parse releases directly from XML content
var releases = try parseReleasesFromXml(allocator, test_xml);
defer {
for (releases.items) |release| {
release.deinit(allocator);
}
releases.deinit();
}
try std.testing.expectEqual(@as(usize, 1), releases.items.len);
try std.testing.expectEqualStrings("test/repo", releases.items[0].repo_name);
try std.testing.expectEqualStrings("v1.0.0", releases.items[0].tag_name);
}
test "loadExistingReleases with nonexistent file" {
const allocator = std.testing.allocator;
var releases = try loadExistingReleases(allocator, "nonexistent_file.xml");
defer releases.deinit();
try std.testing.expectEqual(@as(usize, 0), releases.items.len);
}
test "loadExistingReleases with malformed XML" {
const allocator = std.testing.allocator;
const malformed_xml = "This is not valid XML at all!";
// Should handle gracefully and return empty list
var releases = try parseReleasesFromXml(allocator, malformed_xml);
defer releases.deinit();
try std.testing.expectEqual(@as(usize, 0), releases.items.len);
}
test "parseReleaseTimestamp with various formats" {
// Test ISO 8601 format
const timestamp1 = try parseReleaseTimestamp("2024-01-01T00:00:00Z");
try std.testing.expect(timestamp1 > 0);
// Test direct timestamp
const timestamp2 = try parseReleaseTimestamp("1704067200");
try std.testing.expectEqual(@as(i64, 1704067200), timestamp2);
// Test invalid format (should return 0)
const timestamp3 = parseReleaseTimestamp("invalid") catch 0;
try std.testing.expectEqual(@as(i64, 0), timestamp3);
// Test empty string
const timestamp4 = parseReleaseTimestamp("") catch 0;
try std.testing.expectEqual(@as(i64, 0), timestamp4);
// Test different ISO formats
const timestamp5 = try parseReleaseTimestamp("2024-12-25T15:30:45Z");
try std.testing.expect(timestamp5 > timestamp1);
}
test "filterNewReleases correctly filters by timestamp" {
const allocator = std.testing.allocator;
const old_release = Release{
.repo_name = "test/old",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.html_url = "https://github.com/test/old/releases/tag/v1.0.0",
.description = "Old release",
.provider = "github",
};
const new_release = Release{
.repo_name = "test/new",
.tag_name = "v2.0.0",
.published_at = "2024-06-01T00:00:00Z",
.html_url = "https://github.com/test/new/releases/tag/v2.0.0",
.description = "New release",
.provider = "github",
};
const all_releases = [_]Release{ old_release, new_release };
// Filter with timestamp between the two releases
const march_timestamp = try parseReleaseTimestamp("2024-03-01T00:00:00Z");
var filtered = try filterNewReleases(allocator, &all_releases, march_timestamp);
defer {
for (filtered.items) |release| {
release.deinit(allocator);
}
filtered.deinit();
}
// Should only contain the new release
try std.testing.expectEqual(@as(usize, 1), filtered.items.len);
try std.testing.expectEqualStrings("test/new", filtered.items[0].repo_name);
}
test "loadExistingReleases handles various XML structures" {
const allocator = std.testing.allocator;
// Test with minimal valid XML
const minimal_xml =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<feed xmlns="http://www.w3.org/2005/Atom">
\\<title>Repository Releases</title>
\\<entry>
\\ <title>minimal/repo - v1.0.0</title>
\\ <link href="https://github.com/minimal/repo/releases/tag/v1.0.0"/>
\\ <updated>2024-01-01T00:00:00Z</updated>
\\</entry>
\\</feed>
;
// Parse releases directly from XML content
var releases = try parseReleasesFromXml(allocator, minimal_xml);
defer {
for (releases.items) |release| {
release.deinit(allocator);
}
releases.deinit();
}
try std.testing.expectEqual(@as(usize, 1), releases.items.len);
try std.testing.expectEqualStrings("minimal/repo", releases.items[0].repo_name);
try std.testing.expectEqualStrings("v1.0.0", releases.items[0].tag_name);
try std.testing.expectEqualStrings("2024-01-01T00:00:00Z", releases.items[0].published_at);
}
test "loadExistingReleases with complex XML content" {
const allocator = std.testing.allocator;
// Test with complex XML including escaped characters and multiple entries
const complex_xml =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<feed xmlns="http://www.w3.org/2005/Atom">
\\<title>Repository Releases</title>
\\<subtitle>New releases from starred repositories</subtitle>
\\<link href="https://github.com" rel="alternate"/>
\\<link href="https://example.com/releases.xml" rel="self"/>
\\<id>https://example.com/releases</id>
\\<updated>2024-01-01T00:00:00Z</updated>
\\<entry>
\\ <title>complex/repo &amp; more - v1.0.0 &lt;beta&gt;</title>
\\ <link href="https://github.com/complex/repo/releases/tag/v1.0.0"/>
\\ <id>https://github.com/complex/repo/releases/tag/v1.0.0</id>
\\ <updated>2024-01-01T00:00:00Z</updated>
\\ <author><name>github</name></author>
\\ <summary>Release with &quot;special&quot; characters &amp; symbols</summary>
\\ <category term="github"/>
\\</entry>
\\<entry>
\\ <title>another/repo - v2.0.0</title>
\\ <link href="https://gitlab.com/another/repo/-/releases/v2.0.0"/>
\\ <id>https://gitlab.com/another/repo/-/releases/v2.0.0</id>
\\ <updated>2024-01-02T12:30:45Z</updated>
\\ <author><name>gitlab</name></author>
\\ <summary>Another release</summary>
\\ <category term="gitlab"/>
\\</entry>
\\</feed>
;
// Parse releases directly from XML content
var releases = try parseReleasesFromXml(allocator, complex_xml);
defer {
for (releases.items) |release| {
release.deinit(allocator);
}
releases.deinit();
}
try std.testing.expectEqual(@as(usize, 2), releases.items.len);
// Check first release with escaped characters
try std.testing.expectEqualStrings("complex/repo & more", releases.items[0].repo_name);
try std.testing.expectEqualStrings("v1.0.0 <beta>", releases.items[0].tag_name);
try std.testing.expectEqualStrings("Release with \"special\" characters & symbols", releases.items[0].description);
try std.testing.expectEqualStrings("github", releases.items[0].provider);
// Check second release
try std.testing.expectEqualStrings("another/repo", releases.items[1].repo_name);
try std.testing.expectEqualStrings("v2.0.0", releases.items[1].tag_name);
try std.testing.expectEqualStrings("gitlab", releases.items[1].provider);
}
test "formatTimestampForDisplay produces valid ISO dates" {
const allocator = std.testing.allocator;
@ -752,87 +414,6 @@ test "formatTimestampForDisplay produces valid ISO dates" {
try std.testing.expect(std.mem.indexOf(u8, known_result, "T") != null);
}
test "XML parsing handles malformed entries gracefully" {
const allocator = std.testing.allocator;
// Test with partially malformed XML (missing closing tags, etc.)
const malformed_xml =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<feed xmlns="http://www.w3.org/2005/Atom">
\\<title>Repository Releases</title>
\\<entry>
\\ <title>good/repo - v1.0.0</title>
\\ <link href="https://github.com/good/repo/releases/tag/v1.0.0"/>
\\ <updated>2024-01-01T00:00:00Z</updated>
\\</entry>
\\<entry>
\\ <title>broken/repo - v2.0.0
\\ <link href="https://github.com/broken/repo/releases/tag/v2.0.0"/>
\\ <updated>2024-01-02T00:00:00Z</updated>
\\</entry>
\\<entry>
\\ <title>another/good - v3.0.0</title>
\\ <link href="https://github.com/another/good/releases/tag/v3.0.0"/>
\\ <updated>2024-01-03T00:00:00Z</updated>
\\</entry>
\\</feed>
;
var releases = try xml_parser.parseAtomFeed(allocator, malformed_xml);
defer {
for (releases.items) |release| {
release.deinit(allocator);
}
releases.deinit();
}
// Should parse the good entries and skip/handle the malformed one gracefully
try std.testing.expect(releases.items.len >= 2);
// Check that we got the good entries
var found_good = false;
var found_another_good = false;
for (releases.items) |release| {
if (std.mem.eql(u8, release.repo_name, "good/repo")) {
found_good = true;
}
if (std.mem.eql(u8, release.repo_name, "another/good")) {
found_another_good = true;
}
}
try std.testing.expect(found_good);
try std.testing.expect(found_another_good);
}
test "compareReleasesByDate" {
const release1 = Release{
.repo_name = "test/repo1",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.html_url = "https://github.com/test/repo1/releases/tag/v1.0.0",
.description = "First release",
.provider = "github",
};
const release2 = Release{
.repo_name = "test/repo2",
.tag_name = "v2.0.0",
.published_at = "2024-01-02T00:00:00Z",
.html_url = "https://github.com/test/repo2/releases/tag/v2.0.0",
.description = "Second release",
.provider = "github",
};
// release2 should come before release1 (more recent first)
try std.testing.expect(compareReleasesByDate({}, release2, release1));
try std.testing.expect(!compareReleasesByDate({}, release1, release2));
}
// Import XML parser tests
test {
std.testing.refAllDecls(@import("xml_parser_tests.zig"));
}
test "Age-based release filtering" {
const allocator = std.testing.allocator;
@ -844,32 +425,29 @@ test "Age-based release filtering" {
const recent_release = Release{
.repo_name = "test/recent",
.tag_name = "v1.0.0",
.published_at = try std.fmt.allocPrint(allocator, "{}", .{now - 86400}), // 1 day ago
.published_at = now - std.time.s_per_day, // 1 day ago
.html_url = "https://github.com/test/recent/releases/tag/v1.0.0",
.description = "Recent release",
.provider = "github",
};
defer allocator.free(recent_release.published_at);
const old_release = Release{
.repo_name = "test/old",
.tag_name = "v0.1.0",
.published_at = try std.fmt.allocPrint(allocator, "{}", .{two_years_ago}),
.published_at = two_years_ago,
.html_url = "https://github.com/test/old/releases/tag/v0.1.0",
.description = "Old release",
.provider = "github",
};
defer allocator.free(old_release.published_at);
const borderline_release = Release{
.repo_name = "test/borderline",
.tag_name = "v0.5.0",
.published_at = try std.fmt.allocPrint(allocator, "{}", .{one_year_ago + 3600}), // 1 hour within limit
.published_at = one_year_ago + std.time.s_per_hour, // 1 hour within limit
.html_url = "https://github.com/test/borderline/releases/tag/v0.5.0",
.description = "Borderline release",
.provider = "github",
};
defer allocator.free(borderline_release.published_at);
const releases = [_]Release{ recent_release, old_release, borderline_release };
@ -880,7 +458,7 @@ test "Age-based release filtering" {
const cutoff_time = now - RELEASE_AGE_LIMIT_SECONDS;
for (releases) |release| {
const release_time = parseReleaseTimestamp(release.published_at) catch 0;
const release_time = release.published_at;
if (release_time >= cutoff_time) {
try filtered.append(release);
}
@ -909,7 +487,7 @@ test "Age-based release filtering" {
try std.testing.expect(!found_old);
}
// Import timestamp tests
// Import others
test {
std.testing.refAllDecls(@import("timestamp_tests.zig"));
std.testing.refAllDecls(@import("atom.zig"));

View file

@ -3,7 +3,7 @@ const http = std.http;
const json = std.json;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const zeit = @import("zeit");
const utils = @import("../utils.zig");
const Release = @import("../main.zig").Release;
const Provider = @import("../Provider.zig");
@ -235,7 +235,7 @@ fn getRepoReleases(allocator: Allocator, client: *http.Client, token: []const u8
const release = Release{
.repo_name = try allocator.dupe(u8, repo),
.tag_name = try allocator.dupe(u8, tag_name_value.string),
.published_at = try allocator.dupe(u8, published_at_value.string),
.published_at = try utils.parseReleaseTimestamp(published_at_value.string),
.html_url = try allocator.dupe(u8, html_url_value.string),
.description = try allocator.dupe(u8, body_str),
.provider = try allocator.dupe(u8, "codeberg"),
@ -249,31 +249,11 @@ fn getRepoReleases(allocator: Allocator, client: *http.Client, token: []const u8
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
std.mem.sort(Release, releases.items, {}, utils.compareReleasesByDate);
return releases;
}
fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
_ = context;
const timestamp_a = parseTimestamp(a.published_at) catch 0;
const timestamp_b = parseTimestamp(b.published_at) catch 0;
return timestamp_a > timestamp_b; // Most recent first
}
fn parseTimestamp(date_str: []const u8) !i64 {
// Try parsing as direct timestamp first
if (std.fmt.parseInt(i64, date_str, 10)) |timestamp| {
return timestamp;
} else |_| {
// Try parsing as ISO 8601 format using Zeit
const instant = zeit.instant(.{
.source = .{ .iso8601 = date_str },
}) catch return 0;
return @intCast(instant.timestamp);
}
}
test "codeberg provider name" {
const allocator = std.testing.allocator;
_ = allocator;
@ -343,7 +323,7 @@ test "codeberg release parsing with live data snapshot" {
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
std.mem.sort(Release, releases.items, {}, utils.compareReleasesByDate);
// Verify parsing and sorting
try std.testing.expectEqual(@as(usize, 3), releases.items.len);

View file

@ -4,7 +4,7 @@ const json = std.json;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const Thread = std.Thread;
const zeit = @import("zeit");
const utils = @import("../utils.zig");
const Release = @import("../main.zig").Release;
const Provider = @import("../Provider.zig");
@ -53,8 +53,8 @@ pub fn fetchReleases(self: *Self, allocator: Allocator) !ArrayList(Release) {
}
const starred_duration: u64 = @intCast(starred_end_time - starred_start_time);
std.log.debug("GitHub: Found {} starred repositories in {}ms\n", .{ starred_repos.items.len, starred_duration });
std.log.debug("GitHub: Processing {} starred repositories with thread pool...\n", .{starred_repos.items.len});
std.log.debug("GitHub: Found {} starred repositories in {}ms", .{ starred_repos.items.len, starred_duration });
std.log.debug("GitHub: Processing {} starred repositories with thread pool...", .{starred_repos.items.len});
const thread_start_time = std.time.milliTimestamp();
@ -116,6 +116,9 @@ pub fn fetchReleases(self: *Self, allocator: Allocator) !ArrayList(Release) {
std.log.debug("GitHub: Thread pool completed in {}ms using {} threads ({} successful, {} failed)\n", .{ thread_duration, thread_count, successful_repos, failed_repos });
std.log.debug("GitHub: Total time (including pagination): {}ms\n", .{total_duration});
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
return releases;
}
@ -394,7 +397,7 @@ fn getRepoReleases(allocator: Allocator, client: *http.Client, token: []const u8
const release = Release{
.repo_name = try allocator.dupe(u8, repo),
.tag_name = try allocator.dupe(u8, obj.get("tag_name").?.string),
.published_at = try allocator.dupe(u8, obj.get("published_at").?.string),
.published_at = try utils.parseReleaseTimestamp(obj.get("published_at").?.string),
.html_url = try allocator.dupe(u8, obj.get("html_url").?.string),
.description = try allocator.dupe(u8, body_str),
.provider = try allocator.dupe(u8, "github"),
@ -403,30 +406,12 @@ fn getRepoReleases(allocator: Allocator, client: *http.Client, token: []const u8
try releases.append(release);
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
return releases;
}
/// std.mem.sort comparator: releases with newer publish dates sort first.
fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
    _ = context;
    // Dates that fail to parse collapse to epoch (0) and sink to the end.
    const first = parseTimestamp(a.published_at) catch 0;
    const second = parseTimestamp(b.published_at) catch 0;
    return first > second;
}
fn parseTimestamp(date_str: []const u8) !i64 {
// Try parsing as direct timestamp first
if (std.fmt.parseInt(i64, date_str, 10)) |timestamp| {
return timestamp;
} else |_| {
// Try parsing as ISO 8601 format using Zeit
const instant = zeit.instant(.{
.source = .{ .iso8601 = date_str },
}) catch return 0;
return @intCast(instant.timestamp);
}
return a.published_at > b.published_at;
}
test "github provider" {
@ -497,7 +482,7 @@ test "github release parsing with live data snapshot" {
const release = Release{
.repo_name = try allocator.dupe(u8, "example/repo"),
.tag_name = try allocator.dupe(u8, obj.get("tag_name").?.string),
.published_at = try allocator.dupe(u8, obj.get("published_at").?.string),
.published_at = try utils.parseReleaseTimestamp(obj.get("published_at").?.string),
.html_url = try allocator.dupe(u8, obj.get("html_url").?.string),
.description = try allocator.dupe(u8, body_str),
.provider = try allocator.dupe(u8, "github"),
@ -514,6 +499,6 @@ test "github release parsing with live data snapshot" {
try std.testing.expectEqualStrings("v1.2.0", releases.items[0].tag_name);
try std.testing.expectEqualStrings("v1.1.0", releases.items[1].tag_name);
try std.testing.expectEqualStrings("v1.0.0", releases.items[2].tag_name);
try std.testing.expectEqualStrings("2024-01-15T10:30:00Z", releases.items[0].published_at);
try std.testing.expectEqual(try @import("zeit").instant(.{ .source = .{ .iso8601 = "2024-01-15T10:30:00Z" } }), releases.items[0].published_at);
try std.testing.expectEqualStrings("github", releases.items[0].provider);
}

View file

@ -3,7 +3,7 @@ const http = std.http;
const json = std.json;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const zeit = @import("zeit");
const utils = @import("../utils.zig");
const Release = @import("../main.zig").Release;
const Provider = @import("../Provider.zig");
@ -222,7 +222,7 @@ fn getProjectReleases(allocator: Allocator, client: *http.Client, token: []const
const release = Release{
.repo_name = try allocator.dupe(u8, obj.get("name").?.string),
.tag_name = try allocator.dupe(u8, obj.get("tag_name").?.string),
.published_at = try allocator.dupe(u8, obj.get("created_at").?.string),
.published_at = try utils.parseReleaseTimestamp(obj.get("created_at").?.string),
.html_url = try allocator.dupe(u8, obj.get("_links").?.object.get("self").?.string),
.description = try allocator.dupe(u8, desc_str),
.provider = try allocator.dupe(u8, "gitlab"),
@ -236,31 +236,11 @@ fn getProjectReleases(allocator: Allocator, client: *http.Client, token: []const
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
std.mem.sort(Release, releases.items, {}, utils.compareReleasesByDate);
return releases;
}
/// Sort comparator for std.mem.sort: orders releases newest-first by
/// parsed publish date.
fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
    _ = context;
    // Unparseable dates are treated as timestamp 0, so they sort last.
    const lhs = parseTimestamp(a.published_at) catch 0;
    const rhs = parseTimestamp(b.published_at) catch 0;
    return lhs > rhs;
}
/// Convert a release date string to a Unix timestamp.
/// Accepts either a bare decimal integer (returned as-is) or an ISO 8601
/// date string parsed with zeit. Unparseable ISO strings yield 0, so in
/// practice this function never returns an error despite the `!i64` type.
fn parseTimestamp(date_str: []const u8) !i64 {
    // Try parsing as direct timestamp first
    if (std.fmt.parseInt(i64, date_str, 10)) |timestamp| {
        return timestamp;
    } else |_| {
        // Try parsing as ISO 8601 format using Zeit
        const instant = zeit.instant(.{
            .source = .{ .iso8601 = date_str },
        }) catch return 0;
        // NOTE(review): zeit's Instant timestamp appears to be in
        // nanoseconds elsewhere (utils.parseReleaseTimestamp divides by
        // std.time.ns_per_s) — confirm the unit returned here.
        return @intCast(instant.timestamp);
    }
}
test "gitlab provider" {
const allocator = std.testing.allocator;
@ -348,7 +328,7 @@ test "gitlab release parsing with live data snapshot" {
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
std.mem.sort(Release, releases.items, {}, utils.compareReleasesByDate);
// Verify parsing and sorting
try std.testing.expectEqual(@as(usize, 3), releases.items.len);

View file

@ -3,7 +3,7 @@ const http = std.http;
const json = std.json;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const zeit = @import("zeit");
const utils = @import("../utils.zig");
const Release = @import("../main.zig").Release;
const Provider = @import("../Provider.zig");
@ -54,7 +54,7 @@ pub fn fetchReleasesForReposFiltered(self: *Self, allocator: Allocator, reposito
var latest_date: i64 = 0;
for (existing_releases) |release| {
if (std.mem.eql(u8, release.provider, "sourcehut")) {
const release_time = parseReleaseTimestamp(release.published_at) catch 0;
const release_time = utils.parseReleaseTimestamp(release.published_at) catch 0;
if (release_time > latest_date) {
latest_date = release_time;
}
@ -153,7 +153,7 @@ fn fetchReleasesMultiRepo(allocator: Allocator, client: *http.Client, token: []c
const release = Release{
.repo_name = try std.fmt.allocPrint(allocator, "~{s}/{s}", .{ tag_data.username, tag_data.reponame }),
.tag_name = try allocator.dupe(u8, tag_data.tag_name),
.published_at = try allocator.dupe(u8, commit_date),
.published_at = try utils.parseReleaseTimestamp(commit_date),
.html_url = try std.fmt.allocPrint(allocator, "https://git.sr.ht/~{s}/{s}/refs/{s}", .{ tag_data.username, tag_data.reponame, tag_data.tag_name }),
.description = try std.fmt.allocPrint(allocator, "Tag {s} (commit: {s})", .{ tag_data.tag_name, tag_data.commit_id }),
.provider = try allocator.dupe(u8, "sourcehut"),
@ -166,7 +166,7 @@ fn fetchReleasesMultiRepo(allocator: Allocator, client: *http.Client, token: []c
}
// Sort releases by date (most recent first)
std.mem.sort(Release, releases.items, {}, compareReleasesByDate);
std.mem.sort(Release, releases.items, {}, utils.compareReleasesByDate);
return releases;
}
@ -567,30 +567,6 @@ fn makeGraphQLRequest(allocator: Allocator, client: *http.Client, token: []const
return try req.reader().readAllAlloc(allocator, 10 * 1024 * 1024);
}
/// Comparator for std.mem.sort: most recent release first.
fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
    _ = context;
    const a_ts = parseTimestamp(a.published_at) catch 0;
    const b_ts = parseTimestamp(b.published_at) catch 0;
    return a_ts > b_ts; // descending by timestamp
}
/// Named alias of parseTimestamp so call sites read as "release
/// timestamp" parsing; behavior is identical.
fn parseReleaseTimestamp(date_str: []const u8) !i64 {
    return parseTimestamp(date_str);
}
/// Parse a date string into a Unix timestamp.
/// A bare integer string is returned verbatim; anything else is treated
/// as ISO 8601 and parsed with zeit. A failed zeit parse yields 0 rather
/// than an error, so the `!i64` error set is never produced in practice.
fn parseTimestamp(date_str: []const u8) !i64 {
    // Try parsing as direct timestamp first
    if (std.fmt.parseInt(i64, date_str, 10)) |timestamp| {
        return timestamp;
    } else |_| {
        // Try parsing as ISO 8601 format using Zeit
        const instant = zeit.instant(.{
            .source = .{ .iso8601 = date_str },
        }) catch return 0;
        // NOTE(review): confirm whether instant.timestamp is seconds or
        // nanoseconds — utils.parseReleaseTimestamp divides by ns_per_s.
        return @intCast(instant.timestamp);
    }
}
fn filterNewReleases(allocator: Allocator, all_releases: []const Release, since_timestamp: i64) !ArrayList(Release) {
var new_releases = ArrayList(Release).init(allocator);
errdefer {
@ -602,7 +578,7 @@ fn filterNewReleases(allocator: Allocator, all_releases: []const Release, since_
for (all_releases) |release| {
// Parse the published_at timestamp
const release_time = parseReleaseTimestamp(release.published_at) catch continue;
const release_time = utils.parseReleaseTimestamp(release.published_at) catch continue;
if (release_time > since_timestamp) {
// This is a new release, duplicate it for our list

View file

@ -1,7 +1,8 @@
const std = @import("std");
const zeit = @import("zeit");
const main = @import("main.zig");
const utils = @import("utils.zig");
const config = @import("config.zig");
const xml_parser = @import("xml_parser.zig");
const Release = main.Release;
@ -46,7 +47,7 @@ test "parseReleaseTimestamp handles edge cases" {
};
for (test_cases) |test_case| {
const result = main.parseReleaseTimestamp(test_case.input) catch 0;
const result = utils.parseReleaseTimestamp(test_case.input) catch 0;
if (test_case.expected_valid) {
try std.testing.expect(result > 0);
} else {
@ -55,147 +56,26 @@ test "parseReleaseTimestamp handles edge cases" {
}
// Test the special case of "0" timestamp - this should return 0
const zero_result = main.parseReleaseTimestamp("0") catch 0;
const zero_result = utils.parseReleaseTimestamp("0") catch 0;
try std.testing.expectEqual(@as(i64, 0), zero_result);
// Test specific known values
const known_timestamp = main.parseReleaseTimestamp("1704067200") catch 0;
const known_timestamp = utils.parseReleaseTimestamp("1704067200") catch 0;
try std.testing.expectEqual(@as(i64, 1704067200), known_timestamp);
// Test that date-only format works
const date_only_result = main.parseReleaseTimestamp("2024-01-01") catch 0;
const date_only_result = utils.parseReleaseTimestamp("2024-01-01") catch 0;
try std.testing.expectEqual(@as(i64, 1704067200), date_only_result);
}
test "filterNewReleases with various timestamp scenarios" {
const allocator = std.testing.allocator;
const releases = [_]Release{
Release{
.repo_name = "test/very-old",
.tag_name = "v0.1.0",
.published_at = "2023-01-01T00:00:00Z",
.html_url = "https://github.com/test/very-old/releases/tag/v0.1.0",
.description = "Very old release",
.provider = "github",
},
Release{
.repo_name = "test/old",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.html_url = "https://github.com/test/old/releases/tag/v1.0.0",
.description = "Old release",
.provider = "github",
},
Release{
.repo_name = "test/recent",
.tag_name = "v2.0.0",
.published_at = "2024-06-01T00:00:00Z",
.html_url = "https://github.com/test/recent/releases/tag/v2.0.0",
.description = "Recent release",
.provider = "github",
},
Release{
.repo_name = "test/newest",
.tag_name = "v3.0.0",
.published_at = "2024-12-01T00:00:00Z",
.html_url = "https://github.com/test/newest/releases/tag/v3.0.0",
.description = "Newest release",
.provider = "github",
},
};
// Test filtering from beginning of time (should get all)
{
var filtered = try main.filterNewReleases(allocator, &releases, 0);
defer {
for (filtered.items) |release| {
release.deinit(allocator);
}
filtered.deinit();
}
try std.testing.expectEqual(@as(usize, 4), filtered.items.len);
}
// Test filtering from middle of 2024 (should get recent and newest)
{
const march_2024 = main.parseReleaseTimestamp("2024-03-01T00:00:00Z") catch 0;
var filtered = try main.filterNewReleases(allocator, &releases, march_2024);
defer {
for (filtered.items) |release| {
release.deinit(allocator);
}
filtered.deinit();
}
try std.testing.expectEqual(@as(usize, 2), filtered.items.len);
// Should contain recent and newest
var found_recent = false;
var found_newest = false;
for (filtered.items) |release| {
if (std.mem.eql(u8, release.repo_name, "test/recent")) {
found_recent = true;
}
if (std.mem.eql(u8, release.repo_name, "test/newest")) {
found_newest = true;
}
}
try std.testing.expect(found_recent);
try std.testing.expect(found_newest);
}
// Test filtering from future (should get none)
{
const future = main.parseReleaseTimestamp("2025-01-01T00:00:00Z") catch 0;
var filtered = try main.filterNewReleases(allocator, &releases, future);
defer {
for (filtered.items) |release| {
release.deinit(allocator);
}
filtered.deinit();
}
try std.testing.expectEqual(@as(usize, 0), filtered.items.len);
}
}
test "XML parsing preserves timestamp precision" {
const allocator = std.testing.allocator;
const precise_xml =
\\<?xml version="1.0" encoding="UTF-8"?>
\\<feed xmlns="http://www.w3.org/2005/Atom">
\\<title>Repository Releases</title>
\\<entry>
\\ <title>precise/repo - v1.0.0</title>
\\ <link href="https://github.com/precise/repo/releases/tag/v1.0.0"/>
\\ <updated>2024-06-15T14:30:45Z</updated>
\\ <summary>Precise timestamp test</summary>
\\ <category term="github"/>
\\</entry>
\\</feed>
;
var releases = try xml_parser.parseAtomFeed(allocator, precise_xml);
defer {
for (releases.items) |release| {
release.deinit(allocator);
}
releases.deinit();
}
try std.testing.expectEqual(@as(usize, 1), releases.items.len);
try std.testing.expectEqualStrings("2024-06-15T14:30:45Z", releases.items[0].published_at);
// Verify the timestamp can be parsed correctly
const parsed_timestamp = main.parseReleaseTimestamp(releases.items[0].published_at) catch 0;
try std.testing.expect(parsed_timestamp > 0);
}
test "compareReleasesByDate with various timestamp formats" {
const release_iso_early = Release{
.repo_name = "test/iso-early",
.tag_name = "v1.0.0",
.published_at = "2024-01-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/iso-early/releases/tag/v1.0.0",
.description = "Early ISO format",
.provider = "github",
@ -204,7 +84,10 @@ test "compareReleasesByDate with various timestamp formats" {
const release_iso_late = Release{
.repo_name = "test/iso-late",
.tag_name = "v2.0.0",
.published_at = "2024-12-01T00:00:00Z",
.published_at = @intCast(@divTrunc(
(try zeit.instant(.{ .source = .{ .iso8601 = "2024-12-01T00:00:00Z" } })).timestamp,
std.time.ns_per_s,
)),
.html_url = "https://github.com/test/iso-late/releases/tag/v2.0.0",
.description = "Late ISO format",
.provider = "github",
@ -213,17 +96,17 @@ test "compareReleasesByDate with various timestamp formats" {
const release_invalid = Release{
.repo_name = "test/invalid",
.tag_name = "v3.0.0",
.published_at = "invalid-date",
.published_at = 0,
.html_url = "https://github.com/test/invalid/releases/tag/v3.0.0",
.description = "Invalid format",
.provider = "github",
};
// Later date should come before earlier date (more recent first)
try std.testing.expect(main.compareReleasesByDate({}, release_iso_late, release_iso_early));
try std.testing.expect(!main.compareReleasesByDate({}, release_iso_early, release_iso_late));
try std.testing.expect(utils.compareReleasesByDate({}, release_iso_late, release_iso_early));
try std.testing.expect(!utils.compareReleasesByDate({}, release_iso_early, release_iso_late));
// Invalid timestamps should be treated as 0 and come last
try std.testing.expect(main.compareReleasesByDate({}, release_iso_early, release_invalid));
try std.testing.expect(main.compareReleasesByDate({}, release_iso_late, release_invalid));
try std.testing.expect(utils.compareReleasesByDate({}, release_iso_early, release_invalid));
try std.testing.expect(utils.compareReleasesByDate({}, release_iso_late, release_invalid));
}

73
src/utils.zig Normal file
View file

@ -0,0 +1,73 @@
const std = @import("std");
const zeit = @import("zeit");
const Release = @import("main.zig").Release;
/// Parse a timestamp string into a Unix timestamp (seconds since epoch).
/// Accepts either a bare decimal integer, which is returned verbatim, or
/// an ISO 8601 date string parsed via zeit. zeit parse errors are
/// propagated to the caller (and logged outside of tests).
pub fn parseReleaseTimestamp(date_str: []const u8) !i64 {
    // Fast path: the string is already a decimal Unix timestamp.
    const as_int = std.fmt.parseInt(i64, date_str, 10) catch {
        // Slow path: interpret the string as an ISO 8601 date.
        const instant = zeit.instant(.{
            .source = .{ .iso8601 = date_str },
        }) catch |err| {
            // Keep test output quiet; tests intentionally feed bad input.
            if (!@import("builtin").is_test)
                std.log.err("Error parsing date_str: {s}", .{date_str});
            return err;
        };
        // zeit reports nanoseconds; the rest of the program uses seconds.
        return @intCast(@divTrunc(instant.timestamp, std.time.ns_per_s));
    };
    return as_int;
}
test "parseReleaseTimestamp with various formats" {
    // An ISO 8601 date-time parses to a positive epoch value.
    const iso = try parseReleaseTimestamp("2024-01-01T00:00:00Z");
    try std.testing.expect(iso > 0);

    // A bare integer string is returned unchanged.
    try std.testing.expectEqual(@as(i64, 1704067200), try parseReleaseTimestamp("1704067200"));

    // Fractional seconds are accepted and land after the whole-second value.
    const iso_ms = try parseReleaseTimestamp("2024-01-01T12:30:45.123Z");
    try std.testing.expect(iso_ms > iso);
}
/// std.mem.sort comparator over pre-parsed i64 timestamps:
/// newest (largest) publish date sorts first.
pub fn compareReleasesByDate(context: void, a: Release, b: Release) bool {
    _ = context;
    return b.published_at < a.published_at; // descending order
}
test "compareReleasesByDate" {
    // Two releases one day apart. published_at stores seconds since epoch,
    // so the zeit instant (nanoseconds) is divided down before storing.
    const release1 = Release{
        .repo_name = "test/repo1",
        .tag_name = "v1.0.0",
        .published_at = @intCast(@divTrunc(
            (try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-01T00:00:00Z" } })).timestamp,
            std.time.ns_per_s,
        )),
        .html_url = "https://github.com/test/repo1/releases/tag/v1.0.0",
        .description = "First release",
        .provider = "github",
    };
    const release2 = Release{
        .repo_name = "test/repo2",
        .tag_name = "v2.0.0",
        .published_at = @intCast(@divTrunc(
            (try zeit.instant(.{ .source = .{ .iso8601 = "2024-01-02T00:00:00Z" } })).timestamp,
            std.time.ns_per_s,
        )),
        .html_url = "https://github.com/test/repo2/releases/tag/v2.0.0",
        .description = "Second release",
        .provider = "github",
    };
    // release2 should come before release1 (more recent first)
    try std.testing.expect(compareReleasesByDate({}, release2, release1));
    try std.testing.expect(!compareReleasesByDate({}, release1, release2));
}

View file

@ -1,335 +0,0 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const Release = @import("main.zig").Release;
/// Errors surfaced while parsing an Atom feed.
/// OutOfMemory is included so parse helpers can allocate.
pub const ParseError = error{
    InvalidXml,
    MalformedEntry,
    OutOfMemory,
};
/// Parse an Atom feed document into a list of Releases, one per
/// <entry>...</entry> block. Malformed entries are skipped with a warning
/// on stderr; a missing closing tag ends parsing. Caller owns the list
/// and every Release in it.
pub fn parseAtomFeed(allocator: Allocator, xml_content: []const u8) !ArrayList(Release) {
    var releases = ArrayList(Release).init(allocator);
    errdefer {
        for (releases.items) |release| {
            release.deinit(allocator);
        }
        releases.deinit();
    }

    // Named tags replace the previous hard-coded 7/8 byte offsets.
    const open_tag = "<entry>";
    const close_tag = "</entry>";

    var pos: usize = 0;
    while (pos < xml_content.len) {
        // Find the next <entry>; stop when none remain.
        const entry_start = pos + (std.mem.indexOf(u8, xml_content[pos..], open_tag) orelse break);
        pos = entry_start + open_tag.len;

        // Find the matching </entry>; a missing close tag ends parsing.
        const end_offset = std.mem.indexOf(u8, xml_content[pos..], close_tag) orelse break;
        const entry_end = pos + end_offset;
        const entry_content = xml_content[entry_start .. entry_end + close_tag.len];

        if (parseEntry(allocator, entry_content)) |release| {
            try releases.append(release);
        } else |err| {
            // A bad entry is skipped rather than aborting the whole feed.
            const stderr = std.io.getStdErr().writer();
            stderr.print("Warning: Failed to parse entry: {}\n", .{err}) catch {};
        }
        pos = entry_end + close_tag.len;
    }

    return releases;
}
/// Parse a single <entry>...</entry> fragment into a Release.
/// Every field starts as an owned empty string so release.deinit (via
/// errdefer) is always safe to call; each field found in the XML frees
/// its placeholder and replaces it. Missing fields stay "".
/// Caller owns the returned Release.
fn parseEntry(allocator: Allocator, entry_xml: []const u8) !Release {
    var release = Release{
        .repo_name = try allocator.dupe(u8, ""),
        .tag_name = try allocator.dupe(u8, ""),
        .published_at = try allocator.dupe(u8, ""),
        .html_url = try allocator.dupe(u8, ""),
        .description = try allocator.dupe(u8, ""),
        .provider = try allocator.dupe(u8, ""),
    };
    errdefer release.deinit(allocator);
    // Parse title to extract repo_name and tag_name
    // (title format is "repo_name - tag_name"; lastIndexOf tolerates
    // " - " appearing inside the repo name itself)
    if (extractTagContent(entry_xml, "title", allocator)) |title| {
        defer allocator.free(title);
        if (std.mem.lastIndexOf(u8, title, " - ")) |dash_pos| {
            allocator.free(release.repo_name);
            allocator.free(release.tag_name);
            release.repo_name = try allocator.dupe(u8, title[0..dash_pos]);
            release.tag_name = try allocator.dupe(u8, title[dash_pos + 3 ..]);
        }
    }
    // Parse link href attribute
    if (extractLinkHref(entry_xml, allocator)) |url| {
        allocator.free(release.html_url);
        release.html_url = url;
    }
    // Parse updated timestamp
    if (extractTagContent(entry_xml, "updated", allocator)) |updated| {
        allocator.free(release.published_at);
        release.published_at = updated;
    }
    // Parse content (description) - try content first, then fall back to summary
    if (extractTagContent(entry_xml, "content", allocator)) |content| {
        allocator.free(release.description);
        release.description = content;
    } else if (extractTagContent(entry_xml, "summary", allocator)) |summary| {
        allocator.free(release.description);
        release.description = summary;
    }
    // Parse category term attribute (provider)
    if (extractCategoryTerm(entry_xml, allocator)) |provider| {
        allocator.free(release.provider);
        release.provider = provider;
    }
    return release;
}
/// Return the XML-unescaped text content of the first <tag_name> element
/// in `xml`, trying a bare `<tag>` form first and then an attributed form
/// such as `<content type="html">`. Allocation failures and a missing tag
/// both collapse to null. Caller frees the returned slice.
fn extractTagContent(xml: []const u8, tag_name: []const u8, allocator: Allocator) ?[]u8 {
    const open_tag = std.fmt.allocPrint(allocator, "<{s}>", .{tag_name}) catch return null;
    defer allocator.free(open_tag);
    const close_tag = std.fmt.allocPrint(allocator, "</{s}>", .{tag_name}) catch return null;
    defer allocator.free(close_tag);
    if (std.mem.indexOf(u8, xml, open_tag)) |start_pos| {
        const content_start = start_pos + open_tag.len;
        if (std.mem.indexOf(u8, xml[content_start..], close_tag)) |end_offset| {
            const content_end = content_start + end_offset;
            const content = xml[content_start..content_end];
            return unescapeXml(allocator, content) catch null;
        }
    }
    // Also try with attributes (e.g., <content type="html">)
    // NOTE(review): if a bare open tag exists but has no close tag, we
    // fall through here and may match a *later* attributed element —
    // confirm that is the intended behavior for malformed input.
    const open_tag_with_attrs = std.fmt.allocPrint(allocator, "<{s} ", .{tag_name}) catch return null;
    defer allocator.free(open_tag_with_attrs);
    if (std.mem.indexOf(u8, xml, open_tag_with_attrs)) |start_pos| {
        // Find the end of the opening tag
        if (std.mem.indexOf(u8, xml[start_pos..], ">")) |tag_end_offset| {
            const content_start = start_pos + tag_end_offset + 1;
            if (std.mem.indexOf(u8, xml[content_start..], close_tag)) |end_offset| {
                const content_end = content_start + end_offset;
                const content = xml[content_start..content_end];
                return unescapeXml(allocator, content) catch null;
            }
        }
    }
    return null;
}
/// Extract the href attribute value of the first `<link href="...">` in
/// `xml`. Returns an allocator-owned copy, or null when no link is
/// present (or allocation fails).
fn extractLinkHref(xml: []const u8, allocator: Allocator) ?[]u8 {
    const prefix = "<link href=\"";
    const at = std.mem.indexOf(u8, xml, prefix) orelse return null;
    const value_start = at + prefix.len;
    const value_len = std.mem.indexOf(u8, xml[value_start..], "\"") orelse return null;
    return allocator.dupe(u8, xml[value_start .. value_start + value_len]) catch null;
}
/// Pull the term="..." value out of the first <category> element in `xml`.
/// Caller owns the returned slice; null when absent or on alloc failure.
fn extractCategoryTerm(xml: []const u8, allocator: Allocator) ?[]u8 {
    const marker = "<category term=\"";
    const found = std.mem.indexOf(u8, xml, marker) orelse return null;
    const begin = found + marker.len;
    const quote = std.mem.indexOf(u8, xml[begin..], "\"") orelse return null;
    return allocator.dupe(u8, xml[begin .. begin + quote]) catch null;
}
/// Replace the five predefined XML entities (&lt; &gt; &amp; &quot;
/// &apos;) with their literal characters. Unrecognized '&' sequences are
/// copied through unchanged. Caller owns the returned slice.
fn unescapeXml(allocator: Allocator, input: []const u8) ![]u8 {
    var out = ArrayList(u8).init(allocator);
    defer out.deinit();

    // Checked in the same order as the original hand-rolled chain.
    const entities = [_]struct { text: []const u8, char: u8 }{
        .{ .text = "&lt;", .char = '<' },
        .{ .text = "&gt;", .char = '>' },
        .{ .text = "&amp;", .char = '&' },
        .{ .text = "&quot;", .char = '"' },
        .{ .text = "&apos;", .char = '\'' },
    };

    var i: usize = 0;
    outer: while (i < input.len) {
        if (input[i] == '&') {
            for (entities) |e| {
                if (std.mem.startsWith(u8, input[i..], e.text)) {
                    try out.append(e.char);
                    i += e.text.len;
                    continue :outer;
                }
            }
        }
        try out.append(input[i]);
        i += 1;
    }
    return out.toOwnedSlice();
}
// Tests
// Happy path: a complete entry maps every field onto the Release.
test "parse simple atom entry" {
    const allocator = std.testing.allocator;
    const entry_xml =
        \\<entry>
        \\ <title>test/repo - v1.0.0</title>
        \\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
        \\ <id>https://github.com/test/repo/releases/tag/v1.0.0</id>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <author><n>github</n></author>
        \\ <summary>Test release</summary>
        \\ <category term="github"/>
        \\</entry>
    ;
    const release = try parseEntry(allocator, entry_xml);
    defer release.deinit(allocator);
    try std.testing.expectEqualStrings("test/repo", release.repo_name);
    try std.testing.expectEqualStrings("v1.0.0", release.tag_name);
    try std.testing.expectEqualStrings("https://github.com/test/repo/releases/tag/v1.0.0", release.html_url);
    try std.testing.expectEqualStrings("2024-01-01T00:00:00Z", release.published_at);
    try std.testing.expectEqualStrings("Test release", release.description);
    try std.testing.expectEqualStrings("github", release.provider);
}
// XML entities in title and summary must be unescaped during parsing.
test "parse atom entry with escaped characters" {
    const allocator = std.testing.allocator;
    const entry_xml =
        \\<entry>
        \\ <title>test/repo&lt;script&gt; - v1.0.0 &amp; more</title>
        \\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
        \\ <id>https://github.com/test/repo/releases/tag/v1.0.0</id>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <author><n>github</n></author>
        \\ <summary>Test &quot;release&quot; with &lt;special&gt; chars &amp; symbols</summary>
        \\ <category term="github"/>
        \\</entry>
    ;
    const release = try parseEntry(allocator, entry_xml);
    defer release.deinit(allocator);
    try std.testing.expectEqualStrings("test/repo<script>", release.repo_name);
    try std.testing.expectEqualStrings("v1.0.0 & more", release.tag_name);
    try std.testing.expectEqualStrings("Test \"release\" with <special> chars & symbols", release.description);
}
// A feed with header metadata plus two entries yields two Releases in
// document order.
test "parse full atom feed" {
    const allocator = std.testing.allocator;
    const atom_xml =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<title>Repository Releases</title>
        \\<subtitle>New releases from starred repositories</subtitle>
        \\<link href="https://github.com" rel="alternate"/>
        \\<link href="https://example.com/releases.xml" rel="self"/>
        \\<id>https://example.com/releases</id>
        \\<updated>2024-01-01T00:00:00Z</updated>
        \\<entry>
        \\ <title>test/repo1 - v1.0.0</title>
        \\ <link href="https://github.com/test/repo1/releases/tag/v1.0.0"/>
        \\ <id>https://github.com/test/repo1/releases/tag/v1.0.0</id>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <author><n>github</n></author>
        \\ <summary>First release</summary>
        \\ <category term="github"/>
        \\</entry>
        \\<entry>
        \\ <title>test/repo2 - v2.0.0</title>
        \\ <link href="https://github.com/test/repo2/releases/tag/v2.0.0"/>
        \\ <id>https://github.com/test/repo2/releases/tag/v2.0.0</id>
        \\ <updated>2024-01-02T00:00:00Z</updated>
        \\ <author><n>github</n></author>
        \\ <summary>Second release</summary>
        \\ <category term="github"/>
        \\</entry>
        \\</feed>
    ;
    var releases = try parseAtomFeed(allocator, atom_xml);
    defer {
        for (releases.items) |release| {
            release.deinit(allocator);
        }
        releases.deinit();
    }
    try std.testing.expectEqual(@as(usize, 2), releases.items.len);
    try std.testing.expectEqualStrings("test/repo1", releases.items[0].repo_name);
    try std.testing.expectEqualStrings("v1.0.0", releases.items[0].tag_name);
    try std.testing.expectEqualStrings("First release", releases.items[0].description);
    try std.testing.expectEqualStrings("test/repo2", releases.items[1].repo_name);
    try std.testing.expectEqualStrings("v2.0.0", releases.items[1].tag_name);
    try std.testing.expectEqualStrings("Second release", releases.items[1].description);
}
// All five predefined XML entities are decoded in a single pass.
test "XML unescaping" {
    const allocator = std.testing.allocator;
    const input = "Test &lt;tag&gt; &amp; &quot;quotes&quot; &amp; &apos;apostrophes&apos;";
    const result = try unescapeXml(allocator, input);
    defer allocator.free(result);
    const expected = "Test <tag> & \"quotes\" & 'apostrophes'";
    try std.testing.expectEqualStrings(expected, result);
}
// Fields absent from the entry keep their empty-string placeholders
// (parseEntry initializes every field to "" before scanning).
test "parse entry with missing fields" {
    const allocator = std.testing.allocator;
    const entry_xml =
        \\<entry>
        \\ <title>test/repo - v1.0.0</title>
        \\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
        \\</entry>
    ;
    const release = try parseEntry(allocator, entry_xml);
    defer release.deinit(allocator);
    try std.testing.expectEqualStrings("test/repo", release.repo_name);
    try std.testing.expectEqualStrings("v1.0.0", release.tag_name);
    try std.testing.expectEqualStrings("https://github.com/test/repo/releases/tag/v1.0.0", release.html_url);
    // Missing fields should be empty strings
    try std.testing.expectEqualStrings("", release.published_at);
    try std.testing.expectEqualStrings("", release.description);
    try std.testing.expectEqualStrings("", release.provider);
}

View file

@ -1,285 +0,0 @@
const std = @import("std");
const testing = std.testing;
const xml_parser = @import("xml_parser.zig");
const atom = @import("atom.zig");
const Release = @import("main.zig").Release;
// Generate a feed with atom.generateFeed, re-parse it, and check the data
// survives the round trip (descriptions come back markdown-rendered).
// NOTE(review): published_at is a string literal here, but the commit
// migrates Release.published_at to i64 — this test looks stale against
// the new schema; confirm before keeping.
test "round trip: generate atom feed and parse it back" {
    const allocator = testing.allocator;
    // Create test releases
    const original_releases = [_]Release{
        Release{
            .repo_name = "test/repo1",
            .tag_name = "v1.0.0",
            .published_at = "2024-01-01T00:00:00Z",
            .html_url = "https://github.com/test/repo1/releases/tag/v1.0.0",
            .description = "First release",
            .provider = "github",
        },
        Release{
            .repo_name = "test/repo2",
            .tag_name = "v2.0.0",
            .published_at = "2024-01-02T00:00:00Z",
            .html_url = "https://github.com/test/repo2/releases/tag/v2.0.0",
            .description = "Second release",
            .provider = "github",
        },
    };
    // Generate atom feed
    const atom_content = try atom.generateFeed(allocator, &original_releases);
    defer allocator.free(atom_content);
    // Parse it back
    var parsed_releases = try xml_parser.parseAtomFeed(allocator, atom_content);
    defer {
        for (parsed_releases.items) |release| {
            release.deinit(allocator);
        }
        parsed_releases.deinit();
    }
    // Verify we got the same data back
    try testing.expectEqual(@as(usize, 2), parsed_releases.items.len);
    try testing.expectEqualStrings("test/repo1", parsed_releases.items[0].repo_name);
    try testing.expectEqualStrings("v1.0.0", parsed_releases.items[0].tag_name);
    try testing.expectEqualStrings("2024-01-01T00:00:00Z", parsed_releases.items[0].published_at);
    try testing.expectEqualStrings("https://github.com/test/repo1/releases/tag/v1.0.0", parsed_releases.items[0].html_url);
    try testing.expectEqualStrings("<p>First release</p>\n", parsed_releases.items[0].description);
    try testing.expectEqualStrings("github", parsed_releases.items[0].provider);
    try testing.expectEqualStrings("test/repo2", parsed_releases.items[1].repo_name);
    try testing.expectEqualStrings("v2.0.0", parsed_releases.items[1].tag_name);
    try testing.expectEqualStrings("2024-01-02T00:00:00Z", parsed_releases.items[1].published_at);
    try testing.expectEqualStrings("https://github.com/test/repo2/releases/tag/v2.0.0", parsed_releases.items[1].html_url);
    try testing.expectEqualStrings("<p>Second release</p>\n", parsed_releases.items[1].description);
    try testing.expectEqualStrings("github", parsed_releases.items[1].provider);
}
// Special characters must be escaped by generateFeed and restored by the
// parser; the HTML-rendered description keeps its own entity escaping.
// NOTE(review): published_at is a string here — stale against the new
// i64 Release schema introduced in this commit; confirm before keeping.
test "parse atom feed with special characters" {
    const allocator = testing.allocator;
    // Create releases with special characters
    const original_releases = [_]Release{
        Release{
            .repo_name = "test/repo<script>",
            .tag_name = "v1.0.0 & more",
            .published_at = "2024-01-01T00:00:00Z",
            .html_url = "https://github.com/test/repo/releases/tag/v1.0.0",
            .description = "Test \"release\" with <special> chars & symbols",
            .provider = "github",
        },
    };
    // Generate atom feed (this should escape the characters)
    const atom_content = try atom.generateFeed(allocator, &original_releases);
    defer allocator.free(atom_content);
    // Verify the XML contains escaped characters in the title (not in content)
    try testing.expect(std.mem.indexOf(u8, atom_content, "&lt;script&gt;") != null);
    try testing.expect(std.mem.indexOf(u8, atom_content, "&amp; more") != null);
    // The content will be XML-escaped HTML, so quotes in HTML will be &amp;quot;
    try testing.expect(std.mem.indexOf(u8, atom_content, "&amp;quot;release&amp;quot;") != null);
    // Parse it back (this should unescape the characters)
    var parsed_releases = try xml_parser.parseAtomFeed(allocator, atom_content);
    defer {
        for (parsed_releases.items) |release| {
            release.deinit(allocator);
        }
        parsed_releases.deinit();
    }
    // Verify the parsed data has the original unescaped characters
    try testing.expectEqual(@as(usize, 1), parsed_releases.items.len);
    try testing.expectEqualStrings("test/repo<script>", parsed_releases.items[0].repo_name);
    try testing.expectEqualStrings("v1.0.0 & more", parsed_releases.items[0].tag_name);
    try testing.expectEqualStrings("<pre>Test &quot;release&quot; with &lt;special&gt; chars &amp; symbols</pre>\n", parsed_releases.items[0].description);
}
// A feed with an unclosed <entry> should not abort parsing: the parser
// scans to the next </entry>, so the unclosed entry absorbs the following
// one, producing two Releases instead of three.
test "parse malformed atom feed gracefully" {
    const allocator = testing.allocator;
    const malformed_xml =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<title>Repository Releases</title>
        \\<entry>
        \\ <title>test/repo1 - v1.0.0</title>
        \\ <link href="https://github.com/test/repo1/releases/tag/v1.0.0"/>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <summary>Good entry</summary>
        \\ <category term="github"/>
        \\</entry>
        \\<entry>
        \\ <title>test/repo2 - v2.0.0</title>
        \\ <!-- Missing closing entry tag -->
        \\<entry>
        \\ <title>test/repo3 - v3.0.0</title>
        \\ <link href="https://github.com/test/repo3/releases/tag/v3.0.0"/>
        \\ <updated>2024-01-03T00:00:00Z</updated>
        \\ <summary>Another good entry</summary>
        \\ <category term="github"/>
        \\</entry>
        \\</feed>
    ;
    var parsed_releases = try xml_parser.parseAtomFeed(allocator, malformed_xml);
    defer {
        for (parsed_releases.items) |release| {
            release.deinit(allocator);
        }
        parsed_releases.deinit();
    }
    // Should parse the valid entries and skip the malformed one
    // Note: The malformed entry (repo2) will be parsed but will contain mixed content
    // The parser finds the first closing </entry> tag which belongs to repo3
    try testing.expectEqual(@as(usize, 2), parsed_releases.items.len);
    try testing.expectEqualStrings("test/repo1", parsed_releases.items[0].repo_name);
    try testing.expectEqualStrings("test/repo2", parsed_releases.items[1].repo_name); // This gets the first title found
}
test "parse empty atom feed" {
    const allocator = testing.allocator;

    // A valid feed with metadata but no <entry> elements at all.
    const empty_xml =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<title>Repository Releases</title>
        \\<subtitle>New releases from starred repositories</subtitle>
        \\<link href="https://github.com" rel="alternate"/>
        \\<link href="https://example.com/releases.xml" rel="self"/>
        \\<id>https://example.com/releases</id>
        \\<updated>2024-01-01T00:00:00Z</updated>
        \\</feed>
    ;

    var parsed_releases = try xml_parser.parseAtomFeed(allocator, empty_xml);
    defer {
        // Mirror the cleanup used by the sibling parser tests: if the parser
        // ever unexpectedly produced entries here, the expectEqual below would
        // fail AND the entries would leak, masking the real failure with a
        // spurious leak report from std.testing.allocator. Freeing them keeps
        // the failure signal clean. (No-op when the list is empty, as expected.)
        for (parsed_releases.items) |release| {
            release.deinit(allocator);
        }
        parsed_releases.deinit();
    }

    try testing.expectEqual(@as(usize, 0), parsed_releases.items.len);
}
test "parse atom feed with multiline summaries" {
    const allocator = testing.allocator;

    // Entry whose <summary> spans several lines; the line breaks must survive parsing.
    const feed_with_multiline_summary =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<title>Repository Releases</title>
        \\<entry>
        \\ <title>test/repo - v1.0.0</title>
        \\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <summary>This is a multiline
        \\summary with line breaks
        \\and multiple paragraphs</summary>
        \\ <category term="github"/>
        \\</entry>
        \\</feed>
    ;

    var parsed = try xml_parser.parseAtomFeed(allocator, feed_with_multiline_summary);
    defer {
        for (parsed.items) |release| release.deinit(allocator);
        parsed.deinit();
    }

    try testing.expectEqual(@as(usize, 1), parsed.items.len);
    try testing.expectEqualStrings(
        "This is a multiline\nsummary with line breaks\nand multiple paragraphs",
        parsed.items[0].description,
    );
}
test "parse atom feed with different providers" {
    const allocator = testing.allocator;

    // One entry per supported forge; the <category term="..."/> carries the provider.
    const multi_provider_xml =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<title>Repository Releases</title>
        \\<entry>
        \\ <title>github/repo - v1.0.0</title>
        \\ <link href="https://github.com/github/repo/releases/tag/v1.0.0"/>
        \\ <updated>2024-01-01T00:00:00Z</updated>
        \\ <summary>GitHub release</summary>
        \\ <category term="github"/>
        \\</entry>
        \\<entry>
        \\ <title>gitlab/repo - v2.0.0</title>
        \\ <link href="https://gitlab.com/gitlab/repo/-/releases/v2.0.0"/>
        \\ <updated>2024-01-02T00:00:00Z</updated>
        \\ <summary>GitLab release</summary>
        \\ <category term="gitlab"/>
        \\</entry>
        \\<entry>
        \\ <title>codeberg/repo - v3.0.0</title>
        \\ <link href="https://codeberg.org/codeberg/repo/releases/tag/v3.0.0"/>
        \\ <updated>2024-01-03T00:00:00Z</updated>
        \\ <summary>Codeberg release</summary>
        \\ <category term="codeberg"/>
        \\</entry>
        \\<entry>
        \\ <title>~user/repo - v4.0.0</title>
        \\ <link href="https://git.sr.ht/~user/repo/refs/v4.0.0"/>
        \\ <updated>2024-01-04T00:00:00Z</updated>
        \\ <summary>SourceHut release</summary>
        \\ <category term="sourcehut"/>
        \\</entry>
        \\</feed>
    ;

    var parsed = try xml_parser.parseAtomFeed(allocator, multi_provider_xml);
    defer {
        for (parsed.items) |release| release.deinit(allocator);
        parsed.deinit();
    }

    // Providers must come back in document order, one per entry.
    const expected_providers = [_][]const u8{ "github", "gitlab", "codeberg", "sourcehut" };
    try testing.expectEqual(@as(usize, expected_providers.len), parsed.items.len);
    for (expected_providers, 0..) |provider, i| {
        try testing.expectEqualStrings(provider, parsed.items[i].provider);
    }
}
test "parse atom feed with missing optional fields" {
    const allocator = testing.allocator;

    // Entry carrying only the required <title> and <link>; everything else omitted.
    const minimal_xml =
        \\<?xml version="1.0" encoding="UTF-8"?>
        \\<feed xmlns="http://www.w3.org/2005/Atom">
        \\<entry>
        \\ <title>test/repo - v1.0.0</title>
        \\ <link href="https://github.com/test/repo/releases/tag/v1.0.0"/>
        \\</entry>
        \\</feed>
    ;

    var parsed = try xml_parser.parseAtomFeed(allocator, minimal_xml);
    defer {
        for (parsed.items) |item| item.deinit(allocator);
        parsed.deinit();
    }

    try testing.expectEqual(@as(usize, 1), parsed.items.len);

    const first = parsed.items[0];
    try testing.expectEqualStrings("test/repo", first.repo_name);
    try testing.expectEqualStrings("v1.0.0", first.tag_name);
    try testing.expectEqualStrings("https://github.com/test/repo/releases/tag/v1.0.0", first.html_url);
    // Absent optional fields default to empty strings rather than failing the parse.
    try testing.expectEqualStrings("", first.published_at);
    try testing.expectEqualStrings("", first.description);
    try testing.expectEqualStrings("", first.provider);
}