rate limiter refactor
This commit is contained in:
parent
d6a104d2d5
commit
fcd85aa64e
7 changed files with 98 additions and 92 deletions
93
src/net/RateLimiter.zig
Normal file
93
src/net/RateLimiter.zig
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
//! Token-bucket rate limiter.
//!
//! Enforces a maximum number of requests per time window using the
//! token bucket algorithm. Tokens refill continuously; each request
//! consumes one token. When the bucket is empty, callers can either
//! poll with `tryAcquire` or block with `acquire`.
//!
//! NOTE(review): no internal locking is visible here — callers sharing
//! one limiter across threads must synchronize externally; confirm.

const std = @import("std");

/// Maximum tokens (requests) in the bucket
max_tokens: u32,
/// Current available tokens
tokens: f64,
/// Tokens added per nanosecond
refill_rate_per_ns: f64,
/// Last time tokens were refilled
last_refill: i128,

const RateLimiter = @This();
|
||||
|
||||
/// Create a rate limiter.
/// `max_per_window` is the max requests allowed in `window_ns` nanoseconds.
/// The bucket starts full, so the first `max_per_window` requests succeed
/// immediately.
///
/// Both arguments must be non-zero: a zero window yields an infinite
/// refill rate and a zero quota yields a rate of zero, either of which
/// later drives `@intFromFloat` on a non-finite value (illegal behavior)
/// or makes `acquire` block forever.
pub fn init(max_per_window: u32, window_ns: u64) RateLimiter {
    std.debug.assert(max_per_window > 0);
    std.debug.assert(window_ns > 0);
    return .{
        .max_tokens = max_per_window,
        .tokens = @floatFromInt(max_per_window),
        .refill_rate_per_ns = @as(f64, @floatFromInt(max_per_window)) / @as(f64, @floatFromInt(window_ns)),
        .last_refill = std.time.nanoTimestamp(),
    };
}
|
||||
|
||||
/// Convenience constructor: allow `n` requests per minute.
pub fn perMinute(n: u32) RateLimiter {
    const one_minute_ns: u64 = std.time.ns_per_min;
    return init(n, one_minute_ns);
}
|
||||
|
||||
/// Convenience constructor: allow `n` requests per day.
pub fn perDay(n: u32) RateLimiter {
    const one_day_ns: u64 = std.time.ns_per_day;
    return init(n, one_day_ns);
}
|
||||
|
||||
/// Try to acquire a token without blocking.
/// Returns true if a token was consumed, false if the caller is
/// currently rate-limited (sleep and retry, or use `acquire`).
pub fn tryAcquire(self: *RateLimiter) bool {
    self.refill();
    // Guard clause: an empty bucket (less than one whole token) rejects.
    if (self.tokens < 1.0) return false;
    self.tokens -= 1.0;
    return true;
}
|
||||
|
||||
/// Acquire a token, blocking (sleeping) until one is available.
pub fn acquire(self: *RateLimiter) void {
    while (!self.tryAcquire()) {
        // Sleep for the estimated time until one full token is available
        // rather than a fixed 1/rate quantum, so a nearly-full bucket does
        // not oversleep. Clamp to at least 1ns: with a very high refill
        // rate the float-to-int conversion truncates to 0, which would
        // degenerate this loop into a busy-spin.
        const wait_ns = @max(self.estimateWaitNs(), 1);
        std.Thread.sleep(wait_ns);
    }
}
|
||||
|
||||
/// Returns estimated wait time in nanoseconds until a token is available.
/// Returns 0 if a token is available now.
/// Sleeping for the returned duration guarantees at least one whole token
/// has accrued (absent concurrent consumers).
pub fn estimateWaitNs(self: *RateLimiter) u64 {
    self.refill();
    if (self.tokens >= 1.0) return 0;
    const deficit = 1.0 - self.tokens;
    // Round up: truncation would report a wait that still leaves the
    // bucket fractionally short of one token, forcing an extra retry.
    return @intFromFloat(@ceil(deficit / self.refill_rate_per_ns));
}
|
||||
|
||||
/// Credit tokens for the time elapsed since the last refill, capped at
/// `max_tokens`. Called lazily by the acquire paths.
fn refill(self: *RateLimiter) void {
    const now = std.time.nanoTimestamp();
    const delta = now - self.last_refill;
    // Nothing to credit if the clock has not advanced (or stepped back).
    if (delta <= 0) return;

    const cap: f64 = @floatFromInt(self.max_tokens);
    const earned = @as(f64, @floatFromInt(delta)) * self.refill_rate_per_ns;
    self.tokens = @min(self.tokens + earned, cap);
    self.last_refill = now;
}
|
||||
|
||||
test "rate limiter basic" {
    // A freshly created limiter starts with a full bucket, so the very
    // first acquisition must succeed immediately.
    var limiter = RateLimiter.perMinute(60);
    try std.testing.expect(limiter.tryAcquire());
}
|
||||
|
||||
test "rate limiter exhaustion" {
    var limiter = RateLimiter.init(2, std.time.ns_per_s);
    // Drain the two tokens granted at construction...
    try std.testing.expect(limiter.tryAcquire());
    try std.testing.expect(limiter.tryAcquire());
    // ...then the now-empty bucket must reject the next request.
    try std.testing.expect(!limiter.tryAcquire());
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
const std = @import("std");
|
||||
|
||||
/// Token-bucket rate limiter. Enforces a maximum number of requests per time window.
|
||||
pub const RateLimiter = struct {
|
||||
/// Maximum tokens (requests) in the bucket
|
||||
max_tokens: u32,
|
||||
/// Current available tokens
|
||||
tokens: f64,
|
||||
/// Tokens added per nanosecond
|
||||
refill_rate_per_ns: f64,
|
||||
/// Last time tokens were refilled
|
||||
last_refill: i128,
|
||||
|
||||
/// Create a rate limiter.
|
||||
/// `max_per_window` is the max requests allowed in `window_ns` nanoseconds.
|
||||
pub fn init(max_per_window: u32, window_ns: u64) RateLimiter {
|
||||
return .{
|
||||
.max_tokens = max_per_window,
|
||||
.tokens = @floatFromInt(max_per_window),
|
||||
.refill_rate_per_ns = @as(f64, @floatFromInt(max_per_window)) / @as(f64, @floatFromInt(window_ns)),
|
||||
.last_refill = std.time.nanoTimestamp(),
|
||||
};
|
||||
}
|
||||
|
||||
/// Convenience: N requests per minute
|
||||
pub fn perMinute(n: u32) RateLimiter {
|
||||
return init(n, 60 * std.time.ns_per_s);
|
||||
}
|
||||
|
||||
/// Convenience: N requests per day
|
||||
pub fn perDay(n: u32) RateLimiter {
|
||||
return init(n, 24 * 3600 * std.time.ns_per_s);
|
||||
}
|
||||
|
||||
/// Try to acquire a token. Returns true if granted, false if rate-limited.
|
||||
/// Caller should sleep and retry if false.
|
||||
pub fn tryAcquire(self: *RateLimiter) bool {
|
||||
self.refill();
|
||||
if (self.tokens >= 1.0) {
|
||||
self.tokens -= 1.0;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Acquire a token, blocking (sleeping) until one is available.
|
||||
pub fn acquire(self: *RateLimiter) void {
|
||||
while (!self.tryAcquire()) {
|
||||
// Sleep for the time needed to generate 1 token
|
||||
const wait_ns: u64 = @intFromFloat(1.0 / self.refill_rate_per_ns);
|
||||
std.Thread.sleep(wait_ns);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns estimated wait time in nanoseconds until a token is available.
|
||||
/// Returns 0 if a token is available now.
|
||||
pub fn estimateWaitNs(self: *RateLimiter) u64 {
|
||||
self.refill();
|
||||
if (self.tokens >= 1.0) return 0;
|
||||
const deficit = 1.0 - self.tokens;
|
||||
return @intFromFloat(deficit / self.refill_rate_per_ns);
|
||||
}
|
||||
|
||||
fn refill(self: *RateLimiter) void {
|
||||
const now = std.time.nanoTimestamp();
|
||||
const elapsed = now - self.last_refill;
|
||||
if (elapsed <= 0) return;
|
||||
|
||||
const new_tokens = @as(f64, @floatFromInt(elapsed)) * self.refill_rate_per_ns;
|
||||
self.tokens = @min(self.tokens + new_tokens, @as(f64, @floatFromInt(self.max_tokens)));
|
||||
self.last_refill = now;
|
||||
}
|
||||
};
|
||||
|
||||
test "rate limiter basic" {
|
||||
var rl = RateLimiter.perMinute(60);
|
||||
// Should have full bucket initially
|
||||
try std.testing.expect(rl.tryAcquire());
|
||||
}
|
||||
|
||||
test "rate limiter exhaustion" {
|
||||
var rl = RateLimiter.init(2, std.time.ns_per_s);
|
||||
try std.testing.expect(rl.tryAcquire());
|
||||
try std.testing.expect(rl.tryAcquire());
|
||||
// Bucket should be empty now
|
||||
try std.testing.expect(!rl.tryAcquire());
|
||||
}
|
||||
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const http = @import("../net/http.zig");
|
||||
const RateLimiter = @import("../net/rate_limiter.zig").RateLimiter;
|
||||
const RateLimiter = @import("../net/RateLimiter.zig");
|
||||
const Date = @import("../models/date.zig").Date;
|
||||
const EtfProfile = @import("../models/etf_profile.zig").EtfProfile;
|
||||
const Holding = @import("../models/etf_profile.zig").Holding;
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const http = @import("../net/http.zig");
|
||||
const RateLimiter = @import("../net/rate_limiter.zig").RateLimiter;
|
||||
const RateLimiter = @import("../net/RateLimiter.zig");
|
||||
const Date = @import("../models/date.zig").Date;
|
||||
const OptionContract = @import("../models/option.zig").OptionContract;
|
||||
const OptionsChain = @import("../models/option.zig").OptionsChain;
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const http = @import("../net/http.zig");
|
||||
const RateLimiter = @import("../net/rate_limiter.zig").RateLimiter;
|
||||
const RateLimiter = @import("../net/RateLimiter.zig");
|
||||
const Date = @import("../models/date.zig").Date;
|
||||
const OptionContract = @import("../models/option.zig").OptionContract;
|
||||
const OptionsChain = @import("../models/option.zig").OptionsChain;
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const http = @import("../net/http.zig");
|
||||
const RateLimiter = @import("../net/rate_limiter.zig").RateLimiter;
|
||||
const RateLimiter = @import("../net/RateLimiter.zig");
|
||||
const Date = @import("../models/date.zig").Date;
|
||||
const Candle = @import("../models/candle.zig").Candle;
|
||||
const Dividend = @import("../models/dividend.zig").Dividend;
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
|
||||
const std = @import("std");
|
||||
const http = @import("../net/http.zig");
|
||||
const RateLimiter = @import("../net/rate_limiter.zig").RateLimiter;
|
||||
const RateLimiter = @import("../net/RateLimiter.zig");
|
||||
const Date = @import("../models/date.zig").Date;
|
||||
const Candle = @import("../models/candle.zig").Candle;
|
||||
const provider = @import("provider.zig");
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue