Compare commits
2662591c91 ... 883bc6f52f

4 Commits

Author | SHA1 | Date
--- | --- | ---
| 883bc6f52f |
| ecbe134869 |
| 194fdb8217 |
| 180b0ec2ff |

@@ -175,7 +175,7 @@ pub fn run(allocator: ?std.mem.Allocator, event_handler: HandlerFn) !void { // T
         // TODO: We should catch these potential alloc errors too
         // TODO: This whole loop should be in another function so we can catch everything at once
         const response_url = try std.fmt.allocPrint(req_allocator, "{s}{s}{s}/{s}/response", .{ prefix, lambda_runtime_uri, postfix, req_id });
-        defer alloc.free(response_url);
+        defer req_allocator.free(response_url);
         const response_uri = try std.Uri.parse(response_url);
         const response_content = try std.fmt.allocPrint(req_allocator, "{s} \"content\": \"{s}\" {s}", .{ "{", event_response, "}" });
         var resp_req = try client.request(.POST, response_uri, empty_headers, .{});
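
The one-line fix above releases `response_url` through `req_allocator`, the allocator that `allocPrint` obtained it from, rather than through a different `alloc`. A minimal standalone sketch of that invariant, assuming an arena-backed per-request allocator; the URL pieces and test name are made up for illustration, and std API names may differ across Zig versions:

```zig
const std = @import("std");

test "free through the allocator that allocated" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit(); // reports whether anything allocated from gpa leaked
    var arena = std.heap.ArenaAllocator.init(gpa.allocator());
    defer arena.deinit();
    const req_allocator = arena.allocator();

    // Allocated from req_allocator, so it must be released through req_allocator;
    // handing it to a different allocator's free() is undefined behavior.
    const response_url = try std.fmt.allocPrint(req_allocator, "{s}/{s}/response", .{ "http://127.0.0.1:8080", "request-id" });
    defer req_allocator.free(response_url);

    try std.testing.expect(std.mem.endsWith(u8, response_url, "/response"));
}
```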

@@ -344,7 +344,11 @@ fn lambda_request(allocator: std.mem.Allocator, request: []const u8) ![]u8 {
     // booleans to know when to shut down. This function is designed for a
     // single request/response pair only
 
-    server_remaining_requests = 2; // Tell our server to run for just two requests
+    lambda_remaining_requests = 1; // in case anyone messed with this, we will make sure we start
+    server_remaining_requests = lambda_remaining_requests.? * 2; // Lambda functions
+                                                                 // fetch from the server,
+                                                                 // then post back. Always
+                                                                 // 2, no more, no less
     server_response = request; // set our instructions to lambda, which in our
                                // physical model above, is the server response
     defer server_response = "unset"; // set it back so we don't get confused later
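
The added lines derive the mock server's request budget from the lambda side instead of hard-coding 2: each invocation fetches its event and then posts its response. A tiny sketch of that arithmetic, with hypothetical globals standing in for the harness counters; their types are not shown in the diff, so `?usize` and `usize` are assumptions:

```zig
const std = @import("std");

// Stand-ins for the test-harness globals in the diff.
var lambda_remaining_requests: ?usize = null;
var server_remaining_requests: usize = 0;

test "one lambda invocation costs the mock server two requests" {
    lambda_remaining_requests = 1;
    // Each invocation performs one GET to fetch the event and one POST to
    // return the result, so the server's budget is exactly double.
    server_remaining_requests = lambda_remaining_requests.? * 2;
    try std.testing.expectEqual(@as(usize, 2), server_remaining_requests);
}
```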

@@ -359,10 +363,9 @@ fn lambda_request(allocator: std.mem.Allocator, request: []const u8) ![]u8 {
     defer server_thread.join(); // we'll be shutting everything down before we exit
 
     // Now we need to start the lambda framework, following a siimilar pattern
-    lambda_remaining_requests = 1; // in case anyone messed with this, we will make sure we start
     const lambda_thread = try test_run(allocator, handler); // We want our function under test to report leaks
     lambda_thread.join();
-    return server_request_aka_lambda_response;
+    return try allocator.dupe(u8, server_request_aka_lambda_response);
 }
 
 test "basic request" {
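
The second change in this hunk stops returning the shared `server_request_aka_lambda_response` slice directly and instead returns a copy owned by the caller's allocator, so the caller can free it and the shared buffer can be reset to "unset" without invalidating the result. A rough sketch of that ownership pattern, with a hypothetical `shared_response` global standing in for the harness variable:

```zig
const std = @import("std");

// Hypothetical stand-in for the harness's server_request_aka_lambda_response.
var shared_response: []const u8 = "unset";

fn takeResponse(allocator: std.mem.Allocator) ![]u8 {
    // Returning shared_response directly would hand out memory the caller does
    // not own and that a later request may overwrite; duplicating it gives the
    // caller a copy it can free with its own allocator.
    return try allocator.dupe(u8, shared_response);
}

test "caller owns a copy of the shared response" {
    shared_response = "{\"status\": \"ok\"}";
    const copy = try takeResponse(std.testing.allocator);
    defer std.testing.allocator.free(copy);

    shared_response = "unset"; // resetting the shared buffer no longer affects the copy
    try std.testing.expectEqualStrings("{\"status\": \"ok\"}", copy);
}
```

This ownership change is also why the test in the next hunk gains a `defer allocator.free(lambda_response);`.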

@@ -371,6 +374,12 @@ test "basic request" {
     const request =
         \\{"foo": "bar", "baz": "qux"}
     ;
+
+    // This is what's actually coming back. Is this right?
+    const expected_response =
+        \\{ "content": "{"foo": "bar", "baz": "qux"}" }
+    ;
     const lambda_response = try lambda_request(allocator, request);
-    try std.testing.expectEqualStrings(lambda_response, request);
+    defer allocator.free(lambda_response);
+    try std.testing.expectEqualStrings(expected_response, lambda_response);
 }
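
The updated test pins down the exact body shape instead of comparing the response to the request, and frees the duplicated response it now owns. The expected string mirrors the `"{s} \"content\": \"{s}\" {s}"` wrapping the runtime applies in the first hunk; a sketch of how that wrapping plays out for this request, where the test name and use of `std.testing.allocator` are illustrative:

```zig
const std = @import("std");

test "handler output is wrapped in a content field" {
    const event_response = "{\"foo\": \"bar\", \"baz\": \"qux\"}";
    // Same format string the runtime uses to build response_content in the first hunk above.
    const body = try std.fmt.allocPrint(std.testing.allocator, "{s} \"content\": \"{s}\" {s}", .{ "{", event_response, "}" });
    defer std.testing.allocator.free(body);

    // Matches the expected_response literal in the updated test; note the inner
    // quotes are not escaped, which is what the test's "Is this right?" comment
    // is questioning.
    try std.testing.expectEqualStrings("{ \"content\": \"{\"foo\": \"bar\", \"baz\": \"qux\"}\" }", body);
}
```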