createTable handler

Emil Lerch 2024-01-29 10:27:25 -08:00
parent acdb78aea5
commit 1a53c3cf8a
Signed by: lobo
GPG Key ID: A7B62D657EF764F8
7 changed files with 1039 additions and 87 deletions

build.zig

@@ -1,5 +1,5 @@
const std = @import("std");
const configureUniversalLambdaBuild = @import("universal_lambda_build").configureBuild;
const universal_lambda = @import("universal_lambda_build");
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
@@ -60,6 +60,7 @@ pub fn build(b: *std.Build) !void {
.target = target,
.optimize = optimize,
});
_ = try universal_lambda.addModules(b, unit_tests);
const run_unit_tests = b.addRunArtifact(unit_tests);
@@ -69,7 +70,7 @@ pub fn build(b: *std.Build) !void {
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_unit_tests.step);
try configureUniversalLambdaBuild(b, exe);
try universal_lambda.configureBuild(b, exe);
const aws_dep = b.dependency("aws", .{
.target = target,

build.zig.zon

@@ -4,16 +4,16 @@
.dependencies = .{
.aws = .{
.url = "https://git.lerch.org/lobo/aws-sdk-for-zig/archive/825d93720a92bcaedb3d00cd04764469fdec0c86.tar.gz",
.hash = "122038e86ca453cbb0b4d5534380470eeb0656fdbab9aca2b7d2dc77756ab659204a",
.url = "https://git.lerch.org/lobo/aws-sdk-for-zig/archive/d08d0f338fb86f7d679a998ff4f65f4e2d0db595.tar.gz",
.hash = "122096bb1480cef9cc4a8abf112f54214de4ab500f6863e86fc919cb5a99533ff7ce",
},
.sqlite = .{
.url = "https://github.com/vrischmann/zig-sqlite/archive/19535aab5760eeaf2979a9dadfca3bb21d1594b9.tar.gz",
.hash = "12208c654deea149cee27eaa45d0e6515c3d8f97d775a4156cbcce0ff424b5d26ea3",
},
.universal_lambda_build = .{
.url = "https://git.lerch.org/lobo/universal-lambda-zig/archive/6c89380fea51686b775a93d9a68150262a20d513.tar.gz",
.hash = "1220173c05fa58d0dceda2e2de99edb1a68b859006747cfcf80d7c908dda95f87db2",
.url = "https://git.lerch.org/lobo/universal-lambda-zig/archive/e5a1099f741ddd6327e015e4c068de5c18d09393.tar.gz",
.hash = "122037f0b35ab67002ef039410ae4ddb6805e14c111557ab0ae2ec7837211f7a1c51",
},
.flexilib = .{
.url = "https://git.lerch.org/lobo/flexilib/archive/3d3dab9c792651477932e2b61c9f4794ac694dcb.tar.gz",

src/Account.zig Normal file

@@ -0,0 +1,27 @@
const std = @import("std");
const encryption = @import("encryption.zig");
const test_account_key = "09aGW6z6QofVsPlWP9FGqVnshxHWAWrKZwLkwkgWs7w=";
const Self = @This();
allocator: std.mem.Allocator,
root_account_key: *[encryption.key_length]u8,
pub fn accountForId(allocator: std.mem.Allocator, account_id: []const u8) !Self {
// TODO: Allow environment variables to house encoded keys. If not in the
// environment, check with LocalDB table to get it. We're
// building LocalDB, though, so we need that working first...
if (!std.mem.eql(u8, account_id, "1234")) return error.NotImplemented;
var key = try allocator.alloc(u8, encryption.key_length);
errdefer allocator.free(key);
try encryption.decodeKey(key[0..encryption.key_length], test_account_key.*);
return Self{
.allocator = allocator,
.root_account_key = key[0..encryption.key_length],
};
}
pub fn deinit(self: Self) void {
self.allocator.free(self.root_account_key);
}
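// Editor's sketch (not part of this commit): typical usage, assuming the
// hard-coded test account id "1234" above.
//
// test "can load the test account key" {
//     const account = try accountForId(std.testing.allocator, "1234");
//     defer account.deinit();
//     // root_account_key is a *[encryption.key_length]u8, ready to pass to encrypt/decrypt
//     try std.testing.expect(account.root_account_key.len == encryption.key_length);
// }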

src/AuthenticatedRequest.zig Normal file

@@ -0,0 +1,14 @@
const std = @import("std");
allocator: std.mem.Allocator,
event_data: []const u8,
headers: std.http.Headers,
status: std.http.Status,
reason: ?[]const u8,
account_id: []const u8,
output_format: OutputFormat,
pub const OutputFormat = enum {
text,
json,
};

src/createtable.zig

@@ -1,84 +1,726 @@
const std = @import("std");
const sqlite = @import("sqlite");
const AuthenticatedRequest = @import("AuthenticatedRequest.zig");
const Account = @import("Account.zig");
const encryption = @import("encryption.zig");
pub var data_dir: []const u8 = "";
pub fn handler(allocator: std.mem.Allocator, account_id: []const u8, event_data: []const u8) ![]const u8 {
_ = event_data;
// Request:
//
// {
// "AttributeDefinitions": [{"AttributeName": "Artist", "AttributeType": "S"}, {"AttributeName": "SongTitle", "AttributeType": "S"}],
// "TableName": "dm",
// "KeySchema": [
// {"AttributeName": "Artist", "KeyType": "HASH"},
// {"AttributeName": "SongTitle", "KeyType": "RANGE"}
// ],
// "ProvisionedThroughput":
// {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
// "Tags": [{"Key": "Owner", "Value": "blueTeam"}]
// }
//
// These are in the original casing so as to make the error messages nice
const RequiredFields = enum(u3) {
// zig fmt: off
TableName = 1 << 0,
AttributeDefinitions = 1 << 1,
KeySchema = 1 << 2,
// zig fmt: on
};
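// Editor's note (not part of this commit): these enum values are bit flags.
// parseRequest below ORs @intFromEnum(...) into a u3 accumulator as each field
// is seen; all required fields are present exactly when the accumulator equals
// std.math.maxInt(u3), i.e. 0b111.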
const AttributeTypeDescriptor = enum(u4) {
S = 0,
N = 1,
B = 2,
BOOL = 3,
NULL = 4,
M = 5,
L = 6,
SS = 7,
NS = 8,
BS = 9,
};
const AttributeTypeName = enum(u4) {
String = 0,
Number = 1,
Binary = 2,
Boolean = 3,
Null = 4,
Map = 5,
List = 6,
StringSet = 7,
NumberSet = 8,
BinarySet = 9,
};
const AttributeDefinition = struct {
name: []const u8,
type: AttributeTypeDescriptor,
};
const TableInfo = struct {
attribute_definitions: []*AttributeDefinition,
// gsi_list: []const u8, // Not sure how this is used
// gsi_description_list: []const u8, // Not sure how this is used
// sqlite_index: []const u8, // Not sure how this is used
table_key: [encryption.encoded_key_length]u8,
};
const Params = struct {
table_name: []const u8,
table_info: TableInfo,
read_capacity_units: ?i64 = null,
write_capacity_units: ?i64 = null,
billing_mode_pay_per_request: bool = false,
};
pub fn handler(request: *AuthenticatedRequest, writer: anytype) ![]const u8 {
const allocator = request.allocator;
const account_id = request.account_id;
var parsed = try std.json.parseFromSlice(std.json.Value, allocator, request.event_data, .{});
defer parsed.deinit();
const request_params = try parseRequest(request, parsed, writer);
defer {
for (request_params.table_info.attribute_definitions) |d| {
allocator.free(d.*.name);
allocator.destroy(d);
}
allocator.free(request_params.table_info.attribute_definitions);
}
var db = try dbForAccount(allocator, account_id);
const account = try Account.accountForId(allocator, account_id); // This will get us the encryption key needed
defer account.deinit();
// TODO: better to do all encryption when request params are parsed?
const table_name = try encryption.encryptAndEncode(allocator, account.root_account_key.*, request_params.table_name);
defer allocator.free(table_name);
// We'll json serialize our table_info structure, encrypt, encode, and plow in
const table_info_string = try std.json.stringifyAlloc(allocator, request_params.table_info, .{ .whitespace = .indent_2 });
defer allocator.free(table_info_string);
const table_info = try encryption.encryptAndEncode(allocator, account.root_account_key.*, table_info_string);
defer allocator.free(table_info);
try insertIntoDm(
&db,
table_name,
table_info,
request_params.read_capacity_units orelse 0,
request_params.write_capacity_units orelse 0,
request_params.billing_mode_pay_per_request,
);
// Server-side input validation on live DDB results in this for a 2-character table name:
// 400 - bad request
// {"__type":"com.amazon.coral.validate#ValidationException","message":"TableName must be at least 3 characters long and at most 255 characters long"}
// TODO: We'll need hold of the server response object here so we can muck with status
// for client validation issues such as "table names must be > 2"
// Tableinfo for Music collection example becomes:
//
// {
// "Attributes": [
// {
// "AttributeName": "Artist",
// "AttributeType": "S"
// },
// {
// "AttributeName": "SongTitle",
// "AttributeType": "S"
// }
// ],
// "GSIList": [],
// "GSIDescList": [],
// "SQLiteIndex": {
// "": [
// {
// "DynamoDBAttribute": {
// "AttributeName": "Artist",
// "AttributeType": "S"
// },
// "KeyType": "HASH",
// "SQLiteColumnName": "hashKey",
// "SQLiteDataType": "TEXT"
// },
// {
// "DynamoDBAttribute": {
// "AttributeName": "SongTitle",
// "AttributeType": "S"
// },
// "KeyType": "RANGE",
// "SQLiteColumnName": "rangeKey",
// "SQLiteDataType": "TEXT"
// }
// ]
// },
// "UniqueIndexes": [
// {
// "DynamoDBAttribute": {
// "AttributeName": "Artist",
// "AttributeType": "S"
// },
// "KeyType": "HASH",
// "SQLiteColumnName": "hashKey",
// "SQLiteDataType": "TEXT"
// },
// {
// "DynamoDBAttribute": {
// "AttributeName": "SongTitle",
// "AttributeType": "S"
// },
// "KeyType": "RANGE",
// "SQLiteColumnName": "rangeKey",
// "SQLiteDataType": "TEXT"
// }
// ],
// "UniqueGSIIndexes": []
// }
//
var diags = sqlite.Diagnostics{};
// TODO: If the file exists, this will blow up
// It doesn't seem that I can bind a variable here. But it actually doesn't matter as we're
// encoding the name...
// IF NOT EXISTS doesn't apply - we want this to bounce if the table exists
const create_stmt = try std.fmt.allocPrint(allocator,
\\CREATE TABLE '{s}' (
\\ hashKey TEXT DEFAULT NULL,
\\ rangeKey TEXT DEFAULT NULL,
\\ hashValue BLOB NOT NULL,
\\ rangeValue BLOB NOT NULL,
\\ itemSize INTEGER DEFAULT 0,
\\ ObjectJSON BLOB NOT NULL,
\\ PRIMARY KEY(hashKey, rangeKey)
\\)
, .{table_name});
defer allocator.free(create_stmt);
// db.exec requires a comptime statement. execDynamic does not
db.execDynamic(
create_stmt,
.{ .diags = &diags },
.{},
) catch |e| {
std.log.debug("SqlLite Diags: {}", .{diags});
return e;
};
const create_index_stmt = try std.fmt.allocPrint(
allocator,
"CREATE INDEX \"{s}*HVI\" ON \"{s}\" (hashValue)",
.{ table_name, table_name },
);
defer allocator.free(create_index_stmt);
try db.execDynamic(create_index_stmt, .{}, .{});
var al = std.ArrayList(u8).init(allocator);
var response_writer = al.writer();
try response_writer.print("table created for account {s}\n", .{account_id});
return al.toOwnedSlice();
}
fn insertIntoDm(
db: *sqlite.Db,
table_name: []const u8,
table_info: []const u8,
read_capacity_units: i64,
write_capacity_units: i64,
billing_mode_pay_per_request: bool,
) !void {
// const current_time = std.time.nanotimestamp();
const current_time = std.time.microTimestamp(); // SQLite integers are only 64-bit max
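// Editor's note (not part of this commit): the $name{type} placeholders below
// are zig-sqlite's typed bind markers. As I understand the library, db.exec
// parses the query at comptime and checks each bound argument against the
// declared type (e.g. $tablename{[]const u8} expects a byte slice).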
try db.exec(
\\INSERT INTO dm(
\\ TableName,
\\ CreationDateTime,
\\ LastDecreaseDate,
\\ LastIncreaseDate,
\\ NumberOfDecreasesToday,
\\ ReadCapacityUnits,
\\ WriteCapacityUnits,
\\ TableInfo,
\\ BillingMode,
\\ PayPerRequestDateTime
\\ ) VALUES (
\\ $tablename{[]const u8},
\\ $createdate{i64},
\\ $lastdecreasedate{usize},
\\ $lastincreasedate{usize},
\\ $numberofdecreasestoday{usize},
\\ $readcapacityunits{i64},
\\ $writecapacityunits{i64},
\\ $tableinfo{[]const u8},
\\ $billingmode{usize},
\\ $payperrequestdatetime{usize}
\\ )
, .{}, .{
table_name,
current_time,
@as(usize, 0),
@as(usize, 0),
@as(usize, 0),
read_capacity_units,
write_capacity_units,
table_info,
if (billing_mode_pay_per_request) @as(usize, 1) else @as(usize, 0),
@as(usize, 0),
});
}
/// Gets the database for this account. If under test, a memory database is used
/// instead. Will initialize the database with appropriate metadata tables
fn dbForAccount(allocator: std.mem.Allocator, account_id: []const u8) !sqlite.Db {
// TODO: Need to move this function somewhere central
// TODO: Need configuration for what directory to use
// TODO: File names should align to account ids
// TODO: Should this be a pool, and if so, how would we know when to close?
const file_without_path = try std.fmt.allocPrint(allocator, "ddb-{s}.db", .{account_id});
const file_without_path = try std.fmt.allocPrint(allocator, "ddb-{s}.sqlite3", .{account_id});
defer allocator.free(file_without_path);
const db_file_name = try std.fs.path.join(allocator, &[_][]const u8{ data_dir, file_without_path });
const db_file_name = try std.fs.path.joinZ(allocator, &[_][]const u8{ data_dir, file_without_path });
defer allocator.free(db_file_name);
const mode = if (@import("builtin").is_test) sqlite.Db.Mode.Memory else sqlite.Db.Mode{ .File = "donotuse.db" };
const exists = std.fs.cwd().statFile(file_without_path) catch null;
const mode = if (@import("builtin").is_test) sqlite.Db.Mode.Memory else sqlite.Db.Mode{ .File = db_file_name };
const new = mode == .Memory or (std.fs.cwd().statFile(file_without_path) catch null == null);
var db = try sqlite.Db.init(.{
.mode = mode,
.open_flags = .{
.write = true,
.create = exists == null,
.create = new,
},
.threading_mode = .MultiThread,
});
// TODO: Create metadata table by account on first create
// DDB minimum table name length is 3. DDB local creates this table with metadata
// This of course is only if the database is first run
// try db.exec(
// \\CREATE TABLE dm (
// \\ TableName TEXT,
// \\ CreationDateTime INTEGER,
// \\ LastDecreaseDate INTEGER,
// \\ LastIncreaseDate INTEGER,
// \\ NumberOfDecreasesToday INTEGER,
// \\ ReadCapacityUnits INTEGER,
// \\ WriteCapacityUnits INTEGER,
// \\ TableInfo BLOB,
// \\ BillingMode INTEGER DEFAULT 0,
// \\ PayPerRequestDateTime INTEGER DEFAULT 0,
// \\ PRIMARY KEY(TableName)
// );
//
// Tableinfo for Music collection example becomes:
//
// {"Attributes":[{"AttributeName":"Artist","AttributeType":"S"},{"AttributeName":"SongTitle","AttributeType":"S"}],"GSIList":[],"GSIDescList":[],"SQLiteIndex":{"":[{"DynamoDBAttribute":{"AttributeName":"Artist","AttributeType":"S"},"KeyType":"HASH","SQLiteColumnName":"hashKey","SQLiteDataType":"TEXT"},{"DynamoDBAttribute":{"AttributeName":"SongTitle","AttributeType":"S"},"KeyType":"RANGE","SQLiteColumnName":"rangeKey","SQLiteDataType":"TEXT"}]},"UniqueIndexes":[{"DynamoDBAttribute":{"AttributeName":"Artist","AttributeType":"S"},"KeyType":"HASH","SQLiteColumnName":"hashKey","SQLiteDataType":"TEXT"},{"DynamoDBAttribute":{"AttributeName":"SongTitle","AttributeType":"S"},"KeyType":"RANGE","SQLiteColumnName":"rangeKey","SQLiteDataType":"TEXT"}],"UniqueGSIIndexes":[]}
try db.exec("CREATE TABLE user(id integer primary key, age integer, name text)", .{}, .{});
var al = std.ArrayList(u8).init(allocator);
var writer = al.writer();
try writer.print("table created for account {s}\n", .{account_id});
return al.toOwnedSlice();
// This is what the music collection sample creates
// CREATE TABLE IF NOT EXISTS "MusicCollection" (hashKey TEXT DEFAULT NULL, rangeKey TEXT DEFAULT NULL, hashValue BLOB NOT NULL, rangeValue BLOB NOT NULL, itemSize INTEGER DEFAULT 0, ObjectJSON BLOB NOT NULL, PRIMARY KEY(hashKey, rangeKey));
// CREATE INDEX "MusicCollection*HVI" ON "MusicCollection" (hashValue);
if (new)
try db.exec(
\\CREATE TABLE dm (
\\ TableName TEXT,
\\ CreationDateTime INTEGER,
\\ LastDecreaseDate INTEGER,
\\ LastIncreaseDate INTEGER,
\\ NumberOfDecreasesToday INTEGER,
\\ ReadCapacityUnits INTEGER,
\\ WriteCapacityUnits INTEGER,
\\ TableInfo BLOB,
\\ BillingMode INTEGER DEFAULT 0,
\\ PayPerRequestDateTime INTEGER DEFAULT 0,
\\ PRIMARY KEY(TableName))
, .{}, .{});
return db;
}
fn parseRequest(
request: *AuthenticatedRequest,
parsed: std.json.Parsed(std.json.Value),
writer: anytype,
) !Params {
var param_iterator = parsed.value.object.iterator();
var required: @typeInfo(RequiredFields).Enum.tag_type = 0;
var request_params = Params{
.table_name = undefined,
.table_info = .{
.attribute_definitions = undefined,
.table_key = undefined,
},
};
// This is a new table, so we will generate a random key for table data
// In this way, key rotation can happen on the account without needing
// re-encryption of the table data. Table info will be encrypted with the
// account root key, and all data in 'dm' as well as table names will
// need to be updated when that key is rotated.
encryption.randomEncodedKey(&request_params.table_info.table_key);
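// Editor's sketch (not part of this commit): once item writes exist, the
// per-table key would presumably be decoded and used for item payloads, so
// only table names and the TableInfo rows in 'dm' are tied to the account
// root key. Roughly (item_json is a hypothetical local):
//
//   var table_key: [encryption.key_length]u8 = undefined;
//   try encryption.decodeKey(&table_key, request_params.table_info.table_key);
//   const encrypted_item = try encryption.encryptAndEncode(request.allocator, table_key, item_json);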
// Request:
//
// {
// "AttributeDefinitions": [
// {
// "AttributeName": "Artist",
// "AttributeType": "S"
// },
// {
// "AttributeName": "SongTitle",
// "AttributeType": "S"
// }
// ],
// "TableName": "dm",
// "KeySchema": [
// {
// "AttributeName": "Artist",
// "KeyType": "HASH"
// },
// {
// "AttributeName": "SongTitle",
// "KeyType": "RANGE"
// }
// ],
// "ProvisionedThroughput": {
// "ReadCapacityUnits": 5,
// "WriteCapacityUnits": 5
// },
// "Tags": [
// {
// "Key": "Owner",
// "Value": "blueTeam"
// }
// ]
// }
var attribute_definitions_assigned = false;
errdefer {
if (attribute_definitions_assigned) {
for (request_params.table_info.attribute_definitions) |d| {
request.allocator.free(d.*.name);
request.allocator.destroy(d);
}
request.allocator.free(request_params.table_info.attribute_definitions);
}
}
while (param_iterator.next()) |p| {
const key = p.key_ptr.*;
const val = p.value_ptr.*;
if (std.mem.eql(u8, key, "TableName")) {
if (val.string.len < 3 or val.string.len > 255) {
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"TableName must be at least 3 characters long and at most 255 characters long",
);
}
required |= @intFromEnum(RequiredFields.TableName);
request_params.table_name = val.string;
continue;
}
if (std.mem.eql(u8, key, "BillingMode")) {
if (val != .string)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"KeySchema must be an array",
);
if (!std.mem.eql(u8, val.string, "PROVISIONED") and
!std.mem.eql(u8, val.string, "PAY_PER_REQUEST"))
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"BillingMode must be PROVISIONED or PAY_PER_REQUEST)",
);
if (std.mem.eql(u8, val.string, "PAY_PER_REQUEST"))
request_params.billing_mode_pay_per_request = true;
continue;
}
if (std.mem.eql(u8, key, "KeySchema")) {
required |= @intFromEnum(RequiredFields.KeySchema);
if (val != .array)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"KeySchema must be an array",
);
if (val.array.items.len == 0)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"KeySchema array cannot be empty",
);
continue;
}
if (std.mem.eql(u8, key, "ProvisionedThroughput")) {
if (val != .object) {
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"ProvisionedThroughput must be an object",
);
}
if (val.object.get("ReadCapacityUnits")) |v| {
if (v != .integer or v.integer < 1) {
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"ReadCapacityUnits must be a positive number",
);
}
request_params.read_capacity_units = v.integer;
}
if (val.object.get("WriteCapacityUnits")) |v| {
if (v != .integer or v.integer < 1) {
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"ReadCapacityUnits must be a positive number",
);
}
request_params.write_capacity_units = v.integer;
}
continue;
}
if (std.mem.eql(u8, key, "AttributeDefinitions")) {
required |= @intFromEnum(RequiredFields.AttributeDefinitions);
if (val != .array)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"AttributeDefinitions must be an array",
);
if (val.array.items.len == 0)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"AttributeDefinitions array cannot be empty",
);
request_params.table_info.attribute_definitions = try parseAttributeDefinitions(request, val.array.items, writer);
attribute_definitions_assigned = true;
continue;
}
if (std.mem.eql(u8, key, "Tags")) {
continue;
}
if (std.mem.eql(u8, key, "LocalSecondaryIndexes")) {
try writer.print("Parameter '{s}' not implemented", .{key});
request.status = .not_implemented;
return error.NotImplemented;
}
try writer.print("Unrecognized request parameter: {s}", .{key});
request.status = .bad_request;
return error.UnrecognizedRequestParameter;
}
if (required != std.math.maxInt(@typeInfo(RequiredFields).Enum.tag_type)) {
// We are missing one or more required fields
for (std.meta.tags(RequiredFields)) |t| {
if (required & @intFromEnum(t) == 0) {
try writer.print("Missing required request parameter: {s}", .{@tagName(t)});
request.status = .bad_request;
return error.MissingRequiredParameter;
}
}
}
if (!request_params.billing_mode_pay_per_request and
(request_params.read_capacity_units == null or
request_params.write_capacity_units == null))
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"ReadCapacityUnits and WriteCapacityUnits required when BillingMode = 'PAY_PER_REQUEST'",
);
return request_params;
}
fn parseAttributeDefinitions(request: *AuthenticatedRequest, definitions: []std.json.Value, writer: anytype) ![]*AttributeDefinition {
const allocator = request.allocator;
var rc = try allocator.alloc(*AttributeDefinition, definitions.len);
errdefer allocator.free(rc);
// "AttributeDefinitions": [
// {
// "AttributeName": "Artist",
// "AttributeType": "S"
// },
// {
// "AttributeName": "SongTitle",
// "AttributeType": "S"
// }
// ],
for (definitions, 0..) |d, i| {
if (d != .object)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"Attribute definitions array can only consist of objects with AttributeName and AttributeType strings",
);
const name = d.object.get("AttributeName");
const attribute_type = d.object.get("AttributeType");
if (name == null or name.? != .string or attribute_type == null or attribute_type.? != .string)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"Attribute definitions array can only consist of objects with AttributeName and AttributeType strings",
);
const type_string = attribute_type.?.string;
const type_enum = std.meta.stringToEnum(AttributeTypeDescriptor, type_string);
if (type_enum == null)
try returnException(
request,
.bad_request,
error.ValidationException,
writer,
"Attribute type invalid",
); // TODO: This is kind of a lousy error message
// TODO: This can leak memory if a later validation error occurs.
// We are de facto passed an arena here, but we shouldn't assume that
var definition = try allocator.create(AttributeDefinition);
definition.name = try allocator.dupe(u8, name.?.string);
definition.type = type_enum.?;
rc[i] = definition;
}
return rc;
}
fn returnException(
request: *AuthenticatedRequest,
status: std.http.Status,
err: anyerror,
writer: anytype,
message: []const u8,
) !void {
switch (request.output_format) {
.json => try writer.print(
\\{{"__type":"{s}","message":"{s}"}}
,
.{ @errorName(err), message },
),
.text => try writer.print(
"{s}: {s}\n",
.{ @errorName(err), message },
),
}
request.status = status;
return err;
}
// Full request syntax:
//
// {
// "AttributeDefinitions": [
// {
// "AttributeName": "string",
// "AttributeType": "string"
// }
// ],
// "BillingMode": "string",
// "DeletionProtectionEnabled": boolean,
// "GlobalSecondaryIndexes": [
// {
// "IndexName": "string",
// "KeySchema": [
// {
// "AttributeName": "string",
// "KeyType": "string"
// }
// ],
// "Projection": {
// "NonKeyAttributes": [ "string" ],
// "ProjectionType": "string"
// },
// "ProvisionedThroughput": {
// "ReadCapacityUnits": number,
// "WriteCapacityUnits": number
// }
// }
// ],
// "KeySchema": [
// {
// "AttributeName": "string",
// "KeyType": "string"
// }
// ],
// "LocalSecondaryIndexes": [
// {
// "IndexName": "string",
// "KeySchema": [
// {
// "AttributeName": "string",
// "KeyType": "string"
// }
// ],
// "Projection": {
// "NonKeyAttributes": [ "string" ],
// "ProjectionType": "string"
// }
// }
// ],
// "ProvisionedThroughput": {
// "ReadCapacityUnits": number,
// "WriteCapacityUnits": number
// },
// "SSESpecification": {
// "Enabled": boolean,
// "KMSMasterKeyId": "string",
// "SSEType": "string"
// },
// "StreamSpecification": {
// "StreamEnabled": boolean,
// "StreamViewType": "string"
// },
// "TableClass": "string",
// "TableName": "string",
// "Tags": [
// {
// "Key": "string",
// "Value": "string"
// }
// ]
// }
test "can create a table" {
const allocator = std.testing.allocator;
const request =
var request = AuthenticatedRequest{
.allocator = allocator,
.event_data =
\\ {
\\ "AttributeDefinitions":
\\ [
\\ {"AttributeName": "Artist", "AttributeType": "S"},
\\ {"AttributeName": "SongTitle", "AttributeType": "S"}
\\ ],
\\ "TableName": "MusicCollection",
\\ "KeySchema": [
\\ {"AttributeName": "Artist", "KeyType": "HASH"},
\\ {"AttributeName": "SongTitle", "KeyType": "RANGE"}
\\ ],
\\ "ProvisionedThroughput":
\\ {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
\\ "Tags": [{"Key": "Owner", "Value": "blueTeam"}]
\\ }
,
.account_id = "1234",
.status = .ok,
.reason = null,
.headers = std.http.Headers.init(allocator),
.output_format = .text,
};
const output = try handler(&request, std.io.null_writer);
defer allocator.free(output);
// TODO: test output
}
test "will fail an unrecognized request parameter" {
const allocator = std.testing.allocator;
var request = AuthenticatedRequest{
.allocator = allocator,
.event_data =
\\ {
\\ "Unrecognized":
\\ [
\\ {"AttributeName": "Artist", "AttributeType": "S"},
\\ {"AttributeName": "SongTitle", "AttributeType": "S"}
\\ ],
\\ "TableName": "MusicCollection",
\\ "KeySchema": [
\\ {"AttributeName": "Artist", "KeyType": "HASH"},
\\ {"AttributeName": "SongTitle", "KeyType": "RANGE"}
\\ ],
\\ "ProvisionedThroughput":
\\ {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
\\ "Tags": [{"Key": "Owner", "Value": "blueTeam"}]
\\ }
,
.account_id = "1234",
.status = .ok,
.reason = null,
.headers = std.http.Headers.init(allocator),
.output_format = .text,
};
var al = std.ArrayList(u8).init(allocator);
defer al.deinit();
try std.testing.expectError(error.UnrecognizedRequestParameter, handler(&request, al.writer()));
try std.testing.expectEqual(std.http.Status.bad_request, request.status);
try std.testing.expectEqualStrings("Unrecognized request parameter: Unrecognized", al.items);
}
test "will fail on short table names (json)" {
try failOnShortTableNames(.json);
}
test "will fail on short table names (text)" {
try failOnShortTableNames(.text);
}
fn failOnShortTableNames(format: AuthenticatedRequest.OutputFormat) !void {
const allocator = std.testing.allocator;
var request = AuthenticatedRequest{
.allocator = allocator,
.event_data =
\\ {
\\ "AttributeDefinitions":
\\ [
@@ -94,8 +736,28 @@ test "can create a table" {
\\ {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
\\ "Tags": [{"Key": "Owner", "Value": "blueTeam"}]
\\ }
;
const output = try handler(allocator, "1234", request);
defer allocator.free(output);
// TODO: test output
,
.account_id = "1234",
.status = .ok,
.reason = null,
.headers = std.http.Headers.init(allocator),
.output_format = format,
};
var al = std.ArrayList(u8).init(allocator);
defer al.deinit();
try std.testing.expectError(error.ValidationException, handler(&request, al.writer()));
try std.testing.expectEqual(std.http.Status.bad_request, request.status);
switch (format) {
.json => try std.testing.expectEqualStrings(
// This is the actual message. Also, what should we do about content type here?
// and what about running from console? and...and...
//\\{"__type":"com.amazon.coral.validate#ValidationException","message":"TableName must be at least 3 characters long and at most 255 characters long"}
\\{"__type":"ValidationException","message":"TableName must be at least 3 characters long and at most 255 characters long"}
, al.items),
.text => try std.testing.expectEqualStrings(
// This is the actual message. Also, what should we do about content type here?
// and what about running from console? and...and...
//\\{"__type":"com.amazon.coral.validate#ValidationException","message":"TableName must be at least 3 characters long and at most 255 characters long"}
"ValidationException: TableName must be at least 3 characters long and at most 255 characters long\n", al.items),
}
}

src/encryption.zig Normal file

@@ -0,0 +1,179 @@
const std = @import("std");
const pbkdf2_iterations = 1000000; // https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
pub const salt_length = 256 / 8; // https://crypto.stackexchange.com/a/56132
pub const encoded_salt_length = std.base64.standard.Encoder.calcSize(salt_length);
pub const key_length = std.crypto.aead.salsa_poly.XSalsa20Poly1305.key_length;
pub const encoded_key_length = std.base64.standard.Encoder.calcSize(key_length);
/// Generates a random salt of appropriate length
pub fn randomSalt(salt: *[salt_length]u8) void {
std.crypto.random.bytes(salt);
}
/// Generates a random salt of appropriate length, encoded into ASCII
pub fn randomEncodedSalt(encoded_salt: *[encoded_salt_length]u8) void {
var salt: [salt_length]u8 = undefined;
randomSalt(salt[0..]);
_ = std.base64.standard.Encoder.encode(encoded_salt, salt[0..]);
}
/// Generates a random key of appropriate length
pub fn randomKey(key: *[key_length]u8) void {
std.crypto.random.bytes(key);
}
/// Generates a random key of appropriate length, encoded into ASCII
pub fn randomEncodedKey(encoded_key: *[encoded_key_length]u8) void {
var key: [key_length]u8 = undefined;
randomKey(key[0..]);
_ = std.base64.standard.Encoder.encode(encoded_key, key[0..]);
}
/// Decodes key from encoded version
pub fn decodeKey(key: *[key_length]u8, encoded_key: [encoded_key_length]u8) !void {
try std.base64.standard.Decoder.decode(key, encoded_key[0..]);
}
// Derives key bytes given a plain-text password and salt. It is recommended
// to use randomSalt to generate the salt; for storage, use a suitable ASCII encoding.
pub fn deriveKey(derived_key: *[key_length]u8, password: []const u8, salt: []const u8) !void {
// Derive key using PBKDF2
try std.crypto.pwhash.pbkdf2(derived_key[0..], password, salt, pbkdf2_iterations, std.crypto.auth.hmac.sha2.HmacSha256);
}
// Derives key bytes given a plain-text password and an ASCII-encoded salt.
// Enables encryption with a single line of code, e.g.
//
// data = try encrypt(allocator, try deriveKeyFromEncodedSalt(&key, password, salt), message);
//
// and decryption with:
//
// message = try decrypt(allocator, try deriveKeyFromEncodedSalt(&key, password, salt), data);
pub fn deriveKeyFromEncodedSalt(derived_key: *[key_length]u8, password: []const u8, encoded_salt: []const u8) ![key_length]u8 {
var salt: [salt_length]u8 = undefined;
try std.base64.standard.Decoder.decode(&salt, encoded_salt);
try deriveKey(derived_key, password, salt[0..]);
return derived_key.*;
}
/// Encrypts data. Use deriveKey function to get a key from password/salt
/// Caller owns memory
pub fn encrypt(allocator: std.mem.Allocator, key: [key_length]u8, plaintext: []const u8) ![]const u8 {
var ciphertext = try allocator.alloc(
u8,
std.crypto.aead.salsa_poly.XSalsa20Poly1305.nonce_length + std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length + plaintext.len,
);
errdefer allocator.free(ciphertext);
// Create the nonce
const nonce_length = std.crypto.aead.salsa_poly.XSalsa20Poly1305.nonce_length;
std.crypto.random.bytes(ciphertext[0..nonce_length]); // add nonce to beginning of our ciphertext
const nonce = ciphertext[0..nonce_length];
const tag = ciphertext[nonce_length .. nonce_length + std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length];
const c = ciphertext[nonce_length + std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length ..];
// Do the encryption
std.crypto.aead.salsa_poly.XSalsa20Poly1305.encrypt(
c,
tag,
plaintext,
"ad",
nonce.*,
key,
);
return ciphertext;
}
/// Encrypts data. Use deriveKey function to get a key from password/salt
/// Caller owns memory
pub fn encryptAndEncode(allocator: std.mem.Allocator, key: [key_length]u8, plaintext: []const u8) ![]const u8 {
const ciphertext = try encrypt(allocator, key, plaintext);
defer allocator.free(ciphertext);
const Encoder = std.base64.standard.Encoder;
var encoded_ciphertext = try allocator.alloc(u8, Encoder.calcSize(ciphertext.len));
errdefer allocator.free(encoded_ciphertext);
return Encoder.encode(encoded_ciphertext, ciphertext);
}
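// Editor's sketch (not part of this commit): there is no decodeAndDecrypt
// counterpart yet, so reading an encoded value back would presumably look like
// (encoded and key are hypothetical locals):
//
//   const Decoder = std.base64.standard.Decoder;
//   const raw = try allocator.alloc(u8, try Decoder.calcSizeForSlice(encoded));
//   defer allocator.free(raw);
//   try Decoder.decode(raw, encoded);
//   const plaintext = try decrypt(allocator, key, raw);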
/// Decrypts data. Use deriveKey function to get a key from password/salt
pub fn decrypt(allocator: std.mem.Allocator, key: [key_length]u8, ciphertext: []const u8) ![]const u8 {
var plaintext = try allocator.alloc(
u8,
ciphertext.len - std.crypto.aead.salsa_poly.XSalsa20Poly1305.nonce_length - std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length,
);
errdefer allocator.free(plaintext);
const nonce_length = std.crypto.aead.salsa_poly.XSalsa20Poly1305.nonce_length;
const nonce = ciphertext[0..nonce_length].*;
const tag = ciphertext[nonce_length .. nonce_length + std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length].*;
const c = ciphertext[nonce_length + std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length ..];
try std.crypto.aead.salsa_poly.XSalsa20Poly1305.decrypt(
plaintext,
c,
tag,
"ad",
nonce,
key,
);
return plaintext;
}
// This is a pretty long running test...
// test "can encrypt and decrypt data with simpler api" {
// const allocator = std.testing.allocator;
// const plaintext = "Hello, Zig!";
// const password = "mySecurePassword";
// var key: [key_length]u8 = undefined;
// var salt: [encoded_salt_length]u8 = undefined;
// randomEncodedSalt(salt[0..]);
//
// const ciphertext = try encrypt(allocator, try deriveKeyFromEncodedSalt(&key, password, salt[0..]), plaintext);
// defer allocator.free(ciphertext);
// std.log.debug("Ciphertext: {s}\n", .{std.fmt.fmtSliceHexLower(ciphertext)});
// const decrypted_text = try decrypt(allocator, key, ciphertext);
// defer allocator.free(decrypted_text);
// try std.testing.expectEqualStrings(plaintext, decrypted_text[0..]);
// }
test "can encrypt and decrypt data with simpler api but without KDF" {
const allocator = std.testing.allocator;
const plaintext = "Hello, Zig!";
var key: [key_length]u8 = undefined;
var encoded_key: [encoded_key_length]u8 = undefined;
randomEncodedKey(encoded_key[0..]);
// std.testing.log_level = .debug;
std.log.debug("Encoded key: {s}", .{encoded_key});
try decodeKey(&key, encoded_key);
const ciphertext = try encrypt(allocator, key, plaintext);
defer allocator.free(ciphertext);
std.log.debug("Ciphertext: {s}\n", .{std.fmt.fmtSliceHexLower(ciphertext)});
const decrypted_text = try decrypt(allocator, key, ciphertext);
defer allocator.free(decrypted_text);
try std.testing.expectEqualStrings(plaintext, decrypted_text[0..]);
}
// test "can encrypt and decrypt data" {
// var tag: [std.crypto.aead.salsa_poly.XSalsa20Poly1305.tag_length]u8 = undefined;
// const password = "mySecurePassword";
//
// var salt: [salt_length]u8 = undefined;
// randomSalt(salt[0..]);
//
// // Derive key using PBKDF2
// var derived_key: [std.crypto.aead.salsa_poly.XSalsa20Poly1305.key_length]u8 = undefined;
// try std.crypto.pwhash.pbkdf2(&derived_key, password, &salt, pbkdf2_iterations, std.crypto.auth.hmac.sha2.HmacSha256);
//
// var nonce: [std.crypto.aead.salsa_poly.XSalsa20Poly1305.nonce_length]u8 = undefined;
// std.crypto.random.bytes(&nonce);
//
// const plaintext = "Hello, Zig!";
// var ciphertext = [_]u8{0} ** plaintext.len;
// std.crypto.aead.salsa_poly.XSalsa20Poly1305.encrypt(ciphertext[0..], &tag, plaintext, "", nonce, derived_key);
// var decrypted_text = [_]u8{0} ** ciphertext.len;
// try std.crypto.aead.salsa_poly.XSalsa20Poly1305.decrypt(&decrypted_text, ciphertext[0..], tag, "", nonce, derived_key);
//
// std.log.debug("Ciphertext: {s}\n", .{std.fmt.fmtSliceHexLower(&ciphertext)});
//
// try std.testing.expectEqualStrings(plaintext, decrypted_text[0..]);
// }

src/main.zig

@@ -1,7 +1,11 @@
const std = @import("std");
const universal_lambda = @import("universal_lambda_handler");
const helper = @import("universal_lambda_helpers");
const universal_lambda_interface = @import("universal_lambda_interface");
const universal_lambda_options = @import("universal_lambda_build_options");
const signing = @import("aws-signing");
const AuthenticatedRequest = @import("AuthenticatedRequest.zig");
const log = std.log.scoped(.dynamodb);
pub const std_options = struct {
pub const log_scope_levels = &[_]std.log.ScopeLevel{.{ .scope = .aws_signing, .level = .info }};
@@ -11,42 +15,107 @@ pub fn main() !u8 {
return try universal_lambda.run(null, handler);
}
var test_credential: signing.Credentials = undefined;
pub fn handler(allocator: std.mem.Allocator, event_data: []const u8, context: universal_lambda.Context) ![]const u8 {
pub fn handler(allocator: std.mem.Allocator, event_data: []const u8, context: universal_lambda_interface.Context) ![]const u8 {
const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET");
test_credential = signing.Credentials.init(allocator, access_key, secret_key, null);
defer test_credential.deinit();
var headers = try helper.allHeaders(allocator, context);
defer headers.deinit();
var fis = std.io.fixedBufferStream(event_data);
var request = signing.UnverifiedRequest{
.method = std.http.Method.PUT,
.target = try helper.findTarget(allocator, context),
.headers = headers.http_headers.*,
};
const auth_bypass =
@import("builtin").mode == .Debug and try std.process.hasEnvVar(allocator, "DEBUG_AUTHN_BYPASS");
const is_authenticated = auth_bypass or
try signing.verify(allocator, request, fis.reader(), getCreds);
// Universal lambda should check these and convert them to http
if (!is_authenticated) return error.Unauthenticated;
try authenticateUser(allocator, context, context.request.target, context.request.headers, fis.reader());
try setContentType(&context.headers, "application/x-amz-json-1.0", false);
// https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#API_CreateTable_Examples
// Operation is in X-Amz-Target
// event_data is json
// X-Amz-Target: DynamoDB_20120810.CreateTable
const target_value = headers.http_headers.getFirstValue("X-Amz-Target").?;
const operation = target_value[std.mem.lastIndexOf(u8, target_value, ".").? + 1 ..];
const account_id = try accountId(allocator, headers.http_headers.*);
const target_value_or_null = context.request.headers.getFirstValue("X-Amz-Target");
const target_value = if (target_value_or_null) |t| t else {
context.status = .bad_request;
context.reason = "Missing X-Amz-Target header";
return error.XAmzTargetHeaderMissing;
};
const operation_or_null = std.mem.lastIndexOf(u8, target_value, ".");
const operation = if (operation_or_null) |o| target_value[o + 1 ..] else {
context.status = .bad_request;
context.reason = "Missing operation in X-Amz-Target";
return error.XAmzTargetHeaderMalformed;
};
var authenticated_request = AuthenticatedRequest{
.allocator = allocator,
.event_data = event_data,
.account_id = try accountId(allocator, context.request.headers),
.status = context.status,
.reason = context.reason,
.headers = context.request.headers,
.output_format = switch (universal_lambda_options.build_type) {
// This may seem to be dumb, but we want to be cognizant of
// any new platforms and explicitly consider them
.awslambda, .standalone_server, .cloudflare, .flexilib => .json,
.exe_run => .text,
},
};
const writer = context.writer();
if (std.ascii.eqlIgnoreCase("CreateTable", operation))
return @import("createtable.zig").handler(allocator, account_id, event_data);
try std.io.getStdErr().writer().print("Operation '{s}' unsupported\n", .{operation});
return executeOperation(&authenticated_request, context, writer, @import("createtable.zig").handler);
try writer.print("Operation '{s}' unsupported\n", .{operation});
context.status = .bad_request;
return error.OperationUnsupported;
}
fn setContentType(headers: *std.http.Headers, content_type: []const u8, overwrite: bool) !void {
if (headers.contains("content-type")) {
if (!overwrite) return;
_ = headers.delete("content-type");
}
try headers.append("Content-Type", content_type);
}
fn executeOperation(
request: *AuthenticatedRequest,
context: universal_lambda_interface.Context,
writer: anytype,
operation: fn (*AuthenticatedRequest, anytype) anyerror![]const u8,
) ![]const u8 {
return operation(request, writer) catch |err| {
context.status = request.status;
context.reason = request.reason;
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
return err;
};
}
fn authenticateUser(allocator: std.mem.Allocator, context: universal_lambda_interface.Context, target: []const u8, headers: std.http.Headers, body_reader: anytype) !void {
var request = signing.UnverifiedRequest{
.method = std.http.Method.PUT,
.target = target,
.headers = headers,
};
const auth_bypass =
@import("builtin").mode == .Debug and try std.process.hasEnvVar(allocator, "DEBUG_AUTHN_BYPASS");
const is_authenticated = auth_bypass or
signing.verify(allocator, request, body_reader, getCreds) catch |err| {
if (std.mem.eql(u8, "AuthorizationHeaderMissing", @errorName(err))) {
context.status = .unauthorized;
return error.Unauthenticated;
}
log.err("Caught error on signature verifcation: {any}", .{err});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
context.status = .unauthorized;
return error.Unauthenticated;
};
// Universal lambda should check these and convert them to http
if (!is_authenticated) {
context.status = .unauthorized;
return error.Unauthenticated;
}
}
// TODO: Hook these functions up to IAM for great good
var test_credential: signing.Credentials = undefined;
fn getCreds(access: []const u8) ?signing.Credentials {
if (std.mem.eql(u8, access, "ACCESS")) return test_credential;
return null;