Compare commits

...

10 Commits

7 changed files with 346 additions and 67 deletions


@@ -0,0 +1,62 @@
name: AWS-Zig Build
run-name: ${{ github.actor }} building ddblocal
on:
push:
branches:
- '*'
- '!zig-develop*'
env:
ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
jobs:
build-zig-0.11.0-amd64-host:
runs-on: ubuntu-latest
# Need to use the default container with node and all that, so we can
# use JS-based actions like actions/checkout@v3...
# container:
# image: alpine:3.15.0
env:
ZIG_VERSION: 0.11.0
ARCH: x86_64
steps:
- name: Check out repository code
uses: actions/checkout@v3
# ARCH is fine, but we can't substitute directly because zig
# uses x86_64 instead of amd64. They also use aarch64 instead of arm64.
#
# However, arm64/linux isn't quite fully tier 1 yet, so this is more of a
# TODO: https://github.com/ziglang/zig/issues/2443
- name: Install zig
run: |
wget -q https://ziglang.org/download/${ZIG_VERSION}/zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
tar x -C /usr/local -f zig-linux-${ARCH}-${ZIG_VERSION}.tar.xz
ln -s /usr/local/zig-linux-${ARCH}-${ZIG_VERSION}/zig /usr/local/bin/zig
- name: Run tests
run: zig build test --verbose
- name: Build example
run: ( cd example && zig build ) # Make sure example builds
# - name: Sign
# id: sign
# uses: https://git.lerch.org/lobo/action-hsm-sign@v1
# with:
# pin: ${{ secrets.HSM_USER_PIN }}
# files: ???
# public_key: 'https://emil.lerch.org/serverpublic.pem'
# - run: |
# echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
# - run: |
# echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
# - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
# - run: |
# echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
# - run: |
# echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
# - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
- name: Notify
uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
if: always()
with:
host: ${{ secrets.NTFY_HOST }}
topic: ${{ secrets.NTFY_TOPIC }}
user: ${{ secrets.NTFY_USER }}
password: ${{ secrets.NTFY_PASSWORD }}

.gitignore

@@ -1,2 +1,6 @@
core
zig-*/
access_keys.csv
*.sqlite3
shared-local-instance.db
*.json


@@ -1,8 +1,8 @@
DDB Local
=========
This project presents itself as [Amazon DynamoDB](https://aws.amazon.com/dynamodb/),
but uses Sqlite for data storage
This project presents itself as [Amazon
DynamoDB](https://aws.amazon.com/dynamodb/), but uses Sqlite for data storage,
only supports a handful of operations, and even then not with full fidelity:
* CreateTable
@@ -11,34 +11,10 @@ only supports a handful of operations, and even then not with full fidelity:
UpdateItem, PutItem and GetItem should be trivial to implement. Project name
mostly mirrors [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html),
but doesn't have the overhead of a full Java VM, etc. On small data sets, this static executable
but doesn't have the overhead of a full Java VM, etc. On small data sets, this
executable will use <10MB of resident memory.
^^^ TODO: New measurement
Running as Docker
-----------------
TODO/Not accurate
Latest version can be found at [https://r.lerch.org/repo/ddbbolt/tags/](https://r.lerch.org/repo/ddbbolt/tags/).
Versions are tagged with the short hash of the git commit, and are
built as a multi-architecture image based on a scratch image.
You can run the docker image with a command like:
```sh
docker run \
--volume=$(pwd)/ddbbolt:/data \
-e FILE=/data/ddb.db \
-e PORT=8080 \
-p 8080:8080 \
-d \
--name=ddbbolt \
--restart=unless-stopped \
r.lerch.org/ddbbolt:f501abe
```
Security
--------
@@ -46,16 +22,24 @@ This uses typical IAM authentication, but does not have authorization
implemented yet. This provides a chicken and egg problem, because we need a
data store for access keys/secret keys, which would be great to have in...DDB.
Therefore, DDB is designed to adhere to the following algorithm:
1. Check if this is a test account (used for `zig build test`). This uses hard-coded creds.
2. Check if the account information is in `access_keys.csv`. This file is loaded at startup
and contains the root credentials and keys necessary for bootstrap. Future plans
are to enable encryption of this file and decryption using an HSM, as it is critical
to everything.
3. Call various services (primarily STS and IAM) if credentials do not exist in #1/#2.
As such, we effectively need a control plane instance on DDB, with appropriate
access keys/secret keys stored somewhere other than DDB. Therefore, the following
environment variables are planned:
* IAM_ACCOUNT_ID
* IAM_ACCESS_KEY
* IAM_SECRET_KEY
* IAM_SECRET_FILE: File that will contain the above three values, allowing for cred rotation
* STS_SERVICE_ENDPOINT
* IAM_SERVICE_ENDPOINT
* STS_SERVICE_ENDPOINT (tbd - may not be named this)
* IAM_SERVICE_ENDPOINT (tbd - may not be named this)
Secret file: the thought here is that we can open/read the file only if authentication
succeeds but the access key does not match the ADMIN_ACCESS_KEY. This is a bit of a
timing oracle, but it is not clear we care that much
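The three checks above amount to a single fallthrough lookup. Below is a minimal Zig sketch of that order; the names (`Creds`, `root_file_creds`, `stsGetAccessKeyInfo`) are hypothetical stand-ins, not this project's API (the real tier 2 lookup appears as `getCreds` in the main.zig hunk later in this diff):

```zig
const std = @import("std");

const Creds = struct { access_key: []const u8, secret_key: []const u8 };

// Tier 2 store, notionally loaded from access_keys.csv at startup
var root_file_creds: std.StringHashMap(Creds) = undefined;

// Tier 3: STS GetAccessKeyInfo is not implemented yet, so this always misses
fn stsGetAccessKeyInfo(access_key: []const u8) ?Creds {
    _ = access_key;
    return null;
}

fn credsFor(access_key: []const u8) ?Creds {
    // 1. Hard-coded test account (used for `zig build test`)
    if (std.mem.eql(u8, access_key, "ACCESS"))
        return .{ .access_key = "ACCESS", .secret_key = "SECRET" };
    // 2. Bootstrap credentials from access_keys.csv
    if (root_file_creds.get(access_key)) |c| return c;
    // 3. Fall through to the control plane (STS/IAM)
    return stsGetAccessKeyInfo(access_key);
}

test "lookup falls through all three tiers" {
    root_file_creds = std.StringHashMap(Creds).init(std.testing.allocator);
    defer root_file_creds.deinit();
    try std.testing.expect(credsFor("ACCESS") != null);
    try std.testing.expect(credsFor("unknown") == null);
}
```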

access_keys_sample.csv (new file)

@@ -0,0 +1,21 @@
# This file should be used sparingly. We should probably have a mechanism
# built in to decrypt it with PKCS#11 or something, because it's the keys
# to everything (this file -> database metadata -> table level keys).
#
# Normal operation is to allow LocalDb to get the Account ID from LocalIAM
# (actually I think this is LocalSTS). With the Account ID in hand, LocalDb
# will retrieve (or generate, then retrieve) the encryption key from LocalDb
#
# This is, of course, a recursive operation, so at some point we need the base
# case, which is a key that's not stored in the database (or the source code)
#
# That...is the purpose of this file. The existing/new key pair is intended to
# enable key rotation. This system uses envelope encryption, so table data is
# encrypted with a key that is stored in the ddbLocal metadata table. On
# request for rotation, the table metadata will use the existing encoded
# encryption key to read, then re-encrypt the metadata with the new encoded
# encryption key. Key rotation, however, remains a TODO in the code
#
# To generate a line for this file, use `zig build generate_credentials`
#
# Access Key,Secret key,Account Id,Existing encoded encryption key, New encoded encryption key
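The comments above describe the envelope scheme only in prose. The sketch below shows the wrap and rotate flow in Zig; the AEAD choice (XChaCha20-Poly1305 from std.crypto) and every name in it are assumptions for illustration, since the project's actual `encryption` module is not part of this diff:

```zig
const std = @import("std");
// Assumption: any AEAD would do; the project's real cipher choice is not shown here
const Aead = std.crypto.aead.chacha_poly.XChaCha20Poly1305;

pub fn main() !void {
    // Base case: the root key, which lives (encoded) in access_keys.csv
    var root_key: [Aead.key_length]u8 = undefined;
    std.crypto.random.bytes(&root_key);

    // Per-table data key, stored wrapped in the ddbLocal metadata table
    var table_key: [Aead.key_length]u8 = undefined;
    std.crypto.random.bytes(&table_key);

    // Wrap (encrypt) the table key under the root key
    var nonce: [Aead.nonce_length]u8 = undefined;
    std.crypto.random.bytes(&nonce);
    var wrapped: [Aead.key_length]u8 = undefined;
    var tag: [Aead.tag_length]u8 = undefined;
    Aead.encrypt(&wrapped, &tag, &table_key, "", nonce, root_key);

    // Rotation: unwrap with the existing root key, then re-wrap with the
    // new one (new-key generation elided; rotation is still a TODO above)
    var unwrapped: [Aead.key_length]u8 = undefined;
    try Aead.decrypt(&unwrapped, &wrapped, tag, "", nonce, root_key);
    std.debug.assert(std.mem.eql(u8, &unwrapped, &table_key));
}
```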


@@ -1,6 +1,47 @@
const std = @import("std");
const universal_lambda = @import("universal_lambda_build");
const test_targets = [_]std.zig.CrossTarget{
.{}, // native
// We seem to have compile errors with the rest, all due to sqlite
// I believe either zig+c files or zig-sqlite is not super cross-target friendly
// .{
// .cpu_arch = .x86_64,
// .os_tag = .linux,
// },
// .{
// .cpu_arch = .aarch64,
// .os_tag = .linux,
// },
// .{
// .cpu_arch = .riscv64,
// .os_tag = .linux,
// },
// .{
// .cpu_arch = .arm,
// .os_tag = .linux,
// },
// .{
// .cpu_arch = .x86_64,
// .os_tag = .windows,
// },
// .{
// .cpu_arch = .aarch64,
// .os_tag = .macos,
// },
// .{
// .cpu_arch = .x86_64,
// .os_tag = .macos,
// },
// Since we are using sqlite, we cannot use wasm32/wasi at this time. Even
// with compile errors above, I do not believe wasi will be easily supported
// .{
// .cpu_arch = .wasm32,
// .os_tag = .wasi,
// },
};
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
@@ -53,23 +94,6 @@ pub fn build(b: *std.Build) !void {
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const unit_tests = b.addTest(.{
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
_ = try universal_lambda.addModules(b, unit_tests);
const run_unit_tests = b.addRunArtifact(unit_tests);
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_unit_tests.step);
try universal_lambda.configureBuild(b, exe);
const aws_dep = b.dependency("aws", .{
@@ -83,12 +107,33 @@ pub fn build(b: *std.Build) !void {
.use_bundled = true,
});
const sqlite_module = sqlite_dep.module("sqlite");
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests");
for (test_targets) |t| {
// Creates a step for unit testing. This only builds the test executable
// but does not run it.
const unit_tests = b.addTest(.{
.root_source_file = .{ .path = "src/main.zig" },
.target = t,
.optimize = optimize,
});
_ = try universal_lambda.addModules(b, unit_tests);
const run_unit_tests = b.addRunArtifact(unit_tests);
// run_unit_tests.skip_foreign_checks = true;
test_step.dependOn(&run_unit_tests.step);
for (&[_]*std.Build.Step.Compile{ exe, unit_tests }) |cs| {
cs.addModule("aws-signing", aws_signing_module);
cs.addModule("sqlite", sqlite_module);
cs.addIncludePath(.{ .path = "c" });
cs.linkLibrary(sqlite_dep.artifact("sqlite"));
}
}
var creds_step = b.step("generate_credentials", "Generate credentials for access_keys.csv");
creds_step.makeFn = generateCredentials;
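For context on the `makeFn` wiring above: in Zig 0.11, a custom step's make function has the shape `fn (*std.Build.Step, *std.Progress.Node) anyerror!void`. The body below is a hypothetical sketch only; the project's real `generateCredentials` is not shown in this hunk:

```zig
const std = @import("std");

fn generateCredentials(step: *std.Build.Step, prog_node: *std.Progress.Node) anyerror!void {
    _ = step;
    _ = prog_node;
    // Emit one base64-encoded 256-bit key; a real implementation would also
    // produce the access key, secret key, and account id columns
    var key: [32]u8 = undefined;
    std.crypto.random.bytes(&key);
    var encoded: [std.base64.standard.Encoder.calcSize(32)]u8 = undefined;
    _ = std.base64.standard.Encoder.encode(&encoded, &key);
    std.debug.print("<access key>,<secret key>,<account id>,{s},\n", .{encoded});
}
```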


@@ -10,14 +10,10 @@ const Self = @This();
allocator: std.mem.Allocator,
root_account_key: *[encryption.key_length]u8,
pub var root_key_mapping: ?std.StringHashMap([]const u8) = null;
pub fn accountForId(allocator: std.mem.Allocator, account_id: []const u8) !Self {
// TODO: Allow environment variables to house encoded keys. If not in the
// environment, check with LocalDB table to get it. We're
// building LocalDB, though, so we need that working first...
if (!std.mem.eql(u8, account_id, "1234")) {
log.err("Got account id '{s}', but only '1234' is valid right now", .{account_id});
return error.NotImplemented;
}
if (std.mem.eql(u8, account_id, "1234")) {
var key = try allocator.alloc(u8, encryption.key_length);
errdefer allocator.free(key);
try encryption.decodeKey(key[0..encryption.key_length], test_account_key.*);
@@ -27,6 +23,24 @@ pub fn accountForId(allocator: std.mem.Allocator, account_id: []const u8) !Self
};
}
// Check our root mappings (populated elsewhere)
if (root_key_mapping) |m| {
if (m.get(account_id)) |k| {
var key = try allocator.alloc(u8, encryption.key_length);
errdefer allocator.free(key);
try encryption.decodeKey(key[0..encryption.key_length], @constCast(k[0..encryption.encoded_key_length]).*);
return Self{
.allocator = allocator,
.root_account_key = key[0..encryption.key_length],
};
}
}
// TODO: Check STS
log.err("Got account id '{s}', but could not find this ('1234' is test account). STS GetAccessKeyInfo not implemented", .{account_id});
return error.NotImplemented;
}
pub fn deinit(self: Self) void {
std.crypto.utils.secureZero(u8, self.root_account_key);
self.allocator.free(self.root_account_key);


@@ -4,6 +4,7 @@ const universal_lambda_interface = @import("universal_lambda_interface");
const universal_lambda_options = @import("universal_lambda_build_options");
const signing = @import("aws-signing");
const AuthenticatedRequest = @import("AuthenticatedRequest.zig");
const Account = @import("Account.zig");
const log = std.log.scoped(.dynamodb);
@@ -12,10 +13,35 @@ pub const std_options = struct {
};
pub fn main() !u8 {
var fb_allocator = std.heap.FixedBufferAllocator.init(&creds_buf);
const allocator = fb_allocator.allocator();
fillRootCreds(allocator) catch |e| {
log.err("Error filling root creds. Base authentication will not work until this is fixed: {}", .{e});
return e;
};
return try universal_lambda.run(null, handler);
}
pub fn handler(allocator: std.mem.Allocator, event_data: []const u8, context: universal_lambda_interface.Context) ![]const u8 {
const builtin = @import("builtin");
var rss: std.os.rusage = undefined;
if (builtin.os.tag == .linux and builtin.mode == .Debug)
rss = std.os.getrusage(std.os.rusage.SELF);
defer if (builtin.os.tag == .linux and builtin.mode == .Debug) {
const rusage = std.os.getrusage(std.os.rusage.SELF);
log.debug(
"Request complete, max RSS of process: {d}M. Incremental: {d}K, User: {d}μs, System: {d}μs",
.{
@divTrunc(rusage.maxrss, 1024),
rusage.maxrss - rss.maxrss,
(rusage.utime.tv_sec - rss.utime.tv_sec) * std.time.us_per_s +
rusage.utime.tv_usec - rss.utime.tv_usec,
(rusage.stime.tv_sec - rss.stime.tv_sec) * std.time.us_per_s +
rusage.stime.tv_usec - rss.stime.tv_usec,
},
);
};
const access_key = try allocator.dupe(u8, "ACCESS");
const secret_key = try allocator.dupe(u8, "SECRET");
test_credential = signing.Credentials.init(allocator, access_key, secret_key, null);
@@ -118,17 +144,140 @@ fn authenticateUser(allocator: std.mem.Allocator, context: universal_lambda_inte
}
}
// TODO: Hook these functions up to IAM for great good
var test_credential: signing.Credentials = undefined;
var root_creds: std.StringHashMap(signing.Credentials) = undefined;
var root_account_mapping: std.StringHashMap([]const u8) = undefined;
var creds_buf: [8192]u8 = undefined;
fn getCreds(access: []const u8) ?signing.Credentials {
// We have 3 levels of access here
//
// 1. Test creds, used strictly for debugging
// 2. Creds from the root file, ideally used only for bootstrapping
// 3. Creds from STS GetAccessKeyInfo API call, which should be 99%+ of ops
if (std.mem.eql(u8, access, "ACCESS")) return test_credential;
log.debug("Creds for access key {s}: {any}", .{ access, root_creds.get(access) != null });
if (root_creds.get(access)) |c| return c;
log.err("Creds not found in store. STS GetAccessKeyInfo call is not yet implemented", .{});
return null;
}
fn fillRootCreds(allocator: std.mem.Allocator) !void {
root_creds = std.StringHashMap(signing.Credentials).init(allocator);
root_account_mapping = std.StringHashMap([]const u8).init(allocator);
Account.root_key_mapping = std.StringHashMap([]const u8).init(allocator);
var file = std.fs.cwd().openFile("access_keys.csv", .{}) catch |e| {
log.err("Could not open access_keys.csv to access root creds: {}", .{e});
return e;
};
defer file.close();
var buf_reader = std.io.bufferedReader(file.reader());
const reader = buf_reader.reader();
var file_buf: [8192]u8 = undefined; // intentionally kept small here...this should be used sparingly
var file_fb_allocator = std.heap.FixedBufferAllocator.init(&file_buf);
const file_allocator = file_fb_allocator.allocator();
var line = std.ArrayList(u8).init(file_allocator);
defer line.deinit();
const line_writer = line.writer();
var line_num: usize = 1;
while (reader.streamUntilDelimiter(line_writer, '\n', null)) : (line_num += 1) {
defer line.clearRetainingCapacity();
var relevant_line = line.items[0 .. std.mem.indexOfScalar(u8, line.items, '#') orelse line.items.len];
const relevant_line_trimmed = std.mem.trim(u8, relevant_line, " \t");
var value_iterator = std.mem.splitScalar(u8, relevant_line_trimmed, ',');
if (std.mem.trim(u8, value_iterator.peek().?, " \t").len == 0) continue;
var val_num: usize = 0;
var access_key: []const u8 = undefined;
var secret_key: []const u8 = undefined;
var account_id: []const u8 = undefined;
var existing_key: []const u8 = undefined;
var new_key: []const u8 = undefined;
while (value_iterator.next()) |val| : (val_num += 1) {
const actual_val = std.mem.trim(u8, val, " \t");
switch (val_num) {
0 => access_key = actual_val,
1 => secret_key = actual_val,
2 => account_id = actual_val,
3 => existing_key = actual_val,
4 => new_key = actual_val,
else => {
log.err("access_keys.csv Error on line {d}: too many values", .{line_num});
return error.TooManyValues;
},
}
}
if (val_num < 4) {
log.err("access_keys.csv Error on line {d}: too few values", .{line_num});
return error.TooFewValues;
}
const global_access_key = try allocator.dupe(u8, access_key);
try root_creds.put(global_access_key, .{
.access_key = global_access_key, // we need to copy all these into our global buffer
.secret_key = try allocator.dupe(u8, secret_key),
.session_token = null,
.allocator = NullAllocator.init(),
});
const global_account_id = try allocator.dupe(u8, account_id);
try root_account_mapping.put(global_access_key, global_account_id);
try Account.root_key_mapping.?.put(global_account_id, try allocator.dupe(u8, existing_key));
// TODO: key rotation will need another hash map, can be triggered on val_num == 5
} else |e| switch (e) {
error.EndOfStream => {}, // note: a final line without a trailing \n is read into `line` but never processed
else => return e,
}
}
// An allocator that can never allocate and ignores free. Root credentials
// duped into the static creds_buf use this, so Credentials deinit cannot
// actually free their backing memory.
const NullAllocator = struct {
const thing = 0;
const vtable = std.mem.Allocator.VTable{
.alloc = alloc,
.resize = resize,
.free = free,
};
fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
_ = ctx;
_ = len;
_ = ptr_align;
_ = ret_addr;
return null;
}
fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
_ = ctx;
_ = buf;
_ = buf_align;
_ = new_len;
_ = ret_addr;
return false;
}
fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
_ = ctx;
_ = buf;
_ = buf_align;
_ = ret_addr;
}
pub fn init() std.mem.Allocator {
return .{
.ptr = @ptrFromInt(@intFromPtr(&thing)),
.vtable = &vtable,
};
}
};
fn accountForAccessKey(allocator: std.mem.Allocator, access_key: []const u8) ![]const u8 {
_ = allocator;
log.debug("Finding account for access key: '{s}'", .{access_key});
return "1234";
// Since this happens after authentication, we can assume our root creds store
// is populated
if (root_account_mapping.get(access_key)) |account| return account;
log.err("Creds not found in store. STS GetAccessKeyInfo call is not yet implemented", .{});
return error.NotImplemented;
}
/// Function assumes an authenticated request, so signing.verify must be called
/// and returned true before calling this function. If authentication header