initial commit - much to do
parent 0177f8c838
commit ae7327713f
.gitignore (vendored, new file, +2)
@@ -0,0 +1,2 @@
core
zig-*/
LICENSE (new file, +21)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Emil Lerch

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md (new file, +78)
@@ -0,0 +1,78 @@
DDB Local
=========

This project presents itself as [Amazon DynamoDB](https://aws.amazon.com/dynamodb/),
but uses SQLite for data storage. It only supports a handful of operations, and
even then not with full fidelity:

* CreateTable
* BatchGetItem
* BatchWriteItem

UpdateItem, PutItem and GetItem should be trivial to implement. The project name
mostly mirrors [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html),
but this project doesn't have the overhead of a full Java VM, etc. On small data
sets, this static executable will use <10MB of resident memory.
^^^ TODO: New measurement

Running as Docker
-----------------

TODO/Not accurate

The latest version can be found at [https://r.lerch.org/repo/ddbbolt/tags/](https://r.lerch.org/repo/ddbbolt/tags/).
Versions are tagged with the short hash of the git commit, and are
built as a multi-architecture image based on a scratch image.

You can run the docker image with a command like:

```sh
docker run \
  --volume=$(pwd)/ddbbolt:/data \
  -e FILE=/data/ddb.db \
  -e PORT=8080 \
  -p 8080:8080 \
  -d \
  --name=ddbbolt \
  --restart=unless-stopped \
  r.lerch.org/ddbbolt:f501abe
```
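
As a quick smoke test against a container started as above, a CreateTable call
might look like the sketch below. This is illustrative only: the port comes from
the example above, the table definition is a placeholder, and request signing
(discussed under Security) is omitted entirely.

```sh
# Hypothetical request. DynamoDB-style APIs carry the operation in the
# X-Amz-Target header and send a JSON body; a real client would also
# attach a SigV4 Authorization header.
curl -s http://localhost:8080/ \
  -H 'Content-Type: application/x-amz-json-1.0' \
  -H 'X-Amz-Target: DynamoDB_20120810.CreateTable' \
  -d '{
        "TableName": "Music",
        "AttributeDefinitions": [{"AttributeName": "Artist", "AttributeType": "S"}],
        "KeySchema": [{"AttributeName": "Artist", "KeyType": "HASH"}],
        "ProvisionedThroughput": {"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}
      }'
```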

Security
--------

This uses typical IAM authentication, but does not have authorization
implemented yet. This presents a chicken-and-egg problem, because we need a
data store for access keys/secret keys, which would be great to have in...DDB.

As such, we effectively need a control plane instance on DDB, with appropriate
access keys/secret keys stored somewhere other than DDB. Therefore, the following
environment variables are planned:

* IAM_ACCOUNT_ID
* IAM_ACCESS_KEY
* IAM_SECRET_KEY
* IAM_SECRET_FILE: File that will contain the above three values, allowing for cred rotation (a possible layout is sketched after this list)
* STS_SERVICE_ENDPOINT
* IAM_SERVICE_ENDPOINT
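
The exact file format is not decided; all the README commits to is that the file
holds the three values above. Purely as an illustration, it might be a simple
key=value file handed to the container, which would make rotation a matter of
rewriting the file rather than restarting with new `-e` values:

```sh
# Hypothetical layout for the planned IAM_SECRET_FILE. The variable names
# match the list above; the key=value format and path are assumptions.
cat > /secure/iam-bootstrap.env <<'EOF'
IAM_ACCOUNT_ID=123456789012
IAM_ACCESS_KEY=ACCESS
IAM_SECRET_KEY=SECRET
EOF

# Point the container at it instead of passing the values individually.
docker run ... -e IAM_SECRET_FILE=/secure/iam-bootstrap.env ...
```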

The thought behind the secret file is that we open/read the file only if
authentication succeeds but the access key does not match the ADMIN_ACCESS_KEY.
This is a bit of a timing oracle, but not sure we care that much.

Note that IAM does not have public APIs to perform authentication on access keys,
nor does it seem to do authorization.

STS is used to [translate access keys -> account ids](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetAccessKeyInfo.html).
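
For reference, the upstream STS operation linked above (not anything in this
project) can be exercised with the AWS CLI; the access key below is the
placeholder from AWS documentation:

```sh
# GetAccessKeyInfo returns the account ID that owns an access key,
# e.g. { "Account": "123456789012" }.
aws sts get-access-key-info --access-key-id AKIAIOSFODNN7EXAMPLE
```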

Our plan is to use the aws zig library for authentication, and IAM for authorization,
but we'll do that as a bin item.

At a high level, we have a DDB bootstrap with an IAM account id/access key. Those
credentials can then add new records, which we'll call "root user" records, in the
IAM table with their own account id/access keys.

Those "root users" can then do whatever they want in their own tables, but cannot
touch tables belonging to any other account, including the IAM account. The IAM
account can only touch tables in its own account.
build.zig (new file, +82)
@@ -0,0 +1,82 @@
const std = @import("std");
const configureUniversalLambdaBuild = @import("universal_lambda_build").configureBuild;

// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) !void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting supported target set are available.
    const target = b.standardTargetOptions(.{});

    // Standard optimization options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
    // set a preferred release mode, allowing the user to decide how to optimize.
    const optimize = b.standardOptimizeOption(.{});

    const exe = b.addExecutable(.{
        .name = "ddblocal",
        // In this case the main source file is merely a path, however, in more
        // complicated build scripts, this could be a generated file.
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });

    // This declares intent for the executable to be installed into the
    // standard location when the user invokes the "install" step (the default
    // step when running `zig build`).
    b.installArtifact(exe);

    // This *creates* a Run step in the build graph, to be executed when another
    // step is evaluated that depends on it. The next line below will establish
    // such a dependency.
    const run_cmd = b.addRunArtifact(exe);

    // By making the run step depend on the install step, it will be run from the
    // installation directory rather than directly from within the cache directory.
    // This is not necessary, however, if the application depends on other installed
    // files, this ensures they will be present and in the expected location.
    run_cmd.step.dependOn(b.getInstallStep());

    // This allows the user to pass arguments to the application in the build
    // command itself, like this: `zig build run -- arg1 arg2 etc`
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    // This creates a build step. It will be visible in the `zig build --help` menu,
    // and can be selected like this: `zig build run`
    // This will evaluate the `run` step rather than the default, which is "install".
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    // Creates a step for unit testing. This only builds the test executable
    // but does not run it.
    const unit_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });

    const run_unit_tests = b.addRunArtifact(unit_tests);

    // Similar to creating the run step earlier, this exposes a `test` step to
    // the `zig build --help` menu, providing a way for the user to request
    // running the unit tests.
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&run_unit_tests.step);

    try configureUniversalLambdaBuild(b, exe);

    const aws_dep = b.dependency("aws", .{
        .target = target,
        .optimize = optimize,
    });
    const aws_signing_module = aws_dep.module("aws-signing");
    for (&[_]*std.Build.Step.Compile{ exe, unit_tests }) |cs| {
        cs.addModule("aws-signing", aws_signing_module);
    }
}
build.zig.zon (new file, +19)
@@ -0,0 +1,19 @@
.{
    .name = "ddblocal",
    .version = "0.0.1",

    .dependencies = .{
        .aws = .{
            .url = "https://git.lerch.org/lobo/aws-sdk-for-zig/archive/825d93720a92bcaedb3d00cd04764469fdec0c86.tar.gz",
            .hash = "122038e86ca453cbb0b4d5534380470eeb0656fdbab9aca2b7d2dc77756ab659204a",
        },
        .universal_lambda_build = .{
            .url = "https://git.lerch.org/lobo/universal-lambda-zig/archive/d8b536651531ee95ceb4fae65ca5f29c5ed6ef29.tar.gz",
            .hash = "1220de5b5f23fddb794e2e735ee8312b9cd0d1302d5b8e3902f785e904f515506ccf",
        },
        .flexilib = .{
            .url = "https://git.lerch.org/lobo/flexilib/archive/c44ad2ba84df735421bef23a2ad612968fb50f06.tar.gz",
            .hash = "122051fdfeefdd75653d3dd678c8aa297150c2893f5fad0728e0d953481383690dbc",
        },
    },
}
src/main.zig (new file, +57)
@@ -0,0 +1,57 @@
const std = @import("std");
const universal_lambda = @import("universal_lambda_handler");
const helper = @import("universal_lambda_helpers");
const signing = @import("aws-signing");

pub const std_options = struct {
    pub const log_scope_levels = &[_]std.log.ScopeLevel{.{ .scope = .aws_signing, .level = .info }};
};

pub fn main() !void {
    try universal_lambda.run(null, handler);
}

var test_credential: signing.Credentials = undefined;
pub fn handler(allocator: std.mem.Allocator, event_data: []const u8, context: universal_lambda.Context) ![]const u8 {
    const access_key = try allocator.dupe(u8, "ACCESS");
    const secret_key = try allocator.dupe(u8, "SECRET");
    test_credential = signing.Credentials.init(allocator, access_key, secret_key, null);
    defer test_credential.deinit();

    var headers = try helper.allHeaders(allocator, context);
    defer headers.deinit();
    var fis = std.io.fixedBufferStream(event_data);
    var request = signing.UnverifiedRequest{
        .method = std.http.Method.PUT,
        .target = try helper.findTarget(allocator, context),
        .headers = headers.http_headers.*,
    };

    const auth_bypass =
        @import("builtin").mode == .Debug and try std.process.hasEnvVar(allocator, "DEBUG_AUTHN_BYPASS");
    const is_authenticated = auth_bypass or
        try signing.verify(allocator, request, fis.reader(), getCreds);

    // https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html#API_CreateTable_Examples
    // Operation is in X-Amz-Target
    // event_data is json
    var al = std.ArrayList(u8).init(allocator);
    var writer = al.writer();
    try writer.print("Mode: {}\nAuthenticated: {}\nValue for header 'Foo' is: {s}\n", .{
        @import("builtin").mode,
        is_authenticated,
        headers.http_headers.getFirstValue("foo") orelse "undefined",
    });
    return al.items;
}

fn getCreds(access: []const u8) ?signing.Credentials {
    if (std.mem.eql(u8, access, "ACCESS")) return test_credential;
    return null;
}
test "simple test" {
    var list = std.ArrayList(i32).init(std.testing.allocator);
    defer list.deinit(); // try commenting this out and see if zig detects the memory leak!
    try list.append(42);
    try std.testing.expectEqual(@as(i32, 42), list.pop());
}