Compare commits
	
		
32 commits
1e8ed763ce ... c0f29236c8
| SHA1 |
|---|
| c0f29236c8 |
| a85e185ce7 |
| 3d27b84e2d |
| 46685043e5 |
| a05c1dfa10 |
| 1fd42fbe84 |
| 014f739b09 |
| be42b07086 |
| 8d6dcc2a13 |
| db2a612023 |
| e66c82468a |
| acba4d7962 |
| 3e9fab6ca5 |
| b928a5ec21 |
| b753c4e441 |
| c7544b930b |
| 9880aa0ba9 |
| 8d36300f27 |
| a01c01522c |
| 8cc3059744 |
| a3967b8652 |
| d0dd2507d4 |
| 2793e1e17b |
| 7505d87bf1 |
| 9fd6755684 |
| ea93542da8 |
| 3bf6adc13e |
| 87323ecb71 |
| a5b78384f5 |
| 042dfad64b |
| 5b1a6a6e01 |
| eb449eabb0 |

22 changed files with 1928 additions and 1454 deletions
		|  | @ -17,8 +17,9 @@ steps: | ||||||
|   - tar x -C /usr/local -f zig-linux-x86_64-0.9.0.tar.xz |   - tar x -C /usr/local -f zig-linux-x86_64-0.9.0.tar.xz | ||||||
|   - rm /usr/local/bin/zig |   - rm /usr/local/bin/zig | ||||||
|   - ln -s /usr/local/zig-linux-x86_64-0.9.0/zig /usr/local/bin/zig |   - ln -s /usr/local/zig-linux-x86_64-0.9.0/zig /usr/local/bin/zig | ||||||
|  |   - apk add git | ||||||
|   - (cd codegen && zig build test) |   - (cd codegen && zig build test) | ||||||
|   - zig build # implicitly does a codegen |   - zig build -Dfetch # implicitly does a codegen | ||||||
|   - zig build test |   - zig build test | ||||||
| - name: notify | - name: notify | ||||||
|   image: plugins/matrix |   image: plugins/matrix | ||||||
|  |  | ||||||
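
The CI pipeline now installs git and runs the first full build with `zig build -Dfetch`, so dependency repositories are cloned during the build. For reference, a minimal sketch of how such a `-Dfetch` flag is surfaced in `build.zig` (the project itself does this through `GitRepoStep.defaultFetchOption`, shown later in this diff):

```zig
// Minimal sketch, not the project's exact code: expose a -Dfetch option so
// that network fetches only happen when explicitly requested.
const std = @import("std");

pub fn build(b: *std.build.Builder) void {
    // `zig build -Dfetch` sets this to true; a plain `zig build` leaves it false.
    const fetch = b.option(bool, "fetch", "automatically fetch network resources") orelse false;
    _ = fetch; // GitRepoStep.defaultFetchOption registers the same option in this repo.
}
```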
							
								
								
									

.gitignore (vendored, 2 changed lines)

|  | @ -7,3 +7,5 @@ service_manifest.zig | ||||||
| demo | demo | ||||||
| src/models/ | src/models/ | ||||||
| smithy/zig-out/ | smithy/zig-out/ | ||||||
|  | libs/ | ||||||
|  | src/git_version.zig | ||||||
|  |  | ||||||
							
								
								
									

Amazon_Root_CA_1.pem (new file, 20 lines)

|  | @ -0,0 +1,20 @@ | ||||||
|  | -----BEGIN CERTIFICATE----- | ||||||
|  | MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF | ||||||
|  | ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 | ||||||
|  | b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL | ||||||
|  | MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv | ||||||
|  | b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj | ||||||
|  | ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM | ||||||
|  | 9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw | ||||||
|  | IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 | ||||||
|  | VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L | ||||||
|  | 93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm | ||||||
|  | jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC | ||||||
|  | AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA | ||||||
|  | A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI | ||||||
|  | U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs | ||||||
|  | N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv | ||||||
|  | o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU | ||||||
|  | 5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy | ||||||
|  | rqXRfboQnoZsG4q5WTP468SQvvG5 | ||||||
|  | -----END CERTIFICATE----- | ||||||
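
The Amazon root CA is now checked into the repository. The `src/aws.zig` diff below refers to `awshttp.default_root_ca`, so the PEM is presumably embedded into the binary as the default trust root; the actual embedding lives in `aws_http.zig`, which is not part of this compare view, so the following is only a hypothetical sketch:

```zig
// Hypothetical: expose the checked-in PEM as the default root CA. The real
// declaration (and its exact relative path) is in aws_http.zig, not shown here.
pub const default_root_ca = @embedFile("../Amazon_Root_CA_1.pem");
```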
							
								
								
									

CopyStep.zig (new file, 36 lines)

|  | @ -0,0 +1,36 @@ | ||||||
|  | const std = @import("std"); | ||||||
|  | const CopyStep = @This(); | ||||||
|  | 
 | ||||||
|  | step: std.build.Step, | ||||||
|  | builder: *std.build.Builder, | ||||||
|  | from_path: []const u8, | ||||||
|  | to_path: []const u8, | ||||||
|  | 
 | ||||||
|  | pub fn create( | ||||||
|  |     b: *std.build.Builder, | ||||||
|  |     from_path_relative: []const u8, | ||||||
|  |     to_path_relative: []const u8, | ||||||
|  | ) *CopyStep { | ||||||
|  |     var result = b.allocator.create(CopyStep) catch @panic("memory"); | ||||||
|  |     result.* = CopyStep{ | ||||||
|  |         .step = std.build.Step.init(.custom, "copy a file", b.allocator, make), | ||||||
|  |         .builder = b, | ||||||
|  |         .from_path = std.fs.path.resolve(b.allocator, &[_][]const u8{ | ||||||
|  |             b.build_root, | ||||||
|  |             from_path_relative, | ||||||
|  |         }) catch @panic("memory"), | ||||||
|  |         .to_path = std.fs.path.resolve(b.allocator, &[_][]const u8{ | ||||||
|  |             b.build_root, | ||||||
|  |             to_path_relative, | ||||||
|  |         }) catch @panic("memory"), | ||||||
|  |     }; | ||||||
|  |     return result; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn make(step: *std.build.Step) !void { | ||||||
|  |     const self = @fieldParentPtr(CopyStep, "step", step); | ||||||
|  |     std.fs.copyFileAbsolute(self.from_path, self.to_path, .{}) catch |e| { | ||||||
|  |         std.log.err("Error copying {s} to {s}: {s}", .{ self.from_path, self.to_path, e }); | ||||||
|  |         std.os.exit(1); | ||||||
|  |     }; | ||||||
|  | } | ||||||
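
CopyStep wraps a single file copy in a build step. The `build.zig` diff below uses it to drop a pinned `zfetch_deps.zig` over the `deps.zig` of the fetched zfetch checkout; a condensed sketch of that call site (the git-clone dependency is elided here):

```zig
// Usage sketch based on the call in build.zig below; in the real build the
// copy step also depends on the GitRepoStep that clones zfetch.
const std = @import("std");
const CopyStep = @import("CopyStep.zig");

pub fn build(b: *std.build.Builder) void {
    const exe = b.addExecutable("demo", "src/main.zig");
    const copy_deps = CopyStep.create(b, "zfetch_deps.zig", "libs/zfetch/deps.zig");
    exe.step.dependOn(&copy_deps.step);
    exe.install();
}
```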
							
								
								
									

Dockerfile (deleted, 110 lines)

|  | @ -1,110 +0,0 @@ | ||||||
| # We are looking for a static build, so we need to be on a musl system |  | ||||||
| # Zig uses clang, so for best compatibility, everything should be built |  | ||||||
| # using that compiler |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # Establish a base container with build tools common to most projects |  | ||||||
| FROM alpine:3.13 AS base |  | ||||||
| # gcc gets us libgcc.a, even though the build should be using clang |  | ||||||
| RUN apk add --no-cache clang git cmake make lld musl-dev gcc && \ |  | ||||||
|     rm /usr/bin/ld && \ |  | ||||||
|     ln -s /usr/bin/ld.lld /usr/bin/ld && rm /usr/bin/gcc # just to be sure |  | ||||||
| 
 |  | ||||||
| FROM base AS common |  | ||||||
| RUN git clone --depth 1 -b v0.5.2 https://github.com/awslabs/aws-c-common && \ |  | ||||||
|     mkdir aws-c-common-build && cd aws-c-common-build && \ |  | ||||||
|     cmake ../aws-c-common && \ |  | ||||||
|     make -j12 && make test && make install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-c-common-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| # The only tags currently on the repo are from 9/2020 and don't install |  | ||||||
| # anything, so we'll use current head of main branch (d60b60e) |  | ||||||
| FROM base AS awslc |  | ||||||
| RUN apk add --no-cache perl go g++ linux-headers && rm /usr/bin/g++ && rm /usr/bin/c++ && \ |  | ||||||
|     git clone --depth 1000 https://github.com/awslabs/aws-lc && cd aws-lc && \ |  | ||||||
|     git reset d60b60e --hard && cd .. && \ |  | ||||||
|     cmake -S aws-lc -B aws-lc/build -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_PREFIX_PATH=/usr/local -DCMAKE_INSTALL_PREFIX=/usr/local && \ |  | ||||||
|     cmake --build aws-lc/build --config RelWithDebInfo --target install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-lc-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS s2n |  | ||||||
| ENV S2N_LIBCRYPTO=awslc |  | ||||||
| COPY --from=awslc /aws-lc-clang.tgz / |  | ||||||
| RUN git clone --depth 1 -b v1.0.5 https://github.com/aws/s2n-tls && \ |  | ||||||
|     tar -xzf aws-lc-clang.tgz && \ |  | ||||||
|     mkdir s2n-build && cd s2n-build && \ |  | ||||||
|     cmake ../s2n-tls && \ |  | ||||||
|     make -j12 && make install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf s2n-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS cal |  | ||||||
| COPY --from=awslc /aws-lc-clang.tgz / |  | ||||||
| COPY --from=common /aws-c-common-clang.tgz / |  | ||||||
| # RUN git clone --depth 1 -b v0.5.5 https://github.com/awslabs/aws-c-cal && \ |  | ||||||
| RUN git clone --depth 1 -b static-musl-builds https://github.com/elerch/aws-c-cal && \ |  | ||||||
|     tar -xzf aws-c-common-clang.tgz && \ |  | ||||||
|     tar -xzf aws-lc-clang.tgz && \ |  | ||||||
|     mkdir cal-build && cd cal-build && \ |  | ||||||
|     cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-cal && \ |  | ||||||
|     make -j12 && make install |  | ||||||
| # No make test: |  | ||||||
| #  40 - ecdsa_p384_test_key_gen_export (Failed) |  | ||||||
| RUN tar -czf aws-c-cal-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS compression |  | ||||||
| COPY --from=common /aws-c-common-clang.tgz / |  | ||||||
| RUN git clone --depth 1 -b v0.2.10 https://github.com/awslabs/aws-c-compression && \ |  | ||||||
|     tar -xzf aws-c-common-clang.tgz && \ |  | ||||||
|     mkdir compression-build && cd compression-build && \ |  | ||||||
|     cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-compression && \ |  | ||||||
|     make -j12 && make test && make install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-c-compression-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS io |  | ||||||
| # Cal includes common and openssl |  | ||||||
| COPY --from=cal /aws-c-cal-clang.tgz / |  | ||||||
| COPY --from=s2n /s2n-clang.tgz / |  | ||||||
| RUN git clone --depth 1 -b v0.9.1 https://github.com/awslabs/aws-c-io && \ |  | ||||||
|     tar -xzf s2n-clang.tgz && \ |  | ||||||
|     tar -xzf aws-c-cal-clang.tgz && \ |  | ||||||
|     mkdir io-build && cd io-build && \ |  | ||||||
|     cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-io && \ |  | ||||||
|     make -j12 && make install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-c-io-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS http |  | ||||||
| # Cal includes common and openssl |  | ||||||
| # 2 test failures on musl - both "download medium file" |  | ||||||
| COPY --from=io /aws-c-io-clang.tgz / |  | ||||||
| COPY --from=compression /aws-c-compression-clang.tgz / |  | ||||||
| # RUN git clone --depth 1 -b v0.5.19 https://github.com/awslabs/aws-c-http && \ |  | ||||||
| RUN git clone --depth 1 -b v0.6.1 https://github.com/awslabs/aws-c-http && \ |  | ||||||
|     tar -xzf aws-c-io-clang.tgz && \ |  | ||||||
|     tar -xzf aws-c-compression-clang.tgz && \ |  | ||||||
|     mkdir http-build && cd http-build && \ |  | ||||||
|     cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-http && \ |  | ||||||
|     make -j12 && make install |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-c-http-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM base AS auth |  | ||||||
| # http should have all other dependencies |  | ||||||
| COPY --from=http /aws-c-http-clang.tgz / |  | ||||||
| RUN git clone --depth 1 -b v0.5.0 https://github.com/awslabs/aws-c-auth && \ |  | ||||||
|     tar -xzf aws-c-http-clang.tgz && \ |  | ||||||
|     mkdir auth-build && cd auth-build && \ |  | ||||||
|     cmake -DCMAKE_MODULE_PATH=/usr/local/lib64/cmake ../aws-c-auth && \ |  | ||||||
|     make -j12 && make install # chunked_signing_test fails |  | ||||||
| 
 |  | ||||||
| RUN tar -czf aws-c-auth-clang.tgz /usr/local/* |  | ||||||
| 
 |  | ||||||
| FROM alpine:3.13 as final |  | ||||||
| COPY --from=auth /aws-c-auth-clang.tgz / |  | ||||||
| ADD https://ziglang.org/download/0.9.0/zig-linux-x86_64-0.9.0.tar.xz / |  | ||||||
| RUN tar -xzf /aws-c-auth-clang.tgz && mkdir /src && tar -C /usr/local -xf zig-linux* && \ |  | ||||||
|     ln -s /usr/local/zig-linux*/zig /usr/local/bin/zig |  | ||||||
							
								
								
									

GitRepoStep.zig (new file, 206 lines)

|  | @ -0,0 +1,206 @@ | ||||||
|  | //! Publish Date: 2021_10_17 | ||||||
|  | //! This file is hosted at github.com/marler8997/zig-build-repos and is meant to be copied | ||||||
|  | //! to projects that use it. | ||||||
|  | const std = @import("std"); | ||||||
|  | const GitRepoStep = @This(); | ||||||
|  | 
 | ||||||
|  | pub const ShaCheck = enum { | ||||||
|  |     none, | ||||||
|  |     warn, | ||||||
|  |     err, | ||||||
|  | 
 | ||||||
|  |     pub fn reportFail(self: ShaCheck, comptime fmt: []const u8, args: anytype) void { | ||||||
|  |         switch (self) { | ||||||
|  |             .none => unreachable, | ||||||
|  |             .warn => std.log.warn(fmt, args), | ||||||
|  |             .err => { | ||||||
|  |                 std.log.err(fmt, args); | ||||||
|  |                 std.os.exit(0xff); | ||||||
|  |             }, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | step: std.build.Step, | ||||||
|  | builder: *std.build.Builder, | ||||||
|  | url: []const u8, | ||||||
|  | name: []const u8, | ||||||
|  | branch: ?[]const u8 = null, | ||||||
|  | sha: []const u8, | ||||||
|  | path: []const u8, | ||||||
|  | sha_check: ShaCheck = .warn, | ||||||
|  | fetch_enabled: bool, | ||||||
|  | 
 | ||||||
|  | var cached_default_fetch_option: ?bool = null; | ||||||
|  | pub fn defaultFetchOption(b: *std.build.Builder) bool { | ||||||
|  |     if (cached_default_fetch_option) |_| {} else { | ||||||
|  |         cached_default_fetch_option = if (b.option(bool, "fetch", "automatically fetch network resources")) |o| o else false; | ||||||
|  |     } | ||||||
|  |     return cached_default_fetch_option.?; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | pub fn create(b: *std.build.Builder, opt: struct { | ||||||
|  |     url: []const u8, | ||||||
|  |     branch: ?[]const u8 = null, | ||||||
|  |     sha: []const u8, | ||||||
|  |     path: ?[]const u8 = null, | ||||||
|  |     sha_check: ShaCheck = .warn, | ||||||
|  |     fetch_enabled: ?bool = null, | ||||||
|  | }) *GitRepoStep { | ||||||
|  |     var result = b.allocator.create(GitRepoStep) catch @panic("memory"); | ||||||
|  |     const name = std.fs.path.basename(opt.url); | ||||||
|  |     result.* = GitRepoStep{ | ||||||
|  |         .step = std.build.Step.init(.custom, "clone a git repository", b.allocator, make), | ||||||
|  |         .builder = b, | ||||||
|  |         .url = opt.url, | ||||||
|  |         .name = name, | ||||||
|  |         .branch = opt.branch, | ||||||
|  |         .sha = opt.sha, | ||||||
|  |         .path = if (opt.path) |p| (b.allocator.dupe(u8, p) catch @panic("memory")) else (std.fs.path.resolve(b.allocator, &[_][]const u8{ | ||||||
|  |             b.build_root, | ||||||
|  |             "libs", | ||||||
|  |             name, | ||||||
|  |         })) catch @panic("memory"), | ||||||
|  |         .sha_check = opt.sha_check, | ||||||
|  |         .fetch_enabled = if (opt.fetch_enabled) |fe| fe else defaultFetchOption(b), | ||||||
|  |     }; | ||||||
|  |     return result; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TODO: this should be included in std.build, it helps find bugs in build files | ||||||
|  | fn hasDependency(step: *const std.build.Step, dep_candidate: *const std.build.Step) bool { | ||||||
|  |     for (step.dependencies.items) |dep| { | ||||||
|  |         // TODO: should probably use step.loop_flag to prevent infinite recursion | ||||||
|  |         //       when a circular reference is encountered, or maybe keep track of | ||||||
|  |     //       the steps encountered with a hash set | ||||||
|  |         if (dep == dep_candidate or hasDependency(dep, dep_candidate)) | ||||||
|  |             return true; | ||||||
|  |     } | ||||||
|  |     return false; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn make(step: *std.build.Step) !void { | ||||||
|  |     const self = @fieldParentPtr(GitRepoStep, "step", step); | ||||||
|  | 
 | ||||||
|  |     std.fs.accessAbsolute(self.path, std.fs.File.OpenFlags{ .read = true }) catch { | ||||||
|  |         const branch_args = if (self.branch) |b| &[2][]const u8{ " -b ", b } else &[2][]const u8{ "", "" }; | ||||||
|  |         if (!self.fetch_enabled) { | ||||||
|  |             std.debug.print("Error: git repository '{s}' does not exist\n", .{self.path}); | ||||||
|  |             std.debug.print("       Use -Dfetch to download it automatically, or run the following to clone it:\n", .{}); | ||||||
|  |             std.debug.print("       git clone {s}{s}{s} {s} && git -C {3s} checkout {s} -b for_ziget\n", .{ self.url, branch_args[0], branch_args[1], self.path, self.sha }); | ||||||
|  |             std.os.exit(1); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         { | ||||||
|  |             var args = std.ArrayList([]const u8).init(self.builder.allocator); | ||||||
|  |             defer args.deinit(); | ||||||
|  |             try args.append("git"); | ||||||
|  |             try args.append("clone"); | ||||||
|  |             try args.append("--recurse-submodules"); | ||||||
|  |             try args.append(self.url); | ||||||
|  |             // TODO: clone it to a temporary location in case of failure | ||||||
|  |             //       also, remove that temporary location before running | ||||||
|  |             try args.append(self.path); | ||||||
|  |             if (self.branch) |branch| { | ||||||
|  |                 try args.append("-b"); | ||||||
|  |                 try args.append(branch); | ||||||
|  |             } | ||||||
|  |             try run(self.builder, args.items); | ||||||
|  |         } | ||||||
|  |         try run(self.builder, &[_][]const u8{ | ||||||
|  |             "git", | ||||||
|  |             "-C", | ||||||
|  |             self.path, | ||||||
|  |             "checkout", | ||||||
|  |             self.sha, | ||||||
|  |             "-b", | ||||||
|  |             "fordep", | ||||||
|  |         }); | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     try self.checkSha(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn checkSha(self: GitRepoStep) !void { | ||||||
|  |     if (self.sha_check == .none) | ||||||
|  |         return; | ||||||
|  | 
 | ||||||
|  |     const result: union(enum) { failed: anyerror, output: []const u8 } = blk: { | ||||||
|  |         const result = std.ChildProcess.exec(.{ | ||||||
|  |             .allocator = self.builder.allocator, | ||||||
|  |             .argv = &[_][]const u8{ | ||||||
|  |                 "git", | ||||||
|  |                 "-C", | ||||||
|  |                 self.path, | ||||||
|  |                 "rev-parse", | ||||||
|  |                 "HEAD", | ||||||
|  |             }, | ||||||
|  |             .cwd = self.builder.build_root, | ||||||
|  |             .env_map = self.builder.env_map, | ||||||
|  |         }) catch |e| break :blk .{ .failed = e }; | ||||||
|  |         try std.io.getStdErr().writer().writeAll(result.stderr); | ||||||
|  |         switch (result.term) { | ||||||
|  |             .Exited => |code| { | ||||||
|  |                 if (code == 0) break :blk .{ .output = result.stdout }; | ||||||
|  |                 break :blk .{ .failed = error.GitProcessNonZeroExit }; | ||||||
|  |             }, | ||||||
|  |             .Signal => break :blk .{ .failed = error.GitProcessFailedWithSignal }, | ||||||
|  |             .Stopped => break :blk .{ .failed = error.GitProcessWasStopped }, | ||||||
|  |             .Unknown => break :blk .{ .failed = error.GitProcessFailed }, | ||||||
|  |         } | ||||||
|  |     }; | ||||||
|  |     switch (result) { | ||||||
|  |         .failed => |err| { | ||||||
|  |             return self.sha_check.reportFail("failed to retrieve sha for repository '{s}': {s}", .{ self.name, @errorName(err) }); | ||||||
|  |         }, | ||||||
|  |         .output => |output| { | ||||||
|  |             if (!std.mem.eql(u8, std.mem.trimRight(u8, output, "\n\r"), self.sha)) { | ||||||
|  |                 return self.sha_check.reportFail("repository '{s}' sha does not match\nexpected: {s}\nactual  : {s}\n", .{ self.name, self.sha, output }); | ||||||
|  |             } | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn run(builder: *std.build.Builder, argv: []const []const u8) !void { | ||||||
|  |     { | ||||||
|  |         var msg = std.ArrayList(u8).init(builder.allocator); | ||||||
|  |         defer msg.deinit(); | ||||||
|  |         const writer = msg.writer(); | ||||||
|  |         var prefix: []const u8 = ""; | ||||||
|  |         for (argv) |arg| { | ||||||
|  |             try writer.print("{s}\"{s}\"", .{ prefix, arg }); | ||||||
|  |             prefix = " "; | ||||||
|  |         } | ||||||
|  |         std.log.info("[RUN] {s}", .{msg.items}); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     const child = try std.ChildProcess.init(argv, builder.allocator); | ||||||
|  |     defer child.deinit(); | ||||||
|  | 
 | ||||||
|  |     child.stdin_behavior = .Ignore; | ||||||
|  |     child.stdout_behavior = .Inherit; | ||||||
|  |     child.stderr_behavior = .Inherit; | ||||||
|  |     child.cwd = builder.build_root; | ||||||
|  |     child.env_map = builder.env_map; | ||||||
|  | 
 | ||||||
|  |     try child.spawn(); | ||||||
|  |     const result = try child.wait(); | ||||||
|  |     switch (result) { | ||||||
|  |         .Exited => |code| if (code != 0) { | ||||||
|  |             std.log.err("git clone failed with exit code {}", .{code}); | ||||||
|  |             std.os.exit(0xff); | ||||||
|  |         }, | ||||||
|  |         else => { | ||||||
|  |             std.log.err("git clone failed with: {}", .{result}); | ||||||
|  |             std.os.exit(0xff); | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Gets the repository path and also verifies that the step requesting the path | ||||||
|  | // is dependent on this step. | ||||||
|  | pub fn getPath(self: *const GitRepoStep, who_wants_to_know: *const std.build.Step) []const u8 { | ||||||
|  |     if (!hasDependency(who_wants_to_know, &self.step)) | ||||||
|  |         @panic("a step called GitRepoStep.getPath but has not added it as a dependency"); | ||||||
|  |     return self.path; | ||||||
|  | } | ||||||
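
GitRepoStep pins a dependency to a URL and SHA, clones it under `libs/` only when `-Dfetch` is given, and verifies the checked-out SHA otherwise. A minimal sketch of the wiring, mirroring the zfetch usage in `build.zig` below:

```zig
// Sketch mirroring build.zig below: pin zfetch to a SHA and make the
// executable build depend on the clone/verify step.
const std = @import("std");
const GitRepoStep = @import("GitRepoStep.zig");

pub fn build(b: *std.build.Builder) void {
    const zfetch_repo = GitRepoStep.create(b, .{
        .url = "https://github.com/truemedian/zfetch",
        .sha = "271cab5da4d12c8f08e67aa0cd5268da100e52f1",
    });
    const exe = b.addExecutable("demo", "src/main.zig");
    exe.step.dependOn(&zfetch_repo.step);
    // getPath is only valid from a step that depends on the repo step.
    _ = zfetch_repo.getPath(&exe.step);
}
```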
							
								
								
									

README.md (129 changed lines)

|  | @ -1,122 +1,69 @@ | ||||||
| # AWS SDK for Zig | # AWS SDK for Zig (zig-native branch) | ||||||
| 
 | 
 | ||||||
| [](https://drone.lerch.org/lobo/aws-sdk-for-zig) | [](https://drone.lerch.org/api/badges/lobo/aws-sdk-for-zig/) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | ### NOTE: All tests pass, but credentials currently must be passed through environment | ||||||
| 
 | 
 | ||||||
| This SDK currently supports all AWS services except EC2 and S3. These two | This SDK currently supports all AWS services except EC2 and S3. These two | ||||||
| services only support XML, and zig 0.8.0 and master both trigger compile | services only support XML, and zig 0.8.0 and master both trigger compile | ||||||
| errors while incorporating the XML parser. S3 also requires some plumbing | errors while incorporating the XML parser. S3 also requires some plumbing | ||||||
| tweaks in the signature calculation, which is planned for a zig version | tweaks in the signature calculation. Examples of usage are in src/main.zig. | ||||||
| (probably self-hosted 0.9.0) that no longer has an error triggered. Examples |  | ||||||
| of usage are in src/main.zig. |  | ||||||
| 
 | 
 | ||||||
| This is designed to be built statically using the `aws_c_*` libraries, so | Current executable size for the demo is 868k after compiling with -Drelease-safe | ||||||
| we inherit a lot of the goodness of the work going on there. Current | and [stripping the executable after compilation](https://github.com/ziglang/zig/issues/351). | ||||||
| executable size is 9.7M, about half of which is due to the SSL library. | This is for x86_linux, (which is all that's tested at the moment). | ||||||
| Running strip on the executable after compilation (it seems zig strip |  | ||||||
| only goes so far), reduces this to 4.3M. This is for x86_linux, |  | ||||||
| (which is all that's tested at the moment). |  | ||||||
| 
 |  | ||||||
| # 2022-01-10 SDK Update |  | ||||||
| 
 |  | ||||||
| To get smaller executable size and better portability with faster compilation, |  | ||||||
| my intent is to rework the http communications using |  | ||||||
| [requestz](https://github.com/ducdetronquito/requestz).  This relies on a |  | ||||||
| couple other projects, and will require the creation of a zig implementation |  | ||||||
| for [Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html), |  | ||||||
| along with support for some S3 quirks, etc. It will also reduce compatibility |  | ||||||
| with some edge cases. Long term I think this is a better approach, however and |  | ||||||
| will remove (or replace) a ton of the C dependencies as well as avoid a bunch |  | ||||||
| of complexity such as the event loop C implementation found in the AWS |  | ||||||
| libraries, which eventually could be replaced by zig async. I have created a |  | ||||||
| [new branch](https://github.com/elerch/aws-sdk-for-zig/tree/zig-native) |  | ||||||
| for this work as master is currently working fairly well. I'd also love to have |  | ||||||
| an official package manager in zig, but I know this is high on the priority |  | ||||||
| list for the foundation. In the meantime I will depend on git commands executed |  | ||||||
| by build.zig to handle packages. |  | ||||||
| 
 | 
 | ||||||
| ## Building | ## Building | ||||||
| 
 | 
 | ||||||
| I am assuming here that if you're playing with zig, you pretty much know | `zig build` should work. It will build the code generation project, run | ||||||
| what you're doing, so I will stay brief. | the code generation, then build the main project with the generated code. | ||||||
| 
 | 
 | ||||||
| First, the dependencies are required. Use the Dockerfile to build these. | First time build should use `zig build -Dfetch` to fetch dependent packages | ||||||
| a `docker build` will do, but be prepared for it to run a while. The | (zfetch and friends). | ||||||
| Dockerfile has been tested on x86_64 linux, but I do hope to get arm64 |  | ||||||
| supported as well. |  | ||||||
| 
 |  | ||||||
| Once that's done, you'll have an alpine image with all dependencies ready |  | ||||||
| to go and zig master installed. There are some build-related things still |  | ||||||
| broken in 0.8.0 and hopefully 0.8.1 will address those and we can be on |  | ||||||
| a standard release. |  | ||||||
| 
 |  | ||||||
| * `zig build` should work. It will build the code generation project, run |  | ||||||
|   the code generation, then build the main project with the generated code. |  | ||||||
|   There is also a Makefile included, but this hasn't been used in a while |  | ||||||
|   and I'm not sure that works at the moment. |  | ||||||
| 
 | 
 | ||||||
| ## Running | ## Running | ||||||
| 
 | 
 | ||||||
| This library uses the aws c libraries for its work, so it operates like most | This library mimics the aws c libraries for its work, so it operates like most | ||||||
| other 'AWS things'. Note that I tested by setting the appropriate environment | other 'AWS things'. main.zig gives you a handful of examples for working with services. | ||||||
| variables, so config files haven't gotten a run through. |  | ||||||
| main.zig gives you a handful of examples for working with services. |  | ||||||
| For local testing or alternative endpoints, there's no real standard, so | For local testing or alternative endpoints, there's no real standard, so | ||||||
| there is code to look for `AWS_ENDPOINT_URL` environment variable that will | there is code to look for `AWS_ENDPOINT_URL` environment variable that will | ||||||
| supercede all other configuration. | supersede all other configuration. Note that an alternative endpoint may | ||||||
|  | require passing in a client option to specify a different TLS root certificate | ||||||
|  | (pass null to disable certificate verification). | ||||||
| 
 | 
 | ||||||
| ## Dependencies | Given that credential handling is still very basic, you may want to look at | ||||||
|  | the [old branch](https://github.com/elerch/aws-sdk-for-zig/tree/aws-crt) if | ||||||
|  | your needs include something more robust. Note that that branch supports | ||||||
|  | x86_64 linux only. | ||||||
| 
 | 
 | ||||||
|  | ## Limitations | ||||||
| 
 | 
 | ||||||
| Full dependency tree: | There are many nuances of AWS V4 signature calculation. S3 is not supported | ||||||
| aws-c-auth | because it uses many of these test cases. STS tokens using a session token | ||||||
|    * s2n | are not yet implemented, though should be trivial. I have also seen a few | ||||||
|       * aws-lc | service errors caused by discrepancies in signatures, though I don't know yet | ||||||
|    * aws-c-common | if this was an issue in the service itself (has not repro'd) or if there | ||||||
|    * aws-c-compression | is a latent bug. | ||||||
|      * aws-c-common |  | ||||||
|    * aws-c-http |  | ||||||
|      * s2n |  | ||||||
|      * aws-c-common |  | ||||||
|      * aws-c-io |  | ||||||
|        * aws-c-common |  | ||||||
|        * s2n |  | ||||||
|          * aws-lc |  | ||||||
|        * aws-c-cal |  | ||||||
|          * aws-c-common |  | ||||||
|          * aws-lc |  | ||||||
|      * aws-c-compression |  | ||||||
|        * aws-c-common |  | ||||||
|    * aws-c-cal |  | ||||||
|      * aws-c-common |  | ||||||
|      * aws-lc |  | ||||||
| 
 | 
 | ||||||
| Build order based on above: | Only environment variable based credentials can be used at the moment. | ||||||
| 
 |  | ||||||
| 1. aws-c-common |  | ||||||
| 1. aws-lc |  | ||||||
| 2. s2n |  | ||||||
| 2. aws-c-cal |  | ||||||
| 2. aws-c-compression |  | ||||||
| 3. aws-c-io |  | ||||||
| 4. aws-c-http |  | ||||||
| 5. aws-c-auth |  | ||||||
| 
 |  | ||||||
| Dockerfile in this repo will manage this |  | ||||||
| 
 | 
 | ||||||
| TODO List: | TODO List: | ||||||
| 
 | 
 | ||||||
| * Implement jitter/exponential backoff. This appears to be configuration of | * Move to new Docker image for CI/CD | ||||||
|   `aws_c_io` and should therefore be trivial | * Add STS key support | ||||||
|  | * Implement credentials provider | ||||||
|  | * Implement jitter/exponential backoff | ||||||
| * Implement timeouts and other TODO's in the code | * Implement timeouts and other TODO's in the code | ||||||
| * Switch to aws-c-cal upstream once [PR for full static musl build support is merged](https://github.com/awslabs/aws-c-cal/pull/89) |  | ||||||
|   (see Dockerfile) |  | ||||||
| * Implement [AWS restXml protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-restxml-protocol.html). | * Implement [AWS restXml protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-restxml-protocol.html). | ||||||
|   Includes S3. Total service count 4. This may be blocked due to the same issue as EC2. |   Includes S3. Total service count 4. This may be blocked due to the same issue as EC2. | ||||||
| * Implement [AWS EC2 query protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-ec2-query-protocol.html). | * Implement [AWS EC2 query protocol](https://awslabs.github.io/smithy/1.0/spec/aws/aws-ec2-query-protocol.html). | ||||||
|   Includes EC2. Total service count 1. This is currently blocked, probably on |   Includes EC2. Total service count 1. This may be blocked on a compiler bug, | ||||||
|   self-hosted compiler coming in zig 0.9.0 (January 2022) due to compiler bug |   though it has not been tested with zig 0.9.0. It may need to wait for the | ||||||
|  |   self-hosted compiler expected in zig 0.10.0 (eta May 2022). | ||||||
|   discovered. More details and llvm ir log can be found in the |   More details and the llvm ir log can be found in the | ||||||
|   [XML branch](https://git.lerch.org/lobo/aws-sdk-for-zig/src/branch/xml). |   [XML branch](https://git.lerch.org/lobo/aws-sdk-for-zig/src/branch/xml). | ||||||
|  | * Implement sigv4a signing | ||||||
| 
 | 
 | ||||||
| Compiler wishlist/watchlist: | Compiler wishlist/watchlist: | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
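
The README now states that credentials come from environment variables and that an alternative endpoint may need a different TLS root certificate passed as a client option (or null to disable verification). A rough sketch of what that looks like against the `ClientOptions` added in `src/aws.zig` below; the real examples live in `src/main.zig`, which is not part of this diff:

```zig
// Approximate usage, assuming the Client/ClientOptions API shown in the
// src/aws.zig diff below and a caller living next to it in src/.
const std = @import("std");
const aws = @import("aws.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Credentials are read from the environment; pass .trust_pem = null to
    // disable certificate verification, e.g. for a local AWS_ENDPOINT_URL.
    var client = try aws.Client.init(gpa.allocator(), .{ .trust_pem = null });
    defer client.deinit();
}
```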
							
								
								
									

VersionStep.zig (new file, 229 lines)

|  | @ -0,0 +1,229 @@ | ||||||
|  | //! Publish Date: 2022-01-12 | ||||||
|  | //! This file is hosted at ??? and is meant to be copied | ||||||
|  | //! to projects that use it. Sample usage: | ||||||
|  | //! | ||||||
|  | //! const version = VersionStep.create(b, null); | ||||||
|  | //! exe.step.dependOn(&version.step); | ||||||
|  | 
 | ||||||
|  | const std = @import("std"); | ||||||
|  | const Step = @This(); | ||||||
|  | 
 | ||||||
|  | step: std.build.Step, | ||||||
|  | builder: *std.build.Builder, | ||||||
|  | version_path: []const u8, | ||||||
|  | 
 | ||||||
|  | // Creates a step that will add the git version info in a file in src/ | ||||||
|  | // so it can be consumed by additional code. If version_path is not specified, | ||||||
|  | // it will default to "git_version.zig". This should be part of .gitignore | ||||||
|  | pub fn create(b: *std.build.Builder, version_path: ?[]const u8) *Step { | ||||||
|  |     var result = b.allocator.create(Step) catch @panic("memory"); | ||||||
|  |     result.* = Step{ | ||||||
|  |         .step = std.build.Step.init(.custom, "create version file", b.allocator, make), | ||||||
|  |         .builder = b, | ||||||
|  |         .version_path = std.fs.path.resolve(b.allocator, &[_][]const u8{ | ||||||
|  |             b.build_root, | ||||||
|  |             "src", | ||||||
|  |             version_path orelse "git_version.zig", | ||||||
|  |         }) catch @panic("memory"), | ||||||
|  |     }; | ||||||
|  |     return result; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn make(step: *std.build.Step) !void { | ||||||
|  |     const self = @fieldParentPtr(Step, "step", step); | ||||||
|  |     const file = try std.fs.createFileAbsolute(self.version_path, .{}); | ||||||
|  |     defer file.close(); | ||||||
|  |     const version = try getGitVersion( | ||||||
|  |         self.builder.allocator, | ||||||
|  |         self.builder.build_root, | ||||||
|  |         self.builder.env_map, | ||||||
|  |     ); | ||||||
|  |     defer version.deinit(); | ||||||
|  |     try file.writer().print( | ||||||
|  |         \\pub const hash = "{s}"; | ||||||
|  |         \\pub const abbreviated_hash = "{s}"; | ||||||
|  |         \\pub const commit_date = "{s}"; | ||||||
|  |         \\pub const branch = "{s}"; | ||||||
|  |         \\pub const dirty = {b}; | ||||||
|  |         \\pub const pretty_version = "{s}"; | ||||||
|  |     , .{ | ||||||
|  |         version.hash, | ||||||
|  |         version.abbreviated_hash, | ||||||
|  |         version.commit_date, | ||||||
|  |         version.branch, | ||||||
|  |         version.dirty, | ||||||
|  |         version.pretty_version, | ||||||
|  |     }); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const GitVersion = struct { | ||||||
|  |     hash: []const u8, | ||||||
|  |     abbreviated_hash: []const u8, | ||||||
|  |     commit_date: []const u8, | ||||||
|  |     branch: []const u8, | ||||||
|  |     dirty: bool, | ||||||
|  |     pretty_version: []const u8, | ||||||
|  | 
 | ||||||
|  |     allocator: std.mem.Allocator, | ||||||
|  | 
 | ||||||
|  |     const Self = @This(); | ||||||
|  | 
 | ||||||
|  |     pub fn deinit(self: Self) void { | ||||||
|  |         self.allocator.free(self.hash); | ||||||
|  |         self.allocator.free(self.abbreviated_hash); | ||||||
|  |         self.allocator.free(self.commit_date); | ||||||
|  |         self.allocator.free(self.branch); | ||||||
|  |         self.allocator.free(self.pretty_version); | ||||||
|  |     } | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | fn getGitVersion(allocator: std.mem.Allocator, git_working_root: ?[]const u8, env: anytype) !GitVersion { | ||||||
|  |     // git log -1 --pretty="%H%n%h%n%ci%n%D" | ||||||
|  |     // 3bf6adc13e4aa653a7b75b1b5e9c9db5215df8e1 | ||||||
|  |     // 3bf6adc | ||||||
|  |     // 2022-01-12 12:21:28 -0800 | ||||||
|  |     // HEAD -> zig-native | ||||||
|  | 
 | ||||||
|  |     const log_output = run( | ||||||
|  |         allocator, | ||||||
|  |         &[_][]const u8{ | ||||||
|  |             "git", | ||||||
|  |             "log", | ||||||
|  |             "-1", | ||||||
|  |             "--pretty=%H%n%h%n%ci%n%D", | ||||||
|  |         }, | ||||||
|  |         git_working_root, | ||||||
|  |         env, | ||||||
|  |     ) catch |e| { | ||||||
|  |         if (std.os.getenv("DRONE_COMMIT_SHA") != null) | ||||||
|  |             return getGitVersionFromDrone(allocator); | ||||||
|  |         return e; | ||||||
|  |     }; | ||||||
|  |     defer allocator.free(log_output); | ||||||
|  |     const line_data = try getLines(allocator, 4, log_output); | ||||||
|  |     const hash = line_data[0]; | ||||||
|  |     const abbrev_hash = line_data[1]; | ||||||
|  |     const date = line_data[2]; | ||||||
|  |     const branch = line_data[3]; | ||||||
|  | 
 | ||||||
|  |     // git status --porcelain | ||||||
|  |     const status_output = try run( | ||||||
|  |         allocator, | ||||||
|  |         &[_][]const u8{ | ||||||
|  |             "git", | ||||||
|  |             "status", | ||||||
|  |             "--porcelain", | ||||||
|  |         }, | ||||||
|  |         git_working_root, | ||||||
|  |         env, | ||||||
|  |     ); | ||||||
|  |     const dirty = blk: { | ||||||
|  |         if (status_output.len > 0) { | ||||||
|  |             allocator.free(status_output); | ||||||
|  |             break :blk true; | ||||||
|  |         } | ||||||
|  |         break :blk false; | ||||||
|  |     }; | ||||||
|  |     const dirty_str = blk: { | ||||||
|  |         if (dirty) { | ||||||
|  |             break :blk " (dirty)"; | ||||||
|  |         } | ||||||
|  |         break :blk ""; | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     return GitVersion{ | ||||||
|  |         .hash = hash, | ||||||
|  |         .abbreviated_hash = abbrev_hash, | ||||||
|  |         .commit_date = date, | ||||||
|  |         .branch = branch, | ||||||
|  |         .allocator = allocator, | ||||||
|  |         .dirty = dirty, | ||||||
|  |         .pretty_version = try prettyVersion(allocator, abbrev_hash, date, dirty_str), | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | fn prettyVersion(allocator: std.mem.Allocator, abbrev_hash: []const u8, date: []const u8, dirty_str: []const u8) ![]const u8 { | ||||||
|  |     const pretty_version: []const u8 = try std.fmt.allocPrint( | ||||||
|  |         allocator, | ||||||
|  |         "version {s}, committed at {s}{s}", | ||||||
|  |         .{ | ||||||
|  |             abbrev_hash, | ||||||
|  |             date, | ||||||
|  |             dirty_str, | ||||||
|  |         }, | ||||||
|  |     ); | ||||||
|  |     return pretty_version; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn getGitVersionFromDrone(allocator: std.mem.Allocator) !GitVersion { | ||||||
|  |     const abbrev_hash = std.os.getenv("DRONE_COMMIT_SHA").?[0..7]; // This isn't quite how git works, but ok | ||||||
|  |     const date = std.os.getenv("DRONE_BUILD_STARTED").?; // this is a timestamp :( | ||||||
|  |     return GitVersion{ | ||||||
|  |         .hash = std.os.getenv("DRONE_COMMIT_SHA").?, | ||||||
|  |         .abbreviated_hash = abbrev_hash, | ||||||
|  |         .commit_date = date, | ||||||
|  |         .branch = std.os.getenv("DRONE_COMMIT_BRANCH").?, | ||||||
|  |         .allocator = allocator, | ||||||
|  |         .dirty = false, | ||||||
|  |         .pretty_version = try prettyVersion(allocator, abbrev_hash, date, ""), | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | fn getLines(allocator: std.mem.Allocator, comptime line_count: u32, data: []const u8) ![line_count][]u8 { | ||||||
|  |     var line: u32 = 0; | ||||||
|  |     var start: u32 = 0; | ||||||
|  |     var current: u32 = 0; | ||||||
|  |     var line_data: [line_count][]u8 = undefined; | ||||||
|  |     errdefer { | ||||||
|  |         while (line > 0) { | ||||||
|  |             allocator.free(line_data[line]); | ||||||
|  |             line -= 1; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     for (data) |c| { | ||||||
|  |         // try std.io.getStdErr().writer().print("line: {d}, c: {c}, cur: {d}, strt: {d}\n", .{ line, c, current, start }); | ||||||
|  |         if (c == '\n') { | ||||||
|  |             line_data[line] = try allocator.dupe(u8, data[start..current]); | ||||||
|  |             // try std.io.getStdErr().writer().print("c: {d}, s: {d}, data: '{s}'\n", .{ current, start, line_data[line] }); | ||||||
|  |             start = current + 1; | ||||||
|  |             line += 1; | ||||||
|  |         } | ||||||
|  |         current += 1; | ||||||
|  |     } | ||||||
|  |     return line_data; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // env is a std.process.BufMap, but that's private, which is a little weird tbh | ||||||
|  | fn run(allocator: std.mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env: anytype) ![]const u8 { | ||||||
|  |     { | ||||||
|  |         var msg = std.ArrayList(u8).init(allocator); | ||||||
|  |         defer msg.deinit(); | ||||||
|  |         const writer = msg.writer(); | ||||||
|  |         var prefix: []const u8 = ""; | ||||||
|  |         for (argv) |arg| { | ||||||
|  |             try writer.print("{s}\"{s}\"", .{ prefix, arg }); | ||||||
|  |             prefix = " "; | ||||||
|  |         } | ||||||
|  |         std.log.info("[RUN] {s}", .{msg.items}); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     const result = try std.ChildProcess.exec(.{ | ||||||
|  |         .allocator = allocator, | ||||||
|  |         .argv = argv, | ||||||
|  |         .cwd = cwd, | ||||||
|  |         .env_map = env, | ||||||
|  |     }); | ||||||
|  |     defer if (result.stderr.len > 0) allocator.free(result.stderr); | ||||||
|  |     try std.io.getStdErr().writer().writeAll(result.stderr); | ||||||
|  | 
 | ||||||
|  |     switch (result.term) { | ||||||
|  |         .Exited => |code| if (code != 0) { | ||||||
|  |             std.log.err("process failed with exit code: {}", .{code}); | ||||||
|  | 
 | ||||||
|  |             std.os.exit(0xff); | ||||||
|  |         }, | ||||||
|  |         else => { | ||||||
|  |             std.log.err("process failed due to exception: {}", .{result}); | ||||||
|  |             std.os.exit(0xff); | ||||||
|  |         }, | ||||||
|  |     } | ||||||
|  |     return result.stdout; | ||||||
|  | } | ||||||
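
VersionStep writes `src/git_version.zig` (now listed in `.gitignore`) with the commit hash, branch, and dirty flag, so any file under `src/` can import it. A hypothetical consumer, using the constant names that `make` writes above:

```zig
// Hypothetical consumer of the generated src/git_version.zig; the constant
// names match what VersionStep.make() prints above.
const std = @import("std");
const git_version = @import("git_version.zig");

pub fn logVersion() void {
    std.log.info("{s} (branch {s}, dirty: {})", .{
        git_version.pretty_version,
        git_version.branch,
        git_version.dirty,
    });
}
```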
							
								
								
									

build.zig (123 changed lines)

|  | @ -1,8 +1,18 @@ | ||||||
| const std = @import("std"); | const std = @import("std"); | ||||||
| const builtin = @import("builtin"); | const builtin = @import("builtin"); | ||||||
| const Builder = @import("std").build.Builder; | const Builder = @import("std").build.Builder; | ||||||
|  | const GitRepoStep = @import("GitRepoStep.zig"); | ||||||
|  | const CopyStep = @import("CopyStep.zig"); | ||||||
|  | const tst = @import("build_test.zig"); | ||||||
|  | const VersionStep = @import("VersionStep.zig"); | ||||||
| 
 | 
 | ||||||
| pub fn build(b: *Builder) !void { | pub fn build(b: *Builder) !void { | ||||||
|  |     const zfetch_repo = GitRepoStep.create(b, .{ | ||||||
|  |         .url = "https://github.com/truemedian/zfetch", | ||||||
|  |         // .branch = "0.1.10", // branch also takes tags. Tag 0.1.10 isn't quite new enough | ||||||
|  |         .sha = "271cab5da4d12c8f08e67aa0cd5268da100e52f1", | ||||||
|  |     }); | ||||||
|  | 
 | ||||||
|     // Standard target options allows the person running `zig build` to choose |     // Standard target options allows the person running `zig build` to choose | ||||||
|     // what target to build for. Here we do not override the defaults, which |     // what target to build for. Here we do not override the defaults, which | ||||||
|     // means any target is allowed, and the default is native. Other options |     // means any target is allowed, and the default is native. Other options | ||||||
|  | @ -17,46 +27,33 @@ pub fn build(b: *Builder) !void { | ||||||
|     // https://github.com/ziglang/zig/issues/855 |     // https://github.com/ziglang/zig/issues/855 | ||||||
|     exe.addPackagePath("smithy", "smithy/src/smithy.zig"); |     exe.addPackagePath("smithy", "smithy/src/smithy.zig"); | ||||||
| 
 | 
 | ||||||
|     // This bitfield workaround will end up requiring a bunch of headers that |  | ||||||
|     // currently mean building in the docker container is the best way to build |  | ||||||
|     // TODO: Determine if it's a good idea to copy these files out of our |  | ||||||
|     // docker container to the local fs so we can just build even outside |  | ||||||
|     // the container. And maybe, just maybe these even get committed to |  | ||||||
|     // source control? |  | ||||||
|     exe.addCSourceFile("src/bitfield-workaround.c", &[_][]const u8{"-std=c99"}); |  | ||||||
|     const c_include_dirs = .{ |  | ||||||
|         "./src/", |  | ||||||
|         "/usr/local/include", |  | ||||||
|     }; |  | ||||||
|     inline for (c_include_dirs) |dir| |  | ||||||
|         exe.addIncludeDir(dir); |  | ||||||
| 
 |  | ||||||
|     const dependent_objects = .{ |  | ||||||
|         "/usr/local/lib64/libs2n.a", |  | ||||||
|         "/usr/local/lib64/libcrypto.a", |  | ||||||
|         "/usr/local/lib64/libssl.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-auth.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-cal.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-common.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-compression.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-http.a", |  | ||||||
|         "/usr/local/lib64/libaws-c-io.a", |  | ||||||
|     }; |  | ||||||
|     inline for (dependent_objects) |obj| |  | ||||||
|         exe.addObjectFile(obj); |  | ||||||
| 
 |  | ||||||
|     exe.linkSystemLibrary("c"); |  | ||||||
|     exe.setTarget(target); |     exe.setTarget(target); | ||||||
|     exe.setBuildMode(mode); |     exe.setBuildMode(mode); | ||||||
| 
 | 
 | ||||||
|     exe.override_dest_dir = .{ .custom = ".." }; |  | ||||||
|     exe.linkage = .static; |     exe.linkage = .static; | ||||||
| 
 | 
 | ||||||
|     // TODO: Strip doesn't actually fully strip the executable. If we're on |     // TODO: Strip doesn't actually fully strip the executable. If we're on | ||||||
|     //       linux we can run strip on the result, probably at the expense |     //       linux we can run strip on the result, probably at the expense | ||||||
|     //       of busting cache logic |     //       of busting cache logic | ||||||
|     const is_strip = b.option(bool, "strip", "strip exe [true]") orelse true; |     exe.strip = b.option(bool, "strip", "strip exe [true]") orelse true; | ||||||
|     exe.strip = is_strip; |     const copy_deps = CopyStep.create( | ||||||
|  |         b, | ||||||
|  |         "zfetch_deps.zig", | ||||||
|  |         "libs/zfetch/deps.zig", | ||||||
|  |     ); | ||||||
|  |     copy_deps.step.dependOn(&zfetch_repo.step); | ||||||
|  | 
 | ||||||
|  |     const version = VersionStep.create(b, null); | ||||||
|  |     exe.step.dependOn(&version.step); | ||||||
|  |     exe.step.dependOn(©_deps.step); | ||||||
|  | 
 | ||||||
|  |     // This import won't work unless we're already cloned. The way around | ||||||
|  |     // this is to have a multi-stage build process, but that's a lot of work. | ||||||
|  |     // Instead, I've copied the addPackage and tweaked it for the build prefix | ||||||
|  |     // so we'll have to keep that in sync with upstream | ||||||
|  |     // const zfetch = @import("libs/zfetch/build.zig"); | ||||||
|  |     exe.addPackage(getZfetchPackage(b, "libs/zfetch") catch unreachable); | ||||||
|  |     exe.addPackagePath("iguanaTLS", "libs/zfetch/libs/iguanaTLS/src/main.zig"); | ||||||
| 
 | 
 | ||||||
|     const run_cmd = exe.run(); |     const run_cmd = exe.run(); | ||||||
|     run_cmd.step.dependOn(b.getInstallStep()); |     run_cmd.step.dependOn(b.getInstallStep()); | ||||||
|  | @ -67,27 +64,17 @@ pub fn build(b: *Builder) !void { | ||||||
|     const run_step = b.step("run", "Run the app"); |     const run_step = b.step("run", "Run the app"); | ||||||
|     run_step.dependOn(&run_cmd.step); |     run_step.dependOn(&run_cmd.step); | ||||||
| 
 | 
 | ||||||
|     const test_step = b.step("test", "Run library tests"); |     var test_step = try tst.addTestStep(b, mode, exe.packages.items); | ||||||
|     var build_dir = try std.fs.openDirAbsolute(b.build_root, .{}); |     test_step.dependOn(&version.step); | ||||||
|     defer build_dir.close(); |  | ||||||
|     var src_dir = try build_dir.openDir("src", .{ .iterate = true }); |  | ||||||
|     defer src_dir.close(); |  | ||||||
|     var iterator = src_dir.iterate(); |  | ||||||
|     while (try iterator.next()) |entry| { |  | ||||||
|         if (std.mem.endsWith(u8, entry.name, ".zig")) { |  | ||||||
|             const name = try std.fmt.allocPrint(b.allocator, "src/{s}", .{entry.name}); |  | ||||||
|             defer b.allocator.free(name); |  | ||||||
|             const t = b.addTest(name); |  | ||||||
|             t.addPackagePath("smithy", "smithy/src/smithy.zig"); |  | ||||||
|             t.setBuildMode(mode); |  | ||||||
|             test_step.dependOn(&t.step); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| 
 | 
 | ||||||
|     // TODO: Support > linux |     if (target.getOs().tag == .linux) { | ||||||
|     if (builtin.os.tag == .linux) { |         // TODO: Support > linux with RunStep | ||||||
|  |         // std.build.RunStep.create(null,null).cwd(std.fs.path.resolve(b.build_root, "codegen")).addArgs(...) | ||||||
|         const codegen = b.step("gen", "Generate zig service code from smithy models"); |         const codegen = b.step("gen", "Generate zig service code from smithy models"); | ||||||
|         codegen.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step); |         codegen.dependOn(&b.addSystemCommand(&.{ "/bin/sh", "-c", "cd codegen && zig build" }).step); | ||||||
|  | 
 | ||||||
|  |         // This can probably be triggered instead by GitRepoStep cloning the repo | ||||||
|  |         // with models | ||||||
|         // Since codegen binary is built every time, if it's newer than our |         // Since codegen binary is built every time, if it's newer than our | ||||||
|         // service manifest we know it needs to be regenerated. So this step |         // service manifest we know it needs to be regenerated. So this step | ||||||
|         // will remove the service manifest if codegen has been touched, thereby |         // will remove the service manifest if codegen has been touched, thereby | ||||||
|  | @ -110,3 +97,39 @@ pub fn build(b: *Builder) !void { | ||||||
| 
 | 
 | ||||||
|     exe.install(); |     exe.install(); | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | fn getDependency(comptime lib_prefix: []const u8, comptime name: []const u8, comptime root: []const u8) !std.build.Pkg { | ||||||
|  |     const path = lib_prefix ++ "/libs/" ++ name ++ "/" ++ root; | ||||||
|  | 
 | ||||||
|  |     // We don't actually care if the dependency has been checked out, as | ||||||
|  |     // GitRepoStep will handle that for us | ||||||
|  |     // Make sure that the dependency has been checked out. | ||||||
|  |     // std.fs.cwd().access(path, .{}) catch |err| switch (err) { | ||||||
|  |     //     error.FileNotFound => { | ||||||
|  |     //         std.log.err("zfetch: dependency '{s}' not checked out", .{name}); | ||||||
|  |     // | ||||||
|  |     //         return err; | ||||||
|  |     //     }, | ||||||
|  |     //     else => return err, | ||||||
|  |     // }; | ||||||
|  | 
 | ||||||
|  |     return std.build.Pkg{ | ||||||
|  |         .name = name, | ||||||
|  |         .path = .{ .path = path }, | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | pub fn getZfetchPackage(b: *std.build.Builder, comptime lib_prefix: []const u8) !std.build.Pkg { | ||||||
|  |     var dependencies = b.allocator.alloc(std.build.Pkg, 4) catch unreachable; | ||||||
|  | 
 | ||||||
|  |     dependencies[0] = try getDependency(lib_prefix, "iguanaTLS", "src/main.zig"); | ||||||
|  |     dependencies[1] = try getDependency(lib_prefix, "network", "network.zig"); | ||||||
|  |     dependencies[2] = try getDependency(lib_prefix, "uri", "uri.zig"); | ||||||
|  |     dependencies[3] = try getDependency(lib_prefix, "hzzp", "src/main.zig"); | ||||||
|  | 
 | ||||||
|  |     return std.build.Pkg{ | ||||||
|  |         .name = "zfetch", | ||||||
|  |         .path = .{ .path = lib_prefix ++ "/src/main.zig" }, | ||||||
|  |         .dependencies = dependencies, | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  |  | ||||||
							
								
								
									

build_test.zig (new file, 28 lines)

|  | @ -0,0 +1,28 @@ | ||||||
|  | //! Publish Date: 2022-01-12 | ||||||
|  | //! This file is hosted at ??? and is meant to be copied | ||||||
|  | //! to projects that use it. Sample usage: | ||||||
|  | //! | ||||||
|  | //! const @"test" = @import("build_test.zig"); | ||||||
|  | //! var test_step = try @"test".addTestStep(b, mode, exe.packages.items); | ||||||
|  | const std = @import("std"); | ||||||
|  | 
 | ||||||
|  | pub fn addTestStep(b: *std.build.Builder, mode: std.builtin.Mode, packages: []std.build.Pkg) !*std.build.Step { | ||||||
|  |     const test_step = b.step("test", "Run all tests"); | ||||||
|  |     var src_dir = try std.fs.openDirAbsolute(try std.fs.path.resolve(b.allocator, &[_][]const u8{ | ||||||
|  |         b.build_root, | ||||||
|  |         "src", | ||||||
|  |     }), .{ .iterate = true }); | ||||||
|  |     defer src_dir.close(); | ||||||
|  |     var iterator = src_dir.iterate(); | ||||||
|  |     while (try iterator.next()) |entry| { | ||||||
|  |         if (std.mem.endsWith(u8, entry.name, ".zig")) { | ||||||
|  |             const name = try std.fmt.allocPrint(b.allocator, "src/{s}", .{entry.name}); | ||||||
|  |             defer b.allocator.free(name); | ||||||
|  |             const t = b.addTest(name); | ||||||
|  |             for (packages) |package| t.addPackage(package); | ||||||
|  |             t.setBuildMode(mode); | ||||||
|  |             test_step.dependOn(&t.step); | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     return test_step; | ||||||
|  | } | ||||||
							
								
								
									

src/aws.zig (13 changed lines)

|  | @ -1,6 +1,6 @@ | ||||||
| const std = @import("std"); | const std = @import("std"); | ||||||
| 
 | 
 | ||||||
| const awshttp = @import("awshttp.zig"); | const awshttp = @import("aws_http.zig"); | ||||||
| const json = @import("json.zig"); | const json = @import("json.zig"); | ||||||
| const url = @import("url.zig"); | const url = @import("url.zig"); | ||||||
| const case = @import("case.zig"); | const case = @import("case.zig"); | ||||||
|  | @ -25,16 +25,19 @@ pub const services = servicemodel.services; | ||||||
| /// This will give you a constant with service data for sts, ec2, s3 and ddb only | /// This will give you a constant with service data for sts, ec2, s3 and ddb only | ||||||
| pub const Services = servicemodel.Services; | pub const Services = servicemodel.Services; | ||||||
| 
 | 
 | ||||||
|  | pub const ClientOptions = struct { | ||||||
|  |     trust_pem: ?[]const u8 = awshttp.default_root_ca, | ||||||
|  | }; | ||||||
| pub const Client = struct { | pub const Client = struct { | ||||||
|     allocator: std.mem.Allocator, |     allocator: std.mem.Allocator, | ||||||
|     aws_http: awshttp.AwsHttp, |     aws_http: awshttp.AwsHttp, | ||||||
| 
 | 
 | ||||||
|     const Self = @This(); |     const Self = @This(); | ||||||
| 
 | 
 | ||||||
|     pub fn init(allocator: std.mem.Allocator) Self { |     pub fn init(allocator: std.mem.Allocator, options: ClientOptions) !Self { | ||||||
|         return .{ |         return Self{ | ||||||
|             .allocator = allocator, |             .allocator = allocator, | ||||||
|             .aws_http = awshttp.AwsHttp.init(allocator), |             .aws_http = try awshttp.AwsHttp.init(allocator, options.trust_pem), | ||||||
|         }; |         }; | ||||||
|     } |     } | ||||||
|     pub fn deinit(self: *Client) void { |     pub fn deinit(self: *Client) void { | ||||||
|  | @ -227,7 +230,7 @@ pub fn Request(comptime action: anytype) type { | ||||||
|             // look at the return type |             // look at the return type | ||||||
|             var isJson: bool = undefined; |             var isJson: bool = undefined; | ||||||
|             for (response.headers) |h| { |             for (response.headers) |h| { | ||||||
|                 if (std.mem.eql(u8, "Content-Type", h.name)) { |                 if (std.ascii.eqlIgnoreCase("Content-Type", h.name)) { | ||||||
|                     if (std.mem.startsWith(u8, h.value, "application/json")) { |                     if (std.mem.startsWith(u8, h.value, "application/json")) { | ||||||
|                         isJson = true; |                         isJson = true; | ||||||
|                     } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) { |                     } else if (std.mem.startsWith(u8, h.value, "application/x-amz-json-1.0")) { | ||||||
|  |  | ||||||
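Given the signature change above, Client.init now takes a ClientOptions value and can fail; a minimal, hedged usage sketch (allocator choice and test placement are assumptions):

    const std = @import("std");
    const aws = @import("aws.zig");

    test "client init/deinit (sketch)" {
        // .{} keeps the ClientOptions default trust_pem (the bundled Amazon root CA)
        var client = try aws.Client.init(std.testing.allocator, .{});
        defer client.deinit();
    }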
							
								
								
									
src/aws_authentication.zig | 6 | Normal file
|  | @ -0,0 +1,6 @@ | ||||||
|  | pub const Credentials = struct { | ||||||
|  |     access_key: []const u8, | ||||||
|  |     secret_key: []const u8, | ||||||
|  |     session_token: ?[]const u8, | ||||||
|  |     // uint64_t expiration_timepoint_seconds); | ||||||
|  | }; | ||||||
							
								
								
									
src/aws_credentials.zig | 23 | Normal file
|  | @ -0,0 +1,23 @@ | ||||||
|  | //! Implements the standard credential chain: | ||||||
|  | //! 1. Environment variables | ||||||
|  | //! 2. Web identity token from STS | ||||||
|  | //! 3. Credentials/config files | ||||||
|  | //! 4. ECS Container credentials, using AWS_CONTAINER_CREDENTIALS_RELATIVE_URI | ||||||
|  | //! 5. EC2 instance profile credentials | ||||||
|  | const std = @import("std"); | ||||||
|  | const auth = @import("aws_authentication.zig"); | ||||||
|  | 
 | ||||||
|  | pub fn getCredentials(allocator: std.mem.Allocator) !auth.Credentials { | ||||||
|  |     _ = allocator; | ||||||
|  |     if (getEnvironmentCredentials()) |cred| return cred; | ||||||
|  |     // TODO: 2-5 | ||||||
|  |     return error.NotImplemented; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn getEnvironmentCredentials() ?auth.Credentials { | ||||||
|  |     return auth.Credentials{ | ||||||
|  |         .access_key = std.os.getenv("AWS_ACCESS_KEY_ID") orelse return null, | ||||||
|  |         .secret_key = std.os.getenv("AWS_SECRET_ACCESS_KEY") orelse return null, | ||||||
|  |         .session_token = std.os.getenv("AWS_SESSION_TOKEN"), | ||||||
|  |     }; | ||||||
|  | } | ||||||
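Only step 1 of the chain described at the top of this file is implemented so far, so callers need the environment variables present; a hedged test sketch, assuming it lives in this same file:

    test "environment credentials (sketch)" {
        // Assumes AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are exported
        // (AWS_SESSION_TOKEN optional); otherwise getCredentials currently
        // returns error.NotImplemented because steps 2-5 are still TODO.
        const creds = try getCredentials(std.testing.allocator);
        try std.testing.expect(creds.access_key.len > 0);
    }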
							
								
								
									
src/aws_http.zig | 296 | Normal file
|  | @ -0,0 +1,296 @@ | ||||||
|  | //! This module provides a low level http interface for working with AWS | ||||||
|  | //! It also provides an option to operate outside the AWS ecosystem through | ||||||
|  | //! the makeRequest call with a null signingOptions. | ||||||
|  | //! | ||||||
|  | //! Typical usage: | ||||||
|  | //! const client = try awshttp.AwsHttp.init(allocator, awshttp.default_root_ca); | ||||||
|  | //! defer client.deinit(); | ||||||
|  | //! const result = client.callApi (or client.makeRequest) | ||||||
|  | //! defer result.deinit(); | ||||||
|  | const std = @import("std"); | ||||||
|  | const base = @import("aws_http_base.zig"); | ||||||
|  | const signing = @import("aws_signing.zig"); | ||||||
|  | const credentials = @import("aws_credentials.zig"); | ||||||
|  | const zfetch = @import("zfetch"); | ||||||
|  | const tls = @import("iguanaTLS"); | ||||||
|  | 
 | ||||||
|  | const CN_NORTH_1_HASH = std.hash_map.hashString("cn-north-1"); | ||||||
|  | const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1"); | ||||||
|  | const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1"); | ||||||
|  | const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1"); | ||||||
|  | 
 | ||||||
|  | const log = std.log.scoped(.awshttp); | ||||||
|  | 
 | ||||||
|  | const amazon_root_ca_1 = @embedFile("../Amazon_Root_CA_1.pem"); | ||||||
|  | 
 | ||||||
|  | pub const default_root_ca = amazon_root_ca_1; | ||||||
|  | 
 | ||||||
|  | pub const AwsError = error{ | ||||||
|  |     AddHeaderError, | ||||||
|  |     AlpnError, | ||||||
|  |     CredentialsError, | ||||||
|  |     HttpClientConnectError, | ||||||
|  |     HttpRequestError, | ||||||
|  |     SignableError, | ||||||
|  |     SigningInitiationError, | ||||||
|  |     TlsError, | ||||||
|  |     RequestCreateError, | ||||||
|  |     SetupConnectionError, | ||||||
|  |     StatusCodeError, | ||||||
|  |     SetRequestMethodError, | ||||||
|  |     SetRequestPathError, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub const Options = struct { | ||||||
|  |     region: []const u8 = "aws-global", | ||||||
|  |     dualstack: bool = false, | ||||||
|  |     sigv4_service_name: ?[]const u8 = null, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub const Header = base.Header; | ||||||
|  | pub const HttpRequest = base.Request; | ||||||
|  | pub const HttpResult = base.Result; | ||||||
|  | 
 | ||||||
|  | const EndPoint = struct { | ||||||
|  |     uri: []const u8, | ||||||
|  |     host: []const u8, | ||||||
|  |     scheme: []const u8, | ||||||
|  |     port: u16, | ||||||
|  |     allocator: std.mem.Allocator, | ||||||
|  | 
 | ||||||
|  |     fn deinit(self: EndPoint) void { | ||||||
|  |         self.allocator.free(self.uri); | ||||||
|  |     } | ||||||
|  | }; | ||||||
|  | pub const AwsHttp = struct { | ||||||
|  |     allocator: std.mem.Allocator, | ||||||
|  |     trust_chain: ?tls.x509.CertificateChain, | ||||||
|  | 
 | ||||||
|  |     const Self = @This(); | ||||||
|  | 
 | ||||||
|  |     /// Recommended usage is init(allocator, awshttp.default_root_ca) | ||||||
|  |     /// Passing null for root_pem will result in no TLS verification | ||||||
|  |     pub fn init(allocator: std.mem.Allocator, root_pem: ?[]const u8) !Self { | ||||||
|  |         var trust_chain: ?tls.x509.CertificateChain = null; | ||||||
|  |         if (root_pem) |p| { | ||||||
|  |             var fbs = std.io.fixedBufferStream(p); | ||||||
|  |             trust_chain = try tls.x509.CertificateChain.from_pem(allocator, fbs.reader()); | ||||||
|  |         } | ||||||
|  |         return Self{ | ||||||
|  |             .allocator = allocator, | ||||||
|  |             .trust_chain = trust_chain, | ||||||
|  |             // .credentialsProvider = // creds provider could be useful | ||||||
|  |         }; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     pub fn deinit(self: *AwsHttp) void { | ||||||
|  |         if (self.trust_chain) |c| c.deinit(); | ||||||
|  |         log.debug("Deinit complete", .{}); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// callApi allows the calling of AWS APIs through a higher-level interface. | ||||||
|  |     /// It will calculate the appropriate endpoint and action parameters for the | ||||||
|  |     /// service called, and will set up the signing options. The return | ||||||
|  |     /// value is simply a raw HttpResult | ||||||
|  |     pub fn callApi(self: Self, service: []const u8, request: HttpRequest, options: Options) !HttpResult { | ||||||
|  |         const endpoint = try regionSubDomain(self.allocator, service, options.region, options.dualstack); | ||||||
|  |         defer endpoint.deinit(); | ||||||
|  |         log.debug("Calling endpoint {s}", .{endpoint.uri}); | ||||||
|  |         const creds = try credentials.getCredentials(self.allocator); | ||||||
|  |         // defer allocator.free(), except sometimes we don't need freeing... | ||||||
|  |         const signing_config: signing.Config = .{ | ||||||
|  |             .region = options.region, | ||||||
|  |             .service = options.sigv4_service_name orelse service, | ||||||
|  |             .credentials = creds, | ||||||
|  |         }; | ||||||
|  |         return try self.makeRequest(endpoint, request, signing_config); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     /// makeRequest is a low level http/https function that can be used inside | ||||||
|  |     /// or outside the context of AWS services. To use it outside AWS, simply | ||||||
|  |     /// pass a null value in for signing_options. | ||||||
|  |     /// | ||||||
|  |     /// Otherwise, it will simply take a URL endpoint (without path information), | ||||||
|  |     /// HTTP method (e.g. GET, POST, etc.), and request body. | ||||||
|  |     /// | ||||||
|  |     /// At the moment this does not allow changing headers, but additional | ||||||
|  |     /// ones are possible. This is likely to change. Current headers are: | ||||||
|  |     /// | ||||||
|  |     /// Accept: application/json | ||||||
|  |     /// User-Agent: zig-aws 1.0, Powered by the AWS Common Runtime. | ||||||
|  |     /// Content-Type: application/x-www-form-urlencoded | ||||||
|  |     /// Content-Length: (length of body) | ||||||
|  |     /// | ||||||
|  |     /// Return value is an HttpResult, which will need the caller to deinit(). | ||||||
|  |     pub fn makeRequest(self: Self, endpoint: EndPoint, request: HttpRequest, signing_config: ?signing.Config) !HttpResult { | ||||||
|  |         var request_cp = request; | ||||||
|  | 
 | ||||||
|  |         log.debug("Path: {s}", .{request_cp.path}); | ||||||
|  |         log.debug("Query: {s}", .{request_cp.query}); | ||||||
|  |         log.debug("Method: {s}", .{request_cp.method}); | ||||||
|  |         log.debug("body length: {d}", .{request_cp.body.len}); | ||||||
|  |         log.debug("Body\n====\n{s}\n====", .{request_cp.body}); | ||||||
|  | 
 | ||||||
|  |         var request_headers = std.ArrayList(base.Header).init(self.allocator); | ||||||
|  |         defer request_headers.deinit(); | ||||||
|  | 
 | ||||||
|  |         const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers); | ||||||
|  |         defer if (len) |l| self.allocator.free(l); | ||||||
|  |         request_cp.headers = request_headers.items; | ||||||
|  | 
 | ||||||
|  |         if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, request_cp, opts); | ||||||
|  |         defer { | ||||||
|  |             if (signing_config) |opts| { | ||||||
|  |                 signing.freeSignedRequest(self.allocator, &request_cp, opts); | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         try zfetch.init(); // This only does anything on Windows. Not sure how performant it is to do this on every request | ||||||
|  |         defer zfetch.deinit(); | ||||||
|  |         var headers = zfetch.Headers.init(self.allocator); | ||||||
|  |         defer headers.deinit(); | ||||||
|  |         for (request_cp.headers) |header| | ||||||
|  |             try headers.appendValue(header.name, header.value); | ||||||
|  |         log.debug("All Request Headers (zfetch):", .{}); | ||||||
|  |         for (headers.list.items) |h| { | ||||||
|  |             log.debug("\t{s}: {s}", .{ h.name, h.value }); | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request.path, request.query }); | ||||||
|  |         defer self.allocator.free(url); | ||||||
|  |         log.debug("Request url: {s}", .{url}); | ||||||
|  |         var req = try zfetch.Request.init(self.allocator, url, self.trust_chain); | ||||||
|  |         defer req.deinit(); | ||||||
|  | 
 | ||||||
|  |         const method = std.meta.stringToEnum(zfetch.Method, request_cp.method).?; | ||||||
|  |         try req.do(method, headers, if (request_cp.body.len == 0) null else request_cp.body); | ||||||
|  | 
 | ||||||
|  |         // TODO: Timeout - is this now above us? | ||||||
|  |         log.debug("request_complete. Response code {d}: {s}", .{ req.status.code, req.status.reason }); | ||||||
|  |         log.debug("Response headers:", .{}); | ||||||
|  |         var resp_headers = try std.ArrayList(Header).initCapacity(self.allocator, req.headers.list.items.len); | ||||||
|  |         defer resp_headers.deinit(); | ||||||
|  |         var content_length: usize = 0; | ||||||
|  |         for (req.headers.list.items) |h| { | ||||||
|  |             log.debug("    {s}: {s}", .{ h.name, h.value }); | ||||||
|  |             resp_headers.appendAssumeCapacity(.{ | ||||||
|  |                 .name = try (self.allocator.dupe(u8, h.name)), | ||||||
|  |                 .value = try (self.allocator.dupe(u8, h.value)), | ||||||
|  |             }); | ||||||
|  |             if (content_length == 0 and std.ascii.eqlIgnoreCase("content-length", h.name)) | ||||||
|  |                 content_length = std.fmt.parseInt(usize, h.value, 10) catch 0; | ||||||
|  |         } | ||||||
|  |         const reader = req.reader(); | ||||||
|  |         var buf: [65535]u8 = undefined; | ||||||
|  |         var resp_payload = try std.ArrayList(u8).initCapacity(self.allocator, content_length); | ||||||
|  |         defer resp_payload.deinit(); | ||||||
|  | 
 | ||||||
|  |         while (true) { | ||||||
|  |             const read = try reader.read(&buf); | ||||||
|  |             try resp_payload.appendSlice(buf[0..read]); | ||||||
|  |             if (read == 0) break; | ||||||
|  |         } | ||||||
|  |         log.debug("raw response body:\n{s}", .{resp_payload.items}); | ||||||
|  | 
 | ||||||
|  |         const rc = HttpResult{ | ||||||
|  |             .response_code = req.status.code, | ||||||
|  |             .body = resp_payload.toOwnedSlice(), | ||||||
|  |             .headers = resp_headers.toOwnedSlice(), | ||||||
|  |             .allocator = self.allocator, | ||||||
|  |         }; | ||||||
|  |         return rc; | ||||||
|  |     } | ||||||
|  | }; | ||||||
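To illustrate the "outside AWS" path the makeRequest doc comment describes, a hedged caller fragment with a hand-built EndPoint and a null signing config; the host, the in-scope allocator, and the GET request are placeholders, not part of this diff:

    var client = try AwsHttp.init(allocator, null); // null root_pem: no TLS verification
    defer client.deinit();
    const endpoint = EndPoint{
        .uri = try allocator.dupe(u8, "https://example.com"),
        .host = "example.com",
        .scheme = "https",
        .port = 443,
        .allocator = allocator,
    };
    defer endpoint.deinit();
    const result = try client.makeRequest(endpoint, .{ .method = "GET", .path = "/" }, null);
    defer result.deinit();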
|  | 
 | ||||||
|  | fn addHeaders(allocator: std.mem.Allocator, headers: *std.ArrayList(base.Header), host: []const u8, body: []const u8, content_type: []const u8, additional_headers: []Header) !?[]const u8 { | ||||||
|  |     try headers.append(.{ .name = "Accept", .value = "application/json" }); | ||||||
|  |     try headers.append(.{ .name = "Host", .value = host }); | ||||||
|  |     try headers.append(.{ .name = "User-Agent", .value = "zig-aws 1.0, Powered by the AWS Common Runtime." }); | ||||||
|  |     try headers.append(.{ .name = "Content-Type", .value = content_type }); | ||||||
|  |     try headers.appendSlice(additional_headers); | ||||||
|  |     if (body.len > 0) { | ||||||
|  |         const len = try std.fmt.allocPrint(allocator, "{d}", .{body.len}); | ||||||
|  |         try headers.append(.{ .name = "Content-Length", .value = len }); | ||||||
|  |         return len; | ||||||
|  |     } | ||||||
|  |     return null; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn regionSubDomain(allocator: std.mem.Allocator, service: []const u8, region: []const u8, useDualStack: bool) !EndPoint { | ||||||
|  |     const environment_override = std.os.getenv("AWS_ENDPOINT_URL"); | ||||||
|  |     if (environment_override) |override| { | ||||||
|  |         const uri = try allocator.dupeZ(u8, override); | ||||||
|  |         return endPointFromUri(allocator, uri); | ||||||
|  |     } | ||||||
|  |     // Fallback to us-east-1 if global endpoint does not exist. | ||||||
|  |     const realregion = if (std.mem.eql(u8, region, "aws-global")) "us-east-1" else region; | ||||||
|  |     const dualstack = if (useDualStack) ".dualstack" else ""; | ||||||
|  | 
 | ||||||
|  |     const domain = switch (std.hash_map.hashString(region)) { | ||||||
|  |         US_ISO_EAST_1_HASH => "c2s.ic.gov", | ||||||
|  |         CN_NORTH_1_HASH, CN_NORTHWEST_1_HASH => "amazonaws.com.cn", | ||||||
|  |         US_ISOB_EAST_1_HASH => "sc2s.sgov.gov", | ||||||
|  |         else => "amazonaws.com", | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     const uri = try std.fmt.allocPrintZ(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain }); | ||||||
|  |     const host = uri["https://".len..]; | ||||||
|  |     log.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 }); | ||||||
|  |     return EndPoint{ | ||||||
|  |         .uri = uri, | ||||||
|  |         .host = host, | ||||||
|  |         .scheme = "https", | ||||||
|  |         .port = 443, | ||||||
|  |         .allocator = allocator, | ||||||
|  |     }; | ||||||
|  | } | ||||||
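For a concrete sense of the output (illustrative values; the AWS_ENDPOINT_URL override takes precedence whenever it is set):

    // regionSubDomain(allocator, "dynamodb", "us-west-2", false) would yield
    //   uri:  "https://dynamodb.us-west-2.amazonaws.com"
    //   host: "dynamodb.us-west-2.amazonaws.com", scheme: "https", port: 443
    // "aws-global" is rewritten to us-east-1, and the cn-* / us-iso* regions
    // swap in their respective top-level domains.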
|  | 
 | ||||||
|  | /// Creates an endpoint from a uri string. | ||||||
|  | /// | ||||||
|  | /// allocator: stored on the EndPoint and used later by deinit() to free the uri | ||||||
|  | /// uri: must be owned by that allocator; the EndPoint takes ownership and frees it in deinit() | ||||||
|  | fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint { | ||||||
|  |     var scheme: []const u8 = ""; | ||||||
|  |     var host: []const u8 = ""; | ||||||
|  |     var port: u16 = 443; | ||||||
|  |     var host_start: usize = 0; | ||||||
|  |     var host_end: usize = 0; | ||||||
|  |     for (uri) |ch, i| { | ||||||
|  |         switch (ch) { | ||||||
|  |             ':' => { | ||||||
|  |                 if (!std.mem.eql(u8, scheme, "")) { | ||||||
|  |                     // here to end is port - this is likely a bug if ipv6 address used | ||||||
|  |                     const rest_of_uri = uri[i + 1 ..]; | ||||||
|  |                     port = try std.fmt.parseUnsigned(u16, rest_of_uri, 10); | ||||||
|  |                     host_end = i; | ||||||
|  |                 } | ||||||
|  |             }, | ||||||
|  |             '/' => { | ||||||
|  |                 if (host_start == 0) { | ||||||
|  |                     host_start = i + 2; | ||||||
|  |                     scheme = uri[0 .. i - 1]; | ||||||
|  |                     if (std.mem.eql(u8, scheme, "http")) { | ||||||
|  |                         port = 80; | ||||||
|  |                     } else { | ||||||
|  |                         port = 443; | ||||||
|  |                     } | ||||||
|  |                 } | ||||||
|  |             }, | ||||||
|  |             else => continue, | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     if (host_end == 0) { | ||||||
|  |         host_end = uri.len; | ||||||
|  |     } | ||||||
|  |     host = uri[host_start..host_end]; | ||||||
|  | 
 | ||||||
|  |     log.debug("host: {s}, scheme: {s}, port: {}", .{ host, scheme, port }); | ||||||
|  |     return EndPoint{ | ||||||
|  |         .uri = uri, | ||||||
|  |         .host = host, | ||||||
|  |         .scheme = scheme, | ||||||
|  |         .allocator = allocator, | ||||||
|  |         .port = port, | ||||||
|  |     }; | ||||||
|  | } | ||||||
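A hedged test sketch of the parser above, assuming it sits in this file; note that the EndPoint takes ownership of the uri passed in and frees it in deinit():

    test "endPointFromUri parses scheme, host and port (sketch)" {
        const uri = try std.testing.allocator.dupe(u8, "http://localhost:8000");
        const ep = try endPointFromUri(std.testing.allocator, uri);
        defer ep.deinit();
        try std.testing.expectEqualStrings("http", ep.scheme);
        try std.testing.expectEqualStrings("localhost", ep.host);
        try std.testing.expectEqual(@as(u16, 8000), ep.port);
    }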
							
								
								
									
src/aws_http_base.zig | 33 | Normal file
|  | @ -0,0 +1,33 @@ | ||||||
|  | //! This module provides base data structures for aws http requests | ||||||
|  | const std = @import("std"); | ||||||
|  | const log = std.log.scoped(.aws_base); | ||||||
|  | pub const Request = struct { | ||||||
|  |     path: []const u8 = "/", | ||||||
|  |     query: []const u8 = "", | ||||||
|  |     body: []const u8 = "", | ||||||
|  |     method: []const u8 = "POST", | ||||||
|  |     content_type: []const u8 = "application/json", // Can we get away with this? | ||||||
|  |     headers: []Header = &[_]Header{}, | ||||||
|  | }; | ||||||
|  | pub const Result = struct { | ||||||
|  |     response_code: u16, // actually 3 digits can fit in u10 | ||||||
|  |     body: []const u8, | ||||||
|  |     headers: []Header, | ||||||
|  |     allocator: std.mem.Allocator, | ||||||
|  | 
 | ||||||
|  |     pub fn deinit(self: Result) void { | ||||||
|  |         self.allocator.free(self.body); | ||||||
|  |         for (self.headers) |h| { | ||||||
|  |             self.allocator.free(h.name); | ||||||
|  |             self.allocator.free(h.value); | ||||||
|  |         } | ||||||
|  |         self.allocator.free(self.headers); | ||||||
|  |         log.debug("http result deinit complete", .{}); | ||||||
|  |         return; | ||||||
|  |     } | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub const Header = struct { | ||||||
|  |     name: []const u8, | ||||||
|  |     value: []const u8, | ||||||
|  | }; | ||||||
							
								
								
									
src/aws_signing.zig | 833 | Normal file
|  | @ -0,0 +1,833 @@ | ||||||
|  | const std = @import("std"); | ||||||
|  | const base = @import("aws_http_base.zig"); | ||||||
|  | const auth = @import("aws_authentication.zig"); | ||||||
|  | const date = @import("date.zig"); | ||||||
|  | 
 | ||||||
|  | const log = std.log.scoped(.aws_signing); | ||||||
|  | 
 | ||||||
|  | // see https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L186-L207 | ||||||
|  | pub const ConfigFlags = packed struct { | ||||||
|  |     // We assume the uri will be encoded once in preparation for transmission.  Certain services | ||||||
|  |     // do not decode before checking signature, requiring us to actually double-encode the uri in the canonical | ||||||
|  |     // request in order to pass a signature check. | ||||||
|  | 
 | ||||||
|  |     use_double_uri_encode: bool = true, | ||||||
|  | 
 | ||||||
|  |     // Controls whether or not the uri paths should be normalized when building the canonical request | ||||||
|  |     should_normalize_uri_path: bool = true, | ||||||
|  | 
 | ||||||
|  |     // Controls whether "X-Amz-Security-Token" is omitted from the canonical request. | ||||||
|  |     // "X-Amz-Security-Token" is added during signing, as a header or | ||||||
|  |     // query param, when credentials have a session token. | ||||||
|  |     // If false (the default), this parameter is included in the canonical request. | ||||||
|  |     // If true, this parameter is still added, but omitted from the canonical request. | ||||||
|  |     omit_session_token: bool = true, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub const Config = struct { | ||||||
|  |     // These two should be all you need to set most of the time | ||||||
|  |     service: []const u8, | ||||||
|  |     credentials: auth.Credentials, | ||||||
|  | 
 | ||||||
|  |     region: []const u8 = "aws-global", | ||||||
|  |     // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L38 | ||||||
|  |     algorithm: enum { v4, v4a } = .v4, | ||||||
|  |     // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L24 | ||||||
|  |     // config_type: ?? // CRT only has one value. We'll ignore for now | ||||||
|  | 
 | ||||||
|  |     // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L49 | ||||||
|  |     signature_type: enum { | ||||||
|  |         headers, // we only support this | ||||||
|  |         query_params, | ||||||
|  |         request_chunk, | ||||||
|  |         request_event, // not implemented by CRT | ||||||
|  |         canonical_request_headers, | ||||||
|  |         canonical_request_query_params, | ||||||
|  |     } = .headers, | ||||||
|  | 
 | ||||||
|  |     /// Used for testing. If null, will use current time | ||||||
|  |     signing_time: ?i64 = null, | ||||||
|  | 
 | ||||||
|  |     // In the CRT, should_sign_header is a function to allow header filtering. | ||||||
|  |     // The _ud would be a anyopaque user defined data for the function to use | ||||||
|  |     //     .should_sign_header = null, | ||||||
|  |     //     .should_sign_header_ud = null, | ||||||
|  | 
 | ||||||
|  |     // In the CRT, this is only used if the body has been precalculated. We don't have | ||||||
|  |     // this use case, and we'll ignore | ||||||
|  |     //     .signed_body_value = c.aws_byte_cursor_from_c_str(""), | ||||||
|  |     signed_body_header: SignatureType = .sha256, // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L131 | ||||||
|  | 
 | ||||||
|  |     // This is more complex in the CRT. We'll just take the creds. Someone | ||||||
|  |     // else can use a provider and get them in advance | ||||||
|  |     // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/include/aws/auth/signing_config.h#L225-L251 | ||||||
|  |     // If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query | ||||||
|  |     // string, equal to the value specified here.  If this value is zero or if header signing is being used then | ||||||
|  |     // this parameter has no effect. | ||||||
|  |     expiration_in_seconds: u64 = 0, | ||||||
|  | 
 | ||||||
|  |     flags: ConfigFlags = .{}, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | pub const SignatureType = enum { sha256, none }; | ||||||
|  | pub const SigningError = error{ | ||||||
|  |     NotImplemented, | ||||||
|  |     S3NotImplemented, | ||||||
|  | 
 | ||||||
|  |     // There are a number of forbidden headers that the signing process | ||||||
|  |     // basically "owns". For clarity, and because zig has no way to attach | ||||||
|  |     // a message to an error, each forbidden header gets its own error value below. | ||||||
|  |     // | ||||||
|  |     /// Used if the request headers already include X-Amz-Date | ||||||
|  |     /// If a specific date is required, use a specific signing_time in config | ||||||
|  |     XAmzDateHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include Authorization | ||||||
|  |     AuthorizationHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-content-sha256 | ||||||
|  |     XAmzContentSha256HeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-signature | ||||||
|  |     XAmzSignatureHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-algorithm | ||||||
|  |     XAmzAlgorithmHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-credential | ||||||
|  |     XAmzCredentialHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-signedheaders | ||||||
|  |     XAmzSignedHeadersHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-security-token | ||||||
|  |     XAmzSecurityTokenHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-expires | ||||||
|  |     XAmzExpiresHeaderInRequest, | ||||||
|  |     /// Used if the request headers already include x-amz-region-set | ||||||
|  |     XAmzRegionSetHeaderInRequest, | ||||||
|  | } || std.fmt.AllocPrintError; | ||||||
|  | 
 | ||||||
|  | const forbidden_headers = .{ | ||||||
|  |     .{ .name = "x-amz-content-sha256", .err = SigningError.XAmzContentSha256HeaderInRequest }, | ||||||
|  |     .{ .name = "Authorization", .err = SigningError.AuthorizationHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Signature", .err = SigningError.XAmzSignatureHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Algorithm", .err = SigningError.XAmzAlgorithmHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Credential", .err = SigningError.XAmzCredentialHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Date", .err = SigningError.XAmzDateHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-SignedHeaders", .err = SigningError.XAmzSignedHeadersHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Security-Token", .err = SigningError.XAmzSecurityTokenHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Expires", .err = SigningError.XAmzExpiresHeaderInRequest }, | ||||||
|  |     .{ .name = "X-Amz-Region-Set", .err = SigningError.XAmzRegionSetHeaderInRequest }, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | const skipped_headers = .{ | ||||||
|  |     "x-amzn-trace-id", | ||||||
|  |     "User-Agent", | ||||||
|  |     "connection", | ||||||
|  |     "sec-websocket-key", | ||||||
|  |     "sec-websocket-protocol", | ||||||
|  |     "sec-websocket-version", | ||||||
|  |     "upgrade", | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | /// Signs a request. Only header signing is currently supported. Note that | ||||||
|  | /// This adds two headers to the request, which will need to be freed by the | ||||||
|  | /// caller. Use freeSignedRequest with the same parameters to free | ||||||
|  | pub fn signRequest(allocator: std.mem.Allocator, request: base.Request, config: Config) SigningError!base.Request { | ||||||
|  |     try validateConfig(config); | ||||||
|  |     for (request.headers) |h| { | ||||||
|  |         inline for (forbidden_headers) |f| { | ||||||
|  |             if (std.ascii.eqlIgnoreCase(h.name, f.name)) | ||||||
|  |                 return f.err; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     var rc = request; | ||||||
|  | 
 | ||||||
|  |     const signing_time = config.signing_time orelse std.time.timestamp(); | ||||||
|  | 
 | ||||||
|  |     const signed_date = date.timestampToDateTime(signing_time); | ||||||
|  | 
 | ||||||
|  |     const signing_iso8601 = try std.fmt.allocPrint( | ||||||
|  |         allocator, | ||||||
|  |         "{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}{:0>2}Z", | ||||||
|  |         .{ | ||||||
|  |             signed_date.year, | ||||||
|  |             signed_date.month, | ||||||
|  |             signed_date.day, | ||||||
|  |             signed_date.hour, | ||||||
|  |             signed_date.minute, | ||||||
|  |             signed_date.second, | ||||||
|  |         }, | ||||||
|  |     ); | ||||||
|  |     errdefer freeSignedRequest(allocator, &rc, config); | ||||||
|  | 
 | ||||||
|  |     const newheaders = try allocator.alloc(base.Header, rc.headers.len + 2); | ||||||
|  |     errdefer allocator.free(newheaders); | ||||||
|  |     const oldheaders = rc.headers; | ||||||
|  |     errdefer freeSignedRequest(allocator, &rc, config); | ||||||
|  |     std.mem.copy(base.Header, newheaders, oldheaders); | ||||||
|  |     newheaders[newheaders.len - 2] = base.Header{ | ||||||
|  |         .name = "X-Amz-Date", | ||||||
|  |         .value = signing_iso8601, | ||||||
|  |     }; | ||||||
|  |     rc.headers = newheaders[0 .. newheaders.len - 1]; | ||||||
|  |     log.debug("Signing with access key: {s}", .{config.credentials.access_key}); | ||||||
|  |     const canonical_request = try createCanonicalRequest(allocator, rc, config); | ||||||
|  |     defer { | ||||||
|  |         allocator.free(canonical_request.arr); | ||||||
|  |         allocator.free(canonical_request.hash); | ||||||
|  |         allocator.free(canonical_request.headers.str); | ||||||
|  |         allocator.free(canonical_request.headers.signed_headers); | ||||||
|  |     } | ||||||
|  |     log.debug("Canonical request:\n{s}", .{canonical_request.arr}); | ||||||
|  |     log.debug("Canonical request hash: {s}", .{canonical_request.hash}); | ||||||
|  |     const scope = try std.fmt.allocPrint( | ||||||
|  |         allocator, | ||||||
|  |         "{:0>4}{:0>2}{:0>2}/{s}/{s}/aws4_request", | ||||||
|  |         .{ | ||||||
|  |             signed_date.year, | ||||||
|  |             signed_date.month, | ||||||
|  |             signed_date.day, | ||||||
|  |             config.region, | ||||||
|  |             config.service, | ||||||
|  |         }, | ||||||
|  |     ); | ||||||
|  |     defer allocator.free(scope); | ||||||
|  |     log.debug("Scope: {s}", .{scope}); | ||||||
|  | 
 | ||||||
|  |     //Algorithm + \n + | ||||||
|  |     //RequestDateTime + \n + | ||||||
|  |     //CredentialScope + \n + | ||||||
|  |     //HashedCanonicalRequest | ||||||
|  |     const string_to_sign_fmt = | ||||||
|  |         \\AWS4-HMAC-SHA256 | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |     ; | ||||||
|  |     const string_to_sign = try std.fmt.allocPrint( | ||||||
|  |         allocator, | ||||||
|  |         string_to_sign_fmt, | ||||||
|  |         .{ | ||||||
|  |             signing_iso8601, | ||||||
|  |             scope, | ||||||
|  |             canonical_request.hash, | ||||||
|  |         }, | ||||||
|  |     ); | ||||||
|  |     defer allocator.free(string_to_sign); | ||||||
|  |     log.debug("String to sign:\n{s}", .{string_to_sign}); | ||||||
|  | 
 | ||||||
|  |     const signing_key = try getSigningKey(allocator, scope[0..8], config); | ||||||
|  |     defer allocator.free(signing_key); | ||||||
|  | 
 | ||||||
|  |     const signature = try hmac(allocator, signing_key, string_to_sign); | ||||||
|  |     defer allocator.free(signature); | ||||||
|  |     newheaders[newheaders.len - 1] = base.Header{ | ||||||
|  |         .name = "Authorization", | ||||||
|  |         .value = try std.fmt.allocPrint( | ||||||
|  |             allocator, | ||||||
|  |             "AWS4-HMAC-SHA256 Credential={s}/{s}, SignedHeaders={s}, Signature={s}", | ||||||
|  |             .{ | ||||||
|  |                 config.credentials.access_key, | ||||||
|  |                 scope, | ||||||
|  |                 canonical_request.headers.signed_headers, | ||||||
|  |                 std.fmt.fmtSliceHexLower(signature), | ||||||
|  |             }, | ||||||
|  |         ), | ||||||
|  |     }; | ||||||
|  |     rc.headers = newheaders; | ||||||
|  |     return rc; | ||||||
|  | } | ||||||
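A hedged sketch of the sign-then-free pattern the doc comment above prescribes; the credentials, request body, and in-scope allocator are placeholders:

    var req = base.Request{ .body = "Action=GetCallerIdentity&Version=2011-06-15" };
    const config = Config{
        .service = "sts",
        .credentials = .{
            .access_key = "AKID-EXAMPLE",
            .secret_key = "SECRET-EXAMPLE",
            .session_token = null,
        },
    };
    req = try signRequest(allocator, req, config); // adds X-Amz-Date and Authorization
    defer freeSignedRequest(allocator, &req, config); // frees those two headers again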
|  | 
 | ||||||
|  | /// Frees allocated resources for the request, including the headers array | ||||||
|  | pub fn freeSignedRequest(allocator: std.mem.Allocator, request: *base.Request, config: Config) void { | ||||||
|  |     validateConfig(config) catch |e| { | ||||||
|  |         log.err("Signing validation failed during signature free: {}", .{e}); | ||||||
|  |         if (@errorReturnTrace()) |trace| { | ||||||
|  |             std.debug.dumpStackTrace(trace.*); | ||||||
|  |         } | ||||||
|  |         return; | ||||||
|  |     }; | ||||||
|  | 
 | ||||||
|  |     var remove_len: u2 = 0; | ||||||
|  |     for (request.headers) |h| { | ||||||
|  |         if (std.ascii.eqlIgnoreCase(h.name, "X-Amz-Date") or std.ascii.eqlIgnoreCase(h.name, "Authorization")) { | ||||||
|  |             allocator.free(h.value); | ||||||
|  |             remove_len += 1; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     if (remove_len > 0) | ||||||
|  |         request.headers = allocator.resize(request.headers, request.headers.len - remove_len).?; | ||||||
|  | 
 | ||||||
|  |     allocator.free(request.headers); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn getSigningKey(allocator: std.mem.Allocator, signing_date: []const u8, config: Config) ![]const u8 { | ||||||
|  |     // TODO: This is designed for lots of caching. We need to work that out | ||||||
|  |     // kSecret = your secret access key | ||||||
|  |     // kDate = HMAC("AWS4" + kSecret, Date) | ||||||
|  |     // kRegion = HMAC(kDate, Region) | ||||||
|  |     // kService = HMAC(kRegion, Service) | ||||||
|  |     // kSigning = HMAC(kService, "aws4_request") | ||||||
|  |     log.debug( | ||||||
|  |         \\signing key params: | ||||||
|  |         \\  key: (you wish) | ||||||
|  |         \\  date: {s} | ||||||
|  |         \\  region: {s} | ||||||
|  |         \\  service: {s} | ||||||
|  |     , .{ signing_date, config.region, config.service }); | ||||||
|  |     var secret = try std.fmt.allocPrint(allocator, "AWS4{s}", .{config.credentials.secret_key}); | ||||||
|  |     defer { | ||||||
|  |         for (secret) |_, i| secret[i] = 0; // zero our copy of secret | ||||||
|  |         allocator.free(secret); | ||||||
|  |     } | ||||||
|  |     // log.debug("secret: {s}", .{secret}); | ||||||
|  |     const k_date = try hmac(allocator, secret, signing_date); | ||||||
|  |     defer allocator.free(k_date); | ||||||
|  |     const k_region = try hmac(allocator, k_date, config.region); | ||||||
|  |     defer allocator.free(k_region); | ||||||
|  |     const k_service = try hmac(allocator, k_region, config.service); | ||||||
|  |     defer allocator.free(k_service); | ||||||
|  |     const k_signing = try hmac(allocator, k_service, "aws4_request"); | ||||||
|  |     return k_signing; | ||||||
|  | } | ||||||
|  | fn validateConfig(config: Config) SigningError!void { | ||||||
|  |     if (config.signature_type != .headers or | ||||||
|  |         config.signed_body_header != .sha256 or | ||||||
|  |         config.expiration_in_seconds != 0 or | ||||||
|  |         config.algorithm != .v4 or | ||||||
|  |         !config.flags.omit_session_token or | ||||||
|  |         !config.flags.should_normalize_uri_path or | ||||||
|  |         !config.flags.use_double_uri_encode) | ||||||
|  |         return SigningError.NotImplemented; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn hmac(allocator: std.mem.Allocator, key: []const u8, data: []const u8) ![]const u8 { | ||||||
|  |     var out: [std.crypto.auth.hmac.sha2.HmacSha256.mac_length]u8 = undefined; | ||||||
|  |     std.crypto.auth.hmac.sha2.HmacSha256.create(out[0..], data, key); | ||||||
|  |     return try allocator.dupe(u8, out[0..]); | ||||||
|  | } | ||||||
|  | const Hashed = struct { | ||||||
|  |     arr: []const u8, | ||||||
|  |     hash: []const u8, | ||||||
|  |     headers: CanonicalHeaders, | ||||||
|  | }; | ||||||
|  | 
 | ||||||
|  | fn createCanonicalRequest(allocator: std.mem.Allocator, request: base.Request, config: Config) !Hashed { | ||||||
|  |     // CanonicalRequest = | ||||||
|  |     // HTTPRequestMethod + '\n' + | ||||||
|  |     // CanonicalURI + '\n' + | ||||||
|  |     // CanonicalQueryString + '\n' + | ||||||
|  |     // CanonicalHeaders + '\n' + | ||||||
|  |     // SignedHeaders + '\n' + | ||||||
|  |     // HexEncode(Hash(RequestPayload)) | ||||||
|  |     const fmt = | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |         \\{s} | ||||||
|  |     ; | ||||||
|  | 
 | ||||||
|  |     // TODO: This is all better as a writer - less allocations/copying | ||||||
|  |     const canonical_method = canonicalRequestMethod(request.method); | ||||||
|  |     const canonical_url = try canonicalUri(allocator, request.path, config.flags.use_double_uri_encode); | ||||||
|  |     defer allocator.free(canonical_url); | ||||||
|  |     log.debug("final uri: {s}", .{canonical_url}); | ||||||
|  |     const canonical_query = try canonicalQueryString(allocator, request.query); | ||||||
|  |     defer allocator.free(canonical_query); | ||||||
|  |     log.debug("canonical query: {s}", .{canonical_query}); | ||||||
|  |     const canonical_headers = try canonicalHeaders(allocator, request.headers); | ||||||
|  |     const payload_hash = try hash(allocator, request.body, config.signed_body_header); | ||||||
|  |     defer allocator.free(payload_hash); | ||||||
|  | 
 | ||||||
|  |     const canonical_request = try std.fmt.allocPrint(allocator, fmt, .{ | ||||||
|  |         canonical_method, | ||||||
|  |         canonical_url, | ||||||
|  |         canonical_query, | ||||||
|  |         canonical_headers.str, | ||||||
|  |         canonical_headers.signed_headers, | ||||||
|  |         payload_hash, | ||||||
|  |     }); | ||||||
|  |     errdefer allocator.free(canonical_request); | ||||||
|  |     log.debug("Canonical_request (just calculated):\n{s}", .{canonical_request}); | ||||||
|  |     const hashed = try hash(allocator, canonical_request, config.signed_body_header); | ||||||
|  |     return Hashed{ | ||||||
|  |         .arr = canonical_request, | ||||||
|  |         .hash = hashed, | ||||||
|  |         .headers = canonical_headers, | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn canonicalRequestMethod(method: []const u8) ![]const u8 { | ||||||
|  |     return method; // We assume it's good | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn canonicalUri(allocator: std.mem.Allocator, path: []const u8, double_encode: bool) ![]const u8 { | ||||||
|  |     // Add the canonical URI parameter, followed by a newline character. The | ||||||
|  |     // canonical URI is the URI-encoded version of the absolute path component | ||||||
|  |     // of the URI, which is everything in the URI from the HTTP host to the | ||||||
|  |     // question mark character ("?") that begins the query string parameters (if any). | ||||||
|  |     // | ||||||
|  |     // Normalize URI paths according to RFC 3986. Remove redundant and relative | ||||||
|  |     // path components. Each path segment must be URI-encoded twice | ||||||
|  |     // (except for Amazon S3 which only gets URI-encoded once). | ||||||
|  |     // | ||||||
|  |     // Note: In exception to this, you do not normalize URI paths for requests | ||||||
|  |     // to Amazon S3. For example, if you have a bucket with an object | ||||||
|  |     // named my-object//example//photo.user, use that path. Normalizing | ||||||
|  |     // the path to my-object/example/photo.user will cause the request to | ||||||
|  |     // fail. For more information, see Task 1: Create a Canonical Request in | ||||||
|  |     // the Amazon Simple Storage Service API Reference. | ||||||
|  |     // | ||||||
|  |     // If the absolute path is empty, use a forward slash (/) | ||||||
|  |     // | ||||||
|  |     // For now, we will "Remove redundant and relative path components". This | ||||||
|  |     // doesn't apply to S3 anyway, and we'll make it the caller's problem | ||||||
|  |     if (!double_encode) | ||||||
|  |         return SigningError.S3NotImplemented; | ||||||
|  |     if (path.len == 0 or path[0] == '?' or path[0] == '#') | ||||||
|  |         return try allocator.dupe(u8, "/"); | ||||||
|  |     log.debug("encoding path: {s}", .{path}); | ||||||
|  |     const encoded_once = try encodeUri(allocator, path); | ||||||
|  |     log.debug("encoded path (1): {s}", .{encoded_once}); | ||||||
|  |     if (!double_encode or std.mem.indexOf(u8, path, "%") != null) // TODO: Is the indexOf condition universally true? | ||||||
|  |         return encoded_once[0 .. std.mem.lastIndexOf(u8, encoded_once, "?") orelse encoded_once.len]; | ||||||
|  |     defer allocator.free(encoded_once); | ||||||
|  |     const encoded_twice = try encodeUri(allocator, encoded_once); | ||||||
|  |     log.debug("encoded path (2): {s}", .{encoded_twice}); | ||||||
|  |     return encoded_twice[0 .. std.mem.lastIndexOf(u8, encoded_twice, "?") orelse encoded_twice.len]; | ||||||
|  | } | ||||||
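As a check on the double-encoding rules quoted above, a hedged test using the well-known "documents and settings" path example from the AWS SigV4 documentation (assumed to live in this file):

    test "canonicalUri double-encodes path segments (sketch)" {
        const canon = try canonicalUri(std.testing.allocator, "/documents and settings/", true);
        defer std.testing.allocator.free(canon);
        try std.testing.expectEqualStrings("/documents%2520and%2520settings/", canon);
    }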
|  | 
 | ||||||
|  | fn encodeParamPart(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { | ||||||
|  |     const unreserved_marks = "-_.!~*'()"; | ||||||
|  |     var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len); | ||||||
|  |     defer encoded.deinit(); | ||||||
|  |     for (path) |c| { | ||||||
|  |         var should_encode = true; | ||||||
|  |         for (unreserved_marks) |r| | ||||||
|  |             if (r == c) { | ||||||
|  |                 should_encode = false; | ||||||
|  |                 break; | ||||||
|  |             }; | ||||||
|  |         if (should_encode and std.ascii.isAlNum(c)) | ||||||
|  |             should_encode = false; | ||||||
|  | 
 | ||||||
|  |         if (!should_encode) { | ||||||
|  |             try encoded.append(c); | ||||||
|  |             continue; | ||||||
|  |         } | ||||||
|  |         // Whatever remains, encode it | ||||||
|  |         try encoded.append('%'); | ||||||
|  |         const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})}); | ||||||
|  |         defer allocator.free(hex); | ||||||
|  |         try encoded.appendSlice(hex); | ||||||
|  |     } | ||||||
|  |     return encoded.toOwnedSlice(); | ||||||
|  | } | ||||||
|  | fn encodeUri(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { | ||||||
|  |     const reserved_characters = ";,/?:@&=+$#"; | ||||||
|  |     const unreserved_marks = "-_.!~*'()"; | ||||||
|  |     var encoded = try std.ArrayList(u8).initCapacity(allocator, path.len); | ||||||
|  |     defer encoded.deinit(); | ||||||
|  |     for (path) |c| { | ||||||
|  |         var should_encode = true; | ||||||
|  |         for (reserved_characters) |r| | ||||||
|  |             if (r == c) { | ||||||
|  |                 should_encode = false; | ||||||
|  |                 break; | ||||||
|  |             }; | ||||||
|  |         if (should_encode) { | ||||||
|  |             for (unreserved_marks) |r| | ||||||
|  |                 if (r == c) { | ||||||
|  |                     should_encode = false; | ||||||
|  |                     break; | ||||||
|  |                 }; | ||||||
|  |         } | ||||||
|  |         if (should_encode and std.ascii.isAlNum(c)) | ||||||
|  |             should_encode = false; | ||||||
|  | 
 | ||||||
|  |         if (!should_encode) { | ||||||
|  |             try encoded.append(c); | ||||||
|  |             continue; | ||||||
|  |         } | ||||||
|  |         // Whatever remains, encode it | ||||||
|  |         try encoded.append('%'); | ||||||
|  |         const hex = try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexUpper(&[_]u8{c})}); | ||||||
|  |         defer allocator.free(hex); | ||||||
|  |         try encoded.appendSlice(hex); | ||||||
|  |     } | ||||||
|  |     return encoded.toOwnedSlice(); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn canonicalQueryString(allocator: std.mem.Allocator, path: []const u8) ![]const u8 { | ||||||
|  |     //     To construct the canonical query string, complete the following steps: | ||||||
|  |     // | ||||||
|  |     //     Sort the parameter names by character code point in ascending order. | ||||||
|  |     //     Parameters with duplicate names should be sorted by value. For example, | ||||||
|  |     //     a parameter name that begins with the uppercase letter F precedes a | ||||||
|  |     //     parameter name that begins with a lowercase letter b. | ||||||
|  |     // | ||||||
|  |     //     URI-encode each parameter name and value according to the following rules: | ||||||
|  |     // | ||||||
|  |     //         Do not URI-encode any of the unreserved characters that RFC 3986 | ||||||
|  |     //         defines: A-Z, a-z, 0-9, hyphen ( - ), underscore ( _ ), period ( . ), and tilde ( ~ ). | ||||||
|  |     // | ||||||
|  |     //         Percent-encode all other characters with %XY, where X and Y are | ||||||
|  |     //         hexadecimal characters (0-9 and uppercase A-F). For example, the | ||||||
|  |     //         space character must be encoded as %20 (not using '+', as some | ||||||
|  |     //         encoding schemes do) and extended UTF-8 characters must be in the | ||||||
|  |     //         form %XY%ZA%BC. | ||||||
|  |     // | ||||||
|  |     //         Double-encode any equals ( = ) characters in parameter values. | ||||||
|  |     // | ||||||
|  |     //     Build the canonical query string by starting with the first parameter | ||||||
|  |     //     name in the sorted list. | ||||||
|  |     // | ||||||
|  |     //     For each parameter, append the URI-encoded parameter name, followed by | ||||||
|  |     //     the equals sign character (=), followed by the URI-encoded parameter | ||||||
|  |     //     value. Use an empty string for parameters that have no value. | ||||||
|  |     // | ||||||
|  |     //     Append the ampersand character (&) after each parameter value, except | ||||||
|  |     //     for the last value in the list. | ||||||
|  |     // | ||||||
|  |     // One option for the query API is to put all request parameters in the query | ||||||
|  |     // string. For example, you can do this for Amazon S3 to create a presigned | ||||||
|  |     // URL. In that case, the canonical query string must include not only | ||||||
|  |     // parameters for the request, but also the parameters used as part of the | ||||||
|  |     // signing process—the hashing algorithm, credential scope, date, and signed | ||||||
|  |     // headers parameters. | ||||||
|  |     // | ||||||
|  |     // The following example shows a query string that includes authentication | ||||||
|  |     // information. The example is formatted with line breaks for readability, but | ||||||
|  |     // the canonical query string must be one continuous line of text in your code. | ||||||
|  |     const first_question = std.mem.indexOf(u8, path, "?"); | ||||||
|  |     if (first_question == null) | ||||||
|  |         return try allocator.dupe(u8, ""); | ||||||
|  | 
 | ||||||
|  |     // We have a query string | ||||||
|  |     const query = path[first_question.? + 1 ..]; | ||||||
|  | 
 | ||||||
|  |     // Split this by component | ||||||
|  |     var portions = std.mem.split(u8, query, "&"); | ||||||
|  |     var sort_me = std.ArrayList([]const u8).init(allocator); | ||||||
|  |     defer sort_me.deinit(); | ||||||
|  |     while (portions.next()) |item| | ||||||
|  |         try sort_me.append(item); | ||||||
|  |     std.sort.sort([]const u8, sort_me.items, {}, lessThanBinary); | ||||||
|  | 
 | ||||||
|  |     var normalized = try std.ArrayList(u8).initCapacity(allocator, path.len); | ||||||
|  |     defer normalized.deinit(); | ||||||
|  |     var first = true; | ||||||
|  |     for (sort_me.items) |i| { | ||||||
|  |         if (!first) try normalized.append('&'); | ||||||
|  |         first = false; | ||||||
|  |         var first_equals = std.mem.indexOf(u8, i, "="); | ||||||
|  |         if (first_equals == null) { | ||||||
|  |             // Rare. This is "foo=" | ||||||
|  |             const normed_item = try encodeUri(allocator, i); | ||||||
|  |             defer allocator.free(normed_item); | ||||||
|  |             try normalized.appendSlice(normed_item); | ||||||
|  |             try normalized.append('='); | ||||||
|  |             continue; | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         // normal key=value stuff | ||||||
|  |         const key = try encodeParamPart(allocator, i[0..first_equals.?]); | ||||||
|  |         defer allocator.free(key); | ||||||
|  | 
 | ||||||
|  |         const value = try encodeParamPart(allocator, i[first_equals.? + 1 ..]); | ||||||
|  |         defer allocator.free(value); | ||||||
|  |         // Double-encode any = in the value. But not anything else? | ||||||
|  |         const weird_equals_in_value_thing = try replace(allocator, value, "%3D", "%253D"); | ||||||
|  |         defer allocator.free(weird_equals_in_value_thing); | ||||||
|  |         try normalized.appendSlice(key); | ||||||
|  |         try normalized.append('='); | ||||||
|  |         try normalized.appendSlice(weird_equals_in_value_thing); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     return normalized.toOwnedSlice(); | ||||||
|  | } | ||||||
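And a hedged test of the query-string rules above, using the ListUsers example from the same documentation; parameters come back sorted by name:

    test "canonicalQueryString sorts and encodes parameters (sketch)" {
        const canon = try canonicalQueryString(std.testing.allocator, "/?Version=2010-05-08&Action=ListUsers");
        defer std.testing.allocator.free(canon);
        try std.testing.expectEqualStrings("Action=ListUsers&Version=2010-05-08", canon);
    }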
|  | 
 | ||||||
|  | fn replace(allocator: std.mem.Allocator, haystack: []const u8, needle: []const u8, replacement_value: []const u8) ![]const u8 { | ||||||
|  |     var buffer = try allocator.alloc(u8, std.mem.replacementSize(u8, haystack, needle, replacement_value)); | ||||||
|  |     _ = std.mem.replace(u8, haystack, needle, replacement_value, buffer); | ||||||
|  |     return buffer; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn lessThanBinary(context: void, lhs: []const u8, rhs: []const u8) bool { | ||||||
|  |     _ = context; | ||||||
|  |     return std.mem.lessThan(u8, lhs, rhs); | ||||||
|  | } | ||||||
|  | const CanonicalHeaders = struct { | ||||||
|  |     str: []const u8, | ||||||
|  |     signed_headers: []const u8, | ||||||
|  | }; | ||||||
|  | fn canonicalHeaders(allocator: std.mem.Allocator, headers: []base.Header) !CanonicalHeaders { | ||||||
|  |     // | ||||||
|  |     // Doc example. Original: | ||||||
|  |     // | ||||||
|  |     // Host:iam.amazonaws.com\n | ||||||
|  |     // Content-Type:application/x-www-form-urlencoded; charset=utf-8\n | ||||||
|  |     // My-header1:    a   b   c  \n | ||||||
|  |     // X-Amz-Date:20150830T123600Z\n | ||||||
|  |     // My-Header2:    "a   b   c"  \n | ||||||
|  |     // | ||||||
|  |     // Canonical form: | ||||||
|  |     // content-type:application/x-www-form-urlencoded; charset=utf-8\n | ||||||
|  |     // host:iam.amazonaws.com\n | ||||||
|  |     // my-header1:a b c\n | ||||||
|  |     // my-header2:"a b c"\n | ||||||
|  |     // x-amz-date:20150830T123600Z\n | ||||||
|  |     var dest = try std.ArrayList(base.Header).initCapacity(allocator, headers.len); | ||||||
|  |     defer { | ||||||
|  |         for (dest.items) |h| { | ||||||
|  |             allocator.free(h.name); | ||||||
|  |             allocator.free(h.value); | ||||||
|  |         } | ||||||
|  |         dest.deinit(); | ||||||
|  |     } | ||||||
|  |     var total_len: usize = 0; | ||||||
|  |     var total_name_len: usize = 0; | ||||||
|  |     for (headers) |h| { | ||||||
|  |         var skip = false; | ||||||
|  |         inline for (skipped_headers) |s| { | ||||||
|  |             if (std.ascii.eqlIgnoreCase(s, h.name)) { | ||||||
|  |                 skip = true; | ||||||
|  |                 break; | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         if (skip) continue; | ||||||
|  | 
 | ||||||
|  |         total_len += (h.name.len + h.value.len + 2); | ||||||
|  |         total_name_len += (h.name.len + 1); | ||||||
|  |         const value = try canonicalHeaderValue(allocator, h.value); | ||||||
|  |         defer allocator.free(value); | ||||||
|  |         const n = try std.ascii.allocLowerString(allocator, h.name); | ||||||
|  |         const v = try std.fmt.allocPrint(allocator, "{s}", .{value}); | ||||||
|  |         try dest.append(.{ .name = n, .value = v }); | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     std.sort.sort(base.Header, dest.items, {}, lessThan); | ||||||
|  | 
 | ||||||
|  |     var dest_str = try std.ArrayList(u8).initCapacity(allocator, total_len); | ||||||
|  |     defer dest_str.deinit(); | ||||||
|  |     var signed_headers = try std.ArrayList(u8).initCapacity(allocator, total_name_len); | ||||||
|  |     defer signed_headers.deinit(); | ||||||
|  |     var first = true; | ||||||
|  |     for (dest.items) |h| { | ||||||
|  |         dest_str.appendSliceAssumeCapacity(h.name); | ||||||
|  |         dest_str.appendAssumeCapacity(':'); | ||||||
|  |         dest_str.appendSliceAssumeCapacity(h.value); | ||||||
|  |         dest_str.appendAssumeCapacity('\n'); | ||||||
|  | 
 | ||||||
|  |         if (!first) signed_headers.appendAssumeCapacity(';'); | ||||||
|  |         first = false; | ||||||
|  |         signed_headers.appendSliceAssumeCapacity(h.name); | ||||||
|  |     } | ||||||
|  |     return CanonicalHeaders{ | ||||||
|  |         .str = dest_str.toOwnedSlice(), | ||||||
|  |         .signed_headers = signed_headers.toOwnedSlice(), | ||||||
|  |     }; | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]const u8 { | ||||||
|  |     var started = false; | ||||||
|  |     var in_quote = false; | ||||||
|  |     var start: usize = 0; | ||||||
|  |     const rc = try allocator.alloc(u8, value.len); | ||||||
|  |     var rc_inx: usize = 0; | ||||||
|  |     for (value) |c, i| { | ||||||
|  |         if (!started and !std.ascii.isSpace(c)) { | ||||||
|  |             started = true; | ||||||
|  |             start = i; | ||||||
|  |         } | ||||||
|  |         if (started) { | ||||||
|  |             if (!in_quote and i > 0 and std.ascii.isSpace(c) and std.ascii.isSpace(value[i - 1])) | ||||||
|  |                 continue; | ||||||
|  |             // if (c == '"') in_quote = !in_quote; | ||||||
|  |             rc[rc_inx] = c; | ||||||
|  |             rc_inx += 1; | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |     // Trim trailing whitespace; guard the index so an all-whitespace value cannot underflow | ||||||
|  |     while (rc_inx > 0 and std.ascii.isSpace(rc[rc_inx - 1])) | ||||||
|  |         rc_inx -= 1; | ||||||
|  |     return rc[0..rc_inx]; | ||||||
|  | } | ||||||
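Another illustrative check, also not in this commit, for canonicalHeaderValue. An arena is used because the function returns a prefix of a larger allocation:

test "canonical header value collapses whitespace" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const v = try canonicalHeaderValue(arena.allocator(), "  a   b   c  ");
    try std.testing.expectEqualStrings("a b c", v);
}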
|  | fn lessThan(context: void, lhs: base.Header, rhs: base.Header) bool { | ||||||
|  |     _ = context; | ||||||
|  |     return std.ascii.lessThanIgnoreCase(lhs.name, rhs.name); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | fn hash(allocator: std.mem.Allocator, payload: []const u8, sig_type: SignatureType) ![]const u8 { | ||||||
|  |     if (sig_type != .sha256) | ||||||
|  |         return error.NotImplemented; | ||||||
|  |     const to_hash = blk: { | ||||||
|  |         if (payload.len > 0) { | ||||||
|  |             break :blk payload; | ||||||
|  |         } | ||||||
|  |         break :blk ""; | ||||||
|  |     }; | ||||||
|  |     var out: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined; | ||||||
|  |     std.crypto.hash.sha2.Sha256.hash(to_hash, &out, .{}); | ||||||
|  |     return try std.fmt.allocPrint(allocator, "{s}", .{std.fmt.fmtSliceHexLower(&out)}); | ||||||
|  | } | ||||||
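A quick sanity check that could accompany hash (hypothetical, not in the diff): an empty payload must hash to the well-known empty-string SHA-256, the same constant that ends the canonical request test below.

test "hash of empty payload" {
    const allocator = std.testing.allocator;
    const h = try hash(allocator, "", .sha256);
    defer allocator.free(h);
    try std.testing.expectEqualStrings("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", h);
}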
|  | // Canonical request = Method + '\n' + CanonicalUri + '\n' + CanonicalQueryString + '\n' + | ||||||
|  | //                     CanonicalHeaders + '\n' + SignedHeaders + '\n' + | ||||||
|  | //                     HexEncode(Hash(RequestPayload)) | ||||||
|  | test "canonical method" { | ||||||
|  |     const actual = try canonicalRequestMethod("GET"); | ||||||
|  |     try std.testing.expectEqualStrings("GET", actual); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | test "canonical uri" { | ||||||
|  |     const allocator = std.testing.allocator; | ||||||
|  |     const path = "/documents and settings/?foo=bar"; | ||||||
|  |     const expected = "/documents%2520and%2520settings/"; | ||||||
|  |     const actual = try canonicalUri(allocator, path, true); | ||||||
|  |     defer allocator.free(actual); | ||||||
|  |     try std.testing.expectEqualStrings(expected, actual); | ||||||
|  | 
 | ||||||
|  |     const slash = try canonicalUri(allocator, "", true); | ||||||
|  |     defer allocator.free(slash); | ||||||
|  |     try std.testing.expectEqualStrings("/", slash); | ||||||
|  | } | ||||||
|  | test "canonical query" { | ||||||
|  |     const allocator = std.testing.allocator; | ||||||
|  |     const path = "blahblahblah?foo=bar&zed=dead&qux&equals=x=y&Action=ListUsers&Version=2010-05-08"; | ||||||
|  | 
 | ||||||
|  |     // { | ||||||
|  |     //     std.testing.log_level = .debug; | ||||||
|  |     //     _ = try std.io.getStdErr().write("\n"); | ||||||
|  |     // } | ||||||
|  |     const expected = "Action=ListUsers&Version=2010-05-08&equals=x%253Dy&foo=bar&qux=&zed=dead"; | ||||||
|  |     const actual = try canonicalQueryString(allocator, path); | ||||||
|  |     defer allocator.free(actual); | ||||||
|  |     try std.testing.expectEqualStrings(expected, actual); | ||||||
|  | } | ||||||
|  | test "canonical headers" { | ||||||
|  |     const allocator = std.testing.allocator; | ||||||
|  |     var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5); | ||||||
|  |     defer headers.deinit(); | ||||||
|  |     try headers.append(.{ .name = "Host", .value = "iam.amazonaws.com" }); | ||||||
|  |     try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" }); | ||||||
|  |     try headers.append(.{ .name = "User-Agent", .value = "This header should be skipped" }); | ||||||
|  |     try headers.append(.{ .name = "My-header1", .value = "  a  b  c  " }); | ||||||
|  |     try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" }); | ||||||
|  |     try headers.append(.{ .name = "My-header2", .value = "  \"a  b  c\"  " }); | ||||||
|  |     const expected = | ||||||
|  |         \\content-type:application/x-www-form-urlencoded; charset=utf-8 | ||||||
|  |         \\host:iam.amazonaws.com | ||||||
|  |         \\my-header1:a b c | ||||||
|  |         \\my-header2:"a b c" | ||||||
|  |         \\x-amz-date:20150830T123600Z | ||||||
|  |         \\ | ||||||
|  |     ; | ||||||
|  |     const actual = try canonicalHeaders(allocator, headers.items); | ||||||
|  |     defer allocator.free(actual.str); | ||||||
|  |     defer allocator.free(actual.signed_headers); | ||||||
|  |     try std.testing.expectEqualStrings(expected, actual.str); | ||||||
|  |     try std.testing.expectEqualStrings("content-type;host;my-header1;my-header2;x-amz-date", actual.signed_headers); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | test "canonical request" { | ||||||
|  |     const allocator = std.testing.allocator; | ||||||
|  |     var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5); | ||||||
|  |     defer headers.deinit(); | ||||||
|  |     try headers.append(.{ .name = "User-agent", .value = "c sdk v1.0" }); | ||||||
|  |     // In contrast to AWS CRT (aws-c-auth), we add the date as part of the | ||||||
|  |     // signing operation. They add it as part of the canonicalization | ||||||
|  |     try headers.append(.{ .name = "X-Amz-Date", .value = "20150830T123600Z" }); | ||||||
|  |     try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" }); | ||||||
|  |     const req = base.Request{ | ||||||
|  |         .path = "/", | ||||||
|  |         .method = "GET", | ||||||
|  |         .headers = headers.items, | ||||||
|  |     }; | ||||||
|  |     const request = try createCanonicalRequest(allocator, req, .{ | ||||||
|  |         .region = "us-west-2", // the published SigV4 reference vectors use us-east-1 | ||||||
|  |         .service = "sts", // the published SigV4 reference vectors use "service" | ||||||
|  |         .credentials = .{ | ||||||
|  |             .access_key = "AKIDEXAMPLE", | ||||||
|  |             .secret_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", | ||||||
|  |             .session_token = null, | ||||||
|  |         }, | ||||||
|  |         .signing_time = 1440938160, // 20150830T123600Z | ||||||
|  |     }); | ||||||
|  |     defer allocator.free(request.arr); | ||||||
|  |     defer allocator.free(request.hash); | ||||||
|  |     defer allocator.free(request.headers.str); | ||||||
|  |     defer allocator.free(request.headers.signed_headers); | ||||||
|  | 
 | ||||||
|  |     const expected = | ||||||
|  |         \\GET | ||||||
|  |         \\/ | ||||||
|  |         \\ | ||||||
|  |         \\host:example.amazonaws.com | ||||||
|  |         \\x-amz-date:20150830T123600Z | ||||||
|  |         \\ | ||||||
|  |         \\host;x-amz-date | ||||||
|  |         \\e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 | ||||||
|  |     ; | ||||||
|  |     try std.testing.expectEqualStrings(expected, request.arr); | ||||||
|  | } | ||||||
|  | test "can sign" { | ||||||
|  |     // [debug] (aws): call: prefix sts, sigv4 sts, version 2011-06-15, action GetCallerIdentity | ||||||
|  |     // [debug] (aws): proto: AwsProtocol.query | ||||||
|  |     // [debug] (awshttp): host: sts.us-west-2.amazonaws.com, scheme: https, port: 443 | ||||||
|  |     // [debug] (awshttp): Calling endpoint https://sts.us-west-2.amazonaws.com | ||||||
|  |     // [debug] (awshttp): Path: / | ||||||
|  |     // [debug] (awshttp): Query: | ||||||
|  |     // [debug] (awshttp): Method: POST | ||||||
|  |     // [debug] (awshttp): body length: 43 | ||||||
|  |     // [debug] (awshttp): Body | ||||||
|  |     // ==== | ||||||
|  |     // Action=GetCallerIdentity&Version=2011-06-15 | ||||||
|  |     // ==== | ||||||
|  |     // [debug] (awshttp): All Request Headers: | ||||||
|  |     // [debug] (awshttp):      Accept: application/json | ||||||
|  |     // [debug] (awshttp):      Host: sts.us-west-2.amazonaws.com | ||||||
|  |     // [debug] (awshttp):      User-Agent: zig-aws 1.0, Powered by the AWS Common Runtime. | ||||||
|  |     // [debug] (awshttp):      Content-Type: application/x-www-form-urlencoded | ||||||
|  |     // [debug] (awshttp):      Content-Length: 43 | ||||||
|  | 
 | ||||||
|  |     const allocator = std.testing.allocator; | ||||||
|  |     var headers = try std.ArrayList(base.Header).initCapacity(allocator, 5); | ||||||
|  |     defer headers.deinit(); | ||||||
|  |     try headers.append(.{ .name = "Content-Type", .value = "application/x-www-form-urlencoded; charset=utf-8" }); | ||||||
|  |     try headers.append(.{ .name = "Content-Length", .value = "13" }); | ||||||
|  |     try headers.append(.{ .name = "Host", .value = "example.amazonaws.com" }); | ||||||
|  |     var req = base.Request{ | ||||||
|  |         .path = "/", | ||||||
|  |         .query = "", | ||||||
|  |         .body = "Param1=value1", | ||||||
|  |         .method = "POST", | ||||||
|  |         .content_type = "application/json", | ||||||
|  |         .headers = headers.items, | ||||||
|  |     }; | ||||||
|  |     // { | ||||||
|  |     //     std.testing.log_level = .debug; | ||||||
|  |     //     _ = try std.io.getStdErr().write("\n"); | ||||||
|  |     // } | ||||||
|  | 
 | ||||||
|  |     // we could look at sigv4 signing tests at: | ||||||
|  |     // https://github.com/awslabs/aws-c-auth/blob/ace1311f8ef6ea890b26dd376031bed2721648eb/tests/sigv4_signing_tests.c#L1478 | ||||||
|  |     const config = Config{ | ||||||
|  |         .region = "us-east-1", | ||||||
|  |         .service = "service", | ||||||
|  |         .credentials = .{ | ||||||
|  |             .access_key = "AKIDEXAMPLE", | ||||||
|  |             .secret_key = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", | ||||||
|  |             .session_token = null, // TODO: add session token. I think we need X-Amz-Security-Token for that. Also, x-amz-region-set looks like part of v4a that will need to be dealt with eventually | ||||||
|  |         }, | ||||||
|  |         .signing_time = 1440938160, // 20150830T123600Z | ||||||
|  |     }; | ||||||
|  |     // TODO: There is an x-amz-content-sha256. Investigate | ||||||
|  |     var signed_req = try signRequest(allocator, req, config); | ||||||
|  | 
 | ||||||
|  |     defer freeSignedRequest(allocator, &signed_req, config); | ||||||
|  |     try std.testing.expectEqualStrings("X-Amz-Date", signed_req.headers[signed_req.headers.len - 2].name); | ||||||
|  |     try std.testing.expectEqualStrings("20150830T123600Z", signed_req.headers[signed_req.headers.len - 2].value); | ||||||
|  | 
 | ||||||
|  |     // c_aws_auth tests don't seem to have valid data. Live endpoint is | ||||||
|  |     // accepting what we're doing | ||||||
|  |     const expected_auth = "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=2b9566917226a17022b710430a367d343cbff33af7ee50b0ff8f44d75a4a46d8"; | ||||||
|  | 
 | ||||||
|  |     try std.testing.expectEqualStrings("Authorization", signed_req.headers[signed_req.headers.len - 1].name); | ||||||
|  |     try std.testing.expectEqualStrings(expected_auth, signed_req.headers[signed_req.headers.len - 1].value); | ||||||
|  | } | ||||||
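The Authorization header in the test above carries the standard credential scope (date/region/service/aws4_request). For orientation only, here is the textbook SigV4 signing-key derivation that such a scope implies; the function name and layout are illustrative and do not correspond to helpers shown in this hunk.

const HmacSha256 = std.crypto.auth.hmac.sha2.HmacSha256;

// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), service), "aws4_request")
fn sigv4SigningKeySketch(secret: []const u8, date: []const u8, region: []const u8, service: []const u8) ![32]u8 {
    var prefix_buf: [128]u8 = undefined;
    const prefixed = try std.fmt.bufPrint(&prefix_buf, "AWS4{s}", .{secret});
    var k_date: [32]u8 = undefined;
    HmacSha256.create(&k_date, date, prefixed); // date like "20150830"
    var k_region: [32]u8 = undefined;
    HmacSha256.create(&k_region, region, &k_date);
    var k_service: [32]u8 = undefined;
    HmacSha256.create(&k_service, service, &k_region);
    var k_signing: [32]u8 = undefined;
    HmacSha256.create(&k_signing, "aws4_request", &k_service);
    return k_signing;
}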
1013	src/awshttp.zig (file diff suppressed because it is too large)
							|  | @ -1,34 +0,0 @@ | ||||||
| #include <aws/auth/signing_config.h> |  | ||||||
| #include <aws/common/date_time.h> |  | ||||||
| 
 |  | ||||||
| #include "bitfield-workaround.h" |  | ||||||
| 
 |  | ||||||
| extern void *new_aws_signing_config( |  | ||||||
|     struct aws_allocator *allocator, |  | ||||||
|     const struct bitfield_workaround_aws_signing_config_aws *config) { |  | ||||||
|   struct aws_signing_config_aws *new_config = aws_mem_acquire(allocator, sizeof(struct aws_signing_config_aws)); |  | ||||||
| 
 |  | ||||||
|   new_config->algorithm                       = config->algorithm; |  | ||||||
|   new_config->config_type                     = config->config_type; |  | ||||||
|   new_config->signature_type                  = config->signature_type; |  | ||||||
|   new_config->region                          = config->region; |  | ||||||
|   new_config->service                         = config->service; |  | ||||||
|   new_config->should_sign_header              = config->should_sign_header; |  | ||||||
|   new_config->should_sign_header_ud           = config->should_sign_header_ud; |  | ||||||
|   new_config->flags.use_double_uri_encode     = config->flags.use_double_uri_encode; |  | ||||||
|   new_config->flags.should_normalize_uri_path = config->flags.should_normalize_uri_path; |  | ||||||
|   new_config->flags.omit_session_token        = config->flags.omit_session_token; |  | ||||||
|   new_config->signed_body_value               = config->signed_body_value; |  | ||||||
|   new_config->signed_body_header              = config->signed_body_header; |  | ||||||
|   new_config->credentials                     = config->credentials; |  | ||||||
|   new_config->credentials_provider            = config->credentials_provider; |  | ||||||
|   new_config->expiration_in_seconds           = config->expiration_in_seconds; |  | ||||||
| 
 |  | ||||||
|   aws_date_time_init_now(&new_config->date); |  | ||||||
| 
 |  | ||||||
|   return new_config; |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| extern FILE *get_std_err() { |  | ||||||
|   return stderr; |  | ||||||
| } |  | ||||||
|  | @ -1,142 +0,0 @@ | ||||||
| #ifndef ZIG_AWS_BITFIELD_WORKAROUND_H |  | ||||||
| #define ZIG_AWS_BITFIELD_WORKAROUND_H |  | ||||||
| 
 |  | ||||||
| #include <aws/auth/auth.h> |  | ||||||
| #include <aws/auth/signing_config.h> |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| // Copied verbatim from https://github.com/awslabs/aws-c-auth/blob/main/include/aws/auth/signing_config.h#L127-L241
 |  | ||||||
| // However, the flags has changed to uint32_t without bitfield annotations
 |  | ||||||
| // as Zig does not support them yet. See https://github.com/ziglang/zig/issues/1499
 |  | ||||||
| // We've renamed as well to make clear what's going on
 |  | ||||||
| //
 |  | ||||||
| // Signing date is also somewhat problematic, so we removed it and it is
 |  | ||||||
| // part of the c code
 |  | ||||||
| 
 |  | ||||||
| /*
 |  | ||||||
|  * Put all flags in here at the end.  If this grows, stay aware of bit-space overflow and ABI compatibility. |  | ||||||
|  */ |  | ||||||
| struct bitfield_workaround_aws_signing_config_aws_flags { |  | ||||||
|     /**
 |  | ||||||
|      * We assume the uri will be encoded once in preparation for transmission.  Certain services |  | ||||||
|      * do not decode before checking signature, requiring us to actually double-encode the uri in the canonical |  | ||||||
|      * request in order to pass a signature check. |  | ||||||
|      */ |  | ||||||
|     uint32_t use_double_uri_encode; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Controls whether or not the uri paths should be normalized when building the canonical request |  | ||||||
|      */ |  | ||||||
|     uint32_t should_normalize_uri_path; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Controls whether "X-Amz-Security-Token" is omitted from the canonical request. |  | ||||||
|      * "X-Amz-Security-Token" is added during signing, as a header or |  | ||||||
|      * query param, when credentials have a session token. |  | ||||||
|      * If false (the default), this parameter is included in the canonical request. |  | ||||||
|      * If true, this parameter is still added, but omitted from the canonical request. |  | ||||||
|      */ |  | ||||||
|     uint32_t omit_session_token; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| /**
 |  | ||||||
|  * A configuration structure for use in AWS-related signing.  Currently covers sigv4 only, but is not required to. |  | ||||||
|  */ |  | ||||||
| struct bitfield_workaround_aws_signing_config_aws { |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * What kind of config structure is this? |  | ||||||
|      */ |  | ||||||
|     enum aws_signing_config_type config_type; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * What signing algorithm to use. |  | ||||||
|      */ |  | ||||||
|     enum aws_signing_algorithm algorithm; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * What sort of signature should be computed? |  | ||||||
|      */ |  | ||||||
|     enum aws_signature_type signature_type; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * The region to sign against |  | ||||||
|      */ |  | ||||||
|     struct aws_byte_cursor region; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * name of service to sign a request for |  | ||||||
|      */ |  | ||||||
|     struct aws_byte_cursor service; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Raw date to use during the signing process. |  | ||||||
|      */ |  | ||||||
|     // struct aws_date_time date;
 |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Optional function to control which headers are a part of the canonical request. |  | ||||||
|      * Skipping auth-required headers will result in an unusable signature.  Headers injected by the signing process |  | ||||||
|      * are not skippable. |  | ||||||
|      * |  | ||||||
|      * This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather |  | ||||||
|      * supplements it.  In particular, a header will get signed if and only if it returns true to both |  | ||||||
|      * the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined). |  | ||||||
|      */ |  | ||||||
|     aws_should_sign_header_fn *should_sign_header; |  | ||||||
|     void *should_sign_header_ud; |  | ||||||
| 
 |  | ||||||
|     /*
 |  | ||||||
|      * Put all flags in here at the end.  If this grows, stay aware of bit-space overflow and ABI compatibility. |  | ||||||
|      */ |  | ||||||
|     struct bitfield_workaround_aws_signing_config_aws_flags flags; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Optional string to use as the canonical request's body value. |  | ||||||
|      * If string is empty, a value will be calculated from the payload during signing. |  | ||||||
|      * Typically, this is the SHA-256 of the (request/chunk/event) payload, written as lowercase hex. |  | ||||||
|      * If this has been precalculated, it can be set here. Special values used by certain services can also be set |  | ||||||
|      * (e.g. "UNSIGNED-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-EVENTS"). |  | ||||||
|      */ |  | ||||||
|     struct aws_byte_cursor signed_body_value; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * Controls what body "hash" header, if any, should be added to the canonical request and the signed request: |  | ||||||
|      *   AWS_SBHT_NONE - no header should be added |  | ||||||
|      *   AWS_SBHT_X_AMZ_CONTENT_SHA256 - the body "hash" should be added in the X-Amz-Content-Sha256 header |  | ||||||
|      */ |  | ||||||
|     enum aws_signed_body_header_type signed_body_header; |  | ||||||
| 
 |  | ||||||
|     /*
 |  | ||||||
|      * Signing key control: |  | ||||||
|      * |  | ||||||
|      *   (1) If "credentials" is valid, use it |  | ||||||
|      *   (2) Else if "credentials_provider" is valid, query credentials from the provider and use the result |  | ||||||
|      *   (3) Else fail |  | ||||||
|      * |  | ||||||
|      */ |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * AWS Credentials to sign with. |  | ||||||
|      */ |  | ||||||
|     const struct aws_credentials *credentials; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * AWS credentials provider to fetch credentials from. |  | ||||||
|      */ |  | ||||||
|     struct aws_credentials_provider *credentials_provider; |  | ||||||
| 
 |  | ||||||
|     /**
 |  | ||||||
|      * If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query |  | ||||||
|      * string, equal to the value specified here.  If this value is zero or if header signing is being used then |  | ||||||
|      * this parameter has no effect. |  | ||||||
|      */ |  | ||||||
|     uint64_t expiration_in_seconds; |  | ||||||
| }; |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| extern void *new_aws_signing_config(struct aws_allocator *allocator, const struct bitfield_workaround_aws_signing_config_aws *config); |  | ||||||
| extern FILE *get_std_err(); |  | ||||||
| #endif |  | ||||||
81	src/date.zig (new file)
							|  | @ -0,0 +1,81 @@ | ||||||
|  | // From https://gist.github.com/WoodyAtHome/3ef50b17f0fa2860ac52b97af12f8d15 | ||||||
|  | // Translated from German. We don't need any local time for this use case, and conversion | ||||||
|  | // really requires the TZ DB. | ||||||
|  | 
 | ||||||
|  | const std = @import("std"); | ||||||
|  | 
 | ||||||
|  | pub const DateTime = struct { day: u8, month: u8, year: u16, hour: u8, minute: u8, second: u8 }; | ||||||
|  | 
 | ||||||
|  | pub fn timestampToDateTime(timestamp: i64) DateTime { | ||||||
|  | 
 | ||||||
|  |     // from https://de.wikipedia.org/wiki/Unixzeit | ||||||
|  |     const unixtime = @intCast(u64, timestamp); | ||||||
|  |     const SECONDS_PER_DAY = 86400; //*  24* 60 * 60 */ | ||||||
|  |     const DAYS_PER_YEAR = 365; //* Normal year (no leap year) */ | ||||||
|  |     const DAYS_IN_4_YEARS = 1461; //*   4*365 +   1 */ | ||||||
|  |     const DAYS_IN_100_YEARS = 36524; //* 100*365 +  25 - 1 */ | ||||||
|  |     const DAYS_IN_400_YEARS = 146097; //* 400*365 + 100 - 4 + 1 */ | ||||||
|  |     const DAY_NUMBER_ADJUSTED_1970_01_01 = 719468; //* Day number relates to March 1st */ | ||||||
|  | 
 | ||||||
|  |     var dayN: u64 = DAY_NUMBER_ADJUSTED_1970_01_01 + unixtime / SECONDS_PER_DAY; | ||||||
|  |     var seconds_since_midnight: u64 = unixtime % SECONDS_PER_DAY; | ||||||
|  |     var temp: u64 = 0; | ||||||
|  | 
 | ||||||
|  |     // Leap year rules for Gregorian Calendars | ||||||
|  |     // Any year divisible by 100 is not a leap year unless also divisible by 400 | ||||||
|  |     temp = 4 * (dayN + DAYS_IN_100_YEARS + 1) / DAYS_IN_400_YEARS - 1; | ||||||
|  |     var year = @intCast(u16, 100 * temp); | ||||||
|  |     dayN -= DAYS_IN_100_YEARS * temp + temp / 4; | ||||||
|  | 
 | ||||||
|  |     // For Julian calendars, each year divisible by 4 is a leap year | ||||||
|  |     temp = 4 * (dayN + DAYS_PER_YEAR + 1) / DAYS_IN_4_YEARS - 1; | ||||||
|  |     year += @intCast(u16, temp); | ||||||
|  |     dayN -= DAYS_PER_YEAR * temp + temp / 4; | ||||||
|  | 
 | ||||||
|  |     // dayN calculates the days of the year in relation to March 1 | ||||||
|  |     var month = @intCast(u8, (5 * dayN + 2) / 153); | ||||||
|  |     var day = @intCast(u8, dayN - (@intCast(u64, month) * 153 + 2) / 5 + 1); | ||||||
|  |     //  153 = 31+30+31+30+31 Days for the 5 months from March through July | ||||||
|  |     //  153 = 31+30+31+30+31 Days for the 5 months from August through December | ||||||
|  |     //        31+28          Days for January and February (see below) | ||||||
|  |     //  +2: Rounding adjustment | ||||||
|  |     //  +1: The first day in March is March 1st (not March 0) | ||||||
|  | 
 | ||||||
|  |     month += 3; // Convert from a year that starts on March 1st to a calendar year | ||||||
|  |     if (month > 12) { // months 13 and 14 become 1 (January) and 2 (February) of the next year | ||||||
|  |         month -= 12; | ||||||
|  |         year += 1; | ||||||
|  |     } | ||||||
|  | 
 | ||||||
|  |     var hours = @intCast(u8, seconds_since_midnight / 3600); | ||||||
|  |     var minutes = @intCast(u8, seconds_since_midnight % 3600 / 60); | ||||||
|  |     var seconds = @intCast(u8, seconds_since_midnight % 60); | ||||||
|  | 
 | ||||||
|  |     return DateTime{ .day = day, .month = month, .year = year, .hour = hours, .minute = minutes, .second = seconds }; | ||||||
|  | } | ||||||
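Worked example with the timestamp used in the tests below: 1440938160 = 16677 * 86400 + 45360, so the date is day 16677 after 1970-01-01, which is 2015-08-30, and 45360 seconds past midnight is 12:36:00. That is the 20150830T123600Z instant the signing tests rely on.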
|  | 
 | ||||||
|  | fn printDateTime(dt: DateTime) void { | ||||||
|  |     std.log.debug("{:0>4}-{:0>2}-{:0>2}T{:0>2}:{:0>2}:{:0>2}Z", .{ | ||||||
|  |         dt.year, | ||||||
|  |         dt.month, | ||||||
|  |         dt.day, | ||||||
|  |         dt.hour, | ||||||
|  |         dt.minute, | ||||||
|  |         dt.second, | ||||||
|  |     }); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | pub fn printNowUtc() void { | ||||||
|  |     printDateTime(timestampToDateTime(std.time.timestamp())); | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | test "GMT and localtime" { | ||||||
|  |     std.testing.log_level = .debug; | ||||||
|  |     std.log.debug("\n", .{}); | ||||||
|  |     printDateTime(timestampToDateTime(std.time.timestamp())); | ||||||
|  |     try std.testing.expectEqual(DateTime{ .year = 2020, .month = 8, .day = 28, .hour = 9, .minute = 32, .second = 27 }, timestampToDateTime(1598607147)); | ||||||
|  | 
 | ||||||
|  |     try std.testing.expectEqual(DateTime{ .year = 2020, .month = 11, .day = 1, .hour = 5, .minute = 6, .second = 7 }, timestampToDateTime(1604207167)); | ||||||
|  |     // Get time for date: https://wtools.io/convert-date-time-to-unix-time | ||||||
|  |     try std.testing.expectEqual(DateTime{ .year = 2015, .month = 08, .day = 30, .hour = 12, .minute = 36, .second = 00 }, timestampToDateTime(1440938160)); | ||||||
|  | } | ||||||
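An illustrative extra test (hypothetical, not part of date.zig) tying DateTime back to the X-Amz-Date strings the signer uses:

test "DateTime formats as an X-Amz-Date string" {
    const dt = timestampToDateTime(1440938160);
    var buf: [32]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{:0>4}{:0>2}{:0>2}T{:0>2}{:0>2}{:0>2}Z", .{ dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second });
    try std.testing.expectEqualStrings("20150830T123600Z", s);
}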
21	src/main.zig
							|  | @ -1,8 +1,9 @@ | ||||||
| const std = @import("std"); | const std = @import("std"); | ||||||
| const aws = @import("aws.zig"); | const aws = @import("aws.zig"); | ||||||
| const json = @import("json.zig"); | const json = @import("json.zig"); | ||||||
|  | const version = @import("git_version.zig"); | ||||||
| 
 | 
 | ||||||
| var verbose = false; | var verbose: u8 = 0; | ||||||
| 
 | 
 | ||||||
| pub fn log( | pub fn log( | ||||||
|     comptime level: std.log.Level, |     comptime level: std.log.Level, | ||||||
|  | @ -10,8 +11,11 @@ pub fn log( | ||||||
|     comptime format: []const u8, |     comptime format: []const u8, | ||||||
|     args: anytype, |     args: anytype, | ||||||
| ) void { | ) void { | ||||||
|  |     // Ignore aws_signing messages | ||||||
|  |     if (verbose < 2 and scope == .aws_signing and @enumToInt(level) >= @enumToInt(std.log.Level.debug)) | ||||||
|  |         return; | ||||||
|     // Ignore awshttp messages |     // Ignore awshttp messages | ||||||
|     if (!verbose and scope == .awshttp and @enumToInt(level) >= @enumToInt(std.log.Level.debug)) |     if (verbose < 1 and scope == .awshttp and @enumToInt(level) >= @enumToInt(std.log.Level.debug)) | ||||||
|         return; |         return; | ||||||
|     const scope_prefix = "(" ++ @tagName(scope) ++ "): "; |     const scope_prefix = "(" ++ @tagName(scope) ++ "): "; | ||||||
|     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix; |     const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix; | ||||||
|  | @ -37,20 +41,21 @@ const Tests = enum { | ||||||
| }; | }; | ||||||
| 
 | 
 | ||||||
| pub fn main() anyerror!void { | pub fn main() anyerror!void { | ||||||
|     const c_allocator = std.heap.c_allocator; |     var gpa = std.heap.GeneralPurposeAllocator(.{}){}; | ||||||
|     var gpa = std.heap.GeneralPurposeAllocator(.{}){ |  | ||||||
|         .backing_allocator = c_allocator, |  | ||||||
|     }; |  | ||||||
|     defer _ = gpa.deinit(); |     defer _ = gpa.deinit(); | ||||||
|     const allocator = gpa.allocator(); |     const allocator = gpa.allocator(); | ||||||
|     var tests = std.ArrayList(Tests).init(allocator); |     var tests = std.ArrayList(Tests).init(allocator); | ||||||
|     defer tests.deinit(); |     defer tests.deinit(); | ||||||
|     var args = std.process.args(); |     var args = std.process.args(); | ||||||
|  |     var first = true; | ||||||
|     while (args.next(allocator)) |arg_or_error| { |     while (args.next(allocator)) |arg_or_error| { | ||||||
|         const arg = try arg_or_error; |         const arg = try arg_or_error; | ||||||
|         defer allocator.free(arg); |         defer allocator.free(arg); | ||||||
|  |         if (first) | ||||||
|  |             std.log.info("{s} {s}", .{ arg, version.pretty_version }); | ||||||
|  |         first = false; | ||||||
|         if (std.mem.eql(u8, "-v", arg)) { |         if (std.mem.eql(u8, "-v", arg)) { | ||||||
|             verbose = true; |             verbose += 1; | ||||||
|             continue; |             continue; | ||||||
|         } |         } | ||||||
|         inline for (@typeInfo(Tests).Enum.fields) |f| { |         inline for (@typeInfo(Tests).Enum.fields) |f| { | ||||||
|  | @ -66,7 +71,7 @@ pub fn main() anyerror!void { | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
|     std.log.info("Start\n", .{}); |     std.log.info("Start\n", .{}); | ||||||
|     var client = aws.Client.init(allocator); |     var client = try aws.Client.init(allocator, .{}); | ||||||
|     const options = aws.Options{ |     const options = aws.Options{ | ||||||
|         .region = "us-west-2", |         .region = "us-west-2", | ||||||
|         .client = client, |         .client = client, | ||||||
|  |  | ||||||
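The verbosity filter in the log hook above keys off std.log scopes. A minimal sketch of the module-side declaration it expects (the real declarations live in the other source files; the scope names here are taken from the filter, not from this hunk):

const signing_log = std.log.scoped(.aws_signing);

fn exampleScopedLogging() void {
    // Dropped unless the user passes -v -v; .awshttp debug lines need a single -v.
    signing_log.debug("canonical request built", .{});
}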
1	zfetch_deps.zig (new file)
							|  | @ -0,0 +1 @@ | ||||||
|  | const use_submodules = 1; | ||||||