docker multi-platform build support and ci
Some checks failed
Generic zig build / build (push) Failing after 8s
Some checks failed
Generic zig build / build (push) Failing after 8s
This commit is contained in:
parent
45eae9800f
commit
ba251b4b80
2 changed files with 139 additions and 1 deletions
35
.github/workflows/build.yaml
vendored
35
.github/workflows/build.yaml
vendored
|
@ -26,6 +26,41 @@ jobs:
|
|||
curl --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
|
||||
--upload-file zig-out/bin/syncthing_events \
|
||||
https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/syncthing_events-x86_64-linux-${{ github.sha }}
|
||||
- name: Prepare docker image
|
||||
run: zig build docker
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
# list of Docker images to use as base name for tags
|
||||
images: |
|
||||
git.lerch.org/${{ github.repository }}
|
||||
# generate Docker tags based on the following events/attributes
|
||||
tags: |
|
||||
type=schedule
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}}
|
||||
type=sha
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Log in to Gitea Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: git.lerch.org
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.PACKAGE_PUSH }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: zig-out
|
||||
platforms: linux/amd64,linux/arm64,linux/riscv64,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
|
||||
# load: true # will not work for multiplatform
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
annotations: ${{ steps.meta.outputs.annotations }}
|
||||
- name: Notify
|
||||
uses: elerch/action-notify-ntfy@v2.github
|
||||
if: always() && env.GITEA_ACTIONS == 'true'
|
||||
|
|
105
build.zig
105
build.zig
|
@ -3,7 +3,7 @@ const std = @import("std");
|
|||
// Although this function looks imperative, note that its job is to
|
||||
// declaratively construct a build graph that will be executed by an external
|
||||
// runner.
|
||||
pub fn build(b: *std.Build) void {
|
||||
pub fn build(b: *std.Build) !void {
|
||||
// Standard target options allows the person running `zig build` to choose
|
||||
// what target to build for. Here we do not override the defaults, which
|
||||
// means any target is allowed, and the default is native. Other options
|
||||
|
@ -125,4 +125,107 @@ pub fn build(b: *std.Build) void {
|
|||
const test_step = b.step("test", "Run unit tests");
|
||||
test_step.dependOn(&run_lib_unit_tests.step);
|
||||
test_step.dependOn(&run_exe_unit_tests.step);
|
||||
|
||||
try docker(b, exe);
|
||||
}
|
||||
|
||||
/// Adds a "docker" step that cross-compiles `compile`'s root module for every
/// platform docker buildx supports, installing each binary under
/// bin/<platform>/<exe name> so the generated Dockerfile's
/// `COPY bin/$TARGETPLATFORM/...` line resolves at image build time.
fn docker(b: *std.Build, compile: *std.Build.Step.Compile) !void {
    const DockerTarget = struct {
        platform: []const u8, // docker platform string, e.g. "linux/arm/v7"
        target: std.Target.Query,
    };
    // From docker source:
    // https://github.com/containerd/containerd/blob/52f02c3aa1e7ccd448060375c821cae4e3300cdb/test/init-buildx.sh#L45
    // Platforms: linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
    const docker_targets = [_]DockerTarget{
        .{ .platform = "linux/amd64", .target = .{ .cpu_arch = .x86_64, .os_tag = .linux } },
        .{ .platform = "linux/arm64", .target = .{ .cpu_arch = .aarch64, .os_tag = .linux } },
        .{ .platform = "linux/riscv64", .target = .{ .cpu_arch = .riscv64, .os_tag = .linux } },
        .{ .platform = "linux/ppc64le", .target = .{ .cpu_arch = .powerpc64le, .os_tag = .linux } },
        // NOTE: was "linux/390x" — fixed to match docker's platform name so the
        // Dockerfile COPY with TARGETPLATFORM=linux/s390x can find the binary.
        .{ .platform = "linux/s390x", .target = .{ .cpu_arch = .s390x, .os_tag = .linux } },
        .{ .platform = "linux/386", .target = .{ .cpu_arch = .x86, .os_tag = .linux } },
        .{ .platform = "linux/arm/v7", .target = .{ .cpu_arch = .arm, .os_tag = .linux, .abi = .musleabihf } },
        // arm/v6 needs an explicit CPU model to avoid emitting v7 instructions
        .{ .platform = "linux/arm/v6", .target = .{
            .cpu_arch = .arm,
            .os_tag = .linux,
            .abi = .musleabihf,
            .cpu_model = .{ .explicit = &std.Target.arm.cpu.arm1176jzf_s },
        } },
    };
    const SubPath = struct {
        path: [3][]const u8,
        len: usize, // number of valid entries in `path`
    };
    // We are going to put all the binaries in paths that will be happy with
    // the dockerfile at the end, which means we need to get all the platforms
    // into slices. We can do this at comptime, but need to use arrays, so we
    // will hard code 3 element arrays which will hold our linux/arm/v7. If
    // deeper platforms are invented by docker later, we'll need to tweak the
    // hardcoded "3" values above and below, but at least we'll throw a compile
    // error to let the maintainer of the code know they screwed up by adding
    // a hardcoded platform above without changing the hardcoded length values.
    // By having the components chopped up this way, we should be able to build
    // all this from a Windows host
    comptime var dest_sub_paths: [docker_targets.len]SubPath = undefined;
    comptime {
        for (docker_targets, 0..) |dt, inx| {
            var si = std.mem.splitScalar(u8, dt.platform, '/');
            var sub_path: SubPath = undefined;
            sub_path.len = 1 + std.mem.count(u8, dt.platform, "/");
            if (sub_path.len > 3) @compileError("Docker platform cannot have more than 2 forward slashes");
            var jnx: usize = 0;
            while (si.next()) |s| : (jnx += 1)
                sub_path.path[jnx] = s;
            dest_sub_paths[inx] = sub_path;
        }
    }

    const docker_step = b.step("docker", "Prepares the app for bundling as multi-platform docker image");
    for (docker_targets, 0..) |dt, i| {
        // Re-resolve the root module for this platform, reusing the original
        // module's source file and imports; ReleaseSafe keeps runtime checks.
        const target_module = b.createModule(.{
            .root_source_file = compile.root_module.root_source_file,
            .target = b.resolveTargetQuery(dt.target),
            .optimize = .ReleaseSafe,
        });
        for (compile.root_module.import_table.keys()) |k|
            target_module.addImport(k, compile.root_module.import_table.get(k).?);
        const target_exe = b.addExecutable(.{
            .name = compile.name,
            .root_module = target_module,
        });
        // We can't use our dest_sub_paths directly here, because adding
        // a value for "dest_sub_path" in the installArtifact options will also
        // override the use of the basename. So we need to construct our own
        // slice. We know the number of path components though, so we will
        // alloc what we need (no free, since zig build uses an arena) and
        // copy our components in place
        var final_sub_path = try b.allocator.alloc([]const u8, dest_sub_paths[i].len + 1);
        // Only the first `len` entries of `path` are initialized; slicing to
        // [0..len] avoids copying undefined elements for 2-component platforms.
        for (dest_sub_paths[i].path[0..dest_sub_paths[i].len], 0..) |p, j| final_sub_path[j] = p;
        final_sub_path[final_sub_path.len - 1] = target_exe.name; // add basename at end

        docker_step.dependOn(&b.addInstallArtifact(target_exe, .{
            .dest_sub_path = try std.fs.path.join(b.allocator, final_sub_path),
        }).step);
    }

    // The above will get us all the binaries, but we also need a dockerfile
    try dockerInstallDockerfile(b, docker_step, compile.name);
}
|
||||
|
||||
/// Generates a multi-stage Dockerfile for `exe_name` into the install prefix
/// and wires it into `docker_step`. The build stage exists only to harvest CA
/// certificates; the final image is `scratch` plus certs and the binary.
fn dockerInstallDockerfile(b: *std.Build, docker_step: *std.Build.Step, exe_name: []const u8) !void {
    // {s} below is replaced with the executable name; $TARGETPLATFORM is
    // resolved by docker buildx at image build time, not here.
    const template =
        \\FROM alpine:latest as build
        \\RUN apk --update add ca-certificates
        \\
        \\FROM scratch
        \\ARG TARGETPLATFORM
        \\ENV PATH=/bin
        \\COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
        \\COPY bin/$TARGETPLATFORM/{s} /bin
        \\
    ;
    const rendered = try std.fmt.allocPrint(b.allocator, template, .{exe_name});
    const write_files = b.addWriteFiles();
    const generated = write_files.add("Dockerfile", rendered);
    const install = b.addInstallFile(generated, "Dockerfile");
    docker_step.dependOn(&install.step);
}
|
||||
|
|
Loading…
Add table
Reference in a new issue