zig fmt to take care of easy stuff

commit e6f7ab003d (parent cfdf4a3141)
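Most of the changes below follow two mechanical Zig 0.11 migration patterns: indexed `for` loops gain an explicit `0..` counter range, and the old two-argument cast builtins (`@intCast`, `@truncate`, `@bitCast`, `@ptrCast`, plus the renamed `@floatToInt`/`@intFromFloat`, `@enumToInt`/`@intFromEnum`, `@intToEnum`/`@enumFromInt`) become single-argument builtins whose result type is inferred from context, typically supplied with `@as`. A minimal standalone sketch of both patterns, for illustration only (not code from this repository):

const std = @import("std");

pub fn main() void {
    const text = "abc";

    // Zig 0.11: the index comes from an explicit counter range.
    // (previously: for (text) |c, i| { ... })
    for (text, 0..) |c, i| {
        std.debug.print("{d}: {c}\n", .{ i, c });
    }

    // Zig 0.11: cast builtins take one argument and infer the result
    // type from context; the destination type moves into @as.
    // (previously: const small = @truncate(u8, wide);)
    const wide: u32 = 0x1234;
    const small = @as(u8, @truncate(wide));
    std.debug.print("0x{x}\n", .{small});
}

Both rewrites are purely syntactic, which is why they can land in one sweep alongside `zig fmt`.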
@@ -283,7 +283,7 @@ pub fn Request(comptime request_action: anytype) type {
 header_name: []const u8,
 };
 comptime var fields = [_]?HeaderInfo{null} ** std.meta.fields(@TypeOf(action.Response.http_header)).len;
-inline for (std.meta.fields(@TypeOf(action.Response.http_header))) |f, inx| {
+inline for (std.meta.fields(@TypeOf(action.Response.http_header)), 0..) |f, inx| {
 fields[inx] = HeaderInfo{
 .name = f.name,
 .T = @TypeOf(@field(full_response.response, f.name)),
@@ -901,7 +901,7 @@ fn buildPath(
 defer buffer.deinit();
 var in_label = false;
 var start: usize = 0;
-for (raw_uri) |c, inx| {
+for (raw_uri, 0..) |c, inx| {
 switch (c) {
 '{' => {
 in_label = true;
@@ -449,7 +449,7 @@ const LineIterator = struct {
 if (self.inx >= self.text.len) return null;
 var current = self.inx;
 var start = self.inx;
-for (self.text[self.inx..]) |c, i| {
+for (self.text[self.inx..], 0..) |c, i| {
 if (c == '\n') {
 // log.debug("got \\n: {d}", .{i});
 current += i + 1;
@@ -475,7 +475,7 @@ fn credsForText(text: []const u8, profile: []const u8) !PartialCredentials {
 while (lines.next()) |line| {
 // log.debug("line: {s}", .{line});
 var section_start: ?usize = 0;
-for (line) |c, i| {
+for (line, 0..) |c, i| {
 switch (c) {
 '#' => break,
 '[' => section_start = i + 1,
@@ -500,7 +500,7 @@ fn credsForText(text: []const u8, profile: []const u8) !PartialCredentials {
 for (&[_][]const u8{
 "aws_access_key_id",
 "aws_secret_access_key",
-}) |needle, inx| {
+}, 0..) |needle, inx| {
 if (std.ascii.eqlIgnoreCase(key, needle)) {
 // TODO: Trim this out
 creds[inx] = trim(line[i + 1 ..]);
@@ -527,7 +527,7 @@ fn trim(text: []const u8) []const u8 {
 var start: ?usize = null;
 var end: ?usize = null;
 
-for (text) |c, i| switch (c) {
+for (text, 0..) |c, i| switch (c) {
 ' ', '\t' => {},
 '#' => return trimmed(text, start, end),
 else => {
@@ -584,7 +584,7 @@ fn getHomeDir(allocator: std.mem.Allocator) ![]const u8 {
 &dir_path_ptr,
 )) {
 std.os.windows.S_OK => {
-defer std.os.windows.ole32.CoTaskMemFree(@ptrCast(*anyopaque, dir_path_ptr));
+defer std.os.windows.ole32.CoTaskMemFree(@as(*anyopaque, @ptrCast(dir_path_ptr)));
 const global_dir = std.unicode.utf16leToUtf8Alloc(allocator, std.mem.sliceTo(dir_path_ptr, 0)) catch |err| switch (err) {
 error.UnexpectedSecondSurrogateHalf => return error.HomeDirUnavailable,
 error.ExpectedSecondSurrogateHalf => return error.HomeDirUnavailable,
@@ -351,7 +351,7 @@ fn endpointException(
 fn s3BucketFromPath(path: []const u8) []const u8 {
 var in_bucket = false;
 var start: usize = 0;
-for (path) |c, inx| {
+for (path, 0..) |c, inx| {
 if (c == '/') {
 if (in_bucket) return path[start..inx];
 start = inx + 1;
@@ -370,7 +370,7 @@ fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8) !EndPoint {
 var port: u16 = 443;
 var host_start: usize = 0;
 var host_end: usize = 0;
-for (uri) |ch, i| {
+for (uri, 0..) |ch, i| {
 switch (ch) {
 ':' => {
 if (!std.mem.eql(u8, scheme, "")) {
@@ -671,7 +671,7 @@ fn canonicalHeaderValue(allocator: std.mem.Allocator, value: []const u8) ![]cons
 var start: usize = 0;
 const rc = try allocator.alloc(u8, value.len);
 var rc_inx: usize = 0;
-for (value) |c, i| {
+for (value, 0..) |c, i| {
 if (!started and !std.ascii.isSpace(c)) {
 started = true;
 start = i;
@@ -9,7 +9,7 @@ pub fn snakeToCamel(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
 const rc = try allocator.alloc(u8, name.len + 1);
 while (utf8_name.nextCodepoint()) |cp| {
 if (cp > 0xff) return error.UnicodeNotSupported;
-const ascii_char = @truncate(u8, cp);
+const ascii_char = @as(u8, @truncate(cp));
 if (ascii_char != '_') {
 if (previous_ascii == '_' and ascii_char >= 'a' and ascii_char <= 'z') {
 const uppercase_char = ascii_char - ('a' - 'A');
src/date.zig (20 lines changed)
@@ -14,7 +14,7 @@ const DAYS_PER_YEAR = 365; //* Normal year (no leap year) */
 pub fn timestampToDateTime(timestamp: i64) DateTime {
 
 // from https://de.wikipedia.org/wiki/Unixzeit
-const unixtime = @intCast(u64, timestamp);
+const unixtime = @as(u64, @intCast(timestamp));
 const DAYS_IN_4_YEARS = 1461; //* 4*365 + 1 */
 const DAYS_IN_100_YEARS = 36524; //* 100*365 + 25 - 1 */
 const DAYS_IN_400_YEARS = 146097; //* 400*365 + 100 - 4 + 1 */
@@ -27,17 +27,17 @@ pub fn timestampToDateTime(timestamp: i64) DateTime {
 // Leap year rules for Gregorian Calendars
 // Any year divisible by 100 is not a leap year unless also divisible by 400
 temp = 4 * (dayN + DAYS_IN_100_YEARS + 1) / DAYS_IN_400_YEARS - 1;
-var year = @intCast(u16, 100 * temp);
+var year = @as(u16, @intCast(100 * temp));
 dayN -= DAYS_IN_100_YEARS * temp + temp / 4;
 
 // For Julian calendars, each year divisible by 4 is a leap year
 temp = 4 * (dayN + DAYS_PER_YEAR + 1) / DAYS_IN_4_YEARS - 1;
-year += @intCast(u16, temp);
+year += @as(u16, @intCast(temp));
 dayN -= DAYS_PER_YEAR * temp + temp / 4;
 
 // dayN calculates the days of the year in relation to March 1
-var month = @intCast(u8, (5 * dayN + 2) / 153);
-var day = @intCast(u8, dayN - (@intCast(u64, month) * 153 + 2) / 5 + 1);
+var month = @as(u8, @intCast((5 * dayN + 2) / 153));
+var day = @as(u8, @intCast(dayN - (@as(u64, @intCast(month)) * 153 + 2) / 5 + 1));
 // 153 = 31+30+31+30+31 Days for the 5 months from March through July
 // 153 = 31+30+31+30+31 Days for the 5 months from August through December
 // 31+28 Days for January and February (see below)
@@ -50,9 +50,9 @@ pub fn timestampToDateTime(timestamp: i64) DateTime {
 year += 1;
 }
 
-var hours = @intCast(u8, seconds_since_midnight / 3600);
-var minutes = @intCast(u8, seconds_since_midnight % 3600 / 60);
-var seconds = @intCast(u8, seconds_since_midnight % 60);
+var hours = @as(u8, @intCast(seconds_since_midnight / 3600));
+var minutes = @as(u8, @intCast(seconds_since_midnight % 3600 / 60));
+var seconds = @as(u8, @intCast(seconds_since_midnight % 60));
 
 return DateTime{ .day = day, .month = month, .year = year, .hour = hours, .minute = minutes, .second = seconds };
 }
@@ -72,7 +72,7 @@ pub fn parseEnglishToDateTime(data: []const u8) !DateTime {
 var state = EnglishParsingState.Start;
 // Anything not explicitly set by our string would be 0
 var rc = DateTime{ .year = 0, .month = 0, .day = 0, .hour = 0, .minute = 0, .second = 0 };
-for (data) |ch, i| {
+for (data, 0..) |ch, i| {
 _ = i;
 switch (ch) {
 ',' => {},
@@ -154,7 +154,7 @@ pub fn parseIso8601ToDateTime(data: []const u8) !DateTime {
 // Anything not explicitly set by our string would be 0
 var rc = DateTime{ .year = 0, .month = 0, .day = 0, .hour = 0, .minute = 0, .second = 0 };
 var zulu_time = false;
-for (data) |ch, i| {
+for (data, 0..) |ch, i| {
 _ = i;
 switch (ch) {
 '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
@@ -3,4 +3,4 @@ pub const abbreviated_hash = "a662f6f";
 pub const commit_date = "2022-06-05 18:34:39 -0700";
 pub const branch = "HEAD -> master, origin/master";
 pub const dirty = true;
 pub const pretty_version = "version a662f6f, committed at 2022-06-05 18:34:39 -0700 (dirty)";
src/json.zig (24 lines changed)
@@ -111,7 +111,7 @@ pub const Token = union(enum) {
 pub fn decodedLength(self: @This()) usize {
 return self.count +% switch (self.escapes) {
 .None => 0,
-.Some => |s| @bitCast(usize, s.size_diff),
+.Some => |s| @as(usize, @bitCast(s.size_diff)),
 };
 }
 
@@ -247,7 +247,7 @@ pub const StreamingParser = struct {
 pub fn fromInt(x: anytype) State {
 debug.assert(x == 0 or x == 1);
 const T = std.meta.Tag(State);
-return @intToEnum(State, @intCast(T, x));
+return @as(State, @enumFromInt(@as(T, @intCast(x))));
 }
 };
 
@@ -1427,7 +1427,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
 }
 },
 .Array => {
-for (a) |e, i|
+for (a, 0..) |e, i|
 if (!parsedEqual(e, b[i])) return false;
 return true;
 },
@@ -1441,7 +1441,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
 .One => return parsedEqual(a.*, b.*),
 .Slice => {
 if (a.len != b.len) return false;
-for (a) |e, i|
+for (a, 0..) |e, i|
 if (!parsedEqual(e, b[i])) return false;
 return true;
 },
@@ -1585,7 +1585,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 return try std.fmt.parseInt(T, numberToken.slice(tokens.slice, tokens.i - 1), 10);
 const float = try std.fmt.parseFloat(f128, numberToken.slice(tokens.slice, tokens.i - 1));
 if (std.math.round(float) != float) return error.InvalidNumber;
-return @floatToInt(T, float);
+return @as(T, @intFromFloat(float));
 },
 .Optional => |optionalInfo| {
 if (token == .Null) {
@@ -1652,7 +1652,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 errdefer {
 // TODO: why so high here? This was needed for ec2 describe instances
 @setEvalBranchQuota(100000);
-inline for (structInfo.fields) |field, i| {
+inline for (structInfo.fields, 0..) |field, i| {
 if (fields_seen[i] and !field.is_comptime) {
 parseFree(field.field_type, @field(r, field.name), options);
 }
@@ -1665,7 +1665,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 .String => |stringToken| {
 const key_source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
 var found = false;
-inline for (structInfo.fields) |field, i| {
+inline for (structInfo.fields, 0..) |field, i| {
 // TODO: using switches here segfault the compiler (#2727?)
 if ((stringToken.escapes == .None and mem.eql(u8, field.name, key_source_slice)) or (stringToken.escapes == .Some and (field.name.len == stringToken.decodedLength() and encodesTo(field.name, key_source_slice))) or (stringToken.escapes == .None and options.allow_camel_case_conversion and try camelCaseComp(field.name, key_source_slice, options)) or (stringToken.escapes == .None and options.allow_snake_case_conversion and try snakeCaseComp(field.name, key_source_slice, options))) {
 // if (switch (stringToken.escapes) {
@@ -1720,7 +1720,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
 else => return error.UnexpectedToken,
 }
 }
-inline for (structInfo.fields) |field, i| {
+inline for (structInfo.fields, 0..) |field, i| {
 if (!fields_seen[i]) {
 if (field.default_value) |default| {
 if (!field.is_comptime) {
@@ -2705,7 +2705,7 @@ test "string copy option" {
 const copy_addr = &obj_copy.get("noescape").?.String[0];
 
 var found_nocopy = false;
-for (input) |_, index| {
+for (input, 0..) |_, index| {
 try testing.expect(copy_addr != &input[index]);
 if (nocopy_addr == &input[index]) {
 found_nocopy = true;
@@ -2784,8 +2784,8 @@ fn outputUnicodeEscape(
 assert(codepoint <= 0x10FFFF);
 // To escape an extended character that is not in the Basic Multilingual Plane,
 // the character is represented as a 12-character sequence, encoding the UTF-16 surrogate pair.
-const high = @intCast(u16, (codepoint - 0x10000) >> 10) + 0xD800;
-const low = @intCast(u16, codepoint & 0x3FF) + 0xDC00;
+const high = @as(u16, @intCast((codepoint - 0x10000) >> 10)) + 0xD800;
+const low = @as(u16, @intCast(codepoint & 0x3FF)) + 0xDC00;
 try out_stream.writeAll("\\u");
 try std.fmt.formatIntValue(high, "x", std.fmt.FormatOptions{ .width = 4, .fill = '0' }, out_stream);
 try out_stream.writeAll("\\u");
@@ -2957,7 +2957,7 @@ pub fn stringify(
 if (child_options.whitespace) |*whitespace| {
 whitespace.indent_level += 1;
 }
-for (value) |x, i| {
+for (value, 0..) |x, i| {
 if (i != 0) {
 try out_stream.writeByte(',');
 }
src/main.zig (12 lines changed)
@@ -12,22 +12,22 @@ pub fn log(
 args: anytype,
 ) void {
 // Ignore aws_signing messages
-if (verbose < 3 and scope == .aws_signing and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 3 and scope == .aws_signing and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 // Ignore aws_credentials messages
-if (verbose < 3 and scope == .aws_credentials and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 3 and scope == .aws_credentials and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 // Ignore xml_shaper messages
-if (verbose < 3 and scope == .xml_shaper and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 3 and scope == .xml_shaper and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 // Ignore date messages
-if (verbose < 3 and scope == .date and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 3 and scope == .date and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 // Ignore awshttp messages
-if (verbose < 2 and scope == .awshttp and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 2 and scope == .awshttp and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 
-if (verbose < 1 and scope == .aws and @enumToInt(level) >= @enumToInt(std.log.Level.debug))
+if (verbose < 1 and scope == .aws and @intFromEnum(level) >= @intFromEnum(std.log.Level.debug))
 return;
 const scope_prefix = "(" ++ @tagName(scope) ++ "): ";
 const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
@@ -7,7 +7,7 @@ pub fn Services(comptime service_imports: anytype) type {
 // From here, the fields of our structure can be generated at comptime...
 var fields: [serviceCount(service_imports)]std.builtin.TypeInfo.StructField = undefined;
 
-for (fields) |*item, i| {
+for (fields, 0..) |*item, i| {
 const import_field = @field(service_list, @tagName(service_imports[i]));
 item.* = .{
 .name = @tagName(service_imports[i]),
@@ -213,7 +213,7 @@ fn parseInternal(comptime T: type, element: *xml.Element, options: ParseOptions)
 // }
 
 log.debug("Processing fields in struct: {s}", .{@typeName(T)});
-inline for (struct_info.fields) |field, i| {
+inline for (struct_info.fields, 0..) |field, i| {
 var name = field.name;
 var found_value = false;
 if (comptime std.meta.trait.hasFn("fieldNameFor")(T))
@@ -368,7 +368,7 @@ pub fn fuzzyEqual(a: []const u8, b: []const u8, options: xml.PredicateOptions) !
 
 fn normalize(val: []u8) []u8 {
 var underscores: u64 = 0;
-for (val) |ch, i| {
+for (val, 0..) |ch, i| {
 if (ch == '_') {
 underscores = underscores + 1;
 } else {