Compare commits

...

17 Commits

Author SHA1 Message Date
xdBronch
6f6e841d00
Merge dbba600c6d into 87863a834b 2024-11-26 14:11:34 +01:00
Chris Boesch
87863a834b
std.math.complex: Add squared magnitude function (#21998)
2024-11-26 13:03:48 +00:00
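The new squaredMagnitude method avoids the @sqrt that magnitude performs, which helps when only comparing distances. A minimal usage sketch, assuming the std.math.complex API shown in the hunk further down (the test name here is illustrative):

const std = @import("std");
const Complex = std.math.Complex;

test "squaredMagnitude for distance comparisons (sketch)" {
    const a = Complex(f32).init(3, 4); // |a|^2 = 25
    const b = Complex(f32).init(1, 1); // |b|^2 = 2
    // Squared magnitudes preserve ordering, so no @sqrt is needed here.
    try std.testing.expect(a.squaredMagnitude() > b.squaredMagnitude());
    try std.testing.expectApproxEqAbs(@as(f32, 25.0), a.squaredMagnitude(), 1e-5);
}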
Andrew Kelley
b0dcce93f7
Merge pull request #22075 from ziglang/fix-broken-pipe
std.io.Poller: handle EPIPE as EOF
2024-11-26 00:36:33 -05:00
David Rubin
a6af55cc6e ip: cleanup @constCast usages 2024-11-25 18:41:36 -05:00
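The cleanup is possible because InternPool's read-only accessors can take *const InternPool and still reach mutable shared storage through pointer fields (see the large hunk at the end of this compare), so callers no longer need @constCast. A minimal sketch of that pattern, with hypothetical names (Shared, itemPtr, itemUnordered):

const std = @import("std");

// The mutable storage sits behind a pointer field, so a `*const Shared`
// can still hand out a mutable `*u32` without casting away const.
const Shared = struct {
    items: [*]u32,

    fn itemPtr(s: *const Shared, i: usize) *u32 {
        return &s.items[i];
    }

    pub fn itemUnordered(s: *const Shared, i: usize) u32 {
        return @atomicLoad(u32, s.itemPtr(i), .unordered);
    }
};

test "const-correct accessor (sketch)" {
    var storage = [_]u32{ 1, 2, 3 };
    const s: Shared = .{ .items = &storage };
    try std.testing.expectEqual(@as(u32, 2), s.itemUnordered(1));
}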
Andrew Kelley
f6392b9526 cmake: don't add an unnecessary curses static lib dependency 2024-11-25 15:05:42 -08:00
Andrew Kelley
21f0fce28b CI: update macOS runner to 13
Apple has already dropped support for macOS 12.
GitHub Actions is dropping macOS 12 support now.
The Zig project is also dropping macOS 12 support now.

This commit also bumps the default minimum macOS version to 13.
2024-11-25 15:00:10 -08:00
Andrew Kelley
775b48dd10 std.io.Poller: handle EPIPE as EOF
closes #17483
2024-11-25 14:18:55 -08:00
Andrew Kelley
aa5341bf85 std.process.Child: explicit error set for wait 2024-11-25 14:18:55 -08:00
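The Child.zig hunk below introduces WaitError = SpawnError || std.os.windows.GetProcessMemoryInfoError, so callers can switch over wait's errors exhaustively instead of depending on an inferred set. A minimal sketch of the technique with hypothetical names (StartError, StopError, stop):

const std = @import("std");

const StartError = error{OutOfMemory};
const StopError = StartError || error{Timeout};

// An explicit error set documents exactly what callers must handle and
// keeps the signature stable even if the implementation changes.
fn stop(fail: bool) StopError!void {
    if (fail) return error.Timeout;
}

test "exhaustive handling against an explicit error set (sketch)" {
    stop(true) catch |err| switch (err) {
        error.Timeout => {},
        error.OutOfMemory => return err,
    };
}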
Andrew Kelley
f4e042a4c3
Merge pull request #21858 from francescoalemanno/patch-1
add improved std.hash.int - deprecate std.hash.uint32
2024-11-25 17:00:17 -05:00
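std.hash.int accepts any integer type up to 256 bits, signed or unsigned, and replaces the deprecated std.hash.uint32. A minimal usage sketch, reusing expected values from the test in the hash.zig hunk below:

const std = @import("std");

test "std.hash.int usage (sketch)" {
    // One entry point for any integer width; signed inputs are bitcast
    // to unsigned before mixing, so i32 and u32 hash identically.
    try std.testing.expectEqual(@as(u32, 0x42741D6), std.hash.int(@as(u32, 1)));
    try std.testing.expectEqual(@as(i32, 0x42741D6), std.hash.int(@as(i32, 1)));
    // Odd widths route through the generic multiply/xor construction.
    try std.testing.expectEqual(@as(u24, 0x838380), std.hash.int(@as(u24, 1)));
}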
Andrew Kelley
ca67f80b6e std.hash.int: avoid words like "easy" and "fast" in doc comments 2024-11-24 15:30:52 -08:00
Francesco Alemanno
ae6c24b490 std.hash.int: better handle odd bit sizes
Uses the non-rational solution of a quadratic. I made it work up to 256
bits and added Mathematica code in case anyone wants to verify the magic
constant.

Integers with bit sizes 3...15 were affected by fatal bias; it is best to
make them pass through the generic solution.

Thanks to RetroDev256 & Andrew for the feedback.
2024-11-24 15:29:20 -08:00
Andrew Kelley
d09fd249c0 std.hash.int: restore previous behavior
In the parent commit, I handled odd bit sizes by upcasting and
truncating. However, it seems the else branch is intended to handle
those cases instead, so this commit reverts that behavior.
2024-11-24 15:27:03 -08:00
Andrew Kelley
5ad44c14b0 std.hash.int: use anytype instead of explicit type parameter
also
* allow signed ints, simply bitcast them to unsigned
* handle odd bit sizes by upcasting and then truncating
* naming conventions
* remove redundant code
* better use of testing API
2024-11-24 15:27:03 -08:00
Francesco Alemanno
aee6f7d7ee std.hash: improve simple hashing of unsigned integers
Before, the default bit mixer was very biased, and after a
lot of searching it turns out that selecting a better solution is hard.

I wrote a custom statistical analysis tailored for bit mixers in order
to select the best one at each size (u64/u32/u16), compared a lot of
mixers, and packaged the best ones in this commit.
2024-11-24 15:27:03 -08:00
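The commit's statistical analysis itself is not part of this compare. As a rough illustration of the kind of measurement involved, a crude avalanche check counts how many output bits flip when a single input bit flips; a good mixer averages about half. The sketch below is only illustrative and is not the analysis used for the commit:

const std = @import("std");

fn mixU32(x: u32) u32 {
    return std.hash.int(x);
}

// Average number of output bits that flip when a single input bit flips.
fn avgFlippedBits(comptime mix: fn (u32) u32, samples: u32) f64 {
    var total: u64 = 0;
    var count: u64 = 0;
    var x: u32 = 0x12345678;
    var i: u32 = 0;
    while (i < samples) : (i += 1) {
        x +%= 0x9e3779b9; // simple deterministic input sequence
        for (0..32) |bit| {
            const flipped = x ^ (@as(u32, 1) << @intCast(bit));
            total += @popCount(mix(x) ^ mix(flipped));
            count += 1;
        }
    }
    return @as(f64, @floatFromInt(total)) / @as(f64, @floatFromInt(count));
}

test "rough avalanche check for std.hash.int on u32 (sketch)" {
    const avg = avgFlippedBits(mixU32, 1000);
    // A well-mixed 32-bit hash flips close to 16 bits on average.
    try std.testing.expect(avg > 14.0 and avg < 18.0);
}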
Ilia Choly
e2f24a2d70 Allocator.free: document zero-length behavior
It wasn't immediately clear from the implementation whether passing
zero-length memory to free() was undefined behavior or intentionally
supported. Since ArrayList and other core data structures rely on
this behavior working correctly, this should be explicitly documented
as part of the public API contract.
2024-11-24 18:19:11 -05:00
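The documented contract makes freeing a zero-length slice a guaranteed no-op, which is what ArrayList and similar containers rely on. A minimal sketch:

const std = @import("std");

test "free of a zero-length allocation is a no-op (sketch)" {
    const gpa = std.testing.allocator;
    // Allocating zero items returns an empty slice; freeing it is valid
    // and does nothing, per the documented contract.
    const empty = try gpa.alloc(u8, 0);
    gpa.free(empty);
}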
Bruno Reis
c2db5d9cd1 treat errno(6) (NXIO) as expected error in openatZ 2024-11-24 18:17:56 -05:00
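With ENXIO mapped to error.NoDevice in openatZ (see the posix hunk below), callers can treat a missing device as an expected, recoverable error rather than an unexpected errno. A hypothetical sketch (the helper name and default flags are assumptions; default O flags are read-only on most targets):

const std = @import("std");
const posix = std.posix;

// Hypothetical helper: open a path relative to dir_fd for reading, returning
// null when the underlying device is not available (ENXIO).
fn openIfDevicePresent(dir_fd: posix.fd_t, path: [*:0]const u8) !?posix.fd_t {
    const fd = posix.openatZ(dir_fd, path, .{}, 0) catch |err| switch (err) {
        error.NoDevice => return null,
        else => |e| return e,
    };
    return fd;
}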
xdBronch
dbba600c6d Add getSentinel helper to Pointer and Array builtin.Types 2024-11-15 13:53:36 -05:00
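The getSentinel helpers added in the builtin.zig hunk below return the type-erased sentinel as a value of the child type. A minimal usage sketch, assuming the branch that carries this helper (the test name is illustrative):

const std = @import("std");

test "reading sentinels via getSentinel (sketch)" {
    // A sentinel-terminated slice carries its sentinel as *const anyopaque;
    // getSentinel casts it back to the child type (u8 here).
    const ptr_info = @typeInfo([:0]const u8).pointer;
    try std.testing.expectEqual(@as(u8, 0), ptr_info.getSentinel());

    const arr_info = @typeInfo([4:0]u8).array;
    try std.testing.expectEqual(@as(u8, 0), arr_info.getSentinel());
}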
11 changed files with 155 additions and 70 deletions

View File

@@ -46,7 +46,7 @@ jobs:
- name: Build and Test
run: sh ci/aarch64-linux-release.sh
x86_64-macos-release:
runs-on: "macos-12"
runs-on: "macos-13"
env:
ARCH: "x86_64"
steps:

View File

@@ -89,12 +89,7 @@ set(ZIG_SHARED_LLVM off CACHE BOOL "Prefer linking against shared LLVM libraries
set(ZIG_STATIC_LLVM ${ZIG_STATIC} CACHE BOOL "Prefer linking against static LLVM libraries")
set(ZIG_STATIC_ZLIB ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zlib")
set(ZIG_STATIC_ZSTD ${ZIG_STATIC} CACHE BOOL "Prefer linking against static zstd")
if(APPLE AND ZIG_STATIC)
set(ZIG_STATIC_CURSES on)
else()
set(ZIG_STATIC_CURSES off)
endif()
set(ZIG_STATIC_CURSES ${ZIG_STATIC_CURSES} CACHE BOOL "Prefer linking against static curses")
set(ZIG_STATIC_CURSES OFF CACHE BOOL "Enable static linking against curses")
if (ZIG_SHARED_LLVM AND ZIG_STATIC_LLVM)
message(SEND_ERROR "-DZIG_SHARED_LLVM and -DZIG_STATIC_LLVM cannot both be enabled simultaneously")

View File

@@ -531,7 +531,7 @@ pub const Os = struct {
},
.macos => .{
.semver = .{
.min = .{ .major = 11, .minor = 7, .patch = 1 },
.min = .{ .major = 13, .minor = 0, .patch = 0 },
.max = .{ .major = 15, .minor = 2, .patch = 0 },
},
},

View File

@@ -607,6 +607,7 @@ pub const Type = union(enum) {
/// The type of the sentinel is the element type of the pointer, which is
/// the value of the `child` field in this struct. However there is no way
/// to refer to that type here, so we use pointer to `anyopaque`.
/// See `getSentinel` for an easier way to access this value.
sentinel: ?*const anyopaque,
/// This data structure is used by the Zig language code generation and
@@ -617,6 +618,14 @@ pub const Type = union(enum) {
Slice,
C,
};
/// Returns the sentinel value casted to the child type
/// Asserts that `pointer.size` is `.Many` or `.Slice`
/// and that `pointer.sentinel` is non-null
pub fn getSentinel(pointer: Pointer) pointer.child {
std.debug.assert(pointer.size == .Many or pointer.size == .Slice);
return @as(*const pointer.child, @ptrCast(@alignCast(pointer.sentinel.?))).*;
}
};
/// This data structure is used by the Zig language code generation and
@@ -628,7 +637,14 @@ pub const Type = union(enum) {
/// The type of the sentinel is the element type of the array, which is
/// the value of the `child` field in this struct. However there is no way
/// to refer to that type here, so we use pointer to `anyopaque`.
/// See `getSentinel` for an easier way to access this value.
sentinel: ?*const anyopaque,
/// Returns the sentinel value casted to the child type
/// Asserts that `array.sentinel` is non-null
pub fn getSentinel(array: Array) array.child {
return @as(*const array.child, @ptrCast(@alignCast(array.sentinel.?))).*;
}
};
/// This data structure is used by the Zig language code generation and

View File

@@ -37,20 +37,85 @@ pub const XxHash3 = xxhash.XxHash3;
pub const XxHash64 = xxhash.XxHash64;
pub const XxHash32 = xxhash.XxHash32;
/// This is handy if you have a u32 and want a u32 and don't want to take a
/// detour through many layers of abstraction elsewhere in the std.hash
/// namespace.
/// Copied from https://nullprogram.com/blog/2018/07/31/
/// Integer-to-integer hashing for bit widths <= 256.
pub fn int(input: anytype) @TypeOf(input) {
// This function is only intended for integer types
const info = @typeInfo(@TypeOf(input)).int;
const bits = info.bits;
// Convert input to unsigned integer (easier to deal with)
const Uint = @Type(.{ .int = .{ .bits = bits, .signedness = .unsigned } });
const u_input: Uint = @bitCast(input);
if (bits > 256) @compileError("bit widths > 256 are unsupported, use std.hash.autoHash functionality.");
// For bit widths that don't have a dedicated function, use a heuristic
// construction with a multiplier suited to diffusion -
// a mod 2^bits where a^2 - 46 * a + 1 = 0 mod 2^(bits + 4),
// on Mathematica: bits = 256; BaseForm[Solve[1 - 46 a + a^2 == 0, a, Modulus -> 2^(bits + 4)][[-1]][[1]][[2]], 16]
const mult: Uint = @truncate(0xfac2e27ed2036860a062b5f264d80a512b00aa459b448bf1eca24d41c96f59e5b);
// The bit width of the input integer determines how to hash it
const output = switch (bits) {
0...2 => u_input *% mult,
16 => uint16(u_input),
32 => uint32(u_input),
64 => uint64(u_input),
else => blk: {
var x: Uint = u_input;
inline for (0..4) |_| {
x ^= x >> (bits / 2);
x *%= mult;
}
break :blk x;
},
};
return @bitCast(output);
}
/// Source: https://github.com/skeeto/hash-prospector
fn uint16(input: u16) u16 {
var x: u16 = input;
x = (x ^ (x >> 7)) *% 0x2993;
x = (x ^ (x >> 5)) *% 0xe877;
x = (x ^ (x >> 9)) *% 0x0235;
x = x ^ (x >> 10);
return x;
}
/// DEPRECATED: use std.hash.int()
/// Source: https://github.com/skeeto/hash-prospector
pub fn uint32(input: u32) u32 {
var x: u32 = input;
x ^= x >> 16;
x *%= 0x7feb352d;
x ^= x >> 15;
x *%= 0x846ca68b;
x ^= x >> 16;
x = (x ^ (x >> 17)) *% 0xed5ad4bb;
x = (x ^ (x >> 11)) *% 0xac4c1b51;
x = (x ^ (x >> 15)) *% 0x31848bab;
x = x ^ (x >> 14);
return x;
}
/// Source: https://github.com/jonmaiga/mx3
fn uint64(input: u64) u64 {
var x: u64 = input;
const c = 0xbea225f9eb34556d;
x = (x ^ (x >> 32)) *% c;
x = (x ^ (x >> 29)) *% c;
x = (x ^ (x >> 32)) *% c;
x = x ^ (x >> 29);
return x;
}
test int {
const expectEqual = @import("std").testing.expectEqual;
try expectEqual(0x1, int(@as(u1, 1)));
try expectEqual(0x3, int(@as(u2, 1)));
try expectEqual(0x4, int(@as(u3, 1)));
try expectEqual(0xD6, int(@as(u8, 1)));
try expectEqual(0x2880, int(@as(u16, 1)));
try expectEqual(0x2880, int(@as(i16, 1)));
try expectEqual(0x838380, int(@as(u24, 1)));
try expectEqual(0x42741D6, int(@as(u32, 1)));
try expectEqual(0x42741D6, int(@as(i32, 1)));
try expectEqual(0x71894DE00D9981F, int(@as(u64, 1)));
try expectEqual(0x71894DE00D9981F, int(@as(i64, 1)));
}
test {
_ = adler;
_ = auto_hash;

View File

@@ -646,7 +646,10 @@ pub fn Poller(comptime StreamEnum: type) type {
// always check if there's some data waiting to be read first.
if (poll_fd.revents & posix.POLL.IN != 0) {
const buf = try q.writableWithSize(bump_amt);
const amt = try posix.read(poll_fd.fd, buf);
const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) {
error.BrokenPipe => 0, // Handle the same as EOF.
else => |e| return e,
};
q.update(amt);
if (amt == 0) {
// Remove the fd when the EOF condition is met.

View File

@@ -115,6 +115,10 @@ pub fn Complex(comptime T: type) type {
pub fn magnitude(self: Self) T {
return @sqrt(self.re * self.re + self.im * self.im);
}
pub fn squaredMagnitude(self: Self) T {
return self.re * self.re + self.im * self.im;
}
};
}
@@ -189,6 +193,13 @@ test "magnitude" {
try testing.expect(math.approxEqAbs(f32, c, 5.83095, epsilon));
}
test "squaredMagnitude" {
const a = Complex(f32).init(5, 3);
const c = a.squaredMagnitude();
try testing.expect(math.approxEqAbs(f32, c, math.pow(f32, a.magnitude(), 2), epsilon));
}
test {
_ = @import("complex/abs.zig");
_ = @import("complex/acosh.zig");

View File

@@ -301,8 +301,9 @@ pub fn reallocAdvanced(
return mem.bytesAsSlice(T, new_bytes);
}
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
/// Free an array allocated with `alloc`.
/// If memory has length 0, free is a no-op.
/// To free a single item, see `destroy`.
pub fn free(self: Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).pointer;
const bytes = mem.sliceAsBytes(memory);

View File

@@ -1817,6 +1817,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: O, mode: mode_t) O
.OPNOTSUPP => return error.FileLocksNotSupported,
.AGAIN => return error.WouldBlock,
.TXTBSY => return error.FileBusy,
.NXIO => return error.NoDevice,
.ILSEQ => |err| if (native_os == .wasi)
return error.InvalidUtf8
else

View File

@@ -293,19 +293,16 @@ pub fn killPosix(self: *ChildProcess) !Term {
error.ProcessNotFound => return error.AlreadyTerminated,
else => return err,
};
try self.waitUnwrapped();
self.waitUnwrapped();
return self.term.?;
}
pub const WaitError = SpawnError || std.os.windows.GetProcessMemoryInfoError;
/// Blocks until child process terminates and then cleans up all resources.
pub fn wait(self: *ChildProcess) !Term {
const term = if (native_os == .windows)
try self.waitWindows()
else
try self.waitPosix();
pub fn wait(self: *ChildProcess) WaitError!Term {
const term = if (native_os == .windows) try self.waitWindows() else self.waitPosix();
self.id = undefined;
return term;
}
@@ -408,7 +405,7 @@ pub fn run(args: struct {
};
}
fn waitWindows(self: *ChildProcess) !Term {
fn waitWindows(self: *ChildProcess) WaitError!Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -418,17 +415,17 @@ fn waitWindows(self: *ChildProcess) !Term {
return self.term.?;
}
fn waitPosix(self: *ChildProcess) !Term {
fn waitPosix(self: *ChildProcess) SpawnError!Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
}
try self.waitUnwrapped();
self.waitUnwrapped();
return self.term.?;
}
fn waitUnwrappedWindows(self: *ChildProcess) !void {
fn waitUnwrappedWindows(self: *ChildProcess) WaitError!void {
const result = windows.WaitForSingleObjectEx(self.id, windows.INFINITE, false);
self.term = @as(SpawnError!Term, x: {
@@ -450,7 +447,7 @@ fn waitUnwrappedWindows(self: *ChildProcess) !void {
return result;
}
fn waitUnwrapped(self: *ChildProcess) !void {
fn waitUnwrapped(self: *ChildProcess) void {
const res: posix.WaitPidResult = res: {
if (self.request_resource_usage_statistics) {
switch (native_os) {

View File

@@ -1431,16 +1431,12 @@ pub const OptionalMapIndex = enum(u32) {
pub const MapIndex = enum(u32) {
_,
pub fn get(map_index: MapIndex, ip: *InternPool) *FieldMap {
pub fn get(map_index: MapIndex, ip: *const InternPool) *FieldMap {
const unwrapped_map_index = map_index.unwrap(ip);
const maps = ip.getLocalShared(unwrapped_map_index.tid).maps.acquire();
return &maps.view().items(.@"0")[unwrapped_map_index.index];
}
pub fn getConst(map_index: MapIndex, ip: *const InternPool) FieldMap {
return map_index.get(@constCast(ip)).*;
}
pub fn toOptional(i: MapIndex) OptionalMapIndex {
return @enumFromInt(@intFromEnum(i));
}
@@ -1853,7 +1849,7 @@ pub const Key = union(enum) {
/// Look up field index based on field name.
pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = self.names_map.unwrap().?.getConst(ip);
const map = self.names_map.unwrap().?.get(ip);
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
@@ -2101,13 +2097,13 @@ pub const Key = union(enum) {
comptime_args: Index.Slice,
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
fn analysisPtr(func: Func, ip: *InternPool) *FuncAnalysis {
fn analysisPtr(func: Func, ip: *const InternPool) *FuncAnalysis {
const extra = ip.getLocalShared(func.tid).extra.acquire();
return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]);
}
pub fn analysisUnordered(func: Func, ip: *const InternPool) FuncAnalysis {
return @atomicLoad(FuncAnalysis, func.analysisPtr(@constCast(ip)), .unordered);
return @atomicLoad(FuncAnalysis, func.analysisPtr(ip), .unordered);
}
pub fn setAnalysisState(func: Func, ip: *InternPool, state: FuncAnalysis.State) void {
@@ -2144,23 +2140,23 @@ pub const Key = union(enum) {
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
fn zirBodyInstPtr(func: Func, ip: *InternPool) *TrackedInst.Index {
fn zirBodyInstPtr(func: Func, ip: *const InternPool) *TrackedInst.Index {
const extra = ip.getLocalShared(func.tid).extra.acquire();
return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]);
}
pub fn zirBodyInstUnordered(func: Func, ip: *const InternPool) TrackedInst.Index {
return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(@constCast(ip)), .unordered);
return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(ip), .unordered);
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
fn branchQuotaPtr(func: Func, ip: *InternPool) *u32 {
fn branchQuotaPtr(func: Func, ip: *const InternPool) *u32 {
const extra = ip.getLocalShared(func.tid).extra.acquire();
return &extra.view().items(.@"0")[func.branch_quota_extra_index];
}
pub fn branchQuotaUnordered(func: Func, ip: *const InternPool) u32 {
return @atomicLoad(u32, func.branchQuotaPtr(@constCast(ip)), .unordered);
return @atomicLoad(u32, func.branchQuotaPtr(ip), .unordered);
}
pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void {
@@ -2173,14 +2169,14 @@ pub const Key = union(enum) {
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
fn resolvedErrorSetPtr(func: Func, ip: *InternPool) *Index {
fn resolvedErrorSetPtr(func: Func, ip: *const InternPool) *Index {
const extra = ip.getLocalShared(func.tid).extra.acquire();
assert(func.analysisUnordered(ip).inferred_error_set);
return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]);
}
pub fn resolvedErrorSetUnordered(func: Func, ip: *const InternPool) Index {
return @atomicLoad(Index, func.resolvedErrorSetPtr(@constCast(ip)), .unordered);
return @atomicLoad(Index, func.resolvedErrorSetPtr(ip), .unordered);
}
pub fn setResolvedErrorSet(func: Func, ip: *InternPool, ies: Index) void {
@@ -3135,14 +3131,14 @@ pub const LoadedUnionType = struct {
/// This accessor is provided so that the tag type can be mutated, and so that
/// when it is mutated, the mutations are observed.
/// The returned pointer expires with any addition to the `InternPool`.
fn tagTypePtr(self: LoadedUnionType, ip: *InternPool) *Index {
fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index {
const extra = ip.getLocalShared(self.tid).extra.acquire();
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?;
return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
}
pub fn tagTypeUnordered(u: LoadedUnionType, ip: *const InternPool) Index {
return @atomicLoad(Index, u.tagTypePtr(@constCast(ip)), .unordered);
return @atomicLoad(Index, u.tagTypePtr(ip), .unordered);
}
pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void {
@@ -3154,14 +3150,14 @@ pub const LoadedUnionType = struct {
}
/// The returned pointer expires with any addition to the `InternPool`.
fn flagsPtr(self: LoadedUnionType, ip: *InternPool) *Tag.TypeUnion.Flags {
fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags {
const extra = ip.getLocalShared(self.tid).extra.acquire();
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?;
return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]);
}
pub fn flagsUnordered(u: LoadedUnionType, ip: *const InternPool) Tag.TypeUnion.Flags {
return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(@constCast(ip)), .unordered);
return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(ip), .unordered);
}
pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void {
@@ -3254,25 +3250,25 @@ pub const LoadedUnionType = struct {
}
/// The returned pointer expires with any addition to the `InternPool`.
fn sizePtr(self: LoadedUnionType, ip: *InternPool) *u32 {
fn sizePtr(self: LoadedUnionType, ip: *const InternPool) *u32 {
const extra = ip.getLocalShared(self.tid).extra.acquire();
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?;
return &extra.view().items(.@"0")[self.extra_index + field_index];
}
pub fn sizeUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
return @atomicLoad(u32, u.sizePtr(@constCast(ip)), .unordered);
return @atomicLoad(u32, u.sizePtr(ip), .unordered);
}
/// The returned pointer expires with any addition to the `InternPool`.
fn paddingPtr(self: LoadedUnionType, ip: *InternPool) *u32 {
fn paddingPtr(self: LoadedUnionType, ip: *const InternPool) *u32 {
const extra = ip.getLocalShared(self.tid).extra.acquire();
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?;
return &extra.view().items(.@"0")[self.extra_index + field_index];
}
pub fn paddingUnordered(u: LoadedUnionType, ip: *const InternPool) u32 {
return @atomicLoad(u32, u.paddingPtr(@constCast(ip)), .unordered);
return @atomicLoad(u32, u.paddingPtr(ip), .unordered);
}
pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool {
@@ -3480,7 +3476,7 @@ pub const LoadedStructType = struct {
if (i >= s.field_types.len) return null;
return i;
};
const map = names_map.getConst(ip);
const map = names_map.get(ip);
const adapter: NullTerminatedString.Adapter = .{ .strings = s.field_names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
@@ -3523,7 +3519,7 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
fn flagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags {
fn flagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags {
assert(s.layout != .@"packed");
const extra = ip.getLocalShared(s.tid).extra.acquire();
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
@@ -3531,12 +3527,12 @@ pub const LoadedStructType = struct {
}
pub fn flagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStruct.Flags {
return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(@constCast(ip)), .unordered);
return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(ip), .unordered);
}
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts that the struct is packed.
fn packedFlagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags {
fn packedFlagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags {
assert(s.layout == .@"packed");
const extra = ip.getLocalShared(s.tid).extra.acquire();
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
@@ -3544,7 +3540,7 @@ pub const LoadedStructType = struct {
}
pub fn packedFlagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStructPacked.Flags {
return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(@constCast(ip)), .unordered);
return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(ip), .unordered);
}
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
@@ -3794,7 +3790,7 @@ pub const LoadedStructType = struct {
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
fn sizePtr(s: LoadedStructType, ip: *InternPool) *u32 {
fn sizePtr(s: LoadedStructType, ip: *const InternPool) *u32 {
assert(s.layout != .@"packed");
const extra = ip.getLocalShared(s.tid).extra.acquire();
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
@@ -3802,14 +3798,14 @@ pub const LoadedStructType = struct {
}
pub fn sizeUnordered(s: LoadedStructType, ip: *const InternPool) u32 {
return @atomicLoad(u32, s.sizePtr(@constCast(ip)), .unordered);
return @atomicLoad(u32, s.sizePtr(ip), .unordered);
}
/// The backing integer type of the packed struct. Whether zig chooses
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
fn backingIntTypePtr(s: LoadedStructType, ip: *InternPool) *Index {
fn backingIntTypePtr(s: LoadedStructType, ip: *const InternPool) *Index {
assert(s.layout == .@"packed");
const extra = ip.getLocalShared(s.tid).extra.acquire();
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
@@ -3817,7 +3813,7 @@ pub const LoadedStructType = struct {
}
pub fn backingIntTypeUnordered(s: LoadedStructType, ip: *const InternPool) Index {
return @atomicLoad(Index, s.backingIntTypePtr(@constCast(ip)), .unordered);
return @atomicLoad(Index, s.backingIntTypePtr(ip), .unordered);
}
pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void {
@@ -4190,7 +4186,7 @@ pub const LoadedEnumType = struct {
/// Look up field index based on field name.
pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = self.names_map.getConst(ip);
const map = self.names_map.get(ip);
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
@@ -4210,7 +4206,7 @@ pub const LoadedEnumType = struct {
else => unreachable,
};
if (self.values_map.unwrap()) |values_map| {
const map = values_map.getConst(ip);
const map = values_map.get(ip);
const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) };
const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null;
return @intCast(field_index);
@@ -11731,7 +11727,7 @@ pub fn isFuncBody(ip: *const InternPool, func: Index) bool {
};
}
fn funcAnalysisPtr(ip: *InternPool, func: Index) *FuncAnalysis {
fn funcAnalysisPtr(ip: *const InternPool, func: Index) *FuncAnalysis {
const unwrapped_func = func.unwrap(ip);
const extra = unwrapped_func.getExtra(ip);
const item = unwrapped_func.getItem(ip);
@@ -11757,7 +11753,7 @@ fn funcAnalysisPtr(ip: *InternPool, func: Index) *FuncAnalysis {
}
pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis {
return @atomicLoad(FuncAnalysis, @constCast(ip).funcAnalysisPtr(func), .unordered);
return @atomicLoad(FuncAnalysis, ip.funcAnalysisPtr(func), .unordered);
}
pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void {
@@ -11833,7 +11829,7 @@ fn iesResolvedPtr(ip: *InternPool, ies_index: Index) *Index {
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index {
fn funcIesResolvedPtr(ip: *const InternPool, func_index: Index) *Index {
assert(ip.funcAnalysisUnordered(func_index).inferred_error_set);
const unwrapped_func = func_index.unwrap(ip);
const func_extra = unwrapped_func.getExtra(ip);
@@ -11861,7 +11857,7 @@ fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index {
}
pub fn funcIesResolvedUnordered(ip: *const InternPool, index: Index) Index {
return @atomicLoad(Index, @constCast(ip).funcIesResolvedPtr(index), .unordered);
return @atomicLoad(Index, ip.funcIesResolvedPtr(index), .unordered);
}
pub fn funcSetIesResolved(ip: *InternPool, index: Index, ies: Index) void {