stage2: fix tuple assigned to variable

Before this we would see ZIR code like this:
```
%69 = alloc_inferred_mut()
%70 = array_base_ptr(%69)
%71 = elem_ptr_imm(%70, 0)
```
This would crash the compiler because it expects to see a
`coerce_result_ptr` instruction after `alloc_inferred_mut`, but that
does not happen in this case because there is no type to coerce the
result pointer to.

In this commit I modified AstGen so that it has codegen similar to when
using a `const` instead of a `var`:
```
%69 = alloc_inferred_mut()
%76 = array_init_anon(.{%71, %73, %75})
%77 = store_to_inferred_ptr(%69, %76)
```

This does not obey result locations, meaning if you call a function
inside the initializer, it will end up doing a copy into the LHS.
Solving this problem, or changing the language to make this legal,
will be left for my future self to deal with. Hi future self!
I see you reading this commit log. Hope you're doing OK buddy.

Sema for `store_ptr` of a tuple, where the pointer's element type is in
fact the same tuple type as the operand, had an issue where the comptime
fields would get incorrectly lowered to runtime stores to bogus
addresses. This is
solved with an exception to the optimization in Sema for storing
pointers that handles tuples element-wise. In the case that we are
storing a tuple to itself, it skips the optimization. This results in
better code and avoids the problem. However this caused a regression in
GeneralPurposeAllocator from the standard library.

I regressed the test runner code back to the simpler path. It's too
hard to debug standard library code in the LLVM backend right now since
we don't have debug info hooked up. Also, we didn't have any behavior
test coverage of whatever was regressed, so let's try to get that
coverage added as a stepping stone to getting the standard library
working.
This commit is contained in:
Andrew Kelley 2022-03-04 17:19:36 -07:00
parent d3648cc030
commit f2a5d0bf94
4 changed files with 23 additions and 11 deletions

View File

@ -23,9 +23,7 @@ fn processArgs() void {
}
pub fn main() void {
if (builtin.zig_backend != .stage1 and
builtin.zig_backend != .stage2_llvm)
{
if (builtin.zig_backend != .stage1) {
return main2() catch @panic("test failure");
}
if (builtin.zig_backend == .stage1) processArgs();
@ -144,7 +142,8 @@ pub fn main2() anyerror!void {
};
}
if (builtin.zig_backend == .stage2_wasm or
builtin.zig_backend == .stage2_x86_64)
builtin.zig_backend == .stage2_x86_64 or
builtin.zig_backend == .stage2_llvm)
{
const passed = builtin.test_functions.len - skipped - failed;
const stderr = std.io.getStdErr();

View File

@ -1340,9 +1340,19 @@ fn arrayInitExpr(
return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, elem_type, .array_init);
}
},
.ptr, .inferred_ptr => |ptr_inst| {
.ptr => |ptr_inst| {
return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
},
.inferred_ptr => |ptr_inst| {
if (types.array == .none) {
// We treat this case differently so that we don't get a crash when
// analyzing array_base_ptr against an alloc_inferred_mut.
const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon);
return rvalue(gz, rl, result, node);
} else {
return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
}
},
.block_ptr => |block_gz| {
return arrayInitExprRlPtr(gz, scope, rl, node, block_gz.rl_ptr, array_init.ast.elements, types.array);
},

View File

@ -16458,11 +16458,13 @@ fn storePtr2(
// To generate better code for tuples, we detect a tuple operand here, and
// analyze field loads and stores directly. This avoids an extra allocation + memcpy
// which would occur if we used `coerce`.
// However, we avoid this mechanism if the destination element type is
// the same tuple as the source, because the regular store will be better for this case.
const operand_ty = sema.typeOf(uncasted_operand);
if (operand_ty.castTag(.tuple)) |payload| {
const tuple_fields_len = payload.data.types.len;
var i: u32 = 0;
while (i < tuple_fields_len) : (i += 1) {
if (operand_ty.isTuple() and !elem_ty.eql(operand_ty)) {
const tuple = operand_ty.tupleFields();
for (tuple.types) |_, i_usize| {
const i = @intCast(u32, i_usize);
const elem_src = operand_src; // TODO better source location
const elem = try tupleField(sema, block, uncasted_operand, i, operand_src, elem_src);
const elem_index = try sema.addIntUnsigned(Type.usize, i);

View File

@ -976,12 +976,13 @@ test "fully anonymous list literal" {
}
test "tuple assigned to variable" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
try expect(vec.@"0" == 22);
try expect(vec.@"1" == 55);
try expect(vec.@"2" == 99);
try expect(vec[0] == 22);
try expect(vec[1] == 55);
try expect(vec[2] == 99);
}
test "comptime struct field" {