diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index 58a9d78974..0e2a0753ea 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -23,9 +23,7 @@ fn processArgs() void {
 }
 
 pub fn main() void {
-    if (builtin.zig_backend != .stage1 and
-        builtin.zig_backend != .stage2_llvm)
-    {
+    if (builtin.zig_backend != .stage1) {
         return main2() catch @panic("test failure");
     }
     if (builtin.zig_backend == .stage1) processArgs();
@@ -144,7 +142,8 @@ pub fn main2() anyerror!void {
         };
     }
     if (builtin.zig_backend == .stage2_wasm or
-        builtin.zig_backend == .stage2_x86_64)
+        builtin.zig_backend == .stage2_x86_64 or
+        builtin.zig_backend == .stage2_llvm)
     {
         const passed = builtin.test_functions.len - skipped - failed;
         const stderr = std.io.getStdErr();
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 3fe9630c75..cb710af900 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -1340,9 +1340,19 @@ fn arrayInitExpr(
                 return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, elem_type, .array_init);
             }
         },
-        .ptr, .inferred_ptr => |ptr_inst| {
+        .ptr => |ptr_inst| {
             return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
         },
+        .inferred_ptr => |ptr_inst| {
+            if (types.array == .none) {
+                // We treat this case differently so that we don't get a crash when
+                // analyzing array_base_ptr against an alloc_inferred_mut.
+                const result = try arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon);
+                return rvalue(gz, rl, result, node);
+            } else {
+                return arrayInitExprRlPtr(gz, scope, rl, node, ptr_inst, array_init.ast.elements, types.array);
+            }
+        },
         .block_ptr => |block_gz| {
             return arrayInitExprRlPtr(gz, scope, rl, node, block_gz.rl_ptr, array_init.ast.elements, types.array);
         },
diff --git a/src/Sema.zig b/src/Sema.zig
index 774b0800e0..ad8c98d6ad 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -16458,11 +16458,13 @@ fn storePtr2(
     // To generate better code for tuples, we detect a tuple operand here, and
     // analyze field loads and stores directly. This avoids an extra allocation + memcpy
     // which would occur if we used `coerce`.
+    // However, we avoid this mechanism if the destination element type is
+    // the same tuple as the source, because the regular store will be better for this case.
     const operand_ty = sema.typeOf(uncasted_operand);
-    if (operand_ty.castTag(.tuple)) |payload| {
-        const tuple_fields_len = payload.data.types.len;
-        var i: u32 = 0;
-        while (i < tuple_fields_len) : (i += 1) {
+    if (operand_ty.isTuple() and !elem_ty.eql(operand_ty)) {
+        const tuple = operand_ty.tupleFields();
+        for (tuple.types) |_, i_usize| {
+            const i = @intCast(u32, i_usize);
             const elem_src = operand_src; // TODO better source location
             const elem = try tupleField(sema, block, uncasted_operand, i, operand_src, elem_src);
             const elem_index = try sema.addIntUnsigned(Type.usize, i);
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 307f0fc85f..fdf6da15a8 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -976,12 +976,13 @@ test "fully anonymous list literal" {
 }
 
 test "tuple assigned to variable" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     var vec = .{ @as(i32, 22), @as(i32, 55), @as(i32, 99) };
     try expect(vec.@"0" == 22);
     try expect(vec.@"1" == 55);
     try expect(vec.@"2" == 99);
+    try expect(vec[0] == 22);
+    try expect(vec[1] == 55);
+    try expect(vec[2] == 99);
 }
 
 test "comptime struct field" {