diff --git a/src/AstGen.zig b/src/AstGen.zig
index aece3eafec3c..2568b899808f 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -7907,6 +7907,48 @@ fn typeOf(
     return rvalue(gz, ri, typeof_inst, node);
 }
 
+fn minMax(
+    gz: *GenZir,
+    scope: *Scope,
+    ri: ResultInfo,
+    node: Ast.Node.Index,
+    args: []const Ast.Node.Index,
+    comptime op: enum { min, max },
+) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
+    if (args.len < 2) {
+        return astgen.failNode(node, "expected at least 2 arguments, found {d}", .{args.len});
+    }
+    if (args.len == 2) {
+        const tag: Zir.Inst.Tag = switch (op) {
+            .min => .min,
+            .max => .max,
+        };
+        const a = try expr(gz, scope, .{ .rl = .none }, args[0]);
+        const b = try expr(gz, scope, .{ .rl = .none }, args[1]);
+        const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
+            .lhs = a,
+            .rhs = b,
+        });
+        return rvalue(gz, ri, result, node);
+    }
+    const payload_index = try addExtra(astgen, Zir.Inst.NodeMultiOp{
+        .src_node = gz.nodeIndexToRelative(node),
+    });
+    var extra_index = try reserveExtra(gz.astgen, args.len);
+    for (args) |arg| {
+        const arg_ref = try expr(gz, scope, .{ .rl = .none }, arg);
+        astgen.extra.items[extra_index] = @enumToInt(arg_ref);
+        extra_index += 1;
+    }
+    const tag: Zir.Inst.Extended = switch (op) {
+        .min => .min_multi,
+        .max => .max_multi,
+    };
+    const result = try gz.addExtendedMultiOpPayloadIndex(tag, payload_index, args.len);
+    return rvalue(gz, ri, result, node);
+}
+
 fn builtinCall(
     gz: *GenZir,
     scope: *Scope,
@@ -7997,6 +8039,8 @@ fn builtinCall(
         .TypeOf     => return typeOf(   gz, scope, ri, node, params),
         .union_init => return unionInit(gz, scope, ri, node, params),
         .c_import   => return cImport(  gz, scope, node, params[0]),
+        .min        => return minMax(   gz, scope, ri, node, params, .min),
+        .max        => return minMax(   gz, scope, ri, node, params, .max),
         // zig fmt: on
 
         .@"export" => {
@@ -8358,25 +8402,6 @@ fn builtinCall(
             return rvalue(gz, ri, result, node);
         },
 
-        .max => {
-            const a = try expr(gz, scope, .{ .rl = .none }, params[0]);
-            const b = try expr(gz, scope, .{ .rl = .none }, params[1]);
-            const result = try gz.addPlNode(.max, node, Zir.Inst.Bin{
-                .lhs = a,
-                .rhs = b,
-            });
-            return rvalue(gz, ri, result, node);
-        },
-        .min => {
-            const a = try expr(gz, scope, .{ .rl = .none }, params[0]);
-            const b = try expr(gz, scope, .{ .rl = .none }, params[1]);
-            const result = try gz.addPlNode(.min, node, Zir.Inst.Bin{
-                .lhs = a,
-                .rhs = b,
-            });
-            return rvalue(gz, ri, result, node);
-        },
-
         .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow),
         .sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow),
         .mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow),
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index d31a1e7c25a7..426939afdf10 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -608,7 +608,7 @@ pub const list = list: {
             "@max",
             .{
                 .tag = .max,
-                .param_count = 2,
+                .param_count = null,
             },
         },
         .{
@@ -629,7 +629,7 @@ pub const list = list: {
             "@min",
            .{
                 .tag = .min,
-                .param_count = 2,
+                .param_count = null,
             },
         },
         .{
diff --git a/src/Sema.zig b/src/Sema.zig
index 79f2fd7fca9e..13f4d684a151 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1137,6 +1137,8 @@ fn analyzeBodyInner(
             .asm_expr           => try sema.zirAsm(               block, extended, true),
             .typeof_peer        => try sema.zirTypeofPeer(        block, extended),
             .compile_log        => try sema.zirCompileLog(        extended),
+            .min_multi          => try sema.zirMinMaxMulti(       block, extended, .min),
+            .max_multi          => try sema.zirMinMaxMulti(       block, extended, .max),
             .add_with_overflow  => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
             .sub_with_overflow  => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
             .mul_with_overflow  => try sema.zirOverflowArithmetic(block, extended, extended.opcode),
@@ -12143,7 +12145,7 @@ fn zirShl(
                     lhs_ty,
                     try lhs_ty.maxInt(sema.arena, target),
                 );
-                const rhs_limited = try sema.analyzeMinMax(block, rhs_src, rhs, max_int, .min, rhs_src, rhs_src);
+                const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
                 break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
             } else {
                 break :rhs rhs;
@@ -21752,64 +21754,223 @@ fn zirMinMax(
     const rhs = try sema.resolveInst(extra.rhs);
     try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs));
     try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs));
-    return sema.analyzeMinMax(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
+    return sema.analyzeMinMax(block, src, air_tag, &.{ lhs, rhs }, &.{ lhs_src, rhs_src });
+}
+
+fn zirMinMaxMulti(
+    sema: *Sema,
+    block: *Block,
+    extended: Zir.Inst.Extended.InstData,
+    comptime air_tag: Air.Inst.Tag,
+) CompileError!Air.Inst.Ref {
+    const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
+    const src_node = extra.data.src_node;
+    const src = LazySrcLoc.nodeOffset(src_node);
+    const operands = sema.code.refSlice(extra.end, extended.small);
+
+    const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
+    const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len);
+
+    for (operands, air_refs, operand_srcs, 0..) |zir_ref, *air_ref, *op_src, i| {
+        op_src.* = switch (i) {
+            0 => .{ .node_offset_builtin_call_arg0 = src_node },
+            1 => .{ .node_offset_builtin_call_arg1 = src_node },
+            2 => .{ .node_offset_builtin_call_arg2 = src_node },
+            3 => .{ .node_offset_builtin_call_arg3 = src_node },
+            4 => .{ .node_offset_builtin_call_arg4 = src_node },
+            5 => .{ .node_offset_builtin_call_arg5 = src_node },
+            else => src, // TODO: better source location
+        };
+        air_ref.* = try sema.resolveInst(zir_ref);
+        try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*));
+    }
+
+    return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs);
 }
 
 fn analyzeMinMax(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    lhs: Air.Inst.Ref,
-    rhs: Air.Inst.Ref,
     comptime air_tag: Air.Inst.Tag,
-    lhs_src: LazySrcLoc,
-    rhs_src: LazySrcLoc,
+    operands: []const Air.Inst.Ref,
+    operand_srcs: []const LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
-    const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
+    assert(operands.len == operand_srcs.len);
+    assert(operands.len > 0);
 
-    // TODO @max(max_int, undefined) should return max_int
+    if (operands.len == 1) return operands[0];
 
-    const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: {
-        if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty);
+    const mod = sema.mod;
+    const target = mod.getTarget();
+    const opFunc = switch (air_tag) {
+        .min => Value.numberMin,
+        .max => Value.numberMax,
+        else => unreachable,
+    };
 
-        const rhs_val = simd_op.rhs_val orelse break :rs rhs_src;
+    // First, find all comptime-known arguments, and get their min/max
+    var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len);
+    var cur_minmax: ?Air.Inst.Ref = null;
+    var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null
+    for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| {
+        // Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call
+        // it in the runtime path anyway since the result type may have been refined
+        const uncasted_operand_val = (try sema.resolveMaybeUndefVal(operand)) orelse continue;
+        if (cur_minmax) |cur| {
+            const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src);
+            const cur_val = simd_op.lhs_val.?; // cur_minmax is comptime-known
+            const operand_val = simd_op.rhs_val.?; // we checked the operand was resolvable above
+
+            runtime_known.unset(operand_idx);
+
+            if (cur_val.isUndef()) continue; // result is also undef
+            if (operand_val.isUndef()) {
+                cur_minmax = try sema.addConstUndef(simd_op.result_ty);
+                continue;
+            }
 
-        if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty);
+            try sema.resolveLazyValue(cur_val);
+            try sema.resolveLazyValue(operand_val);
 
-        try sema.resolveLazyValue(lhs_val);
-        try sema.resolveLazyValue(rhs_val);
+            const vec_len = simd_op.len orelse {
+                const result_val = opFunc(cur_val, operand_val, target);
+                cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
+                continue;
+            };
+            var lhs_buf: Value.ElemValueBuffer = undefined;
+            var rhs_buf: Value.ElemValueBuffer = undefined;
+            const elems = try sema.arena.alloc(Value, vec_len);
+            for (elems, 0..) |*elem, i| {
+                const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf);
+                const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf);
+                elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
+            }
+            cur_minmax = try sema.addConstant(
+                simd_op.result_ty,
+                try Value.Tag.aggregate.create(sema.arena, elems),
+            );
+        } else {
+            runtime_known.unset(operand_idx);
+            cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val);
+            cur_minmax_src = operand_src;
+        }
+    }
+
+    const comptime_refined_ty: ?Type = if (cur_minmax) |ct_minmax_ref| refined: {
+        // Refine the comptime-known result type based on the operation
+        const val = (try sema.resolveMaybeUndefVal(ct_minmax_ref)).?;
+        const orig_ty = sema.typeOf(ct_minmax_ref);
+        const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: {
+            const elem_ty = orig_ty.childType();
+            const len = orig_ty.vectorLen();
+
+            if (len == 0) break :blk orig_ty;
+            if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
+
+            var cur_min: Value = try val.elemValue(mod, sema.arena, 0);
+            var cur_max: Value = cur_min;
+            for (1..len) |idx| {
+                const elem_val = try val.elemValue(mod, sema.arena, idx);
+                if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef
+                if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val;
+                if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val;
+            }
+
+            const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max);
+            break :blk try Type.vector(sema.arena, len, refined_elem_ty);
+        } else blk: {
+            if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
+            if (val.isUndef()) break :blk orig_ty; // can't refine undef
+            break :blk try Type.intFittingRange(target, sema.arena, val, val);
+        };
+
+        // Apply the refined type to the current value - this isn't strictly necessary in the
+        // runtime case since we'll refine again afterwards, but keeping things as small as possible
+        // will allow us to emit more optimal AIR (if all the runtime operands have smaller types
+        // than the non-refined comptime type).
+        if (!refined_ty.eql(orig_ty, mod)) {
+            if (std.debug.runtime_safety) {
+                assert(try sema.intFitsInType(val, refined_ty, null));
+            }
+            cur_minmax = try sema.addConstant(refined_ty, val);
+        }
+
+        break :refined refined_ty;
+    } else null;
+
+    const runtime_idx = runtime_known.findFirstSet() orelse return cur_minmax.?;
+    const runtime_src = operand_srcs[runtime_idx];
+    try sema.requireRuntimeBlock(block, src, runtime_src);
+
+    // Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the
+    // type again at the end, based on the comptime-known bound.
+
+    // If the comptime-known part is undef we can avoid emitting actual instructions later
+    const known_undef = if (cur_minmax) |operand| blk: {
+        const val = (try sema.resolveMaybeUndefVal(operand)).?;
+        break :blk val.isUndef();
+    } else false;
+
+    if (cur_minmax == null) {
+        // No comptime operands - use the first operand as the starting value
+        assert(runtime_idx == 0);
+        cur_minmax = operands[0];
+        cur_minmax_src = runtime_src;
+        runtime_known.unset(0); // don't look at this operand in the loop below
+    }
+
+    var it = runtime_known.iterator(.{});
+    while (it.next()) |idx| {
+        const lhs = cur_minmax.?;
+        const lhs_src = cur_minmax_src;
+        const rhs = operands[idx];
+        const rhs_src = operand_srcs[idx];
+        const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src);
+        if (known_undef) {
+            cur_minmax = try sema.addConstant(simd_op.result_ty, Value.undef);
+        } else {
+            cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
+        }
+    }
+
+    if (comptime_refined_ty) |comptime_ty| refine: {
+        // Finally, refine the type based on the comptime-known bound.
+        if (known_undef) break :refine; // can't refine undef
+        const unrefined_ty = sema.typeOf(cur_minmax.?);
+        const is_vector = unrefined_ty.zigTypeTag() == .Vector;
+        const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
+        const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
+
+        if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
 
-        const opFunc = switch (air_tag) {
-            .min => Value.numberMin,
-            .max => Value.numberMax,
+        // Compute the final bounds based on the runtime type and the comptime-known bound type
+        const min_val = switch (air_tag) {
+            .min => try unrefined_elem_ty.minInt(sema.arena, target),
+            .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct
             else => unreachable,
         };
-        const target = sema.mod.getTarget();
-        const vec_len = simd_op.len orelse {
-            const result_val = opFunc(lhs_val, rhs_val, target);
-            return sema.addConstant(simd_op.result_ty, result_val);
+        const max_val = switch (air_tag) {
+            .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct
+            .max => try unrefined_elem_ty.maxInt(sema.arena, target),
+            else => unreachable,
         };
-        var lhs_buf: Value.ElemValueBuffer = undefined;
-        var rhs_buf: Value.ElemValueBuffer = undefined;
-        const elems = try sema.arena.alloc(Value, vec_len);
-        for (elems, 0..) |*elem, i| {
-            const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf);
-            elem.* = opFunc(lhs_elem_val, rhs_elem_val, target);
-        }
-        return sema.addConstant(
-            simd_op.result_ty,
-            try Value.Tag.aggregate.create(sema.arena, elems),
-        );
-    } else rs: {
-        if (simd_op.rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty);
+
+        // Find the smallest type which can contain these bounds
+        const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val);
+
+        const final_ty = if (is_vector)
+            try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
+        else
+            final_elem_ty;
+
+        if (!final_ty.eql(unrefined_ty, mod)) {
+            // We've reduced the type - cast the result down
+            return block.addTyOp(.intcast, final_ty, cur_minmax.?);
         }
-        break :rs lhs_src;
-    };
+    }
 
-    try sema.requireRuntimeBlock(block, src, runtime_src);
-    return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs);
+    return cur_minmax.?;
 }
 
 fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
diff --git a/src/Zir.zig b/src/Zir.zig
index ab33b625f718..51c90c61cbc6 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -927,10 +927,10 @@ pub const Inst = struct {
         /// Implements the `@memset` builtin.
         /// Uses the `pl_node` union field with payload `Bin`.
         memset,
-        /// Implements the `@min` builtin.
+        /// Implements the `@min` builtin for 2 args.
         /// Uses the `pl_node` union field with payload `Bin`
         min,
-        /// Implements the `@max` builtin.
+        /// Implements the `@max` builtin for 2 args.
         /// Uses the `pl_node` union field with payload `Bin`
         max,
         /// Implements the `@cImport` builtin.
@@ -1905,10 +1905,20 @@ pub const Inst = struct {
         compile_log,
         /// The builtin `@TypeOf` which returns the type after Peer Type Resolution
         /// of one or more params.
-        /// `operand` is payload index to `NodeMultiOp`.
+        /// `operand` is payload index to `TypeOfPeer`.
         /// `small` is `operands_len`.
         /// The AST node is the builtin call.
         typeof_peer,
+        /// Implements the `@min` builtin for more than 2 args.
+        /// `operand` is payload index to `NodeMultiOp`.
+        /// `small` is `operands_len`.
+        /// The AST node is the builtin call.
+        min_multi,
+        /// Implements the `@max` builtin for more than 2 args.
+        /// `operand` is payload index to `NodeMultiOp`.
+        /// `small` is `operands_len`.
+        /// The AST node is the builtin call.
+        max_multi,
         /// Implements the `@addWithOverflow` builtin.
         /// `operand` is payload index to `BinNode`.
         /// `small` is unused.
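(Aside, not part of the patch: the control flow of `analyzeMinMax` above is "fold every comptime-known operand into a single bound, then chain one binary min/max per runtime operand". Below is a minimal stand-alone Zig sketch of that two-phase ordering, using plain u32 values where Sema actually works on `Air.Inst.Ref`s and `Value`s; the helper name `modelMin` is made up for illustration.

const std = @import("std");

// Phase 1 mirrors the `cur_minmax` accumulator: fold the comptime-known operands.
// Phase 2 mirrors `block.addBinOp`: apply one binary @min per runtime operand.
fn modelMin(comptime ct_operands: []const u32, rt_operands: []const u32) u32 {
    comptime var bound: u32 = ct_operands[0]; // assumes at least one comptime operand
    inline for (ct_operands[1..]) |v| bound = @min(bound, v);

    var result: u32 = bound;
    for (rt_operands) |v| result = @min(result, v);
    return result;
}

test "comptime operands are folded before any runtime work" {
    var runtime_vals = [_]u32{ 40, 7, 19 };
    try std.testing.expectEqual(@as(u32, 5), modelMin(&.{ 12, 5 }, &runtime_vals));
}

The real implementation additionally tracks undef operands and refines the result type from the comptime bound; the sketch omits both.)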
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index a658103c1a96..dd093508b1bc 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -4298,7 +4298,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
 
     const val_ty = ptr_info.pointee_type;
     const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
-    const limb_abi_size = @min(val_abi_size, 8);
+    const limb_abi_size: u32 = @min(val_abi_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
     const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
     const val_bit_off = ptr_info.bit_offset % limb_abi_bits;
@@ -4434,7 +4434,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
     const ptr_info = ptr_ty.ptrInfo().data;
     const src_ty = ptr_ty.childType();
 
-    const limb_abi_size = @min(ptr_info.host_size, 8);
+    const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
 
     const src_bit_size = src_ty.bitSize(self.target.*);
@@ -4652,7 +4652,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     }
 
     const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*));
-    const limb_abi_size = @min(field_abi_size, 8);
+    const limb_abi_size: u32 = @min(field_abi_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
     const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
     const field_bit_off = field_off % limb_abi_bits;
@@ -5875,7 +5875,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s
         },
         .memory, .indirect, .load_got, .load_direct, .load_tlv, .load_frame => {
             const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock };
-            const limb_abi_size = @min(abi_size, 8);
+            const limb_abi_size: u32 = @min(abi_size, 8);
 
             const dst_info: OpInfo = switch (dst_mcv) {
                 else => unreachable,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 922366dc8572..4e4f13641c33 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -482,6 +482,8 @@ const Writer = struct {
 
             .compile_log => try self.writeNodeMultiOp(stream, extended),
             .typeof_peer => try self.writeTypeofPeer(stream, extended),
+            .min_multi => try self.writeNodeMultiOp(stream, extended),
+            .max_multi => try self.writeNodeMultiOp(stream, extended),
 
             .select => try self.writeSelect(stream, extended),
diff --git a/src/type.zig b/src/type.zig
index b25f13342d0e..c9a6f49d3e28 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -6723,7 +6723,17 @@ pub const Type = extern union {
 
     pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
         const bits = smallestUnsignedBits(max);
-        return switch (bits) {
+        return intWithBits(arena, false, bits);
+    }
+
+    pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type {
+        return if (sign) switch (bits) {
+            8 => initTag(.i8),
+            16 => initTag(.i16),
+            32 => initTag(.i32),
+            64 => initTag(.i64),
+            else => return Tag.int_signed.create(arena, bits),
+        } else switch (bits) {
             1 => initTag(.u1),
             8 => initTag(.u8),
             16 => initTag(.u16),
@@ -6733,6 +6743,61 @@ pub const Type = extern union {
         };
     }
 
+    /// Given a value representing an integer, returns the number of bits necessary to represent
+    /// this value in an integer. If `sign` is true, returns the number of bits necessary in a
+    /// twos-complement integer; otherwise in an unsigned integer.
+    /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
+    pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 {
+        assert(!val.isUndef());
+        switch (val.tag()) {
+            .int_big_positive => {
+                const limbs = val.castTag(.int_big_positive).?.data;
+                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
+                return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
+            },
+            .int_big_negative => {
+                const limbs = val.castTag(.int_big_negative).?.data;
+                // Zero is still a possibility, in which case unsigned is fine
+                for (limbs) |limb| {
+                    if (limb != 0) break;
+                } else return 0; // val == 0
+                assert(sign);
+                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
+                return @intCast(u16, big.bitCountTwosComp());
+            },
+            .int_i64 => {
+                const x = val.castTag(.int_i64).?.data;
+                if (x >= 0) return smallestUnsignedBits(@intCast(u64, x)) + @boolToInt(sign);
+                assert(sign);
+                return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
+            },
+            else => {
+                const x = val.toUnsignedInt(target);
+                return smallestUnsignedBits(x) + @boolToInt(sign);
+            },
+        }
+    }
+
+    /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither
+    /// value is undef.
+    /// TODO: if #3806 is implemented, this becomes trivial
+    pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type {
+        assert(!min.isUndef());
+        assert(!max.isUndef());
+
+        if (std.debug.runtime_safety) {
+            assert(Value.order(min, max, target).compare(.lte));
+        }
+
+        const sign = min.orderAgainstZero() == .lt;
+
+        const min_val_bits = intBitsForValue(target, min, sign);
+        const max_val_bits = intBitsForValue(target, max, sign);
+        const bits = @max(min_val_bits, max_val_bits);
+
+        return intWithBits(arena, sign, bits);
+    }
+
     /// This is only used for comptime asserts. Bump this number when you make a change
     /// to packed struct layout to find out all the places in the codebase you need to edit!
     pub const packed_struct_layout_version = 2;
diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig
index 9a4ae40eef21..d7b93c56c081 100644
--- a/test/behavior/maximum_minimum.zig
+++ b/test/behavior/maximum_minimum.zig
@@ -106,3 +106,59 @@ test "@min/@max on lazy values" {
     const size = @max(@sizeOf(A), @sizeOf(B));
     try expect(size == @sizeOf(B));
 }
+
+test "@min/@max more than two arguments" {
+    const x: u32 = 30;
+    const y: u32 = 10;
+    const z: u32 = 20;
+    try expectEqual(@as(u32, 10), @min(x, y, z));
+    try expectEqual(@as(u32, 30), @max(x, y, z));
+}
+
+test "@min/@max more than two vector arguments" {
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+    const x: @Vector(2, u32) = .{ 3, 2 };
+    const y: @Vector(2, u32) = .{ 4, 1 };
+    const z: @Vector(2, u32) = .{ 5, 0 };
+    try expectEqual(@Vector(2, u32){ 3, 0 }, @min(x, y, z));
+    try expectEqual(@Vector(2, u32){ 5, 2 }, @max(x, y, z));
+}
+
+test "@min/@max notices bounds" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+    var x: u16 = 20;
+    const y = 30;
+    var z: u32 = 100;
+    const min = @min(x, y, z);
+    const max = @max(x, y, z);
+    try expectEqual(x, min);
+    try expectEqual(u5, @TypeOf(min));
+    try expectEqual(z, max);
+    try expectEqual(u32, @TypeOf(max));
+}
+
+test "@min/@max notices vector bounds" {
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+
+    var x: @Vector(2, u16) = .{ 140, 40 };
+    const y: @Vector(2, u64) = .{ 5, 100 };
+    var z: @Vector(2, u32) = .{ 10, 300 };
+    const min = @min(x, y, z);
+    const max = @max(x, y, z);
+    try expectEqual(@Vector(2, u32){ 5, 40 }, min);
+    try expectEqual(@Vector(2, u7), @TypeOf(min));
+    try expectEqual(@Vector(2, u32){ 140, 300 }, max);
+    try expectEqual(@Vector(2, u32), @TypeOf(max));
+}
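(Aside, not part of the patch: a minimal sketch of how the bound refinement exercised by the tests above surfaces in user code, assuming only the behavior implemented in this change; the names `buf`, `requested`, and `n` are illustrative.

const std = @import("std");

test "@min against a comptime-known bound refines the result type" {
    var buf: [16]u8 = undefined;
    var requested: usize = 1000; // runtime-known
    // buf.len (16) is comptime-known, so the result can never exceed 16; Sema
    // refines the result type of @min to u5, so no explicit @intCast is needed.
    const n = @min(requested, buf.len);
    try std.testing.expectEqual(u5, @TypeOf(n));
    try std.testing.expectEqual(@as(usize, 16), n);
}

Before this change, the result type here would simply have been the peer type usize.)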