@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1489,8 +1489,14 @@ class MacroAssembler: public Assembler {
   void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

   // Simple version for AVX2 256bit vectors
-  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
-  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
+  void vpxor(XMMRegister dst, XMMRegister src) {
+    assert(UseAVX >= 2, "Should be at least AVX2");
+    Assembler::vpxor(dst, dst, src, AVX_256bit);
+  }
+  void vpxor(XMMRegister dst, Address src) {
+    assert(UseAVX >= 2, "Should be at least AVX2");
+    Assembler::vpxor(dst, dst, src, AVX_256bit);
+  }

   void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
   void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
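For context: the old one-line bodies passed the boolean `true` where `Assembler::vpxor` expects an `int vector_len`. HotSpot's x86 assembler defines the vector-length constants as `AVX_128bit = 0`, `AVX_256bit = 1`, `AVX_512bit = 2`, so `true` converted to `1` and happened to encode a 256-bit operation. The new bodies state the intent with the named constant and assert that the CPU actually supports AVX2 (`UseAVX >= 2`). Below is a minimal standalone sketch of the same pattern; `emit_vpxor` and the bare enum are hypothetical stand-ins for the HotSpot code, not part of the commit.

// Standalone sketch (hypothetical names, not HotSpot code): why passing
// `true` for an int vector-length parameter worked only by coincidence,
// and why a named constant plus an assert is safer.
#include <cassert>

// Mirrors the numeric values of HotSpot's Assembler vector-length enum.
enum AvxVectorLen { AVX_128bit = 0, AVX_256bit = 1, AVX_512bit = 2 };

// Hypothetical stand-in for Assembler::vpxor's final parameter.
void emit_vpxor(int vector_len) {
  assert(vector_len == AVX_128bit || vector_len == AVX_256bit ||
         vector_len == AVX_512bit);
  // ... encode the instruction for the requested vector width ...
}

int main() {
  emit_vpxor(true);        // compiles: bool converts to 1 == AVX_256bit,
                           // correct only because of the enum's values
  emit_vpxor(AVX_256bit);  // explicit, self-documenting, greppable
  return 0;
}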
|