| 1 | // RUN: %clang_cc1 -triple arm-none-linux-gnueabi -target-feature +neon \ |
| 2 | // RUN: -target-feature +crypto -target-cpu cortex-a57 -emit-llvm -O1 -o - %s | FileCheck %s |
| 3 | |
| 4 | // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ |
| 5 | // RUN: -target-feature +crypto -emit-llvm -O1 -o - %s | FileCheck %s |
| 6 | // RUN: not %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \ |
| 7 | // RUN: -S -O3 -o - %s 2>&1 | FileCheck --check-prefix=CHECK-NO-CRYPTO %s |
| 8 | |
| 9 | // Test new aarch64 intrinsics and types |
| 10 | |
| 11 | #include <arm_neon.h> |
| 12 | |
// AES single-round encryption: must lower to the aese intrinsic
// (llvm.arm.neon.aese on AArch32, llvm.aarch64.crypto.aese on AArch64,
// per the two RUN triples above). The CHECK-NO-CRYPTO line verifies the
// intrinsic is unavailable when -target-feature +crypto is absent.
uint8x16_t test_vaeseq_u8(uint8x16_t data, uint8x16_t key) {
  // CHECK-LABEL: @test_vaeseq_u8
  // CHECK-NO-CRYPTO: warning: implicit declaration of function 'vaeseq_u8' is invalid in C99
  return vaeseq_u8(data, key);
  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aese(<16 x i8> %data, <16 x i8> %key)
}
| 19 | |
// AES single-round decryption: must lower to the aesd intrinsic on both
// the AArch32 (arm.neon) and AArch64 (aarch64.crypto) targets.
uint8x16_t test_vaesdq_u8(uint8x16_t data, uint8x16_t key) {
  // CHECK-LABEL: @test_vaesdq_u8
  return vaesdq_u8(data, key);
  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesd(<16 x i8> %data, <16 x i8> %key)
}
| 25 | |
// AES MixColumns: must lower to the aesmc intrinsic on both targets.
uint8x16_t test_vaesmcq_u8(uint8x16_t data) {
  // CHECK-LABEL: @test_vaesmcq_u8
  return vaesmcq_u8(data);
  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesmc(<16 x i8> %data)
}
| 31 | |
// AES inverse MixColumns: must lower to the aesimc intrinsic on both targets.
uint8x16_t test_vaesimcq_u8(uint8x16_t data) {
  // CHECK-LABEL: @test_vaesimcq_u8
  return vaesimcq_u8(data);
  // CHECK: call <16 x i8> @llvm.{{arm.neon|aarch64.crypto}}.aesimc(<16 x i8> %data)
}
| 37 | |
// SHA1 fixed rotate (scalar i32 in/out): must lower to the sha1h intrinsic.
uint32_t test_vsha1h_u32(uint32_t hash_e) {
  // CHECK-LABEL: @test_vsha1h_u32
  return vsha1h_u32(hash_e);
  // CHECK: call i32 @llvm.{{arm.neon|aarch64.crypto}}.sha1h(i32 %hash_e)
}
| 43 | |
// SHA1 schedule update 1: must lower to the two-operand sha1su1 intrinsic.
uint32x4_t test_vsha1su1q_u32(uint32x4_t w0_3, uint32x4_t w12_15) {
  // CHECK-LABEL: @test_vsha1su1q_u32
  return vsha1su1q_u32(w0_3, w12_15);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1su1(<4 x i32> %w0_3, <4 x i32> %w12_15)
}
| 49 | |
// SHA256 schedule update 0: must lower to the two-operand sha256su0 intrinsic.
uint32x4_t test_vsha256su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7) {
  // CHECK-LABEL: @test_vsha256su0q_u32
  return vsha256su0q_u32(w0_3, w4_7);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
}
| 55 | |
// SHA1 hash update (choose): must lower to sha1c with the mixed
// vector/scalar operand order (<4 x i32>, i32, <4 x i32>) preserved.
uint32x4_t test_vsha1cq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
  // CHECK-LABEL: @test_vsha1cq_u32
  return vsha1cq_u32(hash_abcd, hash_e, wk);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
}
| 61 | |
// SHA1 hash update (parity): must lower to sha1p with the same mixed
// vector/scalar operand order as sha1c.
uint32x4_t test_vsha1pq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
  // CHECK-LABEL: @test_vsha1pq_u32
  return vsha1pq_u32(hash_abcd, hash_e, wk);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
}
| 67 | |
// SHA1 hash update (majority): must lower to sha1m with the same mixed
// vector/scalar operand order as sha1c.
uint32x4_t test_vsha1mq_u32(uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk) {
  // CHECK-LABEL: @test_vsha1mq_u32
  return vsha1mq_u32(hash_abcd, hash_e, wk);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
}
| 73 | |
// SHA1 schedule update 0: must lower to the three-operand sha1su0 intrinsic.
uint32x4_t test_vsha1su0q_u32(uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11) {
  // CHECK-LABEL: @test_vsha1su0q_u32
  return vsha1su0q_u32(w0_3, w4_7, w8_11);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha1su0(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
}
| 79 | |
// SHA256 hash update part 1: must lower to the three-operand sha256h intrinsic.
uint32x4_t test_vsha256hq_u32(uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk) {
  // CHECK-LABEL: @test_vsha256hq_u32
  return vsha256hq_u32(hash_abcd, hash_efgh, wk);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
}
| 85 | |
// SHA256 hash update part 2: must lower to sha256h2. Note the first two
// operands (efgh, abcd) are swapped relative to sha256h, and the CHECK
// line pins that order.
uint32x4_t test_vsha256h2q_u32(uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk) {
  // CHECK-LABEL: @test_vsha256h2q_u32
  return vsha256h2q_u32(hash_efgh, hash_abcd, wk);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
}
| 91 | |
// SHA256 schedule update 1: must lower to the three-operand sha256su1 intrinsic.
uint32x4_t test_vsha256su1q_u32(uint32x4_t w0_3, uint32x4_t w8_11, uint32x4_t w12_15) {
  // CHECK-LABEL: @test_vsha256su1q_u32
  return vsha256su1q_u32(w0_3, w8_11, w12_15);
  // CHECK: call <4 x i32> @llvm.{{arm.neon|aarch64.crypto}}.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
}
| 97 | |