| 1 | // Test target codegen - host bc file has to be created first. |
| 2 | // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc |
| 3 | // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CK1 --check-prefix CK1-64 |
| 4 | // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc |
| 5 | // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CK1 --check-prefix CK1-32 |
| 6 | // expected-no-diagnostics |
| 7 | #ifndef HEADER |
| 8 | #define HEADER |
| 9 | |
| 10 | #ifdef CK1 |
| 11 | |
// Template version of the target+teams region; instantiated from main()
// below with T = char**, so the device kernel receives a pointer-to-pointer
// argument (checked by the "target region in template" CK1 block).
template <typename T>
int tmain(T argc) {
// Combined target + teams: on the NVPTX device side the teams region is
// expected to be emitted inline, with no __kmpc_fork_teams call
// (see the CK1-NOT check further down).
#pragma omp target
#pragma omp teams
  argc = 0;
  return 0;
}
| 19 | |
| 20 | |
// Host entry point; the target+teams region here captures the scalar
// 'argc', producing the i32 kernel parameter matched by the first CK1
// "define" check block.
int main (int argc, char **argv) {
// Teams region is not outlined on the device and no __kmpc_fork_teams is
// emitted; instead team-static memory is obtained via
// __kmpc_get_team_static_memory (see CK1 checks below).
#pragma omp target
#pragma omp teams
  {
    argc = 0;
  }
  // Instantiates tmain<char **>, generating the second device kernel.
  return tmain(argv);
}
| 29 | |
| 30 | // CK1: [[MEM_TY:%.+]] = type { [128 x i8] } |
| 31 | // CK1-DAG: [[SHARED_GLOBAL_RD:@.+]] = common addrspace(3) global [[MEM_TY]] zeroinitializer |
| 32 | // CK1-DAG: [[KERNEL_PTR:@.+]] = internal addrspace(3) global i8* null |
| 33 | // CK1-DAG: [[KERNEL_SIZE1:@.+]] = internal unnamed_addr constant i{{64|32}} 4 |
| 34 | // CK1-DAG: [[KERNEL_SIZE2:@.+]] = internal unnamed_addr constant i{{64|32}} {{8|4}} |
| 35 | // CK1-DAG: [[KERNEL_SHARED1:@.+]] = internal unnamed_addr constant i16 1 |
| 36 | // CK1-DAG: [[KERNEL_SHARED2:@.+]] = internal unnamed_addr constant i16 1 |
| 37 | |
| 38 | // only nvptx side: do not outline teams region and do not call fork_teams |
| 39 | // CK1: define {{.*}}void @{{[^,]+}}(i{{[0-9]+}} [[ARGC:%.+]]) |
| 40 | // CK1: {{.+}} = alloca i{{[0-9]+}}*, |
| 41 | // CK1: {{.+}} = alloca i{{[0-9]+}}*, |
| 42 | // CK1: [[ARGCADDR_PTR:%.+]] = alloca i{{[0-9]+}}*, |
| 43 | // CK1: [[ARGCADDR:%.+]] = alloca i{{[0-9]+}}, |
| 44 | // CK1: store {{.+}} 0, {{.+}}, |
| 45 | // CK1: store i{{[0-9]+}} [[ARGC]], i{{[0-9]+}}* [[ARGCADDR]], |
| 46 | // CK1-64: [[CONV:%.+]] = bitcast i{{[0-9]+}}* [[ARGCADDR]] to i{{[0-9]+}}* |
| 47 | // CK1: [[IS_SHARED:%.+]] = load i16, i16* [[KERNEL_SHARED1]], |
| 48 | // CK1: [[SIZE:%.+]] = load i{{64|32}}, i{{64|32}}* [[KERNEL_SIZE1]], |
| 49 | // CK1: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i{{64|32}} [[SIZE]], i16 [[IS_SHARED]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**)) |
| 50 | // CK1: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]], |
| 51 | // CK1: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i{{64|32}} 0 |
| 52 | // CK1-64: [[ARG:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[CONV]] |
| 53 | // CK1-32: [[ARG:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[ARGCADDR]] |
| 54 | // CK1: [[ARGCADDR:%.+]] = getelementptr inbounds %struct.{{.*}}, %struct.{{.*}}* %{{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 |
| 55 | // CK1: store i{{[0-9]+}} [[ARG]], i{{[0-9]+}}* [[ARGCADDR]], |
| 56 | // CK1: store i{{[0-9]+}}* [[ARGCADDR]], i{{[0-9]+}}** [[ARGCADDR_PTR]], |
| 57 | // CK1: [[ARGCADDR_PTR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[ARGCADDR_PTR]], |
| 58 | // CK1: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[ARGCADDR_PTR_REF]], |
| 59 | // CK1-NOT: call {{.*}}void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams( |
| 60 | // CK1: ret void |
| 61 | // CK1-NEXT: } |
| 62 | |
| 63 | // target region in template |
| 64 | // CK1: define {{.*}}void @{{[^,]+}}(i{{.+}}** [[ARGC:%.+]]) |
| 65 | // CK1: [[ARGCADDR_PTR:%.+]] = alloca i{{.+}}***, |
| 66 | // CK1: [[ARGCADDR:%.+]] = alloca i{{.+}}**, |
| 67 | // CK1: store i{{.+}}** [[ARGC]], i{{.+}}*** [[ARGCADDR]] |
| 68 | // CK1: [[IS_SHARED:%.+]] = load i16, i16* [[KERNEL_SHARED2]], |
| 69 | // CK1: [[SIZE:%.+]] = load i{{64|32}}, i{{64|32}}* [[KERNEL_SIZE2]], |
| 70 | // CK1: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i{{64|32}} [[SIZE]], i16 [[IS_SHARED]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**)) |
| 71 | // CK1: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]], |
| 72 | // CK1: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i{{64|32}} 0 |
| 73 | // CK1: [[ARG:%.+]] = load i{{[0-9]+}}**, i{{[0-9]+}}*** [[ARGCADDR]] |
| 74 | // CK1: [[ARGCADDR:%.+]] = getelementptr inbounds %struct.{{.*}}, %struct.{{.*}}* %{{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 |
| 75 | // CK1: store i{{[0-9]+}}** [[ARG]], i{{[0-9]+}}*** [[ARGCADDR]], |
| 76 | // CK1: store i8*** [[ARGCADDR]], i8**** [[ARGCADDR_PTR]], |
| 77 | // CK1: [[ARGCADDR_PTR_REF:%.+]] = load i{{.+}}**, i{{.+}}*** [[ARGCADDR_PTR]], |
| 78 | // CK1: store i{{[0-9]+}}** null, i{{[0-9]+}}*** [[ARGCADDR_PTR_REF]], |
| 79 | // CK1-NOT: call {{.*}}void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams( |
| 80 | // CK1: ret void |
| 81 | // CK1-NEXT: } |
| 82 | |
| 83 | |
| 84 | #endif // CK1 |
| 85 | |
| 86 | // Test target codegen - host bc file has to be created first. |
| 87 | // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc |
| 88 | // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CK2 --check-prefix CK2-64 |
| 89 | // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc |
| 90 | // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CK2 --check-prefix CK2-32 |
| 91 | // expected-no-diagnostics |
| 92 | #ifdef CK2 |
| 93 | |
// CK2 template variant: adds num_teams/thread_limit clauses whose values
// ('a' and 'b') are captured and passed to the kernel as extra scalar
// parameters (matched as [[A_IN]]/[[BP]] in the second CK2 check block).
// Instantiated from main() with T = char**.
template <typename T>
int tmain(T argc) {
  int a = 10;
  int b = 5;
// Even with num_teams/thread_limit, the device-side codegen must not call
// __kmpc_push_num_teams or __kmpc_fork_teams (CK2-NOT checks below).
#pragma omp target
#pragma omp teams num_teams(a) thread_limit(b)
  {
    argc = 0;
  }
  return 0;
}
| 105 | |
// CK2 host entry point: target+teams with num_teams/thread_limit on a
// scalar capture; produces the first CK2 kernel, which takes 'a', 'b' and
// 'argc' as scalar parameters ([[A_IN]]/[[B_IN]]/[[ARGC_IN]] below).
int main (int argc, char **argv) {
  int a = 20;
  int b = 5;
#pragma omp target
#pragma omp teams num_teams(a) thread_limit(b)
  {
    argc = 0;
  }
  // Instantiates tmain<char **> to emit the second CK2 kernel.
  return tmain(argv);
}
| 116 | |
| 117 | // CK2: [[MEM_TY:%.+]] = type { [128 x i8] } |
| 118 | // CK2-DAG: [[SHARED_GLOBAL_RD:@.+]] = common addrspace(3) global [[MEM_TY]] zeroinitializer |
| 119 | // CK2-DAG: [[KERNEL_PTR:@.+]] = internal addrspace(3) global i8* null |
| 120 | // CK2-DAG: [[KERNEL_SIZE1:@.+]] = internal unnamed_addr constant i{{64|32}} 4 |
| 121 | // CK2-DAG: [[KERNEL_SIZE2:@.+]] = internal unnamed_addr constant i{{64|32}} {{8|4}} |
| 122 | // CK2-DAG: [[KERNEL_SHARED1:@.+]] = internal unnamed_addr constant i16 1 |
| 123 | // CK2-DAG: [[KERNEL_SHARED2:@.+]] = internal unnamed_addr constant i16 1 |
| 124 | |
// CK2: define {{.*}}void @{{[^,]+}}(i{{[0-9]+}} [[A_IN:%.+]], i{{[0-9]+}} [[B_IN:%.+]], i{{[0-9]+}} [[ARGC_IN:%.+]])
| 126 | // CK2: {{.}} = alloca i{{[0-9]+}}*, |
| 127 | // CK2: {{.}} = alloca i{{[0-9]+}}*, |
| 128 | // CK2: [[ARGCADDR_PTR:%.+]] = alloca i{{[0-9]+}}*, |
| 129 | // CK2: [[AADDR:%.+]] = alloca i{{[0-9]+}}, |
| 130 | // CK2: [[BADDR:%.+]] = alloca i{{[0-9]+}}, |
| 131 | // CK2: [[ARGCADDR:%.+]] = alloca i{{[0-9]+}}, |
| 132 | // CK2: store i{{[0-9]+}} [[A_IN]], i{{[0-9]+}}* [[AADDR]], |
| 133 | // CK2: store i{{[0-9]+}} [[B_IN]], i{{[0-9]+}}* [[BADDR]], |
| 134 | // CK2: store i{{[0-9]+}} [[ARGC_IN]], i{{[0-9]+}}* [[ARGCADDR]], |
| 135 | // CK2-64: [[ACONV:%.+]] = bitcast i64* [[AADDR]] to i32* |
| 136 | // CK2-64: [[BCONV:%.+]] = bitcast i64* [[BADDR]] to i32* |
| 137 | // CK2-64: [[CONV:%.+]] = bitcast i64* [[ARGCADDR]] to i32* |
| 138 | // CK2: [[IS_SHARED:%.+]] = load i16, i16* [[KERNEL_SHARED1]], |
| 139 | // CK2: [[SIZE:%.+]] = load i{{64|32}}, i{{64|32}}* [[KERNEL_SIZE1]], |
| 140 | // CK2: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i{{64|32}} [[SIZE]], i16 [[IS_SHARED]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**)) |
| 141 | // CK2: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]], |
| 142 | // CK2: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i{{64|32}} 0 |
| 143 | // CK2-64: [[ARG:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[CONV]] |
| 144 | // CK2-32: [[ARG:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[ARGCADDR]] |
| 145 | // CK2: [[ARGCADDR:%.+]] = getelementptr inbounds %struct.{{.*}}, %struct.{{.*}}* %{{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 |
| 146 | // CK2: store i{{[0-9]+}} [[ARG]], i{{[0-9]+}}* [[ARGCADDR]], |
| 147 | // CK2: {{%.+}} = call i32 @__kmpc_global_thread_num( |
| 148 | // CK2: store i{{[0-9]+}}* [[ARGCADDR]], i{{[0-9]+}}** [[ARGCADDR_PTR]], |
| 149 | // CK2: [[ARGCADDR_PTR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[ARGCADDR_PTR]], |
| 150 | // CK2: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[ARGCADDR_PTR_REF]], |
| 151 | // CK2-NOT: {{.+}} = call i32 @__kmpc_push_num_teams( |
| 152 | // CK2-NOT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams( |
| 153 | // CK2: ret |
| 154 | |
// CK2: define {{.*}}void @{{[^,]+}}(i{{[0-9]+}} [[A_IN:%.+]], i{{[0-9]+}} [[B_IN:%.+]], i{{[0-9]+}}** [[ARGC:%.+]])
| 156 | // CK2: [[ARGCADDR_PTR:%.+]] = alloca i{{[0-9]+}}***, |
| 157 | // CK2: [[AADDR:%.+]] = alloca i{{[0-9]+}}, |
| 158 | // CK2: [[BADDR:%.+]] = alloca i{{[0-9]+}}, |
| 159 | // CK2: [[ARGCADDR:%.+]] = alloca i{{[0-9]+}}**, |
| 160 | // CK2: store i{{[0-9]+}} [[A_IN]], i{{[0-9]+}}* [[AADDR]], |
| 161 | // CK2: store i{{[0-9]+}} [[B_IN]], i{{[0-9]+}}* [[BADDR]], |
| 162 | // CK2: store i{{[0-9]+}}** [[ARGC]], i{{[0-9]+}}*** [[ARGCADDR]], |
| 163 | // CK2: [[IS_SHARED:%.+]] = load i16, i16* [[KERNEL_SHARED2]], |
| 164 | // CK2: [[SIZE:%.+]] = load i{{64|32}}, i{{64|32}}* [[KERNEL_SIZE2]], |
| 165 | // CK2: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i{{64|32}} [[SIZE]], i16 [[IS_SHARED]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**)) |
| 166 | // CK2: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]], |
| 167 | // CK2: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i{{64|32}} 0 |
| 168 | // CK2: [[ARG:%.+]] = load i{{[0-9]+}}**, i{{[0-9]+}}*** [[ARGCADDR]] |
| 169 | // CK2: [[ARGCADDR:%.+]] = getelementptr inbounds %struct.{{.*}}, %struct.{{.*}}* %{{.*}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0 |
| 170 | // CK2: store i{{[0-9]+}}** [[ARG]], i{{[0-9]+}}*** [[ARGCADDR]], |
| 171 | // CK2: {{%.+}} = call i32 @__kmpc_global_thread_num( |
| 172 | // CK2: store i{{[0-9]+}}*** [[ARGCADDR]], i{{[0-9]+}}**** [[ARGCADDR_PTR]], |
| 173 | // CK2: [[ARGCADDR_PTR_REF:%.+]] = load i{{[0-9]+}}***, i{{[0-9]+}}**** [[ARGCADDR_PTR]], |
| 174 | // CK2: store i{{[0-9]+}}** null, i{{[0-9]+}}*** [[ARGCADDR_PTR_REF]], |
| 175 | // CK2-NOT: {{.+}} = call i32 @__kmpc_push_num_teams( |
| 176 | // CK2-NOT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams( |
| 177 | // CK2: ret void |
| 178 | |
| 179 | #endif // CK2 |
| 180 | #endif |
| 181 | |