- ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
- ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
+ ! RUN: %flang_fc1 -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-LE" %s
+ ! RUN: %flang_fc1 -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR","LLVMIR-BE" %s
! REQUIRES: target=powerpc{{.*}}
!-------------
@@ -19,8 +19,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 4
! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[idx]]
! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i2)
@@ -29,8 +30,9 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 4
! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[idx]]
! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i4)
@@ -39,18 +41,19 @@ subroutine vec_extract_testf32(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 4
! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[idx]]
! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <4 x float>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 4
- ! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 4
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 3, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 4
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x float> %[[x]], i64 %[[idx]]
! LLVMIR: store float %[[r]], ptr %{{[0-9]}}, align 4
end subroutine vec_extract_testf32
@@ -68,8 +71,9 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 2
! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[idx]]
! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8

r = vec_extract(x, i2)
@@ -78,8 +82,9 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 2
! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[idx]]
! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
@@ -89,18 +94,19 @@ subroutine vec_extract_testf64(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 2
! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[idx]]
! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <2 x double>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 2
- ! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 2
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 1, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 2
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x double> %[[x]], i64 %[[idx]]
! LLVMIR: store double %[[r]], ptr %{{[0-9]}}, align 8
end subroutine vec_extract_testf64
@@ -118,8 +124,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 16
! LLVMIR-BE: %[[s:.*]] = sub i8 15, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[idx]]
! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1

r = vec_extract(x, i2)
@@ -128,8 +135,9 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 16
! LLVMIR-BE: %[[s:.*]] = sub i16 15, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[idx]]
! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1

r = vec_extract(x, i4)
@@ -138,18 +146,19 @@ subroutine vec_extract_testi8(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 16
! LLVMIR-BE: %[[s:.*]] = sub i32 15, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[idx]]
! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <16 x i8>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 16
- ! LLVMIR-BE: %[[s:.*]] = sub i64 15, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 16
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 15, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 16
+ ! LLVMIR: %[[r:.*]] = extractelement <16 x i8> %[[x]], i64 %[[idx]]
! LLVMIR: store i8 %[[r]], ptr %{{[0-9]}}, align 1
end subroutine vec_extract_testi8
@@ -167,8 +176,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 8
! LLVMIR-BE: %[[s:.*]] = sub i8 7, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[idx]]
! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2

r = vec_extract(x, i2)
@@ -177,8 +187,9 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 8
! LLVMIR-BE: %[[s:.*]] = sub i16 7, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[idx]]
! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2

r = vec_extract(x, i4)
@@ -187,18 +198,19 @@ subroutine vec_extract_testi16(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 8
! LLVMIR-BE: %[[s:.*]] = sub i32 7, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[idx]]
! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <8 x i16>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 8
- ! LLVMIR-BE: %[[s:.*]] = sub i64 7, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 8
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 7, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 8
+ ! LLVMIR: %[[r:.*]] = extractelement <8 x i16> %[[x]], i64 %[[idx]]
! LLVMIR: store i16 %[[r]], ptr %{{[0-9]}}, align 2
end subroutine vec_extract_testi16
@@ -216,8 +228,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 4
! LLVMIR-BE: %[[s:.*]] = sub i8 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[idx]]
! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i2)
@@ -226,8 +239,9 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 4
! LLVMIR-BE: %[[s:.*]] = sub i16 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[idx]]
! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i4)
@@ -236,18 +250,19 @@ subroutine vec_extract_testi32(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 4
! LLVMIR-BE: %[[s:.*]] = sub i32 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[idx]]
! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <4 x i32>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 4
- ! LLVMIR-BE: %[[s:.*]] = sub i64 3, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 4
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 3, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 4
+ ! LLVMIR: %[[r:.*]] = extractelement <4 x i32> %[[x]], i64 %[[idx]]
! LLVMIR: store i32 %[[r]], ptr %{{[0-9]}}, align 4
end subroutine vec_extract_testi32
@@ -265,8 +280,9 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
! LLVMIR: %[[i1:.*]] = load i8, ptr %{{[0-9]}}, align 1
! LLVMIR: %[[u:.*]] = urem i8 %[[i1]], 2
! LLVMIR-BE: %[[s:.*]] = sub i8 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i8 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i8 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i8 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i8 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[idx]]
! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8

r = vec_extract(x, i2)
@@ -275,8 +291,9 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
! LLVMIR: %[[i2:.*]] = load i16, ptr %{{[0-9]}}, align 2
! LLVMIR: %[[u:.*]] = urem i16 %[[i2]], 2
! LLVMIR-BE: %[[s:.*]] = sub i16 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i16 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i16 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i16 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i16 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[idx]]
! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8

r = vec_extract(x, i4)
@@ -285,17 +302,18 @@ subroutine vec_extract_testi64(x, i1, i2, i4, i8)
! LLVMIR: %[[i4:.*]] = load i32, ptr %{{[0-9]}}, align 4
! LLVMIR: %[[u:.*]] = urem i32 %[[i4]], 2
! LLVMIR-BE: %[[s:.*]] = sub i32 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i32 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i32 %[[s]]
+ ! LLVMIR-BE: %[[idx:.*]] = zext i32 %[[s]] to i64
+ ! LLVMIR-LE: %[[idx:.*]] = zext i32 %[[u]] to i64
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[idx]]
! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8

r = vec_extract(x, i8)

! LLVMIR: %[[x:.*]] = load <2 x i64>, ptr %{{[0-9]}}, align 16
! LLVMIR: %[[i8:.*]] = load i64, ptr %{{[0-9]}}, align 8
- ! LLVMIR: %[[u:.*]] = urem i64 %[[i8]], 2
- ! LLVMIR-BE: %[[s:.*]] = sub i64 1, %[[u]]
- ! LLVMIR-LE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[u]]
- ! LLVMIR-BE: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[s]]
+ ! LLVMIR-BE: %[[u:.*]] = urem i64 %[[i8]], 2
+ ! LLVMIR-BE: %[[idx:.*]] = sub i64 1, %[[u]]
+ ! LLVMIR-LE: %[[idx:.*]] = urem i64 %[[i8]], 2
+ ! LLVMIR: %[[r:.*]] = extractelement <2 x i64> %[[x]], i64 %[[idx]]
! LLVMIR: store i64 %[[r]], ptr %{{[0-9]}}, align 8
end subroutine vec_extract_testi64