// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

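// rewriteValueAMD64 dispatches on v.Op and applies the first AMD64
// rewrite rule that matches v, reporting whether v was rewritten.
// Ops whose lowering is a bare opcode substitution are handled inline;
// all others delegate to the per-op rewrite functions below.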
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap16:
		return rewriteValueAMD64_OpBswap16(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMax32F:
		return rewriteValueAMD64_OpMax32F(v)
	case OpMax64F:
		return rewriteValueAMD64_OpMax64F(v)
	case OpMin32F:
		return rewriteValueAMD64_OpMin32F(v)
	case OpMin64F:
		return rewriteValueAMD64_OpMin64F(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
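// Each per-op rewrite function below is generated from rules of the form
// "match [&& cond] => result". The generator records each rule as a
// comment, for example:
//
//	// match: (ADCQ x (MOVQconst [c]) carry)
//	// cond: is32Bit(c)
//	// result: (ADCQconst x [int32(c)] carry)
//
// followed by a matching loop that tries the rule and returns true if it
// fires. Commutative patterns are matched by the inner _i0/_i1 loops,
// which retry with the operands swapped.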
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			t := v_1.Type
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c) && !t.IsPtr()) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
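// ADDQcarry produces a sum together with carry flags, and ADCQ/ADCQconst
// (above) consume them; together they form the carry chain used when
// lowering multi-word additions such as math/bits.Add64. The rules here
// fold small constants into the immediate (const-carry) forms.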
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
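// The ADDSD/ADDSS rules that follow merge a floating-point load into
// the arithmetic instruction itself: when the load result is used only
// here and may be clobbered (canMergeLoadClobber plus clobber), the
// pair becomes the memory-operand form, e.g. ADDSDload.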
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
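// rewriteValueAMD64_OpAMD64ANDL turns bit-clear idioms into BTRL and,
// when buildcfg.GOAMD64 >= 3 guarantees BMI support, selects the BMI
// instructions: ANDNL for x &^ y, BLSIL for x & -x (isolate lowest set
// bit), and BLSRL for x & (x-1) (clear lowest set bit).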
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL <t> x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRL x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
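// rewriteValueAMD64_OpAMD64ANDLconst folds constant masks: 0xFF and
// 0xFFFF are exactly the byte and word zero-extensions (MOVBQZX,
// MOVWQZX), a zero mask is the constant 0, and an all-ones mask is a
// no-op.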
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c==0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c==-1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
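// rewriteValueAMD64_OpAMD64ANDQ mirrors the ANDL rules for 64-bit
// operands. One case has no 32-bit analogue: when the mask c has a
// single clear bit (isUint64PowerOfTwo(^c)) and does not fit in a
// sign-extended 32-bit immediate (uint64(^c) >= 1<<31), the AND is a
// single-bit clear; e.g. c = ^(1<<40) becomes BTRQconst [40] x rather
// than materializing the 64-bit constant.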
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ <t> x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (Select0 <t> (BLSRQ x))
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpSelect0)
			v.Type = t
			v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)&d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
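// rewriteValueAMD64_OpAMD64BSFQ strips a redundant zero-extension under
// BSFQ. The ORQconst [1<<8] (or [1<<16]) sentinel, typically inserted
// by the Ctz8/Ctz16 lowerings, guarantees a set bit at position 8 (or
// 16), so the scan result never depends on any higher bits and the
// MOVBQZX/MOVWQZX contributes nothing.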
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPL (BSWAPL p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVLload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BSWAPQ (BSWAPQ p))
	// result: p
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: @x.Block (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
	// cond: x.Uses == 1
	// result: @x.Block (MOVQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(i)
		v0.Aux = symToAux(s)
		v0.AddArg2(p, mem)
		return true
	}
	return false
}
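// The BT{C,R,S}Qconst rules below absorb an inner BTS/BTR/BTC of the
// same bit index (setting then resetting bit c is just resetting it)
// and fold the operation on constants: BTRQconst [c] of (MOVQconst [d])
// yields d &^ (1<<uint32(c)), e.g. d = 0xFF, c = 3 gives 0xF7.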
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d)<32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c>d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [0] s:(SHRXL x y))
	// result: (BTL y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRXL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d)<64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c>d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d&^(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
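// The CMOV rewrites that follow all share one shape: an InvertFlags
// input swaps the condition for its dual (CC<->LS, CS<->HI, GE<->LE,
// GT<->LT; EQ and NE are their own duals), and a constant flags input
// (FlagEQ, FlagGT_*, FlagLT_*) decides the move statically, collapsing
// the CMOV to one of its two operands.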
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVLNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
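// rewriteValueAMD64_OpAMD64CMOVQEQ also knows that BSFQ/BSRQ set the
// zero flag only when their input is zero; an input of the form
// (ORQconst [c] _) with c != 0 is never zero, so the "equal" arm is
// dead and the CMOVQEQ always produces its first operand.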
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSRQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
5195 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5196 v_2 := v.Args[2]
5197 v_1 := v.Args[1]
5198 v_0 := v.Args[0]
5199
5200
5201 for {
5202 x := v_0
5203 y := v_1
5204 if v_2.Op != OpAMD64InvertFlags {
5205 break
5206 }
5207 cond := v_2.Args[0]
5208 v.reset(OpAMD64CMOVQGE)
5209 v.AddArg3(x, y, cond)
5210 return true
5211 }
5212
5213
5214 for {
5215 x := v_1
5216 if v_2.Op != OpAMD64FlagEQ {
5217 break
5218 }
5219 v.copyOf(x)
5220 return true
5221 }
5222
5223
5224 for {
5225 y := v_0
5226 if v_2.Op != OpAMD64FlagGT_UGT {
5227 break
5228 }
5229 v.copyOf(y)
5230 return true
5231 }
5232
5233
5234 for {
5235 y := v_0
5236 if v_2.Op != OpAMD64FlagGT_ULT {
5237 break
5238 }
5239 v.copyOf(y)
5240 return true
5241 }
5242
5243
5244 for {
5245 x := v_1
5246 if v_2.Op != OpAMD64FlagLT_ULT {
5247 break
5248 }
5249 v.copyOf(x)
5250 return true
5251 }
5252
5253
5254 for {
5255 x := v_1
5256 if v_2.Op != OpAMD64FlagLT_UGT {
5257 break
5258 }
5259 v.copyOf(x)
5260 return true
5261 }
5262 return false
5263 }
5264 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5265 v_2 := v.Args[2]
5266 v_1 := v.Args[1]
5267 v_0 := v.Args[0]
5268
5269
5270 for {
5271 x := v_0
5272 y := v_1
5273 if v_2.Op != OpAMD64InvertFlags {
5274 break
5275 }
5276 cond := v_2.Args[0]
5277 v.reset(OpAMD64CMOVQCC)
5278 v.AddArg3(x, y, cond)
5279 return true
5280 }
5281
5282
5283 for {
5284 x := v_1
5285 if v_2.Op != OpAMD64FlagEQ {
5286 break
5287 }
5288 v.copyOf(x)
5289 return true
5290 }
5291
5292
5293 for {
5294 y := v_0
5295 if v_2.Op != OpAMD64FlagGT_UGT {
5296 break
5297 }
5298 v.copyOf(y)
5299 return true
5300 }
5301 // match: (CMOVQLS _ x (FlagGT_ULT))
5302 // result: x
5303 for {
5304 x := v_1
5305 if v_2.Op != OpAMD64FlagGT_ULT {
5306 break
5307 }
5308 v.copyOf(x)
5309 return true
5310 }
5311 // match: (CMOVQLS _ x (FlagLT_ULT))
5312 // result: x
5313 for {
5314 x := v_1
5315 if v_2.Op != OpAMD64FlagLT_ULT {
5316 break
5317 }
5318 v.copyOf(x)
5319 return true
5320 }
5321 // match: (CMOVQLS y _ (FlagLT_UGT))
5322 // result: y
5323 for {
5324 y := v_0
5325 if v_2.Op != OpAMD64FlagLT_UGT {
5326 break
5327 }
5328 v.copyOf(y)
5329 return true
5330 }
5331 return false
5332 }
5333 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5334 v_2 := v.Args[2]
5335 v_1 := v.Args[1]
5336 v_0 := v.Args[0]
5337 // match: (CMOVQLT x y (InvertFlags cond))
5338 // result: (CMOVQGT x y cond)
5339 for {
5340 x := v_0
5341 y := v_1
5342 if v_2.Op != OpAMD64InvertFlags {
5343 break
5344 }
5345 cond := v_2.Args[0]
5346 v.reset(OpAMD64CMOVQGT)
5347 v.AddArg3(x, y, cond)
5348 return true
5349 }
5350 // match: (CMOVQLT y _ (FlagEQ))
5351 // result: y
5352 for {
5353 y := v_0
5354 if v_2.Op != OpAMD64FlagEQ {
5355 break
5356 }
5357 v.copyOf(y)
5358 return true
5359 }
5360 // match: (CMOVQLT y _ (FlagGT_UGT))
5361 // result: y
5362 for {
5363 y := v_0
5364 if v_2.Op != OpAMD64FlagGT_UGT {
5365 break
5366 }
5367 v.copyOf(y)
5368 return true
5369 }
5370 // match: (CMOVQLT y _ (FlagGT_ULT))
5371 // result: y
5372 for {
5373 y := v_0
5374 if v_2.Op != OpAMD64FlagGT_ULT {
5375 break
5376 }
5377 v.copyOf(y)
5378 return true
5379 }
5380 // match: (CMOVQLT _ x (FlagLT_ULT))
5381 // result: x
5382 for {
5383 x := v_1
5384 if v_2.Op != OpAMD64FlagLT_ULT {
5385 break
5386 }
5387 v.copyOf(x)
5388 return true
5389 }
5390 // match: (CMOVQLT _ x (FlagLT_UGT))
5391 // result: x
5392 for {
5393 x := v_1
5394 if v_2.Op != OpAMD64FlagLT_UGT {
5395 break
5396 }
5397 v.copyOf(x)
5398 return true
5399 }
5400 return false
5401 }
5402 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5403 v_2 := v.Args[2]
5404 v_1 := v.Args[1]
5405 v_0 := v.Args[0]
5406 b := v.Block
5407 // match: (CMOVQNE x y (InvertFlags cond))
5408 // result: (CMOVQNE x y cond)
5409 for {
5410 x := v_0
5411 y := v_1
5412 if v_2.Op != OpAMD64InvertFlags {
5413 break
5414 }
5415 cond := v_2.Args[0]
5416 v.reset(OpAMD64CMOVQNE)
5417 v.AddArg3(x, y, cond)
5418 return true
5419 }
5420 // match: (CMOVQNE y _ (FlagEQ))
5421 // result: y
5422 for {
5423 y := v_0
5424 if v_2.Op != OpAMD64FlagEQ {
5425 break
5426 }
5427 v.copyOf(y)
5428 return true
5429 }
5430 // match: (CMOVQNE _ x (FlagGT_UGT))
5431 // result: x
5432 for {
5433 x := v_1
5434 if v_2.Op != OpAMD64FlagGT_UGT {
5435 break
5436 }
5437 v.copyOf(x)
5438 return true
5439 }
5440 // match: (CMOVQNE _ x (FlagGT_ULT))
5441 // result: x
5442 for {
5443 x := v_1
5444 if v_2.Op != OpAMD64FlagGT_ULT {
5445 break
5446 }
5447 v.copyOf(x)
5448 return true
5449 }
5450 // match: (CMOVQNE _ x (FlagLT_ULT))
5451 // result: x
5452 for {
5453 x := v_1
5454 if v_2.Op != OpAMD64FlagLT_ULT {
5455 break
5456 }
5457 v.copyOf(x)
5458 return true
5459 }
5460 // match: (CMOVQNE _ x (FlagLT_UGT))
5461 // result: x
5462 for {
5463 x := v_1
5464 if v_2.Op != OpAMD64FlagLT_UGT {
5465 break
5466 }
5467 v.copyOf(x)
5468 return true
5469 }
5470 // match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
5471 // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
5472 for {
5473 x := v_0
5474 y := v_1
5475 if v_2.Op != OpAMD64TESTQ {
5476 break
5477 }
5478 _ = v_2.Args[1]
5479 v_2_0 := v_2.Args[0]
5480 v_2_1 := v_2.Args[1]
5481 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5482 s := v_2_0
5483 if s.Op != OpSelect0 {
5484 continue
5485 }
5486 blsr := s.Args[0]
5487 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5488 continue
5489 }
5490 v.reset(OpAMD64CMOVQNE)
5491 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5492 v0.AddArg(blsr)
5493 v.AddArg3(x, y, v0)
5494 return true
5495 }
5496 break
5497 }
5498 // match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
5499 // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
5500 for {
5501 x := v_0
5502 y := v_1
5503 if v_2.Op != OpAMD64TESTL {
5504 break
5505 }
5506 _ = v_2.Args[1]
5507 v_2_0 := v_2.Args[0]
5508 v_2_1 := v_2.Args[1]
5509 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5510 s := v_2_0
5511 if s.Op != OpSelect0 {
5512 continue
5513 }
5514 blsr := s.Args[0]
5515 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5516 continue
5517 }
5518 v.reset(OpAMD64CMOVQNE)
5519 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5520 v0.AddArg(blsr)
5521 v.AddArg3(x, y, v0)
5522 return true
5523 }
5524 break
5525 }
5526 return false
5527 }
5528 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
5529 v_2 := v.Args[2]
5530 v_1 := v.Args[1]
5531 v_0 := v.Args[0]
5532 // match: (CMOVWCC x y (InvertFlags cond))
5533 // result: (CMOVWLS x y cond)
5534 for {
5535 x := v_0
5536 y := v_1
5537 if v_2.Op != OpAMD64InvertFlags {
5538 break
5539 }
5540 cond := v_2.Args[0]
5541 v.reset(OpAMD64CMOVWLS)
5542 v.AddArg3(x, y, cond)
5543 return true
5544 }
5545
5546
5547 for {
5548 x := v_1
5549 if v_2.Op != OpAMD64FlagEQ {
5550 break
5551 }
5552 v.copyOf(x)
5553 return true
5554 }
5555
5556
5557 for {
5558 x := v_1
5559 if v_2.Op != OpAMD64FlagGT_UGT {
5560 break
5561 }
5562 v.copyOf(x)
5563 return true
5564 }
5565
5566
5567 for {
5568 y := v_0
5569 if v_2.Op != OpAMD64FlagGT_ULT {
5570 break
5571 }
5572 v.copyOf(y)
5573 return true
5574 }
5575
5576
5577 for {
5578 y := v_0
5579 if v_2.Op != OpAMD64FlagLT_ULT {
5580 break
5581 }
5582 v.copyOf(y)
5583 return true
5584 }
5585
5586
5587 for {
5588 x := v_1
5589 if v_2.Op != OpAMD64FlagLT_UGT {
5590 break
5591 }
5592 v.copyOf(x)
5593 return true
5594 }
5595 return false
5596 }
5597 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
5598 v_2 := v.Args[2]
5599 v_1 := v.Args[1]
5600 v_0 := v.Args[0]
5601 // match: (CMOVWCS x y (InvertFlags cond))
5602 // result: (CMOVWHI x y cond)
5603 for {
5604 x := v_0
5605 y := v_1
5606 if v_2.Op != OpAMD64InvertFlags {
5607 break
5608 }
5609 cond := v_2.Args[0]
5610 v.reset(OpAMD64CMOVWHI)
5611 v.AddArg3(x, y, cond)
5612 return true
5613 }
5614
5615
5616 for {
5617 y := v_0
5618 if v_2.Op != OpAMD64FlagEQ {
5619 break
5620 }
5621 v.copyOf(y)
5622 return true
5623 }
5624
5625
5626 for {
5627 y := v_0
5628 if v_2.Op != OpAMD64FlagGT_UGT {
5629 break
5630 }
5631 v.copyOf(y)
5632 return true
5633 }
5634
5635
5636 for {
5637 x := v_1
5638 if v_2.Op != OpAMD64FlagGT_ULT {
5639 break
5640 }
5641 v.copyOf(x)
5642 return true
5643 }
5644
5645
5646 for {
5647 x := v_1
5648 if v_2.Op != OpAMD64FlagLT_ULT {
5649 break
5650 }
5651 v.copyOf(x)
5652 return true
5653 }
5654
5655
5656 for {
5657 y := v_0
5658 if v_2.Op != OpAMD64FlagLT_UGT {
5659 break
5660 }
5661 v.copyOf(y)
5662 return true
5663 }
5664 return false
5665 }
5666 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
5667 v_2 := v.Args[2]
5668 v_1 := v.Args[1]
5669 v_0 := v.Args[0]
5670 // match: (CMOVWEQ x y (InvertFlags cond))
5671 // result: (CMOVWEQ x y cond)
5672 for {
5673 x := v_0
5674 y := v_1
5675 if v_2.Op != OpAMD64InvertFlags {
5676 break
5677 }
5678 cond := v_2.Args[0]
5679 v.reset(OpAMD64CMOVWEQ)
5680 v.AddArg3(x, y, cond)
5681 return true
5682 }
5683
5684
5685 for {
5686 x := v_1
5687 if v_2.Op != OpAMD64FlagEQ {
5688 break
5689 }
5690 v.copyOf(x)
5691 return true
5692 }
5693
5694
5695 for {
5696 y := v_0
5697 if v_2.Op != OpAMD64FlagGT_UGT {
5698 break
5699 }
5700 v.copyOf(y)
5701 return true
5702 }
5703
5704
5705 for {
5706 y := v_0
5707 if v_2.Op != OpAMD64FlagGT_ULT {
5708 break
5709 }
5710 v.copyOf(y)
5711 return true
5712 }
5713
5714
5715 for {
5716 y := v_0
5717 if v_2.Op != OpAMD64FlagLT_ULT {
5718 break
5719 }
5720 v.copyOf(y)
5721 return true
5722 }
5723
5724
5725 for {
5726 y := v_0
5727 if v_2.Op != OpAMD64FlagLT_UGT {
5728 break
5729 }
5730 v.copyOf(y)
5731 return true
5732 }
5733 return false
5734 }
5735 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
5736 v_2 := v.Args[2]
5737 v_1 := v.Args[1]
5738 v_0 := v.Args[0]
5739 // match: (CMOVWGE x y (InvertFlags cond))
5740 // result: (CMOVWLE x y cond)
5741 for {
5742 x := v_0
5743 y := v_1
5744 if v_2.Op != OpAMD64InvertFlags {
5745 break
5746 }
5747 cond := v_2.Args[0]
5748 v.reset(OpAMD64CMOVWLE)
5749 v.AddArg3(x, y, cond)
5750 return true
5751 }
5752
5753
5754 for {
5755 x := v_1
5756 if v_2.Op != OpAMD64FlagEQ {
5757 break
5758 }
5759 v.copyOf(x)
5760 return true
5761 }
5762
5763
5764 for {
5765 x := v_1
5766 if v_2.Op != OpAMD64FlagGT_UGT {
5767 break
5768 }
5769 v.copyOf(x)
5770 return true
5771 }
5772
5773
5774 for {
5775 x := v_1
5776 if v_2.Op != OpAMD64FlagGT_ULT {
5777 break
5778 }
5779 v.copyOf(x)
5780 return true
5781 }
5782
5783
5784 for {
5785 y := v_0
5786 if v_2.Op != OpAMD64FlagLT_ULT {
5787 break
5788 }
5789 v.copyOf(y)
5790 return true
5791 }
5792
5793
5794 for {
5795 y := v_0
5796 if v_2.Op != OpAMD64FlagLT_UGT {
5797 break
5798 }
5799 v.copyOf(y)
5800 return true
5801 }
5802 return false
5803 }
5804 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
5805 v_2 := v.Args[2]
5806 v_1 := v.Args[1]
5807 v_0 := v.Args[0]
5808 // match: (CMOVWGT x y (InvertFlags cond))
5809 // result: (CMOVWLT x y cond)
5810 for {
5811 x := v_0
5812 y := v_1
5813 if v_2.Op != OpAMD64InvertFlags {
5814 break
5815 }
5816 cond := v_2.Args[0]
5817 v.reset(OpAMD64CMOVWLT)
5818 v.AddArg3(x, y, cond)
5819 return true
5820 }
5821
5822
5823 for {
5824 y := v_0
5825 if v_2.Op != OpAMD64FlagEQ {
5826 break
5827 }
5828 v.copyOf(y)
5829 return true
5830 }
5831
5832
5833 for {
5834 x := v_1
5835 if v_2.Op != OpAMD64FlagGT_UGT {
5836 break
5837 }
5838 v.copyOf(x)
5839 return true
5840 }
5841
5842
5843 for {
5844 x := v_1
5845 if v_2.Op != OpAMD64FlagGT_ULT {
5846 break
5847 }
5848 v.copyOf(x)
5849 return true
5850 }
5851
5852
5853 for {
5854 y := v_0
5855 if v_2.Op != OpAMD64FlagLT_ULT {
5856 break
5857 }
5858 v.copyOf(y)
5859 return true
5860 }
5861
5862
5863 for {
5864 y := v_0
5865 if v_2.Op != OpAMD64FlagLT_UGT {
5866 break
5867 }
5868 v.copyOf(y)
5869 return true
5870 }
5871 return false
5872 }
5873 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
5874 v_2 := v.Args[2]
5875 v_1 := v.Args[1]
5876 v_0 := v.Args[0]
5877 // match: (CMOVWHI x y (InvertFlags cond))
5878 // result: (CMOVWCS x y cond)
5879 for {
5880 x := v_0
5881 y := v_1
5882 if v_2.Op != OpAMD64InvertFlags {
5883 break
5884 }
5885 cond := v_2.Args[0]
5886 v.reset(OpAMD64CMOVWCS)
5887 v.AddArg3(x, y, cond)
5888 return true
5889 }
5890
5891
5892 for {
5893 y := v_0
5894 if v_2.Op != OpAMD64FlagEQ {
5895 break
5896 }
5897 v.copyOf(y)
5898 return true
5899 }
5900
5901
5902 for {
5903 x := v_1
5904 if v_2.Op != OpAMD64FlagGT_UGT {
5905 break
5906 }
5907 v.copyOf(x)
5908 return true
5909 }
5910
5911
5912 for {
5913 y := v_0
5914 if v_2.Op != OpAMD64FlagGT_ULT {
5915 break
5916 }
5917 v.copyOf(y)
5918 return true
5919 }
5920
5921
5922 for {
5923 y := v_0
5924 if v_2.Op != OpAMD64FlagLT_ULT {
5925 break
5926 }
5927 v.copyOf(y)
5928 return true
5929 }
5930
5931
5932 for {
5933 x := v_1
5934 if v_2.Op != OpAMD64FlagLT_UGT {
5935 break
5936 }
5937 v.copyOf(x)
5938 return true
5939 }
5940 return false
5941 }
5942 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
5943 v_2 := v.Args[2]
5944 v_1 := v.Args[1]
5945 v_0 := v.Args[0]
5946 // match: (CMOVWLE x y (InvertFlags cond))
5947 // result: (CMOVWGE x y cond)
5948 for {
5949 x := v_0
5950 y := v_1
5951 if v_2.Op != OpAMD64InvertFlags {
5952 break
5953 }
5954 cond := v_2.Args[0]
5955 v.reset(OpAMD64CMOVWGE)
5956 v.AddArg3(x, y, cond)
5957 return true
5958 }
5959
5960
5961 for {
5962 x := v_1
5963 if v_2.Op != OpAMD64FlagEQ {
5964 break
5965 }
5966 v.copyOf(x)
5967 return true
5968 }
5969
5970
5971 for {
5972 y := v_0
5973 if v_2.Op != OpAMD64FlagGT_UGT {
5974 break
5975 }
5976 v.copyOf(y)
5977 return true
5978 }
5979
5980
5981 for {
5982 y := v_0
5983 if v_2.Op != OpAMD64FlagGT_ULT {
5984 break
5985 }
5986 v.copyOf(y)
5987 return true
5988 }
5989
5990
5991 for {
5992 x := v_1
5993 if v_2.Op != OpAMD64FlagLT_ULT {
5994 break
5995 }
5996 v.copyOf(x)
5997 return true
5998 }
5999
6000
6001 for {
6002 x := v_1
6003 if v_2.Op != OpAMD64FlagLT_UGT {
6004 break
6005 }
6006 v.copyOf(x)
6007 return true
6008 }
6009 return false
6010 }
6011 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6012 v_2 := v.Args[2]
6013 v_1 := v.Args[1]
6014 v_0 := v.Args[0]
6015 // match: (CMOVWLS x y (InvertFlags cond))
6016 // result: (CMOVWCC x y cond)
6017 for {
6018 x := v_0
6019 y := v_1
6020 if v_2.Op != OpAMD64InvertFlags {
6021 break
6022 }
6023 cond := v_2.Args[0]
6024 v.reset(OpAMD64CMOVWCC)
6025 v.AddArg3(x, y, cond)
6026 return true
6027 }
6028
6029
6030 for {
6031 x := v_1
6032 if v_2.Op != OpAMD64FlagEQ {
6033 break
6034 }
6035 v.copyOf(x)
6036 return true
6037 }
6038
6039
6040 for {
6041 y := v_0
6042 if v_2.Op != OpAMD64FlagGT_UGT {
6043 break
6044 }
6045 v.copyOf(y)
6046 return true
6047 }
6048
6049
6050 for {
6051 x := v_1
6052 if v_2.Op != OpAMD64FlagGT_ULT {
6053 break
6054 }
6055 v.copyOf(x)
6056 return true
6057 }
6058
6059
6060 for {
6061 x := v_1
6062 if v_2.Op != OpAMD64FlagLT_ULT {
6063 break
6064 }
6065 v.copyOf(x)
6066 return true
6067 }
6068
6069
6070 for {
6071 y := v_0
6072 if v_2.Op != OpAMD64FlagLT_UGT {
6073 break
6074 }
6075 v.copyOf(y)
6076 return true
6077 }
6078 return false
6079 }
6080 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6081 v_2 := v.Args[2]
6082 v_1 := v.Args[1]
6083 v_0 := v.Args[0]
6084 // match: (CMOVWLT x y (InvertFlags cond))
6085 // result: (CMOVWGT x y cond)
6086 for {
6087 x := v_0
6088 y := v_1
6089 if v_2.Op != OpAMD64InvertFlags {
6090 break
6091 }
6092 cond := v_2.Args[0]
6093 v.reset(OpAMD64CMOVWGT)
6094 v.AddArg3(x, y, cond)
6095 return true
6096 }
6097
6098
6099 for {
6100 y := v_0
6101 if v_2.Op != OpAMD64FlagEQ {
6102 break
6103 }
6104 v.copyOf(y)
6105 return true
6106 }
6107
6108
6109 for {
6110 y := v_0
6111 if v_2.Op != OpAMD64FlagGT_UGT {
6112 break
6113 }
6114 v.copyOf(y)
6115 return true
6116 }
6117
6118
6119 for {
6120 y := v_0
6121 if v_2.Op != OpAMD64FlagGT_ULT {
6122 break
6123 }
6124 v.copyOf(y)
6125 return true
6126 }
6127
6128
6129 for {
6130 x := v_1
6131 if v_2.Op != OpAMD64FlagLT_ULT {
6132 break
6133 }
6134 v.copyOf(x)
6135 return true
6136 }
6137
6138
6139 for {
6140 x := v_1
6141 if v_2.Op != OpAMD64FlagLT_UGT {
6142 break
6143 }
6144 v.copyOf(x)
6145 return true
6146 }
6147 return false
6148 }
6149 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6150 v_2 := v.Args[2]
6151 v_1 := v.Args[1]
6152 v_0 := v.Args[0]
6153 // match: (CMOVWNE x y (InvertFlags cond))
6154 // result: (CMOVWNE x y cond)
6155 for {
6156 x := v_0
6157 y := v_1
6158 if v_2.Op != OpAMD64InvertFlags {
6159 break
6160 }
6161 cond := v_2.Args[0]
6162 v.reset(OpAMD64CMOVWNE)
6163 v.AddArg3(x, y, cond)
6164 return true
6165 }
6166
6167
6168 for {
6169 y := v_0
6170 if v_2.Op != OpAMD64FlagEQ {
6171 break
6172 }
6173 v.copyOf(y)
6174 return true
6175 }
6176
6177
6178 for {
6179 x := v_1
6180 if v_2.Op != OpAMD64FlagGT_UGT {
6181 break
6182 }
6183 v.copyOf(x)
6184 return true
6185 }
6186
6187
6188 for {
6189 x := v_1
6190 if v_2.Op != OpAMD64FlagGT_ULT {
6191 break
6192 }
6193 v.copyOf(x)
6194 return true
6195 }
6196
6197
6198 for {
6199 x := v_1
6200 if v_2.Op != OpAMD64FlagLT_ULT {
6201 break
6202 }
6203 v.copyOf(x)
6204 return true
6205 }
6206
6207
6208 for {
6209 x := v_1
6210 if v_2.Op != OpAMD64FlagLT_UGT {
6211 break
6212 }
6213 v.copyOf(x)
6214 return true
6215 }
6216 return false
6217 }
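// The CMP* rewrites below share one shape. A constant on the right folds
// into the CMP*const form; a constant on the left folds the same way but
// wraps the result in InvertFlags, since the operands traded places (for
// example, CMPL (MOVLconst [c]) x becomes InvertFlags (CMPLconst x [c])).
// canonLessThan imposes a canonical operand order, again via InvertFlags,
// and a comparison against a single-use load merges into the CMP*load
// form so the load issues as part of the compare instruction.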
6218 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6219 v_1 := v.Args[1]
6220 v_0 := v.Args[0]
6221 b := v.Block
6222 // match: (CMPB x (MOVLconst [c]))
6223 // result: (CMPBconst x [int8(c)])
6224 for {
6225 x := v_0
6226 if v_1.Op != OpAMD64MOVLconst {
6227 break
6228 }
6229 c := auxIntToInt32(v_1.AuxInt)
6230 v.reset(OpAMD64CMPBconst)
6231 v.AuxInt = int8ToAuxInt(int8(c))
6232 v.AddArg(x)
6233 return true
6234 }
6235 // match: (CMPB (MOVLconst [c]) x)
6236 // result: (InvertFlags (CMPBconst x [int8(c)]))
6237 for {
6238 if v_0.Op != OpAMD64MOVLconst {
6239 break
6240 }
6241 c := auxIntToInt32(v_0.AuxInt)
6242 x := v_1
6243 v.reset(OpAMD64InvertFlags)
6244 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6245 v0.AuxInt = int8ToAuxInt(int8(c))
6246 v0.AddArg(x)
6247 v.AddArg(v0)
6248 return true
6249 }
6250 // match: (CMPB x y)
6251 // cond: canonLessThan(x,y)
6252 // result: (InvertFlags (CMPB y x))
6253 for {
6254 x := v_0
6255 y := v_1
6256 if !(canonLessThan(x, y)) {
6257 break
6258 }
6259 v.reset(OpAMD64InvertFlags)
6260 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6261 v0.AddArg2(y, x)
6262 v.AddArg(v0)
6263 return true
6264 }
6265 // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
6266 // cond: canMergeLoad(v, l) && clobber(l)
6267 // result: (CMPBload {sym} [off] ptr x mem)
6268 for {
6269 l := v_0
6270 if l.Op != OpAMD64MOVBload {
6271 break
6272 }
6273 off := auxIntToInt32(l.AuxInt)
6274 sym := auxToSym(l.Aux)
6275 mem := l.Args[1]
6276 ptr := l.Args[0]
6277 x := v_1
6278 if !(canMergeLoad(v, l) && clobber(l)) {
6279 break
6280 }
6281 v.reset(OpAMD64CMPBload)
6282 v.AuxInt = int32ToAuxInt(off)
6283 v.Aux = symToAux(sym)
6284 v.AddArg3(ptr, x, mem)
6285 return true
6286 }
6287 // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
6288 // cond: canMergeLoad(v, l) && clobber(l)
6289 // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
6290 for {
6291 x := v_0
6292 l := v_1
6293 if l.Op != OpAMD64MOVBload {
6294 break
6295 }
6296 off := auxIntToInt32(l.AuxInt)
6297 sym := auxToSym(l.Aux)
6298 mem := l.Args[1]
6299 ptr := l.Args[0]
6300 if !(canMergeLoad(v, l) && clobber(l)) {
6301 break
6302 }
6303 v.reset(OpAMD64InvertFlags)
6304 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6305 v0.AuxInt = int32ToAuxInt(off)
6306 v0.Aux = symToAux(sym)
6307 v0.AddArg3(ptr, x, mem)
6308 v.AddArg(v0)
6309 return true
6310 }
6311 return false
6312 }
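// A CMP*const whose operand is itself a constant is decided at compile
// time: the result is one of five flag constants, each recording both the
// signed and the unsigned ordering (FlagEQ, FlagLT_ULT, FlagLT_UGT,
// FlagGT_ULT, FlagGT_UGT), because a later branch or SETcc may consume
// the flags either way. Narrow compares truncate first: for example,
// CMPBconst (MOVLconst [300]) [44] yields FlagEQ, since int8(300) == 44.
// A compare against zero becomes a self-TEST (for example TESTB x x),
// which sets the same flags with a shorter encoding.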
6313 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6314 v_0 := v.Args[0]
6315 b := v.Block
6316 // match: (CMPBconst (MOVLconst [x]) [y])
6317 // cond: int8(x)==y
6318 // result: (FlagEQ)
6319 for {
6320 y := auxIntToInt8(v.AuxInt)
6321 if v_0.Op != OpAMD64MOVLconst {
6322 break
6323 }
6324 x := auxIntToInt32(v_0.AuxInt)
6325 if !(int8(x) == y) {
6326 break
6327 }
6328 v.reset(OpAMD64FlagEQ)
6329 return true
6330 }
6331 // match: (CMPBconst (MOVLconst [x]) [y])
6332 // cond: int8(x)<y && uint8(x)<uint8(y)
6333 // result: (FlagLT_ULT)
6334 for {
6335 y := auxIntToInt8(v.AuxInt)
6336 if v_0.Op != OpAMD64MOVLconst {
6337 break
6338 }
6339 x := auxIntToInt32(v_0.AuxInt)
6340 if !(int8(x) < y && uint8(x) < uint8(y)) {
6341 break
6342 }
6343 v.reset(OpAMD64FlagLT_ULT)
6344 return true
6345 }
6346 // match: (CMPBconst (MOVLconst [x]) [y])
6347 // cond: int8(x)<y && uint8(x)>uint8(y)
6348 // result: (FlagLT_UGT)
6349 for {
6350 y := auxIntToInt8(v.AuxInt)
6351 if v_0.Op != OpAMD64MOVLconst {
6352 break
6353 }
6354 x := auxIntToInt32(v_0.AuxInt)
6355 if !(int8(x) < y && uint8(x) > uint8(y)) {
6356 break
6357 }
6358 v.reset(OpAMD64FlagLT_UGT)
6359 return true
6360 }
6361 // match: (CMPBconst (MOVLconst [x]) [y])
6362 // cond: int8(x)>y && uint8(x)<uint8(y)
6363 // result: (FlagGT_ULT)
6364 for {
6365 y := auxIntToInt8(v.AuxInt)
6366 if v_0.Op != OpAMD64MOVLconst {
6367 break
6368 }
6369 x := auxIntToInt32(v_0.AuxInt)
6370 if !(int8(x) > y && uint8(x) < uint8(y)) {
6371 break
6372 }
6373 v.reset(OpAMD64FlagGT_ULT)
6374 return true
6375 }
6376 // match: (CMPBconst (MOVLconst [x]) [y])
6377 // cond: int8(x)>y && uint8(x)>uint8(y)
6378 // result: (FlagGT_UGT)
6379 for {
6380 y := auxIntToInt8(v.AuxInt)
6381 if v_0.Op != OpAMD64MOVLconst {
6382 break
6383 }
6384 x := auxIntToInt32(v_0.AuxInt)
6385 if !(int8(x) > y && uint8(x) > uint8(y)) {
6386 break
6387 }
6388 v.reset(OpAMD64FlagGT_UGT)
6389 return true
6390 }
6391 // match: (CMPBconst (ANDLconst _ [m]) [n])
6392 // cond: 0 <= int8(m) && int8(m) < n
6393 // result: (FlagLT_ULT)
6394 for {
6395 n := auxIntToInt8(v.AuxInt)
6396 if v_0.Op != OpAMD64ANDLconst {
6397 break
6398 }
6399 m := auxIntToInt32(v_0.AuxInt)
6400 if !(0 <= int8(m) && int8(m) < n) {
6401 break
6402 }
6403 v.reset(OpAMD64FlagLT_ULT)
6404 return true
6405 }
6406 // match: (CMPBconst a:(ANDL x y) [0])
6407 // cond: a.Uses == 1
6408 // result: (TESTB x y)
6409 for {
6410 if auxIntToInt8(v.AuxInt) != 0 {
6411 break
6412 }
6413 a := v_0
6414 if a.Op != OpAMD64ANDL {
6415 break
6416 }
6417 y := a.Args[1]
6418 x := a.Args[0]
6419 if !(a.Uses == 1) {
6420 break
6421 }
6422 v.reset(OpAMD64TESTB)
6423 v.AddArg2(x, y)
6424 return true
6425 }
6426 // match: (CMPBconst a:(ANDLconst [c] x) [0])
6427 // cond: a.Uses == 1
6428 // result: (TESTBconst [int8(c)] x)
6429 for {
6430 if auxIntToInt8(v.AuxInt) != 0 {
6431 break
6432 }
6433 a := v_0
6434 if a.Op != OpAMD64ANDLconst {
6435 break
6436 }
6437 c := auxIntToInt32(a.AuxInt)
6438 x := a.Args[0]
6439 if !(a.Uses == 1) {
6440 break
6441 }
6442 v.reset(OpAMD64TESTBconst)
6443 v.AuxInt = int8ToAuxInt(int8(c))
6444 v.AddArg(x)
6445 return true
6446 }
6447 // match: (CMPBconst x [0])
6448 // result: (TESTB x x)
6449 for {
6450 if auxIntToInt8(v.AuxInt) != 0 {
6451 break
6452 }
6453 x := v_0
6454 v.reset(OpAMD64TESTB)
6455 v.AddArg2(x, x)
6456 return true
6457 }
6458 // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
6459 // cond: l.Uses == 1 && clobber(l)
6460 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
6461 for {
6462 c := auxIntToInt8(v.AuxInt)
6463 l := v_0
6464 if l.Op != OpAMD64MOVBload {
6465 break
6466 }
6467 off := auxIntToInt32(l.AuxInt)
6468 sym := auxToSym(l.Aux)
6469 mem := l.Args[1]
6470 ptr := l.Args[0]
6471 if !(l.Uses == 1 && clobber(l)) {
6472 break
6473 }
6474 b = l.Block
6475 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6476 v.copyOf(v0)
6477 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
6478 v0.Aux = symToAux(sym)
6479 v0.AddArg2(ptr, mem)
6480 return true
6481 }
6482 return false
6483 }
6484 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6485 v_1 := v.Args[1]
6486 v_0 := v.Args[0]
6487 // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
6488 // cond: ValAndOff(valoff1).canAdd32(off2)
6489 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6490 for {
6491 valoff1 := auxIntToValAndOff(v.AuxInt)
6492 sym := auxToSym(v.Aux)
6493 if v_0.Op != OpAMD64ADDQconst {
6494 break
6495 }
6496 off2 := auxIntToInt32(v_0.AuxInt)
6497 base := v_0.Args[0]
6498 mem := v_1
6499 if !(ValAndOff(valoff1).canAdd32(off2)) {
6500 break
6501 }
6502 v.reset(OpAMD64CMPBconstload)
6503 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6504 v.Aux = symToAux(sym)
6505 v.AddArg2(base, mem)
6506 return true
6507 }
6508 // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
6509 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
6510 // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
6511 for {
6512 valoff1 := auxIntToValAndOff(v.AuxInt)
6513 sym1 := auxToSym(v.Aux)
6514 if v_0.Op != OpAMD64LEAQ {
6515 break
6516 }
6517 off2 := auxIntToInt32(v_0.AuxInt)
6518 sym2 := auxToSym(v_0.Aux)
6519 base := v_0.Args[0]
6520 mem := v_1
6521 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6522 break
6523 }
6524 v.reset(OpAMD64CMPBconstload)
6525 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6526 v.Aux = symToAux(mergeSym(sym1, sym2))
6527 v.AddArg2(base, mem)
6528 return true
6529 }
6530 return false
6531 }
6532 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
6533 v_2 := v.Args[2]
6534 v_1 := v.Args[1]
6535 v_0 := v.Args[0]
6536 // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
6537 // cond: is32Bit(int64(off1)+int64(off2))
6538 // result: (CMPBload [off1+off2] {sym} base val mem)
6539 for {
6540 off1 := auxIntToInt32(v.AuxInt)
6541 sym := auxToSym(v.Aux)
6542 if v_0.Op != OpAMD64ADDQconst {
6543 break
6544 }
6545 off2 := auxIntToInt32(v_0.AuxInt)
6546 base := v_0.Args[0]
6547 val := v_1
6548 mem := v_2
6549 if !(is32Bit(int64(off1) + int64(off2))) {
6550 break
6551 }
6552 v.reset(OpAMD64CMPBload)
6553 v.AuxInt = int32ToAuxInt(off1 + off2)
6554 v.Aux = symToAux(sym)
6555 v.AddArg3(base, val, mem)
6556 return true
6557 }
6558 // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
6559 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
6560 // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
6561 for {
6562 off1 := auxIntToInt32(v.AuxInt)
6563 sym1 := auxToSym(v.Aux)
6564 if v_0.Op != OpAMD64LEAQ {
6565 break
6566 }
6567 off2 := auxIntToInt32(v_0.AuxInt)
6568 sym2 := auxToSym(v_0.Aux)
6569 base := v_0.Args[0]
6570 val := v_1
6571 mem := v_2
6572 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6573 break
6574 }
6575 v.reset(OpAMD64CMPBload)
6576 v.AuxInt = int32ToAuxInt(off1 + off2)
6577 v.Aux = symToAux(mergeSym(sym1, sym2))
6578 v.AddArg3(base, val, mem)
6579 return true
6580 }
6581 // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
6582 // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
6583 for {
6584 off := auxIntToInt32(v.AuxInt)
6585 sym := auxToSym(v.Aux)
6586 ptr := v_0
6587 if v_1.Op != OpAMD64MOVLconst {
6588 break
6589 }
6590 c := auxIntToInt32(v_1.AuxInt)
6591 mem := v_2
6592 v.reset(OpAMD64CMPBconstload)
6593 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
6594 v.Aux = symToAux(sym)
6595 v.AddArg2(ptr, mem)
6596 return true
6597 }
6598 return false
6599 }
6600 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
6601 v_1 := v.Args[1]
6602 v_0 := v.Args[0]
6603 b := v.Block
6604 // match: (CMPL x (MOVLconst [c]))
6605 // result: (CMPLconst x [c])
6606 for {
6607 x := v_0
6608 if v_1.Op != OpAMD64MOVLconst {
6609 break
6610 }
6611 c := auxIntToInt32(v_1.AuxInt)
6612 v.reset(OpAMD64CMPLconst)
6613 v.AuxInt = int32ToAuxInt(c)
6614 v.AddArg(x)
6615 return true
6616 }
6617
6618
6619 for {
6620 if v_0.Op != OpAMD64MOVLconst {
6621 break
6622 }
6623 c := auxIntToInt32(v_0.AuxInt)
6624 x := v_1
6625 v.reset(OpAMD64InvertFlags)
6626 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
6627 v0.AuxInt = int32ToAuxInt(c)
6628 v0.AddArg(x)
6629 v.AddArg(v0)
6630 return true
6631 }
6632
6633
6634
6635 for {
6636 x := v_0
6637 y := v_1
6638 if !(canonLessThan(x, y)) {
6639 break
6640 }
6641 v.reset(OpAMD64InvertFlags)
6642 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
6643 v0.AddArg2(y, x)
6644 v.AddArg(v0)
6645 return true
6646 }
6647
6648
6649
6650 for {
6651 l := v_0
6652 if l.Op != OpAMD64MOVLload {
6653 break
6654 }
6655 off := auxIntToInt32(l.AuxInt)
6656 sym := auxToSym(l.Aux)
6657 mem := l.Args[1]
6658 ptr := l.Args[0]
6659 x := v_1
6660 if !(canMergeLoad(v, l) && clobber(l)) {
6661 break
6662 }
6663 v.reset(OpAMD64CMPLload)
6664 v.AuxInt = int32ToAuxInt(off)
6665 v.Aux = symToAux(sym)
6666 v.AddArg3(ptr, x, mem)
6667 return true
6668 }
6669
6670
6671
6672 for {
6673 x := v_0
6674 l := v_1
6675 if l.Op != OpAMD64MOVLload {
6676 break
6677 }
6678 off := auxIntToInt32(l.AuxInt)
6679 sym := auxToSym(l.Aux)
6680 mem := l.Args[1]
6681 ptr := l.Args[0]
6682 if !(canMergeLoad(v, l) && clobber(l)) {
6683 break
6684 }
6685 v.reset(OpAMD64InvertFlags)
6686 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
6687 v0.AuxInt = int32ToAuxInt(off)
6688 v0.Aux = symToAux(sym)
6689 v0.AddArg3(ptr, x, mem)
6690 v.AddArg(v0)
6691 return true
6692 }
6693 return false
6694 }
6695 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
6696 v_0 := v.Args[0]
6697 b := v.Block
6698
6699
6700
6701 for {
6702 y := auxIntToInt32(v.AuxInt)
6703 if v_0.Op != OpAMD64MOVLconst {
6704 break
6705 }
6706 x := auxIntToInt32(v_0.AuxInt)
6707 if !(x == y) {
6708 break
6709 }
6710 v.reset(OpAMD64FlagEQ)
6711 return true
6712 }
6713
6714
6715
6716 for {
6717 y := auxIntToInt32(v.AuxInt)
6718 if v_0.Op != OpAMD64MOVLconst {
6719 break
6720 }
6721 x := auxIntToInt32(v_0.AuxInt)
6722 if !(x < y && uint32(x) < uint32(y)) {
6723 break
6724 }
6725 v.reset(OpAMD64FlagLT_ULT)
6726 return true
6727 }
6728
6729
6730
6731 for {
6732 y := auxIntToInt32(v.AuxInt)
6733 if v_0.Op != OpAMD64MOVLconst {
6734 break
6735 }
6736 x := auxIntToInt32(v_0.AuxInt)
6737 if !(x < y && uint32(x) > uint32(y)) {
6738 break
6739 }
6740 v.reset(OpAMD64FlagLT_UGT)
6741 return true
6742 }
6743
6744
6745
6746 for {
6747 y := auxIntToInt32(v.AuxInt)
6748 if v_0.Op != OpAMD64MOVLconst {
6749 break
6750 }
6751 x := auxIntToInt32(v_0.AuxInt)
6752 if !(x > y && uint32(x) < uint32(y)) {
6753 break
6754 }
6755 v.reset(OpAMD64FlagGT_ULT)
6756 return true
6757 }
6758
6759
6760
6761 for {
6762 y := auxIntToInt32(v.AuxInt)
6763 if v_0.Op != OpAMD64MOVLconst {
6764 break
6765 }
6766 x := auxIntToInt32(v_0.AuxInt)
6767 if !(x > y && uint32(x) > uint32(y)) {
6768 break
6769 }
6770 v.reset(OpAMD64FlagGT_UGT)
6771 return true
6772 }
6773 // match: (CMPLconst (SHRLconst _ [c]) [n])
6774 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
6775 // result: (FlagLT_ULT)
6776 for {
6777 n := auxIntToInt32(v.AuxInt)
6778 if v_0.Op != OpAMD64SHRLconst {
6779 break
6780 }
6781 c := auxIntToInt8(v_0.AuxInt)
6782 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
6783 break
6784 }
6785 v.reset(OpAMD64FlagLT_ULT)
6786 return true
6787 }
6788
6789
6790
6791 for {
6792 n := auxIntToInt32(v.AuxInt)
6793 if v_0.Op != OpAMD64ANDLconst {
6794 break
6795 }
6796 m := auxIntToInt32(v_0.AuxInt)
6797 if !(0 <= m && m < n) {
6798 break
6799 }
6800 v.reset(OpAMD64FlagLT_ULT)
6801 return true
6802 }
6803
6804
6805
6806 for {
6807 if auxIntToInt32(v.AuxInt) != 0 {
6808 break
6809 }
6810 a := v_0
6811 if a.Op != OpAMD64ANDL {
6812 break
6813 }
6814 y := a.Args[1]
6815 x := a.Args[0]
6816 if !(a.Uses == 1) {
6817 break
6818 }
6819 v.reset(OpAMD64TESTL)
6820 v.AddArg2(x, y)
6821 return true
6822 }
6823
6824
6825
6826 for {
6827 if auxIntToInt32(v.AuxInt) != 0 {
6828 break
6829 }
6830 a := v_0
6831 if a.Op != OpAMD64ANDLconst {
6832 break
6833 }
6834 c := auxIntToInt32(a.AuxInt)
6835 x := a.Args[0]
6836 if !(a.Uses == 1) {
6837 break
6838 }
6839 v.reset(OpAMD64TESTLconst)
6840 v.AuxInt = int32ToAuxInt(c)
6841 v.AddArg(x)
6842 return true
6843 }
6844
6845
6846 for {
6847 if auxIntToInt32(v.AuxInt) != 0 {
6848 break
6849 }
6850 x := v_0
6851 v.reset(OpAMD64TESTL)
6852 v.AddArg2(x, x)
6853 return true
6854 }
6855
6856
6857
6858 for {
6859 c := auxIntToInt32(v.AuxInt)
6860 l := v_0
6861 if l.Op != OpAMD64MOVLload {
6862 break
6863 }
6864 off := auxIntToInt32(l.AuxInt)
6865 sym := auxToSym(l.Aux)
6866 mem := l.Args[1]
6867 ptr := l.Args[0]
6868 if !(l.Uses == 1 && clobber(l)) {
6869 break
6870 }
6871 b = l.Block
6872 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
6873 v.copyOf(v0)
6874 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6875 v0.Aux = symToAux(sym)
6876 v0.AddArg2(ptr, mem)
6877 return true
6878 }
6879 return false
6880 }
6881 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
6882 v_1 := v.Args[1]
6883 v_0 := v.Args[0]
6884
6885
6886
6887 for {
6888 valoff1 := auxIntToValAndOff(v.AuxInt)
6889 sym := auxToSym(v.Aux)
6890 if v_0.Op != OpAMD64ADDQconst {
6891 break
6892 }
6893 off2 := auxIntToInt32(v_0.AuxInt)
6894 base := v_0.Args[0]
6895 mem := v_1
6896 if !(ValAndOff(valoff1).canAdd32(off2)) {
6897 break
6898 }
6899 v.reset(OpAMD64CMPLconstload)
6900 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6901 v.Aux = symToAux(sym)
6902 v.AddArg2(base, mem)
6903 return true
6904 }
6905
6906
6907
6908 for {
6909 valoff1 := auxIntToValAndOff(v.AuxInt)
6910 sym1 := auxToSym(v.Aux)
6911 if v_0.Op != OpAMD64LEAQ {
6912 break
6913 }
6914 off2 := auxIntToInt32(v_0.AuxInt)
6915 sym2 := auxToSym(v_0.Aux)
6916 base := v_0.Args[0]
6917 mem := v_1
6918 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6919 break
6920 }
6921 v.reset(OpAMD64CMPLconstload)
6922 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6923 v.Aux = symToAux(mergeSym(sym1, sym2))
6924 v.AddArg2(base, mem)
6925 return true
6926 }
6927 return false
6928 }
6929 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
6930 v_2 := v.Args[2]
6931 v_1 := v.Args[1]
6932 v_0 := v.Args[0]
6933
6934
6935
6936 for {
6937 off1 := auxIntToInt32(v.AuxInt)
6938 sym := auxToSym(v.Aux)
6939 if v_0.Op != OpAMD64ADDQconst {
6940 break
6941 }
6942 off2 := auxIntToInt32(v_0.AuxInt)
6943 base := v_0.Args[0]
6944 val := v_1
6945 mem := v_2
6946 if !(is32Bit(int64(off1) + int64(off2))) {
6947 break
6948 }
6949 v.reset(OpAMD64CMPLload)
6950 v.AuxInt = int32ToAuxInt(off1 + off2)
6951 v.Aux = symToAux(sym)
6952 v.AddArg3(base, val, mem)
6953 return true
6954 }
6955
6956
6957
6958 for {
6959 off1 := auxIntToInt32(v.AuxInt)
6960 sym1 := auxToSym(v.Aux)
6961 if v_0.Op != OpAMD64LEAQ {
6962 break
6963 }
6964 off2 := auxIntToInt32(v_0.AuxInt)
6965 sym2 := auxToSym(v_0.Aux)
6966 base := v_0.Args[0]
6967 val := v_1
6968 mem := v_2
6969 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6970 break
6971 }
6972 v.reset(OpAMD64CMPLload)
6973 v.AuxInt = int32ToAuxInt(off1 + off2)
6974 v.Aux = symToAux(mergeSym(sym1, sym2))
6975 v.AddArg3(base, val, mem)
6976 return true
6977 }
6978
6979
6980 for {
6981 off := auxIntToInt32(v.AuxInt)
6982 sym := auxToSym(v.Aux)
6983 ptr := v_0
6984 if v_1.Op != OpAMD64MOVLconst {
6985 break
6986 }
6987 c := auxIntToInt32(v_1.AuxInt)
6988 mem := v_2
6989 v.reset(OpAMD64CMPLconstload)
6990 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6991 v.Aux = symToAux(sym)
6992 v.AddArg2(ptr, mem)
6993 return true
6994 }
6995 return false
6996 }
6997 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
6998 v_1 := v.Args[1]
6999 v_0 := v.Args[0]
7000 b := v.Block
7001 // match: (CMPQ x (MOVQconst [c]))
7002 // cond: is32Bit(c)
7003 // result: (CMPQconst x [int32(c)])
7004 for {
7005 x := v_0
7006 if v_1.Op != OpAMD64MOVQconst {
7007 break
7008 }
7009 c := auxIntToInt64(v_1.AuxInt)
7010 if !(is32Bit(c)) {
7011 break
7012 }
7013 v.reset(OpAMD64CMPQconst)
7014 v.AuxInt = int32ToAuxInt(int32(c))
7015 v.AddArg(x)
7016 return true
7017 }
7018
7019
7020
7021 for {
7022 if v_0.Op != OpAMD64MOVQconst {
7023 break
7024 }
7025 c := auxIntToInt64(v_0.AuxInt)
7026 x := v_1
7027 if !(is32Bit(c)) {
7028 break
7029 }
7030 v.reset(OpAMD64InvertFlags)
7031 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7032 v0.AuxInt = int32ToAuxInt(int32(c))
7033 v0.AddArg(x)
7034 v.AddArg(v0)
7035 return true
7036 }
7037
7038
7039
7040 for {
7041 x := v_0
7042 y := v_1
7043 if !(canonLessThan(x, y)) {
7044 break
7045 }
7046 v.reset(OpAMD64InvertFlags)
7047 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7048 v0.AddArg2(y, x)
7049 v.AddArg(v0)
7050 return true
7051 }
7052 // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
7053 // cond: x==y
7054 // result: (FlagEQ)
7055 for {
7056 if v_0.Op != OpAMD64MOVQconst {
7057 break
7058 }
7059 x := auxIntToInt64(v_0.AuxInt)
7060 if v_1.Op != OpAMD64MOVQconst {
7061 break
7062 }
7063 y := auxIntToInt64(v_1.AuxInt)
7064 if !(x == y) {
7065 break
7066 }
7067 v.reset(OpAMD64FlagEQ)
7068 return true
7069 }
7070
7071
7072
7073 for {
7074 if v_0.Op != OpAMD64MOVQconst {
7075 break
7076 }
7077 x := auxIntToInt64(v_0.AuxInt)
7078 if v_1.Op != OpAMD64MOVQconst {
7079 break
7080 }
7081 y := auxIntToInt64(v_1.AuxInt)
7082 if !(x < y && uint64(x) < uint64(y)) {
7083 break
7084 }
7085 v.reset(OpAMD64FlagLT_ULT)
7086 return true
7087 }
7088
7089
7090
7091 for {
7092 if v_0.Op != OpAMD64MOVQconst {
7093 break
7094 }
7095 x := auxIntToInt64(v_0.AuxInt)
7096 if v_1.Op != OpAMD64MOVQconst {
7097 break
7098 }
7099 y := auxIntToInt64(v_1.AuxInt)
7100 if !(x < y && uint64(x) > uint64(y)) {
7101 break
7102 }
7103 v.reset(OpAMD64FlagLT_UGT)
7104 return true
7105 }
7106
7107
7108
7109 for {
7110 if v_0.Op != OpAMD64MOVQconst {
7111 break
7112 }
7113 x := auxIntToInt64(v_0.AuxInt)
7114 if v_1.Op != OpAMD64MOVQconst {
7115 break
7116 }
7117 y := auxIntToInt64(v_1.AuxInt)
7118 if !(x > y && uint64(x) < uint64(y)) {
7119 break
7120 }
7121 v.reset(OpAMD64FlagGT_ULT)
7122 return true
7123 }
7124
7125
7126
7127 for {
7128 if v_0.Op != OpAMD64MOVQconst {
7129 break
7130 }
7131 x := auxIntToInt64(v_0.AuxInt)
7132 if v_1.Op != OpAMD64MOVQconst {
7133 break
7134 }
7135 y := auxIntToInt64(v_1.AuxInt)
7136 if !(x > y && uint64(x) > uint64(y)) {
7137 break
7138 }
7139 v.reset(OpAMD64FlagGT_UGT)
7140 return true
7141 }
7142
7143
7144
7145 for {
7146 l := v_0
7147 if l.Op != OpAMD64MOVQload {
7148 break
7149 }
7150 off := auxIntToInt32(l.AuxInt)
7151 sym := auxToSym(l.Aux)
7152 mem := l.Args[1]
7153 ptr := l.Args[0]
7154 x := v_1
7155 if !(canMergeLoad(v, l) && clobber(l)) {
7156 break
7157 }
7158 v.reset(OpAMD64CMPQload)
7159 v.AuxInt = int32ToAuxInt(off)
7160 v.Aux = symToAux(sym)
7161 v.AddArg3(ptr, x, mem)
7162 return true
7163 }
7164
7165
7166
7167 for {
7168 x := v_0
7169 l := v_1
7170 if l.Op != OpAMD64MOVQload {
7171 break
7172 }
7173 off := auxIntToInt32(l.AuxInt)
7174 sym := auxToSym(l.Aux)
7175 mem := l.Args[1]
7176 ptr := l.Args[0]
7177 if !(canMergeLoad(v, l) && clobber(l)) {
7178 break
7179 }
7180 v.reset(OpAMD64InvertFlags)
7181 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7182 v0.AuxInt = int32ToAuxInt(off)
7183 v0.Aux = symToAux(sym)
7184 v0.AddArg3(ptr, x, mem)
7185 v.AddArg(v0)
7186 return true
7187 }
7188 return false
7189 }
7190 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7191 v_0 := v.Args[0]
7192 b := v.Block
7193
7194
7195
7196 for {
7197 y := auxIntToInt32(v.AuxInt)
7198 if v_0.Op != OpAMD64MOVQconst {
7199 break
7200 }
7201 x := auxIntToInt64(v_0.AuxInt)
7202 if !(x == int64(y)) {
7203 break
7204 }
7205 v.reset(OpAMD64FlagEQ)
7206 return true
7207 }
7208
7209
7210
7211 for {
7212 y := auxIntToInt32(v.AuxInt)
7213 if v_0.Op != OpAMD64MOVQconst {
7214 break
7215 }
7216 x := auxIntToInt64(v_0.AuxInt)
7217 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7218 break
7219 }
7220 v.reset(OpAMD64FlagLT_ULT)
7221 return true
7222 }
7223
7224
7225
7226 for {
7227 y := auxIntToInt32(v.AuxInt)
7228 if v_0.Op != OpAMD64MOVQconst {
7229 break
7230 }
7231 x := auxIntToInt64(v_0.AuxInt)
7232 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7233 break
7234 }
7235 v.reset(OpAMD64FlagLT_UGT)
7236 return true
7237 }
7238
7239
7240
7241 for {
7242 y := auxIntToInt32(v.AuxInt)
7243 if v_0.Op != OpAMD64MOVQconst {
7244 break
7245 }
7246 x := auxIntToInt64(v_0.AuxInt)
7247 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7248 break
7249 }
7250 v.reset(OpAMD64FlagGT_ULT)
7251 return true
7252 }
7253
7254
7255
7256 for {
7257 y := auxIntToInt32(v.AuxInt)
7258 if v_0.Op != OpAMD64MOVQconst {
7259 break
7260 }
7261 x := auxIntToInt64(v_0.AuxInt)
7262 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7263 break
7264 }
7265 v.reset(OpAMD64FlagGT_UGT)
7266 return true
7267 }
7268 // match: (CMPQconst (MOVBQZX _) [c])
7269 // cond: 0xFF < c
7270 // result: (FlagLT_ULT)
7271 for {
7272 c := auxIntToInt32(v.AuxInt)
7273 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7274 break
7275 }
7276 v.reset(OpAMD64FlagLT_ULT)
7277 return true
7278 }
7279 // match: (CMPQconst (MOVWQZX _) [c])
7280 // cond: 0xFFFF < c
7281 // result: (FlagLT_ULT)
7282 for {
7283 c := auxIntToInt32(v.AuxInt)
7284 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7285 break
7286 }
7287 v.reset(OpAMD64FlagLT_ULT)
7288 return true
7289 }
7290 // match: (CMPQconst (SHRQconst _ [c]) [n])
7291 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
7292 // result: (FlagLT_ULT)
7293 for {
7294 n := auxIntToInt32(v.AuxInt)
7295 if v_0.Op != OpAMD64SHRQconst {
7296 break
7297 }
7298 c := auxIntToInt8(v_0.AuxInt)
7299 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7300 break
7301 }
7302 v.reset(OpAMD64FlagLT_ULT)
7303 return true
7304 }
7305
7306
7307
7308 for {
7309 n := auxIntToInt32(v.AuxInt)
7310 if v_0.Op != OpAMD64ANDQconst {
7311 break
7312 }
7313 m := auxIntToInt32(v_0.AuxInt)
7314 if !(0 <= m && m < n) {
7315 break
7316 }
7317 v.reset(OpAMD64FlagLT_ULT)
7318 return true
7319 }
7320
7321
7322
7323 for {
7324 n := auxIntToInt32(v.AuxInt)
7325 if v_0.Op != OpAMD64ANDLconst {
7326 break
7327 }
7328 m := auxIntToInt32(v_0.AuxInt)
7329 if !(0 <= m && m < n) {
7330 break
7331 }
7332 v.reset(OpAMD64FlagLT_ULT)
7333 return true
7334 }
7335
7336
7337
7338 for {
7339 if auxIntToInt32(v.AuxInt) != 0 {
7340 break
7341 }
7342 a := v_0
7343 if a.Op != OpAMD64ANDQ {
7344 break
7345 }
7346 y := a.Args[1]
7347 x := a.Args[0]
7348 if !(a.Uses == 1) {
7349 break
7350 }
7351 v.reset(OpAMD64TESTQ)
7352 v.AddArg2(x, y)
7353 return true
7354 }
7355
7356
7357
7358 for {
7359 if auxIntToInt32(v.AuxInt) != 0 {
7360 break
7361 }
7362 a := v_0
7363 if a.Op != OpAMD64ANDQconst {
7364 break
7365 }
7366 c := auxIntToInt32(a.AuxInt)
7367 x := a.Args[0]
7368 if !(a.Uses == 1) {
7369 break
7370 }
7371 v.reset(OpAMD64TESTQconst)
7372 v.AuxInt = int32ToAuxInt(c)
7373 v.AddArg(x)
7374 return true
7375 }
7376
7377
7378 for {
7379 if auxIntToInt32(v.AuxInt) != 0 {
7380 break
7381 }
7382 x := v_0
7383 v.reset(OpAMD64TESTQ)
7384 v.AddArg2(x, x)
7385 return true
7386 }
7387
7388
7389
7390 for {
7391 c := auxIntToInt32(v.AuxInt)
7392 l := v_0
7393 if l.Op != OpAMD64MOVQload {
7394 break
7395 }
7396 off := auxIntToInt32(l.AuxInt)
7397 sym := auxToSym(l.Aux)
7398 mem := l.Args[1]
7399 ptr := l.Args[0]
7400 if !(l.Uses == 1 && clobber(l)) {
7401 break
7402 }
7403 b = l.Block
7404 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7405 v.copyOf(v0)
7406 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7407 v0.Aux = symToAux(sym)
7408 v0.AddArg2(ptr, mem)
7409 return true
7410 }
7411 return false
7412 }
7413 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7414 v_1 := v.Args[1]
7415 v_0 := v.Args[0]
7416
7417
7418
7419 for {
7420 valoff1 := auxIntToValAndOff(v.AuxInt)
7421 sym := auxToSym(v.Aux)
7422 if v_0.Op != OpAMD64ADDQconst {
7423 break
7424 }
7425 off2 := auxIntToInt32(v_0.AuxInt)
7426 base := v_0.Args[0]
7427 mem := v_1
7428 if !(ValAndOff(valoff1).canAdd32(off2)) {
7429 break
7430 }
7431 v.reset(OpAMD64CMPQconstload)
7432 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7433 v.Aux = symToAux(sym)
7434 v.AddArg2(base, mem)
7435 return true
7436 }
7437
7438
7439
7440 for {
7441 valoff1 := auxIntToValAndOff(v.AuxInt)
7442 sym1 := auxToSym(v.Aux)
7443 if v_0.Op != OpAMD64LEAQ {
7444 break
7445 }
7446 off2 := auxIntToInt32(v_0.AuxInt)
7447 sym2 := auxToSym(v_0.Aux)
7448 base := v_0.Args[0]
7449 mem := v_1
7450 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7451 break
7452 }
7453 v.reset(OpAMD64CMPQconstload)
7454 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7455 v.Aux = symToAux(mergeSym(sym1, sym2))
7456 v.AddArg2(base, mem)
7457 return true
7458 }
7459 return false
7460 }
7461 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7462 v_2 := v.Args[2]
7463 v_1 := v.Args[1]
7464 v_0 := v.Args[0]
7465
7466
7467
7468 for {
7469 off1 := auxIntToInt32(v.AuxInt)
7470 sym := auxToSym(v.Aux)
7471 if v_0.Op != OpAMD64ADDQconst {
7472 break
7473 }
7474 off2 := auxIntToInt32(v_0.AuxInt)
7475 base := v_0.Args[0]
7476 val := v_1
7477 mem := v_2
7478 if !(is32Bit(int64(off1) + int64(off2))) {
7479 break
7480 }
7481 v.reset(OpAMD64CMPQload)
7482 v.AuxInt = int32ToAuxInt(off1 + off2)
7483 v.Aux = symToAux(sym)
7484 v.AddArg3(base, val, mem)
7485 return true
7486 }
7487
7488
7489
7490 for {
7491 off1 := auxIntToInt32(v.AuxInt)
7492 sym1 := auxToSym(v.Aux)
7493 if v_0.Op != OpAMD64LEAQ {
7494 break
7495 }
7496 off2 := auxIntToInt32(v_0.AuxInt)
7497 sym2 := auxToSym(v_0.Aux)
7498 base := v_0.Args[0]
7499 val := v_1
7500 mem := v_2
7501 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7502 break
7503 }
7504 v.reset(OpAMD64CMPQload)
7505 v.AuxInt = int32ToAuxInt(off1 + off2)
7506 v.Aux = symToAux(mergeSym(sym1, sym2))
7507 v.AddArg3(base, val, mem)
7508 return true
7509 }
7510 // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
7511 // cond: validVal(c)
7512 // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7513 for {
7514 off := auxIntToInt32(v.AuxInt)
7515 sym := auxToSym(v.Aux)
7516 ptr := v_0
7517 if v_1.Op != OpAMD64MOVQconst {
7518 break
7519 }
7520 c := auxIntToInt64(v_1.AuxInt)
7521 mem := v_2
7522 if !(validVal(c)) {
7523 break
7524 }
7525 v.reset(OpAMD64CMPQconstload)
7526 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7527 v.Aux = symToAux(sym)
7528 v.AddArg2(ptr, mem)
7529 return true
7530 }
7531 return false
7532 }
7533 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7534 v_1 := v.Args[1]
7535 v_0 := v.Args[0]
7536 b := v.Block
7537 // match: (CMPW x (MOVLconst [c]))
7538 // result: (CMPWconst x [int16(c)])
7539 for {
7540 x := v_0
7541 if v_1.Op != OpAMD64MOVLconst {
7542 break
7543 }
7544 c := auxIntToInt32(v_1.AuxInt)
7545 v.reset(OpAMD64CMPWconst)
7546 v.AuxInt = int16ToAuxInt(int16(c))
7547 v.AddArg(x)
7548 return true
7549 }
7550
7551
7552 for {
7553 if v_0.Op != OpAMD64MOVLconst {
7554 break
7555 }
7556 c := auxIntToInt32(v_0.AuxInt)
7557 x := v_1
7558 v.reset(OpAMD64InvertFlags)
7559 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7560 v0.AuxInt = int16ToAuxInt(int16(c))
7561 v0.AddArg(x)
7562 v.AddArg(v0)
7563 return true
7564 }
7565
7566
7567
7568 for {
7569 x := v_0
7570 y := v_1
7571 if !(canonLessThan(x, y)) {
7572 break
7573 }
7574 v.reset(OpAMD64InvertFlags)
7575 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7576 v0.AddArg2(y, x)
7577 v.AddArg(v0)
7578 return true
7579 }
7580
7581
7582
7583 for {
7584 l := v_0
7585 if l.Op != OpAMD64MOVWload {
7586 break
7587 }
7588 off := auxIntToInt32(l.AuxInt)
7589 sym := auxToSym(l.Aux)
7590 mem := l.Args[1]
7591 ptr := l.Args[0]
7592 x := v_1
7593 if !(canMergeLoad(v, l) && clobber(l)) {
7594 break
7595 }
7596 v.reset(OpAMD64CMPWload)
7597 v.AuxInt = int32ToAuxInt(off)
7598 v.Aux = symToAux(sym)
7599 v.AddArg3(ptr, x, mem)
7600 return true
7601 }
7602
7603
7604
7605 for {
7606 x := v_0
7607 l := v_1
7608 if l.Op != OpAMD64MOVWload {
7609 break
7610 }
7611 off := auxIntToInt32(l.AuxInt)
7612 sym := auxToSym(l.Aux)
7613 mem := l.Args[1]
7614 ptr := l.Args[0]
7615 if !(canMergeLoad(v, l) && clobber(l)) {
7616 break
7617 }
7618 v.reset(OpAMD64InvertFlags)
7619 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7620 v0.AuxInt = int32ToAuxInt(off)
7621 v0.Aux = symToAux(sym)
7622 v0.AddArg3(ptr, x, mem)
7623 v.AddArg(v0)
7624 return true
7625 }
7626 return false
7627 }
7628 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7629 v_0 := v.Args[0]
7630 b := v.Block
7631
7632
7633
7634 for {
7635 y := auxIntToInt16(v.AuxInt)
7636 if v_0.Op != OpAMD64MOVLconst {
7637 break
7638 }
7639 x := auxIntToInt32(v_0.AuxInt)
7640 if !(int16(x) == y) {
7641 break
7642 }
7643 v.reset(OpAMD64FlagEQ)
7644 return true
7645 }
7646
7647
7648
7649 for {
7650 y := auxIntToInt16(v.AuxInt)
7651 if v_0.Op != OpAMD64MOVLconst {
7652 break
7653 }
7654 x := auxIntToInt32(v_0.AuxInt)
7655 if !(int16(x) < y && uint16(x) < uint16(y)) {
7656 break
7657 }
7658 v.reset(OpAMD64FlagLT_ULT)
7659 return true
7660 }
7661
7662
7663
7664 for {
7665 y := auxIntToInt16(v.AuxInt)
7666 if v_0.Op != OpAMD64MOVLconst {
7667 break
7668 }
7669 x := auxIntToInt32(v_0.AuxInt)
7670 if !(int16(x) < y && uint16(x) > uint16(y)) {
7671 break
7672 }
7673 v.reset(OpAMD64FlagLT_UGT)
7674 return true
7675 }
7676
7677
7678
7679 for {
7680 y := auxIntToInt16(v.AuxInt)
7681 if v_0.Op != OpAMD64MOVLconst {
7682 break
7683 }
7684 x := auxIntToInt32(v_0.AuxInt)
7685 if !(int16(x) > y && uint16(x) < uint16(y)) {
7686 break
7687 }
7688 v.reset(OpAMD64FlagGT_ULT)
7689 return true
7690 }
7691
7692
7693
7694 for {
7695 y := auxIntToInt16(v.AuxInt)
7696 if v_0.Op != OpAMD64MOVLconst {
7697 break
7698 }
7699 x := auxIntToInt32(v_0.AuxInt)
7700 if !(int16(x) > y && uint16(x) > uint16(y)) {
7701 break
7702 }
7703 v.reset(OpAMD64FlagGT_UGT)
7704 return true
7705 }
7706
7707
7708
7709 for {
7710 n := auxIntToInt16(v.AuxInt)
7711 if v_0.Op != OpAMD64ANDLconst {
7712 break
7713 }
7714 m := auxIntToInt32(v_0.AuxInt)
7715 if !(0 <= int16(m) && int16(m) < n) {
7716 break
7717 }
7718 v.reset(OpAMD64FlagLT_ULT)
7719 return true
7720 }
7721
7722
7723
7724 for {
7725 if auxIntToInt16(v.AuxInt) != 0 {
7726 break
7727 }
7728 a := v_0
7729 if a.Op != OpAMD64ANDL {
7730 break
7731 }
7732 y := a.Args[1]
7733 x := a.Args[0]
7734 if !(a.Uses == 1) {
7735 break
7736 }
7737 v.reset(OpAMD64TESTW)
7738 v.AddArg2(x, y)
7739 return true
7740 }
7741
7742
7743
7744 for {
7745 if auxIntToInt16(v.AuxInt) != 0 {
7746 break
7747 }
7748 a := v_0
7749 if a.Op != OpAMD64ANDLconst {
7750 break
7751 }
7752 c := auxIntToInt32(a.AuxInt)
7753 x := a.Args[0]
7754 if !(a.Uses == 1) {
7755 break
7756 }
7757 v.reset(OpAMD64TESTWconst)
7758 v.AuxInt = int16ToAuxInt(int16(c))
7759 v.AddArg(x)
7760 return true
7761 }
7762
7763
7764 for {
7765 if auxIntToInt16(v.AuxInt) != 0 {
7766 break
7767 }
7768 x := v_0
7769 v.reset(OpAMD64TESTW)
7770 v.AddArg2(x, x)
7771 return true
7772 }
7773
7774
7775
7776 for {
7777 c := auxIntToInt16(v.AuxInt)
7778 l := v_0
7779 if l.Op != OpAMD64MOVWload {
7780 break
7781 }
7782 off := auxIntToInt32(l.AuxInt)
7783 sym := auxToSym(l.Aux)
7784 mem := l.Args[1]
7785 ptr := l.Args[0]
7786 if !(l.Uses == 1 && clobber(l)) {
7787 break
7788 }
7789 b = l.Block
7790 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
7791 v.copyOf(v0)
7792 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7793 v0.Aux = symToAux(sym)
7794 v0.AddArg2(ptr, mem)
7795 return true
7796 }
7797 return false
7798 }
7799 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
7800 v_1 := v.Args[1]
7801 v_0 := v.Args[0]
7802
7803
7804
7805 for {
7806 valoff1 := auxIntToValAndOff(v.AuxInt)
7807 sym := auxToSym(v.Aux)
7808 if v_0.Op != OpAMD64ADDQconst {
7809 break
7810 }
7811 off2 := auxIntToInt32(v_0.AuxInt)
7812 base := v_0.Args[0]
7813 mem := v_1
7814 if !(ValAndOff(valoff1).canAdd32(off2)) {
7815 break
7816 }
7817 v.reset(OpAMD64CMPWconstload)
7818 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7819 v.Aux = symToAux(sym)
7820 v.AddArg2(base, mem)
7821 return true
7822 }
7823
7824
7825
7826 for {
7827 valoff1 := auxIntToValAndOff(v.AuxInt)
7828 sym1 := auxToSym(v.Aux)
7829 if v_0.Op != OpAMD64LEAQ {
7830 break
7831 }
7832 off2 := auxIntToInt32(v_0.AuxInt)
7833 sym2 := auxToSym(v_0.Aux)
7834 base := v_0.Args[0]
7835 mem := v_1
7836 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7837 break
7838 }
7839 v.reset(OpAMD64CMPWconstload)
7840 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7841 v.Aux = symToAux(mergeSym(sym1, sym2))
7842 v.AddArg2(base, mem)
7843 return true
7844 }
7845 return false
7846 }
7847 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
7848 v_2 := v.Args[2]
7849 v_1 := v.Args[1]
7850 v_0 := v.Args[0]
7851
7852
7853
7854 for {
7855 off1 := auxIntToInt32(v.AuxInt)
7856 sym := auxToSym(v.Aux)
7857 if v_0.Op != OpAMD64ADDQconst {
7858 break
7859 }
7860 off2 := auxIntToInt32(v_0.AuxInt)
7861 base := v_0.Args[0]
7862 val := v_1
7863 mem := v_2
7864 if !(is32Bit(int64(off1) + int64(off2))) {
7865 break
7866 }
7867 v.reset(OpAMD64CMPWload)
7868 v.AuxInt = int32ToAuxInt(off1 + off2)
7869 v.Aux = symToAux(sym)
7870 v.AddArg3(base, val, mem)
7871 return true
7872 }
7873
7874
7875
7876 for {
7877 off1 := auxIntToInt32(v.AuxInt)
7878 sym1 := auxToSym(v.Aux)
7879 if v_0.Op != OpAMD64LEAQ {
7880 break
7881 }
7882 off2 := auxIntToInt32(v_0.AuxInt)
7883 sym2 := auxToSym(v_0.Aux)
7884 base := v_0.Args[0]
7885 val := v_1
7886 mem := v_2
7887 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7888 break
7889 }
7890 v.reset(OpAMD64CMPWload)
7891 v.AuxInt = int32ToAuxInt(off1 + off2)
7892 v.Aux = symToAux(mergeSym(sym1, sym2))
7893 v.AddArg3(base, val, mem)
7894 return true
7895 }
7896
7897
7898 for {
7899 off := auxIntToInt32(v.AuxInt)
7900 sym := auxToSym(v.Aux)
7901 ptr := v_0
7902 if v_1.Op != OpAMD64MOVLconst {
7903 break
7904 }
7905 c := auxIntToInt32(v_1.AuxInt)
7906 mem := v_2
7907 v.reset(OpAMD64CMPWconstload)
7908 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
7909 v.Aux = symToAux(sym)
7910 v.AddArg2(ptr, mem)
7911 return true
7912 }
7913 return false
7914 }
7915 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
7916 v_3 := v.Args[3]
7917 v_2 := v.Args[2]
7918 v_1 := v.Args[1]
7919 v_0 := v.Args[0]
7920 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7921 // cond: is32Bit(int64(off1)+int64(off2))
7922 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
7923 for {
7924 off1 := auxIntToInt32(v.AuxInt)
7925 sym := auxToSym(v.Aux)
7926 if v_0.Op != OpAMD64ADDQconst {
7927 break
7928 }
7929 off2 := auxIntToInt32(v_0.AuxInt)
7930 ptr := v_0.Args[0]
7931 old := v_1
7932 new_ := v_2
7933 mem := v_3
7934 if !(is32Bit(int64(off1) + int64(off2))) {
7935 break
7936 }
7937 v.reset(OpAMD64CMPXCHGLlock)
7938 v.AuxInt = int32ToAuxInt(off1 + off2)
7939 v.Aux = symToAux(sym)
7940 v.AddArg4(ptr, old, new_, mem)
7941 return true
7942 }
7943 return false
7944 }
7945 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
7946 v_3 := v.Args[3]
7947 v_2 := v.Args[2]
7948 v_1 := v.Args[1]
7949 v_0 := v.Args[0]
7950 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
7951 // cond: is32Bit(int64(off1)+int64(off2))
7952 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
7953 for {
7954 off1 := auxIntToInt32(v.AuxInt)
7955 sym := auxToSym(v.Aux)
7956 if v_0.Op != OpAMD64ADDQconst {
7957 break
7958 }
7959 off2 := auxIntToInt32(v_0.AuxInt)
7960 ptr := v_0.Args[0]
7961 old := v_1
7962 new_ := v_2
7963 mem := v_3
7964 if !(is32Bit(int64(off1) + int64(off2))) {
7965 break
7966 }
7967 v.reset(OpAMD64CMPXCHGQlock)
7968 v.AuxInt = int32ToAuxInt(off1 + off2)
7969 v.Aux = symToAux(sym)
7970 v.AddArg4(ptr, old, new_, mem)
7971 return true
7972 }
7973 return false
7974 }
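// DIVSD and DIVSS fold a single-use load of the divisor into the divide
// itself (DIVSDload, DIVSSload). Only the second operand is a candidate:
// SSE division takes its memory operand on the divisor side, and division
// is not commutative, so a load feeding the dividend cannot be merged.
// canMergeLoadClobber checks that folding the load into an instruction
// that also overwrites its first operand is safe here.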
7975 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
7976 v_1 := v.Args[1]
7977 v_0 := v.Args[0]
7978 // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
7979 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
7980 // result: (DIVSDload x [off] {sym} ptr mem)
7981 for {
7982 x := v_0
7983 l := v_1
7984 if l.Op != OpAMD64MOVSDload {
7985 break
7986 }
7987 off := auxIntToInt32(l.AuxInt)
7988 sym := auxToSym(l.Aux)
7989 mem := l.Args[1]
7990 ptr := l.Args[0]
7991 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
7992 break
7993 }
7994 v.reset(OpAMD64DIVSDload)
7995 v.AuxInt = int32ToAuxInt(off)
7996 v.Aux = symToAux(sym)
7997 v.AddArg3(x, ptr, mem)
7998 return true
7999 }
8000 return false
8001 }
8002 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8003 v_2 := v.Args[2]
8004 v_1 := v.Args[1]
8005 v_0 := v.Args[0]
8006 // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
8007 // cond: is32Bit(int64(off1)+int64(off2))
8008 // result: (DIVSDload [off1+off2] {sym} val base mem)
8009 for {
8010 off1 := auxIntToInt32(v.AuxInt)
8011 sym := auxToSym(v.Aux)
8012 val := v_0
8013 if v_1.Op != OpAMD64ADDQconst {
8014 break
8015 }
8016 off2 := auxIntToInt32(v_1.AuxInt)
8017 base := v_1.Args[0]
8018 mem := v_2
8019 if !(is32Bit(int64(off1) + int64(off2))) {
8020 break
8021 }
8022 v.reset(OpAMD64DIVSDload)
8023 v.AuxInt = int32ToAuxInt(off1 + off2)
8024 v.Aux = symToAux(sym)
8025 v.AddArg3(val, base, mem)
8026 return true
8027 }
8028 // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8029 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8030 // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8031 for {
8032 off1 := auxIntToInt32(v.AuxInt)
8033 sym1 := auxToSym(v.Aux)
8034 val := v_0
8035 if v_1.Op != OpAMD64LEAQ {
8036 break
8037 }
8038 off2 := auxIntToInt32(v_1.AuxInt)
8039 sym2 := auxToSym(v_1.Aux)
8040 base := v_1.Args[0]
8041 mem := v_2
8042 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8043 break
8044 }
8045 v.reset(OpAMD64DIVSDload)
8046 v.AuxInt = int32ToAuxInt(off1 + off2)
8047 v.Aux = symToAux(mergeSym(sym1, sym2))
8048 v.AddArg3(val, base, mem)
8049 return true
8050 }
8051 return false
8052 }
8053 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8054 v_1 := v.Args[1]
8055 v_0 := v.Args[0]
8056 // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
8057 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
8058 // result: (DIVSSload x [off] {sym} ptr mem)
8059 for {
8060 x := v_0
8061 l := v_1
8062 if l.Op != OpAMD64MOVSSload {
8063 break
8064 }
8065 off := auxIntToInt32(l.AuxInt)
8066 sym := auxToSym(l.Aux)
8067 mem := l.Args[1]
8068 ptr := l.Args[0]
8069 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8070 break
8071 }
8072 v.reset(OpAMD64DIVSSload)
8073 v.AuxInt = int32ToAuxInt(off)
8074 v.Aux = symToAux(sym)
8075 v.AddArg3(x, ptr, mem)
8076 return true
8077 }
8078 return false
8079 }
8080 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8081 v_2 := v.Args[2]
8082 v_1 := v.Args[1]
8083 v_0 := v.Args[0]
8084 // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
8085 // cond: is32Bit(int64(off1)+int64(off2))
8086 // result: (DIVSSload [off1+off2] {sym} val base mem)
8087 for {
8088 off1 := auxIntToInt32(v.AuxInt)
8089 sym := auxToSym(v.Aux)
8090 val := v_0
8091 if v_1.Op != OpAMD64ADDQconst {
8092 break
8093 }
8094 off2 := auxIntToInt32(v_1.AuxInt)
8095 base := v_1.Args[0]
8096 mem := v_2
8097 if !(is32Bit(int64(off1) + int64(off2))) {
8098 break
8099 }
8100 v.reset(OpAMD64DIVSSload)
8101 v.AuxInt = int32ToAuxInt(off1 + off2)
8102 v.Aux = symToAux(sym)
8103 v.AddArg3(val, base, mem)
8104 return true
8105 }
8106 // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8107 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8108 // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8109 for {
8110 off1 := auxIntToInt32(v.AuxInt)
8111 sym1 := auxToSym(v.Aux)
8112 val := v_0
8113 if v_1.Op != OpAMD64LEAQ {
8114 break
8115 }
8116 off2 := auxIntToInt32(v_1.AuxInt)
8117 sym2 := auxToSym(v_1.Aux)
8118 base := v_1.Args[0]
8119 mem := v_2
8120 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8121 break
8122 }
8123 v.reset(OpAMD64DIVSSload)
8124 v.AuxInt = int32ToAuxInt(off1 + off2)
8125 v.Aux = symToAux(mergeSym(sym1, sym2))
8126 v.AddArg3(val, base, mem)
8127 return true
8128 }
8129 return false
8130 }
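// A note on the HMUL rewrites below (rationale inferred from the op
// definitions, not stated in this file): HMUL computes the high half of a
// widening multiply, and its first operand is tied to the AX register.
// Commuting so that a rematerializeable value lands in that slot lets the
// register allocator simply regenerate the value in AX instead of
// spilling some other live value to free it.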
8131 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8132 v_1 := v.Args[1]
8133 v_0 := v.Args[0]
8134 // match: (HMULL x y)
8135 // cond: !x.rematerializeable() && y.rematerializeable()
8136 // result: (HMULL y x)
8137 for {
8138 x := v_0
8139 y := v_1
8140 if !(!x.rematerializeable() && y.rematerializeable()) {
8141 break
8142 }
8143 v.reset(OpAMD64HMULL)
8144 v.AddArg2(y, x)
8145 return true
8146 }
8147 return false
8148 }
8149 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8150 v_1 := v.Args[1]
8151 v_0 := v.Args[0]
8152 // match: (HMULLU x y)
8153 // cond: !x.rematerializeable() && y.rematerializeable()
8154 // result: (HMULLU y x)
8155 for {
8156 x := v_0
8157 y := v_1
8158 if !(!x.rematerializeable() && y.rematerializeable()) {
8159 break
8160 }
8161 v.reset(OpAMD64HMULLU)
8162 v.AddArg2(y, x)
8163 return true
8164 }
8165 return false
8166 }
8167 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8168 v_1 := v.Args[1]
8169 v_0 := v.Args[0]
8170 // match: (HMULQ x y)
8171 // cond: !x.rematerializeable() && y.rematerializeable()
8172 // result: (HMULQ y x)
8173 for {
8174 x := v_0
8175 y := v_1
8176 if !(!x.rematerializeable() && y.rematerializeable()) {
8177 break
8178 }
8179 v.reset(OpAMD64HMULQ)
8180 v.AddArg2(y, x)
8181 return true
8182 }
8183 return false
8184 }
8185 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8186 v_1 := v.Args[1]
8187 v_0 := v.Args[0]
8188 // match: (HMULQU x y)
8189 // cond: !x.rematerializeable() && y.rematerializeable()
8190 // result: (HMULQU y x)
8191 for {
8192 x := v_0
8193 y := v_1
8194 if !(!x.rematerializeable() && y.rematerializeable()) {
8195 break
8196 }
8197 v.reset(OpAMD64HMULQU)
8198 v.AddArg2(y, x)
8199 return true
8200 }
8201 return false
8202 }
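// The LEAL rewrites fold address arithmetic into a single LEA: a constant
// added to the base merges into the displacement; a constant added to the
// index merges scaled by the element size (LEAL4 [c] {s} x (ADDLconst [d] y)
// becomes LEAL4 [c+4*d] {s} x y); and a small constant shift of the index
// upgrades the scale (LEAL1 [c] {s} x (SHLLconst [2] y) becomes
// LEAL4 [c] {s} x y). The x.Op != OpSB and y.Op != OpSB conditions keep
// the SB pseudo-register out of operand slots that cannot encode it.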
8203 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8204 v_0 := v.Args[0]
8205 // match: (LEAL [c] {s} (ADDLconst [d] x))
8206 // cond: is32Bit(int64(c)+int64(d))
8207 // result: (LEAL [c+d] {s} x)
8208 for {
8209 c := auxIntToInt32(v.AuxInt)
8210 s := auxToSym(v.Aux)
8211 if v_0.Op != OpAMD64ADDLconst {
8212 break
8213 }
8214 d := auxIntToInt32(v_0.AuxInt)
8215 x := v_0.Args[0]
8216 if !(is32Bit(int64(c) + int64(d))) {
8217 break
8218 }
8219 v.reset(OpAMD64LEAL)
8220 v.AuxInt = int32ToAuxInt(c + d)
8221 v.Aux = symToAux(s)
8222 v.AddArg(x)
8223 return true
8224 }
8225 // match: (LEAL [c] {s} (ADDL x y))
8226 // cond: x.Op != OpSB && y.Op != OpSB
8227 // result: (LEAL1 [c] {s} x y)
8228 for {
8229 c := auxIntToInt32(v.AuxInt)
8230 s := auxToSym(v.Aux)
8231 if v_0.Op != OpAMD64ADDL {
8232 break
8233 }
8234 _ = v_0.Args[1]
8235 v_0_0 := v_0.Args[0]
8236 v_0_1 := v_0.Args[1]
8237 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8238 x := v_0_0
8239 y := v_0_1
8240 if !(x.Op != OpSB && y.Op != OpSB) {
8241 continue
8242 }
8243 v.reset(OpAMD64LEAL1)
8244 v.AuxInt = int32ToAuxInt(c)
8245 v.Aux = symToAux(s)
8246 v.AddArg2(x, y)
8247 return true
8248 }
8249 break
8250 }
8251 return false
8252 }
8253 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8254 v_1 := v.Args[1]
8255 v_0 := v.Args[0]
8256 // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
8257 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8258 // result: (LEAL1 [c+d] {s} x y)
8259 for {
8260 c := auxIntToInt32(v.AuxInt)
8261 s := auxToSym(v.Aux)
8262 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8263 if v_0.Op != OpAMD64ADDLconst {
8264 continue
8265 }
8266 d := auxIntToInt32(v_0.AuxInt)
8267 x := v_0.Args[0]
8268 y := v_1
8269 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8270 continue
8271 }
8272 v.reset(OpAMD64LEAL1)
8273 v.AuxInt = int32ToAuxInt(c + d)
8274 v.Aux = symToAux(s)
8275 v.AddArg2(x, y)
8276 return true
8277 }
8278 break
8279 }
8280 // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
8281 // result: (LEAL2 [c] {s} x y)
8282 for {
8283 c := auxIntToInt32(v.AuxInt)
8284 s := auxToSym(v.Aux)
8285 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8286 x := v_0
8287 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8288 continue
8289 }
8290 y := v_1.Args[0]
8291 v.reset(OpAMD64LEAL2)
8292 v.AuxInt = int32ToAuxInt(c)
8293 v.Aux = symToAux(s)
8294 v.AddArg2(x, y)
8295 return true
8296 }
8297 break
8298 }
8299 // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
8300 // result: (LEAL4 [c] {s} x y)
8301 for {
8302 c := auxIntToInt32(v.AuxInt)
8303 s := auxToSym(v.Aux)
8304 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8305 x := v_0
8306 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8307 continue
8308 }
8309 y := v_1.Args[0]
8310 v.reset(OpAMD64LEAL4)
8311 v.AuxInt = int32ToAuxInt(c)
8312 v.Aux = symToAux(s)
8313 v.AddArg2(x, y)
8314 return true
8315 }
8316 break
8317 }
8318 // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
8319 // result: (LEAL8 [c] {s} x y)
8320 for {
8321 c := auxIntToInt32(v.AuxInt)
8322 s := auxToSym(v.Aux)
8323 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8324 x := v_0
8325 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8326 continue
8327 }
8328 y := v_1.Args[0]
8329 v.reset(OpAMD64LEAL8)
8330 v.AuxInt = int32ToAuxInt(c)
8331 v.Aux = symToAux(s)
8332 v.AddArg2(x, y)
8333 return true
8334 }
8335 break
8336 }
8337 return false
8338 }
8339 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8340 v_1 := v.Args[1]
8341 v_0 := v.Args[0]
8342 // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
8343 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8344 // result: (LEAL2 [c+d] {s} x y)
8345 for {
8346 c := auxIntToInt32(v.AuxInt)
8347 s := auxToSym(v.Aux)
8348 if v_0.Op != OpAMD64ADDLconst {
8349 break
8350 }
8351 d := auxIntToInt32(v_0.AuxInt)
8352 x := v_0.Args[0]
8353 y := v_1
8354 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8355 break
8356 }
8357 v.reset(OpAMD64LEAL2)
8358 v.AuxInt = int32ToAuxInt(c + d)
8359 v.Aux = symToAux(s)
8360 v.AddArg2(x, y)
8361 return true
8362 }
8363
8364
8365
8366 for {
8367 c := auxIntToInt32(v.AuxInt)
8368 s := auxToSym(v.Aux)
8369 x := v_0
8370 if v_1.Op != OpAMD64ADDLconst {
8371 break
8372 }
8373 d := auxIntToInt32(v_1.AuxInt)
8374 y := v_1.Args[0]
8375 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8376 break
8377 }
8378 v.reset(OpAMD64LEAL2)
8379 v.AuxInt = int32ToAuxInt(c + 2*d)
8380 v.Aux = symToAux(s)
8381 v.AddArg2(x, y)
8382 return true
8383 }
8384
8385
8386 for {
8387 c := auxIntToInt32(v.AuxInt)
8388 s := auxToSym(v.Aux)
8389 x := v_0
8390 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8391 break
8392 }
8393 y := v_1.Args[0]
8394 v.reset(OpAMD64LEAL4)
8395 v.AuxInt = int32ToAuxInt(c)
8396 v.Aux = symToAux(s)
8397 v.AddArg2(x, y)
8398 return true
8399 }
8400
8401
8402 for {
8403 c := auxIntToInt32(v.AuxInt)
8404 s := auxToSym(v.Aux)
8405 x := v_0
8406 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8407 break
8408 }
8409 y := v_1.Args[0]
8410 v.reset(OpAMD64LEAL8)
8411 v.AuxInt = int32ToAuxInt(c)
8412 v.Aux = symToAux(s)
8413 v.AddArg2(x, y)
8414 return true
8415 }
8416 return false
8417 }
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
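// The 64-bit LEAQ rules mirror the LEAL rules above with two extra concerns:
// folded offsets must still fit in a signed 32-bit displacement (is32Bit),
// and symbol references from nested LEAQs are combined via canMergeSym and
// mergeSym. The x.Op != OpSB guards keep the SB pseudo-register (the static
// base) out of two-register forms, where the resulting addressing mode could
// not be encoded.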
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
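// LEAQ1 is the unscaled two-register form (x + y + c). Note the final rule:
// a LEAQ1 with a zero offset and no symbol is just an addition, so it is
// strength-reduced to a plain ADDQ.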
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
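// For LEAQ2 (x + 2*y + c), a constant folded in through the index operand is
// doubled: x + 2*(y+d) + c = x + 2*y + (c + 2*d), hence the c+2*d offsets and
// the is32Bit(int64(c)+2*int64(d)) overflow checks below. The same scaling
// applies with factors 4 and 8 in the LEAQ4 and LEAQ8 rules that follow.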
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
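// A MOVBE (byte-swapping) store of an explicitly byte-swapped value swaps
// twice, which cancels out: the rules below replace the pair with a plain
// store of the original, unswapped value.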
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1
	// result: (MOVLstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1
	// result: (MOVQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
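// Sign extension of a single-use load folds into a sign-extending load
// (MOVBQSXload); clobber marks the old load for dead-code removal. Loads of
// any width qualify because on little-endian AMD64 the low byte of a wider
// load sits at the same address, so reloading just that byte is equivalent.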
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
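// Store-to-load forwarding: a sign-extending load from an address that was
// just stored to reuses the stored value directly (wrapped in MOVBQSX)
// instead of going back through memory.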
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
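// Loads relative to SB from read-only symbols (symIsRO) are constant-folded
// at compile time: read8 fetches the byte from the symbol's data and the
// load becomes a MOVLconst.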
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
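// The first ten MOVBstore rules fuse a single-use SETcc flag materialization
// with the byte store into one SETccstore instruction. Constant stores are
// converted to MOVBstoreconst, truncating the constant to its low byte first
// (int32(int8(c))), since only that byte reaches memory.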
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
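// MOVBstoreconst carries its constant and offset packed together in a
// ValAndOff, so offset folding goes through canAdd32/addOffset32 rather than
// raw integer addition.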
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
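// The MOVSSstore rule below is a bit-reinterpretation, not a conversion: a
// 32-bit integer load of a value just stored from a float register becomes
// MOVLf2i on the stored value, avoiding the round trip through memory.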
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
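// Beyond the usual offset and constant folding, the MOVLstore rules fuse
// read-modify-write sequences: a single-use op (or op-with-load) whose input
// is a load from the store's own address collapses into one memory-operand
// instruction such as ADDLmodify or ANDLconstmodify. The final rule uses the
// MOVBEL (byte-swapping) store, which is only available at GOAMD64 v3 and up.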
10633 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
10634 v_2 := v.Args[2]
10635 v_1 := v.Args[1]
10636 v_0 := v.Args[0]
10637
10638
10639 for {
10640 off := auxIntToInt32(v.AuxInt)
10641 sym := auxToSym(v.Aux)
10642 ptr := v_0
10643 if v_1.Op != OpAMD64MOVLQSX {
10644 break
10645 }
10646 x := v_1.Args[0]
10647 mem := v_2
10648 v.reset(OpAMD64MOVLstore)
10649 v.AuxInt = int32ToAuxInt(off)
10650 v.Aux = symToAux(sym)
10651 v.AddArg3(ptr, x, mem)
10652 return true
10653 }
10654
10655
10656 for {
10657 off := auxIntToInt32(v.AuxInt)
10658 sym := auxToSym(v.Aux)
10659 ptr := v_0
10660 if v_1.Op != OpAMD64MOVLQZX {
10661 break
10662 }
10663 x := v_1.Args[0]
10664 mem := v_2
10665 v.reset(OpAMD64MOVLstore)
10666 v.AuxInt = int32ToAuxInt(off)
10667 v.Aux = symToAux(sym)
10668 v.AddArg3(ptr, x, mem)
10669 return true
10670 }
10671
10672
10673
10674 for {
10675 off1 := auxIntToInt32(v.AuxInt)
10676 sym := auxToSym(v.Aux)
10677 if v_0.Op != OpAMD64ADDQconst {
10678 break
10679 }
10680 off2 := auxIntToInt32(v_0.AuxInt)
10681 ptr := v_0.Args[0]
10682 val := v_1
10683 mem := v_2
10684 if !(is32Bit(int64(off1) + int64(off2))) {
10685 break
10686 }
10687 v.reset(OpAMD64MOVLstore)
10688 v.AuxInt = int32ToAuxInt(off1 + off2)
10689 v.Aux = symToAux(sym)
10690 v.AddArg3(ptr, val, mem)
10691 return true
10692 }
10693
10694
10695 for {
10696 off := auxIntToInt32(v.AuxInt)
10697 sym := auxToSym(v.Aux)
10698 ptr := v_0
10699 if v_1.Op != OpAMD64MOVLconst {
10700 break
10701 }
10702 c := auxIntToInt32(v_1.AuxInt)
10703 mem := v_2
10704 v.reset(OpAMD64MOVLstoreconst)
10705 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10706 v.Aux = symToAux(sym)
10707 v.AddArg2(ptr, mem)
10708 return true
10709 }
10710
10711
10712 for {
10713 off := auxIntToInt32(v.AuxInt)
10714 sym := auxToSym(v.Aux)
10715 ptr := v_0
10716 if v_1.Op != OpAMD64MOVQconst {
10717 break
10718 }
10719 c := auxIntToInt64(v_1.AuxInt)
10720 mem := v_2
10721 v.reset(OpAMD64MOVLstoreconst)
10722 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
10723 v.Aux = symToAux(sym)
10724 v.AddArg2(ptr, mem)
10725 return true
10726 }
10727
10728
10729
10730 for {
10731 off1 := auxIntToInt32(v.AuxInt)
10732 sym1 := auxToSym(v.Aux)
10733 if v_0.Op != OpAMD64LEAQ {
10734 break
10735 }
10736 off2 := auxIntToInt32(v_0.AuxInt)
10737 sym2 := auxToSym(v_0.Aux)
10738 base := v_0.Args[0]
10739 val := v_1
10740 mem := v_2
10741 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10742 break
10743 }
10744 v.reset(OpAMD64MOVLstore)
10745 v.AuxInt = int32ToAuxInt(off1 + off2)
10746 v.Aux = symToAux(mergeSym(sym1, sym2))
10747 v.AddArg3(base, val, mem)
10748 return true
10749 }
10750
10751
10752
10753 for {
10754 off := auxIntToInt32(v.AuxInt)
10755 sym := auxToSym(v.Aux)
10756 ptr := v_0
10757 y := v_1
10758 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10759 break
10760 }
10761 mem := y.Args[2]
10762 x := y.Args[0]
10763 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10764 break
10765 }
10766 v.reset(OpAMD64ADDLmodify)
10767 v.AuxInt = int32ToAuxInt(off)
10768 v.Aux = symToAux(sym)
10769 v.AddArg3(ptr, x, mem)
10770 return true
10771 }
10772
10773
10774
10775 for {
10776 off := auxIntToInt32(v.AuxInt)
10777 sym := auxToSym(v.Aux)
10778 ptr := v_0
10779 y := v_1
10780 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10781 break
10782 }
10783 mem := y.Args[2]
10784 x := y.Args[0]
10785 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10786 break
10787 }
10788 v.reset(OpAMD64ANDLmodify)
10789 v.AuxInt = int32ToAuxInt(off)
10790 v.Aux = symToAux(sym)
10791 v.AddArg3(ptr, x, mem)
10792 return true
10793 }
10794
10795
10796
10797 for {
10798 off := auxIntToInt32(v.AuxInt)
10799 sym := auxToSym(v.Aux)
10800 ptr := v_0
10801 y := v_1
10802 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10803 break
10804 }
10805 mem := y.Args[2]
10806 x := y.Args[0]
10807 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10808 break
10809 }
10810 v.reset(OpAMD64ORLmodify)
10811 v.AuxInt = int32ToAuxInt(off)
10812 v.Aux = symToAux(sym)
10813 v.AddArg3(ptr, x, mem)
10814 return true
10815 }
10816
10817
10818
10819 for {
10820 off := auxIntToInt32(v.AuxInt)
10821 sym := auxToSym(v.Aux)
10822 ptr := v_0
10823 y := v_1
10824 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
10825 break
10826 }
10827 mem := y.Args[2]
10828 x := y.Args[0]
10829 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
10830 break
10831 }
10832 v.reset(OpAMD64XORLmodify)
10833 v.AuxInt = int32ToAuxInt(off)
10834 v.Aux = symToAux(sym)
10835 v.AddArg3(ptr, x, mem)
10836 return true
10837 }
10838
10839
10840
10841 for {
10842 off := auxIntToInt32(v.AuxInt)
10843 sym := auxToSym(v.Aux)
10844 ptr := v_0
10845 y := v_1
10846 if y.Op != OpAMD64ADDL {
10847 break
10848 }
10849 _ = y.Args[1]
10850 y_0 := y.Args[0]
10851 y_1 := y.Args[1]
10852 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
10853 l := y_0
10854 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
10855 continue
10856 }
10857 mem := l.Args[1]
10858 if ptr != l.Args[0] {
10859 continue
10860 }
10861 x := y_1
10862 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
10863 continue
10864 }
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBELstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBELstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
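// rewriteValueAMD64_OpAMD64MOVLstoreconst folds constant address arithmetic
// (ADDQconst offsets and mergeable LEAQ symbols) into the store's ValAndOff aux.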
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
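// rewriteValueAMD64_OpAMD64MOVOload folds ADDQconst offsets and mergeable LEAQ
// base/symbol pairs into the 128-bit load.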
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
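// rewriteValueAMD64_OpAMD64MOVOstore folds address arithmetic into the 128-bit
// store and expands a 16-byte copy out of a read-only symbol into two 8-byte
// constant stores.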
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
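// rewriteValueAMD64_OpAMD64MOVOstoreconst folds ADDQconst offsets and mergeable
// LEAQ symbols into the 16-byte constant store.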
func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
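// rewriteValueAMD64_OpAMD64MOVQatomicload folds address arithmetic into the
// atomic load; only the addressing mode changes, not the memory access itself.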
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
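// rewriteValueAMD64_OpAMD64MOVQf2i rewrites a float-to-int bit move of a
// same-size argument into an integer-typed Arg in the entry block, so the
// value is loaded directly into the right register file.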
func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
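// rewriteValueAMD64_OpAMD64MOVQi2f is the mirror of MOVQf2i: an int-to-float
// bit move of a same-size argument becomes a float-typed Arg in the entry block.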
func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
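// rewriteValueAMD64_OpAMD64MOVQload forwards values from a same-address store,
// folds address arithmetic, reinterprets a just-stored SSE value via MOVQf2i,
// and constant-folds loads from read-only symbols.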
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
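// rewriteValueAMD64_OpAMD64MOVQstore folds address arithmetic and small
// constants into the store, fuses op/load/store triples into read-modify-write
// instructions (ADDQmodify, BTSQconstmodify, and friends), and uses
// MOVBEQstore for byte-swapped stores when GOAMD64 >= 3.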
func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBQ {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
	// result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		x := v_1
		if x.Op != OpAMD64BTSQconst {
			break
		}
		c := auxIntToInt8(x.AuxInt)
		l := x.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
			break
		}
		v.reset(OpAMD64BTSQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
	// result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		x := v_1
		if x.Op != OpAMD64BTRQconst {
			break
		}
		c := auxIntToInt8(x.AuxInt)
		l := x.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
			break
		}
		v.reset(OpAMD64BTRQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
	// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
	// result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		x := v_1
		if x.Op != OpAMD64BTCQconst {
			break
		}
		c := auxIntToInt8(x.AuxInt)
		l := x.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
			break
		}
		v.reset(OpAMD64BTCQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBEQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
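// rewriteValueAMD64_OpAMD64MOVQstoreconst folds address arithmetic into the
// constant store and merges two adjacent 8-byte zero stores into a single
// 16-byte MOVOstoreconst when SSE is available.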
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
	// cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
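// rewriteValueAMD64_OpAMD64MOVSDload folds address arithmetic into the load
// and turns a load of a just-stored integer value into a MOVQi2f register move.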
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// result: (MOVQi2f val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
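// rewriteValueAMD64_OpAMD64MOVSDstore folds address arithmetic into the store
// and lowers a store of an int-to-float bit move to a plain MOVQstore.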
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
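// rewriteValueAMD64_OpAMD64MOVSSload folds address arithmetic into the load
// and turns a load of a just-stored integer value into a MOVLi2f register move.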
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// result: (MOVLi2f val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
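// rewriteValueAMD64_OpAMD64MOVSSstore folds address arithmetic into the store
// and lowers a store of an int-to-float bit move to a plain MOVLstore.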
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
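// rewriteValueAMD64_OpAMD64MOVWQSX replaces sign extension of a single-use load
// with a sign-extending load, drops the extension when a mask already clears
// the sign bit, and collapses redundant extensions.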
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
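// rewriteValueAMD64_OpAMD64MOVWQSXload forwards a same-address store through
// the sign-extending load and folds mergeable LEAQ addresses into it.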
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
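// rewriteValueAMD64_OpAMD64MOVWQZX replaces zero extension of a single-use load
// with a plain 16-bit load (which zero-extends on amd64), narrows ANDLconst
// masks to 16 bits, and collapses redundant extensions.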
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
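// rewriteValueAMD64_OpAMD64MOVWload forwards a same-address store's value
// (zero-extended) and folds ADDQconst and LEAQ address arithmetic into the load.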
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])