// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x (FlagEQ) [c])
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
2213 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2214 v_1 := v.Args[1]
2215 v_0 := v.Args[0]
2216 // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2217 // cond: ValAndOff(valoff1).canAdd32(off2)
2218 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2219 for {
2220 valoff1 := auxIntToValAndOff(v.AuxInt)
2221 sym := auxToSym(v.Aux)
2222 if v_0.Op != OpAMD64ADDQconst {
2223 break
2224 }
2225 off2 := auxIntToInt32(v_0.AuxInt)
2226 base := v_0.Args[0]
2227 mem := v_1
2228 if !(ValAndOff(valoff1).canAdd32(off2)) {
2229 break
2230 }
2231 v.reset(OpAMD64ADDQconstmodify)
2232 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2233 v.Aux = symToAux(sym)
2234 v.AddArg2(base, mem)
2235 return true
2236 }
2237 // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2238 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2239 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2240 for {
2241 valoff1 := auxIntToValAndOff(v.AuxInt)
2242 sym1 := auxToSym(v.Aux)
2243 if v_0.Op != OpAMD64LEAQ {
2244 break
2245 }
2246 off2 := auxIntToInt32(v_0.AuxInt)
2247 sym2 := auxToSym(v_0.Aux)
2248 base := v_0.Args[0]
2249 mem := v_1
2250 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2251 break
2252 }
2253 v.reset(OpAMD64ADDQconstmodify)
2254 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2255 v.Aux = symToAux(mergeSym(sym1, sym2))
2256 v.AddArg2(base, mem)
2257 return true
2258 }
2259 return false
2260 }
2261 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2262 v_2 := v.Args[2]
2263 v_1 := v.Args[1]
2264 v_0 := v.Args[0]
2265 b := v.Block
2266 typ := &b.Func.Config.Types
2267 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
2268 // cond: is32Bit(int64(off1)+int64(off2))
2269 // result: (ADDQload [off1+off2] {sym} val base mem)
2270 for {
2271 off1 := auxIntToInt32(v.AuxInt)
2272 sym := auxToSym(v.Aux)
2273 val := v_0
2274 if v_1.Op != OpAMD64ADDQconst {
2275 break
2276 }
2277 off2 := auxIntToInt32(v_1.AuxInt)
2278 base := v_1.Args[0]
2279 mem := v_2
2280 if !(is32Bit(int64(off1) + int64(off2))) {
2281 break
2282 }
2283 v.reset(OpAMD64ADDQload)
2284 v.AuxInt = int32ToAuxInt(off1 + off2)
2285 v.Aux = symToAux(sym)
2286 v.AddArg3(val, base, mem)
2287 return true
2288 }
2289 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2290 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2291 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2292 for {
2293 off1 := auxIntToInt32(v.AuxInt)
2294 sym1 := auxToSym(v.Aux)
2295 val := v_0
2296 if v_1.Op != OpAMD64LEAQ {
2297 break
2298 }
2299 off2 := auxIntToInt32(v_1.AuxInt)
2300 sym2 := auxToSym(v_1.Aux)
2301 base := v_1.Args[0]
2302 mem := v_2
2303 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2304 break
2305 }
2306 v.reset(OpAMD64ADDQload)
2307 v.AuxInt = int32ToAuxInt(off1 + off2)
2308 v.Aux = symToAux(mergeSym(sym1, sym2))
2309 v.AddArg3(val, base, mem)
2310 return true
2311 }
2312 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
2313 // result: (ADDQ x (MOVQf2i y))
2314 for {
2315 off := auxIntToInt32(v.AuxInt)
2316 sym := auxToSym(v.Aux)
2317 x := v_0
2318 ptr := v_1
2319 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2320 break
2321 }
2322 y := v_2.Args[1]
2323 if ptr != v_2.Args[0] {
2324 break
2325 }
2326 v.reset(OpAMD64ADDQ)
2327 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2328 v0.AddArg(y)
2329 v.AddArg2(x, v0)
2330 return true
2331 }
2332 return false
2333 }
2334 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2335 v_2 := v.Args[2]
2336 v_1 := v.Args[1]
2337 v_0 := v.Args[0]
2338 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2339 // cond: is32Bit(int64(off1)+int64(off2))
2340 // result: (ADDQmodify [off1+off2] {sym} base val mem)
2341 for {
2342 off1 := auxIntToInt32(v.AuxInt)
2343 sym := auxToSym(v.Aux)
2344 if v_0.Op != OpAMD64ADDQconst {
2345 break
2346 }
2347 off2 := auxIntToInt32(v_0.AuxInt)
2348 base := v_0.Args[0]
2349 val := v_1
2350 mem := v_2
2351 if !(is32Bit(int64(off1) + int64(off2))) {
2352 break
2353 }
2354 v.reset(OpAMD64ADDQmodify)
2355 v.AuxInt = int32ToAuxInt(off1 + off2)
2356 v.Aux = symToAux(sym)
2357 v.AddArg3(base, val, mem)
2358 return true
2359 }
2360 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2361 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2362 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2363 for {
2364 off1 := auxIntToInt32(v.AuxInt)
2365 sym1 := auxToSym(v.Aux)
2366 if v_0.Op != OpAMD64LEAQ {
2367 break
2368 }
2369 off2 := auxIntToInt32(v_0.AuxInt)
2370 sym2 := auxToSym(v_0.Aux)
2371 base := v_0.Args[0]
2372 val := v_1
2373 mem := v_2
2374 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2375 break
2376 }
2377 v.reset(OpAMD64ADDQmodify)
2378 v.AuxInt = int32ToAuxInt(off1 + off2)
2379 v.Aux = symToAux(mergeSym(sym1, sym2))
2380 v.AddArg3(base, val, mem)
2381 return true
2382 }
2383 return false
2384 }
2385 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2386 v_1 := v.Args[1]
2387 v_0 := v.Args[0]
2388 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
2389 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2390 // result: (ADDSDload x [off] {sym} ptr mem)
2391 for {
2392 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2393 x := v_0
2394 l := v_1
2395 if l.Op != OpAMD64MOVSDload {
2396 continue
2397 }
2398 off := auxIntToInt32(l.AuxInt)
2399 sym := auxToSym(l.Aux)
2400 mem := l.Args[1]
2401 ptr := l.Args[0]
2402 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2403 continue
2404 }
2405 v.reset(OpAMD64ADDSDload)
2406 v.AuxInt = int32ToAuxInt(off)
2407 v.Aux = symToAux(sym)
2408 v.AddArg3(x, ptr, mem)
2409 return true
2410 }
2411 break
2412 }
2413 return false
2414 }
2415 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2416 v_2 := v.Args[2]
2417 v_1 := v.Args[1]
2418 v_0 := v.Args[0]
2419 b := v.Block
2420 typ := &b.Func.Config.Types
2421 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
2422 // cond: is32Bit(int64(off1)+int64(off2))
2423 // result: (ADDSDload [off1+off2] {sym} val base mem)
2424 for {
2425 off1 := auxIntToInt32(v.AuxInt)
2426 sym := auxToSym(v.Aux)
2427 val := v_0
2428 if v_1.Op != OpAMD64ADDQconst {
2429 break
2430 }
2431 off2 := auxIntToInt32(v_1.AuxInt)
2432 base := v_1.Args[0]
2433 mem := v_2
2434 if !(is32Bit(int64(off1) + int64(off2))) {
2435 break
2436 }
2437 v.reset(OpAMD64ADDSDload)
2438 v.AuxInt = int32ToAuxInt(off1 + off2)
2439 v.Aux = symToAux(sym)
2440 v.AddArg3(val, base, mem)
2441 return true
2442 }
2443 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2444 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2445 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2446 for {
2447 off1 := auxIntToInt32(v.AuxInt)
2448 sym1 := auxToSym(v.Aux)
2449 val := v_0
2450 if v_1.Op != OpAMD64LEAQ {
2451 break
2452 }
2453 off2 := auxIntToInt32(v_1.AuxInt)
2454 sym2 := auxToSym(v_1.Aux)
2455 base := v_1.Args[0]
2456 mem := v_2
2457 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2458 break
2459 }
2460 v.reset(OpAMD64ADDSDload)
2461 v.AuxInt = int32ToAuxInt(off1 + off2)
2462 v.Aux = symToAux(mergeSym(sym1, sym2))
2463 v.AddArg3(val, base, mem)
2464 return true
2465 }
2466 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
2467 // result: (ADDSD x (MOVQi2f y))
2468 for {
2469 off := auxIntToInt32(v.AuxInt)
2470 sym := auxToSym(v.Aux)
2471 x := v_0
2472 ptr := v_1
2473 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2474 break
2475 }
2476 y := v_2.Args[1]
2477 if ptr != v_2.Args[0] {
2478 break
2479 }
2480 v.reset(OpAMD64ADDSD)
2481 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2482 v0.AddArg(y)
2483 v.AddArg2(x, v0)
2484 return true
2485 }
2486 return false
2487 }
2488 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2489 v_1 := v.Args[1]
2490 v_0 := v.Args[0]
2491 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
2492 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2493 // result: (ADDSSload x [off] {sym} ptr mem)
2494 for {
2495 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2496 x := v_0
2497 l := v_1
2498 if l.Op != OpAMD64MOVSSload {
2499 continue
2500 }
2501 off := auxIntToInt32(l.AuxInt)
2502 sym := auxToSym(l.Aux)
2503 mem := l.Args[1]
2504 ptr := l.Args[0]
2505 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2506 continue
2507 }
2508 v.reset(OpAMD64ADDSSload)
2509 v.AuxInt = int32ToAuxInt(off)
2510 v.Aux = symToAux(sym)
2511 v.AddArg3(x, ptr, mem)
2512 return true
2513 }
2514 break
2515 }
2516 return false
2517 }
2518 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2519 v_2 := v.Args[2]
2520 v_1 := v.Args[1]
2521 v_0 := v.Args[0]
2522 b := v.Block
2523 typ := &b.Func.Config.Types
2524 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
2525 // cond: is32Bit(int64(off1)+int64(off2))
2526 // result: (ADDSSload [off1+off2] {sym} val base mem)
2527 for {
2528 off1 := auxIntToInt32(v.AuxInt)
2529 sym := auxToSym(v.Aux)
2530 val := v_0
2531 if v_1.Op != OpAMD64ADDQconst {
2532 break
2533 }
2534 off2 := auxIntToInt32(v_1.AuxInt)
2535 base := v_1.Args[0]
2536 mem := v_2
2537 if !(is32Bit(int64(off1) + int64(off2))) {
2538 break
2539 }
2540 v.reset(OpAMD64ADDSSload)
2541 v.AuxInt = int32ToAuxInt(off1 + off2)
2542 v.Aux = symToAux(sym)
2543 v.AddArg3(val, base, mem)
2544 return true
2545 }
2546 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2547 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2548 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2549 for {
2550 off1 := auxIntToInt32(v.AuxInt)
2551 sym1 := auxToSym(v.Aux)
2552 val := v_0
2553 if v_1.Op != OpAMD64LEAQ {
2554 break
2555 }
2556 off2 := auxIntToInt32(v_1.AuxInt)
2557 sym2 := auxToSym(v_1.Aux)
2558 base := v_1.Args[0]
2559 mem := v_2
2560 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2561 break
2562 }
2563 v.reset(OpAMD64ADDSSload)
2564 v.AuxInt = int32ToAuxInt(off1 + off2)
2565 v.Aux = symToAux(mergeSym(sym1, sym2))
2566 v.AddArg3(val, base, mem)
2567 return true
2568 }
2569 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
2570 // result: (ADDSS x (MOVLi2f y))
2571 for {
2572 off := auxIntToInt32(v.AuxInt)
2573 sym := auxToSym(v.Aux)
2574 x := v_0
2575 ptr := v_1
2576 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2577 break
2578 }
2579 y := v_2.Args[1]
2580 if ptr != v_2.Args[0] {
2581 break
2582 }
2583 v.reset(OpAMD64ADDSS)
2584 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2585 v0.AddArg(y)
2586 v.AddArg2(x, v0)
2587 return true
2588 }
2589 return false
2590 }
2591 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2592 v_1 := v.Args[1]
2593 v_0 := v.Args[0]
2594 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
2595 // result: (BTRL x y)
2596 for {
2597 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2598 if v_0.Op != OpAMD64NOTL {
2599 continue
2600 }
2601 v_0_0 := v_0.Args[0]
2602 if v_0_0.Op != OpAMD64SHLL {
2603 continue
2604 }
2605 y := v_0_0.Args[1]
2606 v_0_0_0 := v_0_0.Args[0]
2607 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2608 continue
2609 }
2610 x := v_1
2611 v.reset(OpAMD64BTRL)
2612 v.AddArg2(x, y)
2613 return true
2614 }
2615 break
2616 }
2617 // match: (ANDL (MOVLconst [c]) x)
2618 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2619 // result: (BTRLconst [int8(log32(^c))] x)
2620 for {
2621 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2622 if v_0.Op != OpAMD64MOVLconst {
2623 continue
2624 }
2625 c := auxIntToInt32(v_0.AuxInt)
2626 x := v_1
2627 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2628 continue
2629 }
2630 v.reset(OpAMD64BTRLconst)
2631 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2632 v.AddArg(x)
2633 return true
2634 }
2635 break
2636 }
2637 // match: (ANDL x (MOVLconst [c]))
2638 // result: (ANDLconst [c] x)
2639 for {
2640 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2641 x := v_0
2642 if v_1.Op != OpAMD64MOVLconst {
2643 continue
2644 }
2645 c := auxIntToInt32(v_1.AuxInt)
2646 v.reset(OpAMD64ANDLconst)
2647 v.AuxInt = int32ToAuxInt(c)
2648 v.AddArg(x)
2649 return true
2650 }
2651 break
2652 }
2653 // match: (ANDL x x)
2654 // result: x
2655 for {
2656 x := v_0
2657 if x != v_1 {
2658 break
2659 }
2660 v.copyOf(x)
2661 return true
2662 }
2663 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
2664 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2665 // result: (ANDLload x [off] {sym} ptr mem)
2666 for {
2667 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2668 x := v_0
2669 l := v_1
2670 if l.Op != OpAMD64MOVLload {
2671 continue
2672 }
2673 off := auxIntToInt32(l.AuxInt)
2674 sym := auxToSym(l.Aux)
2675 mem := l.Args[1]
2676 ptr := l.Args[0]
2677 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2678 continue
2679 }
2680 v.reset(OpAMD64ANDLload)
2681 v.AuxInt = int32ToAuxInt(off)
2682 v.Aux = symToAux(sym)
2683 v.AddArg3(x, ptr, mem)
2684 return true
2685 }
2686 break
2687 }
2688 // match: (ANDL x (NOTL y))
2689 // cond: buildcfg.GOAMD64 >= 3
2690 // result: (ANDNL x y)
2691 for {
2692 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2693 x := v_0
2694 if v_1.Op != OpAMD64NOTL {
2695 continue
2696 }
2697 y := v_1.Args[0]
2698 if !(buildcfg.GOAMD64 >= 3) {
2699 continue
2700 }
2701 v.reset(OpAMD64ANDNL)
2702 v.AddArg2(x, y)
2703 return true
2704 }
2705 break
2706 }
2707 // match: (ANDL x (NEGL x))
2708 // cond: buildcfg.GOAMD64 >= 3
2709 // result: (BLSIL x)
2710 for {
2711 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2712 x := v_0
2713 if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2714 continue
2715 }
2716 v.reset(OpAMD64BLSIL)
2717 v.AddArg(x)
2718 return true
2719 }
2720 break
2721 }
2722 // match: (ANDL x (ADDLconst [-1] x))
2723 // cond: buildcfg.GOAMD64 >= 3
2724 // result: (BLSRL x)
2725 for {
2726 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2727 x := v_0
2728 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2729 continue
2730 }
2731 v.reset(OpAMD64BLSRL)
2732 v.AddArg(x)
2733 return true
2734 }
2735 break
2736 }
2737 return false
2738 }
2739 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2740 v_0 := v.Args[0]
2741 // match: (ANDLconst [c] x)
2742 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2743 // result: (BTRLconst [int8(log32(^c))] x)
2744 for {
2745 c := auxIntToInt32(v.AuxInt)
2746 x := v_0
2747 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2748 break
2749 }
2750 v.reset(OpAMD64BTRLconst)
2751 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2752 v.AddArg(x)
2753 return true
2754 }
2755 // match: (ANDLconst [c] (ANDLconst [d] x))
2756 // result: (ANDLconst [c & d] x)
2757 for {
2758 c := auxIntToInt32(v.AuxInt)
2759 if v_0.Op != OpAMD64ANDLconst {
2760 break
2761 }
2762 d := auxIntToInt32(v_0.AuxInt)
2763 x := v_0.Args[0]
2764 v.reset(OpAMD64ANDLconst)
2765 v.AuxInt = int32ToAuxInt(c & d)
2766 v.AddArg(x)
2767 return true
2768 }
2769 // match: (ANDLconst [c] (BTRLconst [d] x))
2770 // result: (ANDLconst [c &^ (1<<uint32(d))] x)
2771 for {
2772 c := auxIntToInt32(v.AuxInt)
2773 if v_0.Op != OpAMD64BTRLconst {
2774 break
2775 }
2776 d := auxIntToInt8(v_0.AuxInt)
2777 x := v_0.Args[0]
2778 v.reset(OpAMD64ANDLconst)
2779 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
2780 v.AddArg(x)
2781 return true
2782 }
2783 // match: (ANDLconst [ 0xFF] x)
2784 // result: (MOVBQZX x)
2785 for {
2786 if auxIntToInt32(v.AuxInt) != 0xFF {
2787 break
2788 }
2789 x := v_0
2790 v.reset(OpAMD64MOVBQZX)
2791 v.AddArg(x)
2792 return true
2793 }
2794 // match: (ANDLconst [0xFFFF] x)
2795 // result: (MOVWQZX x)
2796 for {
2797 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2798 break
2799 }
2800 x := v_0
2801 v.reset(OpAMD64MOVWQZX)
2802 v.AddArg(x)
2803 return true
2804 }
2805 // match: (ANDLconst [c] _)
2806 // cond: c==0
2807 // result: (MOVLconst [0])
2808 for {
2809 c := auxIntToInt32(v.AuxInt)
2810 if !(c == 0) {
2811 break
2812 }
2813 v.reset(OpAMD64MOVLconst)
2814 v.AuxInt = int32ToAuxInt(0)
2815 return true
2816 }
2817 // match: (ANDLconst [c] x)
2818 // cond: c==-1
2819 // result: x
2820 for {
2821 c := auxIntToInt32(v.AuxInt)
2822 x := v_0
2823 if !(c == -1) {
2824 break
2825 }
2826 v.copyOf(x)
2827 return true
2828 }
2829 // match: (ANDLconst [c] (MOVLconst [d]))
2830 // result: (MOVLconst [c&d])
2831 for {
2832 c := auxIntToInt32(v.AuxInt)
2833 if v_0.Op != OpAMD64MOVLconst {
2834 break
2835 }
2836 d := auxIntToInt32(v_0.AuxInt)
2837 v.reset(OpAMD64MOVLconst)
2838 v.AuxInt = int32ToAuxInt(c & d)
2839 return true
2840 }
2841 return false
2842 }
2843 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2844 v_1 := v.Args[1]
2845 v_0 := v.Args[0]
2846 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2847 // cond: ValAndOff(valoff1).canAdd32(off2)
2848 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2849 for {
2850 valoff1 := auxIntToValAndOff(v.AuxInt)
2851 sym := auxToSym(v.Aux)
2852 if v_0.Op != OpAMD64ADDQconst {
2853 break
2854 }
2855 off2 := auxIntToInt32(v_0.AuxInt)
2856 base := v_0.Args[0]
2857 mem := v_1
2858 if !(ValAndOff(valoff1).canAdd32(off2)) {
2859 break
2860 }
2861 v.reset(OpAMD64ANDLconstmodify)
2862 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2863 v.Aux = symToAux(sym)
2864 v.AddArg2(base, mem)
2865 return true
2866 }
2867 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2868 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2869 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2870 for {
2871 valoff1 := auxIntToValAndOff(v.AuxInt)
2872 sym1 := auxToSym(v.Aux)
2873 if v_0.Op != OpAMD64LEAQ {
2874 break
2875 }
2876 off2 := auxIntToInt32(v_0.AuxInt)
2877 sym2 := auxToSym(v_0.Aux)
2878 base := v_0.Args[0]
2879 mem := v_1
2880 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2881 break
2882 }
2883 v.reset(OpAMD64ANDLconstmodify)
2884 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2885 v.Aux = symToAux(mergeSym(sym1, sym2))
2886 v.AddArg2(base, mem)
2887 return true
2888 }
2889 return false
2890 }
2891 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2892 v_2 := v.Args[2]
2893 v_1 := v.Args[1]
2894 v_0 := v.Args[0]
2895 b := v.Block
2896 typ := &b.Func.Config.Types
2897 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2898 // cond: is32Bit(int64(off1)+int64(off2))
2899 // result: (ANDLload [off1+off2] {sym} val base mem)
2900 for {
2901 off1 := auxIntToInt32(v.AuxInt)
2902 sym := auxToSym(v.Aux)
2903 val := v_0
2904 if v_1.Op != OpAMD64ADDQconst {
2905 break
2906 }
2907 off2 := auxIntToInt32(v_1.AuxInt)
2908 base := v_1.Args[0]
2909 mem := v_2
2910 if !(is32Bit(int64(off1) + int64(off2))) {
2911 break
2912 }
2913 v.reset(OpAMD64ANDLload)
2914 v.AuxInt = int32ToAuxInt(off1 + off2)
2915 v.Aux = symToAux(sym)
2916 v.AddArg3(val, base, mem)
2917 return true
2918 }
2919 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2920 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2921 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2922 for {
2923 off1 := auxIntToInt32(v.AuxInt)
2924 sym1 := auxToSym(v.Aux)
2925 val := v_0
2926 if v_1.Op != OpAMD64LEAQ {
2927 break
2928 }
2929 off2 := auxIntToInt32(v_1.AuxInt)
2930 sym2 := auxToSym(v_1.Aux)
2931 base := v_1.Args[0]
2932 mem := v_2
2933 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2934 break
2935 }
2936 v.reset(OpAMD64ANDLload)
2937 v.AuxInt = int32ToAuxInt(off1 + off2)
2938 v.Aux = symToAux(mergeSym(sym1, sym2))
2939 v.AddArg3(val, base, mem)
2940 return true
2941 }
2942 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
2943 // result: (ANDL x (MOVLf2i y))
2944 for {
2945 off := auxIntToInt32(v.AuxInt)
2946 sym := auxToSym(v.Aux)
2947 x := v_0
2948 ptr := v_1
2949 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2950 break
2951 }
2952 y := v_2.Args[1]
2953 if ptr != v_2.Args[0] {
2954 break
2955 }
2956 v.reset(OpAMD64ANDL)
2957 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2958 v0.AddArg(y)
2959 v.AddArg2(x, v0)
2960 return true
2961 }
2962 return false
2963 }
2964 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
2965 v_2 := v.Args[2]
2966 v_1 := v.Args[1]
2967 v_0 := v.Args[0]
2968 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2969 // cond: is32Bit(int64(off1)+int64(off2))
2970 // result: (ANDLmodify [off1+off2] {sym} base val mem)
2971 for {
2972 off1 := auxIntToInt32(v.AuxInt)
2973 sym := auxToSym(v.Aux)
2974 if v_0.Op != OpAMD64ADDQconst {
2975 break
2976 }
2977 off2 := auxIntToInt32(v_0.AuxInt)
2978 base := v_0.Args[0]
2979 val := v_1
2980 mem := v_2
2981 if !(is32Bit(int64(off1) + int64(off2))) {
2982 break
2983 }
2984 v.reset(OpAMD64ANDLmodify)
2985 v.AuxInt = int32ToAuxInt(off1 + off2)
2986 v.Aux = symToAux(sym)
2987 v.AddArg3(base, val, mem)
2988 return true
2989 }
2990 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2991 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2992 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2993 for {
2994 off1 := auxIntToInt32(v.AuxInt)
2995 sym1 := auxToSym(v.Aux)
2996 if v_0.Op != OpAMD64LEAQ {
2997 break
2998 }
2999 off2 := auxIntToInt32(v_0.AuxInt)
3000 sym2 := auxToSym(v_0.Aux)
3001 base := v_0.Args[0]
3002 val := v_1
3003 mem := v_2
3004 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3005 break
3006 }
3007 v.reset(OpAMD64ANDLmodify)
3008 v.AuxInt = int32ToAuxInt(off1 + off2)
3009 v.Aux = symToAux(mergeSym(sym1, sym2))
3010 v.AddArg3(base, val, mem)
3011 return true
3012 }
3013 return false
3014 }
3015 func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
3016 v_1 := v.Args[1]
3017 v_0 := v.Args[0]
3018 // match: (ANDNL x (SHLL (MOVLconst [1]) y))
3019 // result: (BTRL x y)
3020 for {
3021 x := v_0
3022 if v_1.Op != OpAMD64SHLL {
3023 break
3024 }
3025 y := v_1.Args[1]
3026 v_1_0 := v_1.Args[0]
3027 if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
3028 break
3029 }
3030 v.reset(OpAMD64BTRL)
3031 v.AddArg2(x, y)
3032 return true
3033 }
3034 return false
3035 }
3036 func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
3037 v_1 := v.Args[1]
3038 v_0 := v.Args[0]
3039 // match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
3040 // result: (BTRQ x y)
3041 for {
3042 x := v_0
3043 if v_1.Op != OpAMD64SHLQ {
3044 break
3045 }
3046 y := v_1.Args[1]
3047 v_1_0 := v_1.Args[0]
3048 if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
3049 break
3050 }
3051 v.reset(OpAMD64BTRQ)
3052 v.AddArg2(x, y)
3053 return true
3054 }
3055 return false
3056 }
3057 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3058 v_1 := v.Args[1]
3059 v_0 := v.Args[0]
3060 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3061 // result: (BTRQ x y)
3062 for {
3063 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3064 if v_0.Op != OpAMD64NOTQ {
3065 continue
3066 }
3067 v_0_0 := v_0.Args[0]
3068 if v_0_0.Op != OpAMD64SHLQ {
3069 continue
3070 }
3071 y := v_0_0.Args[1]
3072 v_0_0_0 := v_0_0.Args[0]
3073 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3074 continue
3075 }
3076 x := v_1
3077 v.reset(OpAMD64BTRQ)
3078 v.AddArg2(x, y)
3079 return true
3080 }
3081 break
3082 }
3083 // match: (ANDQ (MOVQconst [c]) x)
3084 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3085 // result: (BTRQconst [int8(log64(^c))] x)
3086 for {
3087 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3088 if v_0.Op != OpAMD64MOVQconst {
3089 continue
3090 }
3091 c := auxIntToInt64(v_0.AuxInt)
3092 x := v_1
3093 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3094 continue
3095 }
3096 v.reset(OpAMD64BTRQconst)
3097 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3098 v.AddArg(x)
3099 return true
3100 }
3101 break
3102 }
3103 // match: (ANDQ x (MOVQconst [c]))
3104 // cond: is32Bit(c)
3105 // result: (ANDQconst [int32(c)] x)
3106 for {
3107 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3108 x := v_0
3109 if v_1.Op != OpAMD64MOVQconst {
3110 continue
3111 }
3112 c := auxIntToInt64(v_1.AuxInt)
3113 if !(is32Bit(c)) {
3114 continue
3115 }
3116 v.reset(OpAMD64ANDQconst)
3117 v.AuxInt = int32ToAuxInt(int32(c))
3118 v.AddArg(x)
3119 return true
3120 }
3121 break
3122 }
3123 // match: (ANDQ x x)
3124 // result: x
3125 for {
3126 x := v_0
3127 if x != v_1 {
3128 break
3129 }
3130 v.copyOf(x)
3131 return true
3132 }
3133 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
3134 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3135 // result: (ANDQload x [off] {sym} ptr mem)
3136 for {
3137 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3138 x := v_0
3139 l := v_1
3140 if l.Op != OpAMD64MOVQload {
3141 continue
3142 }
3143 off := auxIntToInt32(l.AuxInt)
3144 sym := auxToSym(l.Aux)
3145 mem := l.Args[1]
3146 ptr := l.Args[0]
3147 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3148 continue
3149 }
3150 v.reset(OpAMD64ANDQload)
3151 v.AuxInt = int32ToAuxInt(off)
3152 v.Aux = symToAux(sym)
3153 v.AddArg3(x, ptr, mem)
3154 return true
3155 }
3156 break
3157 }
3158 // match: (ANDQ x (NOTQ y))
3159 // cond: buildcfg.GOAMD64 >= 3
3160 // result: (ANDNQ x y)
3161 for {
3162 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3163 x := v_0
3164 if v_1.Op != OpAMD64NOTQ {
3165 continue
3166 }
3167 y := v_1.Args[0]
3168 if !(buildcfg.GOAMD64 >= 3) {
3169 continue
3170 }
3171 v.reset(OpAMD64ANDNQ)
3172 v.AddArg2(x, y)
3173 return true
3174 }
3175 break
3176 }
3177 // match: (ANDQ x (NEGQ x))
3178 // cond: buildcfg.GOAMD64 >= 3
3179 // result: (BLSIQ x)
3180 for {
3181 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3182 x := v_0
3183 if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3184 continue
3185 }
3186 v.reset(OpAMD64BLSIQ)
3187 v.AddArg(x)
3188 return true
3189 }
3190 break
3191 }
3192 // match: (ANDQ x (ADDQconst [-1] x))
3193 // cond: buildcfg.GOAMD64 >= 3
3194 // result: (BLSRQ x)
3195 for {
3196 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3197 x := v_0
3198 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3199 continue
3200 }
3201 v.reset(OpAMD64BLSRQ)
3202 v.AddArg(x)
3203 return true
3204 }
3205 break
3206 }
3207 return false
3208 }
3209 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3210 v_0 := v.Args[0]
3211 // match: (ANDQconst [c] x)
3212 // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
3213 // result: (BTRQconst [int8(log32(^c))] x)
3214 for {
3215 c := auxIntToInt32(v.AuxInt)
3216 x := v_0
3217 if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
3218 break
3219 }
3220 v.reset(OpAMD64BTRQconst)
3221 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
3222 v.AddArg(x)
3223 return true
3224 }
3225 // match: (ANDQconst [c] (ANDQconst [d] x))
3226 // result: (ANDQconst [c & d] x)
3227 for {
3228 c := auxIntToInt32(v.AuxInt)
3229 if v_0.Op != OpAMD64ANDQconst {
3230 break
3231 }
3232 d := auxIntToInt32(v_0.AuxInt)
3233 x := v_0.Args[0]
3234 v.reset(OpAMD64ANDQconst)
3235 v.AuxInt = int32ToAuxInt(c & d)
3236 v.AddArg(x)
3237 return true
3238 }
3239 // match: (ANDQconst [c] (BTRQconst [d] x))
3240 // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
3241 // result: (ANDQconst [c &^ (1<<uint32(d))] x)
3242 for {
3243 c := auxIntToInt32(v.AuxInt)
3244 if v_0.Op != OpAMD64BTRQconst {
3245 break
3246 }
3247 d := auxIntToInt8(v_0.AuxInt)
3248 x := v_0.Args[0]
3249 if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
3250 break
3251 }
3252 v.reset(OpAMD64ANDQconst)
3253 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
3254 v.AddArg(x)
3255 return true
3256 }
3257 // match: (ANDQconst [ 0xFF] x)
3258 // result: (MOVBQZX x)
3259 for {
3260 if auxIntToInt32(v.AuxInt) != 0xFF {
3261 break
3262 }
3263 x := v_0
3264 v.reset(OpAMD64MOVBQZX)
3265 v.AddArg(x)
3266 return true
3267 }
3268 // match: (ANDQconst [0xFFFF] x)
3269 // result: (MOVWQZX x)
3270 for {
3271 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3272 break
3273 }
3274 x := v_0
3275 v.reset(OpAMD64MOVWQZX)
3276 v.AddArg(x)
3277 return true
3278 }
3279 // match: (ANDQconst [0] _)
3280 // result: (MOVQconst [0])
3281 for {
3282 if auxIntToInt32(v.AuxInt) != 0 {
3283 break
3284 }
3285 v.reset(OpAMD64MOVQconst)
3286 v.AuxInt = int64ToAuxInt(0)
3287 return true
3288 }
3289 // match: (ANDQconst [-1] x)
3290 // result: x
3291 for {
3292 if auxIntToInt32(v.AuxInt) != -1 {
3293 break
3294 }
3295 x := v_0
3296 v.copyOf(x)
3297 return true
3298 }
3299 // match: (ANDQconst [c] (MOVQconst [d]))
3300 // result: (MOVQconst [int64(c)&d])
3301 for {
3302 c := auxIntToInt32(v.AuxInt)
3303 if v_0.Op != OpAMD64MOVQconst {
3304 break
3305 }
3306 d := auxIntToInt64(v_0.AuxInt)
3307 v.reset(OpAMD64MOVQconst)
3308 v.AuxInt = int64ToAuxInt(int64(c) & d)
3309 return true
3310 }
3311 return false
3312 }
3313 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3314 v_1 := v.Args[1]
3315 v_0 := v.Args[0]
3316 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3317 // cond: ValAndOff(valoff1).canAdd32(off2)
3318 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3319 for {
3320 valoff1 := auxIntToValAndOff(v.AuxInt)
3321 sym := auxToSym(v.Aux)
3322 if v_0.Op != OpAMD64ADDQconst {
3323 break
3324 }
3325 off2 := auxIntToInt32(v_0.AuxInt)
3326 base := v_0.Args[0]
3327 mem := v_1
3328 if !(ValAndOff(valoff1).canAdd32(off2)) {
3329 break
3330 }
3331 v.reset(OpAMD64ANDQconstmodify)
3332 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3333 v.Aux = symToAux(sym)
3334 v.AddArg2(base, mem)
3335 return true
3336 }
3337 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3338 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3339 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3340 for {
3341 valoff1 := auxIntToValAndOff(v.AuxInt)
3342 sym1 := auxToSym(v.Aux)
3343 if v_0.Op != OpAMD64LEAQ {
3344 break
3345 }
3346 off2 := auxIntToInt32(v_0.AuxInt)
3347 sym2 := auxToSym(v_0.Aux)
3348 base := v_0.Args[0]
3349 mem := v_1
3350 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3351 break
3352 }
3353 v.reset(OpAMD64ANDQconstmodify)
3354 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3355 v.Aux = symToAux(mergeSym(sym1, sym2))
3356 v.AddArg2(base, mem)
3357 return true
3358 }
3359 return false
3360 }
3361 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3362 v_2 := v.Args[2]
3363 v_1 := v.Args[1]
3364 v_0 := v.Args[0]
3365 b := v.Block
3366 typ := &b.Func.Config.Types
3367 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3368 // cond: is32Bit(int64(off1)+int64(off2))
3369 // result: (ANDQload [off1+off2] {sym} val base mem)
3370 for {
3371 off1 := auxIntToInt32(v.AuxInt)
3372 sym := auxToSym(v.Aux)
3373 val := v_0
3374 if v_1.Op != OpAMD64ADDQconst {
3375 break
3376 }
3377 off2 := auxIntToInt32(v_1.AuxInt)
3378 base := v_1.Args[0]
3379 mem := v_2
3380 if !(is32Bit(int64(off1) + int64(off2))) {
3381 break
3382 }
3383 v.reset(OpAMD64ANDQload)
3384 v.AuxInt = int32ToAuxInt(off1 + off2)
3385 v.Aux = symToAux(sym)
3386 v.AddArg3(val, base, mem)
3387 return true
3388 }
3389 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3390 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3391 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3392 for {
3393 off1 := auxIntToInt32(v.AuxInt)
3394 sym1 := auxToSym(v.Aux)
3395 val := v_0
3396 if v_1.Op != OpAMD64LEAQ {
3397 break
3398 }
3399 off2 := auxIntToInt32(v_1.AuxInt)
3400 sym2 := auxToSym(v_1.Aux)
3401 base := v_1.Args[0]
3402 mem := v_2
3403 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3404 break
3405 }
3406 v.reset(OpAMD64ANDQload)
3407 v.AuxInt = int32ToAuxInt(off1 + off2)
3408 v.Aux = symToAux(mergeSym(sym1, sym2))
3409 v.AddArg3(val, base, mem)
3410 return true
3411 }
3412 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3413 // result: (ANDQ x (MOVQf2i y))
3414 for {
3415 off := auxIntToInt32(v.AuxInt)
3416 sym := auxToSym(v.Aux)
3417 x := v_0
3418 ptr := v_1
3419 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3420 break
3421 }
3422 y := v_2.Args[1]
3423 if ptr != v_2.Args[0] {
3424 break
3425 }
3426 v.reset(OpAMD64ANDQ)
3427 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3428 v0.AddArg(y)
3429 v.AddArg2(x, v0)
3430 return true
3431 }
3432 return false
3433 }
3434 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3435 v_2 := v.Args[2]
3436 v_1 := v.Args[1]
3437 v_0 := v.Args[0]
3438 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3439 // cond: is32Bit(int64(off1)+int64(off2))
3440 // result: (ANDQmodify [off1+off2] {sym} base val mem)
3441 for {
3442 off1 := auxIntToInt32(v.AuxInt)
3443 sym := auxToSym(v.Aux)
3444 if v_0.Op != OpAMD64ADDQconst {
3445 break
3446 }
3447 off2 := auxIntToInt32(v_0.AuxInt)
3448 base := v_0.Args[0]
3449 val := v_1
3450 mem := v_2
3451 if !(is32Bit(int64(off1) + int64(off2))) {
3452 break
3453 }
3454 v.reset(OpAMD64ANDQmodify)
3455 v.AuxInt = int32ToAuxInt(off1 + off2)
3456 v.Aux = symToAux(sym)
3457 v.AddArg3(base, val, mem)
3458 return true
3459 }
3460 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3461 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3462 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3463 for {
3464 off1 := auxIntToInt32(v.AuxInt)
3465 sym1 := auxToSym(v.Aux)
3466 if v_0.Op != OpAMD64LEAQ {
3467 break
3468 }
3469 off2 := auxIntToInt32(v_0.AuxInt)
3470 sym2 := auxToSym(v_0.Aux)
3471 base := v_0.Args[0]
3472 val := v_1
3473 mem := v_2
3474 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3475 break
3476 }
3477 v.reset(OpAMD64ANDQmodify)
3478 v.AuxInt = int32ToAuxInt(off1 + off2)
3479 v.Aux = symToAux(mergeSym(sym1, sym2))
3480 v.AddArg3(base, val, mem)
3481 return true
3482 }
3483 return false
3484 }
3485 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3486 v_0 := v.Args[0]
3487 b := v.Block
3488 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
3489 // result: (BSFQ (ORQconst <t> [1<<8] x))
3490 for {
3491 if v_0.Op != OpAMD64ORQconst {
3492 break
3493 }
3494 t := v_0.Type
3495 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3496 break
3497 }
3498 v_0_0 := v_0.Args[0]
3499 if v_0_0.Op != OpAMD64MOVBQZX {
3500 break
3501 }
3502 x := v_0_0.Args[0]
3503 v.reset(OpAMD64BSFQ)
3504 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3505 v0.AuxInt = int32ToAuxInt(1 << 8)
3506 v0.AddArg(x)
3507 v.AddArg(v0)
3508 return true
3509 }
3510 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
3511 // result: (BSFQ (ORQconst <t> [1<<16] x))
3512 for {
3513 if v_0.Op != OpAMD64ORQconst {
3514 break
3515 }
3516 t := v_0.Type
3517 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3518 break
3519 }
3520 v_0_0 := v_0.Args[0]
3521 if v_0_0.Op != OpAMD64MOVWQZX {
3522 break
3523 }
3524 x := v_0_0.Args[0]
3525 v.reset(OpAMD64BSFQ)
3526 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3527 v0.AuxInt = int32ToAuxInt(1 << 16)
3528 v0.AddArg(x)
3529 v.AddArg(v0)
3530 return true
3531 }
3532 return false
3533 }
3534 func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
3535 v_0 := v.Args[0]
3536 // match: (BSWAPL (BSWAPL p))
3537 // result: p
3538 for {
3539 if v_0.Op != OpAMD64BSWAPL {
3540 break
3541 }
3542 p := v_0.Args[0]
3543 v.copyOf(p)
3544 return true
3545 }
3546 // match: (BSWAPL x:(MOVLload [i] {s} p mem))
3547 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
3548 // result: (MOVBELload [i] {s} p mem)
3549 for {
3550 x := v_0
3551 if x.Op != OpAMD64MOVLload {
3552 break
3553 }
3554 i := auxIntToInt32(x.AuxInt)
3555 s := auxToSym(x.Aux)
3556 mem := x.Args[1]
3557 p := x.Args[0]
3558 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3559 break
3560 }
3561 v.reset(OpAMD64MOVBELload)
3562 v.AuxInt = int32ToAuxInt(i)
3563 v.Aux = symToAux(s)
3564 v.AddArg2(p, mem)
3565 return true
3566 }
3567 // match: (BSWAPL (MOVBELload [i] {s} p m))
3568 // result: (MOVLload [i] {s} p m)
3569 for {
3570 if v_0.Op != OpAMD64MOVBELload {
3571 break
3572 }
3573 i := auxIntToInt32(v_0.AuxInt)
3574 s := auxToSym(v_0.Aux)
3575 m := v_0.Args[1]
3576 p := v_0.Args[0]
3577 v.reset(OpAMD64MOVLload)
3578 v.AuxInt = int32ToAuxInt(i)
3579 v.Aux = symToAux(s)
3580 v.AddArg2(p, m)
3581 return true
3582 }
3583 return false
3584 }
3585 func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
3586 v_0 := v.Args[0]
3587 // match: (BSWAPQ (BSWAPQ p))
3588 // result: p
3589 for {
3590 if v_0.Op != OpAMD64BSWAPQ {
3591 break
3592 }
3593 p := v_0.Args[0]
3594 v.copyOf(p)
3595 return true
3596 }
3597 // match: (BSWAPQ x:(MOVQload [i] {s} p mem))
3598 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
3599 // result: (MOVBEQload [i] {s} p mem)
3600 for {
3601 x := v_0
3602 if x.Op != OpAMD64MOVQload {
3603 break
3604 }
3605 i := auxIntToInt32(x.AuxInt)
3606 s := auxToSym(x.Aux)
3607 mem := x.Args[1]
3608 p := x.Args[0]
3609 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3610 break
3611 }
3612 v.reset(OpAMD64MOVBEQload)
3613 v.AuxInt = int32ToAuxInt(i)
3614 v.Aux = symToAux(s)
3615 v.AddArg2(p, mem)
3616 return true
3617 }
3618 // match: (BSWAPQ (MOVBEQload [i] {s} p m))
3619 // result: (MOVQload [i] {s} p m)
3620 for {
3621 if v_0.Op != OpAMD64MOVBEQload {
3622 break
3623 }
3624 i := auxIntToInt32(v_0.AuxInt)
3625 s := auxToSym(v_0.Aux)
3626 m := v_0.Args[1]
3627 p := v_0.Args[0]
3628 v.reset(OpAMD64MOVQload)
3629 v.AuxInt = int32ToAuxInt(i)
3630 v.Aux = symToAux(s)
3631 v.AddArg2(p, m)
3632 return true
3633 }
3634 return false
3635 }
3636 func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
3637 v_0 := v.Args[0]
3638 // match: (BTCLconst [c] (XORLconst [d] x))
3639 // result: (XORLconst [d ^ 1<<uint32(c)] x)
3640 for {
3641 c := auxIntToInt8(v.AuxInt)
3642 if v_0.Op != OpAMD64XORLconst {
3643 break
3644 }
3645 d := auxIntToInt32(v_0.AuxInt)
3646 x := v_0.Args[0]
3647 v.reset(OpAMD64XORLconst)
3648 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3649 v.AddArg(x)
3650 return true
3651 }
3652 // match: (BTCLconst [c] (BTCLconst [d] x))
3653 // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
3654 for {
3655 c := auxIntToInt8(v.AuxInt)
3656 if v_0.Op != OpAMD64BTCLconst {
3657 break
3658 }
3659 d := auxIntToInt8(v_0.AuxInt)
3660 x := v_0.Args[0]
3661 v.reset(OpAMD64XORLconst)
3662 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
3663 v.AddArg(x)
3664 return true
3665 }
3666 // match: (BTCLconst [c] (MOVLconst [d]))
3667 // result: (MOVLconst [d^(1<<uint32(c))])
3668 for {
3669 c := auxIntToInt8(v.AuxInt)
3670 if v_0.Op != OpAMD64MOVLconst {
3671 break
3672 }
3673 d := auxIntToInt32(v_0.AuxInt)
3674 v.reset(OpAMD64MOVLconst)
3675 v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
3676 return true
3677 }
3678 return false
3679 }
3680 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3681 v_0 := v.Args[0]
3682 // match: (BTCQconst [c] (XORQconst [d] x))
3683 // cond: is32Bit(int64(d) ^ 1<<uint32(c))
3684 // result: (XORQconst [d ^ 1<<uint32(c)] x)
3685 for {
3686 c := auxIntToInt8(v.AuxInt)
3687 if v_0.Op != OpAMD64XORQconst {
3688 break
3689 }
3690 d := auxIntToInt32(v_0.AuxInt)
3691 x := v_0.Args[0]
3692 if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
3693 break
3694 }
3695 v.reset(OpAMD64XORQconst)
3696 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3697 v.AddArg(x)
3698 return true
3699 }
3700 // match: (BTCQconst [c] (BTCQconst [d] x))
3701 // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
3702 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
3703 for {
3704 c := auxIntToInt8(v.AuxInt)
3705 if v_0.Op != OpAMD64BTCQconst {
3706 break
3707 }
3708 d := auxIntToInt8(v_0.AuxInt)
3709 x := v_0.Args[0]
3710 if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
3711 break
3712 }
3713 v.reset(OpAMD64XORQconst)
3714 v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
3715 v.AddArg(x)
3716 return true
3717 }
3718 // match: (BTCQconst [c] (MOVQconst [d]))
3719 // result: (MOVQconst [d^(1<<uint32(c))])
3720 for {
3721 c := auxIntToInt8(v.AuxInt)
3722 if v_0.Op != OpAMD64MOVQconst {
3723 break
3724 }
3725 d := auxIntToInt64(v_0.AuxInt)
3726 v.reset(OpAMD64MOVQconst)
3727 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3728 return true
3729 }
3730 return false
3731 }
3732 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3733 v_0 := v.Args[0]
3734 // match: (BTLconst [c] (SHRQconst [d] x))
3735 // cond: (c+d)<64
3736 // result: (BTQconst [c+d] x)
3737 for {
3738 c := auxIntToInt8(v.AuxInt)
3739 if v_0.Op != OpAMD64SHRQconst {
3740 break
3741 }
3742 d := auxIntToInt8(v_0.AuxInt)
3743 x := v_0.Args[0]
3744 if !((c + d) < 64) {
3745 break
3746 }
3747 v.reset(OpAMD64BTQconst)
3748 v.AuxInt = int8ToAuxInt(c + d)
3749 v.AddArg(x)
3750 return true
3751 }
3752 // match: (BTLconst [c] (SHLQconst [d] x))
3753 // cond: c>d
3754 // result: (BTLconst [c-d] x)
3755 for {
3756 c := auxIntToInt8(v.AuxInt)
3757 if v_0.Op != OpAMD64SHLQconst {
3758 break
3759 }
3760 d := auxIntToInt8(v_0.AuxInt)
3761 x := v_0.Args[0]
3762 if !(c > d) {
3763 break
3764 }
3765 v.reset(OpAMD64BTLconst)
3766 v.AuxInt = int8ToAuxInt(c - d)
3767 v.AddArg(x)
3768 return true
3769 }
3770 // match: (BTLconst [0] s:(SHRQ x y))
3771 // result: (BTQ y x)
3772 for {
3773 if auxIntToInt8(v.AuxInt) != 0 {
3774 break
3775 }
3776 s := v_0
3777 if s.Op != OpAMD64SHRQ {
3778 break
3779 }
3780 y := s.Args[1]
3781 x := s.Args[0]
3782 v.reset(OpAMD64BTQ)
3783 v.AddArg2(y, x)
3784 return true
3785 }
3786 // match: (BTLconst [c] (SHRLconst [d] x))
3787 // cond: (c+d)<32
3788 // result: (BTLconst [c+d] x)
3789 for {
3790 c := auxIntToInt8(v.AuxInt)
3791 if v_0.Op != OpAMD64SHRLconst {
3792 break
3793 }
3794 d := auxIntToInt8(v_0.AuxInt)
3795 x := v_0.Args[0]
3796 if !((c + d) < 32) {
3797 break
3798 }
3799 v.reset(OpAMD64BTLconst)
3800 v.AuxInt = int8ToAuxInt(c + d)
3801 v.AddArg(x)
3802 return true
3803 }
3804 // match: (BTLconst [c] (SHLLconst [d] x))
3805 // cond: c>d
3806 // result: (BTLconst [c-d] x)
3807 for {
3808 c := auxIntToInt8(v.AuxInt)
3809 if v_0.Op != OpAMD64SHLLconst {
3810 break
3811 }
3812 d := auxIntToInt8(v_0.AuxInt)
3813 x := v_0.Args[0]
3814 if !(c > d) {
3815 break
3816 }
3817 v.reset(OpAMD64BTLconst)
3818 v.AuxInt = int8ToAuxInt(c - d)
3819 v.AddArg(x)
3820 return true
3821 }
3822 // match: (BTLconst [0] s:(SHRL x y))
3823 // result: (BTL y x)
3824 for {
3825 if auxIntToInt8(v.AuxInt) != 0 {
3826 break
3827 }
3828 s := v_0
3829 if s.Op != OpAMD64SHRL {
3830 break
3831 }
3832 y := s.Args[1]
3833 x := s.Args[0]
3834 v.reset(OpAMD64BTL)
3835 v.AddArg2(y, x)
3836 return true
3837 }
3838 // match: (BTLconst [0] s:(SHRXL x y))
3839 // result: (BTL y x)
3840 for {
3841 if auxIntToInt8(v.AuxInt) != 0 {
3842 break
3843 }
3844 s := v_0
3845 if s.Op != OpAMD64SHRXL {
3846 break
3847 }
3848 y := s.Args[1]
3849 x := s.Args[0]
3850 v.reset(OpAMD64BTL)
3851 v.AddArg2(y, x)
3852 return true
3853 }
3854 return false
3855 }
3856 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3857 v_0 := v.Args[0]
3858 // match: (BTQconst [c] (SHRQconst [d] x))
3859 // cond: (c+d)<64
3860 // result: (BTQconst [c+d] x)
3861 for {
3862 c := auxIntToInt8(v.AuxInt)
3863 if v_0.Op != OpAMD64SHRQconst {
3864 break
3865 }
3866 d := auxIntToInt8(v_0.AuxInt)
3867 x := v_0.Args[0]
3868 if !((c + d) < 64) {
3869 break
3870 }
3871 v.reset(OpAMD64BTQconst)
3872 v.AuxInt = int8ToAuxInt(c + d)
3873 v.AddArg(x)
3874 return true
3875 }
3876 // match: (BTQconst [c] (SHLQconst [d] x))
3877 // cond: c>d
3878 // result: (BTQconst [c-d] x)
3879 for {
3880 c := auxIntToInt8(v.AuxInt)
3881 if v_0.Op != OpAMD64SHLQconst {
3882 break
3883 }
3884 d := auxIntToInt8(v_0.AuxInt)
3885 x := v_0.Args[0]
3886 if !(c > d) {
3887 break
3888 }
3889 v.reset(OpAMD64BTQconst)
3890 v.AuxInt = int8ToAuxInt(c - d)
3891 v.AddArg(x)
3892 return true
3893 }
3894 // match: (BTQconst [0] s:(SHRQ x y))
3895 // result: (BTQ y x)
3896 for {
3897 if auxIntToInt8(v.AuxInt) != 0 {
3898 break
3899 }
3900 s := v_0
3901 if s.Op != OpAMD64SHRQ {
3902 break
3903 }
3904 y := s.Args[1]
3905 x := s.Args[0]
3906 v.reset(OpAMD64BTQ)
3907 v.AddArg2(y, x)
3908 return true
3909 }
3910 return false
3911 }
3912 func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
3913 v_0 := v.Args[0]
3914 // match: (BTRLconst [c] (BTSLconst [c] x))
3915 // result: (BTRLconst [c] x)
3916 for {
3917 c := auxIntToInt8(v.AuxInt)
3918 if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
3919 break
3920 }
3921 x := v_0.Args[0]
3922 v.reset(OpAMD64BTRLconst)
3923 v.AuxInt = int8ToAuxInt(c)
3924 v.AddArg(x)
3925 return true
3926 }
3927 // match: (BTRLconst [c] (BTCLconst [c] x))
3928 // result: (BTRLconst [c] x)
3929 for {
3930 c := auxIntToInt8(v.AuxInt)
3931 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
3932 break
3933 }
3934 x := v_0.Args[0]
3935 v.reset(OpAMD64BTRLconst)
3936 v.AuxInt = int8ToAuxInt(c)
3937 v.AddArg(x)
3938 return true
3939 }
3940 // match: (BTRLconst [c] (ANDLconst [d] x))
3941 // result: (ANDLconst [d &^ (1<<uint32(c))] x)
3942 for {
3943 c := auxIntToInt8(v.AuxInt)
3944 if v_0.Op != OpAMD64ANDLconst {
3945 break
3946 }
3947 d := auxIntToInt32(v_0.AuxInt)
3948 x := v_0.Args[0]
3949 v.reset(OpAMD64ANDLconst)
3950 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
3951 v.AddArg(x)
3952 return true
3953 }
3954 // match: (BTRLconst [c] (BTRLconst [d] x))
3955 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
3956 for {
3957 c := auxIntToInt8(v.AuxInt)
3958 if v_0.Op != OpAMD64BTRLconst {
3959 break
3960 }
3961 d := auxIntToInt8(v_0.AuxInt)
3962 x := v_0.Args[0]
3963 v.reset(OpAMD64ANDLconst)
3964 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
3965 v.AddArg(x)
3966 return true
3967 }
3968 // match: (BTRLconst [c] (MOVLconst [d]))
3969 // result: (MOVLconst [d&^(1<<uint32(c))])
3970 for {
3971 c := auxIntToInt8(v.AuxInt)
3972 if v_0.Op != OpAMD64MOVLconst {
3973 break
3974 }
3975 d := auxIntToInt32(v_0.AuxInt)
3976 v.reset(OpAMD64MOVLconst)
3977 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
3978 return true
3979 }
3980 return false
3981 }
3982 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
3983 v_0 := v.Args[0]
3984 // match: (BTRQconst [c] (BTSQconst [c] x))
3985 // result: (BTRQconst [c] x)
3986 for {
3987 c := auxIntToInt8(v.AuxInt)
3988 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
3989 break
3990 }
3991 x := v_0.Args[0]
3992 v.reset(OpAMD64BTRQconst)
3993 v.AuxInt = int8ToAuxInt(c)
3994 v.AddArg(x)
3995 return true
3996 }
3997 // match: (BTRQconst [c] (BTCQconst [c] x))
3998 // result: (BTRQconst [c] x)
3999 for {
4000 c := auxIntToInt8(v.AuxInt)
4001 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4002 break
4003 }
4004 x := v_0.Args[0]
4005 v.reset(OpAMD64BTRQconst)
4006 v.AuxInt = int8ToAuxInt(c)
4007 v.AddArg(x)
4008 return true
4009 }
4010 // match: (BTRQconst [c] (ANDQconst [d] x))
4011 // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
4012 // result: (ANDQconst [d &^ (1<<uint32(c))] x)
4013 for {
4014 c := auxIntToInt8(v.AuxInt)
4015 if v_0.Op != OpAMD64ANDQconst {
4016 break
4017 }
4018 d := auxIntToInt32(v_0.AuxInt)
4019 x := v_0.Args[0]
4020 if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
4021 break
4022 }
4023 v.reset(OpAMD64ANDQconst)
4024 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4025 v.AddArg(x)
4026 return true
4027 }
4028 // match: (BTRQconst [c] (BTRQconst [d] x))
4029 // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
4030 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4031 for {
4032 c := auxIntToInt8(v.AuxInt)
4033 if v_0.Op != OpAMD64BTRQconst {
4034 break
4035 }
4036 d := auxIntToInt8(v_0.AuxInt)
4037 x := v_0.Args[0]
4038 if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
4039 break
4040 }
4041 v.reset(OpAMD64ANDQconst)
4042 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4043 v.AddArg(x)
4044 return true
4045 }
4046 // match: (BTRQconst [c] (MOVQconst [d]))
4047 // result: (MOVQconst [d&^(1<<uint32(c))])
4048 for {
4049 c := auxIntToInt8(v.AuxInt)
4050 if v_0.Op != OpAMD64MOVQconst {
4051 break
4052 }
4053 d := auxIntToInt64(v_0.AuxInt)
4054 v.reset(OpAMD64MOVQconst)
4055 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
4056 return true
4057 }
4058 return false
4059 }
4060 func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
4061 v_0 := v.Args[0]
4062 // match: (BTSLconst [c] (BTRLconst [c] x))
4063 // result: (BTSLconst [c] x)
4064 for {
4065 c := auxIntToInt8(v.AuxInt)
4066 if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
4067 break
4068 }
4069 x := v_0.Args[0]
4070 v.reset(OpAMD64BTSLconst)
4071 v.AuxInt = int8ToAuxInt(c)
4072 v.AddArg(x)
4073 return true
4074 }
4075 // match: (BTSLconst [c] (BTCLconst [c] x))
4076 // result: (BTSLconst [c] x)
4077 for {
4078 c := auxIntToInt8(v.AuxInt)
4079 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4080 break
4081 }
4082 x := v_0.Args[0]
4083 v.reset(OpAMD64BTSLconst)
4084 v.AuxInt = int8ToAuxInt(c)
4085 v.AddArg(x)
4086 return true
4087 }
4088 // match: (BTSLconst [c] (ORLconst [d] x))
4089 // result: (ORLconst [d | 1<<uint32(c)] x)
4090 for {
4091 c := auxIntToInt8(v.AuxInt)
4092 if v_0.Op != OpAMD64ORLconst {
4093 break
4094 }
4095 d := auxIntToInt32(v_0.AuxInt)
4096 x := v_0.Args[0]
4097 v.reset(OpAMD64ORLconst)
4098 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4099 v.AddArg(x)
4100 return true
4101 }
4102 // match: (BTSLconst [c] (BTSLconst [d] x))
4103 // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
4104 for {
4105 c := auxIntToInt8(v.AuxInt)
4106 if v_0.Op != OpAMD64BTSLconst {
4107 break
4108 }
4109 d := auxIntToInt8(v_0.AuxInt)
4110 x := v_0.Args[0]
4111 v.reset(OpAMD64ORLconst)
4112 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4113 v.AddArg(x)
4114 return true
4115 }
4116 // match: (BTSLconst [c] (MOVLconst [d]))
4117 // result: (MOVLconst [d|(1<<uint32(c))])
4118 for {
4119 c := auxIntToInt8(v.AuxInt)
4120 if v_0.Op != OpAMD64MOVLconst {
4121 break
4122 }
4123 d := auxIntToInt32(v_0.AuxInt)
4124 v.reset(OpAMD64MOVLconst)
4125 v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
4126 return true
4127 }
4128 return false
4129 }
4130 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
4131 v_0 := v.Args[0]
4132 // match: (BTSQconst [c] (BTRQconst [c] x))
4133 // result: (BTSQconst [c] x)
4134 for {
4135 c := auxIntToInt8(v.AuxInt)
4136 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
4137 break
4138 }
4139 x := v_0.Args[0]
4140 v.reset(OpAMD64BTSQconst)
4141 v.AuxInt = int8ToAuxInt(c)
4142 v.AddArg(x)
4143 return true
4144 }
4145 // match: (BTSQconst [c] (BTCQconst [c] x))
4146 // result: (BTSQconst [c] x)
4147 for {
4148 c := auxIntToInt8(v.AuxInt)
4149 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4150 break
4151 }
4152 x := v_0.Args[0]
4153 v.reset(OpAMD64BTSQconst)
4154 v.AuxInt = int8ToAuxInt(c)
4155 v.AddArg(x)
4156 return true
4157 }
4158 // match: (BTSQconst [c] (ORQconst [d] x))
4159 // cond: is32Bit(int64(d) | 1<<uint32(c))
4160 // result: (ORQconst [d | 1<<uint32(c)] x)
4161 for {
4162 c := auxIntToInt8(v.AuxInt)
4163 if v_0.Op != OpAMD64ORQconst {
4164 break
4165 }
4166 d := auxIntToInt32(v_0.AuxInt)
4167 x := v_0.Args[0]
4168 if !(is32Bit(int64(d) | 1<<uint32(c))) {
4169 break
4170 }
4171 v.reset(OpAMD64ORQconst)
4172 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4173 v.AddArg(x)
4174 return true
4175 }
4176 // match: (BTSQconst [c] (BTSQconst [d] x))
4177 // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
4178 // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
4179 for {
4180 c := auxIntToInt8(v.AuxInt)
4181 if v_0.Op != OpAMD64BTSQconst {
4182 break
4183 }
4184 d := auxIntToInt8(v_0.AuxInt)
4185 x := v_0.Args[0]
4186 if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
4187 break
4188 }
4189 v.reset(OpAMD64ORQconst)
4190 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4191 v.AddArg(x)
4192 return true
4193 }
4194 // match: (BTSQconst [c] (MOVQconst [d]))
4195 // result: (MOVQconst [d|(1<<uint32(c))])
4196 for {
4197 c := auxIntToInt8(v.AuxInt)
4198 if v_0.Op != OpAMD64MOVQconst {
4199 break
4200 }
4201 d := auxIntToInt64(v_0.AuxInt)
4202 v.reset(OpAMD64MOVQconst)
4203 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
4204 return true
4205 }
4206 return false
4207 }
4208 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
4209 v_2 := v.Args[2]
4210 v_1 := v.Args[1]
4211 v_0 := v.Args[0]
4212 // match: (CMOVLCC x y (InvertFlags cond))
4213 // result: (CMOVLLS x y cond)
4214 for {
4215 x := v_0
4216 y := v_1
4217 if v_2.Op != OpAMD64InvertFlags {
4218 break
4219 }
4220 cond := v_2.Args[0]
4221 v.reset(OpAMD64CMOVLLS)
4222 v.AddArg3(x, y, cond)
4223 return true
4224 }
4225 // match: (CMOVLCC _ x (FlagEQ))
4226 // result: x
4227 for {
4228 x := v_1
4229 if v_2.Op != OpAMD64FlagEQ {
4230 break
4231 }
4232 v.copyOf(x)
4233 return true
4234 }
4235 // match: (CMOVLCC _ x (FlagGT_UGT))
4236 // result: x
4237 for {
4238 x := v_1
4239 if v_2.Op != OpAMD64FlagGT_UGT {
4240 break
4241 }
4242 v.copyOf(x)
4243 return true
4244 }
4245 // match: (CMOVLCC y _ (FlagGT_ULT))
4246 // result: y
4247 for {
4248 y := v_0
4249 if v_2.Op != OpAMD64FlagGT_ULT {
4250 break
4251 }
4252 v.copyOf(y)
4253 return true
4254 }
4255 // match: (CMOVLCC y _ (FlagLT_ULT))
4256 // result: y
4257 for {
4258 y := v_0
4259 if v_2.Op != OpAMD64FlagLT_ULT {
4260 break
4261 }
4262 v.copyOf(y)
4263 return true
4264 }
4265 // match: (CMOVLCC _ x (FlagLT_UGT))
4266 // result: x
4267 for {
4268 x := v_1
4269 if v_2.Op != OpAMD64FlagLT_UGT {
4270 break
4271 }
4272 v.copyOf(x)
4273 return true
4274 }
4275 return false
4276 }
4277 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4278 v_2 := v.Args[2]
4279 v_1 := v.Args[1]
4280 v_0 := v.Args[0]
4281 // match: (CMOVLCS x y (InvertFlags cond))
4282 // result: (CMOVLHI x y cond)
4283 for {
4284 x := v_0
4285 y := v_1
4286 if v_2.Op != OpAMD64InvertFlags {
4287 break
4288 }
4289 cond := v_2.Args[0]
4290 v.reset(OpAMD64CMOVLHI)
4291 v.AddArg3(x, y, cond)
4292 return true
4293 }
4294 // match: (CMOVLCS y _ (FlagEQ))
4295 // result: y
4296 for {
4297 y := v_0
4298 if v_2.Op != OpAMD64FlagEQ {
4299 break
4300 }
4301 v.copyOf(y)
4302 return true
4303 }
4304 // match: (CMOVLCS y _ (FlagGT_UGT))
4305 // result: y
4306 for {
4307 y := v_0
4308 if v_2.Op != OpAMD64FlagGT_UGT {
4309 break
4310 }
4311 v.copyOf(y)
4312 return true
4313 }
4314 // match: (CMOVLCS _ x (FlagGT_ULT))
4315 // result: x
4316 for {
4317 x := v_1
4318 if v_2.Op != OpAMD64FlagGT_ULT {
4319 break
4320 }
4321 v.copyOf(x)
4322 return true
4323 }
4324 // match: (CMOVLCS _ x (FlagLT_ULT))
4325 // result: x
4326 for {
4327 x := v_1
4328 if v_2.Op != OpAMD64FlagLT_ULT {
4329 break
4330 }
4331 v.copyOf(x)
4332 return true
4333 }
4334 // match: (CMOVLCS y _ (FlagLT_UGT))
4335 // result: y
4336 for {
4337 y := v_0
4338 if v_2.Op != OpAMD64FlagLT_UGT {
4339 break
4340 }
4341 v.copyOf(y)
4342 return true
4343 }
4344 return false
4345 }
4346 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4347 v_2 := v.Args[2]
4348 v_1 := v.Args[1]
4349 v_0 := v.Args[0]
4350 // match: (CMOVLEQ x y (InvertFlags cond))
4351 // result: (CMOVLEQ x y cond)
4352 for {
4353 x := v_0
4354 y := v_1
4355 if v_2.Op != OpAMD64InvertFlags {
4356 break
4357 }
4358 cond := v_2.Args[0]
4359 v.reset(OpAMD64CMOVLEQ)
4360 v.AddArg3(x, y, cond)
4361 return true
4362 }
4363 // match: (CMOVLEQ _ x (FlagEQ))
4364 // result: x
4365 for {
4366 x := v_1
4367 if v_2.Op != OpAMD64FlagEQ {
4368 break
4369 }
4370 v.copyOf(x)
4371 return true
4372 }
4373 // match: (CMOVLEQ y _ (FlagGT_UGT))
4374 // result: y
4375 for {
4376 y := v_0
4377 if v_2.Op != OpAMD64FlagGT_UGT {
4378 break
4379 }
4380 v.copyOf(y)
4381 return true
4382 }
4383 // match: (CMOVLEQ y _ (FlagGT_ULT))
4384 // result: y
4385 for {
4386 y := v_0
4387 if v_2.Op != OpAMD64FlagGT_ULT {
4388 break
4389 }
4390 v.copyOf(y)
4391 return true
4392 }
4393 // match: (CMOVLEQ y _ (FlagLT_ULT))
4394 // result: y
4395 for {
4396 y := v_0
4397 if v_2.Op != OpAMD64FlagLT_ULT {
4398 break
4399 }
4400 v.copyOf(y)
4401 return true
4402 }
4403 // match: (CMOVLEQ y _ (FlagLT_UGT))
4404 // result: y
4405 for {
4406 y := v_0
4407 if v_2.Op != OpAMD64FlagLT_UGT {
4408 break
4409 }
4410 v.copyOf(y)
4411 return true
4412 }
4413 return false
4414 }
4415 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4416 v_2 := v.Args[2]
4417 v_1 := v.Args[1]
4418 v_0 := v.Args[0]
4419 // match: (CMOVLGE x y (InvertFlags cond))
4420 // result: (CMOVLLE x y cond)
4421 for {
4422 x := v_0
4423 y := v_1
4424 if v_2.Op != OpAMD64InvertFlags {
4425 break
4426 }
4427 cond := v_2.Args[0]
4428 v.reset(OpAMD64CMOVLLE)
4429 v.AddArg3(x, y, cond)
4430 return true
4431 }
4432 // match: (CMOVLGE _ x (FlagEQ))
4433 // result: x
4434 for {
4435 x := v_1
4436 if v_2.Op != OpAMD64FlagEQ {
4437 break
4438 }
4439 v.copyOf(x)
4440 return true
4441 }
4442 // match: (CMOVLGE _ x (FlagGT_UGT))
4443 // result: x
4444 for {
4445 x := v_1
4446 if v_2.Op != OpAMD64FlagGT_UGT {
4447 break
4448 }
4449 v.copyOf(x)
4450 return true
4451 }
4452 // match: (CMOVLGE _ x (FlagGT_ULT))
4453 // result: x
4454 for {
4455 x := v_1
4456 if v_2.Op != OpAMD64FlagGT_ULT {
4457 break
4458 }
4459 v.copyOf(x)
4460 return true
4461 }
4462 // match: (CMOVLGE y _ (FlagLT_ULT))
4463 // result: y
4464 for {
4465 y := v_0
4466 if v_2.Op != OpAMD64FlagLT_ULT {
4467 break
4468 }
4469 v.copyOf(y)
4470 return true
4471 }
4472 // match: (CMOVLGE y _ (FlagLT_UGT))
4473 // result: y
4474 for {
4475 y := v_0
4476 if v_2.Op != OpAMD64FlagLT_UGT {
4477 break
4478 }
4479 v.copyOf(y)
4480 return true
4481 }
4482 return false
4483 }
4484 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4485 v_2 := v.Args[2]
4486 v_1 := v.Args[1]
4487 v_0 := v.Args[0]
4488 // match: (CMOVLGT x y (InvertFlags cond))
4489 // result: (CMOVLLT x y cond)
4490 for {
4491 x := v_0
4492 y := v_1
4493 if v_2.Op != OpAMD64InvertFlags {
4494 break
4495 }
4496 cond := v_2.Args[0]
4497 v.reset(OpAMD64CMOVLLT)
4498 v.AddArg3(x, y, cond)
4499 return true
4500 }
4501 // match: (CMOVLGT y _ (FlagEQ))
4502 // result: y
4503 for {
4504 y := v_0
4505 if v_2.Op != OpAMD64FlagEQ {
4506 break
4507 }
4508 v.copyOf(y)
4509 return true
4510 }
4511 // match: (CMOVLGT _ x (FlagGT_UGT))
4512 // result: x
4513 for {
4514 x := v_1
4515 if v_2.Op != OpAMD64FlagGT_UGT {
4516 break
4517 }
4518 v.copyOf(x)
4519 return true
4520 }
4521 // match: (CMOVLGT _ x (FlagGT_ULT))
4522 // result: x
4523 for {
4524 x := v_1
4525 if v_2.Op != OpAMD64FlagGT_ULT {
4526 break
4527 }
4528 v.copyOf(x)
4529 return true
4530 }
4531 // match: (CMOVLGT y _ (FlagLT_ULT))
4532 // result: y
4533 for {
4534 y := v_0
4535 if v_2.Op != OpAMD64FlagLT_ULT {
4536 break
4537 }
4538 v.copyOf(y)
4539 return true
4540 }
4541 // match: (CMOVLGT y _ (FlagLT_UGT))
4542 // result: y
4543 for {
4544 y := v_0
4545 if v_2.Op != OpAMD64FlagLT_UGT {
4546 break
4547 }
4548 v.copyOf(y)
4549 return true
4550 }
4551 return false
4552 }
4553 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4554 v_2 := v.Args[2]
4555 v_1 := v.Args[1]
4556 v_0 := v.Args[0]
4557 // match: (CMOVLHI x y (InvertFlags cond))
4558 // result: (CMOVLCS x y cond)
4559 for {
4560 x := v_0
4561 y := v_1
4562 if v_2.Op != OpAMD64InvertFlags {
4563 break
4564 }
4565 cond := v_2.Args[0]
4566 v.reset(OpAMD64CMOVLCS)
4567 v.AddArg3(x, y, cond)
4568 return true
4569 }
4570 // match: (CMOVLHI y _ (FlagEQ))
4571 // result: y
4572 for {
4573 y := v_0
4574 if v_2.Op != OpAMD64FlagEQ {
4575 break
4576 }
4577 v.copyOf(y)
4578 return true
4579 }
4580 // match: (CMOVLHI _ x (FlagGT_UGT))
4581 // result: x
4582 for {
4583 x := v_1
4584 if v_2.Op != OpAMD64FlagGT_UGT {
4585 break
4586 }
4587 v.copyOf(x)
4588 return true
4589 }
4590 // match: (CMOVLHI y _ (FlagGT_ULT))
4591 // result: y
4592 for {
4593 y := v_0
4594 if v_2.Op != OpAMD64FlagGT_ULT {
4595 break
4596 }
4597 v.copyOf(y)
4598 return true
4599 }
4600 // match: (CMOVLHI y _ (FlagLT_ULT))
4601 // result: y
4602 for {
4603 y := v_0
4604 if v_2.Op != OpAMD64FlagLT_ULT {
4605 break
4606 }
4607 v.copyOf(y)
4608 return true
4609 }
4610 // match: (CMOVLHI _ x (FlagLT_UGT))
4611 // result: x
4612 for {
4613 x := v_1
4614 if v_2.Op != OpAMD64FlagLT_UGT {
4615 break
4616 }
4617 v.copyOf(x)
4618 return true
4619 }
4620 return false
4621 }
4622 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4623 v_2 := v.Args[2]
4624 v_1 := v.Args[1]
4625 v_0 := v.Args[0]
4626 // match: (CMOVLLE x y (InvertFlags cond))
4627 // result: (CMOVLGE x y cond)
4628 for {
4629 x := v_0
4630 y := v_1
4631 if v_2.Op != OpAMD64InvertFlags {
4632 break
4633 }
4634 cond := v_2.Args[0]
4635 v.reset(OpAMD64CMOVLGE)
4636 v.AddArg3(x, y, cond)
4637 return true
4638 }
4639 // match: (CMOVLLE _ x (FlagEQ))
4640 // result: x
4641 for {
4642 x := v_1
4643 if v_2.Op != OpAMD64FlagEQ {
4644 break
4645 }
4646 v.copyOf(x)
4647 return true
4648 }
4649 // match: (CMOVLLE y _ (FlagGT_UGT))
4650 // result: y
4651 for {
4652 y := v_0
4653 if v_2.Op != OpAMD64FlagGT_UGT {
4654 break
4655 }
4656 v.copyOf(y)
4657 return true
4658 }
4659 // match: (CMOVLLE y _ (FlagGT_ULT))
4660 // result: y
4661 for {
4662 y := v_0
4663 if v_2.Op != OpAMD64FlagGT_ULT {
4664 break
4665 }
4666 v.copyOf(y)
4667 return true
4668 }
4669 // match: (CMOVLLE _ x (FlagLT_ULT))
4670 // result: x
4671 for {
4672 x := v_1
4673 if v_2.Op != OpAMD64FlagLT_ULT {
4674 break
4675 }
4676 v.copyOf(x)
4677 return true
4678 }
4679 // match: (CMOVLLE _ x (FlagLT_UGT))
4680 // result: x
4681 for {
4682 x := v_1
4683 if v_2.Op != OpAMD64FlagLT_UGT {
4684 break
4685 }
4686 v.copyOf(x)
4687 return true
4688 }
4689 return false
4690 }
4691 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4692 v_2 := v.Args[2]
4693 v_1 := v.Args[1]
4694 v_0 := v.Args[0]
4695 // match: (CMOVLLS x y (InvertFlags cond))
4696 // result: (CMOVLCC x y cond)
4697 for {
4698 x := v_0
4699 y := v_1
4700 if v_2.Op != OpAMD64InvertFlags {
4701 break
4702 }
4703 cond := v_2.Args[0]
4704 v.reset(OpAMD64CMOVLCC)
4705 v.AddArg3(x, y, cond)
4706 return true
4707 }
4708 // match: (CMOVLLS _ x (FlagEQ))
4709 // result: x
4710 for {
4711 x := v_1
4712 if v_2.Op != OpAMD64FlagEQ {
4713 break
4714 }
4715 v.copyOf(x)
4716 return true
4717 }
4718 // match: (CMOVLLS y _ (FlagGT_UGT))
4719 // result: y
4720 for {
4721 y := v_0
4722 if v_2.Op != OpAMD64FlagGT_UGT {
4723 break
4724 }
4725 v.copyOf(y)
4726 return true
4727 }
4728 // match: (CMOVLLS _ x (FlagGT_ULT))
4729 // result: x
4730 for {
4731 x := v_1
4732 if v_2.Op != OpAMD64FlagGT_ULT {
4733 break
4734 }
4735 v.copyOf(x)
4736 return true
4737 }
4738 // match: (CMOVLLS _ x (FlagLT_ULT))
4739 // result: x
4740 for {
4741 x := v_1
4742 if v_2.Op != OpAMD64FlagLT_ULT {
4743 break
4744 }
4745 v.copyOf(x)
4746 return true
4747 }
4748 // match: (CMOVLLS y _ (FlagLT_UGT))
4749 // result: y
4750 for {
4751 y := v_0
4752 if v_2.Op != OpAMD64FlagLT_UGT {
4753 break
4754 }
4755 v.copyOf(y)
4756 return true
4757 }
4758 return false
4759 }
4760 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4761 v_2 := v.Args[2]
4762 v_1 := v.Args[1]
4763 v_0 := v.Args[0]
4764 // match: (CMOVLLT x y (InvertFlags cond))
4765 // result: (CMOVLGT x y cond)
4766 for {
4767 x := v_0
4768 y := v_1
4769 if v_2.Op != OpAMD64InvertFlags {
4770 break
4771 }
4772 cond := v_2.Args[0]
4773 v.reset(OpAMD64CMOVLGT)
4774 v.AddArg3(x, y, cond)
4775 return true
4776 }
4777 // match: (CMOVLLT y _ (FlagEQ))
4778 // result: y
4779 for {
4780 y := v_0
4781 if v_2.Op != OpAMD64FlagEQ {
4782 break
4783 }
4784 v.copyOf(y)
4785 return true
4786 }
4787 // match: (CMOVLLT y _ (FlagGT_UGT))
4788 // result: y
4789 for {
4790 y := v_0
4791 if v_2.Op != OpAMD64FlagGT_UGT {
4792 break
4793 }
4794 v.copyOf(y)
4795 return true
4796 }
4797 // match: (CMOVLLT y _ (FlagGT_ULT))
4798 // result: y
4799 for {
4800 y := v_0
4801 if v_2.Op != OpAMD64FlagGT_ULT {
4802 break
4803 }
4804 v.copyOf(y)
4805 return true
4806 }
4807 // match: (CMOVLLT _ x (FlagLT_ULT))
4808 // result: x
4809 for {
4810 x := v_1
4811 if v_2.Op != OpAMD64FlagLT_ULT {
4812 break
4813 }
4814 v.copyOf(x)
4815 return true
4816 }
4817 // match: (CMOVLLT _ x (FlagLT_UGT))
4818 // result: x
4819 for {
4820 x := v_1
4821 if v_2.Op != OpAMD64FlagLT_UGT {
4822 break
4823 }
4824 v.copyOf(x)
4825 return true
4826 }
4827 return false
4828 }
4829 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4830 v_2 := v.Args[2]
4831 v_1 := v.Args[1]
4832 v_0 := v.Args[0]
4833 // match: (CMOVLNE x y (InvertFlags cond))
4834 // result: (CMOVLNE x y cond)
4835 for {
4836 x := v_0
4837 y := v_1
4838 if v_2.Op != OpAMD64InvertFlags {
4839 break
4840 }
4841 cond := v_2.Args[0]
4842 v.reset(OpAMD64CMOVLNE)
4843 v.AddArg3(x, y, cond)
4844 return true
4845 }
4846 // match: (CMOVLNE y _ (FlagEQ))
4847 // result: y
4848 for {
4849 y := v_0
4850 if v_2.Op != OpAMD64FlagEQ {
4851 break
4852 }
4853 v.copyOf(y)
4854 return true
4855 }
4856 // match: (CMOVLNE _ x (FlagGT_UGT))
4857 // result: x
4858 for {
4859 x := v_1
4860 if v_2.Op != OpAMD64FlagGT_UGT {
4861 break
4862 }
4863 v.copyOf(x)
4864 return true
4865 }
4866 // match: (CMOVLNE _ x (FlagGT_ULT))
4867 // result: x
4868 for {
4869 x := v_1
4870 if v_2.Op != OpAMD64FlagGT_ULT {
4871 break
4872 }
4873 v.copyOf(x)
4874 return true
4875 }
4876 // match: (CMOVLNE _ x (FlagLT_ULT))
4877 // result: x
4878 for {
4879 x := v_1
4880 if v_2.Op != OpAMD64FlagLT_ULT {
4881 break
4882 }
4883 v.copyOf(x)
4884 return true
4885 }
4886 // match: (CMOVLNE _ x (FlagLT_UGT))
4887 // result: x
4888 for {
4889 x := v_1
4890 if v_2.Op != OpAMD64FlagLT_UGT {
4891 break
4892 }
4893 v.copyOf(x)
4894 return true
4895 }
4896 return false
4897 }
4898 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4899 v_2 := v.Args[2]
4900 v_1 := v.Args[1]
4901 v_0 := v.Args[0]
4902 // match: (CMOVQCC x y (InvertFlags cond))
4903 // result: (CMOVQLS x y cond)
4904 for {
4905 x := v_0
4906 y := v_1
4907 if v_2.Op != OpAMD64InvertFlags {
4908 break
4909 }
4910 cond := v_2.Args[0]
4911 v.reset(OpAMD64CMOVQLS)
4912 v.AddArg3(x, y, cond)
4913 return true
4914 }
4915 // match: (CMOVQCC _ x (FlagEQ))
4916 // result: x
4917 for {
4918 x := v_1
4919 if v_2.Op != OpAMD64FlagEQ {
4920 break
4921 }
4922 v.copyOf(x)
4923 return true
4924 }
4925 // match: (CMOVQCC _ x (FlagGT_UGT))
4926 // result: x
4927 for {
4928 x := v_1
4929 if v_2.Op != OpAMD64FlagGT_UGT {
4930 break
4931 }
4932 v.copyOf(x)
4933 return true
4934 }
4935 // match: (CMOVQCC y _ (FlagGT_ULT))
4936 // result: y
4937 for {
4938 y := v_0
4939 if v_2.Op != OpAMD64FlagGT_ULT {
4940 break
4941 }
4942 v.copyOf(y)
4943 return true
4944 }
4945 // match: (CMOVQCC y _ (FlagLT_ULT))
4946 // result: y
4947 for {
4948 y := v_0
4949 if v_2.Op != OpAMD64FlagLT_ULT {
4950 break
4951 }
4952 v.copyOf(y)
4953 return true
4954 }
4955 // match: (CMOVQCC _ x (FlagLT_UGT))
4956 // result: x
4957 for {
4958 x := v_1
4959 if v_2.Op != OpAMD64FlagLT_UGT {
4960 break
4961 }
4962 v.copyOf(x)
4963 return true
4964 }
4965 return false
4966 }
4967 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
4968 v_2 := v.Args[2]
4969 v_1 := v.Args[1]
4970 v_0 := v.Args[0]
4971 // match: (CMOVQCS x y (InvertFlags cond))
4972 // result: (CMOVQHI x y cond)
4973 for {
4974 x := v_0
4975 y := v_1
4976 if v_2.Op != OpAMD64InvertFlags {
4977 break
4978 }
4979 cond := v_2.Args[0]
4980 v.reset(OpAMD64CMOVQHI)
4981 v.AddArg3(x, y, cond)
4982 return true
4983 }
4984 // match: (CMOVQCS y _ (FlagEQ))
4985 // result: y
4986 for {
4987 y := v_0
4988 if v_2.Op != OpAMD64FlagEQ {
4989 break
4990 }
4991 v.copyOf(y)
4992 return true
4993 }
4994 // match: (CMOVQCS y _ (FlagGT_UGT))
4995 // result: y
4996 for {
4997 y := v_0
4998 if v_2.Op != OpAMD64FlagGT_UGT {
4999 break
5000 }
5001 v.copyOf(y)
5002 return true
5003 }
5004 // match: (CMOVQCS _ x (FlagGT_ULT))
5005 // result: x
5006 for {
5007 x := v_1
5008 if v_2.Op != OpAMD64FlagGT_ULT {
5009 break
5010 }
5011 v.copyOf(x)
5012 return true
5013 }
5014 // match: (CMOVQCS _ x (FlagLT_ULT))
5015 // result: x
5016 for {
5017 x := v_1
5018 if v_2.Op != OpAMD64FlagLT_ULT {
5019 break
5020 }
5021 v.copyOf(x)
5022 return true
5023 }
5024 // match: (CMOVQCS y _ (FlagLT_UGT))
5025 // result: y
5026 for {
5027 y := v_0
5028 if v_2.Op != OpAMD64FlagLT_UGT {
5029 break
5030 }
5031 v.copyOf(y)
5032 return true
5033 }
5034 return false
5035 }
5036 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5037 v_2 := v.Args[2]
5038 v_1 := v.Args[1]
5039 v_0 := v.Args[0]
5040 // match: (CMOVQEQ x y (InvertFlags cond))
5041 // result: (CMOVQEQ x y cond)
5042 for {
5043 x := v_0
5044 y := v_1
5045 if v_2.Op != OpAMD64InvertFlags {
5046 break
5047 }
5048 cond := v_2.Args[0]
5049 v.reset(OpAMD64CMOVQEQ)
5050 v.AddArg3(x, y, cond)
5051 return true
5052 }
5053 // match: (CMOVQEQ _ x (FlagEQ))
5054 // result: x
5055 for {
5056 x := v_1
5057 if v_2.Op != OpAMD64FlagEQ {
5058 break
5059 }
5060 v.copyOf(x)
5061 return true
5062 }
5063 // match: (CMOVQEQ y _ (FlagGT_UGT))
5064 // result: y
5065 for {
5066 y := v_0
5067 if v_2.Op != OpAMD64FlagGT_UGT {
5068 break
5069 }
5070 v.copyOf(y)
5071 return true
5072 }
5073 // match: (CMOVQEQ y _ (FlagGT_ULT))
5074 // result: y
5075 for {
5076 y := v_0
5077 if v_2.Op != OpAMD64FlagGT_ULT {
5078 break
5079 }
5080 v.copyOf(y)
5081 return true
5082 }
5083 // match: (CMOVQEQ y _ (FlagLT_ULT))
5084 // result: y
5085 for {
5086 y := v_0
5087 if v_2.Op != OpAMD64FlagLT_ULT {
5088 break
5089 }
5090 v.copyOf(y)
5091 return true
5092 }
5093 // match: (CMOVQEQ y _ (FlagLT_UGT))
5094 // result: y
5095 for {
5096 y := v_0
5097 if v_2.Op != OpAMD64FlagLT_UGT {
5098 break
5099 }
5100 v.copyOf(y)
5101 return true
5102 }
5103 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
5104 // cond: c != 0
5105 // result: x
5106 for {
5107 x := v_0
5108 if v_2.Op != OpSelect1 {
5109 break
5110 }
5111 v_2_0 := v_2.Args[0]
5112 if v_2_0.Op != OpAMD64BSFQ {
5113 break
5114 }
5115 v_2_0_0 := v_2_0.Args[0]
5116 if v_2_0_0.Op != OpAMD64ORQconst {
5117 break
5118 }
5119 c := auxIntToInt32(v_2_0_0.AuxInt)
5120 if !(c != 0) {
5121 break
5122 }
5123 v.copyOf(x)
5124 return true
5125 }
5126 // match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
5127 // cond: c != 0
5128 // result: x
5129 for {
5130 x := v_0
5131 if v_2.Op != OpSelect1 {
5132 break
5133 }
5134 v_2_0 := v_2.Args[0]
5135 if v_2_0.Op != OpAMD64BSRQ {
5136 break
5137 }
5138 v_2_0_0 := v_2_0.Args[0]
5139 if v_2_0_0.Op != OpAMD64ORQconst {
5140 break
5141 }
5142 c := auxIntToInt32(v_2_0_0.AuxInt)
5143 if !(c != 0) {
5144 break
5145 }
5146 v.copyOf(x)
5147 return true
5148 }
5149 return false
5150 }
5151 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5152 v_2 := v.Args[2]
5153 v_1 := v.Args[1]
5154 v_0 := v.Args[0]
5155 // match: (CMOVQGE x y (InvertFlags cond))
5156 // result: (CMOVQLE x y cond)
5157 for {
5158 x := v_0
5159 y := v_1
5160 if v_2.Op != OpAMD64InvertFlags {
5161 break
5162 }
5163 cond := v_2.Args[0]
5164 v.reset(OpAMD64CMOVQLE)
5165 v.AddArg3(x, y, cond)
5166 return true
5167 }
5168 // match: (CMOVQGE _ x (FlagEQ))
5169 // result: x
5170 for {
5171 x := v_1
5172 if v_2.Op != OpAMD64FlagEQ {
5173 break
5174 }
5175 v.copyOf(x)
5176 return true
5177 }
5178 // match: (CMOVQGE _ x (FlagGT_UGT))
5179 // result: x
5180 for {
5181 x := v_1
5182 if v_2.Op != OpAMD64FlagGT_UGT {
5183 break
5184 }
5185 v.copyOf(x)
5186 return true
5187 }
5188 // match: (CMOVQGE _ x (FlagGT_ULT))
5189 // result: x
5190 for {
5191 x := v_1
5192 if v_2.Op != OpAMD64FlagGT_ULT {
5193 break
5194 }
5195 v.copyOf(x)
5196 return true
5197 }
5198 // match: (CMOVQGE y _ (FlagLT_ULT))
5199 // result: y
5200 for {
5201 y := v_0
5202 if v_2.Op != OpAMD64FlagLT_ULT {
5203 break
5204 }
5205 v.copyOf(y)
5206 return true
5207 }
5208 // match: (CMOVQGE y _ (FlagLT_UGT))
5209 // result: y
5210 for {
5211 y := v_0
5212 if v_2.Op != OpAMD64FlagLT_UGT {
5213 break
5214 }
5215 v.copyOf(y)
5216 return true
5217 }
5218 return false
5219 }
5220 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5221 v_2 := v.Args[2]
5222 v_1 := v.Args[1]
5223 v_0 := v.Args[0]
5224 // match: (CMOVQGT x y (InvertFlags cond))
5225 // result: (CMOVQLT x y cond)
5226 for {
5227 x := v_0
5228 y := v_1
5229 if v_2.Op != OpAMD64InvertFlags {
5230 break
5231 }
5232 cond := v_2.Args[0]
5233 v.reset(OpAMD64CMOVQLT)
5234 v.AddArg3(x, y, cond)
5235 return true
5236 }
5237 // match: (CMOVQGT y _ (FlagEQ))
5238 // result: y
5239 for {
5240 y := v_0
5241 if v_2.Op != OpAMD64FlagEQ {
5242 break
5243 }
5244 v.copyOf(y)
5245 return true
5246 }
// match: (CMOVQGT _ x (FlagGT_UGT))
// result: x
5249 for {
5250 x := v_1
5251 if v_2.Op != OpAMD64FlagGT_UGT {
5252 break
5253 }
5254 v.copyOf(x)
5255 return true
5256 }
// match: (CMOVQGT _ x (FlagGT_ULT))
// result: x
5259 for {
5260 x := v_1
5261 if v_2.Op != OpAMD64FlagGT_ULT {
5262 break
5263 }
5264 v.copyOf(x)
5265 return true
5266 }
// match: (CMOVQGT y _ (FlagLT_ULT))
// result: y
5269 for {
5270 y := v_0
5271 if v_2.Op != OpAMD64FlagLT_ULT {
5272 break
5273 }
5274 v.copyOf(y)
5275 return true
5276 }
// match: (CMOVQGT y _ (FlagLT_UGT))
// result: y
5279 for {
5280 y := v_0
5281 if v_2.Op != OpAMD64FlagLT_UGT {
5282 break
5283 }
5284 v.copyOf(y)
5285 return true
5286 }
5287 return false
5288 }
5289 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5290 v_2 := v.Args[2]
5291 v_1 := v.Args[1]
5292 v_0 := v.Args[0]
// match: (CMOVQHI x y (InvertFlags cond))
// result: (CMOVQCS x y cond)
5295 for {
5296 x := v_0
5297 y := v_1
5298 if v_2.Op != OpAMD64InvertFlags {
5299 break
5300 }
5301 cond := v_2.Args[0]
5302 v.reset(OpAMD64CMOVQCS)
5303 v.AddArg3(x, y, cond)
5304 return true
5305 }
// match: (CMOVQHI y _ (FlagEQ))
// result: y
5308 for {
5309 y := v_0
5310 if v_2.Op != OpAMD64FlagEQ {
5311 break
5312 }
5313 v.copyOf(y)
5314 return true
5315 }
// match: (CMOVQHI _ x (FlagGT_UGT))
// result: x
5318 for {
5319 x := v_1
5320 if v_2.Op != OpAMD64FlagGT_UGT {
5321 break
5322 }
5323 v.copyOf(x)
5324 return true
5325 }
// match: (CMOVQHI y _ (FlagGT_ULT))
// result: y
5328 for {
5329 y := v_0
5330 if v_2.Op != OpAMD64FlagGT_ULT {
5331 break
5332 }
5333 v.copyOf(y)
5334 return true
5335 }
// match: (CMOVQHI y _ (FlagLT_ULT))
// result: y
5338 for {
5339 y := v_0
5340 if v_2.Op != OpAMD64FlagLT_ULT {
5341 break
5342 }
5343 v.copyOf(y)
5344 return true
5345 }
// match: (CMOVQHI _ x (FlagLT_UGT))
// result: x
5348 for {
5349 x := v_1
5350 if v_2.Op != OpAMD64FlagLT_UGT {
5351 break
5352 }
5353 v.copyOf(x)
5354 return true
5355 }
5356 return false
5357 }
5358 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5359 v_2 := v.Args[2]
5360 v_1 := v.Args[1]
5361 v_0 := v.Args[0]
// match: (CMOVQLE x y (InvertFlags cond))
// result: (CMOVQGE x y cond)
5364 for {
5365 x := v_0
5366 y := v_1
5367 if v_2.Op != OpAMD64InvertFlags {
5368 break
5369 }
5370 cond := v_2.Args[0]
5371 v.reset(OpAMD64CMOVQGE)
5372 v.AddArg3(x, y, cond)
5373 return true
5374 }
// match: (CMOVQLE _ x (FlagEQ))
// result: x
5377 for {
5378 x := v_1
5379 if v_2.Op != OpAMD64FlagEQ {
5380 break
5381 }
5382 v.copyOf(x)
5383 return true
5384 }
// match: (CMOVQLE y _ (FlagGT_UGT))
// result: y
5387 for {
5388 y := v_0
5389 if v_2.Op != OpAMD64FlagGT_UGT {
5390 break
5391 }
5392 v.copyOf(y)
5393 return true
5394 }
// match: (CMOVQLE y _ (FlagGT_ULT))
// result: y
5397 for {
5398 y := v_0
5399 if v_2.Op != OpAMD64FlagGT_ULT {
5400 break
5401 }
5402 v.copyOf(y)
5403 return true
5404 }
// match: (CMOVQLE _ x (FlagLT_ULT))
// result: x
5407 for {
5408 x := v_1
5409 if v_2.Op != OpAMD64FlagLT_ULT {
5410 break
5411 }
5412 v.copyOf(x)
5413 return true
5414 }
// match: (CMOVQLE _ x (FlagLT_UGT))
// result: x
5417 for {
5418 x := v_1
5419 if v_2.Op != OpAMD64FlagLT_UGT {
5420 break
5421 }
5422 v.copyOf(x)
5423 return true
5424 }
5425 return false
5426 }
5427 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5428 v_2 := v.Args[2]
5429 v_1 := v.Args[1]
5430 v_0 := v.Args[0]
// match: (CMOVQLS x y (InvertFlags cond))
// result: (CMOVQCC x y cond)
5433 for {
5434 x := v_0
5435 y := v_1
5436 if v_2.Op != OpAMD64InvertFlags {
5437 break
5438 }
5439 cond := v_2.Args[0]
5440 v.reset(OpAMD64CMOVQCC)
5441 v.AddArg3(x, y, cond)
5442 return true
5443 }
// match: (CMOVQLS _ x (FlagEQ))
// result: x
5446 for {
5447 x := v_1
5448 if v_2.Op != OpAMD64FlagEQ {
5449 break
5450 }
5451 v.copyOf(x)
5452 return true
5453 }
// match: (CMOVQLS y _ (FlagGT_UGT))
// result: y
5456 for {
5457 y := v_0
5458 if v_2.Op != OpAMD64FlagGT_UGT {
5459 break
5460 }
5461 v.copyOf(y)
5462 return true
5463 }
// match: (CMOVQLS _ x (FlagGT_ULT))
// result: x
5466 for {
5467 x := v_1
5468 if v_2.Op != OpAMD64FlagGT_ULT {
5469 break
5470 }
5471 v.copyOf(x)
5472 return true
5473 }
// match: (CMOVQLS _ x (FlagLT_ULT))
// result: x
5476 for {
5477 x := v_1
5478 if v_2.Op != OpAMD64FlagLT_ULT {
5479 break
5480 }
5481 v.copyOf(x)
5482 return true
5483 }
// match: (CMOVQLS y _ (FlagLT_UGT))
// result: y
5486 for {
5487 y := v_0
5488 if v_2.Op != OpAMD64FlagLT_UGT {
5489 break
5490 }
5491 v.copyOf(y)
5492 return true
5493 }
5494 return false
5495 }
5496 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5497 v_2 := v.Args[2]
5498 v_1 := v.Args[1]
5499 v_0 := v.Args[0]
// match: (CMOVQLT x y (InvertFlags cond))
// result: (CMOVQGT x y cond)
5502 for {
5503 x := v_0
5504 y := v_1
5505 if v_2.Op != OpAMD64InvertFlags {
5506 break
5507 }
5508 cond := v_2.Args[0]
5509 v.reset(OpAMD64CMOVQGT)
5510 v.AddArg3(x, y, cond)
5511 return true
5512 }
// match: (CMOVQLT y _ (FlagEQ))
// result: y
5515 for {
5516 y := v_0
5517 if v_2.Op != OpAMD64FlagEQ {
5518 break
5519 }
5520 v.copyOf(y)
5521 return true
5522 }
// match: (CMOVQLT y _ (FlagGT_UGT))
// result: y
5525 for {
5526 y := v_0
5527 if v_2.Op != OpAMD64FlagGT_UGT {
5528 break
5529 }
5530 v.copyOf(y)
5531 return true
5532 }
// match: (CMOVQLT y _ (FlagGT_ULT))
// result: y
5535 for {
5536 y := v_0
5537 if v_2.Op != OpAMD64FlagGT_ULT {
5538 break
5539 }
5540 v.copyOf(y)
5541 return true
5542 }
// match: (CMOVQLT _ x (FlagLT_ULT))
// result: x
5545 for {
5546 x := v_1
5547 if v_2.Op != OpAMD64FlagLT_ULT {
5548 break
5549 }
5550 v.copyOf(x)
5551 return true
5552 }
// match: (CMOVQLT _ x (FlagLT_UGT))
// result: x
5555 for {
5556 x := v_1
5557 if v_2.Op != OpAMD64FlagLT_UGT {
5558 break
5559 }
5560 v.copyOf(x)
5561 return true
5562 }
5563 return false
5564 }
5565 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5566 v_2 := v.Args[2]
5567 v_1 := v.Args[1]
5568 v_0 := v.Args[0]
// match: (CMOVQNE x y (InvertFlags cond))
// result: (CMOVQNE x y cond)
5571 for {
5572 x := v_0
5573 y := v_1
5574 if v_2.Op != OpAMD64InvertFlags {
5575 break
5576 }
5577 cond := v_2.Args[0]
5578 v.reset(OpAMD64CMOVQNE)
5579 v.AddArg3(x, y, cond)
5580 return true
5581 }
// match: (CMOVQNE y _ (FlagEQ))
// result: y
5584 for {
5585 y := v_0
5586 if v_2.Op != OpAMD64FlagEQ {
5587 break
5588 }
5589 v.copyOf(y)
5590 return true
5591 }
// match: (CMOVQNE _ x (FlagGT_UGT))
// result: x
5594 for {
5595 x := v_1
5596 if v_2.Op != OpAMD64FlagGT_UGT {
5597 break
5598 }
5599 v.copyOf(x)
5600 return true
5601 }
// match: (CMOVQNE _ x (FlagGT_ULT))
// result: x
5604 for {
5605 x := v_1
5606 if v_2.Op != OpAMD64FlagGT_ULT {
5607 break
5608 }
5609 v.copyOf(x)
5610 return true
5611 }
// match: (CMOVQNE _ x (FlagLT_ULT))
// result: x
5614 for {
5615 x := v_1
5616 if v_2.Op != OpAMD64FlagLT_ULT {
5617 break
5618 }
5619 v.copyOf(x)
5620 return true
5621 }
// match: (CMOVQNE _ x (FlagLT_UGT))
// result: x
5624 for {
5625 x := v_1
5626 if v_2.Op != OpAMD64FlagLT_UGT {
5627 break
5628 }
5629 v.copyOf(x)
5630 return true
5631 }
5632 return false
5633 }
5634 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
5635 v_2 := v.Args[2]
5636 v_1 := v.Args[1]
5637 v_0 := v.Args[0]
// match: (CMOVWCC x y (InvertFlags cond))
// result: (CMOVWLS x y cond)
5640 for {
5641 x := v_0
5642 y := v_1
5643 if v_2.Op != OpAMD64InvertFlags {
5644 break
5645 }
5646 cond := v_2.Args[0]
5647 v.reset(OpAMD64CMOVWLS)
5648 v.AddArg3(x, y, cond)
5649 return true
5650 }
// match: (CMOVWCC _ x (FlagEQ))
// result: x
5653 for {
5654 x := v_1
5655 if v_2.Op != OpAMD64FlagEQ {
5656 break
5657 }
5658 v.copyOf(x)
5659 return true
5660 }
// match: (CMOVWCC _ x (FlagGT_UGT))
// result: x
5663 for {
5664 x := v_1
5665 if v_2.Op != OpAMD64FlagGT_UGT {
5666 break
5667 }
5668 v.copyOf(x)
5669 return true
5670 }
// match: (CMOVWCC y _ (FlagGT_ULT))
// result: y
5673 for {
5674 y := v_0
5675 if v_2.Op != OpAMD64FlagGT_ULT {
5676 break
5677 }
5678 v.copyOf(y)
5679 return true
5680 }
// match: (CMOVWCC y _ (FlagLT_ULT))
// result: y
5683 for {
5684 y := v_0
5685 if v_2.Op != OpAMD64FlagLT_ULT {
5686 break
5687 }
5688 v.copyOf(y)
5689 return true
5690 }
// match: (CMOVWCC _ x (FlagLT_UGT))
// result: x
5693 for {
5694 x := v_1
5695 if v_2.Op != OpAMD64FlagLT_UGT {
5696 break
5697 }
5698 v.copyOf(x)
5699 return true
5700 }
5701 return false
5702 }
5703 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
5704 v_2 := v.Args[2]
5705 v_1 := v.Args[1]
5706 v_0 := v.Args[0]
// match: (CMOVWCS x y (InvertFlags cond))
// result: (CMOVWHI x y cond)
5709 for {
5710 x := v_0
5711 y := v_1
5712 if v_2.Op != OpAMD64InvertFlags {
5713 break
5714 }
5715 cond := v_2.Args[0]
5716 v.reset(OpAMD64CMOVWHI)
5717 v.AddArg3(x, y, cond)
5718 return true
5719 }
// match: (CMOVWCS y _ (FlagEQ))
// result: y
5722 for {
5723 y := v_0
5724 if v_2.Op != OpAMD64FlagEQ {
5725 break
5726 }
5727 v.copyOf(y)
5728 return true
5729 }
// match: (CMOVWCS y _ (FlagGT_UGT))
// result: y
5732 for {
5733 y := v_0
5734 if v_2.Op != OpAMD64FlagGT_UGT {
5735 break
5736 }
5737 v.copyOf(y)
5738 return true
5739 }
// match: (CMOVWCS _ x (FlagGT_ULT))
// result: x
5742 for {
5743 x := v_1
5744 if v_2.Op != OpAMD64FlagGT_ULT {
5745 break
5746 }
5747 v.copyOf(x)
5748 return true
5749 }
// match: (CMOVWCS _ x (FlagLT_ULT))
// result: x
5752 for {
5753 x := v_1
5754 if v_2.Op != OpAMD64FlagLT_ULT {
5755 break
5756 }
5757 v.copyOf(x)
5758 return true
5759 }
// match: (CMOVWCS y _ (FlagLT_UGT))
// result: y
5762 for {
5763 y := v_0
5764 if v_2.Op != OpAMD64FlagLT_UGT {
5765 break
5766 }
5767 v.copyOf(y)
5768 return true
5769 }
5770 return false
5771 }
5772 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
5773 v_2 := v.Args[2]
5774 v_1 := v.Args[1]
5775 v_0 := v.Args[0]
// match: (CMOVWEQ x y (InvertFlags cond))
// result: (CMOVWEQ x y cond)
5778 for {
5779 x := v_0
5780 y := v_1
5781 if v_2.Op != OpAMD64InvertFlags {
5782 break
5783 }
5784 cond := v_2.Args[0]
5785 v.reset(OpAMD64CMOVWEQ)
5786 v.AddArg3(x, y, cond)
5787 return true
5788 }
// match: (CMOVWEQ _ x (FlagEQ))
// result: x
5791 for {
5792 x := v_1
5793 if v_2.Op != OpAMD64FlagEQ {
5794 break
5795 }
5796 v.copyOf(x)
5797 return true
5798 }
// match: (CMOVWEQ y _ (FlagGT_UGT))
// result: y
5801 for {
5802 y := v_0
5803 if v_2.Op != OpAMD64FlagGT_UGT {
5804 break
5805 }
5806 v.copyOf(y)
5807 return true
5808 }
// match: (CMOVWEQ y _ (FlagGT_ULT))
// result: y
5811 for {
5812 y := v_0
5813 if v_2.Op != OpAMD64FlagGT_ULT {
5814 break
5815 }
5816 v.copyOf(y)
5817 return true
5818 }
// match: (CMOVWEQ y _ (FlagLT_ULT))
// result: y
5821 for {
5822 y := v_0
5823 if v_2.Op != OpAMD64FlagLT_ULT {
5824 break
5825 }
5826 v.copyOf(y)
5827 return true
5828 }
// match: (CMOVWEQ y _ (FlagLT_UGT))
// result: y
5831 for {
5832 y := v_0
5833 if v_2.Op != OpAMD64FlagLT_UGT {
5834 break
5835 }
5836 v.copyOf(y)
5837 return true
5838 }
5839 return false
5840 }
5841 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
5842 v_2 := v.Args[2]
5843 v_1 := v.Args[1]
5844 v_0 := v.Args[0]
// match: (CMOVWGE x y (InvertFlags cond))
// result: (CMOVWLE x y cond)
5847 for {
5848 x := v_0
5849 y := v_1
5850 if v_2.Op != OpAMD64InvertFlags {
5851 break
5852 }
5853 cond := v_2.Args[0]
5854 v.reset(OpAMD64CMOVWLE)
5855 v.AddArg3(x, y, cond)
5856 return true
5857 }
// match: (CMOVWGE _ x (FlagEQ))
// result: x
5860 for {
5861 x := v_1
5862 if v_2.Op != OpAMD64FlagEQ {
5863 break
5864 }
5865 v.copyOf(x)
5866 return true
5867 }
// match: (CMOVWGE _ x (FlagGT_UGT))
// result: x
5870 for {
5871 x := v_1
5872 if v_2.Op != OpAMD64FlagGT_UGT {
5873 break
5874 }
5875 v.copyOf(x)
5876 return true
5877 }
// match: (CMOVWGE _ x (FlagGT_ULT))
// result: x
5880 for {
5881 x := v_1
5882 if v_2.Op != OpAMD64FlagGT_ULT {
5883 break
5884 }
5885 v.copyOf(x)
5886 return true
5887 }
// match: (CMOVWGE y _ (FlagLT_ULT))
// result: y
5890 for {
5891 y := v_0
5892 if v_2.Op != OpAMD64FlagLT_ULT {
5893 break
5894 }
5895 v.copyOf(y)
5896 return true
5897 }
// match: (CMOVWGE y _ (FlagLT_UGT))
// result: y
5900 for {
5901 y := v_0
5902 if v_2.Op != OpAMD64FlagLT_UGT {
5903 break
5904 }
5905 v.copyOf(y)
5906 return true
5907 }
5908 return false
5909 }
5910 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
5911 v_2 := v.Args[2]
5912 v_1 := v.Args[1]
5913 v_0 := v.Args[0]
// match: (CMOVWGT x y (InvertFlags cond))
// result: (CMOVWLT x y cond)
5916 for {
5917 x := v_0
5918 y := v_1
5919 if v_2.Op != OpAMD64InvertFlags {
5920 break
5921 }
5922 cond := v_2.Args[0]
5923 v.reset(OpAMD64CMOVWLT)
5924 v.AddArg3(x, y, cond)
5925 return true
5926 }
// match: (CMOVWGT y _ (FlagEQ))
// result: y
5929 for {
5930 y := v_0
5931 if v_2.Op != OpAMD64FlagEQ {
5932 break
5933 }
5934 v.copyOf(y)
5935 return true
5936 }
// match: (CMOVWGT _ x (FlagGT_UGT))
// result: x
5939 for {
5940 x := v_1
5941 if v_2.Op != OpAMD64FlagGT_UGT {
5942 break
5943 }
5944 v.copyOf(x)
5945 return true
5946 }
// match: (CMOVWGT _ x (FlagGT_ULT))
// result: x
5949 for {
5950 x := v_1
5951 if v_2.Op != OpAMD64FlagGT_ULT {
5952 break
5953 }
5954 v.copyOf(x)
5955 return true
5956 }
// match: (CMOVWGT y _ (FlagLT_ULT))
// result: y
5959 for {
5960 y := v_0
5961 if v_2.Op != OpAMD64FlagLT_ULT {
5962 break
5963 }
5964 v.copyOf(y)
5965 return true
5966 }
// match: (CMOVWGT y _ (FlagLT_UGT))
// result: y
5969 for {
5970 y := v_0
5971 if v_2.Op != OpAMD64FlagLT_UGT {
5972 break
5973 }
5974 v.copyOf(y)
5975 return true
5976 }
5977 return false
5978 }
5979 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
5980 v_2 := v.Args[2]
5981 v_1 := v.Args[1]
5982 v_0 := v.Args[0]
// match: (CMOVWHI x y (InvertFlags cond))
// result: (CMOVWCS x y cond)
5985 for {
5986 x := v_0
5987 y := v_1
5988 if v_2.Op != OpAMD64InvertFlags {
5989 break
5990 }
5991 cond := v_2.Args[0]
5992 v.reset(OpAMD64CMOVWCS)
5993 v.AddArg3(x, y, cond)
5994 return true
5995 }
// match: (CMOVWHI y _ (FlagEQ))
// result: y
5998 for {
5999 y := v_0
6000 if v_2.Op != OpAMD64FlagEQ {
6001 break
6002 }
6003 v.copyOf(y)
6004 return true
6005 }
// match: (CMOVWHI _ x (FlagGT_UGT))
// result: x
6008 for {
6009 x := v_1
6010 if v_2.Op != OpAMD64FlagGT_UGT {
6011 break
6012 }
6013 v.copyOf(x)
6014 return true
6015 }
// match: (CMOVWHI y _ (FlagGT_ULT))
// result: y
6018 for {
6019 y := v_0
6020 if v_2.Op != OpAMD64FlagGT_ULT {
6021 break
6022 }
6023 v.copyOf(y)
6024 return true
6025 }
// match: (CMOVWHI y _ (FlagLT_ULT))
// result: y
6028 for {
6029 y := v_0
6030 if v_2.Op != OpAMD64FlagLT_ULT {
6031 break
6032 }
6033 v.copyOf(y)
6034 return true
6035 }
// match: (CMOVWHI _ x (FlagLT_UGT))
// result: x
6038 for {
6039 x := v_1
6040 if v_2.Op != OpAMD64FlagLT_UGT {
6041 break
6042 }
6043 v.copyOf(x)
6044 return true
6045 }
6046 return false
6047 }
6048 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6049 v_2 := v.Args[2]
6050 v_1 := v.Args[1]
6051 v_0 := v.Args[0]
// match: (CMOVWLE x y (InvertFlags cond))
// result: (CMOVWGE x y cond)
6054 for {
6055 x := v_0
6056 y := v_1
6057 if v_2.Op != OpAMD64InvertFlags {
6058 break
6059 }
6060 cond := v_2.Args[0]
6061 v.reset(OpAMD64CMOVWGE)
6062 v.AddArg3(x, y, cond)
6063 return true
6064 }
// match: (CMOVWLE _ x (FlagEQ))
// result: x
6067 for {
6068 x := v_1
6069 if v_2.Op != OpAMD64FlagEQ {
6070 break
6071 }
6072 v.copyOf(x)
6073 return true
6074 }
// match: (CMOVWLE y _ (FlagGT_UGT))
// result: y
6077 for {
6078 y := v_0
6079 if v_2.Op != OpAMD64FlagGT_UGT {
6080 break
6081 }
6082 v.copyOf(y)
6083 return true
6084 }
// match: (CMOVWLE y _ (FlagGT_ULT))
// result: y
6087 for {
6088 y := v_0
6089 if v_2.Op != OpAMD64FlagGT_ULT {
6090 break
6091 }
6092 v.copyOf(y)
6093 return true
6094 }
// match: (CMOVWLE _ x (FlagLT_ULT))
// result: x
6097 for {
6098 x := v_1
6099 if v_2.Op != OpAMD64FlagLT_ULT {
6100 break
6101 }
6102 v.copyOf(x)
6103 return true
6104 }
// match: (CMOVWLE _ x (FlagLT_UGT))
// result: x
6107 for {
6108 x := v_1
6109 if v_2.Op != OpAMD64FlagLT_UGT {
6110 break
6111 }
6112 v.copyOf(x)
6113 return true
6114 }
6115 return false
6116 }
6117 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6118 v_2 := v.Args[2]
6119 v_1 := v.Args[1]
6120 v_0 := v.Args[0]
// match: (CMOVWLS x y (InvertFlags cond))
// result: (CMOVWCC x y cond)
6123 for {
6124 x := v_0
6125 y := v_1
6126 if v_2.Op != OpAMD64InvertFlags {
6127 break
6128 }
6129 cond := v_2.Args[0]
6130 v.reset(OpAMD64CMOVWCC)
6131 v.AddArg3(x, y, cond)
6132 return true
6133 }
// match: (CMOVWLS _ x (FlagEQ))
// result: x
6136 for {
6137 x := v_1
6138 if v_2.Op != OpAMD64FlagEQ {
6139 break
6140 }
6141 v.copyOf(x)
6142 return true
6143 }
// match: (CMOVWLS y _ (FlagGT_UGT))
// result: y
6146 for {
6147 y := v_0
6148 if v_2.Op != OpAMD64FlagGT_UGT {
6149 break
6150 }
6151 v.copyOf(y)
6152 return true
6153 }
// match: (CMOVWLS _ x (FlagGT_ULT))
// result: x
6156 for {
6157 x := v_1
6158 if v_2.Op != OpAMD64FlagGT_ULT {
6159 break
6160 }
6161 v.copyOf(x)
6162 return true
6163 }
// match: (CMOVWLS _ x (FlagLT_ULT))
// result: x
6166 for {
6167 x := v_1
6168 if v_2.Op != OpAMD64FlagLT_ULT {
6169 break
6170 }
6171 v.copyOf(x)
6172 return true
6173 }
// match: (CMOVWLS y _ (FlagLT_UGT))
// result: y
6176 for {
6177 y := v_0
6178 if v_2.Op != OpAMD64FlagLT_UGT {
6179 break
6180 }
6181 v.copyOf(y)
6182 return true
6183 }
6184 return false
6185 }
6186 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6187 v_2 := v.Args[2]
6188 v_1 := v.Args[1]
6189 v_0 := v.Args[0]
// match: (CMOVWLT x y (InvertFlags cond))
// result: (CMOVWGT x y cond)
6192 for {
6193 x := v_0
6194 y := v_1
6195 if v_2.Op != OpAMD64InvertFlags {
6196 break
6197 }
6198 cond := v_2.Args[0]
6199 v.reset(OpAMD64CMOVWGT)
6200 v.AddArg3(x, y, cond)
6201 return true
6202 }
// match: (CMOVWLT y _ (FlagEQ))
// result: y
6205 for {
6206 y := v_0
6207 if v_2.Op != OpAMD64FlagEQ {
6208 break
6209 }
6210 v.copyOf(y)
6211 return true
6212 }
// match: (CMOVWLT y _ (FlagGT_UGT))
// result: y
6215 for {
6216 y := v_0
6217 if v_2.Op != OpAMD64FlagGT_UGT {
6218 break
6219 }
6220 v.copyOf(y)
6221 return true
6222 }
// match: (CMOVWLT y _ (FlagGT_ULT))
// result: y
6225 for {
6226 y := v_0
6227 if v_2.Op != OpAMD64FlagGT_ULT {
6228 break
6229 }
6230 v.copyOf(y)
6231 return true
6232 }
// match: (CMOVWLT _ x (FlagLT_ULT))
// result: x
6235 for {
6236 x := v_1
6237 if v_2.Op != OpAMD64FlagLT_ULT {
6238 break
6239 }
6240 v.copyOf(x)
6241 return true
6242 }
// match: (CMOVWLT _ x (FlagLT_UGT))
// result: x
6245 for {
6246 x := v_1
6247 if v_2.Op != OpAMD64FlagLT_UGT {
6248 break
6249 }
6250 v.copyOf(x)
6251 return true
6252 }
6253 return false
6254 }
6255 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6256 v_2 := v.Args[2]
6257 v_1 := v.Args[1]
6258 v_0 := v.Args[0]
// match: (CMOVWNE x y (InvertFlags cond))
// result: (CMOVWNE x y cond)
6261 for {
6262 x := v_0
6263 y := v_1
6264 if v_2.Op != OpAMD64InvertFlags {
6265 break
6266 }
6267 cond := v_2.Args[0]
6268 v.reset(OpAMD64CMOVWNE)
6269 v.AddArg3(x, y, cond)
6270 return true
6271 }
// match: (CMOVWNE y _ (FlagEQ))
// result: y
6274 for {
6275 y := v_0
6276 if v_2.Op != OpAMD64FlagEQ {
6277 break
6278 }
6279 v.copyOf(y)
6280 return true
6281 }
// match: (CMOVWNE _ x (FlagGT_UGT))
// result: x
6284 for {
6285 x := v_1
6286 if v_2.Op != OpAMD64FlagGT_UGT {
6287 break
6288 }
6289 v.copyOf(x)
6290 return true
6291 }
// match: (CMOVWNE _ x (FlagGT_ULT))
// result: x
6294 for {
6295 x := v_1
6296 if v_2.Op != OpAMD64FlagGT_ULT {
6297 break
6298 }
6299 v.copyOf(x)
6300 return true
6301 }
// match: (CMOVWNE _ x (FlagLT_ULT))
// result: x
6304 for {
6305 x := v_1
6306 if v_2.Op != OpAMD64FlagLT_ULT {
6307 break
6308 }
6309 v.copyOf(x)
6310 return true
6311 }
// match: (CMOVWNE _ x (FlagLT_UGT))
// result: x
6314 for {
6315 x := v_1
6316 if v_2.Op != OpAMD64FlagLT_UGT {
6317 break
6318 }
6319 v.copyOf(x)
6320 return true
6321 }
6322 return false
6323 }
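// The CMP* rewrites canonicalize comparisons: a constant operand folds into a
// CMP*const, and when the constant was on the left, the result is wrapped in
// InvertFlags so flag consumers flip the condition rather than the operands.
// Loads feeding a compare merge into CMP*load when canMergeLoad permits.
// Roughly (a reconstructed sketch in rules-file syntax):
//
//	(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
//	(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))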
6324 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6325 v_1 := v.Args[1]
6326 v_0 := v.Args[0]
6327 b := v.Block
// match: (CMPB x (MOVLconst [c]))
// result: (CMPBconst x [int8(c)])
6330 for {
6331 x := v_0
6332 if v_1.Op != OpAMD64MOVLconst {
6333 break
6334 }
6335 c := auxIntToInt32(v_1.AuxInt)
6336 v.reset(OpAMD64CMPBconst)
6337 v.AuxInt = int8ToAuxInt(int8(c))
6338 v.AddArg(x)
6339 return true
6340 }
// match: (CMPB (MOVLconst [c]) x)
// result: (InvertFlags (CMPBconst x [int8(c)]))
6343 for {
6344 if v_0.Op != OpAMD64MOVLconst {
6345 break
6346 }
6347 c := auxIntToInt32(v_0.AuxInt)
6348 x := v_1
6349 v.reset(OpAMD64InvertFlags)
6350 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6351 v0.AuxInt = int8ToAuxInt(int8(c))
6352 v0.AddArg(x)
6353 v.AddArg(v0)
6354 return true
6355 }
// match: (CMPB x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPB y x))
6359 for {
6360 x := v_0
6361 y := v_1
6362 if !(canonLessThan(x, y)) {
6363 break
6364 }
6365 v.reset(OpAMD64InvertFlags)
6366 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6367 v0.AddArg2(y, x)
6368 v.AddArg(v0)
6369 return true
6370 }
// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPBload {sym} [off] ptr x mem)
6374 for {
6375 l := v_0
6376 if l.Op != OpAMD64MOVBload {
6377 break
6378 }
6379 off := auxIntToInt32(l.AuxInt)
6380 sym := auxToSym(l.Aux)
6381 mem := l.Args[1]
6382 ptr := l.Args[0]
6383 x := v_1
6384 if !(canMergeLoad(v, l) && clobber(l)) {
6385 break
6386 }
6387 v.reset(OpAMD64CMPBload)
6388 v.AuxInt = int32ToAuxInt(off)
6389 v.Aux = symToAux(sym)
6390 v.AddArg3(ptr, x, mem)
6391 return true
6392 }
// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
6396 for {
6397 x := v_0
6398 l := v_1
6399 if l.Op != OpAMD64MOVBload {
6400 break
6401 }
6402 off := auxIntToInt32(l.AuxInt)
6403 sym := auxToSym(l.Aux)
6404 mem := l.Args[1]
6405 ptr := l.Args[0]
6406 if !(canMergeLoad(v, l) && clobber(l)) {
6407 break
6408 }
6409 v.reset(OpAMD64InvertFlags)
6410 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6411 v0.AuxInt = int32ToAuxInt(off)
6412 v0.Aux = symToAux(sym)
6413 v0.AddArg3(ptr, x, mem)
6414 v.AddArg(v0)
6415 return true
6416 }
6417 return false
6418 }
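// Comparing two known constants folds to one of the five flag-constant ops,
// which encode the signed and unsigned orderings together: FlagEQ,
// FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT. For example, CMPBconst of
// the constant 3 against [-1] yields FlagGT_ULT: 3 > -1 as int8, but
// 3 < 0xFF as uint8.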
6419 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6420 v_0 := v.Args[0]
6421 b := v.Block
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)==y
// result: (FlagEQ)
6425 for {
6426 y := auxIntToInt8(v.AuxInt)
6427 if v_0.Op != OpAMD64MOVLconst {
6428 break
6429 }
6430 x := auxIntToInt32(v_0.AuxInt)
6431 if !(int8(x) == y) {
6432 break
6433 }
6434 v.reset(OpAMD64FlagEQ)
6435 return true
6436 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<y && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
6440 for {
6441 y := auxIntToInt8(v.AuxInt)
6442 if v_0.Op != OpAMD64MOVLconst {
6443 break
6444 }
6445 x := auxIntToInt32(v_0.AuxInt)
6446 if !(int8(x) < y && uint8(x) < uint8(y)) {
6447 break
6448 }
6449 v.reset(OpAMD64FlagLT_ULT)
6450 return true
6451 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<y && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
6455 for {
6456 y := auxIntToInt8(v.AuxInt)
6457 if v_0.Op != OpAMD64MOVLconst {
6458 break
6459 }
6460 x := auxIntToInt32(v_0.AuxInt)
6461 if !(int8(x) < y && uint8(x) > uint8(y)) {
6462 break
6463 }
6464 v.reset(OpAMD64FlagLT_UGT)
6465 return true
6466 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>y && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
6470 for {
6471 y := auxIntToInt8(v.AuxInt)
6472 if v_0.Op != OpAMD64MOVLconst {
6473 break
6474 }
6475 x := auxIntToInt32(v_0.AuxInt)
6476 if !(int8(x) > y && uint8(x) < uint8(y)) {
6477 break
6478 }
6479 v.reset(OpAMD64FlagGT_ULT)
6480 return true
6481 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>y && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
6485 for {
6486 y := auxIntToInt8(v.AuxInt)
6487 if v_0.Op != OpAMD64MOVLconst {
6488 break
6489 }
6490 x := auxIntToInt32(v_0.AuxInt)
6491 if !(int8(x) > y && uint8(x) > uint8(y)) {
6492 break
6493 }
6494 v.reset(OpAMD64FlagGT_UGT)
6495 return true
6496 }
// match: (CMPBconst (ANDLconst _ [m]) [n])
// cond: 0 <= int8(m) && int8(m) < n
// result: (FlagLT_ULT)
6500 for {
6501 n := auxIntToInt8(v.AuxInt)
6502 if v_0.Op != OpAMD64ANDLconst {
6503 break
6504 }
6505 m := auxIntToInt32(v_0.AuxInt)
6506 if !(0 <= int8(m) && int8(m) < n) {
6507 break
6508 }
6509 v.reset(OpAMD64FlagLT_ULT)
6510 return true
6511 }
// match: (CMPBconst a:(ANDL x y) [0])
// cond: a.Uses == 1
// result: (TESTB x y)
6515 for {
6516 if auxIntToInt8(v.AuxInt) != 0 {
6517 break
6518 }
6519 a := v_0
6520 if a.Op != OpAMD64ANDL {
6521 break
6522 }
6523 y := a.Args[1]
6524 x := a.Args[0]
6525 if !(a.Uses == 1) {
6526 break
6527 }
6528 v.reset(OpAMD64TESTB)
6529 v.AddArg2(x, y)
6530 return true
6531 }
// match: (CMPBconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
// result: (TESTBconst [int8(c)] x)
6535 for {
6536 if auxIntToInt8(v.AuxInt) != 0 {
6537 break
6538 }
6539 a := v_0
6540 if a.Op != OpAMD64ANDLconst {
6541 break
6542 }
6543 c := auxIntToInt32(a.AuxInt)
6544 x := a.Args[0]
6545 if !(a.Uses == 1) {
6546 break
6547 }
6548 v.reset(OpAMD64TESTBconst)
6549 v.AuxInt = int8ToAuxInt(int8(c))
6550 v.AddArg(x)
6551 return true
6552 }
// match: (CMPBconst x [0])
// result: (TESTB x x)
6555 for {
6556 if auxIntToInt8(v.AuxInt) != 0 {
6557 break
6558 }
6559 x := v_0
6560 v.reset(OpAMD64TESTB)
6561 v.AddArg2(x, x)
6562 return true
6563 }
// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && clobber(l)
// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
6567 for {
6568 c := auxIntToInt8(v.AuxInt)
6569 l := v_0
6570 if l.Op != OpAMD64MOVBload {
6571 break
6572 }
6573 off := auxIntToInt32(l.AuxInt)
6574 sym := auxToSym(l.Aux)
6575 mem := l.Args[1]
6576 ptr := l.Args[0]
6577 if !(l.Uses == 1 && clobber(l)) {
6578 break
6579 }
6580 b = l.Block
6581 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6582 v.copyOf(v0)
6583 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
6584 v0.Aux = symToAux(sym)
6585 v0.AddArg2(ptr, mem)
6586 return true
6587 }
6588 return false
6589 }
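// CMP*constload packs the comparison constant and the load offset into one
// ValAndOff auxint, so the address folding below must go through canAdd32 and
// addOffset32 to guarantee the packed offset still fits. A sketch of the
// ADDQconst case handled first:
//
//	(CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
//		&& ValAndOff(valoff1).canAdd32(off2)
//		=> (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)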
6590 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6591 v_1 := v.Args[1]
6592 v_0 := v.Args[0]
// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6596 for {
6597 valoff1 := auxIntToValAndOff(v.AuxInt)
6598 sym := auxToSym(v.Aux)
6599 if v_0.Op != OpAMD64ADDQconst {
6600 break
6601 }
6602 off2 := auxIntToInt32(v_0.AuxInt)
6603 base := v_0.Args[0]
6604 mem := v_1
6605 if !(ValAndOff(valoff1).canAdd32(off2)) {
6606 break
6607 }
6608 v.reset(OpAMD64CMPBconstload)
6609 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6610 v.Aux = symToAux(sym)
6611 v.AddArg2(base, mem)
6612 return true
6613 }
// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
6617 for {
6618 valoff1 := auxIntToValAndOff(v.AuxInt)
6619 sym1 := auxToSym(v.Aux)
6620 if v_0.Op != OpAMD64LEAQ {
6621 break
6622 }
6623 off2 := auxIntToInt32(v_0.AuxInt)
6624 sym2 := auxToSym(v_0.Aux)
6625 base := v_0.Args[0]
6626 mem := v_1
6627 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
6628 break
6629 }
6630 v.reset(OpAMD64CMPBconstload)
6631 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
6632 v.Aux = symToAux(mergeSym(sym1, sym2))
6633 v.AddArg2(base, mem)
6634 return true
6635 }
6636 return false
6637 }
6638 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
6639 v_2 := v.Args[2]
6640 v_1 := v.Args[1]
6641 v_0 := v.Args[0]
// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPBload [off1+off2] {sym} base val mem)
6645 for {
6646 off1 := auxIntToInt32(v.AuxInt)
6647 sym := auxToSym(v.Aux)
6648 if v_0.Op != OpAMD64ADDQconst {
6649 break
6650 }
6651 off2 := auxIntToInt32(v_0.AuxInt)
6652 base := v_0.Args[0]
6653 val := v_1
6654 mem := v_2
6655 if !(is32Bit(int64(off1) + int64(off2))) {
6656 break
6657 }
6658 v.reset(OpAMD64CMPBload)
6659 v.AuxInt = int32ToAuxInt(off1 + off2)
6660 v.Aux = symToAux(sym)
6661 v.AddArg3(base, val, mem)
6662 return true
6663 }
// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
6667 for {
6668 off1 := auxIntToInt32(v.AuxInt)
6669 sym1 := auxToSym(v.Aux)
6670 if v_0.Op != OpAMD64LEAQ {
6671 break
6672 }
6673 off2 := auxIntToInt32(v_0.AuxInt)
6674 sym2 := auxToSym(v_0.Aux)
6675 base := v_0.Args[0]
6676 val := v_1
6677 mem := v_2
6678 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
6679 break
6680 }
6681 v.reset(OpAMD64CMPBload)
6682 v.AuxInt = int32ToAuxInt(off1 + off2)
6683 v.Aux = symToAux(mergeSym(sym1, sym2))
6684 v.AddArg3(base, val, mem)
6685 return true
6686 }
// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
// result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
6689 for {
6690 off := auxIntToInt32(v.AuxInt)
6691 sym := auxToSym(v.Aux)
6692 ptr := v_0
6693 if v_1.Op != OpAMD64MOVLconst {
6694 break
6695 }
6696 c := auxIntToInt32(v_1.AuxInt)
6697 mem := v_2
6698 v.reset(OpAMD64CMPBconstload)
6699 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
6700 v.Aux = symToAux(sym)
6701 v.AddArg2(ptr, mem)
6702 return true
6703 }
6704 return false
6705 }
6706 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
6707 v_1 := v.Args[1]
6708 v_0 := v.Args[0]
6709 b := v.Block
// match: (CMPL x (MOVLconst [c]))
// result: (CMPLconst x [c])
6712 for {
6713 x := v_0
6714 if v_1.Op != OpAMD64MOVLconst {
6715 break
6716 }
6717 c := auxIntToInt32(v_1.AuxInt)
6718 v.reset(OpAMD64CMPLconst)
6719 v.AuxInt = int32ToAuxInt(c)
6720 v.AddArg(x)
6721 return true
6722 }
// match: (CMPL (MOVLconst [c]) x)
// result: (InvertFlags (CMPLconst x [c]))
6725 for {
6726 if v_0.Op != OpAMD64MOVLconst {
6727 break
6728 }
6729 c := auxIntToInt32(v_0.AuxInt)
6730 x := v_1
6731 v.reset(OpAMD64InvertFlags)
6732 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
6733 v0.AuxInt = int32ToAuxInt(c)
6734 v0.AddArg(x)
6735 v.AddArg(v0)
6736 return true
6737 }
// match: (CMPL x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPL y x))
6741 for {
6742 x := v_0
6743 y := v_1
6744 if !(canonLessThan(x, y)) {
6745 break
6746 }
6747 v.reset(OpAMD64InvertFlags)
6748 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
6749 v0.AddArg2(y, x)
6750 v.AddArg(v0)
6751 return true
6752 }
// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPLload {sym} [off] ptr x mem)
6756 for {
6757 l := v_0
6758 if l.Op != OpAMD64MOVLload {
6759 break
6760 }
6761 off := auxIntToInt32(l.AuxInt)
6762 sym := auxToSym(l.Aux)
6763 mem := l.Args[1]
6764 ptr := l.Args[0]
6765 x := v_1
6766 if !(canMergeLoad(v, l) && clobber(l)) {
6767 break
6768 }
6769 v.reset(OpAMD64CMPLload)
6770 v.AuxInt = int32ToAuxInt(off)
6771 v.Aux = symToAux(sym)
6772 v.AddArg3(ptr, x, mem)
6773 return true
6774 }
// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
6778 for {
6779 x := v_0
6780 l := v_1
6781 if l.Op != OpAMD64MOVLload {
6782 break
6783 }
6784 off := auxIntToInt32(l.AuxInt)
6785 sym := auxToSym(l.Aux)
6786 mem := l.Args[1]
6787 ptr := l.Args[0]
6788 if !(canMergeLoad(v, l) && clobber(l)) {
6789 break
6790 }
6791 v.reset(OpAMD64InvertFlags)
6792 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
6793 v0.AuxInt = int32ToAuxInt(off)
6794 v0.Aux = symToAux(sym)
6795 v0.AddArg3(ptr, x, mem)
6796 v.AddArg(v0)
6797 return true
6798 }
6799 return false
6800 }
6801 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
6802 v_0 := v.Args[0]
6803 b := v.Block
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: x==y
// result: (FlagEQ)
6807 for {
6808 y := auxIntToInt32(v.AuxInt)
6809 if v_0.Op != OpAMD64MOVLconst {
6810 break
6811 }
6812 x := auxIntToInt32(v_0.AuxInt)
6813 if !(x == y) {
6814 break
6815 }
6816 v.reset(OpAMD64FlagEQ)
6817 return true
6818 }
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: x<y && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
6822 for {
6823 y := auxIntToInt32(v.AuxInt)
6824 if v_0.Op != OpAMD64MOVLconst {
6825 break
6826 }
6827 x := auxIntToInt32(v_0.AuxInt)
6828 if !(x < y && uint32(x) < uint32(y)) {
6829 break
6830 }
6831 v.reset(OpAMD64FlagLT_ULT)
6832 return true
6833 }
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: x<y && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
6837 for {
6838 y := auxIntToInt32(v.AuxInt)
6839 if v_0.Op != OpAMD64MOVLconst {
6840 break
6841 }
6842 x := auxIntToInt32(v_0.AuxInt)
6843 if !(x < y && uint32(x) > uint32(y)) {
6844 break
6845 }
6846 v.reset(OpAMD64FlagLT_UGT)
6847 return true
6848 }
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: x>y && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
6852 for {
6853 y := auxIntToInt32(v.AuxInt)
6854 if v_0.Op != OpAMD64MOVLconst {
6855 break
6856 }
6857 x := auxIntToInt32(v_0.AuxInt)
6858 if !(x > y && uint32(x) < uint32(y)) {
6859 break
6860 }
6861 v.reset(OpAMD64FlagGT_ULT)
6862 return true
6863 }
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: x>y && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
6867 for {
6868 y := auxIntToInt32(v.AuxInt)
6869 if v_0.Op != OpAMD64MOVLconst {
6870 break
6871 }
6872 x := auxIntToInt32(v_0.AuxInt)
6873 if !(x > y && uint32(x) > uint32(y)) {
6874 break
6875 }
6876 v.reset(OpAMD64FlagGT_UGT)
6877 return true
6878 }
// match: (CMPLconst (SHRLconst _ [c]) [n])
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
6882 for {
6883 n := auxIntToInt32(v.AuxInt)
6884 if v_0.Op != OpAMD64SHRLconst {
6885 break
6886 }
6887 c := auxIntToInt8(v_0.AuxInt)
6888 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
6889 break
6890 }
6891 v.reset(OpAMD64FlagLT_ULT)
6892 return true
6893 }
// match: (CMPLconst (ANDLconst _ [m]) [n])
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
6897 for {
6898 n := auxIntToInt32(v.AuxInt)
6899 if v_0.Op != OpAMD64ANDLconst {
6900 break
6901 }
6902 m := auxIntToInt32(v_0.AuxInt)
6903 if !(0 <= m && m < n) {
6904 break
6905 }
6906 v.reset(OpAMD64FlagLT_ULT)
6907 return true
6908 }
// match: (CMPLconst a:(ANDL x y) [0])
// cond: a.Uses == 1
// result: (TESTL x y)
6912 for {
6913 if auxIntToInt32(v.AuxInt) != 0 {
6914 break
6915 }
6916 a := v_0
6917 if a.Op != OpAMD64ANDL {
6918 break
6919 }
6920 y := a.Args[1]
6921 x := a.Args[0]
6922 if !(a.Uses == 1) {
6923 break
6924 }
6925 v.reset(OpAMD64TESTL)
6926 v.AddArg2(x, y)
6927 return true
6928 }
// match: (CMPLconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
// result: (TESTLconst [c] x)
6932 for {
6933 if auxIntToInt32(v.AuxInt) != 0 {
6934 break
6935 }
6936 a := v_0
6937 if a.Op != OpAMD64ANDLconst {
6938 break
6939 }
6940 c := auxIntToInt32(a.AuxInt)
6941 x := a.Args[0]
6942 if !(a.Uses == 1) {
6943 break
6944 }
6945 v.reset(OpAMD64TESTLconst)
6946 v.AuxInt = int32ToAuxInt(c)
6947 v.AddArg(x)
6948 return true
6949 }
// match: (CMPLconst x [0])
// result: (TESTL x x)
6952 for {
6953 if auxIntToInt32(v.AuxInt) != 0 {
6954 break
6955 }
6956 x := v_0
6957 v.reset(OpAMD64TESTL)
6958 v.AddArg2(x, x)
6959 return true
6960 }
// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && clobber(l)
// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
6964 for {
6965 c := auxIntToInt32(v.AuxInt)
6966 l := v_0
6967 if l.Op != OpAMD64MOVLload {
6968 break
6969 }
6970 off := auxIntToInt32(l.AuxInt)
6971 sym := auxToSym(l.Aux)
6972 mem := l.Args[1]
6973 ptr := l.Args[0]
6974 if !(l.Uses == 1 && clobber(l)) {
6975 break
6976 }
6977 b = l.Block
6978 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
6979 v.copyOf(v0)
6980 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
6981 v0.Aux = symToAux(sym)
6982 v0.AddArg2(ptr, mem)
6983 return true
6984 }
6985 return false
6986 }
6987 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
6988 v_1 := v.Args[1]
6989 v_0 := v.Args[0]
// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
6993 for {
6994 valoff1 := auxIntToValAndOff(v.AuxInt)
6995 sym := auxToSym(v.Aux)
6996 if v_0.Op != OpAMD64ADDQconst {
6997 break
6998 }
6999 off2 := auxIntToInt32(v_0.AuxInt)
7000 base := v_0.Args[0]
7001 mem := v_1
7002 if !(ValAndOff(valoff1).canAdd32(off2)) {
7003 break
7004 }
7005 v.reset(OpAMD64CMPLconstload)
7006 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7007 v.Aux = symToAux(sym)
7008 v.AddArg2(base, mem)
7009 return true
7010 }
// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7014 for {
7015 valoff1 := auxIntToValAndOff(v.AuxInt)
7016 sym1 := auxToSym(v.Aux)
7017 if v_0.Op != OpAMD64LEAQ {
7018 break
7019 }
7020 off2 := auxIntToInt32(v_0.AuxInt)
7021 sym2 := auxToSym(v_0.Aux)
7022 base := v_0.Args[0]
7023 mem := v_1
7024 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7025 break
7026 }
7027 v.reset(OpAMD64CMPLconstload)
7028 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7029 v.Aux = symToAux(mergeSym(sym1, sym2))
7030 v.AddArg2(base, mem)
7031 return true
7032 }
7033 return false
7034 }
7035 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
7036 v_2 := v.Args[2]
7037 v_1 := v.Args[1]
7038 v_0 := v.Args[0]
// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPLload [off1+off2] {sym} base val mem)
7042 for {
7043 off1 := auxIntToInt32(v.AuxInt)
7044 sym := auxToSym(v.Aux)
7045 if v_0.Op != OpAMD64ADDQconst {
7046 break
7047 }
7048 off2 := auxIntToInt32(v_0.AuxInt)
7049 base := v_0.Args[0]
7050 val := v_1
7051 mem := v_2
7052 if !(is32Bit(int64(off1) + int64(off2))) {
7053 break
7054 }
7055 v.reset(OpAMD64CMPLload)
7056 v.AuxInt = int32ToAuxInt(off1 + off2)
7057 v.Aux = symToAux(sym)
7058 v.AddArg3(base, val, mem)
7059 return true
7060 }
// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7064 for {
7065 off1 := auxIntToInt32(v.AuxInt)
7066 sym1 := auxToSym(v.Aux)
7067 if v_0.Op != OpAMD64LEAQ {
7068 break
7069 }
7070 off2 := auxIntToInt32(v_0.AuxInt)
7071 sym2 := auxToSym(v_0.Aux)
7072 base := v_0.Args[0]
7073 val := v_1
7074 mem := v_2
7075 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7076 break
7077 }
7078 v.reset(OpAMD64CMPLload)
7079 v.AuxInt = int32ToAuxInt(off1 + off2)
7080 v.Aux = symToAux(mergeSym(sym1, sym2))
7081 v.AddArg3(base, val, mem)
7082 return true
7083 }
// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
7086 for {
7087 off := auxIntToInt32(v.AuxInt)
7088 sym := auxToSym(v.Aux)
7089 ptr := v_0
7090 if v_1.Op != OpAMD64MOVLconst {
7091 break
7092 }
7093 c := auxIntToInt32(v_1.AuxInt)
7094 mem := v_2
7095 v.reset(OpAMD64CMPLconstload)
7096 v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7097 v.Aux = symToAux(sym)
7098 v.AddArg2(ptr, mem)
7099 return true
7100 }
7101 return false
7102 }
7103 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7104 v_1 := v.Args[1]
7105 v_0 := v.Args[0]
7106 b := v.Block
// match: (CMPQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (CMPQconst x [int32(c)])
7110 for {
7111 x := v_0
7112 if v_1.Op != OpAMD64MOVQconst {
7113 break
7114 }
7115 c := auxIntToInt64(v_1.AuxInt)
7116 if !(is32Bit(c)) {
7117 break
7118 }
7119 v.reset(OpAMD64CMPQconst)
7120 v.AuxInt = int32ToAuxInt(int32(c))
7121 v.AddArg(x)
7122 return true
7123 }
// match: (CMPQ (MOVQconst [c]) x)
// cond: is32Bit(c)
// result: (InvertFlags (CMPQconst x [int32(c)]))
7127 for {
7128 if v_0.Op != OpAMD64MOVQconst {
7129 break
7130 }
7131 c := auxIntToInt64(v_0.AuxInt)
7132 x := v_1
7133 if !(is32Bit(c)) {
7134 break
7135 }
7136 v.reset(OpAMD64InvertFlags)
7137 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7138 v0.AuxInt = int32ToAuxInt(int32(c))
7139 v0.AddArg(x)
7140 v.AddArg(v0)
7141 return true
7142 }
// match: (CMPQ x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPQ y x))
7146 for {
7147 x := v_0
7148 y := v_1
7149 if !(canonLessThan(x, y)) {
7150 break
7151 }
7152 v.reset(OpAMD64InvertFlags)
7153 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7154 v0.AddArg2(y, x)
7155 v.AddArg(v0)
7156 return true
7157 }
// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
// cond: x==y
// result: (FlagEQ)
7161 for {
7162 if v_0.Op != OpAMD64MOVQconst {
7163 break
7164 }
7165 x := auxIntToInt64(v_0.AuxInt)
7166 if v_1.Op != OpAMD64MOVQconst {
7167 break
7168 }
7169 y := auxIntToInt64(v_1.AuxInt)
7170 if !(x == y) {
7171 break
7172 }
7173 v.reset(OpAMD64FlagEQ)
7174 return true
7175 }
// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
// cond: x<y && uint64(x)<uint64(y)
// result: (FlagLT_ULT)
7179 for {
7180 if v_0.Op != OpAMD64MOVQconst {
7181 break
7182 }
7183 x := auxIntToInt64(v_0.AuxInt)
7184 if v_1.Op != OpAMD64MOVQconst {
7185 break
7186 }
7187 y := auxIntToInt64(v_1.AuxInt)
7188 if !(x < y && uint64(x) < uint64(y)) {
7189 break
7190 }
7191 v.reset(OpAMD64FlagLT_ULT)
7192 return true
7193 }
// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
// cond: x<y && uint64(x)>uint64(y)
// result: (FlagLT_UGT)
7197 for {
7198 if v_0.Op != OpAMD64MOVQconst {
7199 break
7200 }
7201 x := auxIntToInt64(v_0.AuxInt)
7202 if v_1.Op != OpAMD64MOVQconst {
7203 break
7204 }
7205 y := auxIntToInt64(v_1.AuxInt)
7206 if !(x < y && uint64(x) > uint64(y)) {
7207 break
7208 }
7209 v.reset(OpAMD64FlagLT_UGT)
7210 return true
7211 }
// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
// cond: x>y && uint64(x)<uint64(y)
// result: (FlagGT_ULT)
7215 for {
7216 if v_0.Op != OpAMD64MOVQconst {
7217 break
7218 }
7219 x := auxIntToInt64(v_0.AuxInt)
7220 if v_1.Op != OpAMD64MOVQconst {
7221 break
7222 }
7223 y := auxIntToInt64(v_1.AuxInt)
7224 if !(x > y && uint64(x) < uint64(y)) {
7225 break
7226 }
7227 v.reset(OpAMD64FlagGT_ULT)
7228 return true
7229 }
// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
// cond: x>y && uint64(x)>uint64(y)
// result: (FlagGT_UGT)
7233 for {
7234 if v_0.Op != OpAMD64MOVQconst {
7235 break
7236 }
7237 x := auxIntToInt64(v_0.AuxInt)
7238 if v_1.Op != OpAMD64MOVQconst {
7239 break
7240 }
7241 y := auxIntToInt64(v_1.AuxInt)
7242 if !(x > y && uint64(x) > uint64(y)) {
7243 break
7244 }
7245 v.reset(OpAMD64FlagGT_UGT)
7246 return true
7247 }
// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPQload {sym} [off] ptr x mem)
7251 for {
7252 l := v_0
7253 if l.Op != OpAMD64MOVQload {
7254 break
7255 }
7256 off := auxIntToInt32(l.AuxInt)
7257 sym := auxToSym(l.Aux)
7258 mem := l.Args[1]
7259 ptr := l.Args[0]
7260 x := v_1
7261 if !(canMergeLoad(v, l) && clobber(l)) {
7262 break
7263 }
7264 v.reset(OpAMD64CMPQload)
7265 v.AuxInt = int32ToAuxInt(off)
7266 v.Aux = symToAux(sym)
7267 v.AddArg3(ptr, x, mem)
7268 return true
7269 }
// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
7273 for {
7274 x := v_0
7275 l := v_1
7276 if l.Op != OpAMD64MOVQload {
7277 break
7278 }
7279 off := auxIntToInt32(l.AuxInt)
7280 sym := auxToSym(l.Aux)
7281 mem := l.Args[1]
7282 ptr := l.Args[0]
7283 if !(canMergeLoad(v, l) && clobber(l)) {
7284 break
7285 }
7286 v.reset(OpAMD64InvertFlags)
7287 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7288 v0.AuxInt = int32ToAuxInt(off)
7289 v0.Aux = symToAux(sym)
7290 v0.AddArg3(ptr, x, mem)
7291 v.AddArg(v0)
7292 return true
7293 }
7294 return false
7295 }
7296 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7297 v_0 := v.Args[0]
7298 b := v.Block
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x==int64(y)
// result: (FlagEQ)
7302 for {
7303 y := auxIntToInt32(v.AuxInt)
7304 if v_0.Op != OpAMD64MOVQconst {
7305 break
7306 }
7307 x := auxIntToInt64(v_0.AuxInt)
7308 if !(x == int64(y)) {
7309 break
7310 }
7311 v.reset(OpAMD64FlagEQ)
7312 return true
7313 }
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x<int64(y) && uint64(x)<uint64(int64(y))
// result: (FlagLT_ULT)
7317 for {
7318 y := auxIntToInt32(v.AuxInt)
7319 if v_0.Op != OpAMD64MOVQconst {
7320 break
7321 }
7322 x := auxIntToInt64(v_0.AuxInt)
7323 if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
7324 break
7325 }
7326 v.reset(OpAMD64FlagLT_ULT)
7327 return true
7328 }
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x<int64(y) && uint64(x)>uint64(int64(y))
// result: (FlagLT_UGT)
7332 for {
7333 y := auxIntToInt32(v.AuxInt)
7334 if v_0.Op != OpAMD64MOVQconst {
7335 break
7336 }
7337 x := auxIntToInt64(v_0.AuxInt)
7338 if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
7339 break
7340 }
7341 v.reset(OpAMD64FlagLT_UGT)
7342 return true
7343 }
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x>int64(y) && uint64(x)<uint64(int64(y))
// result: (FlagGT_ULT)
7347 for {
7348 y := auxIntToInt32(v.AuxInt)
7349 if v_0.Op != OpAMD64MOVQconst {
7350 break
7351 }
7352 x := auxIntToInt64(v_0.AuxInt)
7353 if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
7354 break
7355 }
7356 v.reset(OpAMD64FlagGT_ULT)
7357 return true
7358 }
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x>int64(y) && uint64(x)>uint64(int64(y))
// result: (FlagGT_UGT)
7362 for {
7363 y := auxIntToInt32(v.AuxInt)
7364 if v_0.Op != OpAMD64MOVQconst {
7365 break
7366 }
7367 x := auxIntToInt64(v_0.AuxInt)
7368 if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
7369 break
7370 }
7371 v.reset(OpAMD64FlagGT_UGT)
7372 return true
7373 }
// match: (CMPQconst (MOVBQZX _) [c])
// cond: 0xFF < c
// result: (FlagLT_ULT)
7377 for {
7378 c := auxIntToInt32(v.AuxInt)
7379 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7380 break
7381 }
7382 v.reset(OpAMD64FlagLT_ULT)
7383 return true
7384 }
// match: (CMPQconst (MOVWQZX _) [c])
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
7388 for {
7389 c := auxIntToInt32(v.AuxInt)
7390 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7391 break
7392 }
7393 v.reset(OpAMD64FlagLT_ULT)
7394 return true
7395 }
// match: (CMPQconst (SHRQconst _ [c]) [n])
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
7399 for {
7400 n := auxIntToInt32(v.AuxInt)
7401 if v_0.Op != OpAMD64SHRQconst {
7402 break
7403 }
7404 c := auxIntToInt8(v_0.AuxInt)
7405 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7406 break
7407 }
7408 v.reset(OpAMD64FlagLT_ULT)
7409 return true
7410 }
// match: (CMPQconst (ANDQconst _ [m]) [n])
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
7414 for {
7415 n := auxIntToInt32(v.AuxInt)
7416 if v_0.Op != OpAMD64ANDQconst {
7417 break
7418 }
7419 m := auxIntToInt32(v_0.AuxInt)
7420 if !(0 <= m && m < n) {
7421 break
7422 }
7423 v.reset(OpAMD64FlagLT_ULT)
7424 return true
7425 }
// match: (CMPQconst (ANDLconst _ [m]) [n])
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
7429 for {
7430 n := auxIntToInt32(v.AuxInt)
7431 if v_0.Op != OpAMD64ANDLconst {
7432 break
7433 }
7434 m := auxIntToInt32(v_0.AuxInt)
7435 if !(0 <= m && m < n) {
7436 break
7437 }
7438 v.reset(OpAMD64FlagLT_ULT)
7439 return true
7440 }
// match: (CMPQconst a:(ANDQ x y) [0])
// cond: a.Uses == 1
// result: (TESTQ x y)
7444 for {
7445 if auxIntToInt32(v.AuxInt) != 0 {
7446 break
7447 }
7448 a := v_0
7449 if a.Op != OpAMD64ANDQ {
7450 break
7451 }
7452 y := a.Args[1]
7453 x := a.Args[0]
7454 if !(a.Uses == 1) {
7455 break
7456 }
7457 v.reset(OpAMD64TESTQ)
7458 v.AddArg2(x, y)
7459 return true
7460 }
// match: (CMPQconst a:(ANDQconst [c] x) [0])
// cond: a.Uses == 1
// result: (TESTQconst [c] x)
7464 for {
7465 if auxIntToInt32(v.AuxInt) != 0 {
7466 break
7467 }
7468 a := v_0
7469 if a.Op != OpAMD64ANDQconst {
7470 break
7471 }
7472 c := auxIntToInt32(a.AuxInt)
7473 x := a.Args[0]
7474 if !(a.Uses == 1) {
7475 break
7476 }
7477 v.reset(OpAMD64TESTQconst)
7478 v.AuxInt = int32ToAuxInt(c)
7479 v.AddArg(x)
7480 return true
7481 }
// match: (CMPQconst x [0])
// result: (TESTQ x x)
7484 for {
7485 if auxIntToInt32(v.AuxInt) != 0 {
7486 break
7487 }
7488 x := v_0
7489 v.reset(OpAMD64TESTQ)
7490 v.AddArg2(x, x)
7491 return true
7492 }
// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && clobber(l)
// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
7496 for {
7497 c := auxIntToInt32(v.AuxInt)
7498 l := v_0
7499 if l.Op != OpAMD64MOVQload {
7500 break
7501 }
7502 off := auxIntToInt32(l.AuxInt)
7503 sym := auxToSym(l.Aux)
7504 mem := l.Args[1]
7505 ptr := l.Args[0]
7506 if !(l.Uses == 1 && clobber(l)) {
7507 break
7508 }
7509 b = l.Block
7510 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7511 v.copyOf(v0)
7512 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
7513 v0.Aux = symToAux(sym)
7514 v0.AddArg2(ptr, mem)
7515 return true
7516 }
7517 return false
7518 }
7519 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7520 v_1 := v.Args[1]
7521 v_0 := v.Args[0]
// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7525 for {
7526 valoff1 := auxIntToValAndOff(v.AuxInt)
7527 sym := auxToSym(v.Aux)
7528 if v_0.Op != OpAMD64ADDQconst {
7529 break
7530 }
7531 off2 := auxIntToInt32(v_0.AuxInt)
7532 base := v_0.Args[0]
7533 mem := v_1
7534 if !(ValAndOff(valoff1).canAdd32(off2)) {
7535 break
7536 }
7537 v.reset(OpAMD64CMPQconstload)
7538 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7539 v.Aux = symToAux(sym)
7540 v.AddArg2(base, mem)
7541 return true
7542 }
// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7546 for {
7547 valoff1 := auxIntToValAndOff(v.AuxInt)
7548 sym1 := auxToSym(v.Aux)
7549 if v_0.Op != OpAMD64LEAQ {
7550 break
7551 }
7552 off2 := auxIntToInt32(v_0.AuxInt)
7553 sym2 := auxToSym(v_0.Aux)
7554 base := v_0.Args[0]
7555 mem := v_1
7556 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7557 break
7558 }
7559 v.reset(OpAMD64CMPQconstload)
7560 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7561 v.Aux = symToAux(mergeSym(sym1, sym2))
7562 v.AddArg2(base, mem)
7563 return true
7564 }
7565 return false
7566 }
7567 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7568 v_2 := v.Args[2]
7569 v_1 := v.Args[1]
7570 v_0 := v.Args[0]
// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPQload [off1+off2] {sym} base val mem)
7574 for {
7575 off1 := auxIntToInt32(v.AuxInt)
7576 sym := auxToSym(v.Aux)
7577 if v_0.Op != OpAMD64ADDQconst {
7578 break
7579 }
7580 off2 := auxIntToInt32(v_0.AuxInt)
7581 base := v_0.Args[0]
7582 val := v_1
7583 mem := v_2
7584 if !(is32Bit(int64(off1) + int64(off2))) {
7585 break
7586 }
7587 v.reset(OpAMD64CMPQload)
7588 v.AuxInt = int32ToAuxInt(off1 + off2)
7589 v.Aux = symToAux(sym)
7590 v.AddArg3(base, val, mem)
7591 return true
7592 }
// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7596 for {
7597 off1 := auxIntToInt32(v.AuxInt)
7598 sym1 := auxToSym(v.Aux)
7599 if v_0.Op != OpAMD64LEAQ {
7600 break
7601 }
7602 off2 := auxIntToInt32(v_0.AuxInt)
7603 sym2 := auxToSym(v_0.Aux)
7604 base := v_0.Args[0]
7605 val := v_1
7606 mem := v_2
7607 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7608 break
7609 }
7610 v.reset(OpAMD64CMPQload)
7611 v.AuxInt = int32ToAuxInt(off1 + off2)
7612 v.Aux = symToAux(mergeSym(sym1, sym2))
7613 v.AddArg3(base, val, mem)
7614 return true
7615 }
// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
// cond: validVal(c)
// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7619 for {
7620 off := auxIntToInt32(v.AuxInt)
7621 sym := auxToSym(v.Aux)
7622 ptr := v_0
7623 if v_1.Op != OpAMD64MOVQconst {
7624 break
7625 }
7626 c := auxIntToInt64(v_1.AuxInt)
7627 mem := v_2
7628 if !(validVal(c)) {
7629 break
7630 }
7631 v.reset(OpAMD64CMPQconstload)
7632 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7633 v.Aux = symToAux(sym)
7634 v.AddArg2(ptr, mem)
7635 return true
7636 }
7637 return false
7638 }
7639 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
7640 v_1 := v.Args[1]
7641 v_0 := v.Args[0]
7642 b := v.Block
// match: (CMPW x (MOVLconst [c]))
// result: (CMPWconst x [int16(c)])
7645 for {
7646 x := v_0
7647 if v_1.Op != OpAMD64MOVLconst {
7648 break
7649 }
7650 c := auxIntToInt32(v_1.AuxInt)
7651 v.reset(OpAMD64CMPWconst)
7652 v.AuxInt = int16ToAuxInt(int16(c))
7653 v.AddArg(x)
7654 return true
7655 }
// match: (CMPW (MOVLconst [c]) x)
// result: (InvertFlags (CMPWconst x [int16(c)]))
7658 for {
7659 if v_0.Op != OpAMD64MOVLconst {
7660 break
7661 }
7662 c := auxIntToInt32(v_0.AuxInt)
7663 x := v_1
7664 v.reset(OpAMD64InvertFlags)
7665 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
7666 v0.AuxInt = int16ToAuxInt(int16(c))
7667 v0.AddArg(x)
7668 v.AddArg(v0)
7669 return true
7670 }
// match: (CMPW x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMPW y x))
7674 for {
7675 x := v_0
7676 y := v_1
7677 if !(canonLessThan(x, y)) {
7678 break
7679 }
7680 v.reset(OpAMD64InvertFlags)
7681 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
7682 v0.AddArg2(y, x)
7683 v.AddArg(v0)
7684 return true
7685 }
// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPWload {sym} [off] ptr x mem)
7689 for {
7690 l := v_0
7691 if l.Op != OpAMD64MOVWload {
7692 break
7693 }
7694 off := auxIntToInt32(l.AuxInt)
7695 sym := auxToSym(l.Aux)
7696 mem := l.Args[1]
7697 ptr := l.Args[0]
7698 x := v_1
7699 if !(canMergeLoad(v, l) && clobber(l)) {
7700 break
7701 }
7702 v.reset(OpAMD64CMPWload)
7703 v.AuxInt = int32ToAuxInt(off)
7704 v.Aux = symToAux(sym)
7705 v.AddArg3(ptr, x, mem)
7706 return true
7707 }
// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
7711 for {
7712 x := v_0
7713 l := v_1
7714 if l.Op != OpAMD64MOVWload {
7715 break
7716 }
7717 off := auxIntToInt32(l.AuxInt)
7718 sym := auxToSym(l.Aux)
7719 mem := l.Args[1]
7720 ptr := l.Args[0]
7721 if !(canMergeLoad(v, l) && clobber(l)) {
7722 break
7723 }
7724 v.reset(OpAMD64InvertFlags)
7725 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
7726 v0.AuxInt = int32ToAuxInt(off)
7727 v0.Aux = symToAux(sym)
7728 v0.AddArg3(ptr, x, mem)
7729 v.AddArg(v0)
7730 return true
7731 }
7732 return false
7733 }
7734 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
7735 v_0 := v.Args[0]
7736 b := v.Block
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)==y
// result: (FlagEQ)
7740 for {
7741 y := auxIntToInt16(v.AuxInt)
7742 if v_0.Op != OpAMD64MOVLconst {
7743 break
7744 }
7745 x := auxIntToInt32(v_0.AuxInt)
7746 if !(int16(x) == y) {
7747 break
7748 }
7749 v.reset(OpAMD64FlagEQ)
7750 return true
7751 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)<y && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
7755 for {
7756 y := auxIntToInt16(v.AuxInt)
7757 if v_0.Op != OpAMD64MOVLconst {
7758 break
7759 }
7760 x := auxIntToInt32(v_0.AuxInt)
7761 if !(int16(x) < y && uint16(x) < uint16(y)) {
7762 break
7763 }
7764 v.reset(OpAMD64FlagLT_ULT)
7765 return true
7766 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)<y && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
7770 for {
7771 y := auxIntToInt16(v.AuxInt)
7772 if v_0.Op != OpAMD64MOVLconst {
7773 break
7774 }
7775 x := auxIntToInt32(v_0.AuxInt)
7776 if !(int16(x) < y && uint16(x) > uint16(y)) {
7777 break
7778 }
7779 v.reset(OpAMD64FlagLT_UGT)
7780 return true
7781 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>y && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
7785 for {
7786 y := auxIntToInt16(v.AuxInt)
7787 if v_0.Op != OpAMD64MOVLconst {
7788 break
7789 }
7790 x := auxIntToInt32(v_0.AuxInt)
7791 if !(int16(x) > y && uint16(x) < uint16(y)) {
7792 break
7793 }
7794 v.reset(OpAMD64FlagGT_ULT)
7795 return true
7796 }
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)>y && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
7800 for {
7801 y := auxIntToInt16(v.AuxInt)
7802 if v_0.Op != OpAMD64MOVLconst {
7803 break
7804 }
7805 x := auxIntToInt32(v_0.AuxInt)
7806 if !(int16(x) > y && uint16(x) > uint16(y)) {
7807 break
7808 }
7809 v.reset(OpAMD64FlagGT_UGT)
7810 return true
7811 }
// match: (CMPWconst (ANDLconst _ [m]) [n])
// cond: 0 <= int16(m) && int16(m) < n
// result: (FlagLT_ULT)
7815 for {
7816 n := auxIntToInt16(v.AuxInt)
7817 if v_0.Op != OpAMD64ANDLconst {
7818 break
7819 }
7820 m := auxIntToInt32(v_0.AuxInt)
7821 if !(0 <= int16(m) && int16(m) < n) {
7822 break
7823 }
7824 v.reset(OpAMD64FlagLT_ULT)
7825 return true
7826 }
// match: (CMPWconst a:(ANDL x y) [0])
// cond: a.Uses == 1
// result: (TESTW x y)
7830 for {
7831 if auxIntToInt16(v.AuxInt) != 0 {
7832 break
7833 }
7834 a := v_0
7835 if a.Op != OpAMD64ANDL {
7836 break
7837 }
7838 y := a.Args[1]
7839 x := a.Args[0]
7840 if !(a.Uses == 1) {
7841 break
7842 }
7843 v.reset(OpAMD64TESTW)
7844 v.AddArg2(x, y)
7845 return true
7846 }
// match: (CMPWconst a:(ANDLconst [c] x) [0])
// cond: a.Uses == 1
// result: (TESTWconst [int16(c)] x)
7850 for {
7851 if auxIntToInt16(v.AuxInt) != 0 {
7852 break
7853 }
7854 a := v_0
7855 if a.Op != OpAMD64ANDLconst {
7856 break
7857 }
7858 c := auxIntToInt32(a.AuxInt)
7859 x := a.Args[0]
7860 if !(a.Uses == 1) {
7861 break
7862 }
7863 v.reset(OpAMD64TESTWconst)
7864 v.AuxInt = int16ToAuxInt(int16(c))
7865 v.AddArg(x)
7866 return true
7867 }
// match: (CMPWconst x [0])
// result: (TESTW x x)
7870 for {
7871 if auxIntToInt16(v.AuxInt) != 0 {
7872 break
7873 }
7874 x := v_0
7875 v.reset(OpAMD64TESTW)
7876 v.AddArg2(x, x)
7877 return true
7878 }
// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && clobber(l)
// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
7882 for {
7883 c := auxIntToInt16(v.AuxInt)
7884 l := v_0
7885 if l.Op != OpAMD64MOVWload {
7886 break
7887 }
7888 off := auxIntToInt32(l.AuxInt)
7889 sym := auxToSym(l.Aux)
7890 mem := l.Args[1]
7891 ptr := l.Args[0]
7892 if !(l.Uses == 1 && clobber(l)) {
7893 break
7894 }
7895 b = l.Block
7896 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
7897 v.copyOf(v0)
7898 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
7899 v0.Aux = symToAux(sym)
7900 v0.AddArg2(ptr, mem)
7901 return true
7902 }
7903 return false
7904 }
7905 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
7906 v_1 := v.Args[1]
7907 v_0 := v.Args[0]
// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
7911 for {
7912 valoff1 := auxIntToValAndOff(v.AuxInt)
7913 sym := auxToSym(v.Aux)
7914 if v_0.Op != OpAMD64ADDQconst {
7915 break
7916 }
7917 off2 := auxIntToInt32(v_0.AuxInt)
7918 base := v_0.Args[0]
7919 mem := v_1
7920 if !(ValAndOff(valoff1).canAdd32(off2)) {
7921 break
7922 }
7923 v.reset(OpAMD64CMPWconstload)
7924 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7925 v.Aux = symToAux(sym)
7926 v.AddArg2(base, mem)
7927 return true
7928 }
// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
7932 for {
7933 valoff1 := auxIntToValAndOff(v.AuxInt)
7934 sym1 := auxToSym(v.Aux)
7935 if v_0.Op != OpAMD64LEAQ {
7936 break
7937 }
7938 off2 := auxIntToInt32(v_0.AuxInt)
7939 sym2 := auxToSym(v_0.Aux)
7940 base := v_0.Args[0]
7941 mem := v_1
7942 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
7943 break
7944 }
7945 v.reset(OpAMD64CMPWconstload)
7946 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
7947 v.Aux = symToAux(mergeSym(sym1, sym2))
7948 v.AddArg2(base, mem)
7949 return true
7950 }
7951 return false
7952 }
7953 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
7954 v_2 := v.Args[2]
7955 v_1 := v.Args[1]
7956 v_0 := v.Args[0]
// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPWload [off1+off2] {sym} base val mem)
7960 for {
7961 off1 := auxIntToInt32(v.AuxInt)
7962 sym := auxToSym(v.Aux)
7963 if v_0.Op != OpAMD64ADDQconst {
7964 break
7965 }
7966 off2 := auxIntToInt32(v_0.AuxInt)
7967 base := v_0.Args[0]
7968 val := v_1
7969 mem := v_2
7970 if !(is32Bit(int64(off1) + int64(off2))) {
7971 break
7972 }
7973 v.reset(OpAMD64CMPWload)
7974 v.AuxInt = int32ToAuxInt(off1 + off2)
7975 v.Aux = symToAux(sym)
7976 v.AddArg3(base, val, mem)
7977 return true
7978 }
// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
7982 for {
7983 off1 := auxIntToInt32(v.AuxInt)
7984 sym1 := auxToSym(v.Aux)
7985 if v_0.Op != OpAMD64LEAQ {
7986 break
7987 }
7988 off2 := auxIntToInt32(v_0.AuxInt)
7989 sym2 := auxToSym(v_0.Aux)
7990 base := v_0.Args[0]
7991 val := v_1
7992 mem := v_2
7993 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
7994 break
7995 }
7996 v.reset(OpAMD64CMPWload)
7997 v.AuxInt = int32ToAuxInt(off1 + off2)
7998 v.Aux = symToAux(mergeSym(sym1, sym2))
7999 v.AddArg3(base, val, mem)
8000 return true
8001 }
// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
8004 for {
8005 off := auxIntToInt32(v.AuxInt)
8006 sym := auxToSym(v.Aux)
8007 ptr := v_0
8008 if v_1.Op != OpAMD64MOVLconst {
8009 break
8010 }
8011 c := auxIntToInt32(v_1.AuxInt)
8012 mem := v_2
8013 v.reset(OpAMD64CMPWconstload)
8014 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
8015 v.Aux = symToAux(sym)
8016 v.AddArg2(ptr, mem)
8017 return true
8018 }
8019 return false
8020 }
8021 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
8022 v_3 := v.Args[3]
8023 v_2 := v.Args[2]
8024 v_1 := v.Args[1]
8025 v_0 := v.Args[0]
// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
8029 for {
8030 off1 := auxIntToInt32(v.AuxInt)
8031 sym := auxToSym(v.Aux)
8032 if v_0.Op != OpAMD64ADDQconst {
8033 break
8034 }
8035 off2 := auxIntToInt32(v_0.AuxInt)
8036 ptr := v_0.Args[0]
8037 old := v_1
8038 new_ := v_2
8039 mem := v_3
8040 if !(is32Bit(int64(off1) + int64(off2))) {
8041 break
8042 }
8043 v.reset(OpAMD64CMPXCHGLlock)
8044 v.AuxInt = int32ToAuxInt(off1 + off2)
8045 v.Aux = symToAux(sym)
8046 v.AddArg4(ptr, old, new_, mem)
8047 return true
8048 }
8049 return false
8050 }
8051 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
8052 v_3 := v.Args[3]
8053 v_2 := v.Args[2]
8054 v_1 := v.Args[1]
8055 v_0 := v.Args[0]
// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
8059 for {
8060 off1 := auxIntToInt32(v.AuxInt)
8061 sym := auxToSym(v.Aux)
8062 if v_0.Op != OpAMD64ADDQconst {
8063 break
8064 }
8065 off2 := auxIntToInt32(v_0.AuxInt)
8066 ptr := v_0.Args[0]
8067 old := v_1
8068 new_ := v_2
8069 mem := v_3
8070 if !(is32Bit(int64(off1) + int64(off2))) {
8071 break
8072 }
8073 v.reset(OpAMD64CMPXCHGQlock)
8074 v.AuxInt = int32ToAuxInt(off1 + off2)
8075 v.Aux = symToAux(sym)
8076 v.AddArg4(ptr, old, new_, mem)
8077 return true
8078 }
8079 return false
8080 }
8081 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
8082 v_1 := v.Args[1]
8083 v_0 := v.Args[0]
// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSDload x [off] {sym} ptr mem)
8087 for {
8088 x := v_0
8089 l := v_1
8090 if l.Op != OpAMD64MOVSDload {
8091 break
8092 }
8093 off := auxIntToInt32(l.AuxInt)
8094 sym := auxToSym(l.Aux)
8095 mem := l.Args[1]
8096 ptr := l.Args[0]
8097 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8098 break
8099 }
8100 v.reset(OpAMD64DIVSDload)
8101 v.AuxInt = int32ToAuxInt(off)
8102 v.Aux = symToAux(sym)
8103 v.AddArg3(x, ptr, mem)
8104 return true
8105 }
8106 return false
8107 }
8108 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8109 v_2 := v.Args[2]
8110 v_1 := v.Args[1]
8111 v_0 := v.Args[0]
// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSDload [off1+off2] {sym} val base mem)
8115 for {
8116 off1 := auxIntToInt32(v.AuxInt)
8117 sym := auxToSym(v.Aux)
8118 val := v_0
8119 if v_1.Op != OpAMD64ADDQconst {
8120 break
8121 }
8122 off2 := auxIntToInt32(v_1.AuxInt)
8123 base := v_1.Args[0]
8124 mem := v_2
8125 if !(is32Bit(int64(off1) + int64(off2))) {
8126 break
8127 }
8128 v.reset(OpAMD64DIVSDload)
8129 v.AuxInt = int32ToAuxInt(off1 + off2)
8130 v.Aux = symToAux(sym)
8131 v.AddArg3(val, base, mem)
8132 return true
8133 }
// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8137 for {
8138 off1 := auxIntToInt32(v.AuxInt)
8139 sym1 := auxToSym(v.Aux)
8140 val := v_0
8141 if v_1.Op != OpAMD64LEAQ {
8142 break
8143 }
8144 off2 := auxIntToInt32(v_1.AuxInt)
8145 sym2 := auxToSym(v_1.Aux)
8146 base := v_1.Args[0]
8147 mem := v_2
8148 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8149 break
8150 }
8151 v.reset(OpAMD64DIVSDload)
8152 v.AuxInt = int32ToAuxInt(off1 + off2)
8153 v.Aux = symToAux(mergeSym(sym1, sym2))
8154 v.AddArg3(val, base, mem)
8155 return true
8156 }
8157 return false
8158 }
8159 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8160 v_1 := v.Args[1]
8161 v_0 := v.Args[0]
// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSSload x [off] {sym} ptr mem)
8165 for {
8166 x := v_0
8167 l := v_1
8168 if l.Op != OpAMD64MOVSSload {
8169 break
8170 }
8171 off := auxIntToInt32(l.AuxInt)
8172 sym := auxToSym(l.Aux)
8173 mem := l.Args[1]
8174 ptr := l.Args[0]
8175 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8176 break
8177 }
8178 v.reset(OpAMD64DIVSSload)
8179 v.AuxInt = int32ToAuxInt(off)
8180 v.Aux = symToAux(sym)
8181 v.AddArg3(x, ptr, mem)
8182 return true
8183 }
8184 return false
8185 }
8186 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8187 v_2 := v.Args[2]
8188 v_1 := v.Args[1]
8189 v_0 := v.Args[0]
// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (DIVSSload [off1+off2] {sym} val base mem)
8193 for {
8194 off1 := auxIntToInt32(v.AuxInt)
8195 sym := auxToSym(v.Aux)
8196 val := v_0
8197 if v_1.Op != OpAMD64ADDQconst {
8198 break
8199 }
8200 off2 := auxIntToInt32(v_1.AuxInt)
8201 base := v_1.Args[0]
8202 mem := v_2
8203 if !(is32Bit(int64(off1) + int64(off2))) {
8204 break
8205 }
8206 v.reset(OpAMD64DIVSSload)
8207 v.AuxInt = int32ToAuxInt(off1 + off2)
8208 v.Aux = symToAux(sym)
8209 v.AddArg3(val, base, mem)
8210 return true
8211 }
// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8215 for {
8216 off1 := auxIntToInt32(v.AuxInt)
8217 sym1 := auxToSym(v.Aux)
8218 val := v_0
8219 if v_1.Op != OpAMD64LEAQ {
8220 break
8221 }
8222 off2 := auxIntToInt32(v_1.AuxInt)
8223 sym2 := auxToSym(v_1.Aux)
8224 base := v_1.Args[0]
8225 mem := v_2
8226 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8227 break
8228 }
8229 v.reset(OpAMD64DIVSSload)
8230 v.AuxInt = int32ToAuxInt(off1 + off2)
8231 v.Aux = symToAux(mergeSym(sym1, sym2))
8232 v.AddArg3(val, base, mem)
8233 return true
8234 }
8235 return false
8236 }
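// HMUL* yields only the high half of a widening multiply, and its first
// operand is register-constrained (it must be materialized in AX), so the
// commutations below prefer a rematerializeable value in that slot: the
// allocator can then recreate it in AX instead of spilling another value.
// This reading of the constraint is inferred from the opcode tables, e.g.:
//
//	(HMULL x y) && !x.rematerializeable() && y.rematerializeable() => (HMULL y x)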
8237 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8238 v_1 := v.Args[1]
8239 v_0 := v.Args[0]
// match: (HMULL x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULL y x)
8243 for {
8244 x := v_0
8245 y := v_1
8246 if !(!x.rematerializeable() && y.rematerializeable()) {
8247 break
8248 }
8249 v.reset(OpAMD64HMULL)
8250 v.AddArg2(y, x)
8251 return true
8252 }
8253 return false
8254 }
8255 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8256 v_1 := v.Args[1]
8257 v_0 := v.Args[0]
// match: (HMULLU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULLU y x)
8261 for {
8262 x := v_0
8263 y := v_1
8264 if !(!x.rematerializeable() && y.rematerializeable()) {
8265 break
8266 }
8267 v.reset(OpAMD64HMULLU)
8268 v.AddArg2(y, x)
8269 return true
8270 }
8271 return false
8272 }
8273 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8274 v_1 := v.Args[1]
8275 v_0 := v.Args[0]
// match: (HMULQ x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQ y x)
8279 for {
8280 x := v_0
8281 y := v_1
8282 if !(!x.rematerializeable() && y.rematerializeable()) {
8283 break
8284 }
8285 v.reset(OpAMD64HMULQ)
8286 v.AddArg2(y, x)
8287 return true
8288 }
8289 return false
8290 }
8291 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8292 v_1 := v.Args[1]
8293 v_0 := v.Args[0]
// match: (HMULQU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQU y x)
8297 for {
8298 x := v_0
8299 y := v_1
8300 if !(!x.rematerializeable() && y.rematerializeable()) {
8301 break
8302 }
8303 v.reset(OpAMD64HMULQU)
8304 v.AddArg2(y, x)
8305 return true
8306 }
8307 return false
8308 }
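// The LEAL* rewrites strength-reduce 32-bit address arithmetic: constant adds
// fold into the displacement (guarded by is32Bit so it still encodes), a
// plain ADDL becomes a two-register LEAL1, and a shifted index upgrades the
// scale, e.g. x + (y << 1) becomes (LEAL2 [c] {s} x y). SB, the static data
// base, is excluded because it can only appear as the base of an address.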
8309 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8310 v_0 := v.Args[0]
// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (LEAL [c+d] {s} x)
8314 for {
8315 c := auxIntToInt32(v.AuxInt)
8316 s := auxToSym(v.Aux)
8317 if v_0.Op != OpAMD64ADDLconst {
8318 break
8319 }
8320 d := auxIntToInt32(v_0.AuxInt)
8321 x := v_0.Args[0]
8322 if !(is32Bit(int64(c) + int64(d))) {
8323 break
8324 }
8325 v.reset(OpAMD64LEAL)
8326 v.AuxInt = int32ToAuxInt(c + d)
8327 v.Aux = symToAux(s)
8328 v.AddArg(x)
8329 return true
8330 }
// match: (LEAL [c] {s} (ADDL x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAL1 [c] {s} x y)
8334 for {
8335 c := auxIntToInt32(v.AuxInt)
8336 s := auxToSym(v.Aux)
8337 if v_0.Op != OpAMD64ADDL {
8338 break
8339 }
8340 _ = v_0.Args[1]
8341 v_0_0 := v_0.Args[0]
8342 v_0_1 := v_0.Args[1]
8343 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8344 x := v_0_0
8345 y := v_0_1
8346 if !(x.Op != OpSB && y.Op != OpSB) {
8347 continue
8348 }
8349 v.reset(OpAMD64LEAL1)
8350 v.AuxInt = int32ToAuxInt(c)
8351 v.Aux = symToAux(s)
8352 v.AddArg2(x, y)
8353 return true
8354 }
8355 break
8356 }
8357 return false
8358 }
8359 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8360 v_1 := v.Args[1]
8361 v_0 := v.Args[0]
// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL1 [c+d] {s} x y)
8365 for {
8366 c := auxIntToInt32(v.AuxInt)
8367 s := auxToSym(v.Aux)
8368 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8369 if v_0.Op != OpAMD64ADDLconst {
8370 continue
8371 }
8372 d := auxIntToInt32(v_0.AuxInt)
8373 x := v_0.Args[0]
8374 y := v_1
8375 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8376 continue
8377 }
8378 v.reset(OpAMD64LEAL1)
8379 v.AuxInt = int32ToAuxInt(c + d)
8380 v.Aux = symToAux(s)
8381 v.AddArg2(x, y)
8382 return true
8383 }
8384 break
8385 }
8386
8387
8388 for {
8389 c := auxIntToInt32(v.AuxInt)
8390 s := auxToSym(v.Aux)
8391 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8392 x := v_0
8393 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8394 continue
8395 }
8396 y := v_1.Args[0]
8397 v.reset(OpAMD64LEAL2)
8398 v.AuxInt = int32ToAuxInt(c)
8399 v.Aux = symToAux(s)
8400 v.AddArg2(x, y)
8401 return true
8402 }
8403 break
8404 }
8405
8406
8407 for {
8408 c := auxIntToInt32(v.AuxInt)
8409 s := auxToSym(v.Aux)
8410 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8411 x := v_0
8412 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8413 continue
8414 }
8415 y := v_1.Args[0]
8416 v.reset(OpAMD64LEAL4)
8417 v.AuxInt = int32ToAuxInt(c)
8418 v.Aux = symToAux(s)
8419 v.AddArg2(x, y)
8420 return true
8421 }
8422 break
8423 }
8424
8425
8426 for {
8427 c := auxIntToInt32(v.AuxInt)
8428 s := auxToSym(v.Aux)
8429 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8430 x := v_0
8431 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8432 continue
8433 }
8434 y := v_1.Args[0]
8435 v.reset(OpAMD64LEAL8)
8436 v.AuxInt = int32ToAuxInt(c)
8437 v.Aux = symToAux(s)
8438 v.AddArg2(x, y)
8439 return true
8440 }
8441 break
8442 }
8443 return false
8444 }
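// rewriteValueAMD64_OpAMD64LEAL2 folds constant offsets into LEAL2 (an offset
// folded out of the index is scaled by 2) and upgrades a shifted index to
// LEAL4 or LEAL8.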
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
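// rewriteValueAMD64_OpAMD64LEAL4 folds constant offsets into LEAL4 (an offset
// folded out of the index is scaled by 4) and upgrades a doubled index to LEAL8.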
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
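// rewriteValueAMD64_OpAMD64LEAL8 folds constant offsets into LEAL8 (an offset
// folded out of the index is scaled by 8).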
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
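// rewriteValueAMD64_OpAMD64LEAQ folds ADDQconst into the displacement, turns
// LEAQ of an ADDQ into LEAQ1, and collapses nested LEAQx values by merging
// offsets and symbols when the combined offset still fits in 32 bits.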
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
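// rewriteValueAMD64_OpAMD64LEAQ1 folds constant offsets, shifted indexes, and
// nested LEAQ/LEAQ1 values into scaled LEAQ forms; a LEAQ1 with zero offset and
// nil Aux degenerates to a plain ADDQ.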
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
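// rewriteValueAMD64_OpAMD64LEAQ2 folds constant offsets and nested address
// computations into LEAQ2, upgrades shifted indexes to LEAQ4 or LEAQ8, and
// collapses a constant index into the displacement of a plain LEAQ.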
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
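// rewriteValueAMD64_OpAMD64LEAQ4 folds constant offsets and nested address
// computations into LEAQ4, upgrades a doubled index to LEAQ8, and collapses a
// constant index into the displacement of a plain LEAQ.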
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
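// rewriteValueAMD64_OpAMD64LEAQ8 folds constant offsets and nested address
// computations into LEAQ8 and collapses a constant index into the displacement
// of a plain LEAQ.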
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
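// rewriteValueAMD64_OpAMD64MOVBELstore cancels the byte swap in a big-endian
// store: MOVBELstore of a BSWAPL value is just a plain MOVLstore.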
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBELstore [i] {s} p (BSWAPL x) m)
	// result: (MOVLstore [i] {s} p x m)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64BSWAPL {
			break
		}
		x := v_1.Args[0]
		m := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, x, m)
		return true
	}
	return false
}
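// rewriteValueAMD64_OpAMD64MOVBEQstore cancels the byte swap in a big-endian
// store: MOVBEQstore of a BSWAPQ value is just a plain MOVQstore.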
func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEQstore [i] {s} p (BSWAPQ x) m)
	// result: (MOVQstore [i] {s} p x m)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64BSWAPQ {
			break
		}
		x := v_1.Args[0]
		m := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, x, m)
		return true
	}
	return false
}
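// rewriteValueAMD64_OpAMD64MOVBEWstore rewrites a big-endian word store of a
// single-use ROLWconst [8] (a 16-bit byte swap) into a plain MOVWstore.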
func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
	// cond: x.Uses == 1
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
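// rewriteValueAMD64_OpAMD64MOVBQSX folds the sign extension into a single-use
// load (producing MOVBQSXload), narrows ANDLconst masks whose sign bit is
// already clear, and drops redundant nested extensions.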
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
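// rewriteValueAMD64_OpAMD64MOVBQSXload forwards a byte that was just stored to
// the same address and folds a LEAQ address computation into the load.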
func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
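// rewriteValueAMD64_OpAMD64MOVBQZX folds the zero extension into a single-use
// load, elides the extension when the upper 56 bits are already zero, and
// simplifies ANDLconst masks and nested extensions.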
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x, 3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
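// rewriteValueAMD64_OpAMD64MOVBatomicload folds ADDQconst and LEAQ address
// arithmetic into the atomic byte load's offset and symbol.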
func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
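// rewriteValueAMD64_OpAMD64MOVBload forwards a just-stored byte, folds address
// arithmetic into the load, and constant-folds loads from read-only symbols.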
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
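// rewriteValueAMD64_OpAMD64MOVBstore fuses single-use SETcc flags into
// SETccstore, drops redundant byte extensions of the stored value, folds
// address arithmetic, turns constant stores into MOVBstoreconst, and combines
// adjacent single-use byte stores into wider MOVWstore/MOVLstore/MOVQstore,
// inserting ROLWconst or BSWAP when the bytes were stored high-to-low.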
func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v_2
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x0 := v_2
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
	// result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		w := v_1
		x0 := v_2
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x2 := v_2
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 3)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
	// result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p3 := v_0
		w := v_1
		x2 := v_2
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		p2 := x2.Args[0]
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		p1 := x1.Args[0]
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x6 := v_2
		if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 7)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
	// result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p7 := v_0
		w := v_1
		x6 := v_2
		if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
			break
		}
		_ = x6.Args[2]
		p6 := x6.Args[0]
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
			break
		}
		_ = x5.Args[2]
		p5 := x5.Args[0]
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
			break
		}
		_ = x4.Args[2]
		p4 := x4.Args[0]
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
			break
		}
		_ = x3.Args[2]
		p3 := x3.Args[0]
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		p2 := x2.Args[0]
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		p1 := x1.Args[0]
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
			break
		}
		mem := x0.Args[2]
		p0 := x0.Args[0]
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	for {
		c3 := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p3 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
11012 break
11013 }
11014 w := v_1.Args[0]
11015 x1 := v_2
11016 if x1.Op != OpAMD64MOVWstore {
11017 break
11018 }
11019 c2 := auxIntToInt32(x1.AuxInt)
11020 if auxToSym(x1.Aux) != s {
11021 break
11022 }
11023 _ = x1.Args[2]
11024 p2 := x1.Args[0]
11025 x1_1 := x1.Args[1]
11026 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
11027 break
11028 }
11029 x2 := x1.Args[2]
11030 if x2.Op != OpAMD64MOVLstore {
11031 break
11032 }
11033 c1 := auxIntToInt32(x2.AuxInt)
11034 if auxToSym(x2.Aux) != s {
11035 break
11036 }
11037 _ = x2.Args[2]
11038 p1 := x2.Args[0]
11039 x2_1 := x2.Args[1]
11040 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
11041 break
11042 }
11043 x3 := x2.Args[2]
11044 if x3.Op != OpAMD64MOVBstore {
11045 break
11046 }
11047 c0 := auxIntToInt32(x3.AuxInt)
11048 if auxToSym(x3.Aux) != s {
11049 break
11050 }
11051 mem := x3.Args[2]
11052 p0 := x3.Args[0]
11053 if w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && sequentialAddresses(p0, p1, int64(1+c0-c1)) && sequentialAddresses(p0, p2, int64(5+c0-c2)) && sequentialAddresses(p0, p3, int64(7+c0-c3)) && clobber(x1, x2, x3)) {
11054 break
11055 }
11056 v.reset(OpAMD64MOVQstore)
11057 v.AuxInt = int32ToAuxInt(c0)
11058 v.Aux = symToAux(s)
11059 v.AddArg3(p0, w, mem)
11060 return true
11061 }
11062
11063
11064
11065 for {
11066 i := auxIntToInt32(v.AuxInt)
11067 s := auxToSym(v.Aux)
11068 p := v_0
11069 x1 := v_1
11070 if x1.Op != OpAMD64MOVBload {
11071 break
11072 }
11073 j := auxIntToInt32(x1.AuxInt)
11074 s2 := auxToSym(x1.Aux)
11075 mem := x1.Args[1]
11076 p2 := x1.Args[0]
11077 mem2 := v_2
11078 if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
11079 break
11080 }
11081 _ = mem2.Args[2]
11082 if p != mem2.Args[0] {
11083 break
11084 }
11085 x2 := mem2.Args[1]
11086 if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
11087 break
11088 }
11089 _ = x2.Args[1]
11090 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11091 break
11092 }
11093 v.reset(OpAMD64MOVWstore)
11094 v.AuxInt = int32ToAuxInt(i - 1)
11095 v.Aux = symToAux(s)
11096 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
11097 v0.AuxInt = int32ToAuxInt(j - 1)
11098 v0.Aux = symToAux(s2)
11099 v0.AddArg2(p2, mem)
11100 v.AddArg3(p, v0, mem)
11101 return true
11102 }
11103 return false
11104 }
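// The MOVBstoreconst rules below fold address arithmetic into the store's
// offset and merge adjacent byte stores of constants. The AuxInt is a
// ValAndOff packing both the value and the offset; merging a store of 0x12 at
// off with a store of 0x34 at off+1 yields, in effect, one MOVWstoreconst of
// 0x3412 at off (x86 is little-endian, so 0x12 stays at the lower address).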
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	// match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+1-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p0, mem)
		return true
	}
	return false
}
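// The MOVLQSX rules below eliminate redundant sign extensions: extending a
// narrow load becomes a single sign-extending load, an extension of an
// extension collapses, and an ANDLconst whose mask leaves the sign bit clear
// (uint32(c)&0x80000000 == 0) already yields a non-negative value, so only
// the mask is kept.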
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x, 3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
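// The atomic-load rules below mirror the plain-load ones: ADDQconst and LEAQ
// address computations are folded into the instruction's offset, with is32Bit
// guarding that the combined displacement still fits x86's signed 32-bit
// addressing mode.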
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
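// The MOVLstore rules below drop no-op extensions of the stored value, fold
// constants and address arithmetic, combine two 32-bit stores of the halves
// of one 64-bit value into a single MOVQstore, and fuse load/op/store
// sequences into read-modify-write instructions (ADDLmodify, ANDLmodify, and
// friends) when the intermediate values have no other uses.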
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(j - 4)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBELstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBELstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
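// The MOVLstoreconst rules below merge two adjacent 4-byte constant stores
// into one MOVQstore of a materialized 64-bit constant: the store at the
// lower address supplies the low 32 bits and the other is shifted into the
// high half (a.Val64()&0xffffffff | c.Val64()<<32).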
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p1 x:(MOVLstoreconst [a] {s} p0 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p0 (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p0 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p0, v0, mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p0 x:(MOVLstoreconst [c] {s} p1 mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p0 (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		p1 := x.Args[0]
		if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+4-c.Off())) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p0, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
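// Besides the usual offset folding, MOVOstore below replaces a 16-byte copy
// out of a read-only symbol with two immediate 8-byte stores whose values are
// read from the object file at compile time via read64.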
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore