// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

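// rewriteValueAMD64 dispatches on v.Op and applies any AMD64-specific
// rewrite rule that matches v, either by lowering a generic op directly
// (v.Op = ...) or by delegating to the per-op rewrite function. It reports
// whether v was changed, so the rewrite pass can iterate to a fixed point.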
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
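// Each rewriteValueAMD64_Op* function below tries the rewrite rules for a
// single opcode, in rule order. Every rule body is a "for { ... break }"
// block: a failed match exits with break (or continue inside the _i0/_i1
// loops, which try both argument orders of commutative operands), while a
// successful match rewrites v in place with v.reset or v.copyOf and
// returns true.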
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
2109 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2110 v_0 := v.Args[0]
2111
2112
2113 for {
2114 c := auxIntToInt32(v.AuxInt)
2115 if v_0.Op != OpAMD64ADDQ {
2116 break
2117 }
2118 y := v_0.Args[1]
2119 x := v_0.Args[0]
2120 v.reset(OpAMD64LEAQ1)
2121 v.AuxInt = int32ToAuxInt(c)
2122 v.AddArg2(x, y)
2123 return true
2124 }
2125
2126
2127 for {
2128 c := auxIntToInt32(v.AuxInt)
2129 if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
2130 break
2131 }
2132 x := v_0.Args[0]
2133 v.reset(OpAMD64LEAQ1)
2134 v.AuxInt = int32ToAuxInt(c)
2135 v.AddArg2(x, x)
2136 return true
2137 }
2138
2139
2140
2141 for {
2142 c := auxIntToInt32(v.AuxInt)
2143 if v_0.Op != OpAMD64LEAQ {
2144 break
2145 }
2146 d := auxIntToInt32(v_0.AuxInt)
2147 s := auxToSym(v_0.Aux)
2148 x := v_0.Args[0]
2149 if !(is32Bit(int64(c) + int64(d))) {
2150 break
2151 }
2152 v.reset(OpAMD64LEAQ)
2153 v.AuxInt = int32ToAuxInt(c + d)
2154 v.Aux = symToAux(s)
2155 v.AddArg(x)
2156 return true
2157 }
2158
2159
2160
2161 for {
2162 c := auxIntToInt32(v.AuxInt)
2163 if v_0.Op != OpAMD64LEAQ1 {
2164 break
2165 }
2166 d := auxIntToInt32(v_0.AuxInt)
2167 s := auxToSym(v_0.Aux)
2168 y := v_0.Args[1]
2169 x := v_0.Args[0]
2170 if !(is32Bit(int64(c) + int64(d))) {
2171 break
2172 }
2173 v.reset(OpAMD64LEAQ1)
2174 v.AuxInt = int32ToAuxInt(c + d)
2175 v.Aux = symToAux(s)
2176 v.AddArg2(x, y)
2177 return true
2178 }
2179
2180
2181
2182 for {
2183 c := auxIntToInt32(v.AuxInt)
2184 if v_0.Op != OpAMD64LEAQ2 {
2185 break
2186 }
2187 d := auxIntToInt32(v_0.AuxInt)
2188 s := auxToSym(v_0.Aux)
2189 y := v_0.Args[1]
2190 x := v_0.Args[0]
2191 if !(is32Bit(int64(c) + int64(d))) {
2192 break
2193 }
2194 v.reset(OpAMD64LEAQ2)
2195 v.AuxInt = int32ToAuxInt(c + d)
2196 v.Aux = symToAux(s)
2197 v.AddArg2(x, y)
2198 return true
2199 }
2200
2201
2202
2203 for {
2204 c := auxIntToInt32(v.AuxInt)
2205 if v_0.Op != OpAMD64LEAQ4 {
2206 break
2207 }
2208 d := auxIntToInt32(v_0.AuxInt)
2209 s := auxToSym(v_0.Aux)
2210 y := v_0.Args[1]
2211 x := v_0.Args[0]
2212 if !(is32Bit(int64(c) + int64(d))) {
2213 break
2214 }
2215 v.reset(OpAMD64LEAQ4)
2216 v.AuxInt = int32ToAuxInt(c + d)
2217 v.Aux = symToAux(s)
2218 v.AddArg2(x, y)
2219 return true
2220 }
2221 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
2222 // cond: is32Bit(int64(c)+int64(d))
2223 // result: (LEAQ8 [c+d] {s} x y)
2224 for {
2225 c := auxIntToInt32(v.AuxInt)
2226 if v_0.Op != OpAMD64LEAQ8 {
2227 break
2228 }
2229 d := auxIntToInt32(v_0.AuxInt)
2230 s := auxToSym(v_0.Aux)
2231 y := v_0.Args[1]
2232 x := v_0.Args[0]
2233 if !(is32Bit(int64(c) + int64(d))) {
2234 break
2235 }
2236 v.reset(OpAMD64LEAQ8)
2237 v.AuxInt = int32ToAuxInt(c + d)
2238 v.Aux = symToAux(s)
2239 v.AddArg2(x, y)
2240 return true
2241 }
2242 // match: (ADDQconst [0] x)
2243 // result: x
2244 for {
2245 if auxIntToInt32(v.AuxInt) != 0 {
2246 break
2247 }
2248 x := v_0
2249 v.copyOf(x)
2250 return true
2251 }
2252 // match: (ADDQconst [c] (MOVQconst [d]))
2253 // result: (MOVQconst [int64(c)+d])
2254 for {
2255 c := auxIntToInt32(v.AuxInt)
2256 if v_0.Op != OpAMD64MOVQconst {
2257 break
2258 }
2259 d := auxIntToInt64(v_0.AuxInt)
2260 v.reset(OpAMD64MOVQconst)
2261 v.AuxInt = int64ToAuxInt(int64(c) + d)
2262 return true
2263 }
2264 // match: (ADDQconst [c] (ADDQconst [d] x))
2265 // cond: is32Bit(int64(c)+int64(d))
2266 // result: (ADDQconst [c+d] x)
2267 for {
2268 c := auxIntToInt32(v.AuxInt)
2269 if v_0.Op != OpAMD64ADDQconst {
2270 break
2271 }
2272 d := auxIntToInt32(v_0.AuxInt)
2273 x := v_0.Args[0]
2274 if !(is32Bit(int64(c) + int64(d))) {
2275 break
2276 }
2277 v.reset(OpAMD64ADDQconst)
2278 v.AuxInt = int32ToAuxInt(c + d)
2279 v.AddArg(x)
2280 return true
2281 }
2282 // match: (ADDQconst [off] x:(SP))
2283 // result: (LEAQ [off] x)
2284 for {
2285 off := auxIntToInt32(v.AuxInt)
2286 x := v_0
2287 if x.Op != OpSP {
2288 break
2289 }
2290 v.reset(OpAMD64LEAQ)
2291 v.AuxInt = int32ToAuxInt(off)
2292 v.AddArg(x)
2293 return true
2294 }
2295 return false
2296 }
2297 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2298 v_1 := v.Args[1]
2299 v_0 := v.Args[0]
2300 // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2301 // cond: ValAndOff(valoff1).canAdd32(off2)
2302 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2303 for {
2304 valoff1 := auxIntToValAndOff(v.AuxInt)
2305 sym := auxToSym(v.Aux)
2306 if v_0.Op != OpAMD64ADDQconst {
2307 break
2308 }
2309 off2 := auxIntToInt32(v_0.AuxInt)
2310 base := v_0.Args[0]
2311 mem := v_1
2312 if !(ValAndOff(valoff1).canAdd32(off2)) {
2313 break
2314 }
2315 v.reset(OpAMD64ADDQconstmodify)
2316 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2317 v.Aux = symToAux(sym)
2318 v.AddArg2(base, mem)
2319 return true
2320 }
2321 // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2322 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2323 // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2324 for {
2325 valoff1 := auxIntToValAndOff(v.AuxInt)
2326 sym1 := auxToSym(v.Aux)
2327 if v_0.Op != OpAMD64LEAQ {
2328 break
2329 }
2330 off2 := auxIntToInt32(v_0.AuxInt)
2331 sym2 := auxToSym(v_0.Aux)
2332 base := v_0.Args[0]
2333 mem := v_1
2334 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2335 break
2336 }
2337 v.reset(OpAMD64ADDQconstmodify)
2338 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2339 v.Aux = symToAux(mergeSym(sym1, sym2))
2340 v.AddArg2(base, mem)
2341 return true
2342 }
2343 return false
2344 }
2345 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2346 v_2 := v.Args[2]
2347 v_1 := v.Args[1]
2348 v_0 := v.Args[0]
2349 b := v.Block
2350 typ := &b.Func.Config.Types
2351 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
2352 // cond: is32Bit(int64(off1)+int64(off2))
2353 // result: (ADDQload [off1+off2] {sym} val base mem)
2354 for {
2355 off1 := auxIntToInt32(v.AuxInt)
2356 sym := auxToSym(v.Aux)
2357 val := v_0
2358 if v_1.Op != OpAMD64ADDQconst {
2359 break
2360 }
2361 off2 := auxIntToInt32(v_1.AuxInt)
2362 base := v_1.Args[0]
2363 mem := v_2
2364 if !(is32Bit(int64(off1) + int64(off2))) {
2365 break
2366 }
2367 v.reset(OpAMD64ADDQload)
2368 v.AuxInt = int32ToAuxInt(off1 + off2)
2369 v.Aux = symToAux(sym)
2370 v.AddArg3(val, base, mem)
2371 return true
2372 }
2373 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2374 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2375 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2376 for {
2377 off1 := auxIntToInt32(v.AuxInt)
2378 sym1 := auxToSym(v.Aux)
2379 val := v_0
2380 if v_1.Op != OpAMD64LEAQ {
2381 break
2382 }
2383 off2 := auxIntToInt32(v_1.AuxInt)
2384 sym2 := auxToSym(v_1.Aux)
2385 base := v_1.Args[0]
2386 mem := v_2
2387 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2388 break
2389 }
2390 v.reset(OpAMD64ADDQload)
2391 v.AuxInt = int32ToAuxInt(off1 + off2)
2392 v.Aux = symToAux(mergeSym(sym1, sym2))
2393 v.AddArg3(val, base, mem)
2394 return true
2395 }
2396 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
2397 // result: (ADDQ x (MOVQf2i y))
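// note: when the slot was just written by a float store to the same address,
// the value is recovered with a register move (MOVQf2i) instead of a reload.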
2398 for {
2399 off := auxIntToInt32(v.AuxInt)
2400 sym := auxToSym(v.Aux)
2401 x := v_0
2402 ptr := v_1
2403 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2404 break
2405 }
2406 y := v_2.Args[1]
2407 if ptr != v_2.Args[0] {
2408 break
2409 }
2410 v.reset(OpAMD64ADDQ)
2411 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2412 v0.AddArg(y)
2413 v.AddArg2(x, v0)
2414 return true
2415 }
2416 return false
2417 }
2418 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2419 v_2 := v.Args[2]
2420 v_1 := v.Args[1]
2421 v_0 := v.Args[0]
2422 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2423 // cond: is32Bit(int64(off1)+int64(off2))
2424 // result: (ADDQmodify [off1+off2] {sym} base val mem)
2425 for {
2426 off1 := auxIntToInt32(v.AuxInt)
2427 sym := auxToSym(v.Aux)
2428 if v_0.Op != OpAMD64ADDQconst {
2429 break
2430 }
2431 off2 := auxIntToInt32(v_0.AuxInt)
2432 base := v_0.Args[0]
2433 val := v_1
2434 mem := v_2
2435 if !(is32Bit(int64(off1) + int64(off2))) {
2436 break
2437 }
2438 v.reset(OpAMD64ADDQmodify)
2439 v.AuxInt = int32ToAuxInt(off1 + off2)
2440 v.Aux = symToAux(sym)
2441 v.AddArg3(base, val, mem)
2442 return true
2443 }
2444 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2445 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2446 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2447 for {
2448 off1 := auxIntToInt32(v.AuxInt)
2449 sym1 := auxToSym(v.Aux)
2450 if v_0.Op != OpAMD64LEAQ {
2451 break
2452 }
2453 off2 := auxIntToInt32(v_0.AuxInt)
2454 sym2 := auxToSym(v_0.Aux)
2455 base := v_0.Args[0]
2456 val := v_1
2457 mem := v_2
2458 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2459 break
2460 }
2461 v.reset(OpAMD64ADDQmodify)
2462 v.AuxInt = int32ToAuxInt(off1 + off2)
2463 v.Aux = symToAux(mergeSym(sym1, sym2))
2464 v.AddArg3(base, val, mem)
2465 return true
2466 }
2467 return false
2468 }
2469 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2470 v_1 := v.Args[1]
2471 v_0 := v.Args[0]
2472 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
2473 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2474 // result: (ADDSDload x [off] {sym} ptr mem)
2475 for {
2476 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2477 x := v_0
2478 l := v_1
2479 if l.Op != OpAMD64MOVSDload {
2480 continue
2481 }
2482 off := auxIntToInt32(l.AuxInt)
2483 sym := auxToSym(l.Aux)
2484 mem := l.Args[1]
2485 ptr := l.Args[0]
2486 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2487 continue
2488 }
2489 v.reset(OpAMD64ADDSDload)
2490 v.AuxInt = int32ToAuxInt(off)
2491 v.Aux = symToAux(sym)
2492 v.AddArg3(x, ptr, mem)
2493 return true
2494 }
2495 break
2496 }
2497 return false
2498 }
2499 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2500 v_2 := v.Args[2]
2501 v_1 := v.Args[1]
2502 v_0 := v.Args[0]
2503 b := v.Block
2504 typ := &b.Func.Config.Types
2505 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
2506 // cond: is32Bit(int64(off1)+int64(off2))
2507 // result: (ADDSDload [off1+off2] {sym} val base mem)
2508 for {
2509 off1 := auxIntToInt32(v.AuxInt)
2510 sym := auxToSym(v.Aux)
2511 val := v_0
2512 if v_1.Op != OpAMD64ADDQconst {
2513 break
2514 }
2515 off2 := auxIntToInt32(v_1.AuxInt)
2516 base := v_1.Args[0]
2517 mem := v_2
2518 if !(is32Bit(int64(off1) + int64(off2))) {
2519 break
2520 }
2521 v.reset(OpAMD64ADDSDload)
2522 v.AuxInt = int32ToAuxInt(off1 + off2)
2523 v.Aux = symToAux(sym)
2524 v.AddArg3(val, base, mem)
2525 return true
2526 }
2527 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2528 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2529 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2530 for {
2531 off1 := auxIntToInt32(v.AuxInt)
2532 sym1 := auxToSym(v.Aux)
2533 val := v_0
2534 if v_1.Op != OpAMD64LEAQ {
2535 break
2536 }
2537 off2 := auxIntToInt32(v_1.AuxInt)
2538 sym2 := auxToSym(v_1.Aux)
2539 base := v_1.Args[0]
2540 mem := v_2
2541 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2542 break
2543 }
2544 v.reset(OpAMD64ADDSDload)
2545 v.AuxInt = int32ToAuxInt(off1 + off2)
2546 v.Aux = symToAux(mergeSym(sym1, sym2))
2547 v.AddArg3(val, base, mem)
2548 return true
2549 }
2550 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
2551 // result: (ADDSD x (MOVQi2f y))
2552 for {
2553 off := auxIntToInt32(v.AuxInt)
2554 sym := auxToSym(v.Aux)
2555 x := v_0
2556 ptr := v_1
2557 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2558 break
2559 }
2560 y := v_2.Args[1]
2561 if ptr != v_2.Args[0] {
2562 break
2563 }
2564 v.reset(OpAMD64ADDSD)
2565 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2566 v0.AddArg(y)
2567 v.AddArg2(x, v0)
2568 return true
2569 }
2570 return false
2571 }
2572 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2573 v_1 := v.Args[1]
2574 v_0 := v.Args[0]
2575 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
2576 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2577 // result: (ADDSSload x [off] {sym} ptr mem)
2578 for {
2579 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2580 x := v_0
2581 l := v_1
2582 if l.Op != OpAMD64MOVSSload {
2583 continue
2584 }
2585 off := auxIntToInt32(l.AuxInt)
2586 sym := auxToSym(l.Aux)
2587 mem := l.Args[1]
2588 ptr := l.Args[0]
2589 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2590 continue
2591 }
2592 v.reset(OpAMD64ADDSSload)
2593 v.AuxInt = int32ToAuxInt(off)
2594 v.Aux = symToAux(sym)
2595 v.AddArg3(x, ptr, mem)
2596 return true
2597 }
2598 break
2599 }
2600 return false
2601 }
2602 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2603 v_2 := v.Args[2]
2604 v_1 := v.Args[1]
2605 v_0 := v.Args[0]
2606 b := v.Block
2607 typ := &b.Func.Config.Types
2608 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
2609 // cond: is32Bit(int64(off1)+int64(off2))
2610 // result: (ADDSSload [off1+off2] {sym} val base mem)
2611 for {
2612 off1 := auxIntToInt32(v.AuxInt)
2613 sym := auxToSym(v.Aux)
2614 val := v_0
2615 if v_1.Op != OpAMD64ADDQconst {
2616 break
2617 }
2618 off2 := auxIntToInt32(v_1.AuxInt)
2619 base := v_1.Args[0]
2620 mem := v_2
2621 if !(is32Bit(int64(off1) + int64(off2))) {
2622 break
2623 }
2624 v.reset(OpAMD64ADDSSload)
2625 v.AuxInt = int32ToAuxInt(off1 + off2)
2626 v.Aux = symToAux(sym)
2627 v.AddArg3(val, base, mem)
2628 return true
2629 }
2630 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2631 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
2632 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2633 for {
2634 off1 := auxIntToInt32(v.AuxInt)
2635 sym1 := auxToSym(v.Aux)
2636 val := v_0
2637 if v_1.Op != OpAMD64LEAQ {
2638 break
2639 }
2640 off2 := auxIntToInt32(v_1.AuxInt)
2641 sym2 := auxToSym(v_1.Aux)
2642 base := v_1.Args[0]
2643 mem := v_2
2644 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2645 break
2646 }
2647 v.reset(OpAMD64ADDSSload)
2648 v.AuxInt = int32ToAuxInt(off1 + off2)
2649 v.Aux = symToAux(mergeSym(sym1, sym2))
2650 v.AddArg3(val, base, mem)
2651 return true
2652 }
2653 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
2654 // result: (ADDSS x (MOVLi2f y))
2655 for {
2656 off := auxIntToInt32(v.AuxInt)
2657 sym := auxToSym(v.Aux)
2658 x := v_0
2659 ptr := v_1
2660 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2661 break
2662 }
2663 y := v_2.Args[1]
2664 if ptr != v_2.Args[0] {
2665 break
2666 }
2667 v.reset(OpAMD64ADDSS)
2668 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2669 v0.AddArg(y)
2670 v.AddArg2(x, v0)
2671 return true
2672 }
2673 return false
2674 }
2675 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2676 v_1 := v.Args[1]
2677 v_0 := v.Args[0]
2678 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
2679 // result: (BTRL x y)
2680 for {
2681 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2682 if v_0.Op != OpAMD64NOTL {
2683 continue
2684 }
2685 v_0_0 := v_0.Args[0]
2686 if v_0_0.Op != OpAMD64SHLL {
2687 continue
2688 }
2689 y := v_0_0.Args[1]
2690 v_0_0_0 := v_0_0.Args[0]
2691 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2692 continue
2693 }
2694 x := v_1
2695 v.reset(OpAMD64BTRL)
2696 v.AddArg2(x, y)
2697 return true
2698 }
2699 break
2700 }
2701 // match: (ANDL (MOVLconst [c]) x)
2702 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2703 // result: (BTRLconst [int8(log32(^c))] x)
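// note: ^c being a power of two means the AND clears exactly one bit; the
// >= 128 guard presumably keeps small masks on ANDL, whose 8-bit immediate
// form is shorter than BTR (an assumption about the rule's intent).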
2704 for {
2705 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2706 if v_0.Op != OpAMD64MOVLconst {
2707 continue
2708 }
2709 c := auxIntToInt32(v_0.AuxInt)
2710 x := v_1
2711 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2712 continue
2713 }
2714 v.reset(OpAMD64BTRLconst)
2715 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2716 v.AddArg(x)
2717 return true
2718 }
2719 break
2720 }
2721 // match: (ANDL x (MOVLconst [c]))
2722 // result: (ANDLconst [c] x)
2723 for {
2724 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2725 x := v_0
2726 if v_1.Op != OpAMD64MOVLconst {
2727 continue
2728 }
2729 c := auxIntToInt32(v_1.AuxInt)
2730 v.reset(OpAMD64ANDLconst)
2731 v.AuxInt = int32ToAuxInt(c)
2732 v.AddArg(x)
2733 return true
2734 }
2735 break
2736 }
2737 // match: (ANDL x x)
2738 // result: x
2739 for {
2740 x := v_0
2741 if x != v_1 {
2742 break
2743 }
2744 v.copyOf(x)
2745 return true
2746 }
2747 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
2748 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2749 // result: (ANDLload x [off] {sym} ptr mem)
2750 for {
2751 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2752 x := v_0
2753 l := v_1
2754 if l.Op != OpAMD64MOVLload {
2755 continue
2756 }
2757 off := auxIntToInt32(l.AuxInt)
2758 sym := auxToSym(l.Aux)
2759 mem := l.Args[1]
2760 ptr := l.Args[0]
2761 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2762 continue
2763 }
2764 v.reset(OpAMD64ANDLload)
2765 v.AuxInt = int32ToAuxInt(off)
2766 v.Aux = symToAux(sym)
2767 v.AddArg3(x, ptr, mem)
2768 return true
2769 }
2770 break
2771 }
2772 // match: (ANDL x (NOTL y))
2773 // cond: buildcfg.GOAMD64 >= 3
2774 // result: (ANDNL x y)
2775 for {
2776 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2777 x := v_0
2778 if v_1.Op != OpAMD64NOTL {
2779 continue
2780 }
2781 y := v_1.Args[0]
2782 if !(buildcfg.GOAMD64 >= 3) {
2783 continue
2784 }
2785 v.reset(OpAMD64ANDNL)
2786 v.AddArg2(x, y)
2787 return true
2788 }
2789 break
2790 }
2791 // match: (ANDL x (NEGL x))
2792 // cond: buildcfg.GOAMD64 >= 3
2793 // result: (BLSIL x)
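// note: x & -x isolates the lowest set bit; BMI1's BLSI does this in one
// instruction.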
2794 for {
2795 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2796 x := v_0
2797 if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2798 continue
2799 }
2800 v.reset(OpAMD64BLSIL)
2801 v.AddArg(x)
2802 return true
2803 }
2804 break
2805 }
2806 // match: (ANDL x (ADDLconst [-1] x))
2807 // cond: buildcfg.GOAMD64 >= 3
2808 // result: (BLSRL x)
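// note: x & (x-1) clears the lowest set bit; BMI1's BLSR does this in one
// instruction, hence the GOAMD64 >= 3 gate.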
2809 for {
2810 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2811 x := v_0
2812 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2813 continue
2814 }
2815 v.reset(OpAMD64BLSRL)
2816 v.AddArg(x)
2817 return true
2818 }
2819 break
2820 }
2821 return false
2822 }
2823 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2824 v_0 := v.Args[0]
2825 // match: (ANDLconst [c] x)
2826 // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
2827 // result: (BTRLconst [int8(log32(^c))] x)
2828 for {
2829 c := auxIntToInt32(v.AuxInt)
2830 x := v_0
2831 if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
2832 break
2833 }
2834 v.reset(OpAMD64BTRLconst)
2835 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
2836 v.AddArg(x)
2837 return true
2838 }
2839 // match: (ANDLconst [c] (ANDLconst [d] x))
2840 // result: (ANDLconst [c & d] x)
2841 for {
2842 c := auxIntToInt32(v.AuxInt)
2843 if v_0.Op != OpAMD64ANDLconst {
2844 break
2845 }
2846 d := auxIntToInt32(v_0.AuxInt)
2847 x := v_0.Args[0]
2848 v.reset(OpAMD64ANDLconst)
2849 v.AuxInt = int32ToAuxInt(c & d)
2850 v.AddArg(x)
2851 return true
2852 }
2853 // match: (ANDLconst [c] (BTRLconst [d] x))
2854 // result: (ANDLconst [c &^ (1<<uint32(d))] x)
2855 for {
2856 c := auxIntToInt32(v.AuxInt)
2857 if v_0.Op != OpAMD64BTRLconst {
2858 break
2859 }
2860 d := auxIntToInt8(v_0.AuxInt)
2861 x := v_0.Args[0]
2862 v.reset(OpAMD64ANDLconst)
2863 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
2864 v.AddArg(x)
2865 return true
2866 }
2867 // match: (ANDLconst [0xFF] x)
2868 // result: (MOVBQZX x)
2869 for {
2870 if auxIntToInt32(v.AuxInt) != 0xFF {
2871 break
2872 }
2873 x := v_0
2874 v.reset(OpAMD64MOVBQZX)
2875 v.AddArg(x)
2876 return true
2877 }
2878 // match: (ANDLconst [0xFFFF] x)
2879 // result: (MOVWQZX x)
2880 for {
2881 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2882 break
2883 }
2884 x := v_0
2885 v.reset(OpAMD64MOVWQZX)
2886 v.AddArg(x)
2887 return true
2888 }
2889 // match: (ANDLconst [c] _)
2890 // cond: c==0
2891 // result: (MOVLconst [0])
2892 for {
2893 c := auxIntToInt32(v.AuxInt)
2894 if !(c == 0) {
2895 break
2896 }
2897 v.reset(OpAMD64MOVLconst)
2898 v.AuxInt = int32ToAuxInt(0)
2899 return true
2900 }
2901 // match: (ANDLconst [c] x)
2902 // cond: c==-1
2903 // result: x
2904 for {
2905 c := auxIntToInt32(v.AuxInt)
2906 x := v_0
2907 if !(c == -1) {
2908 break
2909 }
2910 v.copyOf(x)
2911 return true
2912 }
2913 // match: (ANDLconst [c] (MOVLconst [d]))
2914 // result: (MOVLconst [c&d])
2915 for {
2916 c := auxIntToInt32(v.AuxInt)
2917 if v_0.Op != OpAMD64MOVLconst {
2918 break
2919 }
2920 d := auxIntToInt32(v_0.AuxInt)
2921 v.reset(OpAMD64MOVLconst)
2922 v.AuxInt = int32ToAuxInt(c & d)
2923 return true
2924 }
2925 return false
2926 }
2927 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2928 v_1 := v.Args[1]
2929 v_0 := v.Args[0]
2930 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2931 // cond: ValAndOff(valoff1).canAdd32(off2)
2932 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2933 for {
2934 valoff1 := auxIntToValAndOff(v.AuxInt)
2935 sym := auxToSym(v.Aux)
2936 if v_0.Op != OpAMD64ADDQconst {
2937 break
2938 }
2939 off2 := auxIntToInt32(v_0.AuxInt)
2940 base := v_0.Args[0]
2941 mem := v_1
2942 if !(ValAndOff(valoff1).canAdd32(off2)) {
2943 break
2944 }
2945 v.reset(OpAMD64ANDLconstmodify)
2946 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2947 v.Aux = symToAux(sym)
2948 v.AddArg2(base, mem)
2949 return true
2950 }
2951 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2952 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
2953 // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2954 for {
2955 valoff1 := auxIntToValAndOff(v.AuxInt)
2956 sym1 := auxToSym(v.Aux)
2957 if v_0.Op != OpAMD64LEAQ {
2958 break
2959 }
2960 off2 := auxIntToInt32(v_0.AuxInt)
2961 sym2 := auxToSym(v_0.Aux)
2962 base := v_0.Args[0]
2963 mem := v_1
2964 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2965 break
2966 }
2967 v.reset(OpAMD64ANDLconstmodify)
2968 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2969 v.Aux = symToAux(mergeSym(sym1, sym2))
2970 v.AddArg2(base, mem)
2971 return true
2972 }
2973 return false
2974 }
2975 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2976 v_2 := v.Args[2]
2977 v_1 := v.Args[1]
2978 v_0 := v.Args[0]
2979 b := v.Block
2980 typ := &b.Func.Config.Types
2981 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2982 // cond: is32Bit(int64(off1)+int64(off2))
2983 // result: (ANDLload [off1+off2] {sym} val base mem)
2984 for {
2985 off1 := auxIntToInt32(v.AuxInt)
2986 sym := auxToSym(v.Aux)
2987 val := v_0
2988 if v_1.Op != OpAMD64ADDQconst {
2989 break
2990 }
2991 off2 := auxIntToInt32(v_1.AuxInt)
2992 base := v_1.Args[0]
2993 mem := v_2
2994 if !(is32Bit(int64(off1) + int64(off2))) {
2995 break
2996 }
2997 v.reset(OpAMD64ANDLload)
2998 v.AuxInt = int32ToAuxInt(off1 + off2)
2999 v.Aux = symToAux(sym)
3000 v.AddArg3(val, base, mem)
3001 return true
3002 }
3003 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3004 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3005 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3006 for {
3007 off1 := auxIntToInt32(v.AuxInt)
3008 sym1 := auxToSym(v.Aux)
3009 val := v_0
3010 if v_1.Op != OpAMD64LEAQ {
3011 break
3012 }
3013 off2 := auxIntToInt32(v_1.AuxInt)
3014 sym2 := auxToSym(v_1.Aux)
3015 base := v_1.Args[0]
3016 mem := v_2
3017 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3018 break
3019 }
3020 v.reset(OpAMD64ANDLload)
3021 v.AuxInt = int32ToAuxInt(off1 + off2)
3022 v.Aux = symToAux(mergeSym(sym1, sym2))
3023 v.AddArg3(val, base, mem)
3024 return true
3025 }
3026 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
3027 // result: (ANDL x (MOVLf2i y))
3028 for {
3029 off := auxIntToInt32(v.AuxInt)
3030 sym := auxToSym(v.Aux)
3031 x := v_0
3032 ptr := v_1
3033 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3034 break
3035 }
3036 y := v_2.Args[1]
3037 if ptr != v_2.Args[0] {
3038 break
3039 }
3040 v.reset(OpAMD64ANDL)
3041 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
3042 v0.AddArg(y)
3043 v.AddArg2(x, v0)
3044 return true
3045 }
3046 return false
3047 }
3048 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
3049 v_2 := v.Args[2]
3050 v_1 := v.Args[1]
3051 v_0 := v.Args[0]
3052 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3053 // cond: is32Bit(int64(off1)+int64(off2))
3054 // result: (ANDLmodify [off1+off2] {sym} base val mem)
3055 for {
3056 off1 := auxIntToInt32(v.AuxInt)
3057 sym := auxToSym(v.Aux)
3058 if v_0.Op != OpAMD64ADDQconst {
3059 break
3060 }
3061 off2 := auxIntToInt32(v_0.AuxInt)
3062 base := v_0.Args[0]
3063 val := v_1
3064 mem := v_2
3065 if !(is32Bit(int64(off1) + int64(off2))) {
3066 break
3067 }
3068 v.reset(OpAMD64ANDLmodify)
3069 v.AuxInt = int32ToAuxInt(off1 + off2)
3070 v.Aux = symToAux(sym)
3071 v.AddArg3(base, val, mem)
3072 return true
3073 }
3074 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3075 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3076 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3077 for {
3078 off1 := auxIntToInt32(v.AuxInt)
3079 sym1 := auxToSym(v.Aux)
3080 if v_0.Op != OpAMD64LEAQ {
3081 break
3082 }
3083 off2 := auxIntToInt32(v_0.AuxInt)
3084 sym2 := auxToSym(v_0.Aux)
3085 base := v_0.Args[0]
3086 val := v_1
3087 mem := v_2
3088 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3089 break
3090 }
3091 v.reset(OpAMD64ANDLmodify)
3092 v.AuxInt = int32ToAuxInt(off1 + off2)
3093 v.Aux = symToAux(mergeSym(sym1, sym2))
3094 v.AddArg3(base, val, mem)
3095 return true
3096 }
3097 return false
3098 }
3099 func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
3100 v_1 := v.Args[1]
3101 v_0 := v.Args[0]
3102 // match: (ANDNL x (SHLL (MOVLconst [1]) y))
3103 // result: (BTRL x y)
3104 for {
3105 x := v_0
3106 if v_1.Op != OpAMD64SHLL {
3107 break
3108 }
3109 y := v_1.Args[1]
3110 v_1_0 := v_1.Args[0]
3111 if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
3112 break
3113 }
3114 v.reset(OpAMD64BTRL)
3115 v.AddArg2(x, y)
3116 return true
3117 }
3118 return false
3119 }
3120 func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
3121 v_1 := v.Args[1]
3122 v_0 := v.Args[0]
3123 // match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
3124 // result: (BTRQ x y)
3125 for {
3126 x := v_0
3127 if v_1.Op != OpAMD64SHLQ {
3128 break
3129 }
3130 y := v_1.Args[1]
3131 v_1_0 := v_1.Args[0]
3132 if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
3133 break
3134 }
3135 v.reset(OpAMD64BTRQ)
3136 v.AddArg2(x, y)
3137 return true
3138 }
3139 return false
3140 }
3141 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3142 v_1 := v.Args[1]
3143 v_0 := v.Args[0]
3144 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3145 // result: (BTRQ x y)
3146 for {
3147 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3148 if v_0.Op != OpAMD64NOTQ {
3149 continue
3150 }
3151 v_0_0 := v_0.Args[0]
3152 if v_0_0.Op != OpAMD64SHLQ {
3153 continue
3154 }
3155 y := v_0_0.Args[1]
3156 v_0_0_0 := v_0_0.Args[0]
3157 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3158 continue
3159 }
3160 x := v_1
3161 v.reset(OpAMD64BTRQ)
3162 v.AddArg2(x, y)
3163 return true
3164 }
3165 break
3166 }
3167 // match: (ANDQ (MOVQconst [c]) x)
3168 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3169 // result: (BTRQconst [int8(log64(^c))] x)
3170 for {
3171 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3172 if v_0.Op != OpAMD64MOVQconst {
3173 continue
3174 }
3175 c := auxIntToInt64(v_0.AuxInt)
3176 x := v_1
3177 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3178 continue
3179 }
3180 v.reset(OpAMD64BTRQconst)
3181 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3182 v.AddArg(x)
3183 return true
3184 }
3185 break
3186 }
3187 // match: (ANDQ x (MOVQconst [c]))
3188 // cond: is32Bit(c)
3189 // result: (ANDQconst [int32(c)] x)
3190 for {
3191 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3192 x := v_0
3193 if v_1.Op != OpAMD64MOVQconst {
3194 continue
3195 }
3196 c := auxIntToInt64(v_1.AuxInt)
3197 if !(is32Bit(c)) {
3198 continue
3199 }
3200 v.reset(OpAMD64ANDQconst)
3201 v.AuxInt = int32ToAuxInt(int32(c))
3202 v.AddArg(x)
3203 return true
3204 }
3205 break
3206 }
3207 // match: (ANDQ x x)
3208 // result: x
3209 for {
3210 x := v_0
3211 if x != v_1 {
3212 break
3213 }
3214 v.copyOf(x)
3215 return true
3216 }
3217 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
3218 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3219 // result: (ANDQload x [off] {sym} ptr mem)
3220 for {
3221 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3222 x := v_0
3223 l := v_1
3224 if l.Op != OpAMD64MOVQload {
3225 continue
3226 }
3227 off := auxIntToInt32(l.AuxInt)
3228 sym := auxToSym(l.Aux)
3229 mem := l.Args[1]
3230 ptr := l.Args[0]
3231 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3232 continue
3233 }
3234 v.reset(OpAMD64ANDQload)
3235 v.AuxInt = int32ToAuxInt(off)
3236 v.Aux = symToAux(sym)
3237 v.AddArg3(x, ptr, mem)
3238 return true
3239 }
3240 break
3241 }
3242 // match: (ANDQ x (NOTQ y))
3243 // cond: buildcfg.GOAMD64 >= 3
3244 // result: (ANDNQ x y)
3245 for {
3246 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3247 x := v_0
3248 if v_1.Op != OpAMD64NOTQ {
3249 continue
3250 }
3251 y := v_1.Args[0]
3252 if !(buildcfg.GOAMD64 >= 3) {
3253 continue
3254 }
3255 v.reset(OpAMD64ANDNQ)
3256 v.AddArg2(x, y)
3257 return true
3258 }
3259 break
3260 }
3261 // match: (ANDQ x (NEGQ x))
3262 // cond: buildcfg.GOAMD64 >= 3
3263 // result: (BLSIQ x)
3264 for {
3265 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3266 x := v_0
3267 if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3268 continue
3269 }
3270 v.reset(OpAMD64BLSIQ)
3271 v.AddArg(x)
3272 return true
3273 }
3274 break
3275 }
3276 // match: (ANDQ x (ADDQconst [-1] x))
3277 // cond: buildcfg.GOAMD64 >= 3
3278 // result: (BLSRQ x)
3279 for {
3280 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3281 x := v_0
3282 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3283 continue
3284 }
3285 v.reset(OpAMD64BLSRQ)
3286 v.AddArg(x)
3287 return true
3288 }
3289 break
3290 }
3291 return false
3292 }
3293 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3294 v_0 := v.Args[0]
3295 // match: (ANDQconst [c] x)
3296 // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
3297 // result: (BTRQconst [int8(log32(^c))] x)
3298 for {
3299 c := auxIntToInt32(v.AuxInt)
3300 x := v_0
3301 if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
3302 break
3303 }
3304 v.reset(OpAMD64BTRQconst)
3305 v.AuxInt = int8ToAuxInt(int8(log32(^c)))
3306 v.AddArg(x)
3307 return true
3308 }
3309 // match: (ANDQconst [c] (ANDQconst [d] x))
3310 // result: (ANDQconst [c & d] x)
3311 for {
3312 c := auxIntToInt32(v.AuxInt)
3313 if v_0.Op != OpAMD64ANDQconst {
3314 break
3315 }
3316 d := auxIntToInt32(v_0.AuxInt)
3317 x := v_0.Args[0]
3318 v.reset(OpAMD64ANDQconst)
3319 v.AuxInt = int32ToAuxInt(c & d)
3320 v.AddArg(x)
3321 return true
3322 }
3323 // match: (ANDQconst [c] (BTRQconst [d] x))
3324 // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
3325 // result: (ANDQconst [c &^ (1<<uint32(d))] x)
3326 for {
3327 c := auxIntToInt32(v.AuxInt)
3328 if v_0.Op != OpAMD64BTRQconst {
3329 break
3330 }
3331 d := auxIntToInt8(v_0.AuxInt)
3332 x := v_0.Args[0]
3333 if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
3334 break
3335 }
3336 v.reset(OpAMD64ANDQconst)
3337 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
3338 v.AddArg(x)
3339 return true
3340 }
3341 // match: (ANDQconst [0xFF] x)
3342 // result: (MOVBQZX x)
3343 for {
3344 if auxIntToInt32(v.AuxInt) != 0xFF {
3345 break
3346 }
3347 x := v_0
3348 v.reset(OpAMD64MOVBQZX)
3349 v.AddArg(x)
3350 return true
3351 }
3352 // match: (ANDQconst [0xFFFF] x)
3353 // result: (MOVWQZX x)
3354 for {
3355 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3356 break
3357 }
3358 x := v_0
3359 v.reset(OpAMD64MOVWQZX)
3360 v.AddArg(x)
3361 return true
3362 }
3363 // match: (ANDQconst [0] _)
3364 // result: (MOVQconst [0])
3365 for {
3366 if auxIntToInt32(v.AuxInt) != 0 {
3367 break
3368 }
3369 v.reset(OpAMD64MOVQconst)
3370 v.AuxInt = int64ToAuxInt(0)
3371 return true
3372 }
3373 // match: (ANDQconst [-1] x)
3374 // result: x
3375 for {
3376 if auxIntToInt32(v.AuxInt) != -1 {
3377 break
3378 }
3379 x := v_0
3380 v.copyOf(x)
3381 return true
3382 }
3383 // match: (ANDQconst [c] (MOVQconst [d]))
3384 // result: (MOVQconst [int64(c)&d])
3385 for {
3386 c := auxIntToInt32(v.AuxInt)
3387 if v_0.Op != OpAMD64MOVQconst {
3388 break
3389 }
3390 d := auxIntToInt64(v_0.AuxInt)
3391 v.reset(OpAMD64MOVQconst)
3392 v.AuxInt = int64ToAuxInt(int64(c) & d)
3393 return true
3394 }
3395 return false
3396 }
3397 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3398 v_1 := v.Args[1]
3399 v_0 := v.Args[0]
3400 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3401 // cond: ValAndOff(valoff1).canAdd32(off2)
3402 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3403 for {
3404 valoff1 := auxIntToValAndOff(v.AuxInt)
3405 sym := auxToSym(v.Aux)
3406 if v_0.Op != OpAMD64ADDQconst {
3407 break
3408 }
3409 off2 := auxIntToInt32(v_0.AuxInt)
3410 base := v_0.Args[0]
3411 mem := v_1
3412 if !(ValAndOff(valoff1).canAdd32(off2)) {
3413 break
3414 }
3415 v.reset(OpAMD64ANDQconstmodify)
3416 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3417 v.Aux = symToAux(sym)
3418 v.AddArg2(base, mem)
3419 return true
3420 }
3421 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3422 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
3423 // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3424 for {
3425 valoff1 := auxIntToValAndOff(v.AuxInt)
3426 sym1 := auxToSym(v.Aux)
3427 if v_0.Op != OpAMD64LEAQ {
3428 break
3429 }
3430 off2 := auxIntToInt32(v_0.AuxInt)
3431 sym2 := auxToSym(v_0.Aux)
3432 base := v_0.Args[0]
3433 mem := v_1
3434 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3435 break
3436 }
3437 v.reset(OpAMD64ANDQconstmodify)
3438 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3439 v.Aux = symToAux(mergeSym(sym1, sym2))
3440 v.AddArg2(base, mem)
3441 return true
3442 }
3443 return false
3444 }
3445 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3446 v_2 := v.Args[2]
3447 v_1 := v.Args[1]
3448 v_0 := v.Args[0]
3449 b := v.Block
3450 typ := &b.Func.Config.Types
3451 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3452 // cond: is32Bit(int64(off1)+int64(off2))
3453 // result: (ANDQload [off1+off2] {sym} val base mem)
3454 for {
3455 off1 := auxIntToInt32(v.AuxInt)
3456 sym := auxToSym(v.Aux)
3457 val := v_0
3458 if v_1.Op != OpAMD64ADDQconst {
3459 break
3460 }
3461 off2 := auxIntToInt32(v_1.AuxInt)
3462 base := v_1.Args[0]
3463 mem := v_2
3464 if !(is32Bit(int64(off1) + int64(off2))) {
3465 break
3466 }
3467 v.reset(OpAMD64ANDQload)
3468 v.AuxInt = int32ToAuxInt(off1 + off2)
3469 v.Aux = symToAux(sym)
3470 v.AddArg3(val, base, mem)
3471 return true
3472 }
3473 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3474 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3475 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3476 for {
3477 off1 := auxIntToInt32(v.AuxInt)
3478 sym1 := auxToSym(v.Aux)
3479 val := v_0
3480 if v_1.Op != OpAMD64LEAQ {
3481 break
3482 }
3483 off2 := auxIntToInt32(v_1.AuxInt)
3484 sym2 := auxToSym(v_1.Aux)
3485 base := v_1.Args[0]
3486 mem := v_2
3487 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3488 break
3489 }
3490 v.reset(OpAMD64ANDQload)
3491 v.AuxInt = int32ToAuxInt(off1 + off2)
3492 v.Aux = symToAux(mergeSym(sym1, sym2))
3493 v.AddArg3(val, base, mem)
3494 return true
3495 }
3496 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3497 // result: (ANDQ x (MOVQf2i y))
3498 for {
3499 off := auxIntToInt32(v.AuxInt)
3500 sym := auxToSym(v.Aux)
3501 x := v_0
3502 ptr := v_1
3503 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3504 break
3505 }
3506 y := v_2.Args[1]
3507 if ptr != v_2.Args[0] {
3508 break
3509 }
3510 v.reset(OpAMD64ANDQ)
3511 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3512 v0.AddArg(y)
3513 v.AddArg2(x, v0)
3514 return true
3515 }
3516 return false
3517 }
3518 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3519 v_2 := v.Args[2]
3520 v_1 := v.Args[1]
3521 v_0 := v.Args[0]
3522 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3523 // cond: is32Bit(int64(off1)+int64(off2))
3524 // result: (ANDQmodify [off1+off2] {sym} base val mem)
3525 for {
3526 off1 := auxIntToInt32(v.AuxInt)
3527 sym := auxToSym(v.Aux)
3528 if v_0.Op != OpAMD64ADDQconst {
3529 break
3530 }
3531 off2 := auxIntToInt32(v_0.AuxInt)
3532 base := v_0.Args[0]
3533 val := v_1
3534 mem := v_2
3535 if !(is32Bit(int64(off1) + int64(off2))) {
3536 break
3537 }
3538 v.reset(OpAMD64ANDQmodify)
3539 v.AuxInt = int32ToAuxInt(off1 + off2)
3540 v.Aux = symToAux(sym)
3541 v.AddArg3(base, val, mem)
3542 return true
3543 }
3544 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3545 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
3546 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3547 for {
3548 off1 := auxIntToInt32(v.AuxInt)
3549 sym1 := auxToSym(v.Aux)
3550 if v_0.Op != OpAMD64LEAQ {
3551 break
3552 }
3553 off2 := auxIntToInt32(v_0.AuxInt)
3554 sym2 := auxToSym(v_0.Aux)
3555 base := v_0.Args[0]
3556 val := v_1
3557 mem := v_2
3558 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3559 break
3560 }
3561 v.reset(OpAMD64ANDQmodify)
3562 v.AuxInt = int32ToAuxInt(off1 + off2)
3563 v.Aux = symToAux(mergeSym(sym1, sym2))
3564 v.AddArg3(base, val, mem)
3565 return true
3566 }
3567 return false
3568 }
3569 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3570 v_0 := v.Args[0]
3571 b := v.Block
3572 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
3573 // result: (BSFQ (ORQconst <t> [1<<8] x))
3574 for {
3575 if v_0.Op != OpAMD64ORQconst {
3576 break
3577 }
3578 t := v_0.Type
3579 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3580 break
3581 }
3582 v_0_0 := v_0.Args[0]
3583 if v_0_0.Op != OpAMD64MOVBQZX {
3584 break
3585 }
3586 x := v_0_0.Args[0]
3587 v.reset(OpAMD64BSFQ)
3588 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3589 v0.AuxInt = int32ToAuxInt(1 << 8)
3590 v0.AddArg(x)
3591 v.AddArg(v0)
3592 return true
3593 }
3594 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
3595 // result: (BSFQ (ORQconst <t> [1<<16] x))
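// note: the 1<<8 / 1<<16 guard bit sits just above the zero-extended operand,
// so the BSF input is never zero and the zero-extension can be dropped.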
3596 for {
3597 if v_0.Op != OpAMD64ORQconst {
3598 break
3599 }
3600 t := v_0.Type
3601 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3602 break
3603 }
3604 v_0_0 := v_0.Args[0]
3605 if v_0_0.Op != OpAMD64MOVWQZX {
3606 break
3607 }
3608 x := v_0_0.Args[0]
3609 v.reset(OpAMD64BSFQ)
3610 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3611 v0.AuxInt = int32ToAuxInt(1 << 16)
3612 v0.AddArg(x)
3613 v.AddArg(v0)
3614 return true
3615 }
3616 return false
3617 }
3618 func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
3619 v_0 := v.Args[0]
3620 // match: (BSWAPL (BSWAPL p))
3621 // result: p
3622 for {
3623 if v_0.Op != OpAMD64BSWAPL {
3624 break
3625 }
3626 p := v_0.Args[0]
3627 v.copyOf(p)
3628 return true
3629 }
3630 // match: (BSWAPL x:(MOVLload [i] {s} p mem))
3631 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
3632 // result: (MOVBELload [i] {s} p mem)
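// note: MOVBE (guaranteed by GOAMD64 >= 3) byte-swaps during the load, so a
// single-use load feeding BSWAP becomes one instruction.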
3633 for {
3634 x := v_0
3635 if x.Op != OpAMD64MOVLload {
3636 break
3637 }
3638 i := auxIntToInt32(x.AuxInt)
3639 s := auxToSym(x.Aux)
3640 mem := x.Args[1]
3641 p := x.Args[0]
3642 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3643 break
3644 }
3645 v.reset(OpAMD64MOVBELload)
3646 v.AuxInt = int32ToAuxInt(i)
3647 v.Aux = symToAux(s)
3648 v.AddArg2(p, mem)
3649 return true
3650 }
3651 // match: (BSWAPL (MOVBELload [i] {s} p m))
3652 // result: (MOVLload [i] {s} p m)
3653 for {
3654 if v_0.Op != OpAMD64MOVBELload {
3655 break
3656 }
3657 i := auxIntToInt32(v_0.AuxInt)
3658 s := auxToSym(v_0.Aux)
3659 m := v_0.Args[1]
3660 p := v_0.Args[0]
3661 v.reset(OpAMD64MOVLload)
3662 v.AuxInt = int32ToAuxInt(i)
3663 v.Aux = symToAux(s)
3664 v.AddArg2(p, m)
3665 return true
3666 }
3667 return false
3668 }
3669 func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
3670 v_0 := v.Args[0]
3671 // match: (BSWAPQ (BSWAPQ p))
3672 // result: p
3673 for {
3674 if v_0.Op != OpAMD64BSWAPQ {
3675 break
3676 }
3677 p := v_0.Args[0]
3678 v.copyOf(p)
3679 return true
3680 }
3681 // match: (BSWAPQ x:(MOVQload [i] {s} p mem))
3682 // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
3683 // result: (MOVBEQload [i] {s} p mem)
3684 for {
3685 x := v_0
3686 if x.Op != OpAMD64MOVQload {
3687 break
3688 }
3689 i := auxIntToInt32(x.AuxInt)
3690 s := auxToSym(x.Aux)
3691 mem := x.Args[1]
3692 p := x.Args[0]
3693 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3694 break
3695 }
3696 v.reset(OpAMD64MOVBEQload)
3697 v.AuxInt = int32ToAuxInt(i)
3698 v.Aux = symToAux(s)
3699 v.AddArg2(p, mem)
3700 return true
3701 }
3702 // match: (BSWAPQ (MOVBEQload [i] {s} p m))
3703 // result: (MOVQload [i] {s} p m)
3704 for {
3705 if v_0.Op != OpAMD64MOVBEQload {
3706 break
3707 }
3708 i := auxIntToInt32(v_0.AuxInt)
3709 s := auxToSym(v_0.Aux)
3710 m := v_0.Args[1]
3711 p := v_0.Args[0]
3712 v.reset(OpAMD64MOVQload)
3713 v.AuxInt = int32ToAuxInt(i)
3714 v.Aux = symToAux(s)
3715 v.AddArg2(p, m)
3716 return true
3717 }
3718 return false
3719 }
3720 func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
3721 v_0 := v.Args[0]
3722 // match: (BTCLconst [c] (XORLconst [d] x))
3723 // result: (XORLconst [d ^ 1<<uint32(c)] x)
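// note: toggling bit c commutes with a constant XOR, so both collapse into a
// single XOR mask.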
3724 for {
3725 c := auxIntToInt8(v.AuxInt)
3726 if v_0.Op != OpAMD64XORLconst {
3727 break
3728 }
3729 d := auxIntToInt32(v_0.AuxInt)
3730 x := v_0.Args[0]
3731 v.reset(OpAMD64XORLconst)
3732 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3733 v.AddArg(x)
3734 return true
3735 }
3736 // match: (BTCLconst [c] (BTCLconst [d] x))
3737 // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
3738 for {
3739 c := auxIntToInt8(v.AuxInt)
3740 if v_0.Op != OpAMD64BTCLconst {
3741 break
3742 }
3743 d := auxIntToInt8(v_0.AuxInt)
3744 x := v_0.Args[0]
3745 v.reset(OpAMD64XORLconst)
3746 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
3747 v.AddArg(x)
3748 return true
3749 }
3750 // match: (BTCLconst [c] (MOVLconst [d]))
3751 // result: (MOVLconst [d^(1<<uint32(c))])
3752 for {
3753 c := auxIntToInt8(v.AuxInt)
3754 if v_0.Op != OpAMD64MOVLconst {
3755 break
3756 }
3757 d := auxIntToInt32(v_0.AuxInt)
3758 v.reset(OpAMD64MOVLconst)
3759 v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
3760 return true
3761 }
3762 return false
3763 }
3764 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3765 v_0 := v.Args[0]
3766 // match: (BTCQconst [c] (XORQconst [d] x))
3767 // cond: is32Bit(int64(d) ^ 1<<uint32(c))
3768 // result: (XORQconst [d ^ 1<<uint32(c)] x)
3769 for {
3770 c := auxIntToInt8(v.AuxInt)
3771 if v_0.Op != OpAMD64XORQconst {
3772 break
3773 }
3774 d := auxIntToInt32(v_0.AuxInt)
3775 x := v_0.Args[0]
3776 if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
3777 break
3778 }
3779 v.reset(OpAMD64XORQconst)
3780 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3781 v.AddArg(x)
3782 return true
3783 }
3784 // match: (BTCQconst [c] (BTCQconst [d] x))
3785 // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
3786 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
3787 for {
3788 c := auxIntToInt8(v.AuxInt)
3789 if v_0.Op != OpAMD64BTCQconst {
3790 break
3791 }
3792 d := auxIntToInt8(v_0.AuxInt)
3793 x := v_0.Args[0]
3794 if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
3795 break
3796 }
3797 v.reset(OpAMD64XORQconst)
3798 v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
3799 v.AddArg(x)
3800 return true
3801 }
3802 // match: (BTCQconst [c] (MOVQconst [d]))
3803 // result: (MOVQconst [d^(1<<uint32(c))])
3804 for {
3805 c := auxIntToInt8(v.AuxInt)
3806 if v_0.Op != OpAMD64MOVQconst {
3807 break
3808 }
3809 d := auxIntToInt64(v_0.AuxInt)
3810 v.reset(OpAMD64MOVQconst)
3811 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3812 return true
3813 }
3814 return false
3815 }
3816 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3817 v_0 := v.Args[0]
3818 // match: (BTLconst [c] (SHRQconst [d] x))
3819 // cond: (c+d)<64
3820 // result: (BTQconst [c+d] x)
3821 for {
3822 c := auxIntToInt8(v.AuxInt)
3823 if v_0.Op != OpAMD64SHRQconst {
3824 break
3825 }
3826 d := auxIntToInt8(v_0.AuxInt)
3827 x := v_0.Args[0]
3828 if !((c + d) < 64) {
3829 break
3830 }
3831 v.reset(OpAMD64BTQconst)
3832 v.AuxInt = int8ToAuxInt(c + d)
3833 v.AddArg(x)
3834 return true
3835 }
3836 // match: (BTLconst [c] (SHLQconst [d] x))
3837 // cond: c>d
3838 // result: (BTLconst [c-d] x)
3839 for {
3840 c := auxIntToInt8(v.AuxInt)
3841 if v_0.Op != OpAMD64SHLQconst {
3842 break
3843 }
3844 d := auxIntToInt8(v_0.AuxInt)
3845 x := v_0.Args[0]
3846 if !(c > d) {
3847 break
3848 }
3849 v.reset(OpAMD64BTLconst)
3850 v.AuxInt = int8ToAuxInt(c - d)
3851 v.AddArg(x)
3852 return true
3853 }
3854 // match: (BTLconst [0] s:(SHRQ x y))
3855 // result: (BTQ y x)
3856 for {
3857 if auxIntToInt8(v.AuxInt) != 0 {
3858 break
3859 }
3860 s := v_0
3861 if s.Op != OpAMD64SHRQ {
3862 break
3863 }
3864 y := s.Args[1]
3865 x := s.Args[0]
3866 v.reset(OpAMD64BTQ)
3867 v.AddArg2(y, x)
3868 return true
3869 }
3870 // match: (BTLconst [c] (SHRLconst [d] x))
3871 // cond: (c+d)<32
3872 // result: (BTLconst [c+d] x)
3873 for {
3874 c := auxIntToInt8(v.AuxInt)
3875 if v_0.Op != OpAMD64SHRLconst {
3876 break
3877 }
3878 d := auxIntToInt8(v_0.AuxInt)
3879 x := v_0.Args[0]
3880 if !((c + d) < 32) {
3881 break
3882 }
3883 v.reset(OpAMD64BTLconst)
3884 v.AuxInt = int8ToAuxInt(c + d)
3885 v.AddArg(x)
3886 return true
3887 }
3888 // match: (BTLconst [c] (SHLLconst [d] x))
3889 // cond: c>d
3890 // result: (BTLconst [c-d] x)
3891 for {
3892 c := auxIntToInt8(v.AuxInt)
3893 if v_0.Op != OpAMD64SHLLconst {
3894 break
3895 }
3896 d := auxIntToInt8(v_0.AuxInt)
3897 x := v_0.Args[0]
3898 if !(c > d) {
3899 break
3900 }
3901 v.reset(OpAMD64BTLconst)
3902 v.AuxInt = int8ToAuxInt(c - d)
3903 v.AddArg(x)
3904 return true
3905 }
3906 // match: (BTLconst [0] s:(SHRL x y))
3907 // result: (BTL y x)
3908 for {
3909 if auxIntToInt8(v.AuxInt) != 0 {
3910 break
3911 }
3912 s := v_0
3913 if s.Op != OpAMD64SHRL {
3914 break
3915 }
3916 y := s.Args[1]
3917 x := s.Args[0]
3918 v.reset(OpAMD64BTL)
3919 v.AddArg2(y, x)
3920 return true
3921 }
3922 return false
3923 }
3924 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3925 v_0 := v.Args[0]
3926 // match: (BTQconst [c] (SHRQconst [d] x))
3927 // cond: (c+d)<64
3928 // result: (BTQconst [c+d] x)
3929 for {
3930 c := auxIntToInt8(v.AuxInt)
3931 if v_0.Op != OpAMD64SHRQconst {
3932 break
3933 }
3934 d := auxIntToInt8(v_0.AuxInt)
3935 x := v_0.Args[0]
3936 if !((c + d) < 64) {
3937 break
3938 }
3939 v.reset(OpAMD64BTQconst)
3940 v.AuxInt = int8ToAuxInt(c + d)
3941 v.AddArg(x)
3942 return true
3943 }
3944 // match: (BTQconst [c] (SHLQconst [d] x))
3945 // cond: c>d
3946 // result: (BTQconst [c-d] x)
3947 for {
3948 c := auxIntToInt8(v.AuxInt)
3949 if v_0.Op != OpAMD64SHLQconst {
3950 break
3951 }
3952 d := auxIntToInt8(v_0.AuxInt)
3953 x := v_0.Args[0]
3954 if !(c > d) {
3955 break
3956 }
3957 v.reset(OpAMD64BTQconst)
3958 v.AuxInt = int8ToAuxInt(c - d)
3959 v.AddArg(x)
3960 return true
3961 }
3962 // match: (BTQconst [0] s:(SHRQ x y))
3963 // result: (BTQ y x)
3964 for {
3965 if auxIntToInt8(v.AuxInt) != 0 {
3966 break
3967 }
3968 s := v_0
3969 if s.Op != OpAMD64SHRQ {
3970 break
3971 }
3972 y := s.Args[1]
3973 x := s.Args[0]
3974 v.reset(OpAMD64BTQ)
3975 v.AddArg2(y, x)
3976 return true
3977 }
3978 return false
3979 }
3980 func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
3981 v_0 := v.Args[0]
3982 // match: (BTRLconst [c] (BTSLconst [c] x))
3983 // result: (BTRLconst [c] x)
3984 for {
3985 c := auxIntToInt8(v.AuxInt)
3986 if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
3987 break
3988 }
3989 x := v_0.Args[0]
3990 v.reset(OpAMD64BTRLconst)
3991 v.AuxInt = int8ToAuxInt(c)
3992 v.AddArg(x)
3993 return true
3994 }
3995 // match: (BTRLconst [c] (BTCLconst [c] x))
3996 // result: (BTRLconst [c] x)
3997 for {
3998 c := auxIntToInt8(v.AuxInt)
3999 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4000 break
4001 }
4002 x := v_0.Args[0]
4003 v.reset(OpAMD64BTRLconst)
4004 v.AuxInt = int8ToAuxInt(c)
4005 v.AddArg(x)
4006 return true
4007 }
4008 // match: (BTRLconst [c] (ANDLconst [d] x))
4009 // result: (ANDLconst [d &^ (1<<uint32(c))] x)
4010 for {
4011 c := auxIntToInt8(v.AuxInt)
4012 if v_0.Op != OpAMD64ANDLconst {
4013 break
4014 }
4015 d := auxIntToInt32(v_0.AuxInt)
4016 x := v_0.Args[0]
4017 v.reset(OpAMD64ANDLconst)
4018 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4019 v.AddArg(x)
4020 return true
4021 }
4022 // match: (BTRLconst [c] (BTRLconst [d] x))
4023 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4024 for {
4025 c := auxIntToInt8(v.AuxInt)
4026 if v_0.Op != OpAMD64BTRLconst {
4027 break
4028 }
4029 d := auxIntToInt8(v_0.AuxInt)
4030 x := v_0.Args[0]
4031 v.reset(OpAMD64ANDLconst)
4032 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4033 v.AddArg(x)
4034 return true
4035 }
4036 // match: (BTRLconst [c] (MOVLconst [d]))
4037 // result: (MOVLconst [d&^(1<<uint32(c))])
4038 for {
4039 c := auxIntToInt8(v.AuxInt)
4040 if v_0.Op != OpAMD64MOVLconst {
4041 break
4042 }
4043 d := auxIntToInt32(v_0.AuxInt)
4044 v.reset(OpAMD64MOVLconst)
4045 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4046 return true
4047 }
4048 return false
4049 }
4050 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
4051 v_0 := v.Args[0]
4052 // match: (BTRQconst [c] (BTSQconst [c] x))
4053 // result: (BTRQconst [c] x)
4054 for {
4055 c := auxIntToInt8(v.AuxInt)
4056 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
4057 break
4058 }
4059 x := v_0.Args[0]
4060 v.reset(OpAMD64BTRQconst)
4061 v.AuxInt = int8ToAuxInt(c)
4062 v.AddArg(x)
4063 return true
4064 }
4065 // match: (BTRQconst [c] (BTCQconst [c] x))
4066 // result: (BTRQconst [c] x)
4067 for {
4068 c := auxIntToInt8(v.AuxInt)
4069 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4070 break
4071 }
4072 x := v_0.Args[0]
4073 v.reset(OpAMD64BTRQconst)
4074 v.AuxInt = int8ToAuxInt(c)
4075 v.AddArg(x)
4076 return true
4077 }
4078 // match: (BTRQconst [c] (ANDQconst [d] x))
4079 // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
4080 // result: (ANDQconst [d &^ (1<<uint32(c))] x)
4081 for {
4082 c := auxIntToInt8(v.AuxInt)
4083 if v_0.Op != OpAMD64ANDQconst {
4084 break
4085 }
4086 d := auxIntToInt32(v_0.AuxInt)
4087 x := v_0.Args[0]
4088 if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
4089 break
4090 }
4091 v.reset(OpAMD64ANDQconst)
4092 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4093 v.AddArg(x)
4094 return true
4095 }
4096 // match: (BTRQconst [c] (BTRQconst [d] x))
4097 // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
4098 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4099 for {
4100 c := auxIntToInt8(v.AuxInt)
4101 if v_0.Op != OpAMD64BTRQconst {
4102 break
4103 }
4104 d := auxIntToInt8(v_0.AuxInt)
4105 x := v_0.Args[0]
4106 if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
4107 break
4108 }
4109 v.reset(OpAMD64ANDQconst)
4110 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4111 v.AddArg(x)
4112 return true
4113 }
4114 // match: (BTRQconst [c] (MOVQconst [d]))
4115 // result: (MOVQconst [d&^(1<<uint32(c))])
4116 for {
4117 c := auxIntToInt8(v.AuxInt)
4118 if v_0.Op != OpAMD64MOVQconst {
4119 break
4120 }
4121 d := auxIntToInt64(v_0.AuxInt)
4122 v.reset(OpAMD64MOVQconst)
4123 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
4124 return true
4125 }
4126 return false
4127 }
4128 func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
4129 v_0 := v.Args[0]
4130 // match: (BTSLconst [c] (BTRLconst [c] x))
4131 // result: (BTSLconst [c] x)
4132 for {
4133 c := auxIntToInt8(v.AuxInt)
4134 if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
4135 break
4136 }
4137 x := v_0.Args[0]
4138 v.reset(OpAMD64BTSLconst)
4139 v.AuxInt = int8ToAuxInt(c)
4140 v.AddArg(x)
4141 return true
4142 }
4143 // match: (BTSLconst [c] (BTCLconst [c] x))
4144 // result: (BTSLconst [c] x)
4145 for {
4146 c := auxIntToInt8(v.AuxInt)
4147 if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
4148 break
4149 }
4150 x := v_0.Args[0]
4151 v.reset(OpAMD64BTSLconst)
4152 v.AuxInt = int8ToAuxInt(c)
4153 v.AddArg(x)
4154 return true
4155 }
4156 // match: (BTSLconst [c] (ORLconst [d] x))
4157 // result: (ORLconst [d | 1<<uint32(c)] x)
4158 for {
4159 c := auxIntToInt8(v.AuxInt)
4160 if v_0.Op != OpAMD64ORLconst {
4161 break
4162 }
4163 d := auxIntToInt32(v_0.AuxInt)
4164 x := v_0.Args[0]
4165 v.reset(OpAMD64ORLconst)
4166 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4167 v.AddArg(x)
4168 return true
4169 }
4170 // match: (BTSLconst [c] (BTSLconst [d] x))
4171 // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
4172 for {
4173 c := auxIntToInt8(v.AuxInt)
4174 if v_0.Op != OpAMD64BTSLconst {
4175 break
4176 }
4177 d := auxIntToInt8(v_0.AuxInt)
4178 x := v_0.Args[0]
4179 v.reset(OpAMD64ORLconst)
4180 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4181 v.AddArg(x)
4182 return true
4183 }
4184 // match: (BTSLconst [c] (MOVLconst [d]))
4185 // result: (MOVLconst [d|(1<<uint32(c))])
4186 for {
4187 c := auxIntToInt8(v.AuxInt)
4188 if v_0.Op != OpAMD64MOVLconst {
4189 break
4190 }
4191 d := auxIntToInt32(v_0.AuxInt)
4192 v.reset(OpAMD64MOVLconst)
4193 v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
4194 return true
4195 }
4196 return false
4197 }
4198 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
4199 v_0 := v.Args[0]
4200 // match: (BTSQconst [c] (BTRQconst [c] x))
4201 // result: (BTSQconst [c] x)
4202 for {
4203 c := auxIntToInt8(v.AuxInt)
4204 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
4205 break
4206 }
4207 x := v_0.Args[0]
4208 v.reset(OpAMD64BTSQconst)
4209 v.AuxInt = int8ToAuxInt(c)
4210 v.AddArg(x)
4211 return true
4212 }
4213 // match: (BTSQconst [c] (BTCQconst [c] x))
4214 // result: (BTSQconst [c] x)
4215 for {
4216 c := auxIntToInt8(v.AuxInt)
4217 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
4218 break
4219 }
4220 x := v_0.Args[0]
4221 v.reset(OpAMD64BTSQconst)
4222 v.AuxInt = int8ToAuxInt(c)
4223 v.AddArg(x)
4224 return true
4225 }
4226 // match: (BTSQconst [c] (ORQconst [d] x))
4227 // cond: is32Bit(int64(d) | 1<<uint32(c))
4228 // result: (ORQconst [d | 1<<uint32(c)] x)
4229 for {
4230 c := auxIntToInt8(v.AuxInt)
4231 if v_0.Op != OpAMD64ORQconst {
4232 break
4233 }
4234 d := auxIntToInt32(v_0.AuxInt)
4235 x := v_0.Args[0]
4236 if !(is32Bit(int64(d) | 1<<uint32(c))) {
4237 break
4238 }
4239 v.reset(OpAMD64ORQconst)
4240 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4241 v.AddArg(x)
4242 return true
4243 }
4244 // match: (BTSQconst [c] (BTSQconst [d] x))
4245 // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
4246 // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
4247 for {
4248 c := auxIntToInt8(v.AuxInt)
4249 if v_0.Op != OpAMD64BTSQconst {
4250 break
4251 }
4252 d := auxIntToInt8(v_0.AuxInt)
4253 x := v_0.Args[0]
4254 if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
4255 break
4256 }
4257 v.reset(OpAMD64ORQconst)
4258 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4259 v.AddArg(x)
4260 return true
4261 }
4262 // match: (BTSQconst [c] (MOVQconst [d]))
4263 // result: (MOVQconst [d|(1<<uint32(c))])
4264 for {
4265 c := auxIntToInt8(v.AuxInt)
4266 if v_0.Op != OpAMD64MOVQconst {
4267 break
4268 }
4269 d := auxIntToInt64(v_0.AuxInt)
4270 v.reset(OpAMD64MOVQconst)
4271 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
4272 return true
4273 }
4274 return false
4275 }
4276 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
4277 v_2 := v.Args[2]
4278 v_1 := v.Args[1]
4279 v_0 := v.Args[0]
4280 // match: (CMOVLCC x y (InvertFlags cond))
4281 // result: (CMOVLLS x y cond)
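// note: InvertFlags marks a comparison whose operands were swapped, so each
// CMOV condition is rewritten to its operand-swapped dual (here CC <-> LS).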
4282 for {
4283 x := v_0
4284 y := v_1
4285 if v_2.Op != OpAMD64InvertFlags {
4286 break
4287 }
4288 cond := v_2.Args[0]
4289 v.reset(OpAMD64CMOVLLS)
4290 v.AddArg3(x, y, cond)
4291 return true
4292 }
4293 // match: (CMOVLCC _ x (FlagEQ))
4294 // result: x
4295 for {
4296 x := v_1
4297 if v_2.Op != OpAMD64FlagEQ {
4298 break
4299 }
4300 v.copyOf(x)
4301 return true
4302 }
4303 // match: (CMOVLCC _ x (FlagGT_UGT))
4304 // result: x
4305 for {
4306 x := v_1
4307 if v_2.Op != OpAMD64FlagGT_UGT {
4308 break
4309 }
4310 v.copyOf(x)
4311 return true
4312 }
4313 // match: (CMOVLCC y _ (FlagGT_ULT))
4314 // result: y
4315 for {
4316 y := v_0
4317 if v_2.Op != OpAMD64FlagGT_ULT {
4318 break
4319 }
4320 v.copyOf(y)
4321 return true
4322 }
4323 // match: (CMOVLCC y _ (FlagLT_ULT))
4324 // result: y
4325 for {
4326 y := v_0
4327 if v_2.Op != OpAMD64FlagLT_ULT {
4328 break
4329 }
4330 v.copyOf(y)
4331 return true
4332 }
4333 // match: (CMOVLCC _ x (FlagLT_UGT))
4334 // result: x
4335 for {
4336 x := v_1
4337 if v_2.Op != OpAMD64FlagLT_UGT {
4338 break
4339 }
4340 v.copyOf(x)
4341 return true
4342 }
4343 return false
4344 }
4345 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4346 v_2 := v.Args[2]
4347 v_1 := v.Args[1]
4348 v_0 := v.Args[0]
4349 // match: (CMOVLCS x y (InvertFlags cond))
4350 // result: (CMOVLHI x y cond)
4351 for {
4352 x := v_0
4353 y := v_1
4354 if v_2.Op != OpAMD64InvertFlags {
4355 break
4356 }
4357 cond := v_2.Args[0]
4358 v.reset(OpAMD64CMOVLHI)
4359 v.AddArg3(x, y, cond)
4360 return true
4361 }
4362 // match: (CMOVLCS y _ (FlagEQ))
4363 // result: y
4364 for {
4365 y := v_0
4366 if v_2.Op != OpAMD64FlagEQ {
4367 break
4368 }
4369 v.copyOf(y)
4370 return true
4371 }
4372 // match: (CMOVLCS y _ (FlagGT_UGT))
4373 // result: y
4374 for {
4375 y := v_0
4376 if v_2.Op != OpAMD64FlagGT_UGT {
4377 break
4378 }
4379 v.copyOf(y)
4380 return true
4381 }
4382 // match: (CMOVLCS _ x (FlagGT_ULT))
4383 // result: x
4384 for {
4385 x := v_1
4386 if v_2.Op != OpAMD64FlagGT_ULT {
4387 break
4388 }
4389 v.copyOf(x)
4390 return true
4391 }
4392 // match: (CMOVLCS _ x (FlagLT_ULT))
4393 // result: x
4394 for {
4395 x := v_1
4396 if v_2.Op != OpAMD64FlagLT_ULT {
4397 break
4398 }
4399 v.copyOf(x)
4400 return true
4401 }
4402 // match: (CMOVLCS y _ (FlagLT_UGT))
4403 // result: y
4404 for {
4405 y := v_0
4406 if v_2.Op != OpAMD64FlagLT_UGT {
4407 break
4408 }
4409 v.copyOf(y)
4410 return true
4411 }
4412 return false
4413 }
4414 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4415 v_2 := v.Args[2]
4416 v_1 := v.Args[1]
4417 v_0 := v.Args[0]
4418 // match: (CMOVLEQ x y (InvertFlags cond))
4419 // result: (CMOVLEQ x y cond)
4420 for {
4421 x := v_0
4422 y := v_1
4423 if v_2.Op != OpAMD64InvertFlags {
4424 break
4425 }
4426 cond := v_2.Args[0]
4427 v.reset(OpAMD64CMOVLEQ)
4428 v.AddArg3(x, y, cond)
4429 return true
4430 }
4431 // match: (CMOVLEQ _ x (FlagEQ))
4432 // result: x
4433 for {
4434 x := v_1
4435 if v_2.Op != OpAMD64FlagEQ {
4436 break
4437 }
4438 v.copyOf(x)
4439 return true
4440 }
4441 // match: (CMOVLEQ y _ (FlagGT_UGT))
4442 // result: y
4443 for {
4444 y := v_0
4445 if v_2.Op != OpAMD64FlagGT_UGT {
4446 break
4447 }
4448 v.copyOf(y)
4449 return true
4450 }
4451 // match: (CMOVLEQ y _ (FlagGT_ULT))
4452 // result: y
4453 for {
4454 y := v_0
4455 if v_2.Op != OpAMD64FlagGT_ULT {
4456 break
4457 }
4458 v.copyOf(y)
4459 return true
4460 }
4461 // match: (CMOVLEQ y _ (FlagLT_ULT))
4462 // result: y
4463 for {
4464 y := v_0
4465 if v_2.Op != OpAMD64FlagLT_ULT {
4466 break
4467 }
4468 v.copyOf(y)
4469 return true
4470 }
4471 // match: (CMOVLEQ y _ (FlagLT_UGT))
4472 // result: y
4473 for {
4474 y := v_0
4475 if v_2.Op != OpAMD64FlagLT_UGT {
4476 break
4477 }
4478 v.copyOf(y)
4479 return true
4480 }
4481 return false
4482 }
4483 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4484 v_2 := v.Args[2]
4485 v_1 := v.Args[1]
4486 v_0 := v.Args[0]
4487 // match: (CMOVLGE x y (InvertFlags cond))
4488 // result: (CMOVLLE x y cond)
4489 for {
4490 x := v_0
4491 y := v_1
4492 if v_2.Op != OpAMD64InvertFlags {
4493 break
4494 }
4495 cond := v_2.Args[0]
4496 v.reset(OpAMD64CMOVLLE)
4497 v.AddArg3(x, y, cond)
4498 return true
4499 }
4500 // match: (CMOVLGE _ x (FlagEQ))
4501 // result: x
4502 for {
4503 x := v_1
4504 if v_2.Op != OpAMD64FlagEQ {
4505 break
4506 }
4507 v.copyOf(x)
4508 return true
4509 }
4510 // match: (CMOVLGE _ x (FlagGT_UGT))
4511 // result: x
4512 for {
4513 x := v_1
4514 if v_2.Op != OpAMD64FlagGT_UGT {
4515 break
4516 }
4517 v.copyOf(x)
4518 return true
4519 }
4520 // match: (CMOVLGE _ x (FlagGT_ULT))
4521 // result: x
4522 for {
4523 x := v_1
4524 if v_2.Op != OpAMD64FlagGT_ULT {
4525 break
4526 }
4527 v.copyOf(x)
4528 return true
4529 }
4530 // match: (CMOVLGE y _ (FlagLT_ULT))
4531 // result: y
4532 for {
4533 y := v_0
4534 if v_2.Op != OpAMD64FlagLT_ULT {
4535 break
4536 }
4537 v.copyOf(y)
4538 return true
4539 }
4540 // match: (CMOVLGE y _ (FlagLT_UGT))
4541 // result: y
4542 for {
4543 y := v_0
4544 if v_2.Op != OpAMD64FlagLT_UGT {
4545 break
4546 }
4547 v.copyOf(y)
4548 return true
4549 }
4550 return false
4551 }
4552 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4553 v_2 := v.Args[2]
4554 v_1 := v.Args[1]
4555 v_0 := v.Args[0]
4556 // match: (CMOVLGT x y (InvertFlags cond))
4557 // result: (CMOVLLT x y cond)
4558 for {
4559 x := v_0
4560 y := v_1
4561 if v_2.Op != OpAMD64InvertFlags {
4562 break
4563 }
4564 cond := v_2.Args[0]
4565 v.reset(OpAMD64CMOVLLT)
4566 v.AddArg3(x, y, cond)
4567 return true
4568 }
4569 // match: (CMOVLGT y _ (FlagEQ))
4570 // result: y
4571 for {
4572 y := v_0
4573 if v_2.Op != OpAMD64FlagEQ {
4574 break
4575 }
4576 v.copyOf(y)
4577 return true
4578 }
4579 // match: (CMOVLGT _ x (FlagGT_UGT))
4580 // result: x
4581 for {
4582 x := v_1
4583 if v_2.Op != OpAMD64FlagGT_UGT {
4584 break
4585 }
4586 v.copyOf(x)
4587 return true
4588 }
4589 // match: (CMOVLGT _ x (FlagGT_ULT))
4590 // result: x
4591 for {
4592 x := v_1
4593 if v_2.Op != OpAMD64FlagGT_ULT {
4594 break
4595 }
4596 v.copyOf(x)
4597 return true
4598 }
4599 // match: (CMOVLGT y _ (FlagLT_ULT))
4600 // result: y
4601 for {
4602 y := v_0
4603 if v_2.Op != OpAMD64FlagLT_ULT {
4604 break
4605 }
4606 v.copyOf(y)
4607 return true
4608 }
4609 // match: (CMOVLGT y _ (FlagLT_UGT))
4610 // result: y
4611 for {
4612 y := v_0
4613 if v_2.Op != OpAMD64FlagLT_UGT {
4614 break
4615 }
4616 v.copyOf(y)
4617 return true
4618 }
4619 return false
4620 }
4621 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4622 v_2 := v.Args[2]
4623 v_1 := v.Args[1]
4624 v_0 := v.Args[0]
4625 // match: (CMOVLHI x y (InvertFlags cond))
4626 // result: (CMOVLCS x y cond)
4627 for {
4628 x := v_0
4629 y := v_1
4630 if v_2.Op != OpAMD64InvertFlags {
4631 break
4632 }
4633 cond := v_2.Args[0]
4634 v.reset(OpAMD64CMOVLCS)
4635 v.AddArg3(x, y, cond)
4636 return true
4637 }
4638 // match: (CMOVLHI y _ (FlagEQ))
4639 // result: y
4640 for {
4641 y := v_0
4642 if v_2.Op != OpAMD64FlagEQ {
4643 break
4644 }
4645 v.copyOf(y)
4646 return true
4647 }
4648 // match: (CMOVLHI _ x (FlagGT_UGT))
4649 // result: x
4650 for {
4651 x := v_1
4652 if v_2.Op != OpAMD64FlagGT_UGT {
4653 break
4654 }
4655 v.copyOf(x)
4656 return true
4657 }
4658 // match: (CMOVLHI y _ (FlagGT_ULT))
4659 // result: y
4660 for {
4661 y := v_0
4662 if v_2.Op != OpAMD64FlagGT_ULT {
4663 break
4664 }
4665 v.copyOf(y)
4666 return true
4667 }
4668 // match: (CMOVLHI y _ (FlagLT_ULT))
4669 // result: y
4670 for {
4671 y := v_0
4672 if v_2.Op != OpAMD64FlagLT_ULT {
4673 break
4674 }
4675 v.copyOf(y)
4676 return true
4677 }
4678 // match: (CMOVLHI _ x (FlagLT_UGT))
4679 // result: x
4680 for {
4681 x := v_1
4682 if v_2.Op != OpAMD64FlagLT_UGT {
4683 break
4684 }
4685 v.copyOf(x)
4686 return true
4687 }
4688 return false
4689 }
4690 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4691 v_2 := v.Args[2]
4692 v_1 := v.Args[1]
4693 v_0 := v.Args[0]
4694 // match: (CMOVLLE x y (InvertFlags cond))
4695 // result: (CMOVLGE x y cond)
4696 for {
4697 x := v_0
4698 y := v_1
4699 if v_2.Op != OpAMD64InvertFlags {
4700 break
4701 }
4702 cond := v_2.Args[0]
4703 v.reset(OpAMD64CMOVLGE)
4704 v.AddArg3(x, y, cond)
4705 return true
4706 }
4707 // match: (CMOVLLE _ x (FlagEQ))
4708 // result: x
4709 for {
4710 x := v_1
4711 if v_2.Op != OpAMD64FlagEQ {
4712 break
4713 }
4714 v.copyOf(x)
4715 return true
4716 }
4717 // match: (CMOVLLE y _ (FlagGT_UGT))
4718 // result: y
4719 for {
4720 y := v_0
4721 if v_2.Op != OpAMD64FlagGT_UGT {
4722 break
4723 }
4724 v.copyOf(y)
4725 return true
4726 }
4727 // match: (CMOVLLE y _ (FlagGT_ULT))
4728 // result: y
4729 for {
4730 y := v_0
4731 if v_2.Op != OpAMD64FlagGT_ULT {
4732 break
4733 }
4734 v.copyOf(y)
4735 return true
4736 }
4737 // match: (CMOVLLE _ x (FlagLT_ULT))
4738 // result: x
4739 for {
4740 x := v_1
4741 if v_2.Op != OpAMD64FlagLT_ULT {
4742 break
4743 }
4744 v.copyOf(x)
4745 return true
4746 }
4747 // match: (CMOVLLE _ x (FlagLT_UGT))
4748 // result: x
4749 for {
4750 x := v_1
4751 if v_2.Op != OpAMD64FlagLT_UGT {
4752 break
4753 }
4754 v.copyOf(x)
4755 return true
4756 }
4757 return false
4758 }
4759 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4760 v_2 := v.Args[2]
4761 v_1 := v.Args[1]
4762 v_0 := v.Args[0]
4763 // match: (CMOVLLS x y (InvertFlags cond))
4764 // result: (CMOVLCC x y cond)
4765 for {
4766 x := v_0
4767 y := v_1
4768 if v_2.Op != OpAMD64InvertFlags {
4769 break
4770 }
4771 cond := v_2.Args[0]
4772 v.reset(OpAMD64CMOVLCC)
4773 v.AddArg3(x, y, cond)
4774 return true
4775 }
4776 // match: (CMOVLLS _ x (FlagEQ))
4777 // result: x
4778 for {
4779 x := v_1
4780 if v_2.Op != OpAMD64FlagEQ {
4781 break
4782 }
4783 v.copyOf(x)
4784 return true
4785 }
4786 // match: (CMOVLLS y _ (FlagGT_UGT))
4787 // result: y
4788 for {
4789 y := v_0
4790 if v_2.Op != OpAMD64FlagGT_UGT {
4791 break
4792 }
4793 v.copyOf(y)
4794 return true
4795 }
4796 // match: (CMOVLLS _ x (FlagGT_ULT))
4797 // result: x
4798 for {
4799 x := v_1
4800 if v_2.Op != OpAMD64FlagGT_ULT {
4801 break
4802 }
4803 v.copyOf(x)
4804 return true
4805 }
4806 // match: (CMOVLLS _ x (FlagLT_ULT))
4807 // result: x
4808 for {
4809 x := v_1
4810 if v_2.Op != OpAMD64FlagLT_ULT {
4811 break
4812 }
4813 v.copyOf(x)
4814 return true
4815 }
4816 // match: (CMOVLLS y _ (FlagLT_UGT))
4817 // result: y
4818 for {
4819 y := v_0
4820 if v_2.Op != OpAMD64FlagLT_UGT {
4821 break
4822 }
4823 v.copyOf(y)
4824 return true
4825 }
4826 return false
4827 }
4828 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4829 v_2 := v.Args[2]
4830 v_1 := v.Args[1]
4831 v_0 := v.Args[0]
4832 // match: (CMOVLLT x y (InvertFlags cond))
4833 // result: (CMOVLGT x y cond)
4834 for {
4835 x := v_0
4836 y := v_1
4837 if v_2.Op != OpAMD64InvertFlags {
4838 break
4839 }
4840 cond := v_2.Args[0]
4841 v.reset(OpAMD64CMOVLGT)
4842 v.AddArg3(x, y, cond)
4843 return true
4844 }
4845 // match: (CMOVLLT y _ (FlagEQ))
4846 // result: y
4847 for {
4848 y := v_0
4849 if v_2.Op != OpAMD64FlagEQ {
4850 break
4851 }
4852 v.copyOf(y)
4853 return true
4854 }
4855 // match: (CMOVLLT y _ (FlagGT_UGT))
4856 // result: y
4857 for {
4858 y := v_0
4859 if v_2.Op != OpAMD64FlagGT_UGT {
4860 break
4861 }
4862 v.copyOf(y)
4863 return true
4864 }
4865 // match: (CMOVLLT y _ (FlagGT_ULT))
4866 // result: y
4867 for {
4868 y := v_0
4869 if v_2.Op != OpAMD64FlagGT_ULT {
4870 break
4871 }
4872 v.copyOf(y)
4873 return true
4874 }
4875 // match: (CMOVLLT _ x (FlagLT_ULT))
4876 // result: x
4877 for {
4878 x := v_1
4879 if v_2.Op != OpAMD64FlagLT_ULT {
4880 break
4881 }
4882 v.copyOf(x)
4883 return true
4884 }
4885 // match: (CMOVLLT _ x (FlagLT_UGT))
4886 // result: x
4887 for {
4888 x := v_1
4889 if v_2.Op != OpAMD64FlagLT_UGT {
4890 break
4891 }
4892 v.copyOf(x)
4893 return true
4894 }
4895 return false
4896 }
4897 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4898 v_2 := v.Args[2]
4899 v_1 := v.Args[1]
4900 v_0 := v.Args[0]
4901 // match: (CMOVLNE x y (InvertFlags cond))
4902 // result: (CMOVLNE x y cond)
4903 for {
4904 x := v_0
4905 y := v_1
4906 if v_2.Op != OpAMD64InvertFlags {
4907 break
4908 }
4909 cond := v_2.Args[0]
4910 v.reset(OpAMD64CMOVLNE)
4911 v.AddArg3(x, y, cond)
4912 return true
4913 }
4914 // match: (CMOVLNE y _ (FlagEQ))
4915 // result: y
4916 for {
4917 y := v_0
4918 if v_2.Op != OpAMD64FlagEQ {
4919 break
4920 }
4921 v.copyOf(y)
4922 return true
4923 }
4924 // match: (CMOVLNE _ x (FlagGT_UGT))
4925 // result: x
4926 for {
4927 x := v_1
4928 if v_2.Op != OpAMD64FlagGT_UGT {
4929 break
4930 }
4931 v.copyOf(x)
4932 return true
4933 }
4934 // match: (CMOVLNE _ x (FlagGT_ULT))
4935 // result: x
4936 for {
4937 x := v_1
4938 if v_2.Op != OpAMD64FlagGT_ULT {
4939 break
4940 }
4941 v.copyOf(x)
4942 return true
4943 }
4944 // match: (CMOVLNE _ x (FlagLT_ULT))
4945 // result: x
4946 for {
4947 x := v_1
4948 if v_2.Op != OpAMD64FlagLT_ULT {
4949 break
4950 }
4951 v.copyOf(x)
4952 return true
4953 }
4954 // match: (CMOVLNE _ x (FlagLT_UGT))
4955 // result: x
4956 for {
4957 x := v_1
4958 if v_2.Op != OpAMD64FlagLT_UGT {
4959 break
4960 }
4961 v.copyOf(x)
4962 return true
4963 }
4964 return false
4965 }
4966 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4967 v_2 := v.Args[2]
4968 v_1 := v.Args[1]
4969 v_0 := v.Args[0]
4970 // match: (CMOVQCC x y (InvertFlags cond))
4971 // result: (CMOVQLS x y cond)
4972 for {
4973 x := v_0
4974 y := v_1
4975 if v_2.Op != OpAMD64InvertFlags {
4976 break
4977 }
4978 cond := v_2.Args[0]
4979 v.reset(OpAMD64CMOVQLS)
4980 v.AddArg3(x, y, cond)
4981 return true
4982 }
4983 // match: (CMOVQCC _ x (FlagEQ))
4984 // result: x
4985 for {
4986 x := v_1
4987 if v_2.Op != OpAMD64FlagEQ {
4988 break
4989 }
4990 v.copyOf(x)
4991 return true
4992 }
4993 // match: (CMOVQCC _ x (FlagGT_UGT))
4994 // result: x
4995 for {
4996 x := v_1
4997 if v_2.Op != OpAMD64FlagGT_UGT {
4998 break
4999 }
5000 v.copyOf(x)
5001 return true
5002 }
5003 // match: (CMOVQCC y _ (FlagGT_ULT))
5004 // result: y
5005 for {
5006 y := v_0
5007 if v_2.Op != OpAMD64FlagGT_ULT {
5008 break
5009 }
5010 v.copyOf(y)
5011 return true
5012 }
5013 // match: (CMOVQCC y _ (FlagLT_ULT))
5014 // result: y
5015 for {
5016 y := v_0
5017 if v_2.Op != OpAMD64FlagLT_ULT {
5018 break
5019 }
5020 v.copyOf(y)
5021 return true
5022 }
5023 // match: (CMOVQCC _ x (FlagLT_UGT))
5024 // result: x
5025 for {
5026 x := v_1
5027 if v_2.Op != OpAMD64FlagLT_UGT {
5028 break
5029 }
5030 v.copyOf(x)
5031 return true
5032 }
5033 return false
5034 }
5035 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
5036 v_2 := v.Args[2]
5037 v_1 := v.Args[1]
5038 v_0 := v.Args[0]
5039 // match: (CMOVQCS x y (InvertFlags cond))
5040 // result: (CMOVQHI x y cond)
5041 for {
5042 x := v_0
5043 y := v_1
5044 if v_2.Op != OpAMD64InvertFlags {
5045 break
5046 }
5047 cond := v_2.Args[0]
5048 v.reset(OpAMD64CMOVQHI)
5049 v.AddArg3(x, y, cond)
5050 return true
5051 }
5052 // match: (CMOVQCS y _ (FlagEQ))
5053 // result: y
5054 for {
5055 y := v_0
5056 if v_2.Op != OpAMD64FlagEQ {
5057 break
5058 }
5059 v.copyOf(y)
5060 return true
5061 }
5062 // match: (CMOVQCS y _ (FlagGT_UGT))
5063 // result: y
5064 for {
5065 y := v_0
5066 if v_2.Op != OpAMD64FlagGT_UGT {
5067 break
5068 }
5069 v.copyOf(y)
5070 return true
5071 }
5072 // match: (CMOVQCS _ x (FlagGT_ULT))
5073 // result: x
5074 for {
5075 x := v_1
5076 if v_2.Op != OpAMD64FlagGT_ULT {
5077 break
5078 }
5079 v.copyOf(x)
5080 return true
5081 }
5082 // match: (CMOVQCS _ x (FlagLT_ULT))
5083 // result: x
5084 for {
5085 x := v_1
5086 if v_2.Op != OpAMD64FlagLT_ULT {
5087 break
5088 }
5089 v.copyOf(x)
5090 return true
5091 }
5092 // match: (CMOVQCS y _ (FlagLT_UGT))
5093 // result: y
5094 for {
5095 y := v_0
5096 if v_2.Op != OpAMD64FlagLT_UGT {
5097 break
5098 }
5099 v.copyOf(y)
5100 return true
5101 }
5102 return false
5103 }
5104 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5105 v_2 := v.Args[2]
5106 v_1 := v.Args[1]
5107 v_0 := v.Args[0]
5108 // match: (CMOVQEQ x y (InvertFlags cond))
5109 // result: (CMOVQEQ x y cond)
5110 for {
5111 x := v_0
5112 y := v_1
5113 if v_2.Op != OpAMD64InvertFlags {
5114 break
5115 }
5116 cond := v_2.Args[0]
5117 v.reset(OpAMD64CMOVQEQ)
5118 v.AddArg3(x, y, cond)
5119 return true
5120 }
5121 // match: (CMOVQEQ _ x (FlagEQ))
5122 // result: x
5123 for {
5124 x := v_1
5125 if v_2.Op != OpAMD64FlagEQ {
5126 break
5127 }
5128 v.copyOf(x)
5129 return true
5130 }
5131 // match: (CMOVQEQ y _ (FlagGT_UGT))
5132 // result: y
5133 for {
5134 y := v_0
5135 if v_2.Op != OpAMD64FlagGT_UGT {
5136 break
5137 }
5138 v.copyOf(y)
5139 return true
5140 }
5141 // match: (CMOVQEQ y _ (FlagGT_ULT))
5142 // result: y
5143 for {
5144 y := v_0
5145 if v_2.Op != OpAMD64FlagGT_ULT {
5146 break
5147 }
5148 v.copyOf(y)
5149 return true
5150 }
5151 // match: (CMOVQEQ y _ (FlagLT_ULT))
5152 // result: y
5153 for {
5154 y := v_0
5155 if v_2.Op != OpAMD64FlagLT_ULT {
5156 break
5157 }
5158 v.copyOf(y)
5159 return true
5160 }
5161 // match: (CMOVQEQ y _ (FlagLT_UGT))
5162 // result: y
5163 for {
5164 y := v_0
5165 if v_2.Op != OpAMD64FlagLT_UGT {
5166 break
5167 }
5168 v.copyOf(y)
5169 return true
5170 }
5171 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
5172 // cond: c != 0
5173 // result: x
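// note: a nonzero ORQconst guarantees BSFQ sees a set bit, so ZF is never
// set and the EQ-conditional move can never replace x.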
5174 for {
5175 x := v_0
5176 if v_2.Op != OpSelect1 {
5177 break
5178 }
5179 v_2_0 := v_2.Args[0]
5180 if v_2_0.Op != OpAMD64BSFQ {
5181 break
5182 }
5183 v_2_0_0 := v_2_0.Args[0]
5184 if v_2_0_0.Op != OpAMD64ORQconst {
5185 break
5186 }
5187 c := auxIntToInt32(v_2_0_0.AuxInt)
5188 if !(c != 0) {
5189 break
5190 }
5191 v.copyOf(x)
5192 return true
5193 }
5194 return false
5195 }
5196 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5197 v_2 := v.Args[2]
5198 v_1 := v.Args[1]
5199 v_0 := v.Args[0]
5200 // match: (CMOVQGE x y (InvertFlags cond))
5201 // result: (CMOVQLE x y cond)
5202 for {
5203 x := v_0
5204 y := v_1
5205 if v_2.Op != OpAMD64InvertFlags {
5206 break
5207 }
5208 cond := v_2.Args[0]
5209 v.reset(OpAMD64CMOVQLE)
5210 v.AddArg3(x, y, cond)
5211 return true
5212 }
5213 // match: (CMOVQGE _ x (FlagEQ))
5214 // result: x
5215 for {
5216 x := v_1
5217 if v_2.Op != OpAMD64FlagEQ {
5218 break
5219 }
5220 v.copyOf(x)
5221 return true
5222 }
5223 // match: (CMOVQGE _ x (FlagGT_UGT))
5224 // result: x
5225 for {
5226 x := v_1
5227 if v_2.Op != OpAMD64FlagGT_UGT {
5228 break
5229 }
5230 v.copyOf(x)
5231 return true
5232 }
5233
5234
5235 for {
5236 x := v_1
5237 if v_2.Op != OpAMD64FlagGT_ULT {
5238 break
5239 }
5240 v.copyOf(x)
5241 return true
5242 }
5243
5244
5245 for {
5246 y := v_0
5247 if v_2.Op != OpAMD64FlagLT_ULT {
5248 break
5249 }
5250 v.copyOf(y)
5251 return true
5252 }
5253
5254
5255 for {
5256 y := v_0
5257 if v_2.Op != OpAMD64FlagLT_UGT {
5258 break
5259 }
5260 v.copyOf(y)
5261 return true
5262 }
5263 return false
5264 }
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
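	// The two MOVBload rules below fold a load into the compare (canMergeLoad
	// checks the load is single-use and adjacent in the dependency graph).
	// CMPBload always compares the loaded value on the left, so when the load
	// is the *right* operand the folded compare must be wrapped in
	// InvertFlags. (Explanatory comment; the same pattern recurs in the CMPL,
	// CMPQ, and CMPW functions below.)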
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTL x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPLconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTLconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPQ y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x==y
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
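	// The two NEGQ rules below encode a small range proof (explanatory
	// comment, derived from the matched pattern): (ANDQconst [15] x) is in
	// [0,15], adding -16 gives [-16,-1], and negating gives [1,16], which is
	// unsigned-less-than 32, so the comparison folds to FlagLT_ULT
	// unconditionally. The -8/7 variant is the same argument for [1,8].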
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
	// result: (FlagLT_ULT)
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
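	// Only the divisor's load can be folded here: DIVSDload computes
	// x / *ptr, and division does not commute, so there is no mirrored rule
	// for a loaded dividend. (Explanatory comment; DIVSS below is analogous.)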
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
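	// The HMUL* rewrites only commute the operands: when x is not
	// rematerializeable but y is, (HMULL x y) becomes (HMULL y x). The likely
	// intent (not stated in this generated file) is that HMUL pins one operand
	// to a fixed register, so putting the cheaper-to-recompute value there
	// gives the register allocator more freedom.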
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
8370 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8371 v_1 := v.Args[1]
8372 v_0 := v.Args[0]
8373
8374
8375
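// match: (HMULQU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQU y x)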
8376 for {
8377 x := v_0
8378 y := v_1
8379 if !(!x.rematerializeable() && y.rematerializeable()) {
8380 break
8381 }
8382 v.reset(OpAMD64HMULQU)
8383 v.AddArg2(y, x)
8384 return true
8385 }
8386 return false
8387 }
8388 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8389 v_0 := v.Args[0]
8390
8391
8392
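// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (LEAL [c+d] {s} x)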
8393 for {
8394 c := auxIntToInt32(v.AuxInt)
8395 s := auxToSym(v.Aux)
8396 if v_0.Op != OpAMD64ADDLconst {
8397 break
8398 }
8399 d := auxIntToInt32(v_0.AuxInt)
8400 x := v_0.Args[0]
8401 if !(is32Bit(int64(c) + int64(d))) {
8402 break
8403 }
8404 v.reset(OpAMD64LEAL)
8405 v.AuxInt = int32ToAuxInt(c + d)
8406 v.Aux = symToAux(s)
8407 v.AddArg(x)
8408 return true
8409 }
8410
8411
8412
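// match: (LEAL [c] {s} (ADDL x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAL1 [c] {s} x y)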
8413 for {
8414 c := auxIntToInt32(v.AuxInt)
8415 s := auxToSym(v.Aux)
8416 if v_0.Op != OpAMD64ADDL {
8417 break
8418 }
8419 _ = v_0.Args[1]
8420 v_0_0 := v_0.Args[0]
8421 v_0_1 := v_0.Args[1]
8422 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8423 x := v_0_0
8424 y := v_0_1
8425 if !(x.Op != OpSB && y.Op != OpSB) {
8426 continue
8427 }
8428 v.reset(OpAMD64LEAL1)
8429 v.AuxInt = int32ToAuxInt(c)
8430 v.Aux = symToAux(s)
8431 v.AddArg2(x, y)
8432 return true
8433 }
8434 break
8435 }
8436 return false
8437 }
8438 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8439 v_1 := v.Args[1]
8440 v_0 := v.Args[0]
8441
8442
8443
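// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL1 [c+d] {s} x y)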
8444 for {
8445 c := auxIntToInt32(v.AuxInt)
8446 s := auxToSym(v.Aux)
8447 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8448 if v_0.Op != OpAMD64ADDLconst {
8449 continue
8450 }
8451 d := auxIntToInt32(v_0.AuxInt)
8452 x := v_0.Args[0]
8453 y := v_1
8454 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8455 continue
8456 }
8457 v.reset(OpAMD64LEAL1)
8458 v.AuxInt = int32ToAuxInt(c + d)
8459 v.Aux = symToAux(s)
8460 v.AddArg2(x, y)
8461 return true
8462 }
8463 break
8464 }
8465
8466
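// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
// result: (LEAL2 [c] {s} x y)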
8467 for {
8468 c := auxIntToInt32(v.AuxInt)
8469 s := auxToSym(v.Aux)
8470 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8471 x := v_0
8472 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8473 continue
8474 }
8475 y := v_1.Args[0]
8476 v.reset(OpAMD64LEAL2)
8477 v.AuxInt = int32ToAuxInt(c)
8478 v.Aux = symToAux(s)
8479 v.AddArg2(x, y)
8480 return true
8481 }
8482 break
8483 }
8484
8485
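// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
// result: (LEAL4 [c] {s} x y)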
8486 for {
8487 c := auxIntToInt32(v.AuxInt)
8488 s := auxToSym(v.Aux)
8489 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8490 x := v_0
8491 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8492 continue
8493 }
8494 y := v_1.Args[0]
8495 v.reset(OpAMD64LEAL4)
8496 v.AuxInt = int32ToAuxInt(c)
8497 v.Aux = symToAux(s)
8498 v.AddArg2(x, y)
8499 return true
8500 }
8501 break
8502 }
8503
8504
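// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
// result: (LEAL8 [c] {s} x y)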
8505 for {
8506 c := auxIntToInt32(v.AuxInt)
8507 s := auxToSym(v.Aux)
8508 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8509 x := v_0
8510 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8511 continue
8512 }
8513 y := v_1.Args[0]
8514 v.reset(OpAMD64LEAL8)
8515 v.AuxInt = int32ToAuxInt(c)
8516 v.Aux = symToAux(s)
8517 v.AddArg2(x, y)
8518 return true
8519 }
8520 break
8521 }
8522 return false
8523 }
8524 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8525 v_1 := v.Args[1]
8526 v_0 := v.Args[0]
8527
8528
8529
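// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL2 [c+d] {s} x y)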
8530 for {
8531 c := auxIntToInt32(v.AuxInt)
8532 s := auxToSym(v.Aux)
8533 if v_0.Op != OpAMD64ADDLconst {
8534 break
8535 }
8536 d := auxIntToInt32(v_0.AuxInt)
8537 x := v_0.Args[0]
8538 y := v_1
8539 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8540 break
8541 }
8542 v.reset(OpAMD64LEAL2)
8543 v.AuxInt = int32ToAuxInt(c + d)
8544 v.Aux = symToAux(s)
8545 v.AddArg2(x, y)
8546 return true
8547 }
8548
8549
8550
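// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
// result: (LEAL2 [c+2*d] {s} x y)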
8551 for {
8552 c := auxIntToInt32(v.AuxInt)
8553 s := auxToSym(v.Aux)
8554 x := v_0
8555 if v_1.Op != OpAMD64ADDLconst {
8556 break
8557 }
8558 d := auxIntToInt32(v_1.AuxInt)
8559 y := v_1.Args[0]
8560 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8561 break
8562 }
8563 v.reset(OpAMD64LEAL2)
8564 v.AuxInt = int32ToAuxInt(c + 2*d)
8565 v.Aux = symToAux(s)
8566 v.AddArg2(x, y)
8567 return true
8568 }
8569
8570
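// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
// result: (LEAL4 [c] {s} x y)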
8571 for {
8572 c := auxIntToInt32(v.AuxInt)
8573 s := auxToSym(v.Aux)
8574 x := v_0
8575 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8576 break
8577 }
8578 y := v_1.Args[0]
8579 v.reset(OpAMD64LEAL4)
8580 v.AuxInt = int32ToAuxInt(c)
8581 v.Aux = symToAux(s)
8582 v.AddArg2(x, y)
8583 return true
8584 }
8585
8586
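// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
// result: (LEAL8 [c] {s} x y)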
8587 for {
8588 c := auxIntToInt32(v.AuxInt)
8589 s := auxToSym(v.Aux)
8590 x := v_0
8591 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8592 break
8593 }
8594 y := v_1.Args[0]
8595 v.reset(OpAMD64LEAL8)
8596 v.AuxInt = int32ToAuxInt(c)
8597 v.Aux = symToAux(s)
8598 v.AddArg2(x, y)
8599 return true
8600 }
8601 return false
8602 }
8603 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
8604 v_1 := v.Args[1]
8605 v_0 := v.Args[0]
8606
8607
8608
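// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL4 [c+d] {s} x y)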
8609 for {
8610 c := auxIntToInt32(v.AuxInt)
8611 s := auxToSym(v.Aux)
8612 if v_0.Op != OpAMD64ADDLconst {
8613 break
8614 }
8615 d := auxIntToInt32(v_0.AuxInt)
8616 x := v_0.Args[0]
8617 y := v_1
8618 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8619 break
8620 }
8621 v.reset(OpAMD64LEAL4)
8622 v.AuxInt = int32ToAuxInt(c + d)
8623 v.Aux = symToAux(s)
8624 v.AddArg2(x, y)
8625 return true
8626 }
8627
8628
8629
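// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
// result: (LEAL4 [c+4*d] {s} x y)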
8630 for {
8631 c := auxIntToInt32(v.AuxInt)
8632 s := auxToSym(v.Aux)
8633 x := v_0
8634 if v_1.Op != OpAMD64ADDLconst {
8635 break
8636 }
8637 d := auxIntToInt32(v_1.AuxInt)
8638 y := v_1.Args[0]
8639 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
8640 break
8641 }
8642 v.reset(OpAMD64LEAL4)
8643 v.AuxInt = int32ToAuxInt(c + 4*d)
8644 v.Aux = symToAux(s)
8645 v.AddArg2(x, y)
8646 return true
8647 }
8648
8649
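// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
// result: (LEAL8 [c] {s} x y)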
8650 for {
8651 c := auxIntToInt32(v.AuxInt)
8652 s := auxToSym(v.Aux)
8653 x := v_0
8654 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
8655 break
8656 }
8657 y := v_1.Args[0]
8658 v.reset(OpAMD64LEAL8)
8659 v.AuxInt = int32ToAuxInt(c)
8660 v.Aux = symToAux(s)
8661 v.AddArg2(x, y)
8662 return true
8663 }
8664 return false
8665 }
8666 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
8667 v_1 := v.Args[1]
8668 v_0 := v.Args[0]
8669
8670
8671
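// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAL8 [c+d] {s} x y)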
8672 for {
8673 c := auxIntToInt32(v.AuxInt)
8674 s := auxToSym(v.Aux)
8675 if v_0.Op != OpAMD64ADDLconst {
8676 break
8677 }
8678 d := auxIntToInt32(v_0.AuxInt)
8679 x := v_0.Args[0]
8680 y := v_1
8681 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8682 break
8683 }
8684 v.reset(OpAMD64LEAL8)
8685 v.AuxInt = int32ToAuxInt(c + d)
8686 v.Aux = symToAux(s)
8687 v.AddArg2(x, y)
8688 return true
8689 }
8690
8691
8692
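// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
// result: (LEAL8 [c+8*d] {s} x y)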
8693 for {
8694 c := auxIntToInt32(v.AuxInt)
8695 s := auxToSym(v.Aux)
8696 x := v_0
8697 if v_1.Op != OpAMD64ADDLconst {
8698 break
8699 }
8700 d := auxIntToInt32(v_1.AuxInt)
8701 y := v_1.Args[0]
8702 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
8703 break
8704 }
8705 v.reset(OpAMD64LEAL8)
8706 v.AuxInt = int32ToAuxInt(c + 8*d)
8707 v.Aux = symToAux(s)
8708 v.AddArg2(x, y)
8709 return true
8710 }
8711 return false
8712 }
8713 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
8714 v_0 := v.Args[0]
8715
8716
8717
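// match: (LEAQ [c] {s} (ADDQconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (LEAQ [c+d] {s} x)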
8718 for {
8719 c := auxIntToInt32(v.AuxInt)
8720 s := auxToSym(v.Aux)
8721 if v_0.Op != OpAMD64ADDQconst {
8722 break
8723 }
8724 d := auxIntToInt32(v_0.AuxInt)
8725 x := v_0.Args[0]
8726 if !(is32Bit(int64(c) + int64(d))) {
8727 break
8728 }
8729 v.reset(OpAMD64LEAQ)
8730 v.AuxInt = int32ToAuxInt(c + d)
8731 v.Aux = symToAux(s)
8732 v.AddArg(x)
8733 return true
8734 }
8735
8736
8737
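// match: (LEAQ [c] {s} (ADDQ x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAQ1 [c] {s} x y)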
8738 for {
8739 c := auxIntToInt32(v.AuxInt)
8740 s := auxToSym(v.Aux)
8741 if v_0.Op != OpAMD64ADDQ {
8742 break
8743 }
8744 _ = v_0.Args[1]
8745 v_0_0 := v_0.Args[0]
8746 v_0_1 := v_0.Args[1]
8747 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8748 x := v_0_0
8749 y := v_0_1
8750 if !(x.Op != OpSB && y.Op != OpSB) {
8751 continue
8752 }
8753 v.reset(OpAMD64LEAQ1)
8754 v.AuxInt = int32ToAuxInt(c)
8755 v.Aux = symToAux(s)
8756 v.AddArg2(x, y)
8757 return true
8758 }
8759 break
8760 }
8761
8762
8763
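// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)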
8764 for {
8765 off1 := auxIntToInt32(v.AuxInt)
8766 sym1 := auxToSym(v.Aux)
8767 if v_0.Op != OpAMD64LEAQ {
8768 break
8769 }
8770 off2 := auxIntToInt32(v_0.AuxInt)
8771 sym2 := auxToSym(v_0.Aux)
8772 x := v_0.Args[0]
8773 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8774 break
8775 }
8776 v.reset(OpAMD64LEAQ)
8777 v.AuxInt = int32ToAuxInt(off1 + off2)
8778 v.Aux = symToAux(mergeSym(sym1, sym2))
8779 v.AddArg(x)
8780 return true
8781 }
8782
8783
8784
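// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)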
8785 for {
8786 off1 := auxIntToInt32(v.AuxInt)
8787 sym1 := auxToSym(v.Aux)
8788 if v_0.Op != OpAMD64LEAQ1 {
8789 break
8790 }
8791 off2 := auxIntToInt32(v_0.AuxInt)
8792 sym2 := auxToSym(v_0.Aux)
8793 y := v_0.Args[1]
8794 x := v_0.Args[0]
8795 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8796 break
8797 }
8798 v.reset(OpAMD64LEAQ1)
8799 v.AuxInt = int32ToAuxInt(off1 + off2)
8800 v.Aux = symToAux(mergeSym(sym1, sym2))
8801 v.AddArg2(x, y)
8802 return true
8803 }
8804
8805
8806
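// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)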
8807 for {
8808 off1 := auxIntToInt32(v.AuxInt)
8809 sym1 := auxToSym(v.Aux)
8810 if v_0.Op != OpAMD64LEAQ2 {
8811 break
8812 }
8813 off2 := auxIntToInt32(v_0.AuxInt)
8814 sym2 := auxToSym(v_0.Aux)
8815 y := v_0.Args[1]
8816 x := v_0.Args[0]
8817 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8818 break
8819 }
8820 v.reset(OpAMD64LEAQ2)
8821 v.AuxInt = int32ToAuxInt(off1 + off2)
8822 v.Aux = symToAux(mergeSym(sym1, sym2))
8823 v.AddArg2(x, y)
8824 return true
8825 }
8826
8827
8828
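// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)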
8829 for {
8830 off1 := auxIntToInt32(v.AuxInt)
8831 sym1 := auxToSym(v.Aux)
8832 if v_0.Op != OpAMD64LEAQ4 {
8833 break
8834 }
8835 off2 := auxIntToInt32(v_0.AuxInt)
8836 sym2 := auxToSym(v_0.Aux)
8837 y := v_0.Args[1]
8838 x := v_0.Args[0]
8839 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8840 break
8841 }
8842 v.reset(OpAMD64LEAQ4)
8843 v.AuxInt = int32ToAuxInt(off1 + off2)
8844 v.Aux = symToAux(mergeSym(sym1, sym2))
8845 v.AddArg2(x, y)
8846 return true
8847 }
8848
8849
8850
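// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)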
8851 for {
8852 off1 := auxIntToInt32(v.AuxInt)
8853 sym1 := auxToSym(v.Aux)
8854 if v_0.Op != OpAMD64LEAQ8 {
8855 break
8856 }
8857 off2 := auxIntToInt32(v_0.AuxInt)
8858 sym2 := auxToSym(v_0.Aux)
8859 y := v_0.Args[1]
8860 x := v_0.Args[0]
8861 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8862 break
8863 }
8864 v.reset(OpAMD64LEAQ8)
8865 v.AuxInt = int32ToAuxInt(off1 + off2)
8866 v.Aux = symToAux(mergeSym(sym1, sym2))
8867 v.AddArg2(x, y)
8868 return true
8869 }
8870 return false
8871 }
8872 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
8873 v_1 := v.Args[1]
8874 v_0 := v.Args[0]
8875
8876
8877
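// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)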
8878 for {
8879 c := auxIntToInt32(v.AuxInt)
8880 s := auxToSym(v.Aux)
8881 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8882 if v_0.Op != OpAMD64ADDQconst {
8883 continue
8884 }
8885 d := auxIntToInt32(v_0.AuxInt)
8886 x := v_0.Args[0]
8887 y := v_1
8888 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8889 continue
8890 }
8891 v.reset(OpAMD64LEAQ1)
8892 v.AuxInt = int32ToAuxInt(c + d)
8893 v.Aux = symToAux(s)
8894 v.AddArg2(x, y)
8895 return true
8896 }
8897 break
8898 }
8899
8900
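// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ2 [c] {s} x y)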
8901 for {
8902 c := auxIntToInt32(v.AuxInt)
8903 s := auxToSym(v.Aux)
8904 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8905 x := v_0
8906 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
8907 continue
8908 }
8909 y := v_1.Args[0]
8910 v.reset(OpAMD64LEAQ2)
8911 v.AuxInt = int32ToAuxInt(c)
8912 v.Aux = symToAux(s)
8913 v.AddArg2(x, y)
8914 return true
8915 }
8916 break
8917 }
8918
8919
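// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
// result: (LEAQ4 [c] {s} x y)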
8920 for {
8921 c := auxIntToInt32(v.AuxInt)
8922 s := auxToSym(v.Aux)
8923 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8924 x := v_0
8925 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
8926 continue
8927 }
8928 y := v_1.Args[0]
8929 v.reset(OpAMD64LEAQ4)
8930 v.AuxInt = int32ToAuxInt(c)
8931 v.Aux = symToAux(s)
8932 v.AddArg2(x, y)
8933 return true
8934 }
8935 break
8936 }
8937
8938
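// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
// result: (LEAQ8 [c] {s} x y)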
8939 for {
8940 c := auxIntToInt32(v.AuxInt)
8941 s := auxToSym(v.Aux)
8942 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8943 x := v_0
8944 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
8945 continue
8946 }
8947 y := v_1.Args[0]
8948 v.reset(OpAMD64LEAQ8)
8949 v.AuxInt = int32ToAuxInt(c)
8950 v.Aux = symToAux(s)
8951 v.AddArg2(x, y)
8952 return true
8953 }
8954 break
8955 }
8956
8957
8958
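// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)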
8959 for {
8960 off1 := auxIntToInt32(v.AuxInt)
8961 sym1 := auxToSym(v.Aux)
8962 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8963 if v_0.Op != OpAMD64LEAQ {
8964 continue
8965 }
8966 off2 := auxIntToInt32(v_0.AuxInt)
8967 sym2 := auxToSym(v_0.Aux)
8968 x := v_0.Args[0]
8969 y := v_1
8970 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
8971 continue
8972 }
8973 v.reset(OpAMD64LEAQ1)
8974 v.AuxInt = int32ToAuxInt(off1 + off2)
8975 v.Aux = symToAux(mergeSym(sym1, sym2))
8976 v.AddArg2(x, y)
8977 return true
8978 }
8979 break
8980 }
8981
8982
8983
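// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)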
8984 for {
8985 off1 := auxIntToInt32(v.AuxInt)
8986 sym1 := auxToSym(v.Aux)
8987 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8988 x := v_0
8989 if v_1.Op != OpAMD64LEAQ1 {
8990 continue
8991 }
8992 off2 := auxIntToInt32(v_1.AuxInt)
8993 sym2 := auxToSym(v_1.Aux)
8994 y := v_1.Args[1]
8995 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8996 continue
8997 }
8998 v.reset(OpAMD64LEAQ2)
8999 v.AuxInt = int32ToAuxInt(off1 + off2)
9000 v.Aux = symToAux(mergeSym(sym1, sym2))
9001 v.AddArg2(x, y)
9002 return true
9003 }
9004 break
9005 }
9006
9007
9008
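// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} y x)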
9009 for {
9010 off1 := auxIntToInt32(v.AuxInt)
9011 sym1 := auxToSym(v.Aux)
9012 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9013 x := v_0
9014 if v_1.Op != OpAMD64LEAQ1 {
9015 continue
9016 }
9017 off2 := auxIntToInt32(v_1.AuxInt)
9018 sym2 := auxToSym(v_1.Aux)
9019 _ = v_1.Args[1]
9020 v_1_0 := v_1.Args[0]
9021 v_1_1 := v_1.Args[1]
9022 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9023 if x != v_1_0 {
9024 continue
9025 }
9026 y := v_1_1
9027 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9028 continue
9029 }
9030 v.reset(OpAMD64LEAQ2)
9031 v.AuxInt = int32ToAuxInt(off1 + off2)
9032 v.Aux = symToAux(mergeSym(sym1, sym2))
9033 v.AddArg2(y, x)
9034 return true
9035 }
9036 }
9037 break
9038 }
9039
9040
9041
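// match: (LEAQ1 [0] x y)
// cond: v.Aux == nil
// result: (ADDQ x y)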
9042 for {
9043 if auxIntToInt32(v.AuxInt) != 0 {
9044 break
9045 }
9046 x := v_0
9047 y := v_1
9048 if !(v.Aux == nil) {
9049 break
9050 }
9051 v.reset(OpAMD64ADDQ)
9052 v.AddArg2(x, y)
9053 return true
9054 }
9055 return false
9056 }
9057 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9058 v_1 := v.Args[1]
9059 v_0 := v.Args[0]
9060
9061
9062
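// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ2 [c+d] {s} x y)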
9063 for {
9064 c := auxIntToInt32(v.AuxInt)
9065 s := auxToSym(v.Aux)
9066 if v_0.Op != OpAMD64ADDQconst {
9067 break
9068 }
9069 d := auxIntToInt32(v_0.AuxInt)
9070 x := v_0.Args[0]
9071 y := v_1
9072 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9073 break
9074 }
9075 v.reset(OpAMD64LEAQ2)
9076 v.AuxInt = int32ToAuxInt(c + d)
9077 v.Aux = symToAux(s)
9078 v.AddArg2(x, y)
9079 return true
9080 }
9081
9082
9083
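// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
// result: (LEAQ2 [c+2*d] {s} x y)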
9084 for {
9085 c := auxIntToInt32(v.AuxInt)
9086 s := auxToSym(v.Aux)
9087 x := v_0
9088 if v_1.Op != OpAMD64ADDQconst {
9089 break
9090 }
9091 d := auxIntToInt32(v_1.AuxInt)
9092 y := v_1.Args[0]
9093 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
9094 break
9095 }
9096 v.reset(OpAMD64LEAQ2)
9097 v.AuxInt = int32ToAuxInt(c + 2*d)
9098 v.Aux = symToAux(s)
9099 v.AddArg2(x, y)
9100 return true
9101 }
9102
9103
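// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ4 [c] {s} x y)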
9104 for {
9105 c := auxIntToInt32(v.AuxInt)
9106 s := auxToSym(v.Aux)
9107 x := v_0
9108 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9109 break
9110 }
9111 y := v_1.Args[0]
9112 v.reset(OpAMD64LEAQ4)
9113 v.AuxInt = int32ToAuxInt(c)
9114 v.Aux = symToAux(s)
9115 v.AddArg2(x, y)
9116 return true
9117 }
9118
9119
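// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
// result: (LEAQ8 [c] {s} x y)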
9120 for {
9121 c := auxIntToInt32(v.AuxInt)
9122 s := auxToSym(v.Aux)
9123 x := v_0
9124 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9125 break
9126 }
9127 y := v_1.Args[0]
9128 v.reset(OpAMD64LEAQ8)
9129 v.AuxInt = int32ToAuxInt(c)
9130 v.Aux = symToAux(s)
9131 v.AddArg2(x, y)
9132 return true
9133 }
9134
9135
9136
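// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)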
9137 for {
9138 off1 := auxIntToInt32(v.AuxInt)
9139 sym1 := auxToSym(v.Aux)
9140 if v_0.Op != OpAMD64LEAQ {
9141 break
9142 }
9143 off2 := auxIntToInt32(v_0.AuxInt)
9144 sym2 := auxToSym(v_0.Aux)
9145 x := v_0.Args[0]
9146 y := v_1
9147 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9148 break
9149 }
9150 v.reset(OpAMD64LEAQ2)
9151 v.AuxInt = int32ToAuxInt(off1 + off2)
9152 v.Aux = symToAux(mergeSym(sym1, sym2))
9153 v.AddArg2(x, y)
9154 return true
9155 }
9156
9157
9158
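// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
// result: (LEAQ4 [off1+2*off2] {sym1} x y)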
9159 for {
9160 off1 := auxIntToInt32(v.AuxInt)
9161 sym1 := auxToSym(v.Aux)
9162 x := v_0
9163 if v_1.Op != OpAMD64LEAQ1 {
9164 break
9165 }
9166 off2 := auxIntToInt32(v_1.AuxInt)
9167 sym2 := auxToSym(v_1.Aux)
9168 y := v_1.Args[1]
9169 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
9170 break
9171 }
9172 v.reset(OpAMD64LEAQ4)
9173 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
9174 v.Aux = symToAux(sym1)
9175 v.AddArg2(x, y)
9176 return true
9177 }
9178
9179
9180
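// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*2)
// result: (LEAQ [off+int32(scale)*2] {sym} x)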
9181 for {
9182 off := auxIntToInt32(v.AuxInt)
9183 sym := auxToSym(v.Aux)
9184 x := v_0
9185 if v_1.Op != OpAMD64MOVQconst {
9186 break
9187 }
9188 scale := auxIntToInt64(v_1.AuxInt)
9189 if !(is32Bit(int64(off) + int64(scale)*2)) {
9190 break
9191 }
9192 v.reset(OpAMD64LEAQ)
9193 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9194 v.Aux = symToAux(sym)
9195 v.AddArg(x)
9196 return true
9197 }
9198
9199
9200
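// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*2)
// result: (LEAQ [off+int32(scale)*2] {sym} x)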
9201 for {
9202 off := auxIntToInt32(v.AuxInt)
9203 sym := auxToSym(v.Aux)
9204 x := v_0
9205 if v_1.Op != OpAMD64MOVLconst {
9206 break
9207 }
9208 scale := auxIntToInt32(v_1.AuxInt)
9209 if !(is32Bit(int64(off) + int64(scale)*2)) {
9210 break
9211 }
9212 v.reset(OpAMD64LEAQ)
9213 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9214 v.Aux = symToAux(sym)
9215 v.AddArg(x)
9216 return true
9217 }
9218 return false
9219 }
9220 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9221 v_1 := v.Args[1]
9222 v_0 := v.Args[0]
9223
9224
9225
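// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ4 [c+d] {s} x y)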
9226 for {
9227 c := auxIntToInt32(v.AuxInt)
9228 s := auxToSym(v.Aux)
9229 if v_0.Op != OpAMD64ADDQconst {
9230 break
9231 }
9232 d := auxIntToInt32(v_0.AuxInt)
9233 x := v_0.Args[0]
9234 y := v_1
9235 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9236 break
9237 }
9238 v.reset(OpAMD64LEAQ4)
9239 v.AuxInt = int32ToAuxInt(c + d)
9240 v.Aux = symToAux(s)
9241 v.AddArg2(x, y)
9242 return true
9243 }
9244
9245
9246
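// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
// result: (LEAQ4 [c+4*d] {s} x y)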
9247 for {
9248 c := auxIntToInt32(v.AuxInt)
9249 s := auxToSym(v.Aux)
9250 x := v_0
9251 if v_1.Op != OpAMD64ADDQconst {
9252 break
9253 }
9254 d := auxIntToInt32(v_1.AuxInt)
9255 y := v_1.Args[0]
9256 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9257 break
9258 }
9259 v.reset(OpAMD64LEAQ4)
9260 v.AuxInt = int32ToAuxInt(c + 4*d)
9261 v.Aux = symToAux(s)
9262 v.AddArg2(x, y)
9263 return true
9264 }
9265
9266
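// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
// result: (LEAQ8 [c] {s} x y)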
9267 for {
9268 c := auxIntToInt32(v.AuxInt)
9269 s := auxToSym(v.Aux)
9270 x := v_0
9271 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
9272 break
9273 }
9274 y := v_1.Args[0]
9275 v.reset(OpAMD64LEAQ8)
9276 v.AuxInt = int32ToAuxInt(c)
9277 v.Aux = symToAux(s)
9278 v.AddArg2(x, y)
9279 return true
9280 }
9281
9282
9283
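// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)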
9284 for {
9285 off1 := auxIntToInt32(v.AuxInt)
9286 sym1 := auxToSym(v.Aux)
9287 if v_0.Op != OpAMD64LEAQ {
9288 break
9289 }
9290 off2 := auxIntToInt32(v_0.AuxInt)
9291 sym2 := auxToSym(v_0.Aux)
9292 x := v_0.Args[0]
9293 y := v_1
9294 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9295 break
9296 }
9297 v.reset(OpAMD64LEAQ4)
9298 v.AuxInt = int32ToAuxInt(off1 + off2)
9299 v.Aux = symToAux(mergeSym(sym1, sym2))
9300 v.AddArg2(x, y)
9301 return true
9302 }
9303
9304
9305
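// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
// result: (LEAQ8 [off1+4*off2] {sym1} x y)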
9306 for {
9307 off1 := auxIntToInt32(v.AuxInt)
9308 sym1 := auxToSym(v.Aux)
9309 x := v_0
9310 if v_1.Op != OpAMD64LEAQ1 {
9311 break
9312 }
9313 off2 := auxIntToInt32(v_1.AuxInt)
9314 sym2 := auxToSym(v_1.Aux)
9315 y := v_1.Args[1]
9316 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9317 break
9318 }
9319 v.reset(OpAMD64LEAQ8)
9320 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9321 v.Aux = symToAux(sym1)
9322 v.AddArg2(x, y)
9323 return true
9324 }
9325
9326
9327
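// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*4)
// result: (LEAQ [off+int32(scale)*4] {sym} x)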
9328 for {
9329 off := auxIntToInt32(v.AuxInt)
9330 sym := auxToSym(v.Aux)
9331 x := v_0
9332 if v_1.Op != OpAMD64MOVQconst {
9333 break
9334 }
9335 scale := auxIntToInt64(v_1.AuxInt)
9336 if !(is32Bit(int64(off) + int64(scale)*4)) {
9337 break
9338 }
9339 v.reset(OpAMD64LEAQ)
9340 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9341 v.Aux = symToAux(sym)
9342 v.AddArg(x)
9343 return true
9344 }
9345
9346
9347
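// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*4)
// result: (LEAQ [off+int32(scale)*4] {sym} x)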
9348 for {
9349 off := auxIntToInt32(v.AuxInt)
9350 sym := auxToSym(v.Aux)
9351 x := v_0
9352 if v_1.Op != OpAMD64MOVLconst {
9353 break
9354 }
9355 scale := auxIntToInt32(v_1.AuxInt)
9356 if !(is32Bit(int64(off) + int64(scale)*4)) {
9357 break
9358 }
9359 v.reset(OpAMD64LEAQ)
9360 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9361 v.Aux = symToAux(sym)
9362 v.AddArg(x)
9363 return true
9364 }
9365 return false
9366 }
9367 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9368 v_1 := v.Args[1]
9369 v_0 := v.Args[0]
9370
9371
9372
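// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
// result: (LEAQ8 [c+d] {s} x y)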
9373 for {
9374 c := auxIntToInt32(v.AuxInt)
9375 s := auxToSym(v.Aux)
9376 if v_0.Op != OpAMD64ADDQconst {
9377 break
9378 }
9379 d := auxIntToInt32(v_0.AuxInt)
9380 x := v_0.Args[0]
9381 y := v_1
9382 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9383 break
9384 }
9385 v.reset(OpAMD64LEAQ8)
9386 v.AuxInt = int32ToAuxInt(c + d)
9387 v.Aux = symToAux(s)
9388 v.AddArg2(x, y)
9389 return true
9390 }
9391
9392
9393
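// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
// result: (LEAQ8 [c+8*d] {s} x y)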
9394 for {
9395 c := auxIntToInt32(v.AuxInt)
9396 s := auxToSym(v.Aux)
9397 x := v_0
9398 if v_1.Op != OpAMD64ADDQconst {
9399 break
9400 }
9401 d := auxIntToInt32(v_1.AuxInt)
9402 y := v_1.Args[0]
9403 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9404 break
9405 }
9406 v.reset(OpAMD64LEAQ8)
9407 v.AuxInt = int32ToAuxInt(c + 8*d)
9408 v.Aux = symToAux(s)
9409 v.AddArg2(x, y)
9410 return true
9411 }
9412
9413
9414
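// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)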
9415 for {
9416 off1 := auxIntToInt32(v.AuxInt)
9417 sym1 := auxToSym(v.Aux)
9418 if v_0.Op != OpAMD64LEAQ {
9419 break
9420 }
9421 off2 := auxIntToInt32(v_0.AuxInt)
9422 sym2 := auxToSym(v_0.Aux)
9423 x := v_0.Args[0]
9424 y := v_1
9425 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9426 break
9427 }
9428 v.reset(OpAMD64LEAQ8)
9429 v.AuxInt = int32ToAuxInt(off1 + off2)
9430 v.Aux = symToAux(mergeSym(sym1, sym2))
9431 v.AddArg2(x, y)
9432 return true
9433 }
9434
9435
9436
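// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*8)
// result: (LEAQ [off+int32(scale)*8] {sym} x)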
9437 for {
9438 off := auxIntToInt32(v.AuxInt)
9439 sym := auxToSym(v.Aux)
9440 x := v_0
9441 if v_1.Op != OpAMD64MOVQconst {
9442 break
9443 }
9444 scale := auxIntToInt64(v_1.AuxInt)
9445 if !(is32Bit(int64(off) + int64(scale)*8)) {
9446 break
9447 }
9448 v.reset(OpAMD64LEAQ)
9449 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9450 v.Aux = symToAux(sym)
9451 v.AddArg(x)
9452 return true
9453 }
9454
9455
9456
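// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
// cond: is32Bit(int64(off)+int64(scale)*8)
// result: (LEAQ [off+int32(scale)*8] {sym} x)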
9457 for {
9458 off := auxIntToInt32(v.AuxInt)
9459 sym := auxToSym(v.Aux)
9460 x := v_0
9461 if v_1.Op != OpAMD64MOVLconst {
9462 break
9463 }
9464 scale := auxIntToInt32(v_1.AuxInt)
9465 if !(is32Bit(int64(off) + int64(scale)*8)) {
9466 break
9467 }
9468 v.reset(OpAMD64LEAQ)
9469 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9470 v.Aux = symToAux(sym)
9471 v.AddArg(x)
9472 return true
9473 }
9474 return false
9475 }
9476 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
9477 v_2 := v.Args[2]
9478 v_1 := v.Args[1]
9479 v_0 := v.Args[0]
9480
9481
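// match: (MOVBELstore [i] {s} p (BSWAPL x) m)
// result: (MOVLstore [i] {s} p x m)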
9482 for {
9483 i := auxIntToInt32(v.AuxInt)
9484 s := auxToSym(v.Aux)
9485 p := v_0
9486 if v_1.Op != OpAMD64BSWAPL {
9487 break
9488 }
9489 x := v_1.Args[0]
9490 m := v_2
9491 v.reset(OpAMD64MOVLstore)
9492 v.AuxInt = int32ToAuxInt(i)
9493 v.Aux = symToAux(s)
9494 v.AddArg3(p, x, m)
9495 return true
9496 }
9497 return false
9498 }
9499 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
9500 v_2 := v.Args[2]
9501 v_1 := v.Args[1]
9502 v_0 := v.Args[0]
9503
9504
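// match: (MOVBEQstore [i] {s} p (BSWAPQ x) m)
// result: (MOVQstore [i] {s} p x m)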
9505 for {
9506 i := auxIntToInt32(v.AuxInt)
9507 s := auxToSym(v.Aux)
9508 p := v_0
9509 if v_1.Op != OpAMD64BSWAPQ {
9510 break
9511 }
9512 x := v_1.Args[0]
9513 m := v_2
9514 v.reset(OpAMD64MOVQstore)
9515 v.AuxInt = int32ToAuxInt(i)
9516 v.Aux = symToAux(s)
9517 v.AddArg3(p, x, m)
9518 return true
9519 }
9520 return false
9521 }
9522 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9523 v_0 := v.Args[0]
9524 b := v.Block
9525
9526
9527
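// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)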
9528 for {
9529 x := v_0
9530 if x.Op != OpAMD64MOVBload {
9531 break
9532 }
9533 off := auxIntToInt32(x.AuxInt)
9534 sym := auxToSym(x.Aux)
9535 mem := x.Args[1]
9536 ptr := x.Args[0]
9537 if !(x.Uses == 1 && clobber(x)) {
9538 break
9539 }
9540 b = x.Block
9541 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9542 v.copyOf(v0)
9543 v0.AuxInt = int32ToAuxInt(off)
9544 v0.Aux = symToAux(sym)
9545 v0.AddArg2(ptr, mem)
9546 return true
9547 }
9548
9549
9550
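// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)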
9551 for {
9552 x := v_0
9553 if x.Op != OpAMD64MOVWload {
9554 break
9555 }
9556 off := auxIntToInt32(x.AuxInt)
9557 sym := auxToSym(x.Aux)
9558 mem := x.Args[1]
9559 ptr := x.Args[0]
9560 if !(x.Uses == 1 && clobber(x)) {
9561 break
9562 }
9563 b = x.Block
9564 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9565 v.copyOf(v0)
9566 v0.AuxInt = int32ToAuxInt(off)
9567 v0.Aux = symToAux(sym)
9568 v0.AddArg2(ptr, mem)
9569 return true
9570 }
9571
9572
9573
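// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)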
9574 for {
9575 x := v_0
9576 if x.Op != OpAMD64MOVLload {
9577 break
9578 }
9579 off := auxIntToInt32(x.AuxInt)
9580 sym := auxToSym(x.Aux)
9581 mem := x.Args[1]
9582 ptr := x.Args[0]
9583 if !(x.Uses == 1 && clobber(x)) {
9584 break
9585 }
9586 b = x.Block
9587 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9588 v.copyOf(v0)
9589 v0.AuxInt = int32ToAuxInt(off)
9590 v0.Aux = symToAux(sym)
9591 v0.AddArg2(ptr, mem)
9592 return true
9593 }
9594
9595
9596
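// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)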
9597 for {
9598 x := v_0
9599 if x.Op != OpAMD64MOVQload {
9600 break
9601 }
9602 off := auxIntToInt32(x.AuxInt)
9603 sym := auxToSym(x.Aux)
9604 mem := x.Args[1]
9605 ptr := x.Args[0]
9606 if !(x.Uses == 1 && clobber(x)) {
9607 break
9608 }
9609 b = x.Block
9610 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9611 v.copyOf(v0)
9612 v0.AuxInt = int32ToAuxInt(off)
9613 v0.Aux = symToAux(sym)
9614 v0.AddArg2(ptr, mem)
9615 return true
9616 }
9617
9618
9619
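// match: (MOVBQSX (ANDLconst [c] x))
// cond: c & 0x80 == 0
// result: (ANDLconst [c & 0x7f] x)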
9620 for {
9621 if v_0.Op != OpAMD64ANDLconst {
9622 break
9623 }
9624 c := auxIntToInt32(v_0.AuxInt)
9625 x := v_0.Args[0]
9626 if !(c&0x80 == 0) {
9627 break
9628 }
9629 v.reset(OpAMD64ANDLconst)
9630 v.AuxInt = int32ToAuxInt(c & 0x7f)
9631 v.AddArg(x)
9632 return true
9633 }
9634
9635
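// match: (MOVBQSX (MOVBQSX x))
// result: (MOVBQSX x)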
9636 for {
9637 if v_0.Op != OpAMD64MOVBQSX {
9638 break
9639 }
9640 x := v_0.Args[0]
9641 v.reset(OpAMD64MOVBQSX)
9642 v.AddArg(x)
9643 return true
9644 }
9645 return false
9646 }
9647 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9648 v_1 := v.Args[1]
9649 v_0 := v.Args[0]
9650
9651
9652
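// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBQSX x)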
9653 for {
9654 off := auxIntToInt32(v.AuxInt)
9655 sym := auxToSym(v.Aux)
9656 ptr := v_0
9657 if v_1.Op != OpAMD64MOVBstore {
9658 break
9659 }
9660 off2 := auxIntToInt32(v_1.AuxInt)
9661 sym2 := auxToSym(v_1.Aux)
9662 x := v_1.Args[1]
9663 ptr2 := v_1.Args[0]
9664 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9665 break
9666 }
9667 v.reset(OpAMD64MOVBQSX)
9668 v.AddArg(x)
9669 return true
9670 }
9671
9672
9673
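// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)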
9674 for {
9675 off1 := auxIntToInt32(v.AuxInt)
9676 sym1 := auxToSym(v.Aux)
9677 if v_0.Op != OpAMD64LEAQ {
9678 break
9679 }
9680 off2 := auxIntToInt32(v_0.AuxInt)
9681 sym2 := auxToSym(v_0.Aux)
9682 base := v_0.Args[0]
9683 mem := v_1
9684 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9685 break
9686 }
9687 v.reset(OpAMD64MOVBQSXload)
9688 v.AuxInt = int32ToAuxInt(off1 + off2)
9689 v.Aux = symToAux(mergeSym(sym1, sym2))
9690 v.AddArg2(base, mem)
9691 return true
9692 }
9693 return false
9694 }
9695 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
9696 v_0 := v.Args[0]
9697 b := v.Block
9698
9699
9700
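// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)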
9701 for {
9702 x := v_0
9703 if x.Op != OpAMD64MOVBload {
9704 break
9705 }
9706 off := auxIntToInt32(x.AuxInt)
9707 sym := auxToSym(x.Aux)
9708 mem := x.Args[1]
9709 ptr := x.Args[0]
9710 if !(x.Uses == 1 && clobber(x)) {
9711 break
9712 }
9713 b = x.Block
9714 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9715 v.copyOf(v0)
9716 v0.AuxInt = int32ToAuxInt(off)
9717 v0.Aux = symToAux(sym)
9718 v0.AddArg2(ptr, mem)
9719 return true
9720 }
9721
9722
9723
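// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)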
9724 for {
9725 x := v_0
9726 if x.Op != OpAMD64MOVWload {
9727 break
9728 }
9729 off := auxIntToInt32(x.AuxInt)
9730 sym := auxToSym(x.Aux)
9731 mem := x.Args[1]
9732 ptr := x.Args[0]
9733 if !(x.Uses == 1 && clobber(x)) {
9734 break
9735 }
9736 b = x.Block
9737 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9738 v.copyOf(v0)
9739 v0.AuxInt = int32ToAuxInt(off)
9740 v0.Aux = symToAux(sym)
9741 v0.AddArg2(ptr, mem)
9742 return true
9743 }
9744
9745
9746
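// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)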
9747 for {
9748 x := v_0
9749 if x.Op != OpAMD64MOVLload {
9750 break
9751 }
9752 off := auxIntToInt32(x.AuxInt)
9753 sym := auxToSym(x.Aux)
9754 mem := x.Args[1]
9755 ptr := x.Args[0]
9756 if !(x.Uses == 1 && clobber(x)) {
9757 break
9758 }
9759 b = x.Block
9760 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9761 v.copyOf(v0)
9762 v0.AuxInt = int32ToAuxInt(off)
9763 v0.Aux = symToAux(sym)
9764 v0.AddArg2(ptr, mem)
9765 return true
9766 }
9767
9768
9769
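// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)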
9770 for {
9771 x := v_0
9772 if x.Op != OpAMD64MOVQload {
9773 break
9774 }
9775 off := auxIntToInt32(x.AuxInt)
9776 sym := auxToSym(x.Aux)
9777 mem := x.Args[1]
9778 ptr := x.Args[0]
9779 if !(x.Uses == 1 && clobber(x)) {
9780 break
9781 }
9782 b = x.Block
9783 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9784 v.copyOf(v0)
9785 v0.AuxInt = int32ToAuxInt(off)
9786 v0.Aux = symToAux(sym)
9787 v0.AddArg2(ptr, mem)
9788 return true
9789 }
9790
9791
9792
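// match: (MOVBQZX x)
// cond: zeroUpper56Bits(x, 3)
// result: x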
9793 for {
9794 x := v_0
9795 if !(zeroUpper56Bits(x, 3)) {
9796 break
9797 }
9798 v.copyOf(x)
9799 return true
9800 }
9801
9802
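// match: (MOVBQZX (ANDLconst [c] x))
// result: (ANDLconst [c & 0xff] x)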
9803 for {
9804 if v_0.Op != OpAMD64ANDLconst {
9805 break
9806 }
9807 c := auxIntToInt32(v_0.AuxInt)
9808 x := v_0.Args[0]
9809 v.reset(OpAMD64ANDLconst)
9810 v.AuxInt = int32ToAuxInt(c & 0xff)
9811 v.AddArg(x)
9812 return true
9813 }
9814
9815
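// match: (MOVBQZX (MOVBQZX x))
// result: (MOVBQZX x)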
9816 for {
9817 if v_0.Op != OpAMD64MOVBQZX {
9818 break
9819 }
9820 x := v_0.Args[0]
9821 v.reset(OpAMD64MOVBQZX)
9822 v.AddArg(x)
9823 return true
9824 }
9825 return false
9826 }
9827 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
9828 v_1 := v.Args[1]
9829 v_0 := v.Args[0]
9830
9831
9832
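// match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBatomicload [off1+off2] {sym} ptr mem)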
9833 for {
9834 off1 := auxIntToInt32(v.AuxInt)
9835 sym := auxToSym(v.Aux)
9836 if v_0.Op != OpAMD64ADDQconst {
9837 break
9838 }
9839 off2 := auxIntToInt32(v_0.AuxInt)
9840 ptr := v_0.Args[0]
9841 mem := v_1
9842 if !(is32Bit(int64(off1) + int64(off2))) {
9843 break
9844 }
9845 v.reset(OpAMD64MOVBatomicload)
9846 v.AuxInt = int32ToAuxInt(off1 + off2)
9847 v.Aux = symToAux(sym)
9848 v.AddArg2(ptr, mem)
9849 return true
9850 }
9851
9852
9853
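// match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)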
9854 for {
9855 off1 := auxIntToInt32(v.AuxInt)
9856 sym1 := auxToSym(v.Aux)
9857 if v_0.Op != OpAMD64LEAQ {
9858 break
9859 }
9860 off2 := auxIntToInt32(v_0.AuxInt)
9861 sym2 := auxToSym(v_0.Aux)
9862 ptr := v_0.Args[0]
9863 mem := v_1
9864 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9865 break
9866 }
9867 v.reset(OpAMD64MOVBatomicload)
9868 v.AuxInt = int32ToAuxInt(off1 + off2)
9869 v.Aux = symToAux(mergeSym(sym1, sym2))
9870 v.AddArg2(ptr, mem)
9871 return true
9872 }
9873 return false
9874 }
9875 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
9876 v_1 := v.Args[1]
9877 v_0 := v.Args[0]
9878
9879
9880
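// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBQZX x)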
9881 for {
9882 off := auxIntToInt32(v.AuxInt)
9883 sym := auxToSym(v.Aux)
9884 ptr := v_0
9885 if v_1.Op != OpAMD64MOVBstore {
9886 break
9887 }
9888 off2 := auxIntToInt32(v_1.AuxInt)
9889 sym2 := auxToSym(v_1.Aux)
9890 x := v_1.Args[1]
9891 ptr2 := v_1.Args[0]
9892 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9893 break
9894 }
9895 v.reset(OpAMD64MOVBQZX)
9896 v.AddArg(x)
9897 return true
9898 }
9899
9900
9901
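// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBload [off1+off2] {sym} ptr mem)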
9902 for {
9903 off1 := auxIntToInt32(v.AuxInt)
9904 sym := auxToSym(v.Aux)
9905 if v_0.Op != OpAMD64ADDQconst {
9906 break
9907 }
9908 off2 := auxIntToInt32(v_0.AuxInt)
9909 ptr := v_0.Args[0]
9910 mem := v_1
9911 if !(is32Bit(int64(off1) + int64(off2))) {
9912 break
9913 }
9914 v.reset(OpAMD64MOVBload)
9915 v.AuxInt = int32ToAuxInt(off1 + off2)
9916 v.Aux = symToAux(sym)
9917 v.AddArg2(ptr, mem)
9918 return true
9919 }
9920
9921
9922
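// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)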
9923 for {
9924 off1 := auxIntToInt32(v.AuxInt)
9925 sym1 := auxToSym(v.Aux)
9926 if v_0.Op != OpAMD64LEAQ {
9927 break
9928 }
9929 off2 := auxIntToInt32(v_0.AuxInt)
9930 sym2 := auxToSym(v_0.Aux)
9931 base := v_0.Args[0]
9932 mem := v_1
9933 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9934 break
9935 }
9936 v.reset(OpAMD64MOVBload)
9937 v.AuxInt = int32ToAuxInt(off1 + off2)
9938 v.Aux = symToAux(mergeSym(sym1, sym2))
9939 v.AddArg2(base, mem)
9940 return true
9941 }
9942
9943
9944
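// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int32(read8(sym, int64(off)))])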
9945 for {
9946 off := auxIntToInt32(v.AuxInt)
9947 sym := auxToSym(v.Aux)
9948 if v_0.Op != OpSB || !(symIsRO(sym)) {
9949 break
9950 }
9951 v.reset(OpAMD64MOVLconst)
9952 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
9953 return true
9954 }
9955 return false
9956 }
9957 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
9958 v_2 := v.Args[2]
9959 v_1 := v.Args[1]
9960 v_0 := v.Args[0]
9961 b := v.Block
9962 typ := &b.Func.Config.Types
9963
9964
9965
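// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
// cond: y.Uses == 1
// result: (SETLstore [off] {sym} ptr x mem)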
9966 for {
9967 off := auxIntToInt32(v.AuxInt)
9968 sym := auxToSym(v.Aux)
9969 ptr := v_0
9970 y := v_1
9971 if y.Op != OpAMD64SETL {
9972 break
9973 }
9974 x := y.Args[0]
9975 mem := v_2
9976 if !(y.Uses == 1) {
9977 break
9978 }
9979 v.reset(OpAMD64SETLstore)
9980 v.AuxInt = int32ToAuxInt(off)
9981 v.Aux = symToAux(sym)
9982 v.AddArg3(ptr, x, mem)
9983 return true
9984 }
9985
9986
9987
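// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
// cond: y.Uses == 1
// result: (SETLEstore [off] {sym} ptr x mem)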
9988 for {
9989 off := auxIntToInt32(v.AuxInt)
9990 sym := auxToSym(v.Aux)
9991 ptr := v_0
9992 y := v_1
9993 if y.Op != OpAMD64SETLE {
9994 break
9995 }
9996 x := y.Args[0]
9997 mem := v_2
9998 if !(y.Uses == 1) {
9999 break
10000 }
10001 v.reset(OpAMD64SETLEstore)
10002 v.AuxInt = int32ToAuxInt(off)
10003 v.Aux = symToAux(sym)
10004 v.AddArg3(ptr, x, mem)
10005 return true
10006 }
10007
10008
10009
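// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
// cond: y.Uses == 1
// result: (SETGstore [off] {sym} ptr x mem)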
10010 for {
10011 off := auxIntToInt32(v.AuxInt)
10012 sym := auxToSym(v.Aux)
10013 ptr := v_0
10014 y := v_1
10015 if y.Op != OpAMD64SETG {
10016 break
10017 }
10018 x := y.Args[0]
10019 mem := v_2
10020 if !(y.Uses == 1) {
10021 break
10022 }
10023 v.reset(OpAMD64SETGstore)
10024 v.AuxInt = int32ToAuxInt(off)
10025 v.Aux = symToAux(sym)
10026 v.AddArg3(ptr, x, mem)
10027 return true
10028 }
10029
10030
10031
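// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
// cond: y.Uses == 1
// result: (SETGEstore [off] {sym} ptr x mem)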
10032 for {
10033 off := auxIntToInt32(v.AuxInt)
10034 sym := auxToSym(v.Aux)
10035 ptr := v_0
10036 y := v_1
10037 if y.Op != OpAMD64SETGE {
10038 break
10039 }
10040 x := y.Args[0]
10041 mem := v_2
10042 if !(y.Uses == 1) {
10043 break
10044 }
10045 v.reset(OpAMD64SETGEstore)
10046 v.AuxInt = int32ToAuxInt(off)
10047 v.Aux = symToAux(sym)
10048 v.AddArg3(ptr, x, mem)
10049 return true
10050 }
10051
10052
10053
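// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
// cond: y.Uses == 1
// result: (SETEQstore [off] {sym} ptr x mem)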
10054 for {
10055 off := auxIntToInt32(v.AuxInt)
10056 sym := auxToSym(v.Aux)
10057 ptr := v_0
10058 y := v_1
10059 if y.Op != OpAMD64SETEQ {
10060 break
10061 }
10062 x := y.Args[0]
10063 mem := v_2
10064 if !(y.Uses == 1) {
10065 break
10066 }
10067 v.reset(OpAMD64SETEQstore)
10068 v.AuxInt = int32ToAuxInt(off)
10069 v.Aux = symToAux(sym)
10070 v.AddArg3(ptr, x, mem)
10071 return true
10072 }
10073
10074
10075
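// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
// cond: y.Uses == 1
// result: (SETNEstore [off] {sym} ptr x mem)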
10076 for {
10077 off := auxIntToInt32(v.AuxInt)
10078 sym := auxToSym(v.Aux)
10079 ptr := v_0
10080 y := v_1
10081 if y.Op != OpAMD64SETNE {
10082 break
10083 }
10084 x := y.Args[0]
10085 mem := v_2
10086 if !(y.Uses == 1) {
10087 break
10088 }
10089 v.reset(OpAMD64SETNEstore)
10090 v.AuxInt = int32ToAuxInt(off)
10091 v.Aux = symToAux(sym)
10092 v.AddArg3(ptr, x, mem)
10093 return true
10094 }
10095
10096
10097
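// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
// cond: y.Uses == 1
// result: (SETBstore [off] {sym} ptr x mem)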
10098 for {
10099 off := auxIntToInt32(v.AuxInt)
10100 sym := auxToSym(v.Aux)
10101 ptr := v_0
10102 y := v_1
10103 if y.Op != OpAMD64SETB {
10104 break
10105 }
10106 x := y.Args[0]
10107 mem := v_2
10108 if !(y.Uses == 1) {
10109 break
10110 }
10111 v.reset(OpAMD64SETBstore)
10112 v.AuxInt = int32ToAuxInt(off)
10113 v.Aux = symToAux(sym)
10114 v.AddArg3(ptr, x, mem)
10115 return true
10116 }
10117
10118
10119
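// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
// cond: y.Uses == 1
// result: (SETBEstore [off] {sym} ptr x mem)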
10120 for {
10121 off := auxIntToInt32(v.AuxInt)
10122 sym := auxToSym(v.Aux)
10123 ptr := v_0
10124 y := v_1
10125 if y.Op != OpAMD64SETBE {
10126 break
10127 }
10128 x := y.Args[0]
10129 mem := v_2
10130 if !(y.Uses == 1) {
10131 break
10132 }
10133 v.reset(OpAMD64SETBEstore)
10134 v.AuxInt = int32ToAuxInt(off)
10135 v.Aux = symToAux(sym)
10136 v.AddArg3(ptr, x, mem)
10137 return true
10138 }
10139
10140
10141
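// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
// cond: y.Uses == 1
// result: (SETAstore [off] {sym} ptr x mem)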
10142 for {
10143 off := auxIntToInt32(v.AuxInt)
10144 sym := auxToSym(v.Aux)
10145 ptr := v_0
10146 y := v_1
10147 if y.Op != OpAMD64SETA {
10148 break
10149 }
10150 x := y.Args[0]
10151 mem := v_2
10152 if !(y.Uses == 1) {
10153 break
10154 }
10155 v.reset(OpAMD64SETAstore)
10156 v.AuxInt = int32ToAuxInt(off)
10157 v.Aux = symToAux(sym)
10158 v.AddArg3(ptr, x, mem)
10159 return true
10160 }
10161
10162
10163
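// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
// cond: y.Uses == 1
// result: (SETAEstore [off] {sym} ptr x mem)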
10164 for {
10165 off := auxIntToInt32(v.AuxInt)
10166 sym := auxToSym(v.Aux)
10167 ptr := v_0
10168 y := v_1
10169 if y.Op != OpAMD64SETAE {
10170 break
10171 }
10172 x := y.Args[0]
10173 mem := v_2
10174 if !(y.Uses == 1) {
10175 break
10176 }
10177 v.reset(OpAMD64SETAEstore)
10178 v.AuxInt = int32ToAuxInt(off)
10179 v.Aux = symToAux(sym)
10180 v.AddArg3(ptr, x, mem)
10181 return true
10182 }
10183
10184
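// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)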
10185 for {
10186 off := auxIntToInt32(v.AuxInt)
10187 sym := auxToSym(v.Aux)
10188 ptr := v_0
10189 if v_1.Op != OpAMD64MOVBQSX {
10190 break
10191 }
10192 x := v_1.Args[0]
10193 mem := v_2
10194 v.reset(OpAMD64MOVBstore)
10195 v.AuxInt = int32ToAuxInt(off)
10196 v.Aux = symToAux(sym)
10197 v.AddArg3(ptr, x, mem)
10198 return true
10199 }
10200
10201
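// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)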
10202 for {
10203 off := auxIntToInt32(v.AuxInt)
10204 sym := auxToSym(v.Aux)
10205 ptr := v_0
10206 if v_1.Op != OpAMD64MOVBQZX {
10207 break
10208 }
10209 x := v_1.Args[0]
10210 mem := v_2
10211 v.reset(OpAMD64MOVBstore)
10212 v.AuxInt = int32ToAuxInt(off)
10213 v.Aux = symToAux(sym)
10214 v.AddArg3(ptr, x, mem)
10215 return true
10216 }
10217
10218
10219
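// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVBstore [off1+off2] {sym} ptr val mem)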
10220 for {
10221 off1 := auxIntToInt32(v.AuxInt)
10222 sym := auxToSym(v.Aux)
10223 if v_0.Op != OpAMD64ADDQconst {
10224 break
10225 }
10226 off2 := auxIntToInt32(v_0.AuxInt)
10227 ptr := v_0.Args[0]
10228 val := v_1
10229 mem := v_2
10230 if !(is32Bit(int64(off1) + int64(off2))) {
10231 break
10232 }
10233 v.reset(OpAMD64MOVBstore)
10234 v.AuxInt = int32ToAuxInt(off1 + off2)
10235 v.Aux = symToAux(sym)
10236 v.AddArg3(ptr, val, mem)
10237 return true
10238 }
10239
10240
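// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)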
10241 for {
10242 off := auxIntToInt32(v.AuxInt)
10243 sym := auxToSym(v.Aux)
10244 ptr := v_0
10245 if v_1.Op != OpAMD64MOVLconst {
10246 break
10247 }
10248 c := auxIntToInt32(v_1.AuxInt)
10249 mem := v_2
10250 v.reset(OpAMD64MOVBstoreconst)
10251 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10252 v.Aux = symToAux(sym)
10253 v.AddArg2(ptr, mem)
10254 return true
10255 }
10256
10257
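// match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
// result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)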
10258 for {
10259 off := auxIntToInt32(v.AuxInt)
10260 sym := auxToSym(v.Aux)
10261 ptr := v_0
10262 if v_1.Op != OpAMD64MOVQconst {
10263 break
10264 }
10265 c := auxIntToInt64(v_1.AuxInt)
10266 mem := v_2
10267 v.reset(OpAMD64MOVBstoreconst)
10268 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10269 v.Aux = symToAux(sym)
10270 v.AddArg2(ptr, mem)
10271 return true
10272 }
10273
10274
10275
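// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)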
10276 for {
10277 off1 := auxIntToInt32(v.AuxInt)
10278 sym1 := auxToSym(v.Aux)
10279 if v_0.Op != OpAMD64LEAQ {
10280 break
10281 }
10282 off2 := auxIntToInt32(v_0.AuxInt)
10283 sym2 := auxToSym(v_0.Aux)
10284 base := v_0.Args[0]
10285 val := v_1
10286 mem := v_2
10287 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10288 break
10289 }
10290 v.reset(OpAMD64MOVBstore)
10291 v.AuxInt = int32ToAuxInt(off1 + off2)
10292 v.Aux = symToAux(mergeSym(sym1, sym2))
10293 v.AddArg3(base, val, mem)
10294 return true
10295 }
10296
10297
10298
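// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && clobber(x0)
// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)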
10299 for {
10300 i := auxIntToInt32(v.AuxInt)
10301 s := auxToSym(v.Aux)
10302 p := v_0
10303 w := v_1
10304 x0 := v_2
10305 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
10306 break
10307 }
10308 mem := x0.Args[2]
10309 if p != x0.Args[0] {
10310 break
10311 }
10312 x0_1 := x0.Args[1]
10313 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10314 break
10315 }
10316 v.reset(OpAMD64MOVWstore)
10317 v.AuxInt = int32ToAuxInt(i - 1)
10318 v.Aux = symToAux(s)
10319 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10320 v0.AuxInt = int8ToAuxInt(8)
10321 v0.AddArg(w)
10322 v.AddArg3(p, v0, mem)
10323 return true
10324 }
10325
10326
10327
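// match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
// cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
// result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)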
10328 for {
10329 i := auxIntToInt32(v.AuxInt)
10330 s := auxToSym(v.Aux)
10331 p1 := v_0
10332 w := v_1
10333 x0 := v_2
10334 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10335 break
10336 }
10337 mem := x0.Args[2]
10338 p0 := x0.Args[0]
10339 x0_1 := x0.Args[1]
10340 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10341 break
10342 }
10343 v.reset(OpAMD64MOVWstore)
10344 v.AuxInt = int32ToAuxInt(i)
10345 v.Aux = symToAux(s)
10346 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10347 v0.AuxInt = int8ToAuxInt(8)
10348 v0.AddArg(w)
10349 v.AddArg3(p0, v0, mem)
10350 return true
10351 }
10352
10353
10354
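// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)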
10355 for {
10356 i := auxIntToInt32(v.AuxInt)
10357 s := auxToSym(v.Aux)
10358 p := v_0
10359 w := v_1
10360 x2 := v_2
10361 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
10362 break
10363 }
10364 _ = x2.Args[2]
10365 if p != x2.Args[0] {
10366 break
10367 }
10368 x2_1 := x2.Args[1]
10369 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10370 break
10371 }
10372 x1 := x2.Args[2]
10373 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
10374 break
10375 }
10376 _ = x1.Args[2]
10377 if p != x1.Args[0] {
10378 break
10379 }
10380 x1_1 := x1.Args[1]
10381 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10382 break
10383 }
10384 x0 := x1.Args[2]
10385 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
10386 break
10387 }
10388 mem := x0.Args[2]
10389 if p != x0.Args[0] {
10390 break
10391 }
10392 x0_1 := x0.Args[1]
10393 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10394 break
10395 }
10396 v.reset(OpAMD64MOVLstore)
10397 v.AuxInt = int32ToAuxInt(i - 3)
10398 v.Aux = symToAux(s)
10399 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10400 v0.AddArg(w)
10401 v.AddArg3(p, v0, mem)
10402 return true
10403 }
10404
10405
10406
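// match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
// result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)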
10407 for {
10408 i := auxIntToInt32(v.AuxInt)
10409 s := auxToSym(v.Aux)
10410 p3 := v_0
10411 w := v_1
10412 x2 := v_2
10413 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10414 break
10415 }
10416 _ = x2.Args[2]
10417 p2 := x2.Args[0]
10418 x2_1 := x2.Args[1]
10419 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10420 break
10421 }
10422 x1 := x2.Args[2]
10423 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10424 break
10425 }
10426 _ = x1.Args[2]
10427 p1 := x1.Args[0]
10428 x1_1 := x1.Args[1]
10429 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10430 break
10431 }
10432 x0 := x1.Args[2]
10433 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10434 break
10435 }
10436 mem := x0.Args[2]
10437 p0 := x0.Args[0]
10438 x0_1 := x0.Args[1]
10439 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10440 break
10441 }
10442 v.reset(OpAMD64MOVLstore)
10443 v.AuxInt = int32ToAuxInt(i)
10444 v.Aux = symToAux(s)
10445 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10446 v0.AddArg(w)
10447 v.AddArg3(p0, v0, mem)
10448 return true
10449 }
10450
10451
10452
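// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)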
10453 for {
10454 i := auxIntToInt32(v.AuxInt)
10455 s := auxToSym(v.Aux)
10456 p := v_0
10457 w := v_1
10458 x6 := v_2
10459 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
10460 break
10461 }
10462 _ = x6.Args[2]
10463 if p != x6.Args[0] {
10464 break
10465 }
10466 x6_1 := x6.Args[1]
10467 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10468 break
10469 }
10470 x5 := x6.Args[2]
10471 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
10472 break
10473 }
10474 _ = x5.Args[2]
10475 if p != x5.Args[0] {
10476 break
10477 }
10478 x5_1 := x5.Args[1]
10479 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10480 break
10481 }
10482 x4 := x5.Args[2]
10483 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
10484 break
10485 }
10486 _ = x4.Args[2]
10487 if p != x4.Args[0] {
10488 break
10489 }
10490 x4_1 := x4.Args[1]
10491 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10492 break
10493 }
10494 x3 := x4.Args[2]
10495 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
10496 break
10497 }
10498 _ = x3.Args[2]
10499 if p != x3.Args[0] {
10500 break
10501 }
10502 x3_1 := x3.Args[1]
10503 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10504 break
10505 }
10506 x2 := x3.Args[2]
10507 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
10508 break
10509 }
10510 _ = x2.Args[2]
10511 if p != x2.Args[0] {
10512 break
10513 }
10514 x2_1 := x2.Args[1]
10515 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10516 break
10517 }
10518 x1 := x2.Args[2]
10519 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
10520 break
10521 }
10522 _ = x1.Args[2]
10523 if p != x1.Args[0] {
10524 break
10525 }
10526 x1_1 := x1.Args[1]
10527 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10528 break
10529 }
10530 x0 := x1.Args[2]
10531 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
10532 break
10533 }
10534 mem := x0.Args[2]
10535 if p != x0.Args[0] {
10536 break
10537 }
10538 x0_1 := x0.Args[1]
10539 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10540 break
10541 }
10542 v.reset(OpAMD64MOVQstore)
10543 v.AuxInt = int32ToAuxInt(i - 7)
10544 v.Aux = symToAux(s)
10545 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10546 v0.AddArg(w)
10547 v.AddArg3(p, v0, mem)
10548 return true
10549 }
10550
10551
10552
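// match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)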
10553 for {
10554 i := auxIntToInt32(v.AuxInt)
10555 s := auxToSym(v.Aux)
10556 p7 := v_0
10557 w := v_1
10558 x6 := v_2
10559 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
10560 break
10561 }
10562 _ = x6.Args[2]
10563 p6 := x6.Args[0]
10564 x6_1 := x6.Args[1]
10565 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10566 break
10567 }
10568 x5 := x6.Args[2]
10569 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
10570 break
10571 }
10572 _ = x5.Args[2]
10573 p5 := x5.Args[0]
10574 x5_1 := x5.Args[1]
10575 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10576 break
10577 }
10578 x4 := x5.Args[2]
10579 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
10580 break
10581 }
10582 _ = x4.Args[2]
10583 p4 := x4.Args[0]
10584 x4_1 := x4.Args[1]
10585 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10586 break
10587 }
10588 x3 := x4.Args[2]
10589 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
10590 break
10591 }
10592 _ = x3.Args[2]
10593 p3 := x3.Args[0]
10594 x3_1 := x3.Args[1]
10595 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10596 break
10597 }
10598 x2 := x3.Args[2]
10599 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10600 break
10601 }
10602 _ = x2.Args[2]
10603 p2 := x2.Args[0]
10604 x2_1 := x2.Args[1]
10605 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10606 break
10607 }
10608 x1 := x2.Args[2]
10609 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10610 break
10611 }
10612 _ = x1.Args[2]
10613 p1 := x1.Args[0]
10614 x1_1 := x1.Args[1]
10615 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10616 break
10617 }
10618 x0 := x1.Args[2]
10619 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10620 break
10621 }
10622 mem := x0.Args[2]
10623 p0 := x0.Args[0]
10624 x0_1 := x0.Args[1]
10625 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10626 break
10627 }
10628 v.reset(OpAMD64MOVQstore)
10629 v.AuxInt = int32ToAuxInt(i)
10630 v.Aux = symToAux(s)
10631 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10632 v0.AddArg(w)
10633 v.AddArg3(p0, v0, mem)
10634 return true
10635 }
10636
10637
10638
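// match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)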
10639 for {
10640 i := auxIntToInt32(v.AuxInt)
10641 s := auxToSym(v.Aux)
10642 p := v_0
10643 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
10644 break
10645 }
10646 w := v_1.Args[0]
10647 x := v_2
10648 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10649 break
10650 }
10651 mem := x.Args[2]
10652 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10653 break
10654 }
10655 v.reset(OpAMD64MOVWstore)
10656 v.AuxInt = int32ToAuxInt(i - 1)
10657 v.Aux = symToAux(s)
10658 v.AddArg3(p, w, mem)
10659 return true
10660 }
10661
10662
10663
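// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)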
10664 for {
10665 i := auxIntToInt32(v.AuxInt)
10666 s := auxToSym(v.Aux)
10667 p := v_0
10668 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
10669 break
10670 }
10671 w := v_1.Args[0]
10672 x := v_2
10673 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10674 break
10675 }
10676 mem := x.Args[2]
10677 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10678 break
10679 }
10680 v.reset(OpAMD64MOVWstore)
10681 v.AuxInt = int32ToAuxInt(i - 1)
10682 v.Aux = symToAux(s)
10683 v.AddArg3(p, w, mem)
10684 return true
10685 }
10686
10687
10688
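// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w mem)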
10689 for {
10690 i := auxIntToInt32(v.AuxInt)
10691 s := auxToSym(v.Aux)
10692 p := v_0
10693 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
10694 break
10695 }
10696 w := v_1.Args[0]
10697 x := v_2
10698 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10699 break
10700 }
10701 mem := x.Args[2]
10702 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10703 break
10704 }
10705 v.reset(OpAMD64MOVWstore)
10706 v.AuxInt = int32ToAuxInt(i - 1)
10707 v.Aux = symToAux(s)
10708 v.AddArg3(p, w, mem)
10709 return true
10710 }
10711
10712
10713
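// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)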
10714 for {
10715 i := auxIntToInt32(v.AuxInt)
10716 s := auxToSym(v.Aux)
10717 p := v_0
10718 w := v_1
10719 x := v_2
10720 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10721 break
10722 }
10723 mem := x.Args[2]
10724 if p != x.Args[0] {
10725 break
10726 }
10727 x_1 := x.Args[1]
10728 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10729 break
10730 }
10731 v.reset(OpAMD64MOVWstore)
10732 v.AuxInt = int32ToAuxInt(i)
10733 v.Aux = symToAux(s)
10734 v.AddArg3(p, w, mem)
10735 return true
10736 }
10737
10738
10739
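// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)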
10740 for {
10741 i := auxIntToInt32(v.AuxInt)
10742 s := auxToSym(v.Aux)
10743 p := v_0
10744 w := v_1
10745 x := v_2
10746 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10747 break
10748 }
10749 mem := x.Args[2]
10750 if p != x.Args[0] {
10751 break
10752 }
10753 x_1 := x.Args[1]
10754 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10755 break
10756 }
10757 v.reset(OpAMD64MOVWstore)
10758 v.AuxInt = int32ToAuxInt(i)
10759 v.Aux = symToAux(s)
10760 v.AddArg3(p, w, mem)
10761 return true
10762 }
10763
10764
10765
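// match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i] {s} p w mem)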
10766 for {
10767 i := auxIntToInt32(v.AuxInt)
10768 s := auxToSym(v.Aux)
10769 p := v_0
10770 w := v_1
10771 x := v_2
10772 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10773 break
10774 }
10775 mem := x.Args[2]
10776 if p != x.Args[0] {
10777 break
10778 }
10779 x_1 := x.Args[1]
10780 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10781 break
10782 }
10783 v.reset(OpAMD64MOVWstore)
10784 v.AuxInt = int32ToAuxInt(i)
10785 v.Aux = symToAux(s)
10786 v.AddArg3(p, w, mem)
10787 return true
10788 }
10789
10790
10791
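// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)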
10792 for {
10793 i := auxIntToInt32(v.AuxInt)
10794 s := auxToSym(v.Aux)
10795 p := v_0
10796 if v_1.Op != OpAMD64SHRLconst {
10797 break
10798 }
10799 j := auxIntToInt8(v_1.AuxInt)
10800 w := v_1.Args[0]
10801 x := v_2
10802 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10803 break
10804 }
10805 mem := x.Args[2]
10806 if p != x.Args[0] {
10807 break
10808 }
10809 w0 := x.Args[1]
10810 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
10811 break
10812 }
10813 v.reset(OpAMD64MOVWstore)
10814 v.AuxInt = int32ToAuxInt(i - 1)
10815 v.Aux = symToAux(s)
10816 v.AddArg3(p, w0, mem)
10817 return true
10818 }
10819
10820
10821
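// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWstore [i-1] {s} p w0 mem)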
10822 for {
10823 i := auxIntToInt32(v.AuxInt)
10824 s := auxToSym(v.Aux)
10825 p := v_0
10826 if v_1.Op != OpAMD64SHRQconst {
10827 break
10828 }
10829 j := auxIntToInt8(v_1.AuxInt)
10830 w := v_1.Args[0]
10831 x := v_2
10832 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10833 break
10834 }
10835 mem := x.Args[2]
10836 if p != x.Args[0] {
10837 break
10838 }
10839 w0 := x.Args[1]
10840 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
10841 break
10842 }
10843 v.reset(OpAMD64MOVWstore)
10844 v.AuxInt = int32ToAuxInt(i - 1)
10845 v.Aux = symToAux(s)
10846 v.AddArg3(p, w0, mem)
10847 return true
10848 }
10849
10850
10851
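// match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)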
10852 for {
10853 i := auxIntToInt32(v.AuxInt)
10854 s := auxToSym(v.Aux)
10855 p1 := v_0
10856 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
10857 break
10858 }
10859 w := v_1.Args[0]
10860 x := v_2
10861 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10862 break
10863 }
10864 mem := x.Args[2]
10865 p0 := x.Args[0]
10866 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10867 break
10868 }
10869 v.reset(OpAMD64MOVWstore)
10870 v.AuxInt = int32ToAuxInt(i)
10871 v.Aux = symToAux(s)
10872 v.AddArg3(p0, w, mem)
10873 return true
10874 }
10875
10876
10877
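// match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
// result: (MOVWstore [i] {s} p0 w mem)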
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p0 := v_0
		w := v_1
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p1 := x.Args[0]
		x_1 := x.Args[1]
		if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
	// result: (MOVWstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem))))
	// cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
	// result: (MOVQstore {s} p1 w mem)
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
			break
		}
		w := v_1.Args[0]
		x1 := v_2
		if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
			break
		}
		_ = x1.Args[2]
		if p1 != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
			break
		}
		x2 := x1.Args[2]
		if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
			break
		}
		_ = x2.Args[2]
		if p1 != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
			break
		}
		x3 := x2.Args[2]
		if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
			break
		}
		mem := x3.Args[2]
		if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.Aux = symToAux(s)
		v.AddArg3(p1, w, mem)
		return true
	}
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(i - 1)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(j - 1)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(i - 4)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(j - 4)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ADDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBELstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPL {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBELstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {