| /* |
| * Long integer shift. This is different from the generic 32/64-bit |
| * binary operations because vAA/vBB are 64-bit but vCC (the shift |
| * distance) is 32-bit. Also, Dalvik requires us to mask off the low |
| * 6 bits of the shift distance. |
| */ |
| /* shl-long vAA, vBB, vCC */ |
| /* The "& 63" mask is implemented implicitly: bit 5 (0x20) is tested |
| * explicitly below, and MIPS variable shifts use only the low 5 bits |
| * of the shift register. */ |
| FETCH(a0, 1) # a0 <- CCBB (second 16-bit code unit) |
| GET_OPA(t2) # t2 <- AA (destination register pair) |
| and a3, a0, 255 # a3 <- BB (source register pair index) |
| srl a0, a0, 8 # a0 <- CC (shift-count register index) |
| EAS2(a3, rFP, a3) # a3 <- &fp[BB] |
| GET_VREG(a2, a0) # a2 <- vCC (the 32-bit shift distance) |
| LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1 (lo/hi words of 64-bit operand) |
| |
| FETCH_ADVANCE_INST(2) # advance rPC, load rINST |
| GET_INST_OPCODE(t0) # extract opcode from rINST |
| |
| andi v1, a2, 0x20 # v1 <- shift & 0x20; nonzero iff (shift & 63) >= 32 |
| sll v0, a0, a2 # rlo<- alo << (shift&31); sllv keeps only low 5 bits |
| bnez v1, .L${opcode}_finish # shift >= 32: low word becomes zero, take tail path |
| not v1, a2 # rhi<- 31-shift (shift is 5b); ~shift == 31-shift in |
| # the low 5 bits; NOTE(review): likely executes in the |
| # branch delay slot (noreorder assumed, set elsewhere) — |
| # harmless on the taken path since v1 is not used there |
| srl a0, 1 # a0 <- alo >> 1 (two-operand form: srl a0, a0, 1) |
| srl a0, v1 # alo<- alo >> (32-(shift&31)), done as >>1 then |
| # >>(31-shift); these are the bits carried into the hi word |
| sll v1, a1, a2 # rhi<- ahi << (shift&31) |
| or v1, a0 # rhi<- rhi | alo (merge carried-over bits) |
| SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1 |
| %break |
| |
| .L${opcode}_finish: |
| # Shift distance in [32,63]: lo word is zero, hi word is |
| # alo << (shift&31), which was already computed into v0 above. |
| SET_VREG64_GOTO(zero, v0, t2, t0) # vAA <- 0, vAA+1 <- v0 |