[*] Binary protection state of ntpd
Full RELRO Canary found NX disabled PIE enabled No RPATH No RUNPATH No Symbols
[*] Function mmap tear down of ntpd
; assembly | /* r2dec pseudo code output */
| /* /logs/firmware/unblob_extracted/firmware_extract/4325012-58052244.squashfs_v4_le_extract/usr/sbin/ntpd @ 0x12e40 */
| #include <stdint.h>
|
; NOTE(review): this function matches the OpenBSD-style arc4random reseed/refill
; path (_rs_stir/_rs_rekey + ChaCha20 keystream generation). Evidence in this
; listing: getentropy() of 0x28 = 40 bytes (32-byte key + 8-byte IV), raise(9)
; (SIGKILL) if getentropy fails, the four ChaCha sigma constants
; 0x61707865/0x3320646e/0x79622d32/0x6b206574 ("expand 32-byte k"), a
; 10-iteration double-round loop (= 20 rounds) using rotr 16/20/24/25
; (equivalent to ChaCha's rotl 16/12/8/7), two mmap'd anonymous state blocks
; (8 bytes and 0x440 = 64 + 1024 bytes), and __register_atfork.
; (fcn) fcn.00012e40 () | void fcn_00012e40 () {
; --- prologue: set up $gp, reserve 0x1c0 stack bytes, save callee-saved regs;
; --- sp+0x12c is the 40-byte entropy buffer, *(s2) is the stack canary.
0x00012e40 lui gp, 2 |
0x00012e44 addiu gp, gp, -0xda0 |
0x00012e48 addu gp, gp, t9 | gp += t9;
0x00012e4c addiu sp, sp, -0x1c0 |
0x00012e50 addiu v0, sp, 0x12c | v0 = sp + aav.0x0000012c;
0x00012e54 sw s2, 0x1a0(sp) | *(var_1a0h) = s2;
0x00012e58 lw s2, -0x7b04(gp) | s2 = *((gp - 7873));
0x00012e5c sw gp, 0x18(sp) | *(var_18h) = gp;
0x00012e60 sw ra, 0x1bc(sp) | *(var_1bch) = ra;
0x00012e64 sw fp, 0x1b8(sp) | *(var_1b8h) = fp;
0x00012e68 sw s7, 0x1b4(sp) | *(var_1b4h) = s7;
0x00012e6c sw s6, 0x1b0(sp) | *(var_1b0h) = s6;
0x00012e70 sw s5, 0x1ac(sp) | *(var_1ach) = s5;
0x00012e74 sw s4, 0x1a8(sp) | *(var_1a8h) = s4;
0x00012e78 sw s3, 0x1a4(sp) | *(var_1a4h) = s3;
0x00012e7c sw s1, 0x19c(sp) | *(var_19ch) = s1;
0x00012e80 sw s0, 0x198(sp) | *(var_198h) = s0;
0x00012e84 move a0, v0 | a0 = v0;
0x00012e88 sw v0, 0x64(sp) | *(var_64h) = v0;
0x00012e8c lw v0, (s2) | v0 = *(s2);
; --- getentropy(sp+0x12c, 0x28): fetch 40 bytes = 32-byte ChaCha key + 8-byte IV.
; --- Stack canary value is stashed at var_194h for the epilogue check.
0x00012e90 lw t9, -0x7b0c(gp) | t9 = sym.imp.getentropy;
0x00012e94 addiu a1, zero, 0x28 | a1 = 0x28;
0x00012e98 sw v0, 0x194(sp) | *(var_194h) = v0;
0x00012e9c jalr t9 | t9 ();
0x00012ea0 nop |
0x00012ea4 addiu v1, zero, -1 | v1 = -1;
0x00012ea8 lw gp, 0x18(sp) | gp = *(var_18h);
; getentropy() == -1 -> label_3: raise(9)/SIGKILL, then retry (OpenBSD behavior).
| if (v0 == v1) {
0x00012eac beq v0, v1, 0x13778 | goto label_3;
| }
; s3 -> GOT slot; *(s3 - 0x34ec) is the 8-byte "rs" state pointer (rs_have/rs_count).
; If it is still NULL, jump to label_4 to mmap the state blocks first.
0x00012eb0 lw s3, -0x7fa8(gp) | s3 = *((gp - 8170));
0x00012eb4 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00012eb8 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
| if (v0 == 0) {
0x00012ebc beqz v0, 0x1379c | goto label_4;
| }
0x00012ec0 lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
| label_1:
; --- label_1: load the 0x440-byte "rsx" block: 16-word ChaCha state at s1,
; --- 0x400-byte keystream buffer at s1+0x40. Copy the 16 state words to
; --- sp+0x28..0x5c; s5/s6 hold the 64-bit block counter (state words 12/13).
0x00012ec4 addiu s0, zero, 0x400 | s0 = 0x400;
0x00012ec8 addiu fp, sp, 0x154 | fp = sp + 0x154;
0x00012ecc lw s1, -0x34f0(s4) | s1 = *((s4 - 3388));
0x00012ed0 addiu s7, s1, 0x40 | s7 = s1 + 0x40;
0x00012ed4 lw v0, (s1) | v0 = *(s1);
0x00012ed8 lw s5, 0x30(s1) | s5 = *((s1 + 12));
0x00012edc sw v0, 0x28(sp) | *(var_28h) = v0;
0x00012ee0 lw v0, 4(s1) | v0 = *((s1 + 1));
0x00012ee4 lw s6, 0x34(s1) | s6 = *((s1 + 13));
0x00012ee8 sw v0, 0x2c(sp) | *(var_2ch) = v0;
0x00012eec lw v0, 8(s1) | v0 = *((s1 + 2));
0x00012ef0 sw s5, 0x124(sp) | *(var_124h) = s5;
0x00012ef4 sw v0, 0x30(sp) | *(var_30h) = v0;
0x00012ef8 lw v0, 0xc(s1) | v0 = *((s1 + 3));
0x00012efc sw v0, 0x34(sp) | *(var_34h) = v0;
0x00012f00 lw v0, 0x10(s1) | v0 = *((s1 + 4));
0x00012f04 sw v0, 0x38(sp) | *(var_38h) = v0;
0x00012f08 lw v0, 0x14(s1) | v0 = *((s1 + 5));
0x00012f0c sw v0, 0x3c(sp) | *(var_3ch) = v0;
0x00012f10 lw v0, 0x18(s1) | v0 = *((s1 + 6));
0x00012f14 sw v0, 0x40(sp) | *(var_40h) = v0;
0x00012f18 lw v0, 0x1c(s1) | v0 = *((s1 + 7));
0x00012f1c sw v0, 0x44(sp) | *(var_44h) = v0;
0x00012f20 lw v0, 0x20(s1) | v0 = *((s1 + 8));
0x00012f24 sw v0, 0x48(sp) | *(var_48h) = v0;
0x00012f28 lw v0, 0x24(s1) | v0 = *((s1 + 9));
0x00012f2c sw v0, 0x4c(sp) | *(var_4ch) = v0;
0x00012f30 lw v0, 0x28(s1) | v0 = *((s1 + 10));
0x00012f34 sw v0, 0x50(sp) | *(var_50h) = v0;
0x00012f38 lw v0, 0x2c(s1) | v0 = *((s1 + 11));
0x00012f3c sw v0, 0x54(sp) | *(var_54h) = v0;
0x00012f40 lw v0, 0x38(s1) | v0 = *((s1 + 14));
0x00012f44 sw v0, 0x58(sp) | *(var_58h) = v0;
0x00012f48 lw v0, 0x3c(s1) | v0 = *((s1 + 15));
0x00012f4c sw v0, 0x5c(sp) | *(var_5ch) = v0;
0x00012f50 move v0, s4 | v0 = s4;
0x00012f54 sw s7, 0x60(sp) | *(var_60h) = s7;
; Register shuffle: s4 <- bytes remaining (0x400), s0 <- rsx base,
; s1 <- current output pointer (keystream buffer), s7 <- GOT base.
0x00012f58 move s4, s0 | s4 = s0;
0x00012f5c move s0, s1 | s0 = s1;
0x00012f60 move s1, s7 | s1 = s7;
0x00012f64 move s7, v0 | s7 = v0;
| label_0:
; --- label_0: one 64-byte ChaCha block. Load the 16 working words
; --- x0..x15 into t4,t3,t2,t7 / a1,a0,v1,a3 / t6,t5,t9,t8 / a2,v0,t0,t1.
0x00012f68 lw t8, 0x54(sp) | t8 = *(var_54h);
0x00012f6c lw t1, 0x5c(sp) | t1 = *(var_5ch);
0x00012f70 lw a3, 0x44(sp) | a3 = *(var_44h);
0x00012f74 lw t7, 0x34(sp) | t7 = *(var_34h);
0x00012f78 lw t9, 0x50(sp) | t9 = *(var_50h);
0x00012f7c lw t0, 0x58(sp) | t0 = *(var_58h);
0x00012f80 lw v1, 0x40(sp) | v1 = *(var_40h);
0x00012f84 lw t2, 0x30(sp) | t2 = *(var_30h);
0x00012f88 lw t5, 0x4c(sp) | t5 = *(var_4ch);
0x00012f8c lw a0, 0x3c(sp) | a0 = *(var_3ch);
0x00012f90 lw t3, 0x2c(sp) | t3 = *(var_2ch);
0x00012f94 lw t6, 0x48(sp) | t6 = *(var_48h);
0x00012f98 lw a1, 0x38(sp) | a1 = *(var_38h);
0x00012f9c lw t4, 0x28(sp) | t4 = *(var_28h);
0x00012fa0 move v0, s6 | v0 = s6;
0x00012fa4 move a2, s5 | a2 = s5;
; ra = 10 loop iterations; each iteration performs the 4 column
; quarter-rounds then the 4 diagonal quarter-rounds = one ChaCha
; "double round", so 10 iterations = ChaCha20's 20 rounds.
; rotr 0x10/0x14/0x18/0x19 are rotl 16/12/8/7 respectively.
0x00012fa8 addiu ra, zero, 0xa | ra = 0xa;
| do {
0x00012fac addu t4, t4, a1 | t4 += a1;
0x00012fb0 addu t3, t3, a0 | t3 += a0;
0x00012fb4 addu t2, t2, v1 | t2 += v1;
0x00012fb8 addu t7, t7, a3 | t7 += a3;
0x00012fbc xor a2, t4, a2 | a2 = t4 ^ a2;
0x00012fc0 xor v0, t3, v0 | v0 = t3 ^ v0;
0x00012fc4 xor t0, t2, t0 | t0 = t2 ^ t0;
0x00012fc8 xor t1, t1, t7 | t1 ^= t7;
0x00012fcc rotr a2, a2, 0x10 | __asm ("rotr a2, a2, 0x10");
0x00012fd0 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00012fd4 rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x00012fd8 rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x00012fdc addu t6, a2, t6 | t6 = a2 + t6;
0x00012fe0 addu t5, v0, t5 | t5 = v0 + t5;
0x00012fe4 addu t9, t9, t0 | t9 += t0;
0x00012fe8 addu t8, t8, t1 | t8 += t1;
0x00012fec xor a1, t6, a1 | a1 = t6 ^ a1;
0x00012ff0 xor a0, t5, a0 | a0 = t5 ^ a0;
0x00012ff4 xor v1, t9, v1 | v1 = t9 ^ v1;
0x00012ff8 xor a3, a3, t8 | a3 ^= t8;
0x00012ffc rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x00013000 rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x00013004 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x00013008 rotr a3, a3, 0x14 | __asm ("rotr a3, a3, 0x14");
0x0001300c addu t4, t4, a1 | t4 += a1;
0x00013010 addu t3, t3, a0 | t3 += a0;
0x00013014 addu t2, t2, v1 | t2 += v1;
0x00013018 addu t7, t7, a3 | t7 += a3;
0x0001301c xor a2, a2, t4 | a2 ^= t4;
0x00013020 xor v0, v0, t3 | v0 ^= t3;
0x00013024 xor t0, t0, t2 | t0 ^= t2;
0x00013028 xor t1, t1, t7 | t1 ^= t7;
0x0001302c rotr a2, a2, 0x18 | __asm ("rotr a2, a2, 0x18");
0x00013030 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x00013034 rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x00013038 rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x0001303c addu t6, t6, a2 | t6 += a2;
0x00013040 addu t5, t5, v0 | t5 += v0;
0x00013044 addu t9, t9, t0 | t9 += t0;
0x00013048 addu t8, t8, t1 | t8 += t1;
0x0001304c xor a1, a1, t6 | a1 ^= t6;
0x00013050 xor a0, a0, t5 | a0 ^= t5;
0x00013054 xor v1, v1, t9 | v1 ^= t9;
0x00013058 xor a3, a3, t8 | a3 ^= t8;
0x0001305c rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x00013060 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x00013064 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x00013068 rotr a3, a3, 0x19 | __asm ("rotr a3, a3, 0x19");
; Diagonal quarter-rounds (same add/xor/rotr pattern on shifted columns).
0x0001306c addu t4, t4, a0 | t4 += a0;
0x00013070 addu t3, t3, v1 | t3 += v1;
0x00013074 addu t2, t2, a3 | t2 += a3;
0x00013078 addu t7, a1, t7 | t7 = a1 + t7;
0x0001307c xor t1, t1, t4 | t1 ^= t4;
0x00013080 xor a2, a2, t3 | a2 ^= t3;
0x00013084 xor v0, v0, t2 | v0 ^= t2;
0x00013088 xor t0, t0, t7 | t0 ^= t7;
0x0001308c rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x00013090 rotr a2, a2, 0x10 | __asm ("rotr a2, a2, 0x10");
0x00013094 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00013098 rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x0001309c addu t9, t9, t1 | t9 += t1;
0x000130a0 addu t8, t8, a2 | t8 += a2;
0x000130a4 addu t6, t6, v0 | t6 += v0;
0x000130a8 addu t5, t5, t0 | t5 += t0;
0x000130ac xor a0, a0, t9 | a0 ^= t9;
0x000130b0 xor v1, v1, t8 | v1 ^= t8;
0x000130b4 xor a3, a3, t6 | a3 ^= t6;
0x000130b8 xor a1, a1, t5 | a1 ^= t5;
0x000130bc rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x000130c0 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x000130c4 rotr a3, a3, 0x14 | __asm ("rotr a3, a3, 0x14");
0x000130c8 rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x000130cc addu t4, t4, a0 | t4 += a0;
0x000130d0 addu t3, t3, v1 | t3 += v1;
0x000130d4 addu t2, t2, a3 | t2 += a3;
0x000130d8 addu t7, t7, a1 | t7 += a1;
0x000130dc xor t1, t1, t4 | t1 ^= t4;
0x000130e0 xor a2, a2, t3 | a2 ^= t3;
0x000130e4 xor v0, v0, t2 | v0 ^= t2;
0x000130e8 xor t0, t0, t7 | t0 ^= t7;
0x000130ec rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x000130f0 rotr a2, a2, 0x18 | __asm ("rotr a2, a2, 0x18");
0x000130f4 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x000130f8 rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x000130fc addu t9, t9, t1 | t9 += t1;
0x00013100 addu t8, t8, a2 | t8 += a2;
0x00013104 addu t6, t6, v0 | t6 += v0;
0x00013108 addu t5, t5, t0 | t5 += t0;
0x0001310c xor a0, a0, t9 | a0 ^= t9;
0x00013110 xor v1, v1, t8 | v1 ^= t8;
0x00013114 xor a3, a3, t6 | a3 ^= t6;
0x00013118 xor a1, a1, t5 | a1 ^= t5;
0x0001311c addiu ra, ra, -1 | ra += -1;
0x00013120 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x00013124 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x00013128 rotr a3, a3, 0x19 | __asm ("rotr a3, a3, 0x19");
0x0001312c rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x00013130 bnez ra, 0x12fac |
| } while (ra != 0);
; --- Feed-forward: add the original 16 input words (saved at sp+0x28..0x5c,
; --- counter in s5/s6) back into the worked state, per the ChaCha block function.
0x00013134 lw ra, 0x28(sp) | ra = *(var_28h);
0x00013138 addu a2, a2, s5 | a2 += s5;
0x0001313c addu ra, ra, t4 | ra += t4;
0x00013140 lw t4, 0x2c(sp) | t4 = *(var_2ch);
0x00013144 addiu s5, s5, 1 | s5++;
0x00013148 addu t4, t4, t3 | t4 += t3;
0x0001314c lw t3, 0x30(sp) | t3 = *(var_30h);
0x00013150 addu t3, t3, t2 | t3 += t2;
0x00013154 lw t2, 0x34(sp) | t2 = *(var_34h);
0x00013158 addu t2, t2, t7 | t2 += t7;
0x0001315c sw t2, 0x24(sp) | *(var_24h) = t2;
0x00013160 lw t2, 0x38(sp) | t2 = *(var_38h);
0x00013164 addu t2, t2, a1 | t2 += a1;
0x00013168 lw a1, 0x3c(sp) | a1 = *(var_3ch);
0x0001316c addu a1, a1, a0 | a1 += a0;
0x00013170 lw a0, 0x40(sp) | a0 = *(var_40h);
0x00013174 addu a0, a0, v1 | a0 += v1;
0x00013178 lw v1, 0x44(sp) | v1 = *(var_44h);
0x0001317c addu a3, v1, a3 | a3 = v1 + a3;
0x00013180 lw v1, 0x48(sp) | v1 = *(var_48h);
0x00013184 addu t6, v1, t6 | t6 = v1 + t6;
0x00013188 lw v1, 0x4c(sp) | v1 = *(var_4ch);
0x0001318c addu t5, v1, t5 | t5 = v1 + t5;
0x00013190 lw v1, 0x50(sp) | v1 = *(var_50h);
0x00013194 addu t9, v1, t9 | t9 = v1 + t9;
0x00013198 lw v1, 0x54(sp) | v1 = *(var_54h);
0x0001319c addu t8, v1, t8 | t8 = v1 + t8;
0x000131a0 addu v1, v0, s6 | v1 = v0 + s6;
0x000131a4 lw v0, 0x58(sp) | v0 = *(var_58h);
0x000131a8 addu t0, v0, t0 | t0 = v0 + t0;
0x000131ac lw v0, 0x5c(sp) | v0 = *(var_5ch);
0x000131b0 addu v0, v0, t1 | v0 += t1;
; 64-bit block counter: s5 is the low word (incremented above); when it
; wraps to 0, carry into the high word s6. (r2dec renders the bnez as an
; inverted "if (s5 == 0)" guard around the s6 increment.)
| if (s5 == 0) {
0x000131b4 bnez s5, 0x131bc |
0x000131b8 addiu s6, s6, 1 | s6++;
| }
; --- Little-endian serialization: split each of the 16 output words into
; --- bytes (>>8, >>16, >>24 spilled to sp+0x68..0x120) and store them
; --- byte-by-byte into the keystream buffer at s1 (offsets 0..0x3f).
0x000131bc srl t1, ra, 8 | t1 = ra >> 8;
0x000131c0 sw t1, 0x68(sp) | *(var_68h) = t1;
0x000131c4 srl t1, ra, 0x10 | t1 = ra >> 0x10;
0x000131c8 sw t1, 0x6c(sp) | *(var_6ch) = t1;
0x000131cc srl t1, ra, 0x18 | t1 = ra >> 0x18;
0x000131d0 sw t1, 0x70(sp) | *(var_70h) = t1;
0x000131d4 srl t1, t4, 8 | t1 = t4 >> 8;
0x000131d8 sw t1, 0x74(sp) | *(var_74h) = t1;
0x000131dc srl t1, t4, 0x10 | t1 = t4 >> 0x10;
0x000131e0 sw t1, 0x78(sp) | *(var_78h) = t1;
0x000131e4 srl t1, t4, 0x18 | t1 = t4 >> 0x18;
0x000131e8 sw t1, 0x7c(sp) | *(var_7ch) = t1;
0x000131ec srl t1, t3, 8 | t1 = t3 >> 8;
0x000131f0 lw t7, 0x24(sp) | t7 = *(var_24h);
0x000131f4 sw t1, 0x80(sp) | *(var_80h) = t1;
0x000131f8 srl t1, t3, 0x10 | t1 = t3 >> 0x10;
0x000131fc sw t1, 0x84(sp) | *(var_84h) = t1;
0x00013200 srl t1, t3, 0x18 | t1 = t3 >> 0x18;
0x00013204 sw t1, 0x88(sp) | *(var_88h) = t1;
0x00013208 srl t1, t7, 8 | t1 = t7 >> 8;
0x0001320c sw t1, 0x8c(sp) | *(var_8ch) = t1;
0x00013210 srl t1, t7, 0x10 | t1 = t7 >> 0x10;
0x00013214 sw t1, 0x90(sp) | *(var_90h) = t1;
0x00013218 srl t1, t7, 0x18 | t1 = t7 >> 0x18;
0x0001321c srl t7, a1, 0x18 | t7 = a1 >> 0x18;
0x00013220 sw t7, 0xa8(sp) | *(var_a8h) = t7;
0x00013224 srl t7, a0, 8 | t7 = a0 >> 8;
0x00013228 sw t7, 0xac(sp) | *(var_ach) = t7;
0x0001322c srl t7, a0, 0x10 | t7 = a0 >> 0x10;
0x00013230 sw t7, 0xb0(sp) | *(var_b0h) = t7;
0x00013234 srl t7, a0, 0x18 | t7 = a0 >> 0x18;
0x00013238 sw t7, 0xb4(sp) | *(var_b4h) = t7;
0x0001323c srl t7, a3, 8 | t7 = a3 >> 8;
0x00013240 sw t7, 0xb8(sp) | *(var_b8h) = t7;
0x00013244 srl t7, a3, 0x10 | t7 = a3 >> 0x10;
0x00013248 sw t7, 0xbc(sp) | *(var_bch) = t7;
0x0001324c srl t7, a3, 0x18 | t7 = a3 >> 0x18;
0x00013250 sw t7, 0xc0(sp) | *(var_c0h) = t7;
0x00013254 srl t7, t6, 8 | t7 = t6 >> 8;
0x00013258 sw t7, 0xc4(sp) | *(var_c4h) = t7;
0x0001325c srl t7, t6, 0x10 | t7 = t6 >> 0x10;
0x00013260 sw t7, 0xc8(sp) | *(var_c8h) = t7;
0x00013264 srl t7, t6, 0x18 | t7 = t6 >> 0x18;
0x00013268 sw t7, 0xcc(sp) | *(var_cch) = t7;
0x0001326c srl t7, t5, 8 | t7 = t5 >> 8;
0x00013270 sw t7, 0xd0(sp) | *(var_d0h) = t7;
0x00013274 srl t7, t5, 0x10 | t7 = t5 >> 0x10;
0x00013278 sw t1, 0x94(sp) | *(var_94h) = t1;
0x0001327c sw t7, 0xd4(sp) | *(var_d4h) = t7;
0x00013280 srl t1, t2, 8 | t1 = t2 >> 8;
0x00013284 srl t7, t5, 0x18 | t7 = t5 >> 0x18;
0x00013288 sw t1, 0x98(sp) | *(var_98h) = t1;
0x0001328c sw t7, 0xd8(sp) | *(var_d8h) = t7;
0x00013290 srl t1, t2, 0x10 | t1 = t2 >> 0x10;
0x00013294 srl t7, t9, 8 | t7 = t9 >> 8;
0x00013298 sw t1, 0x9c(sp) | *(var_9ch) = t1;
0x0001329c sw t7, 0xdc(sp) | *(var_dch) = t7;
0x000132a0 srl t1, t2, 0x18 | t1 = t2 >> 0x18;
0x000132a4 srl t7, t9, 0x10 | t7 = t9 >> 0x10;
0x000132a8 sw t1, 0xa0(sp) | *(var_a0h) = t1;
0x000132ac sw t7, 0xe0(sp) | *(var_e0h) = t7;
0x000132b0 srl t1, a1, 8 | t1 = a1 >> 8;
0x000132b4 srl t7, t9, 0x18 | t7 = t9 >> 0x18;
0x000132b8 sw t1, 0xa4(sp) | *(var_a4h) = t1;
0x000132bc sw t7, 0xe4(sp) | *(var_e4h) = t7;
0x000132c0 srl t7, t8, 8 | t7 = t8 >> 8;
0x000132c4 sw t7, 0xe8(sp) | *(var_e8h) = t7;
0x000132c8 srl t7, t8, 0x10 | t7 = t8 >> 0x10;
0x000132cc sw t7, 0xec(sp) | *(var_ech) = t7;
0x000132d0 srl t7, t8, 0x18 | t7 = t8 >> 0x18;
0x000132d4 sw t7, 0xf0(sp) | *(var_f0h) = t7;
0x000132d8 srl t7, a2, 8 | t7 = a2 >> 8;
0x000132dc sw t7, 0xf4(sp) | *(var_f4h) = t7;
0x000132e0 srl t7, a2, 0x10 | t7 = a2 >> 0x10;
0x000132e4 sw t7, 0xf8(sp) | *(var_f8h) = t7;
0x000132e8 srl t7, a2, 0x18 | t7 = a2 >> 0x18;
0x000132ec sw t7, 0xfc(sp) | *(var_fch) = t7;
0x000132f0 srl t7, v1, 8 | t7 = v1 >> 8;
0x000132f4 sw t7, 0x100(sp) | *(var_100h) = t7;
0x000132f8 srl t7, v1, 0x10 | t7 = v1 >> 0x10;
0x000132fc sw t7, 0x104(sp) | *(var_104h) = t7;
0x00013300 srl t7, v1, 0x18 | t7 = v1 >> 0x18;
0x00013304 sw t7, 0x108(sp) | *(var_108h) = t7;
; Byte stores to the output block at s1 (least-significant byte first).
0x00013308 sb ra, (s1) | *(s1) = ra;
0x0001330c srl t7, t0, 8 | t7 = t0 >> 8;
0x00013310 lw ra, 0x68(sp) | ra = *(var_68h);
0x00013314 sb t4, 4(s1) | *((s1 + 4)) = t4;
0x00013318 lw t4, 0x74(sp) | t4 = *(var_74h);
0x0001331c sw t7, 0x10c(sp) | *(var_10ch) = t7;
0x00013320 srl t7, t0, 0x10 | t7 = t0 >> 0x10;
0x00013324 sw t7, 0x110(sp) | *(var_110h) = t7;
0x00013328 sb ra, 1(s1) | *((s1 + 1)) = ra;
0x0001332c srl t7, t0, 0x18 | t7 = t0 >> 0x18;
0x00013330 lw ra, 0x6c(sp) | ra = *(var_6ch);
0x00013334 sb t4, 5(s1) | *((s1 + 5)) = t4;
0x00013338 lw t4, 0x78(sp) | t4 = *(var_78h);
0x0001333c sw t7, 0x114(sp) | *(var_114h) = t7;
0x00013340 srl t7, v0, 8 | t7 = v0 >> 8;
0x00013344 sw t7, 0x118(sp) | *(var_118h) = t7;
0x00013348 sb ra, 2(s1) | *((s1 + 2)) = ra;
0x0001334c srl t7, v0, 0x10 | t7 = v0 >> 0x10;
0x00013350 lw ra, 0x70(sp) | ra = *(var_70h);
0x00013354 sb t4, 6(s1) | *((s1 + 6)) = t4;
0x00013358 sb t3, 8(s1) | *((s1 + 8)) = t3;
0x0001335c lw t4, 0x7c(sp) | t4 = *(var_7ch);
0x00013360 lw t3, 0x80(sp) | t3 = *(var_80h);
0x00013364 sw t7, 0x11c(sp) | *(var_11ch) = t7;
0x00013368 srl t7, v0, 0x18 | t7 = v0 >> 0x18;
0x0001336c sw t7, 0x120(sp) | *(var_120h) = t7;
0x00013370 sb ra, 3(s1) | *((s1 + 3)) = ra;
0x00013374 sb t4, 7(s1) | *((s1 + 7)) = t4;
0x00013378 sb t3, 9(s1) | *((s1 + 9)) = t3;
0x0001337c lw t3, 0x84(sp) | t3 = *(var_84h);
0x00013380 lw t7, 0x24(sp) | t7 = *(var_24h);
0x00013384 sb t3, 0xa(s1) | *((s1 + 10)) = t3;
0x00013388 sb t7, 0xc(s1) | *((s1 + 12)) = t7;
0x0001338c lw t3, 0x88(sp) | t3 = *(var_88h);
0x00013390 lw t7, 0xa8(sp) | t7 = *(var_a8h);
0x00013394 sb t3, 0xb(s1) | *((s1 + 11)) = t3;
0x00013398 sb t2, 0x10(s1) | *((s1 + 16)) = t2;
0x0001339c lw t3, 0x8c(sp) | t3 = *(var_8ch);
0x000133a0 lw t2, 0x98(sp) | t2 = *(var_98h);
0x000133a4 sb t7, 0x17(s1) | *((s1 + 23)) = t7;
0x000133a8 lw t7, 0xac(sp) | t7 = *(var_ach);
0x000133ac sb t3, 0xd(s1) | *((s1 + 13)) = t3;
0x000133b0 sb t2, 0x11(s1) | *((s1 + 17)) = t2;
0x000133b4 lw t3, 0x90(sp) | t3 = *(var_90h);
0x000133b8 lw t2, 0x9c(sp) | t2 = *(var_9ch);
0x000133bc sb t7, 0x19(s1) | *((s1 + 25)) = t7;
0x000133c0 lw t7, 0xb0(sp) | t7 = *(var_b0h);
0x000133c4 srl t1, a1, 0x10 | t1 = a1 >> 0x10;
0x000133c8 sb t3, 0xe(s1) | *((s1 + 14)) = t3;
0x000133cc sb t2, 0x12(s1) | *((s1 + 18)) = t2;
0x000133d0 lw t3, 0x94(sp) | t3 = *(var_94h);
0x000133d4 lw t2, 0xa0(sp) | t2 = *(var_a0h);
0x000133d8 sb a1, 0x14(s1) | *((s1 + 20)) = a1;
0x000133dc sb t7, 0x1a(s1) | *((s1 + 26)) = t7;
0x000133e0 lw a1, 0xa4(sp) | a1 = *(var_a4h);
0x000133e4 lw t7, 0xb4(sp) | t7 = *(var_b4h);
0x000133e8 sb t3, 0xf(s1) | *((s1 + 15)) = t3;
0x000133ec sb t7, 0x1b(s1) | *((s1 + 27)) = t7;
0x000133f0 sb t2, 0x13(s1) | *((s1 + 19)) = t2;
0x000133f4 sb a1, 0x15(s1) | *((s1 + 21)) = a1;
0x000133f8 sb t1, 0x16(s1) | *((s1 + 22)) = t1;
0x000133fc sb a0, 0x18(s1) | *((s1 + 24)) = a0;
0x00013400 sb a3, 0x1c(s1) | *((s1 + 28)) = a3;
0x00013404 lw t7, 0xb8(sp) | t7 = *(var_b8h);
0x00013408 sb t6, 0x20(s1) | *((s1 + 32)) = t6;
0x0001340c sb t7, 0x1d(s1) | *((s1 + 29)) = t7;
0x00013410 lw t7, 0xbc(sp) | t7 = *(var_bch);
0x00013414 sb t5, 0x24(s1) | *((s1 + 36)) = t5;
0x00013418 sb t7, 0x1e(s1) | *((s1 + 30)) = t7;
0x0001341c lw t7, 0xc0(sp) | t7 = *(var_c0h);
0x00013420 sb t9, 0x28(s1) | *((s1 + 40)) = t9;
0x00013424 sb t7, 0x1f(s1) | *((s1 + 31)) = t7;
0x00013428 lw t7, 0xc4(sp) | t7 = *(var_c4h);
0x0001342c sb t8, 0x2c(s1) | *((s1 + 44)) = t8;
0x00013430 sb t7, 0x21(s1) | *((s1 + 33)) = t7;
0x00013434 lw t7, 0xc8(sp) | t7 = *(var_c8h);
0x00013438 sb t7, 0x22(s1) | *((s1 + 34)) = t7;
0x0001343c lw t7, 0xcc(sp) | t7 = *(var_cch);
0x00013440 sb t7, 0x23(s1) | *((s1 + 35)) = t7;
0x00013444 lw t7, 0xd0(sp) | t7 = *(var_d0h);
0x00013448 sb t7, 0x25(s1) | *((s1 + 37)) = t7;
0x0001344c lw t7, 0xd4(sp) | t7 = *(var_d4h);
0x00013450 sb t7, 0x26(s1) | *((s1 + 38)) = t7;
0x00013454 lw t7, 0xd8(sp) | t7 = *(var_d8h);
0x00013458 sb t7, 0x27(s1) | *((s1 + 39)) = t7;
0x0001345c lw t7, 0xdc(sp) | t7 = *(var_dch);
0x00013460 sb t7, 0x29(s1) | *((s1 + 41)) = t7;
0x00013464 lw t7, 0xe0(sp) | t7 = *(var_e0h);
0x00013468 sb t7, 0x2a(s1) | *((s1 + 42)) = t7;
0x0001346c lw t7, 0xe4(sp) | t7 = *(var_e4h);
0x00013470 sb t7, 0x2b(s1) | *((s1 + 43)) = t7;
0x00013474 lw t7, 0xe8(sp) | t7 = *(var_e8h);
0x00013478 sb t7, 0x2d(s1) | *((s1 + 45)) = t7;
0x0001347c lw t7, 0xec(sp) | t7 = *(var_ech);
0x00013480 sb t7, 0x2e(s1) | *((s1 + 46)) = t7;
0x00013484 lw t7, 0xf0(sp) | t7 = *(var_f0h);
0x00013488 sb v0, 0x3c(s1) | *((s1 + 60)) = v0;
0x0001348c sb t7, 0x2f(s1) | *((s1 + 47)) = t7;
0x00013490 lw t7, 0xf4(sp) | t7 = *(var_f4h);
0x00013494 addiu v0, zero, 0x40 | v0 = 0x40;
0x00013498 sb t7, 0x31(s1) | *((s1 + 49)) = t7;
0x0001349c lw t7, 0xf8(sp) | t7 = *(var_f8h);
0x000134a0 sb a2, 0x30(s1) | *((s1 + 48)) = a2;
0x000134a4 sb t7, 0x32(s1) | *((s1 + 50)) = t7;
0x000134a8 lw t7, 0xfc(sp) | t7 = *(var_fch);
0x000134ac sb v1, 0x34(s1) | *((s1 + 52)) = v1;
0x000134b0 sb t7, 0x33(s1) | *((s1 + 51)) = t7;
0x000134b4 lw t7, 0x100(sp) | t7 = *(var_100h);
0x000134b8 sb t0, 0x38(s1) | *((s1 + 56)) = t0;
0x000134bc sb t7, 0x35(s1) | *((s1 + 53)) = t7;
0x000134c0 lw t7, 0x104(sp) | t7 = *(var_104h);
0x000134c4 sb t7, 0x36(s1) | *((s1 + 54)) = t7;
0x000134c8 lw t7, 0x108(sp) | t7 = *(var_108h);
0x000134cc sb t7, 0x37(s1) | *((s1 + 55)) = t7;
0x000134d0 lw t7, 0x10c(sp) | t7 = *(var_10ch);
0x000134d4 sb t7, 0x39(s1) | *((s1 + 57)) = t7;
0x000134d8 lw t7, 0x110(sp) | t7 = *(var_110h);
0x000134dc sb t7, 0x3a(s1) | *((s1 + 58)) = t7;
0x000134e0 lw t7, 0x114(sp) | t7 = *(var_114h);
0x000134e4 sb t7, 0x3b(s1) | *((s1 + 59)) = t7;
0x000134e8 lw t7, 0x118(sp) | t7 = *(var_118h);
0x000134ec sb t7, 0x3d(s1) | *((s1 + 61)) = t7;
0x000134f0 lw t7, 0x11c(sp) | t7 = *(var_11ch);
0x000134f4 sb t7, 0x3e(s1) | *((s1 + 62)) = t7;
0x000134f8 lw t7, 0x120(sp) | t7 = *(var_120h);
0x000134fc sb t7, 0x3f(s1) | *((s1 + 63)) = t7;
; s4 = bytes remaining. Exactly 0x40 left -> this was the last full block
; (label_5). Otherwise subtract 0x40, advance the output pointer, and loop
; back to label_0 while >= 0x40 bytes remain.
| if (s4 == v0) {
0x00013500 beq s4, v0, 0x13544 | goto label_5;
| }
0x00013504 addiu s4, s4, -0x40 | s4 += -0x40;
0x00013508 sltiu v0, s4, 0x40 | v0 = (s4 < 0x40) ? 1 : 0;
0x0001350c addiu s1, s1, 0x40 | s1 += 0x40;
| if (v0 == 0) {
0x00013510 beqz v0, 0x12f68 | goto label_0;
| }
; Short tail (< 0x40 bytes): byte-copy the remaining keystream to the
; bounce buffer at fp (sp+0x154) and continue from there.
; NOTE(review): with s4 starting at 0x400 (a multiple of 64) this tail path
; looks unreachable on this code path — presumably shared codegen; verify.
0x00013514 move v1, fp | v1 = fp;
0x00013518 move v0, zero | v0 = 0;
| do {
0x0001351c lw a0, 0x60(sp) | a0 = *(var_60h);
0x00013520 addiu v1, v1, 1 | v1++;
0x00013524 lbux a1, v0(a0) | __asm ("lbux a1, v0(a0)");
0x00013528 addiu v0, v0, 1 | v0++;
0x0001352c sltu a0, v0, s4 | a0 = (v0 < s4) ? 1 : 0;
0x00013530 sb a1, -1(v1) | *((v1 - 1)) = a1;
0x00013534 bnez a0, 0x1351c |
| } while (a0 != 0);
0x00013538 sw fp, 0x60(sp) | *(var_60h) = fp;
0x0001353c move s1, fp | s1 = fp;
0x00013540 b 0x12f68 | goto label_0;
| label_5:
; --- label_5: keystream buffer refilled. Store the advanced 64-bit block
; --- counter (low word +0x10 = 16 blocks produced) back into state words
; --- 12/13, then XOR the fresh 40 entropy bytes (sp+0x12c..fp) into the
; --- start of the keystream buffer — i.e. mix new seed into the next key.
0x00013544 lw v0, 0x124(sp) | v0 = *(var_124h);
0x00013548 move s1, s0 | s1 = s0;
0x0001354c addiu v0, v0, 0x10 | v0 += 0x10;
0x00013550 lw s0, -0x34f0(s7) | s0 = *((s7 - 3388));
0x00013554 sw v0, 0x30(s1) | *((s1 + 12)) = v0;
0x00013558 lw v0, 0x64(sp) | v0 = *(var_64h);
0x0001355c move s4, s7 | s4 = s7;
0x00013560 sw s6, 0x34(s1) | *((s1 + 13)) = s6;
0x00013564 addiu v1, s0, 0x40 | v1 = s0 + 0x40;
| do {
0x00013568 lbu a0, (v1) | a0 = *(v1);
0x0001356c lbu a1, (v0) | a1 = *(v0);
0x00013570 addiu v0, v0, 1 | v0++;
0x00013574 xor a0, a0, a1 | a0 ^= a1;
0x00013578 sb a0, (v1) | *(v1) = a0;
0x0001357c addiu v1, v1, 1 | v1++;
0x00013580 bne fp, v0, 0x13568 |
| } while (fp != v0);
0x00013584 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00013588 move v0, s0 | v0 = s0;
0x00013590 beqz v0, 0x136f0 |
| while (1) {
; --- Rekey: load the first 40 mixed keystream bytes (offsets 0x40..0x67,
; --- via unaligned lwl/lwr pairs) as the new key/IV into state words 4..15,
; --- write the four ChaCha constants ("expa","nd 3","2-by","te k" =
; --- 0x61707865/0x3320646e/0x79622d32/0x6b206574) into words 0..3, and
; --- zero the block counter (words 12/13).
0x00013590 lwl v1, 0x43(s0) | __asm ("lwl v1, 0x43(s0)");
0x00013594 lw t9, -0x7be4(gp) | t9 = sym.imp.memset;
0x00013598 addiu a2, zero, 0x28 | a2 = 0x28;
0x0001359c lwr v1, 0x40(s0) | __asm ("lwr v1, 0x40(s0)");
0x000135a0 move a1, zero | a1 = 0;
0x000135a4 sw v1, 0x10(v0) | *((v0 + 4)) = v1;
0x000135a8 lwl v1, 0x47(s0) | __asm ("lwl v1, 0x47(s0)");
0x000135ac lwr v1, 0x44(s0) | __asm ("lwr v1, 0x44(s0)");
0x000135b0 sw v1, 0x14(v0) | *((v0 + 5)) = v1;
0x000135b4 lwl v1, 0x4b(s0) | __asm ("lwl v1, 0x4b(s0)");
0x000135b8 lwr v1, 0x48(s0) | __asm ("lwr v1, 0x48(s0)");
0x000135bc sw v1, 0x18(v0) | *((v0 + 6)) = v1;
0x000135c0 lwl v1, 0x4f(s0) | __asm ("lwl v1, 0x4f(s0)");
0x000135c4 lwr v1, 0x4c(s0) | __asm ("lwr v1, 0x4c(s0)");
0x000135c8 sw v1, 0x1c(v0) | *((v0 + 7)) = v1;
0x000135cc lwl v1, 0x53(s0) | __asm ("lwl v1, 0x53(s0)");
0x000135d0 lwr v1, 0x50(s0) | __asm ("lwr v1, 0x50(s0)");
0x000135d4 sw v1, 0x20(v0) | *((v0 + 8)) = v1;
0x000135d8 lwl v1, 0x57(s0) | __asm ("lwl v1, 0x57(s0)");
0x000135dc lwr v1, 0x54(s0) | __asm ("lwr v1, 0x54(s0)");
0x000135e0 sw v1, 0x24(v0) | *((v0 + 9)) = v1;
0x000135e4 lwl v1, 0x5b(s0) | __asm ("lwl v1, 0x5b(s0)");
0x000135e8 lwr v1, 0x58(s0) | __asm ("lwr v1, 0x58(s0)");
0x000135ec sw v1, 0x28(v0) | *((v0 + 10)) = v1;
0x000135f0 lwl a0, 0x5f(s0) | __asm ("lwl a0, 0x5f(s0)");
0x000135f4 lui v1, 0x6170 | v1 = 0x61707865;
0x000135f8 addiu v1, v1, 0x7865 |
0x000135fc lwr a0, 0x5c(s0) | __asm ("lwr a0, 0x5c(s0)");
0x00013600 sw v1, (v0) | *(v0) = v1;
0x00013604 sw a0, 0x2c(v0) | *((v0 + 11)) = a0;
0x00013608 lui a0, 0x3320 | a0 = 0x3320646e;
0x0001360c addiu a0, a0, 0x646e |
0x00013610 sw a0, 4(v0) | *((v0 + 1)) = a0;
0x00013614 lui a0, 0x7962 | a0 = 0x79620000;
0x00013618 lw v1, -0x34f0(s4) | v1 = *((s4 - 3388));
0x0001361c addiu a0, a0, 0x2d32 | a0 += 0x2d32;
0x00013620 sw a0, 8(v0) | *((v0 + 2)) = a0;
0x00013624 lui a0, 0x6b20 | a0 = 0x6b206574;
0x00013628 addiu a0, a0, 0x6574 |
0x0001362c sw a0, 0xc(v0) | *((v0 + 3)) = a0;
0x00013630 sw zero, 0x30(v1) | *((v1 + 12)) = 0;
0x00013634 sw zero, 0x34(v1) | *((v1 + 13)) = 0;
0x00013638 lwl v0, 0x63(s0) | __asm ("lwl v0, 0x63(s0)");
0x0001363c addiu a0, v1, 0x40 | a0 = v1 + 0x40;
0x00013640 lwr v0, 0x60(s0) | __asm ("lwr v0, 0x60(s0)");
0x00013644 sw v0, 0x38(v1) | *((v1 + 14)) = v0;
0x00013648 lwl v0, 0x67(s0) | __asm ("lwl v0, 0x67(s0)");
0x0001364c lwr v0, 0x64(s0) | __asm ("lwr v0, 0x64(s0)");
0x00013650 sw v0, 0x3c(v1) | *((v1 + 15)) = v0;
; memset(rsx->buf, 0, 0x28): wipe the 40 keystream bytes just consumed as key.
0x00013654 jalr t9 | t9 ();
0x00013658 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
; rs_have = 0x3d8 = 984 = 1024 - 40 (keystream size minus bytes consumed for rekey).
0x0001365c addiu v1, zero, 0x3d8 | v1 = aav.0x000003d8;
0x00013660 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013664 sw v1, (v0) | *(v0) = v1;
| label_2:
; --- label_2: cleanup. explicit_bzero the 40-byte seed buffer, zero rs_have,
; --- memset the whole 0x400-byte keystream area, then set rs_count to
; --- 0x186a00 = 1,600,000 (bytes to emit before the next reseed).
0x00013668 lw t9, -0x7c38(gp) | t9 = sym.imp.__explicit_bzero_chk;
0x0001366c lw a0, 0x64(sp) | a0 = *(var_64h);
0x00013670 addiu a2, zero, 0x28 | a2 = 0x28;
0x00013674 addiu a1, zero, 0x28 | a1 = 0x28;
0x00013678 jalr t9 | t9 ();
0x0001367c lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00013680 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013684 move a1, zero | a1 = 0;
0x00013688 sw zero, (v0) | *(v0) = 0;
0x0001368c lw a0, -0x34f0(s4) | a0 = *((s4 - 3388));
0x00013690 lw t9, -0x7be4(gp) | t9 = sym.imp.memset;
0x00013694 addiu a0, a0, 0x40 | a0 += 0x40;
0x00013698 addiu a2, zero, 0x400 | a2 = 0x400;
0x0001369c jalr t9 | t9 ();
0x000136a0 lw a1, -0x34ec(s3) | a1 = *((s3 - 3387));
0x000136a4 lw a0, 0x194(sp) | a0 = *(var_194h);
0x000136a8 lui v0, 0x18 | v0 = 0x180000;
0x000136ac lw v1, (s2) | v1 = *(s2);
0x000136b0 addiu v0, v0, 0x6a00 | v0 += 0x6a00;
0x000136b4 lw gp, 0x18(sp) | gp = *(var_18h);
0x000136b8 sw v0, 4(a1) | *((a1 + 1)) = v0;
; Stack canary check: saved canary (var_194h) vs current *(s2);
; mismatch -> label_6 -> __stack_chk_fail.
| if (a0 != v1) {
0x000136bc bne a0, v1, 0x138d4 | goto label_6;
| }
; --- Epilogue: restore callee-saved registers and return.
0x000136c0 lw ra, 0x1bc(sp) | ra = *(var_1bch);
0x000136c4 lw fp, 0x1b8(sp) | fp = *(var_1b8h);
0x000136c8 lw s7, 0x1b4(sp) | s7 = *(var_1b4h);
0x000136cc lw s6, 0x1b0(sp) | s6 = *(var_1b0h);
0x000136d0 lw s5, 0x1ac(sp) | s5 = *(var_1ach);
0x000136d4 lw s4, 0x1a8(sp) | s4 = *(var_1a8h);
0x000136d8 lw s3, 0x1a4(sp) | s3 = *(var_1a4h);
0x000136dc lw s2, 0x1a0(sp) | s2 = *(var_1a0h);
0x000136e0 lw s1, 0x19c(sp) | s1 = *(var_19ch);
0x000136e4 lw s0, 0x198(sp) | s0 = *(var_198h);
0x000136e8 addiu sp, sp, 0x1c0 |
0x000136ec jr ra | return v0;
; --- First-time init (reached when the rsx pointer was NULL at 0x13588):
; --- mmap(NULL, 8, PROT_READ|PROT_WRITE (=3), 0x802, -1, 0) for the rs
; --- bookkeeping block; on MIPS 0x802 = MAP_PRIVATE|MAP_ANONYMOUS.
0x000136f0 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x000136f4 addiu s1, zero, -1 | s1 = -1;
0x000136f8 sw zero, 0x14(sp) | *(var_14h) = 0;
0x000136fc sw s1, 0x10(sp) | *(var_10h) = s1;
0x00013700 addiu a3, zero, 0x802 | a3 = 0x802;
0x00013704 addiu a2, zero, 3 | a2 = 3;
0x00013708 addiu a1, zero, 8 | a1 = 8;
0x0001370c move a0, zero | a0 = 0;
0x00013710 jalr t9 | t9 ();
0x00013714 sw v0, -0x34ec(s3) | *((s3 - 3387)) = v0;
0x00013718 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 == s1) {
0x0001371c beq v0, s1, 0x138c8 | goto label_7;
| }
; Second mmap: 0x440 bytes = 0x40-byte ChaCha state + 0x400-byte keystream buffer.
0x00013720 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x00013724 sw zero, 0x14(sp) | *(var_14h) = 0;
0x00013728 sw s1, 0x10(sp) | *(var_10h) = s1;
0x0001372c addiu a3, zero, 0x802 | a3 = 0x802;
0x00013730 addiu a2, zero, 3 | a2 = 3;
0x00013734 addiu a1, zero, 0x440 | a1 = 0x440;
0x00013738 move a0, zero | a0 = 0;
0x0001373c jalr t9 | t9 ();
0x00013740 sw v0, -0x34f0(s4) | *((s4 - 3388)) = v0;
0x00013744 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 == s1) {
0x00013748 beq v0, s1, 0x138b0 | goto label_8;
| }
; __register_atfork(NULL, NULL, handler, dso_handle): presumably registers a
; child-side handler so the PRNG state is invalidated after fork — confirm
; against the handler at <a2 base>+0x2e20.
0x0001374c lw v0, -0x7eb8(gp) | v0 = *((gp - 8110));
0x00013750 lw a2, -0x7f74(gp) | a2 = *(gp);
0x00013754 lw t9, -0x7b78(gp) | t9 = sym.imp.__register_atfork;
0x00013758 lw a3, (v0) | a3 = *(v0);
0x0001375c addiu a2, a2, 0x2e20 | a2 += 0x2e20;
0x00013760 move a1, zero | a1 = 0;
0x00013764 move a0, zero | a0 = 0;
0x00013768 jalr t9 | t9 ();
0x0001376c lw gp, 0x18(sp) | gp = *(var_18h);
0x00013770 lw v0, -0x34f0(s4) | v0 = *((s4 - 3388));
0x00013774 b 0x13590 |
| }
| label_3:
; --- label_3: getentropy() failed -> raise(9) (signal 9 = SIGKILL on MIPS
; --- Linux); matches OpenBSD arc4random's "kill self" fallback. If the
; --- process survives, re-check state and retry at label_1 or fall into init.
0x00013778 lw t9, -0x7ab0(gp) | t9 = sym.imp.raise;
0x0001377c addiu a0, zero, 9 | a0 = 9;
0x00013780 jalr t9 | t9 ();
0x00013784 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013788 lw s3, -0x7fa8(gp) | s3 = *((gp - 8170));
0x0001378c lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00013790 lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
| if (v0 != 0) {
0x00013794 bnez v0, 0x12ec4 | goto label_1;
| }
0x00013798 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
| label_4:
; --- label_4: first-use initialization (duplicate of the 0x136f0 path):
; --- mmap the 8-byte rs block and the 0x440-byte rsx block, register the
; --- atfork handler, then seed state words 4..15 directly from the fresh
; --- entropy at sp+0x12c..0x150 plus the ChaCha constants, counter = 0.
0x0001379c addiu s0, zero, -1 | s0 = -1;
0x000137a0 sw zero, 0x14(sp) | *(var_14h) = 0;
0x000137a4 sw s0, 0x10(sp) | *(var_10h) = s0;
0x000137a8 addiu a3, zero, 0x802 | a3 = 0x802;
0x000137ac addiu a2, zero, 3 | a2 = 3;
0x000137b0 addiu a1, zero, 8 | a1 = 8;
0x000137b4 move a0, zero | a0 = 0;
0x000137b8 jalr t9 | t9 ();
0x000137bc sw v0, -0x34ec(s3) | *((s3 - 3387)) = v0;
0x000137c0 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 != s0) {
0x000137c4 beq v0, s0, 0x138c8 |
0x000137c8 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x000137cc lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
0x000137d0 sw zero, 0x14(sp) | *(var_14h) = 0;
0x000137d4 sw s0, 0x10(sp) | *(var_10h) = s0;
0x000137d8 addiu a3, zero, 0x802 | a3 = 0x802;
0x000137dc addiu a2, zero, 3 | a2 = 3;
0x000137e0 addiu a1, zero, 0x440 | a1 = 0x440;
0x000137e4 move a0, zero | a0 = 0;
0x000137e8 jalr t9 | t9 ();
0x000137ec sw v0, -0x34f0(s4) | *((s4 - 3388)) = v0;
0x000137f0 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 != s0) {
0x000137f4 beq v0, s0, 0x138b0 |
0x000137f8 lw v0, -0x7eb8(gp) | v0 = *((gp - 8110));
0x000137fc lw a2, -0x7f74(gp) | a2 = *(gp);
0x00013800 lw t9, -0x7b78(gp) | t9 = sym.imp.__register_atfork;
0x00013804 lw a3, (v0) | a3 = *(v0);
0x00013808 addiu a2, a2, 0x2e20 | a2 += 0x2e20;
0x0001380c move a1, zero | a1 = 0;
0x00013810 move a0, zero | a0 = 0;
0x00013814 jalr t9 | t9 ();
0x00013818 lw v0, -0x34f0(s4) | v0 = *((s4 - 3388));
; Copy the 40 getentropy bytes (sp+0x12c..0x150) into state words 4..15
; (key + IV) and install the ChaCha constants in words 0..3.
0x0001381c lw v1, 0x12c(sp) | v1 = *(var_12ch);
0x00013820 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013824 sw v1, 0x10(v0) | *((v0 + 4)) = v1;
0x00013828 lw v1, 0x130(sp) | v1 = *(var_130h);
0x0001382c lw a3, 0x13c(sp) | a3 = *(var_13ch);
0x00013830 sw v1, 0x14(v0) | *((v0 + 5)) = v1;
0x00013834 lw v1, 0x134(sp) | v1 = *(var_134h);
0x00013838 lw a2, 0x140(sp) | a2 = *(var_140h);
0x0001383c sw v1, 0x18(v0) | *((v0 + 6)) = v1;
0x00013840 lw v1, 0x138(sp) | v1 = *(var_138h);
0x00013844 lw a1, 0x144(sp) | a1 = *(var_144h);
0x00013848 sw v1, 0x1c(v0) | *((v0 + 7)) = v1;
0x0001384c lui v1, 0x6170 | v1 = 0x61707865;
0x00013850 addiu v1, v1, 0x7865 |
0x00013854 lw a0, 0x148(sp) | a0 = *(var_148h);
0x00013858 sw v1, (v0) | *(v0) = v1;
0x0001385c lui v1, 0x3320 | v1 = 0x3320646e;
0x00013860 addiu v1, v1, 0x646e |
0x00013864 sw v1, 4(v0) | *((v0 + 1)) = v1;
0x00013868 lui v1, 0x7962 | v1 = 0x79622d32;
0x0001386c addiu v1, v1, 0x2d32 |
0x00013870 sw v1, 8(v0) | *((v0 + 2)) = v1;
0x00013874 lui v1, 0x6b20 | v1 = 0x6b206574;
0x00013878 addiu v1, v1, 0x6574 |
0x0001387c sw v1, 0xc(v0) | *((v0 + 3)) = v1;
0x00013880 sw a3, 0x20(v0) | *((v0 + 8)) = a3;
0x00013884 lw v1, -0x34f0(s4) | v1 = *((s4 - 3388));
0x00013888 sw a2, 0x24(v0) | *((v0 + 9)) = a2;
0x0001388c sw a1, 0x28(v0) | *((v0 + 10)) = a1;
0x00013890 sw a0, 0x2c(v0) | *((v0 + 11)) = a0;
0x00013894 lw v0, 0x14c(sp) | v0 = *(var_14ch);
0x00013898 sw zero, 0x30(v1) | *((v1 + 12)) = 0;
0x0001389c sw v0, 0x38(v1) | *((v1 + 14)) = v0;
0x000138a0 lw v0, 0x150(sp) | v0 = *(var_150h);
0x000138a4 sw zero, 0x34(v1) | *((v1 + 13)) = 0;
0x000138a8 sw v0, 0x3c(v1) | *((v1 + 15)) = v0;
0x000138ac b 0x13668 | goto label_2;
| }
| label_8:
; --- label_8: second mmap failed — unmap the 8-byte rs block, clear the
; --- pointer, then fall through to abort().
0x000138b0 lw t9, -0x7c18(gp) | t9 = sym.imp.munmap;
0x000138b4 lw a0, -0x34ec(s3) | a0 = *((s3 - 3387));
0x000138b8 addiu a1, zero, 8 | a1 = 8;
0x000138bc jalr t9 | t9 ();
0x000138c0 lw gp, 0x18(sp) | gp = *(var_18h);
0x000138c4 sw zero, -0x34ec(s3) | *((s3 - 3387)) = 0;
| }
| label_7:
; --- label_7: unrecoverable allocation failure -> abort().
0x000138c8 lw t9, -0x7c80(gp) | t9 = sym.imp.abort;
0x000138cc jalr t9 | t9 ();
0x000138d0 nop |
| label_6:
; --- label_6: stack canary mismatch -> __stack_chk_fail (no return).
0x000138d4 lw t9, -0x7b2c(gp) | t9 = sym.imp.__stack_chk_fail;
0x000138d8 jalr t9 | t9 ();
0x000138dc nop |
| }
; assembly | /* r2dec pseudo code output */
| /* /logs/firmware/unblob_extracted/firmware_extract/4325012-58052244.squashfs_v4_le_extract/usr/sbin/ntpd @ 0x138e0 */
| #include <stdint.h>
|
; (fcn) sym.arc4random () | void arc4random () {
0x000138e0 lui gp, 2 |
0x000138e4 addiu gp, gp, -0x1840 |
0x000138e8 addu gp, gp, t9 | gp += t9;
0x000138ec addiu sp, sp, -0x198 |
0x000138f0 lw v0, -0x7b04(gp) | v0 = *((gp - 7873));
0x000138f4 sw gp, 0x18(sp) | *(var_18h) = gp;
0x000138f8 sw ra, 0x194(sp) | *(var_194h) = ra;
0x000138fc sw s4, 0x180(sp) | *(var_180h) = s4;
0x00013900 sw fp, 0x190(sp) | *(var_190h) = fp;
0x00013904 sw s7, 0x18c(sp) | *(var_18ch) = s7;
0x00013908 sw s6, 0x188(sp) | *(var_188h) = s6;
0x0001390c sw s5, 0x184(sp) | *(var_184h) = s5;
0x00013910 sw s3, 0x17c(sp) | *(var_17ch) = s3;
0x00013914 sw s2, 0x178(sp) | *(var_178h) = s2;
0x00013918 sw s1, 0x174(sp) | *(var_174h) = s1;
0x0001391c sw s0, 0x170(sp) | *(var_170h) = s0;
0x00013920 lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
0x00013924 sw v0, 0x120(sp) | *(var_120h) = v0;
0x00013928 lw v0, (v0) | v0 = *(v0);
0x0001392c lw t9, -0x7b40(gp) | t9 = sym.imp.pthread_mutex_lock;
0x00013930 addiu a0, s4, -0x3508 | a0 = s4 + -0x3508;
0x00013934 sw v0, 0x16c(sp) | *(var_16ch) = v0;
0x00013938 jalr t9 | t9 ();
0x0001393c nop |
0x00013940 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013944 lw t9, -0x7b7c(gp) | t9 = sym.imp.getpid;
0x00013948 jalr t9 | t9 ();
0x0001394c nop |
0x00013950 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013954 lw v1, -0x7fa8(gp) | v1 = *((gp - 8170));
0x00013958 lw a0, -0x3510(v1) | a0 = *((v1 - 3396));
0x0001395c sltiu a1, a0, 2 | a1 = (a0 < 2) ? 1 : 0;
0x00013960 lw a0, -0x7fa8(gp) | a0 = *((gp - 8170));
| if (a1 != 0) {
0x00013964 bnel a1, zero, 0x13970 |
0x00013968 lw a0, -0x7fa8(gp) | a0 = *((gp - 8170));
| if (v0 == a0) {
0x0001396c beq v0, a0, 0x14138 | goto label_12;
| }
| }
0x00013970 lw s3, -0x7fa8(gp) | s3 = *((gp - 8170));
| label_1:
0x00013974 sw v0, -0x3510(v1) | *((v1 - 3396)) = v0;
0x00013978 sw zero, -0x350c(a0) | *((a0 - 3395)) = 0;
0x0001397c lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00013980 lw t9, -0x7f74(gp) | t9 = *(gp);
| if (v0 != 0) {
0x00013984 beqz v0, 0x139c8 |
0x00013988 sb zero, (v0) | *(v0) = 0;
0x0001398c sb zero, 1(v0) | *((v0 + 1)) = 0;
0x00013990 sb zero, 2(v0) | *((v0 + 2)) = 0;
0x00013994 sb zero, 3(v0) | *((v0 + 3)) = 0;
0x00013998 sb zero, 4(v0) | *((v0 + 4)) = 0;
0x0001399c sb zero, 5(v0) | *((v0 + 5)) = 0;
0x000139a0 sb zero, 6(v0) | *((v0 + 6)) = 0;
0x000139a4 sb zero, 7(v0) | *((v0 + 7)) = 0;
0x000139a8 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
| label_2:
0x000139ac lw t9, -0x7f74(gp) | t9 = *(gp);
| if (v0 == 0) {
0x000139b0 beqz v0, 0x139c8 | goto label_13;
| }
0x000139b4 lw v1, 4(v0) | v1 = *((v0 + 1));
0x000139b8 sltiu a0, v1, 5 | a0 = (v1 < 5) ? 1 : 0;
0x000139bc lw s5, -0x7fa8(gp) | s5 = *((gp - 8170));
| if (a0 == 0) {
0x000139c0 beqz a0, 0x13a9c | goto label_14;
| }
0x000139c4 lw t9, -0x7f74(gp) | t9 = *(gp);
| }
| /* fcn.00012e40 */
| label_13:
0x000139c8 addiu t9, t9, 0x2e40 | t9 += 0x2e40;
0x000139cc bal 0x12e40 | fcn_00012e40 ();
0x000139d0 nop |
0x000139d4 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x000139d8 lw v1, 4(v0) | v1 = *((v0 + 1));
0x000139dc sltiu a0, v1, 5 | a0 = (v1 < 5) ? 1 : 0;
0x000139e0 lw gp, 0x18(sp) | gp = *(var_18h);
| if (a0 == 0) {
0x000139e4 beqz a0, 0x13a98 | goto label_15;
| }
0x000139e8 lw s5, -0x7fa8(gp) | s5 = *((gp - 8170));
0x000139ec lw a0, (v0) | a0 = *(v0);
0x000139f0 move v1, zero | v1 = 0;
0x000139f4 lw s1, -0x34f0(s5) | s1 = *((s5 - 3388));
0x000139f8 sw v1, 4(v0) | *((v0 + 1)) = v1;
0x000139fc sltiu v0, a0, 4 | v0 = (a0 < 4) ? 1 : 0;
0x00013a00 addiu s0, s1, 0x40 | s0 = s1 + 0x40;
| if (v0 != 0) {
0x00013a04 bnez v0, 0x13ab8 | goto label_16;
| }
| do {
| label_3:
0x00013a08 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00013a0c lw t9, -0x7aa8(gp) | t9 = sym.imp.pthread_mutex_unlock;
0x00013a10 addiu a0, s4, -0x3508 | a0 = s4 + -0x3508;
0x00013a14 lw v0, (v0) | v0 = *(v0);
0x00013a18 subu v0, s0, v0 | __asm ("subu v0, s0, v0");
0x00013a1c addiu v0, v0, 0x400 | v0 += 0x400;
0x00013a20 lwl v1, 3(v0) | __asm ("lwl v1, 3(v0)");
0x00013a24 lwr v1, (v0) | __asm ("lwr v1, (v0)");
0x00013a28 sb zero, (v0) | *(v0) = 0;
0x00013a2c sb zero, 1(v0) | *((v0 + 1)) = 0;
0x00013a30 sb zero, 2(v0) | *((v0 + 2)) = 0;
0x00013a34 sb zero, 3(v0) | *((v0 + 3)) = 0;
0x00013a38 lw a1, -0x34ec(s3) | a1 = *((s3 - 3387));
0x00013a3c sw v1, 0x128(sp) | *(var_128h) = v1;
0x00013a40 lw v0, (a1) | v0 = *(a1);
0x00013a44 addiu v0, v0, -4 | v0 += -4;
0x00013a48 sw v0, (a1) | *(a1) = v0;
0x00013a4c jalr t9 | t9 ();
0x00013a50 lw v0, 0x120(sp) | v0 = *(var_120h);
0x00013a54 lw a0, 0x16c(sp) | a0 = *(var_16ch);
0x00013a58 lw gp, 0x18(sp) | gp = *(var_18h);
0x00013a5c lw v1, (v0) | v1 = *(v0);
0x00013a60 lw v0, 0x128(sp) | v0 = *(var_128h);
| if (a0 != v1) {
0x00013a64 bne a0, v1, 0x14308 | goto label_17;
| }
0x00013a68 lw ra, 0x194(sp) | ra = *(var_194h);
0x00013a6c lw fp, 0x190(sp) | fp = *(var_190h);
0x00013a70 lw s7, 0x18c(sp) | s7 = *(var_18ch);
0x00013a74 lw s6, 0x188(sp) | s6 = *(var_188h);
0x00013a78 lw s5, 0x184(sp) | s5 = *(var_184h);
0x00013a7c lw s4, 0x180(sp) | s4 = *(var_180h);
0x00013a80 lw s3, 0x17c(sp) | s3 = *(var_17ch);
0x00013a84 lw s2, 0x178(sp) | s2 = *(var_178h);
0x00013a88 lw s1, 0x174(sp) | s1 = *(var_174h);
0x00013a8c lw s0, 0x170(sp) | s0 = *(var_170h);
0x00013a90 addiu sp, sp, 0x198 |
0x00013a94 jr ra | return v1;
| label_15:
0x00013a98 lw s5, -0x7fa8(gp) | s5 = *((gp - 8170));
| label_14:
0x00013a9c lw a0, (v0) | a0 = *(v0);
0x00013aa0 addiu v1, v1, -4 | v1 += -4;
0x00013aa4 lw s1, -0x34f0(s5) | s1 = *((s5 - 3388));
0x00013aa8 sw v1, 4(v0) | *((v0 + 1)) = v1;
0x00013aac sltiu v0, a0, 4 | v0 = (a0 < 4) ? 1 : 0;
0x00013ab0 addiu s0, s1, 0x40 | s0 = s1 + 0x40;
0x00013ab4 beqz v0, 0x13a08 |
| } while (v0 == 0);
| label_16:
0x00013ab8 lw v0, (s1) | v0 = *(s1);
0x00013abc lw s2, 0x30(s1) | s2 = *((s1 + 12));
0x00013ac0 sw v0, 0x28(sp) | *(var_28h_2) = v0;
0x00013ac4 lw v0, 4(s1) | v0 = *((s1 + 1));
0x00013ac8 lw s6, 0x34(s1) | s6 = *((s1 + 13));
0x00013acc sw v0, 0x2c(sp) | *(var_2ch_2) = v0;
0x00013ad0 lw v0, 8(s1) | v0 = *((s1 + 2));
0x00013ad4 addiu s7, sp, 0x12c | s7 = sp + aav.0x0000012c;
0x00013ad8 sw v0, 0x30(sp) | *(var_30h_2) = v0;
0x00013adc lw v0, 0xc(s1) | v0 = *((s1 + 3));
0x00013ae0 addiu fp, zero, 0x400 | fp = 0x400;
0x00013ae4 sw v0, 0x34(sp) | *(var_34h_2) = v0;
0x00013ae8 lw v0, 0x10(s1) | v0 = *((s1 + 4));
0x00013aec sw s2, 0x124(sp) | *(var_124h_2) = s2;
0x00013af0 sw v0, 0x38(sp) | *(var_38h_2) = v0;
0x00013af4 lw v0, 0x14(s1) | v0 = *((s1 + 5));
0x00013af8 sw s0, 0x60(sp) | *(var_60h_2) = s0;
0x00013afc sw v0, 0x3c(sp) | *(var_3ch_2) = v0;
0x00013b00 lw v0, 0x18(s1) | v0 = *((s1 + 6));
0x00013b04 sw v0, 0x40(sp) | *(var_40h_2) = v0;
0x00013b08 lw v0, 0x1c(s1) | v0 = *((s1 + 7));
0x00013b0c sw v0, 0x44(sp) | *(var_44h_2) = v0;
0x00013b10 lw v0, 0x20(s1) | v0 = *((s1 + 8));
0x00013b14 sw v0, 0x48(sp) | *(var_48h_2) = v0;
0x00013b18 lw v0, 0x24(s1) | v0 = *((s1 + 9));
0x00013b1c sw v0, 0x4c(sp) | *(var_4ch_2) = v0;
0x00013b20 lw v0, 0x28(s1) | v0 = *((s1 + 10));
0x00013b24 sw v0, 0x50(sp) | *(var_50h_2) = v0;
0x00013b28 lw v0, 0x2c(s1) | v0 = *((s1 + 11));
0x00013b2c sw v0, 0x54(sp) | *(var_54h_2) = v0;
0x00013b30 lw v0, 0x38(s1) | v0 = *((s1 + 14));
0x00013b34 sw v0, 0x58(sp) | *(var_58h_2) = v0;
0x00013b38 lw v0, 0x3c(s1) | v0 = *((s1 + 15));
0x00013b3c sw v0, 0x5c(sp) | *(var_5ch_2) = v0;
0x00013b40 move v0, s7 | v0 = s7;
0x00013b44 move s7, s3 | s7 = s3;
0x00013b48 move s3, s6 | s3 = s6;
0x00013b4c move s6, s1 | s6 = s1;
0x00013b50 move s1, s2 | s1 = s2;
0x00013b54 move s2, fp | s2 = fp;
0x00013b58 move fp, v0 | fp = v0;
| label_0:
0x00013b5c lw t8, 0x54(sp) | t8 = *(var_54h_2);
0x00013b60 lw t1, 0x5c(sp) | t1 = *(var_5ch_2);
0x00013b64 lw a3, 0x44(sp) | a3 = *(var_44h_2);
0x00013b68 lw t7, 0x34(sp) | t7 = *(var_34h_2);
0x00013b6c lw t9, 0x50(sp) | t9 = *(var_50h_2);
0x00013b70 lw t0, 0x58(sp) | t0 = *(var_58h_2);
0x00013b74 lw v1, 0x40(sp) | v1 = *(var_40h_2);
0x00013b78 lw t2, 0x30(sp) | t2 = *(var_30h_2);
0x00013b7c lw t5, 0x4c(sp) | t5 = *(var_4ch_2);
0x00013b80 lw a0, 0x3c(sp) | a0 = *(var_3ch_2);
0x00013b84 lw t3, 0x2c(sp) | t3 = *(var_2ch_2);
0x00013b88 lw t6, 0x48(sp) | t6 = *(var_48h_2);
0x00013b8c lw a1, 0x38(sp) | a1 = *(var_38h_2);
0x00013b90 lw t4, 0x28(sp) | t4 = *(var_28h_2);
0x00013b94 move v0, s3 | v0 = s3;
0x00013b98 move a2, s1 | a2 = s1;
0x00013b9c addiu ra, zero, 0xa | ra = 0xa;
| do {
0x00013ba0 addu t4, t4, a1 | t4 += a1;
0x00013ba4 addu t3, t3, a0 | t3 += a0;
0x00013ba8 addu t2, t2, v1 | t2 += v1;
0x00013bac addu t7, a3, t7 | t7 = a3 + t7;
0x00013bb0 xor a2, t4, a2 | a2 = t4 ^ a2;
0x00013bb4 xor v0, t3, v0 | v0 = t3 ^ v0;
0x00013bb8 xor t0, t0, t2 | t0 ^= t2;
0x00013bbc xor t1, t1, t7 | t1 ^= t7;
0x00013bc0 rotr a2, a2, 0x10 | __asm ("rotr a2, a2, 0x10");
0x00013bc4 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00013bc8 rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x00013bcc rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x00013bd0 addu t6, a2, t6 | t6 = a2 + t6;
0x00013bd4 addu t5, v0, t5 | t5 = v0 + t5;
0x00013bd8 addu t9, t0, t9 | t9 = t0 + t9;
0x00013bdc addu t8, t8, t1 | t8 += t1;
0x00013be0 xor a1, t6, a1 | a1 = t6 ^ a1;
0x00013be4 xor a0, t5, a0 | a0 = t5 ^ a0;
0x00013be8 xor v1, t9, v1 | v1 = t9 ^ v1;
0x00013bec xor a3, a3, t8 | a3 ^= t8;
0x00013bf0 rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x00013bf4 rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x00013bf8 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x00013bfc rotr a3, a3, 0x14 | __asm ("rotr a3, a3, 0x14");
0x00013c00 addu t4, t4, a1 | t4 += a1;
0x00013c04 addu t3, t3, a0 | t3 += a0;
0x00013c08 addu t2, t2, v1 | t2 += v1;
0x00013c0c addu t7, t7, a3 | t7 += a3;
0x00013c10 xor a2, a2, t4 | a2 ^= t4;
0x00013c14 xor v0, v0, t3 | v0 ^= t3;
0x00013c18 xor t0, t0, t2 | t0 ^= t2;
0x00013c1c xor t1, t1, t7 | t1 ^= t7;
0x00013c20 rotr a2, a2, 0x18 | __asm ("rotr a2, a2, 0x18");
0x00013c24 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x00013c28 rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x00013c2c rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x00013c30 addu t6, t6, a2 | t6 += a2;
0x00013c34 addu t5, t5, v0 | t5 += v0;
0x00013c38 addu t9, t9, t0 | t9 += t0;
0x00013c3c addu t8, t8, t1 | t8 += t1;
0x00013c40 xor a1, a1, t6 | a1 ^= t6;
0x00013c44 xor a0, a0, t5 | a0 ^= t5;
0x00013c48 xor v1, v1, t9 | v1 ^= t9;
0x00013c4c xor a3, a3, t8 | a3 ^= t8;
0x00013c50 rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x00013c54 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x00013c58 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x00013c5c rotr a3, a3, 0x19 | __asm ("rotr a3, a3, 0x19");
0x00013c60 addu t4, t4, a0 | t4 += a0;
0x00013c64 addu t3, t3, v1 | t3 += v1;
0x00013c68 addu t2, t2, a3 | t2 += a3;
0x00013c6c addu t7, a1, t7 | t7 = a1 + t7;
0x00013c70 xor t1, t1, t4 | t1 ^= t4;
0x00013c74 xor a2, a2, t3 | a2 ^= t3;
0x00013c78 xor v0, v0, t2 | v0 ^= t2;
0x00013c7c xor t0, t0, t7 | t0 ^= t7;
0x00013c80 rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x00013c84 rotr a2, a2, 0x10 | __asm ("rotr a2, a2, 0x10");
0x00013c88 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00013c8c rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x00013c90 addu t9, t9, t1 | t9 += t1;
0x00013c94 addu t8, t8, a2 | t8 += a2;
0x00013c98 addu t6, t6, v0 | t6 += v0;
0x00013c9c addu t5, t5, t0 | t5 += t0;
0x00013ca0 xor a0, a0, t9 | a0 ^= t9;
0x00013ca4 xor v1, v1, t8 | v1 ^= t8;
0x00013ca8 xor a3, a3, t6 | a3 ^= t6;
0x00013cac xor a1, a1, t5 | a1 ^= t5;
0x00013cb0 rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x00013cb4 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x00013cb8 rotr a3, a3, 0x14 | __asm ("rotr a3, a3, 0x14");
0x00013cbc rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x00013cc0 addu t4, t4, a0 | t4 += a0;
0x00013cc4 addu t3, t3, v1 | t3 += v1;
0x00013cc8 addu t2, t2, a3 | t2 += a3;
0x00013ccc addu t7, t7, a1 | t7 += a1;
0x00013cd0 xor t1, t1, t4 | t1 ^= t4;
0x00013cd4 xor a2, a2, t3 | a2 ^= t3;
0x00013cd8 xor v0, v0, t2 | v0 ^= t2;
0x00013cdc xor t0, t0, t7 | t0 ^= t7;
0x00013ce0 rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x00013ce4 rotr a2, a2, 0x18 | __asm ("rotr a2, a2, 0x18");
0x00013ce8 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x00013cec rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x00013cf0 addu t9, t9, t1 | t9 += t1;
0x00013cf4 addu t8, t8, a2 | t8 += a2;
0x00013cf8 addu t6, t6, v0 | t6 += v0;
0x00013cfc addu t5, t5, t0 | t5 += t0;
0x00013d00 xor a0, a0, t9 | a0 ^= t9;
0x00013d04 xor v1, v1, t8 | v1 ^= t8;
0x00013d08 xor a3, a3, t6 | a3 ^= t6;
0x00013d0c xor a1, a1, t5 | a1 ^= t5;
0x00013d10 addiu ra, ra, -1 | ra += -1;
0x00013d14 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x00013d18 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x00013d1c rotr a3, a3, 0x19 | __asm ("rotr a3, a3, 0x19");
0x00013d20 rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x00013d24 bnez ra, 0x13ba0 |
| } while (ra != 0);
0x00013d28 lw ra, 0x28(sp) | ra = *(var_28h_2);
0x00013d2c addu a2, a2, s1 | a2 += s1;
0x00013d30 addu ra, ra, t4 | ra += t4;
0x00013d34 lw t4, 0x2c(sp) | t4 = *(var_2ch_2);
0x00013d38 addiu s1, s1, 1 | s1++;
0x00013d3c addu t4, t4, t3 | t4 += t3;
0x00013d40 lw t3, 0x30(sp) | t3 = *(var_30h_2);
0x00013d44 addu t3, t3, t2 | t3 += t2;
0x00013d48 lw t2, 0x34(sp) | t2 = *(var_34h_2);
0x00013d4c addu t2, t2, t7 | t2 += t7;
0x00013d50 sw t2, 0x24(sp) | *(var_24h_2) = t2;
0x00013d54 lw t2, 0x38(sp) | t2 = *(var_38h_2);
0x00013d58 addu t2, t2, a1 | t2 += a1;
0x00013d5c lw a1, 0x3c(sp) | a1 = *(var_3ch_2);
0x00013d60 addu a1, a1, a0 | a1 += a0;
0x00013d64 lw a0, 0x40(sp) | a0 = *(var_40h_2);
0x00013d68 addu a0, a0, v1 | a0 += v1;
0x00013d6c lw v1, 0x44(sp) | v1 = *(var_44h_2);
0x00013d70 addu a3, v1, a3 | a3 = v1 + a3;
0x00013d74 lw v1, 0x48(sp) | v1 = *(var_48h_2);
0x00013d78 addu t6, v1, t6 | t6 = v1 + t6;
0x00013d7c lw v1, 0x4c(sp) | v1 = *(var_4ch_2);
0x00013d80 addu t5, v1, t5 | t5 = v1 + t5;
0x00013d84 lw v1, 0x50(sp) | v1 = *(var_50h_2);
0x00013d88 addu t9, v1, t9 | t9 = v1 + t9;
0x00013d8c lw v1, 0x54(sp) | v1 = *(var_54h_2);
0x00013d90 addu t8, v1, t8 | t8 = v1 + t8;
0x00013d94 addu v1, v0, s3 | v1 = v0 + s3;
0x00013d98 lw v0, 0x58(sp) | v0 = *(var_58h_2);
0x00013d9c addu t0, v0, t0 | t0 = v0 + t0;
0x00013da0 lw v0, 0x5c(sp) | v0 = *(var_5ch_2);
0x00013da4 addu v0, v0, t1 | v0 += t1;
| if (s1 == 0) {
0x00013da8 bnez s1, 0x13db0 |
0x00013dac addiu s3, s3, 1 | s3++;
| }
0x00013db0 srl t1, ra, 8 | t1 = ra >> 8;
0x00013db4 sw t1, 0x64(sp) | *(var_64h_2) = t1;
0x00013db8 srl t1, ra, 0x10 | t1 = ra >> 0x10;
0x00013dbc sw t1, 0x68(sp) | *(var_68h_2) = t1;
0x00013dc0 srl t1, ra, 0x18 | t1 = ra >> 0x18;
0x00013dc4 sw t1, 0x6c(sp) | *(var_6ch_2) = t1;
0x00013dc8 srl t1, t4, 8 | t1 = t4 >> 8;
0x00013dcc sw t1, 0x70(sp) | *(var_70h_2) = t1;
0x00013dd0 srl t1, t4, 0x10 | t1 = t4 >> 0x10;
0x00013dd4 sw t1, 0x74(sp) | *(var_74h_2) = t1;
0x00013dd8 srl t1, t4, 0x18 | t1 = t4 >> 0x18;
0x00013ddc sw t1, 0x78(sp) | *(var_78h_2) = t1;
0x00013de0 srl t1, t3, 8 | t1 = t3 >> 8;
0x00013de4 lw t7, 0x24(sp) | t7 = *(var_24h_2);
0x00013de8 sw t1, 0x7c(sp) | *(var_7ch_2) = t1;
0x00013dec srl t1, t3, 0x10 | t1 = t3 >> 0x10;
0x00013df0 sw t1, 0x80(sp) | *(var_80h_2) = t1;
0x00013df4 srl t1, t3, 0x18 | t1 = t3 >> 0x18;
0x00013df8 sw t1, 0x84(sp) | *(var_84h_2) = t1;
0x00013dfc srl t1, t7, 8 | t1 = t7 >> 8;
0x00013e00 sw t1, 0x88(sp) | *(var_88h_2) = t1;
0x00013e04 srl t1, t7, 0x10 | t1 = t7 >> 0x10;
0x00013e08 sw t1, 0x8c(sp) | *(var_8ch_2) = t1;
0x00013e0c srl t1, t7, 0x18 | t1 = t7 >> 0x18;
0x00013e10 srl t7, a1, 0x18 | t7 = a1 >> 0x18;
0x00013e14 sw t7, 0xa4(sp) | *(var_a4h_2) = t7;
0x00013e18 srl t7, a0, 8 | t7 = a0 >> 8;
0x00013e1c sw t7, 0xa8(sp) | *(var_a8h_2) = t7;
0x00013e20 srl t7, a0, 0x10 | t7 = a0 >> 0x10;
0x00013e24 sw t7, 0xac(sp) | *(var_ach_2) = t7;
0x00013e28 srl t7, a0, 0x18 | t7 = a0 >> 0x18;
0x00013e2c sw t7, 0xb0(sp) | *(var_b0h_2) = t7;
0x00013e30 srl t7, a3, 8 | t7 = a3 >> 8;
0x00013e34 sw t7, 0xb4(sp) | *(var_b4h_2) = t7;
0x00013e38 srl t7, a3, 0x10 | t7 = a3 >> 0x10;
0x00013e3c sw t7, 0xb8(sp) | *(var_b8h_2) = t7;
0x00013e40 srl t7, a3, 0x18 | t7 = a3 >> 0x18;
0x00013e44 sw t7, 0xbc(sp) | *(var_bch_2) = t7;
0x00013e48 srl t7, t6, 8 | t7 = t6 >> 8;
0x00013e4c sw t7, 0xc0(sp) | *(var_c0h_2) = t7;
0x00013e50 srl t7, t6, 0x10 | t7 = t6 >> 0x10;
0x00013e54 sw t7, 0xc4(sp) | *(var_c4h_2) = t7;
0x00013e58 srl t7, t6, 0x18 | t7 = t6 >> 0x18;
0x00013e5c sw t7, 0xc8(sp) | *(var_c8h_2) = t7;
0x00013e60 srl t7, t5, 8 | t7 = t5 >> 8;
0x00013e64 sw t7, 0xcc(sp) | *(var_cch_2) = t7;
0x00013e68 srl t7, t5, 0x10 | t7 = t5 >> 0x10;
0x00013e6c sw t1, 0x90(sp) | *(var_90h_2) = t1;
0x00013e70 sw t7, 0xd0(sp) | *(var_d0h_2) = t7;
0x00013e74 srl t1, t2, 8 | t1 = t2 >> 8;
0x00013e78 srl t7, t5, 0x18 | t7 = t5 >> 0x18;
0x00013e7c sw t1, 0x94(sp) | *(var_94h_2) = t1;
0x00013e80 sw t7, 0xd4(sp) | *(var_d4h_2) = t7;
0x00013e84 srl t1, t2, 0x10 | t1 = t2 >> 0x10;
0x00013e88 srl t7, t9, 8 | t7 = t9 >> 8;
0x00013e8c sw t1, 0x98(sp) | *(var_98h_2) = t1;
0x00013e90 sw t7, 0xd8(sp) | *(var_d8h_2) = t7;
0x00013e94 srl t1, t2, 0x18 | t1 = t2 >> 0x18;
0x00013e98 srl t7, t9, 0x10 | t7 = t9 >> 0x10;
0x00013e9c sw t1, 0x9c(sp) | *(var_9ch_2) = t1;
0x00013ea0 sw t7, 0xdc(sp) | *(var_dch_2) = t7;
0x00013ea4 srl t1, a1, 8 | t1 = a1 >> 8;
0x00013ea8 srl t7, t9, 0x18 | t7 = t9 >> 0x18;
0x00013eac sw t1, 0xa0(sp) | *(var_a0h_2) = t1;
0x00013eb0 sw t7, 0xe0(sp) | *(var_e0h_2) = t7;
0x00013eb4 srl t7, t8, 8 | t7 = t8 >> 8;
0x00013eb8 sw t7, 0xe4(sp) | *(var_e4h_2) = t7;
0x00013ebc srl t7, t8, 0x10 | t7 = t8 >> 0x10;
0x00013ec0 sw t7, 0xe8(sp) | *(var_e8h_2) = t7;
0x00013ec4 srl t7, t8, 0x18 | t7 = t8 >> 0x18;
0x00013ec8 sw t7, 0xec(sp) | *(var_ech_2) = t7;
0x00013ecc srl t7, a2, 8 | t7 = a2 >> 8;
0x00013ed0 sw t7, 0xf0(sp) | *(var_f0h_2) = t7;
0x00013ed4 srl t7, a2, 0x10 | t7 = a2 >> 0x10;
0x00013ed8 sw t7, 0xf4(sp) | *(var_f4h_2) = t7;
0x00013edc srl t7, a2, 0x18 | t7 = a2 >> 0x18;
0x00013ee0 sw t7, 0xf8(sp) | *(var_f8h_2) = t7;
0x00013ee4 srl t7, v1, 8 | t7 = v1 >> 8;
0x00013ee8 sw t7, 0xfc(sp) | *(var_fch_2) = t7;
0x00013eec srl t7, v1, 0x10 | t7 = v1 >> 0x10;
0x00013ef0 sw t7, 0x100(sp) | *(var_100h_2) = t7;
0x00013ef4 srl t7, v1, 0x18 | t7 = v1 >> 0x18;
0x00013ef8 sw t7, 0x104(sp) | *(var_104h_2) = t7;
0x00013efc sb ra, (s0) | *(s0) = ra;
0x00013f00 srl t7, t0, 8 | t7 = t0 >> 8;
0x00013f04 lw ra, 0x64(sp) | ra = *(var_64h_2);
0x00013f08 sb t4, 4(s0) | *((s0 + 4)) = t4;
0x00013f0c lw t4, 0x70(sp) | t4 = *(var_70h_2);
0x00013f10 sw t7, 0x108(sp) | *(var_108h_2) = t7;
0x00013f14 srl t7, t0, 0x10 | t7 = t0 >> 0x10;
0x00013f18 sw t7, 0x10c(sp) | *(var_10ch_2) = t7;
0x00013f1c sb ra, 1(s0) | *((s0 + 1)) = ra;
0x00013f20 srl t7, t0, 0x18 | t7 = t0 >> 0x18;
0x00013f24 lw ra, 0x68(sp) | ra = *(var_68h_2);
0x00013f28 sb t4, 5(s0) | *((s0 + 5)) = t4;
0x00013f2c lw t4, 0x74(sp) | t4 = *(var_74h_2);
0x00013f30 sw t7, 0x110(sp) | *(var_110h_2) = t7;
0x00013f34 srl t7, v0, 8 | t7 = v0 >> 8;
0x00013f38 sw t7, 0x114(sp) | *(var_114h_2) = t7;
0x00013f3c sb ra, 2(s0) | *((s0 + 2)) = ra;
0x00013f40 srl t7, v0, 0x10 | t7 = v0 >> 0x10;
0x00013f44 lw ra, 0x6c(sp) | ra = *(var_6ch_2);
0x00013f48 sb t4, 6(s0) | *((s0 + 6)) = t4;
0x00013f4c sb t3, 8(s0) | *((s0 + 8)) = t3;
0x00013f50 lw t4, 0x78(sp) | t4 = *(var_78h_2);
0x00013f54 lw t3, 0x7c(sp) | t3 = *(var_7ch_2);
0x00013f58 sw t7, 0x118(sp) | *(var_118h_2) = t7;
0x00013f5c srl t7, v0, 0x18 | t7 = v0 >> 0x18;
0x00013f60 sw t7, 0x11c(sp) | *(var_11ch_2) = t7;
0x00013f64 sb ra, 3(s0) | *((s0 + 3)) = ra;
0x00013f68 sb t4, 7(s0) | *((s0 + 7)) = t4;
0x00013f6c sb t3, 9(s0) | *((s0 + 9)) = t3;
0x00013f70 lw t3, 0x80(sp) | t3 = *(var_80h_2);
0x00013f74 lw t7, 0x24(sp) | t7 = *(var_24h_2);
0x00013f78 sb t3, 0xa(s0) | *((s0 + 10)) = t3;
0x00013f7c sb t7, 0xc(s0) | *((s0 + 12)) = t7;
0x00013f80 lw t3, 0x84(sp) | t3 = *(var_84h_2);
0x00013f84 lw t7, 0xa4(sp) | t7 = *(var_a4h_2);
0x00013f88 sb t3, 0xb(s0) | *((s0 + 11)) = t3;
0x00013f8c sb t2, 0x10(s0) | *((s0 + 16)) = t2;
0x00013f90 lw t3, 0x88(sp) | t3 = *(var_88h_2);
0x00013f94 lw t2, 0x94(sp) | t2 = *(var_94h_2);
0x00013f98 sb t7, 0x17(s0) | *((s0 + 23)) = t7;
0x00013f9c lw t7, 0xa8(sp) | t7 = *(var_a8h_2);
0x00013fa0 sb t3, 0xd(s0) | *((s0 + 13)) = t3;
0x00013fa4 sb t2, 0x11(s0) | *((s0 + 17)) = t2;
0x00013fa8 lw t3, 0x8c(sp) | t3 = *(var_8ch_2);
0x00013fac lw t2, 0x98(sp) | t2 = *(var_98h_2);
0x00013fb0 sb t7, 0x19(s0) | *((s0 + 25)) = t7;
0x00013fb4 lw t7, 0xac(sp) | t7 = *(var_ach_2);
0x00013fb8 srl t1, a1, 0x10 | t1 = a1 >> 0x10;
0x00013fbc sb t3, 0xe(s0) | *((s0 + 14)) = t3;
0x00013fc0 sb t2, 0x12(s0) | *((s0 + 18)) = t2;
0x00013fc4 lw t3, 0x90(sp) | t3 = *(var_90h_2);
0x00013fc8 lw t2, 0x9c(sp) | t2 = *(var_9ch_2);
0x00013fcc sb a1, 0x14(s0) | *((s0 + 20)) = a1;
0x00013fd0 sb t7, 0x1a(s0) | *((s0 + 26)) = t7;
0x00013fd4 lw a1, 0xa0(sp) | a1 = *(var_a0h_2);
0x00013fd8 lw t7, 0xb0(sp) | t7 = *(var_b0h_2);
0x00013fdc sb t3, 0xf(s0) | *((s0 + 15)) = t3;
0x00013fe0 sb t7, 0x1b(s0) | *((s0 + 27)) = t7;
0x00013fe4 sb t2, 0x13(s0) | *((s0 + 19)) = t2;
0x00013fe8 sb a1, 0x15(s0) | *((s0 + 21)) = a1;
0x00013fec sb t1, 0x16(s0) | *((s0 + 22)) = t1;
0x00013ff0 sb a0, 0x18(s0) | *((s0 + 24)) = a0;
0x00013ff4 sb a3, 0x1c(s0) | *((s0 + 28)) = a3;
0x00013ff8 lw t7, 0xb4(sp) | t7 = *(var_b4h_2);
0x00013ffc sb t6, 0x20(s0) | *((s0 + 32)) = t6;
0x00014000 sb t7, 0x1d(s0) | *((s0 + 29)) = t7;
0x00014004 lw t7, 0xb8(sp) | t7 = *(var_b8h_2);
0x00014008 sb t5, 0x24(s0) | *((s0 + 36)) = t5;
0x0001400c sb t7, 0x1e(s0) | *((s0 + 30)) = t7;
0x00014010 lw t7, 0xbc(sp) | t7 = *(var_bch_2);
0x00014014 sb t9, 0x28(s0) | *((s0 + 40)) = t9;
0x00014018 sb t7, 0x1f(s0) | *((s0 + 31)) = t7;
0x0001401c lw t7, 0xc0(sp) | t7 = *(var_c0h_2);
0x00014020 sb t8, 0x2c(s0) | *((s0 + 44)) = t8;
0x00014024 sb t7, 0x21(s0) | *((s0 + 33)) = t7;
0x00014028 lw t7, 0xc4(sp) | t7 = *(var_c4h_2);
0x0001402c sb t7, 0x22(s0) | *((s0 + 34)) = t7;
0x00014030 lw t7, 0xc8(sp) | t7 = *(var_c8h_2);
0x00014034 sb t7, 0x23(s0) | *((s0 + 35)) = t7;
0x00014038 lw t7, 0xcc(sp) | t7 = *(var_cch_2);
0x0001403c sb t7, 0x25(s0) | *((s0 + 37)) = t7;
0x00014040 lw t7, 0xd0(sp) | t7 = *(var_d0h_2);
0x00014044 sb t7, 0x26(s0) | *((s0 + 38)) = t7;
0x00014048 lw t7, 0xd4(sp) | t7 = *(var_d4h_2);
0x0001404c sb t7, 0x27(s0) | *((s0 + 39)) = t7;
0x00014050 lw t7, 0xd8(sp) | t7 = *(var_d8h_2);
0x00014054 sb t7, 0x29(s0) | *((s0 + 41)) = t7;
0x00014058 lw t7, 0xdc(sp) | t7 = *(var_dch_2);
0x0001405c sb t7, 0x2a(s0) | *((s0 + 42)) = t7;
0x00014060 lw t7, 0xe0(sp) | t7 = *(var_e0h_2);
0x00014064 sb t7, 0x2b(s0) | *((s0 + 43)) = t7;
0x00014068 lw t7, 0xe4(sp) | t7 = *(var_e4h_2);
0x0001406c sb t7, 0x2d(s0) | *((s0 + 45)) = t7;
0x00014070 lw t7, 0xe8(sp) | t7 = *(var_e8h_2);
0x00014074 sb t7, 0x2e(s0) | *((s0 + 46)) = t7;
0x00014078 lw t7, 0xec(sp) | t7 = *(var_ech_2);
0x0001407c sb v0, 0x3c(s0) | *((s0 + 60)) = v0;
0x00014080 sb t7, 0x2f(s0) | *((s0 + 47)) = t7;
0x00014084 lw t7, 0xf0(sp) | t7 = *(var_f0h_2);
0x00014088 addiu v0, zero, 0x40 | v0 = 0x40;
0x0001408c sb t7, 0x31(s0) | *((s0 + 49)) = t7;
0x00014090 lw t7, 0xf4(sp) | t7 = *(var_f4h_2);
0x00014094 sb a2, 0x30(s0) | *((s0 + 48)) = a2;
0x00014098 sb t7, 0x32(s0) | *((s0 + 50)) = t7;
0x0001409c lw t7, 0xf8(sp) | t7 = *(var_f8h_2);
0x000140a0 sb v1, 0x34(s0) | *((s0 + 52)) = v1;
0x000140a4 sb t7, 0x33(s0) | *((s0 + 51)) = t7;
0x000140a8 lw t7, 0xfc(sp) | t7 = *(var_fch_2);
0x000140ac sb t0, 0x38(s0) | *((s0 + 56)) = t0;
0x000140b0 sb t7, 0x35(s0) | *((s0 + 53)) = t7;
0x000140b4 lw t7, 0x100(sp) | t7 = *(var_100h_2);
0x000140b8 sb t7, 0x36(s0) | *((s0 + 54)) = t7;
0x000140bc lw t7, 0x104(sp) | t7 = *(var_104h_2);
0x000140c0 sb t7, 0x37(s0) | *((s0 + 55)) = t7;
0x000140c4 lw t7, 0x108(sp) | t7 = *(var_108h_2);
0x000140c8 sb t7, 0x39(s0) | *((s0 + 57)) = t7;
0x000140cc lw t7, 0x10c(sp) | t7 = *(var_10ch_2);
0x000140d0 sb t7, 0x3a(s0) | *((s0 + 58)) = t7;
0x000140d4 lw t7, 0x110(sp) | t7 = *(var_110h_2);
0x000140d8 sb t7, 0x3b(s0) | *((s0 + 59)) = t7;
0x000140dc lw t7, 0x114(sp) | t7 = *(var_114h_2);
0x000140e0 sb t7, 0x3d(s0) | *((s0 + 61)) = t7;
0x000140e4 lw t7, 0x118(sp) | t7 = *(var_118h_2);
0x000140e8 sb t7, 0x3e(s0) | *((s0 + 62)) = t7;
0x000140ec lw t7, 0x11c(sp) | t7 = *(var_11ch_2);
0x000140f0 sb t7, 0x3f(s0) | *((s0 + 63)) = t7;
| if (s2 == v0) {
0x000140f4 beq s2, v0, 0x1414c | goto label_18;
| }
0x000140f8 addiu s2, s2, -0x40 | s2 += -0x40;
0x000140fc sltiu v0, s2, 0x40 | v0 = (s2 < 0x40) ? 1 : 0;
0x00014100 addiu s0, s0, 0x40 | s0 += 0x40;
| if (v0 == 0) {
0x00014104 beqz v0, 0x13b5c | goto label_0;
| }
0x00014108 move v1, fp | v1 = fp;
0x0001410c move v0, zero | v0 = 0;
| do {
0x00014110 lw a0, 0x60(sp) | a0 = *(var_60h_2);
0x00014114 addiu v1, v1, 1 | v1++;
0x00014118 lbux a1, v0(a0) | __asm ("lbux a1, v0(a0)");
0x0001411c addiu v0, v0, 1 | v0++;
0x00014120 sltu a0, v0, s2 | a0 = (v0 < s2) ? 1 : 0;
0x00014124 sb a1, -1(v1) | *((v1 - 1)) = a1;
0x00014128 bnez a0, 0x14110 |
| } while (a0 != 0);
0x0001412c sw fp, 0x60(sp) | *(var_60h_2) = fp;
0x00014130 move s0, fp | s0 = fp;
0x00014134 b 0x13b5c | goto label_0;
| label_12:
0x00014138 lw a1, -0x350c(a0) | a1 = *((a0 - 3395));
0x0001413c lw s3, -0x7fa8(gp) | s3 = *((gp - 8170));
| if (a1 != 0) {
0x00014140 bnez a1, 0x13974 | goto label_1;
| }
0x00014144 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00014148 b 0x139ac | goto label_2;
| label_18:
0x0001414c lw v0, 0x124(sp) | v0 = *(var_124h_2);
0x00014150 move s1, s6 | s1 = s6;
0x00014154 lw v1, -0x34ec(s7) | v1 = *((s7 - 3387));
0x00014158 move s6, s3 | s6 = s3;
0x0001415c addiu v0, v0, 0x10 | v0 += 0x10;
0x00014160 move s3, s7 | s3 = s7;
0x00014164 sw v0, 0x30(s1) | *((s1 + 12)) = v0;
0x00014168 sw s6, 0x34(s1) | *((s1 + 13)) = s6;
0x0001416c lw s0, -0x34f0(s5) | s0 = *((s5 - 3388));
| if (v1 == 0) {
0x00014170 beqz v1, 0x1425c | goto label_19;
| }
0x00014174 move v0, s0 | v0 = s0;
| do {
0x00014178 lwl v1, 0x43(s0) | __asm ("lwl v1, 0x43(s0)");
0x0001417c lw t9, -0x7be4(gp) | t9 = sym.imp.memset;
0x00014180 addiu a2, zero, 0x28 | a2 = 0x28;
0x00014184 lwr v1, 0x40(s0) | __asm ("lwr v1, 0x40(s0)");
0x00014188 move a1, zero | a1 = 0;
0x0001418c sw v1, 0x10(v0) | *((v0 + 4)) = v1;
0x00014190 lwl v1, 0x47(s0) | __asm ("lwl v1, 0x47(s0)");
0x00014194 lwr v1, 0x44(s0) | __asm ("lwr v1, 0x44(s0)");
0x00014198 sw v1, 0x14(v0) | *((v0 + 5)) = v1;
0x0001419c lwl v1, 0x4b(s0) | __asm ("lwl v1, 0x4b(s0)");
0x000141a0 lwr v1, 0x48(s0) | __asm ("lwr v1, 0x48(s0)");
0x000141a4 sw v1, 0x18(v0) | *((v0 + 6)) = v1;
0x000141a8 lwl v1, 0x4f(s0) | __asm ("lwl v1, 0x4f(s0)");
0x000141ac lwr v1, 0x4c(s0) | __asm ("lwr v1, 0x4c(s0)");
0x000141b0 sw v1, 0x1c(v0) | *((v0 + 7)) = v1;
0x000141b4 lwl v1, 0x53(s0) | __asm ("lwl v1, 0x53(s0)");
0x000141b8 lwr v1, 0x50(s0) | __asm ("lwr v1, 0x50(s0)");
0x000141bc sw v1, 0x20(v0) | *((v0 + 8)) = v1;
0x000141c0 lwl v1, 0x57(s0) | __asm ("lwl v1, 0x57(s0)");
0x000141c4 lwr v1, 0x54(s0) | __asm ("lwr v1, 0x54(s0)");
0x000141c8 sw v1, 0x24(v0) | *((v0 + 9)) = v1;
0x000141cc lwl v1, 0x5b(s0) | __asm ("lwl v1, 0x5b(s0)");
0x000141d0 lwr v1, 0x58(s0) | __asm ("lwr v1, 0x58(s0)");
0x000141d4 sw v1, 0x28(v0) | *((v0 + 10)) = v1;
0x000141d8 lwl a0, 0x5f(s0) | __asm ("lwl a0, 0x5f(s0)");
0x000141dc lui v1, 0x6170 | v1 = 0x61707865;
0x000141e0 addiu v1, v1, 0x7865 |
0x000141e4 lwr a0, 0x5c(s0) | __asm ("lwr a0, 0x5c(s0)");
0x000141e8 sw v1, (v0) | *(v0) = v1;
0x000141ec sw a0, 0x2c(v0) | *((v0 + 11)) = a0;
0x000141f0 lui a0, 0x3320 | a0 = 0x3320646e;
0x000141f4 addiu a0, a0, 0x646e |
0x000141f8 sw a0, 4(v0) | *((v0 + 1)) = a0;
0x000141fc lui a0, 0x7962 | a0 = 0x79620000;
0x00014200 lw v1, -0x34f0(s5) | v1 = *((s5 - 3388));
0x00014204 addiu a0, a0, 0x2d32 | a0 += 0x2d32;
0x00014208 sw a0, 8(v0) | *((v0 + 2)) = a0;
0x0001420c lui a0, 0x6b20 | a0 = 0x6b206574;
0x00014210 addiu a0, a0, 0x6574 |
0x00014214 sw a0, 0xc(v0) | *((v0 + 3)) = a0;
0x00014218 sw zero, 0x30(v1) | *((v1 + 12)) = 0;
0x0001421c sw zero, 0x34(v1) | *((v1 + 13)) = 0;
0x00014220 lwl v0, 0x63(s0) | __asm ("lwl v0, 0x63(s0)");
0x00014224 addiu a0, v1, 0x40 | a0 = v1 + 0x40;
0x00014228 lwr v0, 0x60(s0) | __asm ("lwr v0, 0x60(s0)");
0x0001422c sw v0, 0x38(v1) | *((v1 + 14)) = v0;
0x00014230 lwl v0, 0x67(s0) | __asm ("lwl v0, 0x67(s0)");
0x00014234 lwr v0, 0x64(s0) | __asm ("lwr v0, 0x64(s0)");
0x00014238 sw v0, 0x3c(v1) | *((v1 + 15)) = v0;
0x0001423c jalr t9 | t9 ();
0x00014240 lw v0, -0x34ec(s3) | v0 = *((s3 - 3387));
0x00014244 addiu v1, zero, 0x3d8 | v1 = aav.0x000003d8;
0x00014248 lw gp, 0x18(sp) | gp = *(var_18h);
0x0001424c sw v1, (v0) | *(v0) = v1;
0x00014250 lw v0, -0x34f0(s5) | v0 = *((s5 - 3388));
0x00014254 addiu s0, v0, 0x40 | s0 = v0 + 0x40;
0x00014258 b 0x13a08 | goto label_3;
| label_19:
0x0001425c lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x00014260 addiu s1, zero, -1 | s1 = -1;
0x00014264 sw zero, 0x14(sp) | *(var_14h_2) = 0;
0x00014268 sw s1, 0x10(sp) | *(var_10h_2) = s1;
0x0001426c addiu a3, zero, 0x802 | a3 = 0x802;
0x00014270 addiu a2, zero, 3 | a2 = 3;
0x00014274 addiu a1, zero, 8 | a1 = 8;
0x00014278 move a0, zero | a0 = 0;
0x0001427c jalr t9 | t9 ();
0x00014280 sw v0, -0x34ec(s7) | *((s7 - 3387)) = v0;
0x00014284 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 == s1) {
0x00014288 beq v0, s1, 0x142fc | goto label_20;
| }
0x0001428c lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x00014290 sw zero, 0x14(sp) | *(var_14h_2) = 0;
0x00014294 sw s1, 0x10(sp) | *(var_10h_2) = s1;
0x00014298 addiu a3, zero, 0x802 | a3 = 0x802;
0x0001429c addiu a2, zero, 3 | a2 = 3;
0x000142a0 addiu a1, zero, 0x440 | a1 = 0x440;
0x000142a4 move a0, zero | a0 = 0;
0x000142a8 jalr t9 | t9 ();
0x000142ac sw v0, -0x34f0(s5) | *((s5 - 3388)) = v0;
0x000142b0 lw gp, 0x18(sp) | gp = *(var_18h);
| if (v0 == s1) {
0x000142b4 beq v0, s1, 0x142e4 | goto label_21;
| }
0x000142b8 lw v0, -0x7eb8(gp) | v0 = *((gp - 8110));
0x000142bc lw a2, -0x7f74(gp) | a2 = *(gp);
0x000142c0 lw t9, -0x7b78(gp) | t9 = sym.imp.__register_atfork;
0x000142c4 lw a3, (v0) | a3 = *(v0);
0x000142c8 addiu a2, a2, 0x2e20 | a2 += 0x2e20;
0x000142cc move a1, zero | a1 = 0;
0x000142d0 move a0, zero | a0 = 0;
0x000142d4 jalr t9 | t9 ();
0x000142d8 lw gp, 0x18(sp) | gp = *(var_18h);
0x000142dc lw v0, -0x34f0(s5) | v0 = *((s5 - 3388));
0x000142e0 b 0x14178 |
| } while (1);
| label_21:
0x000142e4 lw t9, -0x7c18(gp) | t9 = sym.imp.munmap;
0x000142e8 lw a0, -0x34ec(s7) | a0 = *((s7 - 3387));
0x000142ec addiu a1, zero, 8 | a1 = 8;
0x000142f0 jalr t9 | t9 ();
0x000142f4 lw gp, 0x18(sp) | gp = *(var_18h);
0x000142f8 sw zero, -0x34ec(s7) | *((s7 - 3387)) = 0;
| label_20:
0x000142fc lw t9, -0x7c80(gp) | t9 = sym.imp.abort;
0x00014300 jalr t9 | t9 ();
0x00014304 nop |
| label_17:
0x00014308 lw t9, -0x7b2c(gp) | t9 = sym.imp.__stack_chk_fail;
0x0001430c jalr t9 | t9 ();
0x00014310 nop |
0x00014314 lui gp, 2 |
0x00014318 addiu gp, gp, -0x2274 |
0x0001431c addu gp, gp, t9 | gp += t9;
0x00014320 addiu sp, sp, -0x1a0 |
0x00014324 lw v0, -0x7b04(gp) | v0 = *((gp - 7873));
0x00014328 sw gp, 0x18(sp) | *(var_18h_2) = gp;
0x0001432c sw ra, 0x19c(sp) | *(var_19ch) = ra;
0x00014330 sw fp, 0x198(sp) | *(var_198h) = fp;
0x00014334 sw s7, 0x194(sp) | *(var_194h_2) = s7;
0x00014338 sw s6, 0x190(sp) | *(var_190h_2) = s6;
0x0001433c sw s5, 0x18c(sp) | *(var_18ch_2) = s5;
0x00014340 sw s4, 0x188(sp) | *(var_188h_2) = s4;
0x00014344 sw s3, 0x184(sp) | *(var_184h_2) = s3;
0x00014348 sw s2, 0x180(sp) | *(var_180h_2) = s2;
0x0001434c sw s1, 0x17c(sp) | *(var_17ch_2) = s1;
0x00014350 sw s0, 0x178(sp) | *(var_178h_2) = s0;
0x00014354 lw v1, -0x7fa8(gp) | v1 = *((gp - 8170));
0x00014358 sw v0, 0x128(sp) | *(var_128h_2) = v0;
0x0001435c lw v0, (v0) | v0 = *(v0);
0x00014360 lw t9, -0x7b40(gp) | t9 = sym.imp.pthread_mutex_lock;
0x00014364 sw v1, 0x124(sp) | *(var_124h) = v1;
0x00014368 move s7, a0 | s7 = a0;
0x0001436c sw v0, 0x174(sp) | *(var_174h_2) = v0;
0x00014370 addiu a0, v1, -0x3508 | a0 = v1 + -0x3508;
0x00014374 move fp, a1 | fp = a1;
0x00014378 jalr t9 | t9 ();
0x0001437c lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014380 lw t9, -0x7b7c(gp) | t9 = sym.imp.getpid;
0x00014384 jalr t9 | t9 ();
0x00014388 nop |
0x0001438c lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014390 lw v1, -0x7fa8(gp) | v1 = *((gp - 8170));
0x00014394 lw a0, -0x3510(v1) | a0 = *((v1 - 3396));
0x00014398 sltiu a1, a0, 2 | a1 = (a0 < 2) ? 1 : 0;
0x0001439c lw a0, -0x7fa8(gp) | a0 = *((gp - 8170));
| if (a1 != 0) {
0x000143a0 bnel a1, zero, 0x143ac |
0x000143a4 lw a0, -0x7fa8(gp) | a0 = *((gp - 8170));
| if (v0 == a0) {
0x000143a8 beq v0, a0, 0x14d5c | goto label_22;
| }
| }
0x000143ac lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
| label_10:
0x000143b0 sw v0, -0x3510(v1) | *((v1 - 3396)) = v0;
0x000143b4 sw zero, -0x350c(a0) | *((a0 - 3395)) = 0;
0x000143b8 lw v0, -0x34ec(s4) | v0 = *((s4 - 3387));
0x000143bc lw t9, -0x7f74(gp) | t9 = *(gp);
| if (v0 != 0) {
0x000143c0 beqz v0, 0x14404 |
0x000143c4 sb zero, (v0) | *(v0) = 0;
0x000143c8 sb zero, 1(v0) | *((v0 + 1)) = 0;
0x000143cc sb zero, 2(v0) | *((v0 + 2)) = 0;
0x000143d0 sb zero, 3(v0) | *((v0 + 3)) = 0;
0x000143d4 sb zero, 4(v0) | *((v0 + 4)) = 0;
0x000143d8 sb zero, 5(v0) | *((v0 + 5)) = 0;
0x000143dc sb zero, 6(v0) | *((v0 + 6)) = 0;
0x000143e0 sb zero, 7(v0) | *((v0 + 7)) = 0;
0x000143e4 lw v1, -0x34ec(s4) | v1 = *((s4 - 3387));
| label_11:
0x000143e8 lw t9, -0x7f74(gp) | t9 = *(gp);
| if (v1 == 0) {
0x000143ec beqz v1, 0x14404 | goto label_23;
| }
0x000143f0 lw v0, 4(v1) | v0 = *((v1 + 1));
0x000143f4 sltu a0, fp, v0 | a0 = (fp < v0) ? 1 : 0;
0x000143f8 subu v0, v0, fp | __asm ("subu v0, v0, fp");
| if (a0 != 0) {
0x000143fc bnez a0, 0x14428 | goto label_9;
| }
0x00014400 lw t9, -0x7f74(gp) | t9 = *(gp);
| }
| /* fcn.00012e40 */
| label_23:
0x00014404 addiu t9, t9, 0x2e40 | t9 += 0x2e40;
0x00014408 bal 0x12e40 | fcn_00012e40 ();
0x0001440c nop |
0x00014410 lw v1, -0x34ec(s4) | v1 = *((s4 - 3387));
0x00014414 lw v0, 4(v1) | v0 = *((v1 + 1));
0x00014418 sltu a0, fp, v0 | a0 = (fp < v0) ? 1 : 0;
0x0001441c lw gp, 0x18(sp) | gp = *(var_18h_2);
| if (a0 != 0) {
0x00014420 bnez a0, 0x14d54 | goto label_24;
| }
0x00014424 move v0, zero | v0 = 0;
| label_9:
0x00014428 sw v0, 4(v1) | *((v1 + 1)) = v0;
| if (fp == 0) {
0x0001442c beqz fp, 0x14c04 | goto label_25;
| }
0x00014430 lw v0, -0x7fa8(gp) | v0 = *((gp - 8170));
0x00014434 move s2, s7 | s2 = s7;
0x00014438 sw v0, 0x20(sp) | *(var_20h) = v0;
0x0001443c addiu v0, sp, 0x134 | v0 = sp + aav.0x00000134;
0x00014440 sw v0, 0x60(sp) | *(var_60h) = v0;
0x00014444 lw v0, -0x7f74(gp) | v0 = *(gp);
0x00014448 move s3, fp | s3 = fp;
0x0001444c addiu v0, v0, 0x2e20 | v0 += 0x2e20;
0x00014450 sw v0, 0x12c(sp) | *(var_12ch) = v0;
0x00014454 lw v0, -0x34ec(s4) | v0 = *((s4 - 3387));
| label_5:
0x00014458 lw v1, 0x20(sp) | v1 = *(var_20h);
0x0001445c lw v0, (v0) | v0 = *(v0);
0x00014460 lw s0, -0x34f0(v1) | s0 = *((v1 - 3388));
0x00014464 addiu s5, s0, 0x40 | s5 = s0 + 0x40;
| if (v0 != 0) {
0x00014468 bnez v0, 0x14c50 | goto label_26;
| }
| label_7:
0x0001446c lw v0, (s0) | v0 = *(s0);
0x00014470 lw s7, 0x30(s0) | s7 = *((s0 + 12));
0x00014474 sw v0, 0x24(sp) | *(var_24h) = v0;
0x00014478 lw v0, 4(s0) | v0 = *((s0 + 1));
0x0001447c lw s1, 0x34(s0) | s1 = *((s0 + 13));
0x00014480 sw v0, 0x28(sp) | *(var_28h) = v0;
0x00014484 lw v0, 8(s0) | v0 = *((s0 + 2));
0x00014488 addiu fp, zero, 0x400 | fp = 0x400;
0x0001448c sw v0, 0x2c(sp) | *(var_2ch) = v0;
0x00014490 lw v0, 0xc(s0) | v0 = *((s0 + 3));
0x00014494 move v1, s3 | v1 = s3;
0x00014498 sw v0, 0x30(sp) | *(var_30h) = v0;
0x0001449c lw v0, 0x10(s0) | v0 = *((s0 + 4));
0x000144a0 sw s7, 0x64(sp) | *(var_64h) = s7;
0x000144a4 sw v0, 0x34(sp) | *(var_34h) = v0;
0x000144a8 lw v0, 0x14(s0) | v0 = *((s0 + 5));
0x000144ac sw s5, 0x5c(sp) | *(var_5ch) = s5;
0x000144b0 sw v0, 0x38(sp) | *(var_38h) = v0;
0x000144b4 lw v0, 0x18(s0) | v0 = *((s0 + 6));
0x000144b8 move s3, s0 | s3 = s0;
0x000144bc sw v0, 0x3c(sp) | *(var_3ch) = v0;
0x000144c0 lw v0, 0x1c(s0) | v0 = *((s0 + 7));
0x000144c4 sw v0, 0x40(sp) | *(var_40h) = v0;
0x000144c8 lw v0, 0x20(s0) | v0 = *((s0 + 8));
0x000144cc sw v0, 0x44(sp) | *(var_44h) = v0;
0x000144d0 lw v0, 0x24(s0) | v0 = *((s0 + 9));
0x000144d4 sw v0, 0x48(sp) | *(var_48h) = v0;
0x000144d8 lw v0, 0x28(s0) | v0 = *((s0 + 10));
0x000144dc sw v0, 0x4c(sp) | *(var_4ch) = v0;
0x000144e0 lw v0, 0x2c(s0) | v0 = *((s0 + 11));
0x000144e4 sw v0, 0x50(sp) | *(var_50h) = v0;
0x000144e8 lw v0, 0x38(s0) | v0 = *((s0 + 14));
0x000144ec sw v0, 0x54(sp) | *(var_54h) = v0;
0x000144f0 lw v0, 0x3c(s0) | v0 = *((s0 + 15));
0x000144f4 move s0, s7 | s0 = s7;
0x000144f8 sw v0, 0x58(sp) | *(var_58h) = v0;
0x000144fc move v0, s4 | v0 = s4;
0x00014500 move s7, v1 | s7 = v1;
0x00014504 move s4, s5 | s4 = s5;
0x00014508 move s5, s2 | s5 = s2;
0x0001450c move s2, s1 | s2 = s1;
0x00014510 move s1, fp | s1 = fp;
0x00014514 move fp, v0 | fp = v0;
| label_4:
0x00014518 lw t8, 0x50(sp) | t8 = *(var_50h);
0x0001451c lw t1, 0x58(sp) | t1 = *(var_58h);
0x00014520 lw v1, 0x40(sp) | v1 = *(var_40h);
0x00014524 lw t2, 0x30(sp) | t2 = *(var_30h);
0x00014528 lw t9, 0x4c(sp) | t9 = *(var_4ch);
0x0001452c lw t0, 0x54(sp) | t0 = *(var_54h);
0x00014530 lw a0, 0x3c(sp) | a0 = *(var_3ch);
0x00014534 lw t3, 0x2c(sp) | t3 = *(var_2ch);
0x00014538 lw t6, 0x48(sp) | t6 = *(var_48h);
0x0001453c lw a1, 0x38(sp) | a1 = *(var_38h);
0x00014540 lw t4, 0x28(sp) | t4 = *(var_28h);
0x00014544 lw t7, 0x44(sp) | t7 = *(var_44h);
0x00014548 lw a2, 0x34(sp) | a2 = *(var_34h);
0x0001454c lw t5, 0x24(sp) | t5 = *(var_24h);
0x00014550 move v0, s2 | v0 = s2;
0x00014554 move a3, s0 | a3 = s0;
0x00014558 addiu ra, zero, 0xa | ra = 0xa;
| do {
0x0001455c addu t5, a2, t5 | t5 = a2 + t5;
0x00014560 addu t4, t4, a1 | t4 += a1;
0x00014564 addu t3, t3, a0 | t3 += a0;
0x00014568 addu t2, t2, v1 | t2 += v1;
0x0001456c xor a3, a3, t5 | a3 ^= t5;
0x00014570 xor v0, v0, t4 | v0 ^= t4;
0x00014574 xor t0, t0, t3 | t0 ^= t3;
0x00014578 xor t1, t2, t1 | t1 = t2 ^ t1;
0x0001457c rotr a3, a3, 0x10 | __asm ("rotr a3, a3, 0x10");
0x00014580 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00014584 rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x00014588 rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x0001458c addu t7, t7, a3 | t7 += a3;
0x00014590 addu t6, t6, v0 | t6 += v0;
0x00014594 addu t9, t0, t9 | t9 = t0 + t9;
0x00014598 addu t8, t1, t8 | t8 = t1 + t8;
0x0001459c xor a2, a2, t7 | a2 ^= t7;
0x000145a0 xor a1, a1, t6 | a1 ^= t6;
0x000145a4 xor a0, a0, t9 | a0 ^= t9;
0x000145a8 xor v1, t8, v1 | v1 = t8 ^ v1;
0x000145ac rotr a2, a2, 0x14 | __asm ("rotr a2, a2, 0x14");
0x000145b0 rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x000145b4 rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x000145b8 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x000145bc addu t5, t5, a2 | t5 += a2;
0x000145c0 addu t4, t4, a1 | t4 += a1;
0x000145c4 addu t3, t3, a0 | t3 += a0;
0x000145c8 addu t2, t2, v1 | t2 += v1;
0x000145cc xor a3, a3, t5 | a3 ^= t5;
0x000145d0 xor v0, v0, t4 | v0 ^= t4;
0x000145d4 xor t0, t0, t3 | t0 ^= t3;
0x000145d8 xor t1, t1, t2 | t1 ^= t2;
0x000145dc rotr a3, a3, 0x18 | __asm ("rotr a3, a3, 0x18");
0x000145e0 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x000145e4 rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x000145e8 rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x000145ec addu t7, t7, a3 | t7 += a3;
0x000145f0 addu t6, t6, v0 | t6 += v0;
0x000145f4 addu t9, t9, t0 | t9 += t0;
0x000145f8 addu t8, t8, t1 | t8 += t1;
0x000145fc xor a2, a2, t7 | a2 ^= t7;
0x00014600 xor a1, a1, t6 | a1 ^= t6;
0x00014604 xor a0, a0, t9 | a0 ^= t9;
0x00014608 xor v1, v1, t8 | v1 ^= t8;
0x0001460c rotr a2, a2, 0x19 | __asm ("rotr a2, a2, 0x19");
0x00014610 rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x00014614 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x00014618 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x0001461c addu t5, t5, a1 | t5 += a1;
0x00014620 addu t4, t4, a0 | t4 += a0;
0x00014624 addu t3, t3, v1 | t3 += v1;
0x00014628 addu t2, a2, t2 | t2 = a2 + t2;
0x0001462c xor t1, t1, t5 | t1 ^= t5;
0x00014630 xor a3, a3, t4 | a3 ^= t4;
0x00014634 xor v0, v0, t3 | v0 ^= t3;
0x00014638 xor t0, t0, t2 | t0 ^= t2;
0x0001463c rotr t1, t1, 0x10 | __asm ("rotr t1, t1, 0x10");
0x00014640 rotr a3, a3, 0x10 | __asm ("rotr a3, a3, 0x10");
0x00014644 rotr v0, v0, 0x10 | __asm ("rotr v0, v0, 0x10");
0x00014648 rotr t0, t0, 0x10 | __asm ("rotr t0, t0, 0x10");
0x0001464c addu t9, t9, t1 | t9 += t1;
0x00014650 addu t8, t8, a3 | t8 += a3;
0x00014654 addu t7, t7, v0 | t7 += v0;
0x00014658 addu t6, t6, t0 | t6 += t0;
0x0001465c xor a1, a1, t9 | a1 ^= t9;
0x00014660 xor a0, a0, t8 | a0 ^= t8;
0x00014664 xor v1, v1, t7 | v1 ^= t7;
0x00014668 xor a2, a2, t6 | a2 ^= t6;
0x0001466c rotr a1, a1, 0x14 | __asm ("rotr a1, a1, 0x14");
0x00014670 rotr a0, a0, 0x14 | __asm ("rotr a0, a0, 0x14");
0x00014674 rotr v1, v1, 0x14 | __asm ("rotr v1, v1, 0x14");
0x00014678 rotr a2, a2, 0x14 | __asm ("rotr a2, a2, 0x14");
0x0001467c addu t5, t5, a1 | t5 += a1;
0x00014680 addu t4, t4, a0 | t4 += a0;
0x00014684 addu t3, t3, v1 | t3 += v1;
0x00014688 addu t2, t2, a2 | t2 += a2;
0x0001468c xor t1, t1, t5 | t1 ^= t5;
0x00014690 xor a3, a3, t4 | a3 ^= t4;
0x00014694 xor v0, v0, t3 | v0 ^= t3;
0x00014698 xor t0, t0, t2 | t0 ^= t2;
0x0001469c rotr t1, t1, 0x18 | __asm ("rotr t1, t1, 0x18");
0x000146a0 rotr a3, a3, 0x18 | __asm ("rotr a3, a3, 0x18");
0x000146a4 rotr v0, v0, 0x18 | __asm ("rotr v0, v0, 0x18");
0x000146a8 rotr t0, t0, 0x18 | __asm ("rotr t0, t0, 0x18");
0x000146ac addu t9, t9, t1 | t9 += t1;
0x000146b0 addu t8, t8, a3 | t8 += a3;
0x000146b4 addu t7, t7, v0 | t7 += v0;
0x000146b8 addu t6, t6, t0 | t6 += t0;
0x000146bc xor a1, a1, t9 | a1 ^= t9;
0x000146c0 xor a0, a0, t8 | a0 ^= t8;
0x000146c4 xor v1, v1, t7 | v1 ^= t7;
0x000146c8 xor a2, a2, t6 | a2 ^= t6;
0x000146cc addiu ra, ra, -1 | ra += -1;
0x000146d0 rotr a1, a1, 0x19 | __asm ("rotr a1, a1, 0x19");
0x000146d4 rotr a0, a0, 0x19 | __asm ("rotr a0, a0, 0x19");
0x000146d8 rotr v1, v1, 0x19 | __asm ("rotr v1, v1, 0x19");
0x000146dc rotr a2, a2, 0x19 | __asm ("rotr a2, a2, 0x19");
0x000146e0 bnez ra, 0x1455c |
| } while (ra != 0);
0x000146e4 lw ra, 0x24(sp) | ra = *(var_24h);
0x000146e8 addu a3, s0, a3 | a3 = s0 + a3;
0x000146ec addu ra, ra, t5 | ra += t5;
0x000146f0 lw t5, 0x28(sp) | t5 = *(var_28h);
0x000146f4 addiu s0, s0, 1 | s0++;
0x000146f8 addu t5, t5, t4 | t5 += t4;
0x000146fc lw t4, 0x2c(sp) | t4 = *(var_2ch);
0x00014700 addu t4, t4, t3 | t4 += t3;
0x00014704 lw t3, 0x30(sp) | t3 = *(var_30h);
0x00014708 addu t3, t3, t2 | t3 += t2;
0x0001470c lw t2, 0x34(sp) | t2 = *(var_34h);
0x00014710 addu t2, t2, a2 | t2 += a2;
0x00014714 lw a2, 0x38(sp) | a2 = *(var_38h);
0x00014718 addu a2, a2, a1 | a2 += a1;
0x0001471c lw a1, 0x3c(sp) | a1 = *(var_3ch);
0x00014720 addu a1, a1, a0 | a1 += a0;
0x00014724 lw a0, 0x40(sp) | a0 = *(var_40h);
0x00014728 addu a0, a0, v1 | a0 += v1;
0x0001472c lw v1, 0x44(sp) | v1 = *(var_44h);
0x00014730 addu t7, v1, t7 | t7 = v1 + t7;
0x00014734 lw v1, 0x48(sp) | v1 = *(var_48h);
0x00014738 addu t6, v1, t6 | t6 = v1 + t6;
0x0001473c lw v1, 0x4c(sp) | v1 = *(var_4ch);
0x00014740 addu t9, v1, t9 | t9 = v1 + t9;
0x00014744 lw v1, 0x50(sp) | v1 = *(var_50h);
0x00014748 addu t8, v1, t8 | t8 = v1 + t8;
0x0001474c addu v1, s2, v0 | v1 = s2 + v0;
0x00014750 lw v0, 0x54(sp) | v0 = *(var_54h);
0x00014754 addu t0, v0, t0 | t0 = v0 + t0;
0x00014758 lw v0, 0x58(sp) | v0 = *(var_58h);
0x0001475c addu v0, v0, t1 | v0 += t1;
| if (s0 == 0) {
0x00014760 bnez s0, 0x14768 |
0x00014764 addiu s2, s2, 1 | s2++;
| }
0x00014768 srl t1, ra, 8 | t1 = ra >> 8;
0x0001476c sw t1, 0x68(sp) | *(var_68h) = t1;
0x00014770 srl s6, a1, 8 | s6 = a1 >> 8;
0x00014774 srl t1, ra, 0x10 | t1 = ra >> 0x10;
0x00014778 sw t1, 0x6c(sp) | *(var_6ch) = t1;
0x0001477c sw s6, 0xac(sp) | *(var_ach) = s6;
0x00014780 srl t1, ra, 0x18 | t1 = ra >> 0x18;
0x00014784 srl s6, a1, 0x10 | s6 = a1 >> 0x10;
0x00014788 sw t1, 0x70(sp) | *(var_70h) = t1;
0x0001478c sw s6, 0xb0(sp) | *(var_b0h) = s6;
0x00014790 srl t1, t5, 8 | t1 = t5 >> 8;
0x00014794 srl s6, a1, 0x18 | s6 = a1 >> 0x18;
0x00014798 sw t1, 0x74(sp) | *(var_74h) = t1;
0x0001479c sw s6, 0xb4(sp) | *(var_b4h) = s6;
0x000147a0 srl t1, t5, 0x10 | t1 = t5 >> 0x10;
0x000147a4 srl s6, a0, 8 | s6 = a0 >> 8;
0x000147a8 sw t1, 0x78(sp) | *(var_78h) = t1;
0x000147ac sw s6, 0xb8(sp) | *(var_b8h) = s6;
0x000147b0 srl t1, t5, 0x18 | t1 = t5 >> 0x18;
0x000147b4 srl s6, a0, 0x10 | s6 = a0 >> 0x10;
0x000147b8 sw t1, 0x7c(sp) | *(var_7ch) = t1;
0x000147bc sw s6, 0xbc(sp) | *(var_bch) = s6;
0x000147c0 srl t1, t4, 8 | t1 = t4 >> 8;
0x000147c4 srl s6, a0, 0x18 | s6 = a0 >> 0x18;
0x000147c8 sw t1, 0x80(sp) | *(var_80h) = t1;
0x000147cc sw s6, 0xc0(sp) | *(var_c0h) = s6;
0x000147d0 srl t1, t4, 0x10 | t1 = t4 >> 0x10;
0x000147d4 srl s6, t7, 8 | s6 = t7 >> 8;
0x000147d8 sw t1, 0x84(sp) | *(var_84h) = t1;
0x000147dc sw s6, 0xc4(sp) | *(var_c4h) = s6;
0x000147e0 srl t1, t4, 0x18 | t1 = t4 >> 0x18;
0x000147e4 srl s6, t7, 0x10 | s6 = t7 >> 0x10;
0x000147e8 sw t1, 0x88(sp) | *(var_88h) = t1;
0x000147ec sw s6, 0xc8(sp) | *(var_c8h) = s6;
0x000147f0 srl t1, t3, 8 | t1 = t3 >> 8;
0x000147f4 srl s6, t7, 0x18 | s6 = t7 >> 0x18;
0x000147f8 sw t1, 0x8c(sp) | *(var_8ch) = t1;
0x000147fc sw s6, 0xcc(sp) | *(var_cch) = s6;
0x00014800 srl t1, t3, 0x10 | t1 = t3 >> 0x10;
0x00014804 srl s6, t6, 8 | s6 = t6 >> 8;
0x00014808 sw t1, 0x90(sp) | *(var_90h) = t1;
0x0001480c sw s6, 0xd0(sp) | *(var_d0h) = s6;
0x00014810 srl t1, t3, 0x18 | t1 = t3 >> 0x18;
0x00014814 srl s6, t6, 0x10 | s6 = t6 >> 0x10;
0x00014818 sw t1, 0x94(sp) | *(var_94h) = t1;
0x0001481c sw s6, 0xd4(sp) | *(var_d4h) = s6;
0x00014820 srl t1, t2, 8 | t1 = t2 >> 8;
0x00014824 srl s6, t6, 0x18 | s6 = t6 >> 0x18;
0x00014828 sw t1, 0x98(sp) | *(var_98h) = t1;
0x0001482c sw s6, 0xd8(sp) | *(var_d8h) = s6;
0x00014830 srl t1, t2, 0x10 | t1 = t2 >> 0x10;
0x00014834 srl s6, t9, 8 | s6 = t9 >> 8;
0x00014838 sw t1, 0x9c(sp) | *(var_9ch) = t1;
0x0001483c sw s6, 0xdc(sp) | *(var_dch) = s6;
0x00014840 srl t1, t2, 0x18 | t1 = t2 >> 0x18;
0x00014844 srl s6, t9, 0x10 | s6 = t9 >> 0x10;
0x00014848 sw t1, 0xa0(sp) | *(var_a0h) = t1;
0x0001484c sw s6, 0xe0(sp) | *(var_e0h) = s6;
0x00014850 srl t1, a2, 8 | t1 = a2 >> 8;
0x00014854 srl s6, t9, 0x18 | s6 = t9 >> 0x18;
0x00014858 sw t1, 0xa4(sp) | *(var_a4h) = t1;
0x0001485c sw s6, 0xe4(sp) | *(var_e4h) = s6;
0x00014860 srl t1, a2, 0x10 | t1 = a2 >> 0x10;
0x00014864 srl s6, t8, 8 | s6 = t8 >> 8;
0x00014868 sw t1, 0xa8(sp) | *(var_a8h) = t1;
0x0001486c sw s6, 0xe8(sp) | *(var_e8h) = s6;
0x00014870 srl s6, t8, 0x10 | s6 = t8 >> 0x10;
0x00014874 sw s6, 0xec(sp) | *(var_ech) = s6;
0x00014878 srl s6, t8, 0x18 | s6 = t8 >> 0x18;
0x0001487c sw s6, 0xf0(sp) | *(var_f0h) = s6;
0x00014880 srl s6, a3, 8 | s6 = a3 >> 8;
0x00014884 sw s6, 0xf4(sp) | *(var_f4h) = s6;
0x00014888 srl s6, a3, 0x10 | s6 = a3 >> 0x10;
0x0001488c sw s6, 0xf8(sp) | *(var_f8h) = s6;
0x00014890 srl s6, a3, 0x18 | s6 = a3 >> 0x18;
0x00014894 sw s6, 0xfc(sp) | *(var_fch) = s6;
0x00014898 srl s6, v1, 8 | s6 = v1 >> 8;
0x0001489c sw s6, 0x100(sp) | *(var_100h) = s6;
0x000148a0 srl s6, v1, 0x10 | s6 = v1 >> 0x10;
0x000148a4 sw s6, 0x104(sp) | *(var_104h) = s6;
0x000148a8 srl s6, v1, 0x18 | s6 = v1 >> 0x18;
0x000148ac sw s6, 0x108(sp) | *(var_108h) = s6;
0x000148b0 sb ra, (s4) | *(s4) = ra;
0x000148b4 srl s6, t0, 8 | s6 = t0 >> 8;
0x000148b8 lw ra, 0x68(sp) | ra = *(var_68h);
0x000148bc sb t5, 4(s4) | *((s4 + 4)) = t5;
0x000148c0 lw t5, 0x74(sp) | t5 = *(var_74h);
0x000148c4 sw s6, 0x10c(sp) | *(var_10ch) = s6;
0x000148c8 srl s6, t0, 0x10 | s6 = t0 >> 0x10;
0x000148cc sw s6, 0x110(sp) | *(var_110h) = s6;
0x000148d0 sb ra, 1(s4) | *((s4 + 1)) = ra;
0x000148d4 srl s6, t0, 0x18 | s6 = t0 >> 0x18;
0x000148d8 lw ra, 0x6c(sp) | ra = *(var_6ch);
0x000148dc sb t5, 5(s4) | *((s4 + 5)) = t5;
0x000148e0 sb t4, 8(s4) | *((s4 + 8)) = t4;
0x000148e4 lw t5, 0x78(sp) | t5 = *(var_78h);
0x000148e8 lw t4, 0x80(sp) | t4 = *(var_80h);
0x000148ec sw s6, 0x114(sp) | *(var_114h) = s6;
0x000148f0 srl s6, v0, 8 | s6 = v0 >> 8;
0x000148f4 sw s6, 0x118(sp) | *(var_118h) = s6;
0x000148f8 sb ra, 2(s4) | *((s4 + 2)) = ra;
0x000148fc srl s6, v0, 0x10 | s6 = v0 >> 0x10;
0x00014900 lw ra, 0x70(sp) | ra = *(var_70h);
0x00014904 sb t5, 6(s4) | *((s4 + 6)) = t5;
0x00014908 sb t4, 9(s4) | *((s4 + 9)) = t4;
0x0001490c lw t5, 0x7c(sp) | t5 = *(var_7ch);
0x00014910 lw t4, 0x84(sp) | t4 = *(var_84h);
0x00014914 sw s6, 0x11c(sp) | *(var_11ch) = s6;
0x00014918 srl s6, v0, 0x18 | s6 = v0 >> 0x18;
0x0001491c sw s6, 0x120(sp) | *(var_120h_2) = s6;
0x00014920 sb ra, 3(s4) | *((s4 + 3)) = ra;
0x00014924 sb t5, 7(s4) | *((s4 + 7)) = t5;
0x00014928 sb t4, 0xa(s4) | *((s4 + 10)) = t4;
0x0001492c lw s6, 0xac(sp) | s6 = *(var_ach);
0x00014930 sb t3, 0xc(s4) | *((s4 + 12)) = t3;
0x00014934 sb t2, 0x10(s4) | *((s4 + 16)) = t2;
0x00014938 lw t3, 0x8c(sp) | t3 = *(var_8ch);
0x0001493c lw t2, 0x98(sp) | t2 = *(var_98h);
0x00014940 sb s6, 0x19(s4) | *((s4 + 25)) = s6;
0x00014944 lw s6, 0xb0(sp) | s6 = *(var_b0h);
0x00014948 srl t1, a2, 0x18 | t1 = a2 >> 0x18;
0x0001494c sb t3, 0xd(s4) | *((s4 + 13)) = t3;
0x00014950 sb t2, 0x11(s4) | *((s4 + 17)) = t2;
0x00014954 lw t3, 0x90(sp) | t3 = *(var_90h);
0x00014958 lw t2, 0x9c(sp) | t2 = *(var_9ch);
0x0001495c sb a2, 0x14(s4) | *((s4 + 20)) = a2;
0x00014960 sb s6, 0x1a(s4) | *((s4 + 26)) = s6;
0x00014964 lw a2, 0xa4(sp) | a2 = *(var_a4h);
0x00014968 lw s6, 0xb4(sp) | s6 = *(var_b4h);
0x0001496c lw t4, 0x88(sp) | t4 = *(var_88h);
0x00014970 sb t3, 0xe(s4) | *((s4 + 14)) = t3;
0x00014974 sb t2, 0x12(s4) | *((s4 + 18)) = t2;
0x00014978 lw t3, 0x94(sp) | t3 = *(var_94h);
0x0001497c lw t2, 0xa0(sp) | t2 = *(var_a0h);
0x00014980 sb a2, 0x15(s4) | *((s4 + 21)) = a2;
0x00014984 sb s6, 0x1b(s4) | *((s4 + 27)) = s6;
0x00014988 lw a2, 0xa8(sp) | a2 = *(var_a8h);
0x0001498c lw s6, 0xb8(sp) | s6 = *(var_b8h);
0x00014990 sb t4, 0xb(s4) | *((s4 + 11)) = t4;
0x00014994 sb t3, 0xf(s4) | *((s4 + 15)) = t3;
0x00014998 sb t2, 0x13(s4) | *((s4 + 19)) = t2;
0x0001499c sb a2, 0x16(s4) | *((s4 + 22)) = a2;
0x000149a0 sb t1, 0x17(s4) | *((s4 + 23)) = t1;
0x000149a4 sb a1, 0x18(s4) | *((s4 + 24)) = a1;
0x000149a8 sb a0, 0x1c(s4) | *((s4 + 28)) = a0;
0x000149ac sb s6, 0x1d(s4) | *((s4 + 29)) = s6;
0x000149b0 lw s6, 0xbc(sp) | s6 = *(var_bch);
0x000149b4 sb t7, 0x20(s4) | *((s4 + 32)) = t7;
0x000149b8 sb s6, 0x1e(s4) | *((s4 + 30)) = s6;
0x000149bc lw s6, 0xc0(sp) | s6 = *(var_c0h);
0x000149c0 sb t6, 0x24(s4) | *((s4 + 36)) = t6;
0x000149c4 sb s6, 0x1f(s4) | *((s4 + 31)) = s6;
0x000149c8 lw s6, 0xc4(sp) | s6 = *(var_c4h);
0x000149cc sb t9, 0x28(s4) | *((s4 + 40)) = t9;
0x000149d0 sb s6, 0x21(s4) | *((s4 + 33)) = s6;
0x000149d4 lw s6, 0xc8(sp) | s6 = *(var_c8h);
0x000149d8 sb t8, 0x2c(s4) | *((s4 + 44)) = t8;
0x000149dc sb s6, 0x22(s4) | *((s4 + 34)) = s6;
0x000149e0 lw s6, 0xcc(sp) | s6 = *(var_cch);
0x000149e4 sb s6, 0x23(s4) | *((s4 + 35)) = s6;
0x000149e8 lw s6, 0xd0(sp) | s6 = *(var_d0h);
0x000149ec sb s6, 0x25(s4) | *((s4 + 37)) = s6;
0x000149f0 lw s6, 0xd4(sp) | s6 = *(var_d4h);
0x000149f4 sb s6, 0x26(s4) | *((s4 + 38)) = s6;
0x000149f8 lw s6, 0xd8(sp) | s6 = *(var_d8h);
0x000149fc sb s6, 0x27(s4) | *((s4 + 39)) = s6;
0x00014a00 lw s6, 0xdc(sp) | s6 = *(var_dch);
0x00014a04 sb s6, 0x29(s4) | *((s4 + 41)) = s6;
0x00014a08 lw s6, 0xe0(sp) | s6 = *(var_e0h);
0x00014a0c sb s6, 0x2a(s4) | *((s4 + 42)) = s6;
0x00014a10 lw s6, 0xe4(sp) | s6 = *(var_e4h);
0x00014a14 sb s6, 0x2b(s4) | *((s4 + 43)) = s6;
0x00014a18 lw s6, 0xe8(sp) | s6 = *(var_e8h);
0x00014a1c sb s6, 0x2d(s4) | *((s4 + 45)) = s6;
0x00014a20 lw s6, 0xec(sp) | s6 = *(var_ech);
0x00014a24 sb s6, 0x2e(s4) | *((s4 + 46)) = s6;
0x00014a28 lw s6, 0xf0(sp) | s6 = *(var_f0h);
0x00014a2c sb s6, 0x2f(s4) | *((s4 + 47)) = s6;
0x00014a30 sb a3, 0x30(s4) | *((s4 + 48)) = a3;
0x00014a34 lw s6, 0xf4(sp) | s6 = *(var_f4h);
0x00014a38 sb v0, 0x3c(s4) | *((s4 + 60)) = v0;
0x00014a3c sb s6, 0x31(s4) | *((s4 + 49)) = s6;
0x00014a40 lw s6, 0xf8(sp) | s6 = *(var_f8h);
0x00014a44 addiu v0, zero, 0x40 | v0 = 0x40;
0x00014a48 sb s6, 0x32(s4) | *((s4 + 50)) = s6;
0x00014a4c lw s6, 0xfc(sp) | s6 = *(var_fch);
0x00014a50 sb v1, 0x34(s4) | *((s4 + 52)) = v1;
0x00014a54 sb s6, 0x33(s4) | *((s4 + 51)) = s6;
0x00014a58 lw s6, 0x100(sp) | s6 = *(var_100h);
0x00014a5c sb t0, 0x38(s4) | *((s4 + 56)) = t0;
0x00014a60 sb s6, 0x35(s4) | *((s4 + 53)) = s6;
0x00014a64 lw s6, 0x104(sp) | s6 = *(var_104h);
0x00014a68 sb s6, 0x36(s4) | *((s4 + 54)) = s6;
0x00014a6c lw s6, 0x108(sp) | s6 = *(var_108h);
0x00014a70 sb s6, 0x37(s4) | *((s4 + 55)) = s6;
0x00014a74 lw s6, 0x10c(sp) | s6 = *(var_10ch);
0x00014a78 sb s6, 0x39(s4) | *((s4 + 57)) = s6;
0x00014a7c lw s6, 0x110(sp) | s6 = *(var_110h);
0x00014a80 sb s6, 0x3a(s4) | *((s4 + 58)) = s6;
0x00014a84 lw s6, 0x114(sp) | s6 = *(var_114h);
0x00014a88 sb s6, 0x3b(s4) | *((s4 + 59)) = s6;
0x00014a8c lw s6, 0x118(sp) | s6 = *(var_118h);
0x00014a90 sb s6, 0x3d(s4) | *((s4 + 61)) = s6;
0x00014a94 lw s6, 0x11c(sp) | s6 = *(var_11ch);
0x00014a98 sb s6, 0x3e(s4) | *((s4 + 62)) = s6;
0x00014a9c lw s6, 0x120(sp) | s6 = *(var_120h_2);
0x00014aa0 sb s6, 0x3f(s4) | *((s4 + 63)) = s6;
| if (s1 == v0) {
0x00014aa4 beq s1, v0, 0x14ae8 | goto label_27;
| }
0x00014aa8 addiu s1, s1, -0x40 | s1 += -0x40;
0x00014aac sltiu v0, s1, 0x40 | v0 = (s1 < 0x40) ? 1 : 0;
0x00014ab0 addiu s4, s4, 0x40 | s4 += 0x40;
| if (v0 == 0) {
0x00014ab4 beqz v0, 0x14518 | goto label_4;
| }
0x00014ab8 lw v1, 0x60(sp) | v1 = *(var_60h);
0x00014abc move v0, zero | v0 = 0;
| do {
0x00014ac0 lw a0, 0x5c(sp) | a0 = *(var_5ch);
0x00014ac4 addiu v1, v1, 1 | v1++;
0x00014ac8 lbux a1, v0(a0) | __asm ("lbux a1, v0(a0)");
0x00014acc addiu v0, v0, 1 | v0++;
0x00014ad0 sltu a0, v0, s1 | a0 = (v0 < s1) ? 1 : 0;
0x00014ad4 sb a1, -1(v1) | *((v1 - 1)) = a1;
0x00014ad8 bnez a0, 0x14ac0 |
| } while (a0 != 0);
0x00014adc lw s4, 0x60(sp) | s4 = *(var_60h);
0x00014ae0 sw s4, 0x5c(sp) | *(var_5ch) = s4;
0x00014ae4 b 0x14518 | goto label_4;
| label_27:
0x00014ae8 lw v0, 0x64(sp) | v0 = *(var_64h);
0x00014aec move s0, s3 | s0 = s3;
0x00014af0 addiu v0, v0, 0x10 | v0 += 0x10;
0x00014af4 sw v0, 0x30(s0) | *((s0 + 12)) = v0;
0x00014af8 lw v0, 0x20(sp) | v0 = *(var_20h);
0x00014afc lw v1, -0x34ec(fp) | v1 = *(var_34ech);
0x00014b00 move s1, s2 | s1 = s2;
0x00014b04 sw s1, 0x34(s0) | *((s0 + 13)) = s1;
0x00014b08 move s2, s5 | s2 = s5;
0x00014b0c move s3, s7 | s3 = s7;
0x00014b10 move s4, fp | s4 = fp;
0x00014b14 lw s0, -0x34f0(v0) | s0 = *((v0 - 3388));
| if (v1 == 0) {
0x00014b18 beqz v1, 0x14cc8 | goto label_28;
| }
0x00014b1c move v0, s0 | v0 = s0;
| label_8:
0x00014b20 lwl v1, 0x43(s0) | __asm ("lwl v1, 0x43(s0)");
0x00014b24 lw t9, -0x7be4(gp) | t9 = sym.imp.memset;
0x00014b28 addiu a2, zero, 0x28 | a2 = 0x28;
0x00014b2c lwr v1, 0x40(s0) | __asm ("lwr v1, 0x40(s0)");
0x00014b30 move a1, zero | a1 = 0;
0x00014b34 sw v1, 0x10(v0) | *((v0 + 4)) = v1;
0x00014b38 lwl v1, 0x47(s0) | __asm ("lwl v1, 0x47(s0)");
0x00014b3c lwr v1, 0x44(s0) | __asm ("lwr v1, 0x44(s0)");
0x00014b40 sw v1, 0x14(v0) | *((v0 + 5)) = v1;
0x00014b44 lwl v1, 0x4b(s0) | __asm ("lwl v1, 0x4b(s0)");
0x00014b48 lwr v1, 0x48(s0) | __asm ("lwr v1, 0x48(s0)");
0x00014b4c sw v1, 0x18(v0) | *((v0 + 6)) = v1;
0x00014b50 lwl v1, 0x4f(s0) | __asm ("lwl v1, 0x4f(s0)");
0x00014b54 lwr v1, 0x4c(s0) | __asm ("lwr v1, 0x4c(s0)");
0x00014b58 sw v1, 0x1c(v0) | *((v0 + 7)) = v1;
0x00014b5c lwl v1, 0x53(s0) | __asm ("lwl v1, 0x53(s0)");
0x00014b60 lwr v1, 0x50(s0) | __asm ("lwr v1, 0x50(s0)");
0x00014b64 sw v1, 0x20(v0) | *((v0 + 8)) = v1;
0x00014b68 lwl v1, 0x57(s0) | __asm ("lwl v1, 0x57(s0)");
0x00014b6c lwr v1, 0x54(s0) | __asm ("lwr v1, 0x54(s0)");
0x00014b70 sw v1, 0x24(v0) | *((v0 + 9)) = v1;
0x00014b74 lwl v1, 0x5b(s0) | __asm ("lwl v1, 0x5b(s0)");
0x00014b78 lwr v1, 0x58(s0) | __asm ("lwr v1, 0x58(s0)");
0x00014b7c sw v1, 0x28(v0) | *((v0 + 10)) = v1;
0x00014b80 lwl a0, 0x5f(s0) | __asm ("lwl a0, 0x5f(s0)");
0x00014b84 lui v1, 0x6170 | v1 = 0x61707865;
0x00014b88 addiu v1, v1, 0x7865 |
0x00014b8c lwr a0, 0x5c(s0) | __asm ("lwr a0, 0x5c(s0)");
0x00014b90 sw v1, (v0) | *(v0) = v1;
0x00014b94 sw a0, 0x2c(v0) | *((v0 + 11)) = a0;
0x00014b98 lw v1, 0x20(sp) | v1 = *(var_20h);
0x00014b9c lui a0, 0x3320 | a0 = 0x3320646e;
0x00014ba0 addiu a0, a0, 0x646e |
0x00014ba4 sw a0, 4(v0) | *((v0 + 1)) = a0;
0x00014ba8 lui a0, 0x7962 | a0 = 0x79620000;
0x00014bac lw v1, -0x34f0(v1) | v1 = *((v1 - 3388));
0x00014bb0 addiu a0, a0, 0x2d32 | a0 += 0x2d32;
0x00014bb4 sw a0, 8(v0) | *((v0 + 2)) = a0;
0x00014bb8 lui a0, 0x6b20 | a0 = 0x6b206574;
0x00014bbc addiu a0, a0, 0x6574 |
0x00014bc0 sw a0, 0xc(v0) | *((v0 + 3)) = a0;
0x00014bc4 sw zero, 0x30(v1) | *((v1 + 12)) = 0;
0x00014bc8 sw zero, 0x34(v1) | *((v1 + 13)) = 0;
0x00014bcc lwl v0, 0x63(s0) | __asm ("lwl v0, 0x63(s0)");
0x00014bd0 addiu a0, v1, 0x40 | a0 = v1 + 0x40;
0x00014bd4 lwr v0, 0x60(s0) | __asm ("lwr v0, 0x60(s0)");
0x00014bd8 sw v0, 0x38(v1) | *((v1 + 14)) = v0;
0x00014bdc lwl v0, 0x67(s0) | __asm ("lwl v0, 0x67(s0)");
0x00014be0 lwr v0, 0x64(s0) | __asm ("lwr v0, 0x64(s0)");
0x00014be4 sw v0, 0x3c(v1) | *((v1 + 15)) = v0;
0x00014be8 jalr t9 | t9 ();
0x00014bec lw v0, -0x34ec(s4) | v0 = *((s4 - 3387));
0x00014bf0 addiu v1, zero, 0x3d8 | v1 = aav.0x000003d8;
0x00014bf4 lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014bf8 sw v1, (v0) | *(v0) = v1;
| label_6:
0x00014bfc lw v0, -0x34ec(s4) | v0 = *((s4 - 3387));
| if (s3 != 0) {
0x00014c00 bnez s3, 0x14458 | goto label_5;
| }
| label_25:
0x00014c04 lw v0, 0x128(sp) | v0 = *(var_128h_2);
0x00014c08 lw v1, 0x174(sp) | v1 = *(var_174h_2);
0x00014c0c lw v0, (v0) | v0 = *(v0);
0x00014c10 lw v0, 0x124(sp) | v0 = *(var_124h);
| if (v1 != v0) {
0x00014c14 bne v1, v0, 0x14d94 | goto label_29;
| }
0x00014c18 lw ra, 0x19c(sp) | ra = *(var_19ch);
0x00014c1c lw fp, 0x198(sp) | fp = *(var_198h);
0x00014c20 lw s7, 0x194(sp) | s7 = *(var_194h_2);
0x00014c24 lw s6, 0x190(sp) | s6 = *(var_190h_2);
0x00014c28 lw s5, 0x18c(sp) | s5 = *(var_18ch_2);
0x00014c2c lw s4, 0x188(sp) | s4 = *(var_188h_2);
0x00014c30 lw s3, 0x184(sp) | s3 = *(var_184h_2);
0x00014c34 lw s2, 0x180(sp) | s2 = *(var_180h_2);
0x00014c38 lw s1, 0x17c(sp) | s1 = *(var_17ch_2);
0x00014c3c lw s0, 0x178(sp) | s0 = *(var_178h_2);
0x00014c40 lw t9, -0x7aa8(gp) | t9 = sym.imp.pthread_mutex_unlock;
0x00014c44 addiu a0, v0, -0x3508 | a0 = v0 + -0x3508;
0x00014c48 addiu sp, sp, 0x1a0 |
0x00014c4c jr t9 | t9 ();
| label_26:
0x00014c50 sltu s0, s3, v0 | s0 = (s3 < v0) ? 1 : 0;
0x00014c54 subu s1, s5, v0 | __asm ("subu s1, s5, v0");
0x00014c58 lw t9, -0x7b00(gp) | t9 = sym.imp.memcpy;
| if (s0 == 0) {
0x00014c5c movn v0, s3, s0 | v0 = s3;
| }
0x00014c60 addiu s1, s1, 0x400 | s1 += 0x400;
0x00014c64 move a0, s2 | a0 = s2;
0x00014c68 move a2, v0 | a2 = v0;
0x00014c6c move a1, s1 | a1 = s1;
0x00014c70 move s0, v0 | s0 = v0;
0x00014c74 jalr t9 | t9 ();
0x00014c78 lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014c7c move a2, s0 | a2 = s0;
0x00014c80 move a1, zero | a1 = 0;
0x00014c84 lw t9, -0x7be4(gp) | t9 = sym.imp.memset;
0x00014c88 move a0, s1 | a0 = s1;
0x00014c8c jalr t9 | t9 ();
0x00014c90 lw v1, -0x34ec(s4) | v1 = *((s4 - 3387));
0x00014c94 addu s2, s2, s0 | s2 += s0;
0x00014c98 subu s3, s3, s0 | __asm ("subu s3, s3, s0");
0x00014c9c lw v0, (v1) | v0 = *(v1);
0x00014ca0 lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014ca4 subu s0, v0, s0 | __asm ("subu s0, v0, s0");
0x00014ca8 sw s0, (v1) | *(v1) = s0;
0x00014cac lw v0, -0x34ec(s4) | v0 = *((s4 - 3387));
0x00014cb0 lw v0, (v0) | v0 = *(v0);
0x00014cb4 lw v0, 0x20(sp) | v0 = *(var_20h);
| if (v0 != 0) {
0x00014cb8 bnez v0, 0x14bfc | goto label_6;
| }
0x00014cbc lw s0, -0x34f0(v0) | s0 = *((v0 - 3388));
0x00014cc0 addiu s5, s0, 0x40 | s5 = s0 + 0x40;
0x00014cc4 b 0x1446c | goto label_7;
| label_28:
0x00014cc8 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x00014ccc addiu s1, zero, -1 | s1 = -1;
0x00014cd0 sw zero, 0x14(sp) | *(var_14h) = 0;
0x00014cd4 sw s1, 0x10(sp) | *(var_10h) = s1;
0x00014cd8 addiu a3, zero, 0x802 | a3 = 0x802;
0x00014cdc addiu a2, zero, 3 | a2 = 3;
0x00014ce0 addiu a1, zero, 8 | a1 = 8;
0x00014ce4 move a0, zero | a0 = 0;
0x00014ce8 jalr t9 | t9 ();
0x00014cec sw v0, -0x34ec(fp) | *((fp - 3387)) = v0;
0x00014cf0 lw gp, 0x18(sp) | gp = *(var_18h_2);
| if (v0 == s1) {
0x00014cf4 beq v0, s1, 0x14d88 | goto label_30;
| }
0x00014cf8 lw t9, -0x7bb4(gp) | t9 = sym.imp.mmap
0x00014cfc sw zero, 0x14(sp) | *(var_14h) = 0;
0x00014d00 sw s1, 0x10(sp) | *(var_10h) = s1;
0x00014d04 addiu a3, zero, 0x802 | a3 = 0x802;
0x00014d08 addiu a2, zero, 3 | a2 = 3;
0x00014d0c addiu a1, zero, 0x440 | a1 = 0x440;
0x00014d10 move a0, zero | a0 = 0;
0x00014d14 jalr t9 | t9 ();
0x00014d18 lw v1, 0x20(sp) | v1 = *(var_20h);
0x00014d1c lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014d20 sw v0, -0x34f0(v1) | *((v1 - 3388)) = v0;
| if (v0 == s1) {
0x00014d24 beq v0, s1, 0x14d70 | goto label_31;
| }
0x00014d28 lw v0, -0x7eb8(gp) | v0 = *((gp - 8110));
0x00014d2c lw t9, -0x7b78(gp) | t9 = sym.imp.__register_atfork;
0x00014d30 lw a2, 0x12c(sp) | a2 = *(var_12ch);
0x00014d34 lw a3, (v0) | a3 = *(v0);
0x00014d38 move a1, zero | a1 = 0;
0x00014d3c move a0, zero | a0 = 0;
0x00014d40 jalr t9 | t9 ();
0x00014d44 lw v0, 0x20(sp) | v0 = *(var_20h);
0x00014d48 lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014d4c lw v0, -0x34f0(v0) | v0 = *((v0 - 3388));
0x00014d50 b 0x14b20 | goto label_8;
| label_24:
0x00014d54 subu v0, v0, fp | __asm ("subu v0, v0, fp");
0x00014d58 b 0x14428 | goto label_9;
| label_22:
0x00014d5c lw a1, -0x350c(a0) | a1 = *((a0 - 3395));
0x00014d60 lw s4, -0x7fa8(gp) | s4 = *((gp - 8170));
| if (a1 != 0) {
0x00014d64 bnez a1, 0x143b0 | goto label_10;
| }
0x00014d68 lw v1, -0x34ec(s4) | v1 = *((s4 - 3387));
0x00014d6c b 0x143e8 | goto label_11;
| label_31:
0x00014d70 lw t9, -0x7c18(gp) | t9 = sym.imp.munmap;
0x00014d74 lw a0, -0x34ec(fp) | a0 = *(var_34ech);
0x00014d78 addiu a1, zero, 8 | a1 = 8;
0x00014d7c jalr t9 | t9 ();
0x00014d80 lw gp, 0x18(sp) | gp = *(var_18h_2);
0x00014d84 sw zero, -0x34ec(fp) | *((fp - 3387)) = 0;
| label_30:
0x00014d88 lw t9, -0x7c80(gp) | t9 = sym.imp.abort;
0x00014d8c jalr t9 | t9 ();
0x00014d90 nop |
| label_29:
0x00014d94 lw t9, -0x7b2c(gp) | t9 = sym.imp.__stack_chk_fail;
0x00014d98 jalr t9 | t9 ();
0x00014d9c nop |
| }
; assembly | /* r2dec pseudo code output */
| /* /logs/firmware/unblob_extracted/firmware_extract/4325012-58052244.squashfs_v4_le_extract/usr/sbin/ntpd @ 0x14da0 */
| #include <stdint.h>
|
; (fcn) sym.arc4random_uniform () | void arc4random_uniform () {
0x00014da0 lui gp, 2 |
0x00014da4 addiu gp, gp, -0x2d00 |
0x00014da8 addu gp, gp, t9 | gp += t9;
0x00014dac sltiu v0, a0, 2 | v0 = (a0 < 2) ? 1 : 0;
| if (v0 != 0) {
0x00014db0 bnez v0, 0x14e18 | goto label_0;
| }
0x00014db4 nop |
0x00014db8 addiu sp, sp, -0x28 |
0x00014dbc sw s1, 0x20(sp) | *(var_20h) = s1;
0x00014dc0 negu s1, a0 | __asm ("negu s1, a0");
0x00014dc4 divu zero, s1, a0 | __asm ("divu zero, s1, a0");
0x00014dc8 teq a0, zero, 7 | __asm ("teq a0, zero, 7");
0x00014dcc sw s0, 0x1c(sp) | *(var_1ch) = s0;
0x00014dd0 sw gp, 0x10(sp) | *(var_10h) = gp;
0x00014dd4 sw ra, 0x24(sp) | *(var_24h) = ra;
0x00014dd8 move s0, a0 | s0 = a0;
0x00014ddc mfhi s1 | __asm ("mfhi s1");
| do {
0x00014de0 lw t9, -0x7ea0(gp) | t9 = sym.arc4random;
0x00014de4 bal 0x138e0 | sym_arc4random ();
0x00014de8 nop |
0x00014dec sltu v1, v0, s1 | v1 = (v0 < s1) ? 1 : 0;
0x00014df0 lw gp, 0x10(sp) | gp = *(var_10h);
0x00014df4 bnez v1, 0x14de0 |
| } while (v1 != 0);
0x00014df8 divu zero, v0, s0 | __asm ("divu zero, v0, s0");
0x00014dfc teq s0, zero, 7 | __asm ("teq s0, zero, 7");
0x00014e00 lw ra, 0x24(sp) | ra = *(var_24h);
0x00014e04 lw s1, 0x20(sp) | s1 = *(var_20h);
0x00014e08 lw s0, 0x1c(sp) | s0 = *(var_1ch);
0x00014e0c addiu sp, sp, 0x28 |
0x00014e10 mfhi v0 | __asm ("mfhi v0");
0x00014e14 jr ra | return v0;
| label_0:
0x00014e18 move v0, zero | v0 = 0;
0x00014e1c jr ra | return v0;
| }
[*] Function mmap used 10 times ntpd