[*] Binary protection state of libglib-2.0.so.0.3000.2

  
  	No RELRO       No Canary found   NX disabled  DSO          No RPATH     No RUNPATH   No Symbols


[*] Function strcat tear down of libglib-2.0.so.0.3000.2 (NOTE(review): label likely wrong — the 0x...821/0x...2562/... constants below match the MD5 transform round constants, not strcat; verify symbol match)

    ; assembly                           | /* r2dec pseudo code output */
                                         | /* /logs/firmware/patool_extraction/DUMP/mtdblock8_unblob_extracted/mtdblock8_extract/0-9650176.squashfs_v4_le_extract/var/bluetooth/lib/libglib-2.0.so.0.3000.2 @ 0x23330 */
                                         | #include <stdint.h>
                                         |  
    ; (fcn) fcn.00023330 ()              | void fcn_00023330 () {
    0x00023330 addiu t8, t8, 0x3c        |     t8 += 0x3c;
    0x00023334 lw t8, (t8)               |     t8 = *(t8);
    0x00023338 addu t8, v0, t8           |     t8 = v0 + t8;
    0x0002333c addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023340 lui t8, 0x49b4            |     t8 = 0x49b40821;
    0x00023344 ori t8, t8, 0x821         |     
    0x00023348 addu s2, v0, t8           |     s2 = v0 + t8;
    0x0002334c srl v0, s2, 0xa           |     v0 = s2 >> 0xa;
    0x00023350 sll t8, s2, 0x16          |     t8 = s2 << 0x16;
    0x00023354 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023358 addu s2, s2, s1           |     s2 += s1;
    0x0002335c xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x00023360 and t8, t8, s0            |     t8 &= s0;
    0x00023364 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023368 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x0002336c addiu t8, t8, 4           |     t8 += 4;
    0x00023370 lw t8, (t8)               |     t8 = *(t8);
    0x00023374 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023378 addu v0, t8, s3           |     v0 = t8 + s3;
    0x0002337c lui t8, 0xf61e            |     t8 = 0xf61e2562;
    0x00023380 ori t8, t8, 0x2562        |     
    0x00023384 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023388 srl v0, s3, 0x1b          |     v0 = s3 >> 0x1b;
    0x0002338c sll t8, s3, 5             |     t8 = s3 << 5;
    0x00023390 or s3, v0, t8             |     s3 = v0 | t8;
    0x00023394 addu s3, s3, s2           |     s3 += s2;
    0x00023398 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x0002339c and t8, t8, s1            |     t8 &= s1;
    0x000233a0 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x000233a4 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000233a8 addiu t8, t8, 0x18        |     t8 += 0x18;
    0x000233ac lw t8, (t8)               |     t8 = *(t8);
    0x000233b0 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000233b4 addu v0, t8, s0           |     v0 = t8 + s0;
    0x000233b8 lui t8, 0xc040            |     t8 = 0xc040b340;
    0x000233bc ori t8, t8, 0xb340        |     
    0x000233c0 addu s0, v0, t8           |     s0 = v0 + t8;
    0x000233c4 srl v0, s0, 0x17          |     v0 = s0 >> 0x17;
    0x000233c8 sll t8, s0, 9             |     t8 = s0 << 9;
    0x000233cc or s0, v0, t8             |     s0 = v0 | t8;
    0x000233d0 addu s0, s0, s3           |     s0 += s3;
    0x000233d4 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x000233d8 and t8, t8, s2            |     t8 &= s2;
    0x000233dc xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000233e0 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000233e4 addiu t8, t8, 0x2c        |     t8 += 0x2c;
    0x000233e8 lw t8, (t8)               |     t8 = *(t8);
    0x000233ec addu t8, v0, t8           |     t8 = v0 + t8;
    0x000233f0 addu v0, t8, s1           |     v0 = t8 + s1;
    0x000233f4 lui t8, 0x265e            |     t8 = 0x265e5a51;
    0x000233f8 ori t8, t8, 0x5a51        |     
    0x000233fc addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023400 srl v0, s1, 0x12          |     v0 = s1 >> 0x12;
    0x00023404 sll t8, s1, 0xe           |     t8 = s1 << 0xe;
    0x00023408 or s1, v0, t8             |     s1 = v0 | t8;
    0x0002340c addu s1, s1, s0           |     s1 += s0;
    0x00023410 xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x00023414 and t8, t8, s3            |     t8 &= s3;
    0x00023418 xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x0002341c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023420 lw t8, (t8)               |     t8 = *(t8);
    0x00023424 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023428 addu v0, t8, s2           |     v0 = t8 + s2;
    0x0002342c lui t8, 0xe9b6            |     t8 = 0xe9b6c7aa;
    0x00023430 ori t8, t8, 0xc7aa        |     
    0x00023434 addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023438 srl v0, s2, 0xc           |     v0 = s2 >> 0xc;
    0x0002343c sll t8, s2, 0x14          |     t8 = s2 << 0x14;
    0x00023440 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023444 addu s2, s2, s1           |     s2 += s1;
    0x00023448 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x0002344c and t8, t8, s0            |     t8 &= s0;
    0x00023450 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023454 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023458 addiu t8, t8, 0x14        |     t8 += 0x14;
    0x0002345c lw t8, (t8)               |     t8 = *(t8);
    0x00023460 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023464 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023468 lui t8, 0xd62f            |     t8 = 0xd62f105d;
    0x0002346c ori t8, t8, 0x105d        |     
    0x00023470 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023474 srl v0, s3, 0x1b          |     v0 = s3 >> 0x1b;
    0x00023478 sll t8, s3, 5             |     t8 = s3 << 5;
    0x0002347c or s3, v0, t8             |     s3 = v0 | t8;
    0x00023480 addu s3, s3, s2           |     s3 += s2;
    0x00023484 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023488 and t8, t8, s1            |     t8 &= s1;
    0x0002348c xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023490 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023494 addiu t8, t8, 0x28        |     t8 += 0x28;
    0x00023498 lw t8, (t8)               |     t8 = *(t8);
    0x0002349c addu t8, v0, t8           |     t8 = v0 + t8;
    0x000234a0 addu v0, t8, s0           |     v0 = t8 + s0;
    0x000234a4 lui t8, 0x244             |     t8 = 0x2441453;
    0x000234a8 ori t8, t8, 0x1453        |     
    0x000234ac addu s0, v0, t8           |     s0 = v0 + t8;
    0x000234b0 srl v0, s0, 0x17          |     v0 = s0 >> 0x17;
    0x000234b4 sll t8, s0, 9             |     t8 = s0 << 9;
    0x000234b8 or s0, v0, t8             |     s0 = v0 | t8;
    0x000234bc addu s0, s0, s3           |     s0 += s3;
    0x000234c0 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x000234c4 and t8, t8, s2            |     t8 &= s2;
    0x000234c8 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000234cc lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000234d0 addiu t8, t8, 0x3c        |     t8 += 0x3c;
    0x000234d4 lw t8, (t8)               |     t8 = *(t8);
    0x000234d8 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000234dc addu v0, t8, s1           |     v0 = t8 + s1;
    0x000234e0 lui t8, 0xd8a1            |     t8 = 0xd8a1e681;
    0x000234e4 ori t8, t8, 0xe681        |     
    0x000234e8 addu s1, v0, t8           |     s1 = v0 + t8;
    0x000234ec srl v0, s1, 0x12          |     v0 = s1 >> 0x12;
    0x000234f0 sll t8, s1, 0xe           |     t8 = s1 << 0xe;
    0x000234f4 or s1, v0, t8             |     s1 = v0 | t8;
    0x000234f8 addu s1, s1, s0           |     s1 += s0;
    0x000234fc xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x00023500 and t8, t8, s3            |     t8 &= s3;
    0x00023504 xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023508 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x0002350c addiu t8, t8, 0x10        |     t8 += 0x10;
    0x00023510 lw t8, (t8)               |     t8 = *(t8);
    0x00023514 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023518 addu v0, t8, s2           |     v0 = t8 + s2;
    0x0002351c lui t8, 0xe7d3            |     t8 = 0xe7d3fbc8;
    0x00023520 ori t8, t8, 0xfbc8        |     
    0x00023524 addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023528 srl v0, s2, 0xc           |     v0 = s2 >> 0xc;
    0x0002352c sll t8, s2, 0x14          |     t8 = s2 << 0x14;
    0x00023530 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023534 addu s2, s2, s1           |     s2 += s1;
    0x00023538 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x0002353c and t8, t8, s0            |     t8 &= s0;
    0x00023540 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023544 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023548 addiu t8, t8, 0x24        |     t8 += 0x24;
    0x0002354c lw t8, (t8)               |     t8 = *(t8);
    0x00023550 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023554 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023558 lui t8, 0x21e1            |     t8 = 0x21e1cde6;
    0x0002355c ori t8, t8, 0xcde6        |     
    0x00023560 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023564 srl v0, s3, 0x1b          |     v0 = s3 >> 0x1b;
    0x00023568 sll t8, s3, 5             |     t8 = s3 << 5;
    0x0002356c or s3, v0, t8             |     s3 = v0 | t8;
    0x00023570 addu s3, s3, s2           |     s3 += s2;
    0x00023574 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023578 and t8, t8, s1            |     t8 &= s1;
    0x0002357c xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023580 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023584 addiu t8, t8, 0x38        |     t8 += 0x38;
    0x00023588 lw t8, (t8)               |     t8 = *(t8);
    0x0002358c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023590 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023594 lui t8, 0xc337            |     t8 = 0xc33707d6;
    0x00023598 ori t8, t8, 0x7d6         |     
    0x0002359c addu s0, v0, t8           |     s0 = v0 + t8;
    0x000235a0 srl v0, s0, 0x17          |     v0 = s0 >> 0x17;
    0x000235a4 sll t8, s0, 9             |     t8 = s0 << 9;
    0x000235a8 or s0, v0, t8             |     s0 = v0 | t8;
    0x000235ac addu s0, s0, s3           |     s0 += s3;
    0x000235b0 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x000235b4 and t8, t8, s2            |     t8 &= s2;
    0x000235b8 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000235bc lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000235c0 addiu t8, t8, 0xc         |     t8 += 0xc;
    0x000235c4 lw t8, (t8)               |     t8 = *(t8);
    0x000235c8 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000235cc addu v0, t8, s1           |     v0 = t8 + s1;
    0x000235d0 lui t8, 0xf4d5            |     t8 = 0xf4d50d87;
    0x000235d4 ori t8, t8, 0xd87         |     
    0x000235d8 addu s1, v0, t8           |     s1 = v0 + t8;
    0x000235dc srl v0, s1, 0x12          |     v0 = s1 >> 0x12;
    0x000235e0 sll t8, s1, 0xe           |     t8 = s1 << 0xe;
    0x000235e4 or s1, v0, t8             |     s1 = v0 | t8;
    0x000235e8 addu s1, s1, s0           |     s1 += s0;
    0x000235ec xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x000235f0 and t8, t8, s3            |     t8 &= s3;
    0x000235f4 xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x000235f8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000235fc addiu t8, t8, 0x20        |     t8 += 0x20;
    0x00023600 lw t8, (t8)               |     t8 = *(t8);
    0x00023604 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023608 addu v0, t8, s2           |     v0 = t8 + s2;
    0x0002360c lui t8, 0x455a            |     t8 = 0x455a14ed;
    0x00023610 ori t8, t8, 0x14ed        |     
    0x00023614 addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023618 srl v0, s2, 0xc           |     v0 = s2 >> 0xc;
    0x0002361c sll t8, s2, 0x14          |     t8 = s2 << 0x14;
    0x00023620 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023624 addu s2, s2, s1           |     s2 += s1;
    0x00023628 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x0002362c and t8, t8, s0            |     t8 &= s0;
    0x00023630 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023634 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023638 addiu t8, t8, 0x34        |     t8 += 0x34;
    0x0002363c lw t8, (t8)               |     t8 = *(t8);
    0x00023640 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023644 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023648 lui t8, 0xa9e3            |     t8 = 0xa9e3e905;
    0x0002364c ori t8, t8, 0xe905        |     
    0x00023650 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023654 srl v0, s3, 0x1b          |     v0 = s3 >> 0x1b;
    0x00023658 sll t8, s3, 5             |     t8 = s3 << 5;
    0x0002365c or s3, v0, t8             |     s3 = v0 | t8;
    0x00023660 addu s3, s3, s2           |     s3 += s2;
    0x00023664 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023668 and t8, t8, s1            |     t8 &= s1;
    0x0002366c xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023670 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023674 addiu t8, t8, 8           |     t8 += 8;
    0x00023678 lw t8, (t8)               |     t8 = *(t8);
    0x0002367c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023680 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023684 lui t8, 0xfcef            |     t8 = 0xfcefa3f8;
    0x00023688 ori t8, t8, 0xa3f8        |     
    0x0002368c addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023690 srl v0, s0, 0x17          |     v0 = s0 >> 0x17;
    0x00023694 sll t8, s0, 9             |     t8 = s0 << 9;
    0x00023698 or s0, v0, t8             |     s0 = v0 | t8;
    0x0002369c addu s0, s0, s3           |     s0 += s3;
    0x000236a0 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x000236a4 and t8, t8, s2            |     t8 &= s2;
    0x000236a8 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000236ac lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000236b0 addiu t8, t8, 0x1c        |     t8 += 0x1c;
    0x000236b4 lw t8, (t8)               |     t8 = *(t8);
    0x000236b8 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000236bc addu v0, t8, s1           |     v0 = t8 + s1;
    0x000236c0 lui t8, 0x676f            |     t8 = 0x676f02d9;
    0x000236c4 ori t8, t8, 0x2d9         |     
    0x000236c8 addu s1, v0, t8           |     s1 = v0 + t8;
    0x000236cc srl v0, s1, 0x12          |     v0 = s1 >> 0x12;
    0x000236d0 sll t8, s1, 0xe           |     t8 = s1 << 0xe;
    0x000236d4 or s1, v0, t8             |     s1 = v0 | t8;
    0x000236d8 addu s1, s1, s0           |     s1 += s0;
    0x000236dc xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x000236e0 and t8, t8, s3            |     t8 &= s3;
    0x000236e4 xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x000236e8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000236ec addiu t8, t8, 0x30        |     t8 += 0x30;
    0x000236f0 lw t8, (t8)               |     t8 = *(t8);
    0x000236f4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000236f8 addu v0, t8, s2           |     v0 = t8 + s2;
    0x000236fc lui t8, 0x8d2a            |     t8 = 0x8d2a4c8a;
    0x00023700 ori t8, t8, 0x4c8a        |     
    0x00023704 addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023708 srl v0, s2, 0xc           |     v0 = s2 >> 0xc;
    0x0002370c sll t8, s2, 0x14          |     t8 = s2 << 0x14;
    0x00023710 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023714 addu s2, s2, s1           |     s2 += s1;
    0x00023718 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x0002371c xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023720 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023724 addiu t8, t8, 0x14        |     t8 += 0x14;
    0x00023728 lw t8, (t8)               |     t8 = *(t8);
    0x0002372c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023730 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023734 lui t8, 0xfffa            |     t8 = 0xfffa3942;
    0x00023738 ori t8, t8, 0x3942        |     
    0x0002373c addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023740 srl v0, s3, 0x1c          |     v0 = s3 >> 0x1c;
    0x00023744 sll t8, s3, 4             |     t8 = s3 << 4;
    0x00023748 or s3, v0, t8             |     s3 = v0 | t8;
    0x0002374c addu s3, s3, s2           |     s3 += s2;
    0x00023750 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023754 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023758 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x0002375c addiu t8, t8, 0x20        |     t8 += 0x20;
    0x00023760 lw t8, (t8)               |     t8 = *(t8);
    0x00023764 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023768 addu v0, t8, s0           |     v0 = t8 + s0;
    0x0002376c lui t8, 0x8771            |     t8 = 0x8771f681;
    0x00023770 ori t8, t8, 0xf681        |     
    0x00023774 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023778 srl v0, s0, 0x15          |     v0 = s0 >> 0x15;
    0x0002377c sll t8, s0, 0xb           |     t8 = s0 << 0xb;
    0x00023780 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023784 addu s0, s0, s3           |     s0 += s3;
    0x00023788 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x0002378c xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023790 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023794 addiu t8, t8, 0x2c        |     t8 += 0x2c;
    0x00023798 lw t8, (t8)               |     t8 = *(t8);
    0x0002379c addu t8, v0, t8           |     t8 = v0 + t8;
    0x000237a0 addu v0, t8, s1           |     v0 = t8 + s1;
    0x000237a4 lui t8, 0x6d9d            |     t8 = 0x6d9d6122;
    0x000237a8 ori t8, t8, 0x6122        |     
    0x000237ac addu s1, v0, t8           |     s1 = v0 + t8;
    0x000237b0 srl v0, s1, 0x10          |     v0 = s1 >> 0x10;
    0x000237b4 sll t8, s1, 0x10          |     t8 = s1 << 0x10;
    0x000237b8 or s1, v0, t8             |     s1 = v0 | t8;
    0x000237bc addu s1, s1, s0           |     s1 += s0;
    0x000237c0 xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x000237c4 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000237c8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000237cc addiu t8, t8, 0x38        |     t8 += 0x38;
    0x000237d0 lw t8, (t8)               |     t8 = *(t8);
    0x000237d4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000237d8 addu v0, t8, s2           |     v0 = t8 + s2;
    0x000237dc lui t8, 0xfde5            |     t8 = 0xfde5380c;
    0x000237e0 ori t8, t8, 0x380c        |     
    0x000237e4 addu s2, v0, t8           |     s2 = v0 + t8;
    0x000237e8 srl v0, s2, 9             |     v0 = s2 >> 9;
    0x000237ec sll t8, s2, 0x17          |     t8 = s2 << 0x17;
    0x000237f0 or s2, v0, t8             |     s2 = v0 | t8;
    0x000237f4 addu s2, s2, s1           |     s2 += s1;
    0x000237f8 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x000237fc xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023800 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023804 addiu t8, t8, 4           |     t8 += 4;
    0x00023808 lw t8, (t8)               |     t8 = *(t8);
    0x0002380c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023810 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023814 lui t8, 0xa4be            |     t8 = 0xa4beea44;
    0x00023818 ori t8, t8, 0xea44        |     
    0x0002381c addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023820 srl v0, s3, 0x1c          |     v0 = s3 >> 0x1c;
    0x00023824 sll t8, s3, 4             |     t8 = s3 << 4;
    0x00023828 or s3, v0, t8             |     s3 = v0 | t8;
    0x0002382c addu s3, s3, s2           |     s3 += s2;
    0x00023830 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023834 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023838 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x0002383c addiu t8, t8, 0x10        |     t8 += 0x10;
    0x00023840 lw t8, (t8)               |     t8 = *(t8);
    0x00023844 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023848 addu v0, t8, s0           |     v0 = t8 + s0;
    0x0002384c lui t8, 0x4bde            |     t8 = 0x4bdecfa9;
    0x00023850 ori t8, t8, 0xcfa9        |     
    0x00023854 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023858 srl v0, s0, 0x15          |     v0 = s0 >> 0x15;
    0x0002385c sll t8, s0, 0xb           |     t8 = s0 << 0xb;
    0x00023860 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023864 addu s0, s0, s3           |     s0 += s3;
    0x00023868 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x0002386c xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023870 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023874 addiu t8, t8, 0x1c        |     t8 += 0x1c;
    0x00023878 lw t8, (t8)               |     t8 = *(t8);
    0x0002387c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023880 addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023884 lui t8, 0xf6bb            |     t8 = 0xf6bb4b60;
    0x00023888 ori t8, t8, 0x4b60        |     
    0x0002388c addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023890 srl v0, s1, 0x10          |     v0 = s1 >> 0x10;
    0x00023894 sll t8, s1, 0x10          |     t8 = s1 << 0x10;
    0x00023898 or s1, v0, t8             |     s1 = v0 | t8;
    0x0002389c addu s1, s1, s0           |     s1 += s0;
    0x000238a0 xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x000238a4 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x000238a8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000238ac addiu t8, t8, 0x28        |     t8 += 0x28;
    0x000238b0 lw t8, (t8)               |     t8 = *(t8);
    0x000238b4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000238b8 addu v0, t8, s2           |     v0 = t8 + s2;
    0x000238bc lui t8, 0xbebf            |     t8 = 0xbebfbc70;
    0x000238c0 ori t8, t8, 0xbc70        |     
    0x000238c4 addu s2, v0, t8           |     s2 = v0 + t8;
    0x000238c8 srl v0, s2, 9             |     v0 = s2 >> 9;
    0x000238cc sll t8, s2, 0x17          |     t8 = s2 << 0x17;
    0x000238d0 or s2, v0, t8             |     s2 = v0 | t8;
    0x000238d4 addu s2, s2, s1           |     s2 += s1;
    0x000238d8 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x000238dc xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x000238e0 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000238e4 addiu t8, t8, 0x34        |     t8 += 0x34;
    0x000238e8 lw t8, (t8)               |     t8 = *(t8);
    0x000238ec addu t8, v0, t8           |     t8 = v0 + t8;
    0x000238f0 addu v0, t8, s3           |     v0 = t8 + s3;
    0x000238f4 lui t8, 0x289b            |     t8 = 0x289b7ec6;
    0x000238f8 ori t8, t8, 0x7ec6        |     
    0x000238fc addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023900 srl v0, s3, 0x1c          |     v0 = s3 >> 0x1c;
    0x00023904 sll t8, s3, 4             |     t8 = s3 << 4;
    0x00023908 or s3, v0, t8             |     s3 = v0 | t8;
    0x0002390c addu s3, s3, s2           |     s3 += s2;
    0x00023910 xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x00023914 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023918 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x0002391c lw t8, (t8)               |     t8 = *(t8);
    0x00023920 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023924 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023928 lui t8, 0xeaa1            |     t8 = 0xeaa127fa;
    0x0002392c ori t8, t8, 0x27fa        |     
    0x00023930 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023934 srl v0, s0, 0x15          |     v0 = s0 >> 0x15;
    0x00023938 sll t8, s0, 0xb           |     t8 = s0 << 0xb;
    0x0002393c or s0, v0, t8             |     s0 = v0 | t8;
    0x00023940 addu s0, s0, s3           |     s0 += s3;
    0x00023944 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x00023948 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x0002394c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023950 addiu t8, t8, 0xc         |     t8 += 0xc;
    0x00023954 lw t8, (t8)               |     t8 = *(t8);
    0x00023958 addu t8, v0, t8           |     t8 = v0 + t8;
    0x0002395c addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023960 lui t8, 0xd4ef            |     t8 = 0xd4ef3085;
    0x00023964 ori t8, t8, 0x3085        |     
    0x00023968 addu s1, v0, t8           |     s1 = v0 + t8;
    0x0002396c srl v0, s1, 0x10          |     v0 = s1 >> 0x10;
    0x00023970 sll t8, s1, 0x10          |     t8 = s1 << 0x10;
    0x00023974 or s1, v0, t8             |     s1 = v0 | t8;
    0x00023978 addu s1, s1, s0           |     s1 += s0;
    0x0002397c xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x00023980 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023984 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023988 addiu t8, t8, 0x18        |     t8 += 0x18;
    0x0002398c lw t8, (t8)               |     t8 = *(t8);
    0x00023990 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023994 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023998 lui t8, 0x488             |     t8 = 0x4881d05;
    0x0002399c ori t8, t8, 0x1d05        |     
    0x000239a0 addu s2, v0, t8           |     s2 = v0 + t8;
    0x000239a4 srl v0, s2, 9             |     v0 = s2 >> 9;
    0x000239a8 sll t8, s2, 0x17          |     t8 = s2 << 0x17;
    0x000239ac or s2, v0, t8             |     s2 = v0 | t8;
    0x000239b0 addu s2, s2, s1           |     s2 += s1;
    0x000239b4 xor t8, s2, s1            |     t8 = s2 ^ s1;
    0x000239b8 xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x000239bc lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000239c0 addiu t8, t8, 0x24        |     t8 += 0x24;
    0x000239c4 lw t8, (t8)               |     t8 = *(t8);
    0x000239c8 addu t8, v0, t8           |     t8 = v0 + t8;
    0x000239cc addu v0, t8, s3           |     v0 = t8 + s3;
    0x000239d0 lui t8, 0xd9d4            |     t8 = 0xd9d4d039;
    0x000239d4 ori t8, t8, 0xd039        |     
    0x000239d8 addu s3, v0, t8           |     s3 = v0 + t8;
    0x000239dc srl v0, s3, 0x1c          |     v0 = s3 >> 0x1c;
    0x000239e0 sll t8, s3, 4             |     t8 = s3 << 4;
    0x000239e4 or s3, v0, t8             |     s3 = v0 | t8;
    0x000239e8 addu s3, s3, s2           |     s3 += s2;
    0x000239ec xor t8, s3, s2            |     t8 = s3 ^ s2;
    0x000239f0 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x000239f4 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x000239f8 addiu t8, t8, 0x30        |     t8 += 0x30;
    0x000239fc lw t8, (t8)               |     t8 = *(t8);
    0x00023a00 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023a04 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023a08 lui t8, 0xe6db            |     t8 = 0xe6db99e5;
    0x00023a0c ori t8, t8, 0x99e5        |     
    0x00023a10 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023a14 srl v0, s0, 0x15          |     v0 = s0 >> 0x15;
    0x00023a18 sll t8, s0, 0xb           |     t8 = s0 << 0xb;
    0x00023a1c or s0, v0, t8             |     s0 = v0 | t8;
    0x00023a20 addu s0, s0, s3           |     s0 += s3;
    0x00023a24 xor t8, s0, s3            |     t8 = s0 ^ s3;
    0x00023a28 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023a2c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023a30 addiu t8, t8, 0x3c        |     t8 += 0x3c;
    0x00023a34 lw t8, (t8)               |     t8 = *(t8);
    0x00023a38 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023a3c addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023a40 lui t8, 0x1fa2            |     t8 = 0x1fa27cf8;
    0x00023a44 ori t8, t8, 0x7cf8        |     
    0x00023a48 addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023a4c srl v0, s1, 0x10          |     v0 = s1 >> 0x10;
    0x00023a50 sll t8, s1, 0x10          |     t8 = s1 << 0x10;
    0x00023a54 or s1, v0, t8             |     s1 = v0 | t8;
    0x00023a58 addu s1, s1, s0           |     s1 += s0;
    0x00023a5c xor t8, s1, s0            |     t8 = s1 ^ s0;
    0x00023a60 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023a64 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023a68 addiu t8, t8, 8           |     t8 += 8;
    0x00023a6c lw t8, (t8)               |     t8 = *(t8);
    0x00023a70 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023a74 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023a78 lui t8, 0xc4ac            |     t8 = 0xc4ac5665;
    0x00023a7c ori t8, t8, 0x5665        |     
    0x00023a80 addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023a84 srl v0, s2, 9             |     v0 = s2 >> 9;
    0x00023a88 sll t8, s2, 0x17          |     t8 = s2 << 0x17;
    0x00023a8c or s2, v0, t8             |     s2 = v0 | t8;
    0x00023a90 addu s2, s2, s1           |     s2 += s1;
    0x00023a94 nor t8, zero, s0          |     __asm ("nor t8, zero, s0");
    0x00023a98 or t8, t8, s2             |     t8 |= s2;
    0x00023a9c xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023aa0 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023aa4 lw t8, (t8)               |     t8 = *(t8);
    0x00023aa8 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023aac addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023ab0 lui t8, 0xf429            |     t8 = 0xf4292244;
    0x00023ab4 ori t8, t8, 0x2244        |     
    0x00023ab8 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023abc srl v0, s3, 0x1a          |     v0 = s3 >> 0x1a;
    0x00023ac0 sll t8, s3, 6             |     t8 = s3 << 6;
    0x00023ac4 or s3, v0, t8             |     s3 = v0 | t8;
    0x00023ac8 addu s3, s3, s2           |     s3 += s2;
    0x00023acc nor t8, zero, s1          |     __asm ("nor t8, zero, s1");
    0x00023ad0 or t8, t8, s3             |     t8 |= s3;
    0x00023ad4 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023ad8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023adc addiu t8, t8, 0x1c        |     t8 += 0x1c;
    0x00023ae0 lw t8, (t8)               |     t8 = *(t8);
    0x00023ae4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023ae8 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023aec lui t8, 0x432a            |     t8 = 0x432aff97;
    0x00023af0 ori t8, t8, 0xff97        |     
    0x00023af4 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023af8 srl v0, s0, 0x16          |     v0 = s0 >> 0x16;
    0x00023afc sll t8, s0, 0xa           |     t8 = s0 << 0xa;
    0x00023b00 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023b04 addu s0, s0, s3           |     s0 += s3;
    0x00023b08 nor t8, zero, s2          |     __asm ("nor t8, zero, s2");
    0x00023b0c or t8, t8, s0             |     t8 |= s0;
    0x00023b10 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023b14 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023b18 addiu t8, t8, 0x38        |     t8 += 0x38;
    0x00023b1c lw t8, (t8)               |     t8 = *(t8);
    0x00023b20 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023b24 addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023b28 lui t8, 0xab94            |     t8 = 0xab9423a7;
    0x00023b2c ori t8, t8, 0x23a7        |     
    0x00023b30 addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023b34 srl v0, s1, 0x11          |     v0 = s1 >> 0x11;
    0x00023b38 sll t8, s1, 0xf           |     t8 = s1 << 0xf;
    0x00023b3c or s1, v0, t8             |     s1 = v0 | t8;
    0x00023b40 addu s1, s1, s0           |     s1 += s0;
    0x00023b44 nor t8, zero, s3          |     __asm ("nor t8, zero, s3");
    0x00023b48 or t8, t8, s1             |     t8 |= s1;
    0x00023b4c xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023b50 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023b54 addiu t8, t8, 0x14        |     t8 += 0x14;
    0x00023b58 lw t8, (t8)               |     t8 = *(t8);
    0x00023b5c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023b60 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023b64 lui t8, 0xfc93            |     t8 = 0xfc93a039;
    0x00023b68 ori t8, t8, 0xa039        |     
    0x00023b6c addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023b70 srl v0, s2, 0xb           |     v0 = s2 >> 0xb;
    0x00023b74 sll t8, s2, 0x15          |     t8 = s2 << 0x15;
    0x00023b78 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023b7c addu s2, s2, s1           |     s2 += s1;
    0x00023b80 nor t8, zero, s0          |     __asm ("nor t8, zero, s0");
    0x00023b84 or t8, t8, s2             |     t8 |= s2;
    0x00023b88 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023b8c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023b90 addiu t8, t8, 0x30        |     t8 += 0x30;
    0x00023b94 lw t8, (t8)               |     t8 = *(t8);
    0x00023b98 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023b9c addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023ba0 lui t8, 0x655b            |     t8 = 0x655b59c3;
    0x00023ba4 ori t8, t8, 0x59c3        |     
    0x00023ba8 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023bac srl v0, s3, 0x1a          |     v0 = s3 >> 0x1a;
    0x00023bb0 sll t8, s3, 6             |     t8 = s3 << 6;
    0x00023bb4 or s3, v0, t8             |     s3 = v0 | t8;
    0x00023bb8 addu s3, s3, s2           |     s3 += s2;
    0x00023bbc nor t8, zero, s1          |     __asm ("nor t8, zero, s1");
    0x00023bc0 or t8, t8, s3             |     t8 |= s3;
    0x00023bc4 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023bc8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023bcc addiu t8, t8, 0xc         |     t8 += 0xc;
    0x00023bd0 lw t8, (t8)               |     t8 = *(t8);
    0x00023bd4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023bd8 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023bdc lui t8, 0x8f0c            |     t8 = 0x8f0ccc92;
    0x00023be0 ori t8, t8, 0xcc92        |     
    0x00023be4 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023be8 srl v0, s0, 0x16          |     v0 = s0 >> 0x16;
    0x00023bec sll t8, s0, 0xa           |     t8 = s0 << 0xa;
    0x00023bf0 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023bf4 addu s0, s0, s3           |     s0 += s3;
    0x00023bf8 nor t8, zero, s2          |     __asm ("nor t8, zero, s2");
    0x00023bfc or t8, t8, s0             |     t8 |= s0;
    0x00023c00 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023c04 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023c08 addiu t8, t8, 0x28        |     t8 += 0x28;
    0x00023c0c lw t8, (t8)               |     t8 = *(t8);
    0x00023c10 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023c14 addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023c18 lui t8, 0xffef            |     t8 = 0xffeff47d;
    0x00023c1c ori t8, t8, 0xf47d        |     
    0x00023c20 addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023c24 srl v0, s1, 0x11          |     v0 = s1 >> 0x11;
    0x00023c28 sll t8, s1, 0xf           |     t8 = s1 << 0xf;
    0x00023c2c or s1, v0, t8             |     s1 = v0 | t8;
    0x00023c30 addu s1, s1, s0           |     s1 += s0;
    0x00023c34 nor t8, zero, s3          |     __asm ("nor t8, zero, s3");
    0x00023c38 or t8, t8, s1             |     t8 |= s1;
    0x00023c3c xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023c40 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023c44 addiu t8, t8, 4           |     t8 += 4;
    0x00023c48 lw t8, (t8)               |     t8 = *(t8);
    0x00023c4c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023c50 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023c54 lui t8, 0x8584            |     t8 = 0x85845dd1;
    0x00023c58 ori t8, t8, 0x5dd1        |     
    0x00023c5c addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023c60 srl v0, s2, 0xb           |     v0 = s2 >> 0xb;
    0x00023c64 sll t8, s2, 0x15          |     t8 = s2 << 0x15;
    0x00023c68 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023c6c addu s2, s2, s1           |     s2 += s1;
    0x00023c70 nor t8, zero, s0          |     __asm ("nor t8, zero, s0");
    0x00023c74 or t8, t8, s2             |     t8 |= s2;
    0x00023c78 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023c7c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023c80 addiu t8, t8, 0x20        |     t8 += 0x20;
    0x00023c84 lw t8, (t8)               |     t8 = *(t8);
    0x00023c88 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023c8c addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023c90 lui t8, 0x6fa8            |     t8 = 0x6fa87e4f;
    0x00023c94 ori t8, t8, 0x7e4f        |     
    0x00023c98 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023c9c srl v0, s3, 0x1a          |     v0 = s3 >> 0x1a;
    0x00023ca0 sll t8, s3, 6             |     t8 = s3 << 6;
    0x00023ca4 or s3, v0, t8             |     s3 = v0 | t8;
    0x00023ca8 addu s3, s3, s2           |     s3 += s2;
    0x00023cac nor t8, zero, s1          |     __asm ("nor t8, zero, s1");
    0x00023cb0 or t8, t8, s3             |     t8 |= s3;
    0x00023cb4 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023cb8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023cbc addiu t8, t8, 0x3c        |     t8 += 0x3c;
    0x00023cc0 lw t8, (t8)               |     t8 = *(t8);
    0x00023cc4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023cc8 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023ccc lui t8, 0xfe2c            |     t8 = 0xfe2ce6e0;
    0x00023cd0 ori t8, t8, 0xe6e0        |     
    0x00023cd4 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023cd8 srl v0, s0, 0x16          |     v0 = s0 >> 0x16;
    0x00023cdc sll t8, s0, 0xa           |     t8 = s0 << 0xa;
    0x00023ce0 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023ce4 addu s0, s0, s3           |     s0 += s3;
    0x00023ce8 nor t8, zero, s2          |     __asm ("nor t8, zero, s2");
    0x00023cec or t8, t8, s0             |     t8 |= s0;
    0x00023cf0 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023cf4 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023cf8 addiu t8, t8, 0x18        |     t8 += 0x18;
    0x00023cfc lw t8, (t8)               |     t8 = *(t8);
    0x00023d00 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023d04 addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023d08 lui t8, 0xa301            |     t8 = 0xa3014314;
    0x00023d0c ori t8, t8, 0x4314        |     
    0x00023d10 addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023d14 srl v0, s1, 0x11          |     v0 = s1 >> 0x11;
    0x00023d18 sll t8, s1, 0xf           |     t8 = s1 << 0xf;
    0x00023d1c or s1, v0, t8             |     s1 = v0 | t8;
    0x00023d20 addu s1, s1, s0           |     s1 += s0;
    0x00023d24 nor t8, zero, s3          |     __asm ("nor t8, zero, s3");
    0x00023d28 or t8, t8, s1             |     t8 |= s1;
    0x00023d2c xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023d30 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023d34 addiu t8, t8, 0x34        |     t8 += 0x34;
    0x00023d38 lw t8, (t8)               |     t8 = *(t8);
    0x00023d3c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023d40 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023d44 lui t8, 0x4e08            |     t8 = 0x4e0811a1;
    0x00023d48 ori t8, t8, 0x11a1        |     
    0x00023d4c addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023d50 srl v0, s2, 0xb           |     v0 = s2 >> 0xb;
    0x00023d54 sll t8, s2, 0x15          |     t8 = s2 << 0x15;
    0x00023d58 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023d5c addu s2, s2, s1           |     s2 += s1;
    0x00023d60 nor t8, zero, s0          |     __asm ("nor t8, zero, s0");
    0x00023d64 or t8, t8, s2             |     t8 |= s2;
    0x00023d68 xor v0, t8, s1            |     v0 = t8 ^ s1;
    0x00023d6c lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023d70 addiu t8, t8, 0x10        |     t8 += 0x10;
    0x00023d74 lw t8, (t8)               |     t8 = *(t8);
    0x00023d78 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023d7c addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023d80 lui t8, 0xf753            |     t8 = 0xf7537e82;
    0x00023d84 ori t8, t8, 0x7e82        |     
    0x00023d88 addu s3, v0, t8           |     s3 = v0 + t8;
    0x00023d8c srl v0, s3, 0x1a          |     v0 = s3 >> 0x1a;
    0x00023d90 sll t8, s3, 6             |     t8 = s3 << 6;
    0x00023d94 or s3, v0, t8             |     s3 = v0 | t8;
    0x00023d98 addu s3, s3, s2           |     s3 += s2;
    0x00023d9c nor t8, zero, s1          |     __asm ("nor t8, zero, s1");
    0x00023da0 or t8, t8, s3             |     t8 |= s3;
    0x00023da4 xor v0, t8, s2            |     v0 = t8 ^ s2;
    0x00023da8 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023dac addiu t8, t8, 0x2c        |     t8 += 0x2c;
    0x00023db0 lw t8, (t8)               |     t8 = *(t8);
    0x00023db4 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023db8 addu v0, t8, s0           |     v0 = t8 + s0;
    0x00023dbc lui t8, 0xbd3a            |     t8 = 0xbd3af235;
    0x00023dc0 ori t8, t8, 0xf235        |     
    0x00023dc4 addu s0, v0, t8           |     s0 = v0 + t8;
    0x00023dc8 srl v0, s0, 0x16          |     v0 = s0 >> 0x16;
    0x00023dcc sll t8, s0, 0xa           |     t8 = s0 << 0xa;
    0x00023dd0 or s0, v0, t8             |     s0 = v0 | t8;
    0x00023dd4 addu s0, s0, s3           |     s0 += s3;
    0x00023dd8 nor t8, zero, s2          |     __asm ("nor t8, zero, s2");
    0x00023ddc or t8, t8, s0             |     t8 |= s0;
    0x00023de0 xor v0, t8, s3            |     v0 = t8 ^ s3;
    0x00023de4 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023de8 addiu t8, t8, 8           |     t8 += 8;
    0x00023dec lw t8, (t8)               |     t8 = *(t8);
    0x00023df0 addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023df4 addu v0, t8, s1           |     v0 = t8 + s1;
    0x00023df8 lui t8, 0x2ad7            |     t8 = 0x2ad7d2bb;
    0x00023dfc ori t8, t8, 0xd2bb        |     
    0x00023e00 addu s1, v0, t8           |     s1 = v0 + t8;
    0x00023e04 srl v0, s1, 0x11          |     v0 = s1 >> 0x11;
    0x00023e08 sll t8, s1, 0xf           |     t8 = s1 << 0xf;
    0x00023e0c or s1, v0, t8             |     s1 = v0 | t8;
    0x00023e10 addu s1, s1, s0           |     s1 += s0;
    0x00023e14 nor t8, zero, s3          |     __asm ("nor t8, zero, s3");
    0x00023e18 or t8, t8, s1             |     t8 |= s1;
    0x00023e1c xor v0, t8, s0            |     v0 = t8 ^ s0;
    0x00023e20 lw t8, 0x1c(fp)           |     t8 = *(arg_1ch);
    0x00023e24 addiu t8, t8, 0x24        |     t8 += 0x24;
    0x00023e28 lw t8, (t8)               |     t8 = *(t8);
    0x00023e2c addu t8, v0, t8           |     t8 = v0 + t8;
    0x00023e30 addu v0, t8, s2           |     v0 = t8 + s2;
    0x00023e34 lui t8, 0xeb86            |     t8 = 0xeb86d391;
    0x00023e38 ori t8, t8, 0xd391        |     
    0x00023e3c addu s2, v0, t8           |     s2 = v0 + t8;
    0x00023e40 srl v0, s2, 0xb           |     v0 = s2 >> 0xb;
    0x00023e44 sll t8, s2, 0x15          |     t8 = s2 << 0x15;
    0x00023e48 or s2, v0, t8             |     s2 = v0 | t8;
    0x00023e4c addu s2, s2, s1           |     s2 += s1;
    0x00023e50 lw t8, 0x18(fp)           |     t8 = *(arg_18h);
    0x00023e54 lw t8, (t8)               |     t8 = *(t8);
    0x00023e58 addu v0, t8, s3           |     v0 = t8 + s3;
    0x00023e5c lw t8, 0x18(fp)           |     t8 = *(arg_18h);
    0x00023e60 sw v0, (t8)               |     *(t8) = v0;
    0x00023e64 lw t8, 0x18(fp)           |     t8 = *(arg_18h);
    0x00023e68 addiu t8, t8, 4           |     t8 += 4;
    0x00023e6c lw v0, 0x18(fp)           |     v0 = *(arg_18h);
    0x00023e70 addiu v0, v0, 4           |     v0 += 4;
    0x00023e74 lw v0, (v0)               |     v0 = *(v0);
    0x00023e78 addu v0, v0, s2           |     v0 += s2;
    0x00023e7c sw v0, (t8)               |     *(t8) = v0;
    0x00023e80 lw t8, 0x18(fp)           |     t8 = *(arg_18h);
    0x00023e84 addiu t8, t8, 8           |     t8 += 8;
    0x00023e88 lw v0, 0x18(fp)           |     v0 = *(arg_18h);
    0x00023e8c addiu v0, v0, 8           |     v0 += 8;
    0x00023e90 lw v0, (v0)               |     v0 = *(v0);
    0x00023e94 addu v0, v0, s1           |     v0 += s1;
    0x00023e98 sw v0, (t8)               |     *(t8) = v0;
    0x00023e9c lw t8, 0x18(fp)           |     t8 = *(arg_18h);
    0x00023ea0 addiu t8, t8, 0xc         |     t8 += 0xc;
    0x00023ea4 lw v0, 0x18(fp)           |     v0 = *(arg_18h);
    0x00023ea8 addiu v0, v0, 0xc         |     v0 += 0xc;
    0x00023eac lw v0, (v0)               |     v0 = *(v0);
    0x00023eb0 addu v0, v0, s0           |     v0 += s0;
    0x00023eb4 sw v0, (t8)               |     *(t8) = v0;
    0x00023eb8 move sp, fp               |     
    0x00023ebc lw fp, 0x14(sp)           |     fp = *(arg_14h);
    0x00023ec0 lw s3, 0x10(sp)           |     s3 = *(arg_10h);
    0x00023ec4 lw s2, 0xc(sp)            |     s2 = *(arg_ch);
    0x00023ec8 lw s1, 8(sp)              |     s1 = *(arg_8h);
    0x00023ecc lw s0, 4(sp)              |     s0 = *(arg_4h);
    0x00023ed0 addiu sp, sp, 0x18        |     
    0x00023ed4 jr ra                     |     return v0;
    0x00023ed8 nop                       |     
                                         | }

[*] Function strcat used 1 times libglib-2.0.so.0.3000.2