/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 *
 * Computes the ones-complement partial checksum of the buffer, folding
 * it into the incoming 32-bit sum.  The running fold is carried in the
 * C flag between the adcs instructions, so the instruction sequence is
 * chosen so that nothing between two adds disturbs C (tst/bics/teq set
 * only N and Z from a logical result; sub/ldr/orr here do not touch C).
 *
 * NOTE(review): written in pre-UAL "divided" syntax - the condition
 * code precedes the S suffix (adcnes, ldrneb); UAL would spell these
 * adcsne / ldrbne.
 *
 * NOTE(review): byte(n) is presumably a macro expanding to n*8, i.e. a
 * shift placing the loaded byte into byte lane n of the word - defined
 * outside this file (asm/assembler.h?); TODO confirm.
 */

buf	.req	r0			@ current source pointer (advances)
len	.req	r1			@ bytes remaining
sum	.req	r2			@ running checksum accumulator
td0	.req	r3			@ temporary data register
td1	.req	r4			@ save before use
td2	.req	r5			@ save before use
td3	.req	lr			@ lr is free once {buf, lr} are stacked

/*
 * len == 0: return the caller's sum unchanged.  The entry point pushed
 * {buf, lr}; discard the saved buf and return via the saved lr.
 */
.zero:		mov	r0, sum
		add	sp, sp, #4		@ drop saved buf
		ldr	pc, [sp], #4		@ pop lr straight into pc

		/*
		 * Handle 0 to 7 bytes, with any alignment of source and
		 * destination pointers.  Note that when we get here, C = 0
		 */
.less8:		teq	len, #0			@ check for zero count
		beq	.zero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		ldrneb	td0, [buf], #1		@ if so, consume one byte...
		subne	len, len, #1
		adcnes	sum, sum, td0, lsl #byte(1)	@ ...into the high byte lane

.less4:		tst	len, #6			@ any halfwords left (len & 6)?
		beq	.less8_byte

		/* we are now half-word aligned */

.less8_wordlp:
#ifdef __ARM_ARCH_4__
		ldrh	td0, [buf], #2		@ ARMv4 has halfword loads
		sub	len, len, #2
#else
		/* pre-v4: build the halfword from two byte loads */
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8	@ little-endian byte order
#else
		orr	td0, td3, td0, lsl #8	@ big-endian byte order
#endif
#endif
		adcs	sum, sum, td0		@ fold halfword + pending carry
		tst	len, #6
		bne	.less8_wordlp

.less8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, lsl #byte(0)	@ update checksum

/*
 * Fold in the final carry and undo the byte-lane skew: the entry point
 * saved the original buf on the stack; if the buffer started on an odd
 * address every byte was accumulated one lane off, so rotate the result
 * by 8 bits to compensate.
 */
.done:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ reload original buffer address
		tst	td0, #1			@ check buffer alignment
		movne	td0, r0, lsl #8		@ rotate checksum by 8 bits
		orrne	r0, td0, r0, lsr #24
		ldr	pc, [sp], #4		@ return

/*
 * Subroutine (plain BL linkage, lr-return): advance buf to a 32-bit
 * boundary, folding the 1-3 skipped bytes into sum.  Caller guarantees
 * len >= 8, so all conditional loads here are safe.
 */
.not_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, lsl #byte(1)	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#ifdef __ARM_ARCH_4__
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8	@ little-endian byte order
#else
		orrne	td0, ip, td0, lsl #8	@ big-endian byte order
#endif
#endif
		adcnes	sum, sum, td0		@ update checksum
		mov	pc, lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}		@ keep original buf for .done
		cmp	len, #8			@ Ensure that we have at least
		blo	.less8			@ 8 bytes to copy.

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test destination alignment
		blne	.not_aligned		@ align destination, return here

		/* main loop: 32 bytes per iteration while len allows */
1:		bics	ip, len, #31		@ ip = len rounded down to 32
		beq	3f

		stmfd	sp!, {r4 - r5}		@ td1/td2 are callee-saved
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0			@ teq preserves C for the adcs chain
		bne	2b
		ldmfd	sp!, {r4 - r5}

		/* remaining 4-28 bytes, one word at a time */
3:		tst	len, #0x1c		@ should not change C
		beq	.less4

4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.less4