+++ /dev/null
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
- /* Prototype: void *memcpy (void *dst, const void *src, size_t count). */
-
- /* Use the version of memcpy implemented using LDRD and STRD.
- This version is tuned for Cortex-A15.
- This might not be the best for other ARMv7-A CPUs,
- but there is no predefine to distinguish between
- different CPUs in the same architecture,
- and this version is better than the plain memcpy provided in newlib.
-
- Therefore, we use this version for all ARMv7-A CPUs. */
-
- /* To make the same code compile for both ARM and Thumb instruction
- sets, switch to unified syntax at the beginning of this function.
- However, by using the same code, we may be missing optimization
- opportunities. For instance, in LDRD/STRD instructions, the first
- destination register must be even and the second consecutive in
- ARM state, but not in Thumb state. */
-
-#include <machine/cpu-features.h>
-#include <machine/asm.h>
-
- .syntax unified
-
-ENTRY(memcpy)
-
- /* Assumes that n >= 0 and dst, src are valid pointers.
- If there are at least 8 bytes to copy, use LDRD/STRD.
- If src and dst are misaligned with different offsets,
- first copy byte by byte until dst is aligned,
- and then copy using LDRD/STRD and shift if needed.
- When fewer than 8 bytes are left, copy a word and then byte by byte. */
-
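- /* Illustrative sketch only (not part of the original source): the code
- below corresponds roughly to the following C, ignoring the misaligned
- shift-queue path; memcpy_sketch is a hypothetical name.
-
-        #include <stddef.h>
-        #include <stdint.h>
-        #include <string.h>
-
-        void *memcpy_sketch(void *dst, const void *src, size_t n) {
-            unsigned char *d = dst;
-            const unsigned char *s = src;
-            // Copy byte by byte until dst is word-aligned (at most 3 bytes).
-            while (n && ((uintptr_t)d & 3)) { *d++ = *s++; n--; }
-            if (((uintptr_t)s & 3) == 0) {
-                // Bulk 8-byte copies; the assembly uses LDRD/STRD here,
-                // unrolled to 64 bytes per iteration.
-                while (n >= 8) {
-                    uint64_t w;
-                    memcpy(&w, s, 8); memcpy(d, &w, 8);
-                    d += 8; s += 8; n -= 8;
-                }
-            }
-            while (n--) *d++ = *s++;  // 0-7 byte tail
-            return dst;
-        }
- */
-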
- /* Save registers (r0 holds the return value):
- optimized push {r0, r4, r5, r6, r7, lr}.
- To try to improve performance, the stack layout is changed,
- i.e., we do not keep the stack looking the way users expect
- (highest numbered register at highest address). */
- .save {r0, lr}
- push {r0, lr}
- .save {r4, r5}
- strd r4, r5, [sp, #-8]!
- .save {r6, r7}
- strd r6, r7, [sp, #-8]!
-
- /* TODO: Add debug frame directives.
- We don't need exception unwind directives, because the code below
- does not throw any exceptions and does not call any other functions.
- Generally, newlib functions like this lack debug information for
- assembler source. */
-
- /* Get copying of tiny blocks out of the way first. */
- /* Are there at least 4 bytes to copy? */
- subs r2, r2, #4
- blt copy_less_than_4 /* If n < 4. */
-
- /* Check word alignment. */
- ands ip, r0, #3 /* ip = last 2 bits of dst. */
- bne dst_not_word_aligned /* If dst is not word-aligned. */
-
- /* Get here if dst is word-aligned. */
- ands ip, r1, #3 /* ip = last 2 bits of src. */
- bne src_not_word_aligned /* If src is not word-aligned. */
-word_aligned:
- /* Get here if source and dst both are word-aligned.
- The number of bytes remaining to copy is r2+4. */
-
- /* Are there at least 64 bytes to copy? */
- subs r2, r2, #60
- blt copy_less_than_64 /* If r2 + 4 < 64. */
-
- /* First, align the destination buffer to 8 bytes,
- to make sure double loads and stores don't cross a cache-line boundary,
- as they are then more expensive even if the data is in the cache
- (they require two load/store issue cycles instead of one).
- If only one of the buffers is not 8-byte aligned,
- then it's more important to align dst than src,
- because there is more penalty for stores
- than for loads that cross a cache-line boundary.
- This check and realignment are only worth doing
- if there is a lot to copy. */
-
- /* Get here if dst is word-aligned,
- i.e., the 2 least significant bits are 0.
- If dst is not two-word (8-byte) aligned, i.e., bit 2 of dst is set,
- then copy 1 word (4 bytes) to make it so. */
- ands r3, r0, #4
- beq 11f /* If dst already two-word aligned. */
- ldr r3, [r1], #4
- str r3, [r0], #4
- subs r2, r2, #4
- blt copy_less_than_64
-
-11:
- /* TODO: Align to cacheline (useful for PLD optimization). */
-
- /* Every loop iteration copies 64 bytes. */
-1:
- .irp offset, #0, #8, #16, #24, #32, #40, #48, #56
- ldrd r4, r5, [r1, \offset]
- strd r4, r5, [r0, \offset]
- .endr
-
- add r0, r0, #64
- add r1, r1, #64
- subs r2, r2, #64
- bge 1b /* If there is more to copy. */
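-
- /* For reference, each .irp iteration above expands to one LDRD/STRD
- pair; the first two are:
-        ldrd r4, r5, [r1, #0]
-        strd r4, r5, [r0, #0]
-        ldrd r4, r5, [r1, #8]
-        strd r4, r5, [r0, #8]
- and so on through offset #56, for 64 bytes per loop iteration. */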
-
-copy_less_than_64:
-
- /* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
- Restore the count if there are more than 7 bytes to copy. */
- adds r2, r2, #56
- blt copy_less_than_8
-
- /* Copy 8 bytes at a time. */
-2:
- ldrd r4, r5, [r1], #8
- strd r4, r5, [r0], #8
- subs r2, r2, #8
- bge 2b /* If there is more to copy. */
-
-copy_less_than_8:
-
- /* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
- Check if there is more to copy. */
- cmn r2, #8
- beq return /* If r2 + 8 == 0. */
-
- /* Restore the count if there are more than 3 bytes to copy. */
- adds r2, r2, #4
- blt copy_less_than_4
-
- /* Copy 4 bytes. */
- ldr r3, [r1], #4
- str r3, [r0], #4
-
-copy_less_than_4:
- /* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
-
- /* Restore the count, check if there is more to copy. */
- adds r2, r2, #4
- beq return /* If r2 == 0. */
-
- /* Get here with r2 in {1,2,3} = {01,10,11}. */
- /* Logical shift left r2, insert 0s, update flags. */
- lsls r2, r2, #31
-
- /* Copy byte by byte.
- Condition ne means bit 0 of r2 is set (the shifted result is nonzero),
- i.e., r2 is 1 or 3.
- Condition cs means bit 1 of r2 was shifted out into the carry,
- i.e., r2 is 2 or 3. */
- itt ne
- ldrbne r3, [r1], #1
- strbne r3, [r0], #1
-
- itttt cs
- ldrbcs r4, [r1], #1
- ldrbcs r5, [r1]
- strbcs r4, [r0], #1
- strbcs r5, [r0]
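-
- /* Worked example (informal): for r2 == 3 (binary 11), lsls #31 leaves a
- nonzero result with bit 0 in the sign bit (NE holds) and shifts bit 1
- into the carry (CS holds), so both the one-byte and the two-byte
- conditional copies run, moving all 3 remaining bytes. */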
-
-return:
- /* Restore registers: optimized pop {r0, r4, r5, r6, r7, pc} */
- /* This is the only return point of memcpy. */
- ldrd r6, r7, [sp], #8
- ldrd r4, r5, [sp], #8
- pop {r0, pc}
-
-#ifndef __ARM_FEATURE_UNALIGNED
-
- /* The following assembly macro implements misaligned copy in software.
- Assumes that dst is word aligned, src is at offset "pull" bits from
- a word boundary, push = 32 - pull, and the number of bytes that remain
- to copy is r2 + 4, r2 >= 0. */
-
- /* In the code below, r2 is the number of bytes that remain to be
- written. The number of bytes read is always larger, because we have
- partial words in the shift queue. */
-
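- /* Illustrative C model (not from the original source) of one 8-byte step
- of the macro below, little-endian case; miscopy_step is a hypothetical
- name, with push == 32 - pull:
-
-        #include <stdint.h>
-
-        static void miscopy_step(uint32_t *dst, const uint32_t *src,
-                                 uint32_t *queue, unsigned pull,
-                                 unsigned push) {
-            uint32_t lo = src[0], hi = src[1];  // ldrd r4, r5
-            dst[0] = (*queue >> pull) | (lo << push);
-            dst[1] = (lo >> pull) | (hi << push);
-            *queue = hi;                        // hi stays queued in r5
-        }
- */
-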
- .macro miscopy pull push shiftleft shiftright
-
- /* Align src to the previous word boundary. */
- bic r1, r1, #3
-
- /* Initialize the shift queue. */
- ldr r5, [r1], #4 /* Load a word from source. */
-
- subs r2, r2, #4
- blt 6f /* Go to misaligned copy of less than 8 bytes. */
-
- /* Get here if there are at least 8 bytes to copy.
- The number of bytes to copy is r2+8, r2 >= 0. */
-
- subs r2, r2, #56
- blt 4f /* Go to misaligned copy of less than 64 bytes. */
-
-3:
- /* Get here if there are at least 64 bytes to copy.
- The number of bytes to copy is r2+64, r2 >= 0. */
-
- /* Copy 64 bytes in every iteration.
- Use a partial word from the shift queue. */
- .irp offset, #0, #8, #16, #24, #32, #40, #48, #56
- mov r6, r5, \shiftleft #\pull
- ldrd r4, r5, [r1, \offset]
- orr r6, r6, r4, \shiftright #\push
- mov r7, r4, \shiftleft #\pull
- orr r7, r7, r5, \shiftright #\push
- strd r6, r7, [r0, \offset]
- .endr
-
- add r1, r1, #64
- add r0, r0, #64
- subs r2, r2, #64
- bge 3b
-
-4:
- /* Get here if there are fewer than 64 bytes to copy (-64 <= r2 < 0)
- and they are misaligned. */
-
- /* Restore the count if there are more than 7 bytes to copy. */
- adds r2, r2, #56
-
- blt 6f /* Go to misaligned copy of less than 8 bytes. */
-
-5:
- /* Copy 8 bytes at a time.
- Use a partial word from the shift queue. */
- mov r6, r5, \shiftleft #\pull
- ldrd r4, r5, [r1], #8
- orr r6, r6, r4, \shiftright #\push
- mov r7, r4, \shiftleft #\pull
- orr r7, r7, r5, \shiftright #\push
- strd r6, r7, [r0], #8
-
- subs r2, r2, #8
- bge 5b /* If there is more to copy. */
-
-6:
- /* Get here if there are fewer than 8 bytes to copy (-8 <= r2 < 0)
- and they are misaligned. */
-
- /* Check if there is more to copy. */
- cmn r2, #8
- beq return
-
- /* Check if there are fewer than 4 bytes to copy. */
- cmn r2, #4
-
- itt lt
- /* Restore src offset from word-align. */
- sublt r1, r1, #(\push / 8)
- blt copy_less_than_4
-
- /* Use a partial word from the shift queue. */
- mov r3, r5, \shiftleft #\pull
- /* Load a word from src, but without writeback
- (this word is not fully written to dst). */
- ldr r5, [r1]
-
- /* Restore src offset from word-align. */
- add r1, r1, #(\pull / 8)
-
- /* Shift bytes to create one dst word and store it. */
- orr r3, r3, r5, \shiftright #\push
- str r3, [r0], #4
-
- /* Use single byte copying of the remaining bytes. */
- b copy_less_than_4
-
- .endm
-
-#endif /* not __ARM_FEATURE_UNALIGNED */
-
-dst_not_word_aligned:
-
- /* Get here when dst is not aligned and ip has the last 2 bits of dst,
- i.e., ip is the offset of dst from a word boundary.
- The number of bytes that remain to copy is r2 + 4,
- i.e., there are at least 4 bytes to copy.
- Write a partial word (1 to 3 bytes), such that dst becomes
- word-aligned. */
-
- /* If dst is at ip bytes offset from a word (with 0 < ip < 4),
- then there are (4 - ip) bytes to fill up to align dst to the next
- word. */
- rsb ip, ip, #4 /* ip = #4 - ip. */
- cmp ip, #2
-
- /* Copy byte by byte with conditionals. */
- itt gt
- ldrbgt r3, [r1], #1
- strbgt r3, [r0], #1
-
- itt ge
- ldrbge r4, [r1], #1
- strbge r4, [r0], #1
-
- ldrb lr, [r1], #1
- strb lr, [r0], #1
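-
- /* Informally: ip is in {1,2,3}; the GT pair runs only for ip == 3, the
- GE pair for ip >= 2, and the last pair always runs, so exactly ip
- bytes are copied. */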
-
- /* Update the count.
- ip holds the number of bytes we have just copied. */
- subs r2, r2, ip /* r2 = r2 - ip. */
- blt copy_less_than_4 /* If r2 < ip. */
-
- /* Get here if there are at least 4 bytes to copy.
- Check if src is aligned. If beforehand src and dst were not word
- aligned but congruent (same offset), then now they are both
- word-aligned, and we can copy the rest efficiently (without
- shifting). */
- ands ip, r1, #3 /* ip = last 2 bits of src. */
- beq word_aligned /* If r1 is word-aligned. */
-
-src_not_word_aligned:
- /* Get here when src is not word-aligned, but dst is word-aligned.
- The number of bytes that remain to copy is r2+4. */
-
-#ifdef __ARM_FEATURE_UNALIGNED
- /* Copy word by word using LDR when unaligned access is supported by
- the hardware, i.e., SCTLR.A is clear, so LDR and STR may be unaligned. */
- subs r2, r2, #60
- blt 8f
-
-7:
- /* Copy 64 bytes in every loop iteration. */
- .irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
- ldr r3, [r1, \offset]
- str r3, [r0, \offset]
- .endr
-
- add r0, r0, #64
- add r1, r1, #64
- subs r2, r2, #64
- bge 7b
-
-8:
- /* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
- Check if there are more than 3 bytes to copy. */
- adds r2, r2, #60
- blt copy_less_than_4
-
-9:
- /* Get here if there are fewer than 64 but at least 4 bytes to copy,
- where the number of bytes to copy is r2+4. */
- ldr r3, [r1], #4
- str r3, [r0], #4
- subs r2, r2, #4
- bge 9b
-
- b copy_less_than_4
-
-#else /* not __ARM_FEATURE_UNALIGNED */
-
- /* ip has the last 2 bits of src,
- i.e., ip is the offset of src from a word boundary, and ip > 0.
- Compute the shifts needed to copy from src to dst. */
- cmp ip, #2
- beq miscopy_16_16 /* If ip == 2. */
- bge miscopy_24_8 /* If ip == 3. */
-
- /* Get here if ip == 1. */
-
- /* Endian-independent macros for shifting bytes within registers. */
-
-#ifndef __ARMEB__
-miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsr shiftright=lsl
-miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsr shiftright=lsl
-miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsr shiftright=lsl
-#else /* not __ARMEB__ */
-miscopy_8_24: miscopy pull=8 push=24 shiftleft=lsl shiftright=lsr
-miscopy_16_16: miscopy pull=16 push=16 shiftleft=lsl shiftright=lsr
-miscopy_24_8: miscopy pull=24 push=8 shiftleft=lsl shiftright=lsr
-#endif /* not __ARMEB__ */
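-
- /* Informally: for ip == 1 on little-endian, the queued word carries one
- stale byte below src; pull == 8 (lsr) shifts it out, leaving 3 good
- bytes, and push == 24 (lsl) merges in the first byte of the next word,
- yielding one fully aligned destination word. */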
-
-#endif /* not __ARM_FEATURE_UNALIGNED */
-
-END(memcpy)
+++ /dev/null
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <machine/cpu-features.h>
-#include <machine/asm.h>
-
- /*
- * Optimized memset() for ARM.
- *
- * memset() returns its first argument.
- */
-
-#if defined(__ARM_NEON__)
- .fpu neon
-#endif
-
-ENTRY(bzero)
- mov r2, r1
- mov r1, #0
- // Fall through to memset...
-END(bzero)
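-
- /* For reference: bzero(p, n) behaves like memset(p, 0, n); the code
- above moves the length into r2, zeroes the fill byte, and falls
- through. */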
-
-ENTRY(memset)
-#if defined(__ARM_NEON__)
-
-#ifdef NEON_MEMSET_DIVIDER
- cmp r2, #NEON_MEMSET_DIVIDER
- bhi 11f
-#endif
- .save {r0}
- stmfd sp!, {r0}
-
- vdup.8 q0, r1
-
-#ifndef NEON_UNALIGNED_ACCESS
- /* do we have at least 16 bytes to write (needed for alignment below) */
- cmp r2, #16
- blo 3f
-
- /* align destination to 16 bytes for the write-buffer */
- rsb r3, r0, #0
- ands r3, r3, #0xF
- beq 2f
-
- /* write up to 15 bytes (count in r3) */
- sub r2, r2, r3
- movs ip, r3, lsl #31
- strmib r1, [r0], #1
- strcsb r1, [r0], #1
- strcsb r1, [r0], #1
- movs ip, r3, lsl #29
- bge 1f
-
- // writes 4 bytes, 32-bits aligned
- vst1.32 {d0[0]}, [r0, :32]!
-1: bcc 2f
-
- // writes 8 bytes, 64-bits aligned
- vst1.8 {d0}, [r0, :64]!
-2:
-#endif
- /* make sure we have at least 32 bytes to write */
- subs r2, r2, #32
- blo 2f
- vmov q1, q0
-
-1: /* The main loop writes 32 bytes at a time */
- subs r2, r2, #32
-#ifndef NEON_UNALIGNED_ACCESS
- vst1.8 {d0 - d3}, [r0, :128]!
-#else
- vst1.8 {d0 - d3}, [r0]!
-#endif
- bhs 1b
-
-2: /* less than 32 left */
- add r2, r2, #32
- tst r2, #0x10
- beq 3f
-
- // writes 16 bytes, 128-bits aligned
-#ifndef NEON_UNALIGNED_ACCESS
- vst1.8 {d0, d1}, [r0, :128]!
-#else
- vst1.8 {d0, d1}, [r0]!
-#endif
-3: /* write up to 15 bytes (count in r2) */
- movs ip, r2, lsl #29
- bcc 1f
- vst1.8 {d0}, [r0]!
-1: bge 2f
- vst1.32 {d0[0]}, [r0]!
-2: movs ip, r2, lsl #31
- strmib r1, [r0], #1
- strcsb r1, [r0], #1
- strcsb r1, [r0], #1
- ldmfd sp!, {r0}
- bx lr
-11:
-#endif
-
- /*
- * Optimized memset() for ARM.
- *
- * memset() returns its first argument.
- */
-
- /* compute the offset to align the destination
- * offset = (4-(dst&3))&3 = -dst & 3
- */
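-
- /* Worked example (informal): for dst = 0x1002, -dst & 3 = 2, so two
- bytes are stored before the word-aligned stores begin. */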
-
- .save {r0, r4-r7, lr}
- stmfd sp!, {r0, r4-r7, lr}
- rsb r3, r0, #0
- ands r3, r3, #3
- cmp r3, r2
- movhi r3, r2
-
- /* splat r1 */
- mov r1, r1, lsl #24
- orr r1, r1, r1, lsr #8
- orr r1, r1, r1, lsr #16
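-
- /* In C terms (a sketch, not from the original source), with the fill
- byte c in the low 8 bits:
-
-        uint32_t splat(uint32_t c) {
-            uint32_t x = c << 24;  // mov r1, r1, lsl #24
-            x |= x >> 8;           // orr r1, r1, r1, lsr #8
-            x |= x >> 16;          // orr r1, r1, r1, lsr #16
-            return x;
-        }
-
- replicates c into all four bytes, e.g. splat(0xAB) == 0xABABABAB. */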
-
- movs r12, r3, lsl #31
- strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
- strcsb r1, [r0], #1
- strmib r1, [r0], #1
- subs r2, r2, r3
- ldmlsfd sp!, {r0, r4-r7, lr} /* return */
- bxls lr
-
- /* align the destination to a cache-line */
- mov r12, r1
- mov lr, r1
- mov r4, r1
- mov r5, r1
- mov r6, r1
- mov r7, r1
-
- rsb r3, r0, #0
- ands r3, r3, #0x1C
- beq 3f
- cmp r3, r2
- andhi r3, r2, #0x1C
- sub r2, r2, r3
-
- /* conditionally writes 0 to 7 words (length in r3) */
- movs r3, r3, lsl #28
- stmcsia r0!, {r1, lr}
- stmcsia r0!, {r1, lr}
- stmmiia r0!, {r1, lr}
- movs r3, r3, lsl #2
- strcs r1, [r0], #4
-
-3:
- subs r2, r2, #32
- mov r3, r1
- bmi 2f
-1: subs r2, r2, #32
- stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
- bhs 1b
-2: add r2, r2, #32
-
- /* conditionally stores 0 to 31 bytes */
- movs r2, r2, lsl #28
- stmcsia r0!, {r1,r3,r12,lr}
- stmmiia r0!, {r1, lr}
- movs r2, r2, lsl #2
- strcs r1, [r0], #4
- strmih r1, [r0], #2
- movs r2, r2, lsl #2
- strcsb r1, [r0]
- ldmfd sp!, {r0, r4-r7, lr}
- bx lr
-END(memset)
+++ /dev/null
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "arm_asm.h"
-
-#ifdef __ARMEB__
-#define S2LOMEM lsl
-#define S2LOMEMEQ lsleq
-#define S2HIMEM lsr
-#define MSB 0x000000ff
-#define LSB 0xff000000
-#define BYTE0_OFFSET 24
-#define BYTE1_OFFSET 16
-#define BYTE2_OFFSET 8
-#define BYTE3_OFFSET 0
-#else /* not __ARMEB__ */
-#define S2LOMEM lsr
-#define S2LOMEMEQ lsreq
-#define S2HIMEM lsl
-#define BYTE0_OFFSET 0
-#define BYTE1_OFFSET 8
-#define BYTE2_OFFSET 16
-#define BYTE3_OFFSET 24
-#define MSB 0xff000000
-#define LSB 0x000000ff
-#endif /* not __ARMEB__ */
-
-.syntax unified
-
-#if defined (__thumb__)
- .thumb
- .thumb_func
-#endif
- .global strcmp
- .type strcmp, %function
-strcmp:
-
-#if (defined (__thumb__) && !defined (__thumb2__))
-1:
- ldrb r2, [r0]
- ldrb r3, [r1]
- adds r0, r0, #1
- adds r1, r1, #1
- cmp r2, #0
- beq 2f
- cmp r2, r3
- beq 1b
-2:
- subs r0, r2, r3
- bx lr
-#elif (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
-1:
- ldrb r2, [r0], #1
- ldrb r3, [r1], #1
- cmp r2, #1
- it cs
- cmpcs r2, r3
- beq 1b
- subs r0, r2, r3
- RETURN
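-
- /* The cmp r2, #1 / cmpcs r2, r3 pair folds both loop conditions into
- one branch: carry is set iff r2 != 0, and only then is r2 compared
- with r3. A C sketch of this loop (strcmp_sketch is a hypothetical
- name):
-
-        int strcmp_sketch(const unsigned char *s1,
-                          const unsigned char *s2) {
-            unsigned c1, c2;
-            do {
-                c1 = *s1++;
-                c2 = *s2++;
-            } while (c1 != 0 && c1 == c2);
-            return (int)c1 - (int)c2;
-        }
- */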
-
-
-#elif (defined (_ISA_THUMB_2) || defined (_ISA_ARM_6))
- /* Use LDRD whenever possible. */
-
-/* The main thing to look out for when comparing large blocks is that
- the loads do not cross a page boundary when loading past the index
- of the byte with the first difference or the first string-terminator.
-
- For example, if the strings are identical and the string-terminator
- is at index k, byte by byte comparison will not load beyond address
- s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
- k, and double-word comparison up to 7 bytes. If the load of these bytes
- crosses a page boundary, it might cause a memory fault (if the page is
- not mapped) that would not have happened in byte by byte comparison.
-
- If an address is (double) word aligned, then a load of a (double) word
- from that address will not cross a page boundary.
- Therefore, the algorithm below considers word and double-word alignment
- of strings separately. */
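-
-/* Concrete example (informal): with a terminator at s1+6 and an unmapped
- page starting at s1+8, an unaligned 8-byte load beginning at s1+3
- touches s1+3..s1+10 and faults, while a byte-by-byte compare stops at
- s1+6. Aligned (double-)word loads can never cross a page boundary. */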
-
-/* High-level description of the algorithm.
-
- * The fast path: if both strings are double-word aligned,
- use LDRD to load two words from each string in every loop iteration.
- * If the strings have the same offset from a word boundary,
- use LDRB to load and compare byte by byte until
- the first string is aligned to a word boundary (at most 3 bytes).
- This is optimized for quick return on short unaligned strings.
- * If the strings have the same offset from a double-word boundary,
- use LDRD to load two words from each string in every loop iteration, as in the fast path.
- * If the strings do not have the same offset from a double-word boundary,
- load a word from the second string before the loop to initialize the queue.
- Use LDRD to load two words from each string in every loop iteration.
- Inside the loop, load the second word from the second string only after comparing
- the first word, using the queued value, to guarantee safety across page boundaries.
- * If the strings do not have the same offset from a word boundary,
- use LDR and a shift queue. Order of loads and comparisons matters,
- similarly to the previous case.
-
- * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
- * The only difference between ARM and Thumb modes is the use of the CBZ instruction.
- * The only difference between big and little endian is the use of REV in little endian
- to compute the return value, instead of MOV.
- * No preload. [TODO.]
-*/
-
- .macro m_cbz reg label
-#ifdef __thumb2__
- cbz \reg, \label
-#else /* not defined __thumb2__ */
- cmp \reg, #0
- beq \label
-#endif /* not defined __thumb2__ */
- .endm /* m_cbz */
-
- .macro m_cbnz reg label
-#ifdef __thumb2__
- cbnz \reg, \label
-#else /* not defined __thumb2__ */
- cmp \reg, #0
- bne \label
-#endif /* not defined __thumb2__ */
- .endm /* m_cbnz */
-
- .macro init
- /* Macro to save temporary registers and prepare magic values. */
- subs sp, sp, #16
- strd r4, r5, [sp, #8]
- strd r6, r7, [sp]
- mvn r6, #0 /* all F */
- mov r7, #0 /* all 0 */
- .endm /* init */
-
- .macro magic_compare_and_branch w1 w2 label
- /* Macro to compare registers w1 and w2 and conditionally branch to label. */
- cmp \w1, \w2 /* Are w1 and w2 the same? */
- magic_find_zero_bytes \w1
- it eq
- cmpeq ip, #0 /* Is there a zero byte in w1? */
- bne \label
- .endm /* magic_compare_and_branch */
-
- .macro magic_find_zero_bytes w1
- /* Macro to find all-zero bytes in w1, result is in ip. */
-#if (defined (__ARM_FEATURE_DSP))
- uadd8 ip, \w1, r6
- sel ip, r7, r6
-#else /* not defined (__ARM_FEATURE_DSP) */
- /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
- Coincidentally, these processors only have Thumb-2 mode, where we can use
- the (large) magic constant available directly as an immediate in
- instructions. Note that we cannot use the magic constant in ARM mode,
- where we need to create the constant in a register. */
- sub ip, \w1, #0x01010101
- bic ip, ip, \w1
- and ip, ip, #0x80808080
-#endif /* not defined (__ARM_FEATURE_DSP) */
- .endm /* magic_find_zero_bytes */
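-
- /* A C model (sketch) of the fallback test, the classic zero-byte trick:
- the result is nonzero exactly when some byte of w is zero; flag bits
- above the lowest zero byte can be spurious (the "false ones" noted in
- the syndrome comments in the other implementation below).
-
-        #include <stdint.h>
-
-        static uint32_t zero_byte_mask(uint32_t w) {
-            return (w - 0x01010101u) & ~w & 0x80808080u;
-        }
- */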
-
- .macro setup_return w1 w2
-#ifdef __ARMEB__
- mov r1, \w1
- mov r2, \w2
-#else /* not __ARMEB__ */
- rev r1, \w1
- rev r2, \w2
-#endif /* not __ARMEB__ */
- .endm /* setup_return */
-
- /*
- optpld r0, #0
- optpld r1, #0
- */
-
- /* Are both strings double-word aligned? */
- orr ip, r0, r1
- tst ip, #7
- bne do_align
-
- /* Fast path. */
- init
-
-doubleword_aligned:
-
- /* Get here when the strings to compare are double-word aligned. */
- /* Compare two words in every iteration. */
- .p2align 2
-2:
- /*
- optpld r0, #16
- optpld r1, #16
- */
-
- /* Load the next double-word from each string. */
- ldrd r2, r3, [r0], #8
- ldrd r4, r5, [r1], #8
-
- magic_compare_and_branch w1=r2, w2=r4, label=return_24
- magic_compare_and_branch w1=r3, w2=r5, label=return_35
- b 2b
-
-do_align:
- /* Is the first string word-aligned? */
- ands ip, r0, #3
- beq word_aligned_r0
-
- /* Fast compare byte by byte until the first string is word-aligned. */
- /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
- to read until the next word boundary is 4-ip. */
- bic r0, r0, #3
- ldr r2, [r0], #4
- lsls ip, ip, #31
- beq byte2
- bcs byte3
-
-byte1:
- ldrb ip, [r1], #1
- uxtb r3, r2, ror #BYTE1_OFFSET
- subs ip, r3, ip
- bne fast_return
- m_cbz reg=r3, label=fast_return
-
-byte2:
- ldrb ip, [r1], #1
- uxtb r3, r2, ror #BYTE2_OFFSET
- subs ip, r3, ip
- bne fast_return
- m_cbz reg=r3, label=fast_return
-
-byte3:
- ldrb ip, [r1], #1
- uxtb r3, r2, ror #BYTE3_OFFSET
- subs ip, r3, ip
- bne fast_return
- m_cbnz reg=r3, label=word_aligned_r0
-
-fast_return:
- mov r0, ip
- bx lr
-
-word_aligned_r0:
- init
- /* The first string is word-aligned. */
- /* Is the second string word-aligned? */
- ands ip, r1, #3
- bne strcmp_unaligned
-
-word_aligned:
- /* The strings are word-aligned. */
- /* Is the first string double-word aligned? */
- tst r0, #4
- beq doubleword_aligned_r0
-
- /* If r0 is not double-word aligned yet, align it by loading
- and comparing the next word from each string. */
- ldr r2, [r0], #4
- ldr r4, [r1], #4
- magic_compare_and_branch w1=r2 w2=r4 label=return_24
-
-doubleword_aligned_r0:
- /* Get here when r0 is double-word aligned. */
- /* Is r1 doubleword_aligned? */
- tst r1, #4
- beq doubleword_aligned
-
- /* Get here when the strings to compare are word-aligned,
- r0 is double-word aligned, but r1 is not double-word aligned. */
-
- /* Initialize the queue. */
- ldr r5, [r1], #4
-
- /* Compare two words in every iteration. */
- .p2align 2
-3:
- /*
- optpld r0, #16
- optpld r1, #16
- */
-
- /* Load the next double-word from each string and compare. */
- ldrd r2, r3, [r0], #8
- magic_compare_and_branch w1=r2 w2=r5 label=return_25
- ldrd r4, r5, [r1], #8
- magic_compare_and_branch w1=r3 w2=r4 label=return_34
- b 3b
-
- .macro miscmp_word offsetlo offsethi
- /* Macro to compare misaligned strings. */
- /* r0, r1 are word-aligned, and at least one of the strings
- is not double-word aligned. */
- /* Compare one word in every loop iteration. */
- /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
- OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
-
- /* Initialize the shift queue. */
- ldr r5, [r1], #4
-
- /* Compare one word from each string in every loop iteration. */
- .p2align 2
-7:
- ldr r3, [r0], #4
- S2LOMEM r5, r5, #\offsetlo
- magic_find_zero_bytes w1=r3
- cmp r7, ip, S2HIMEM #\offsetlo
- and r2, r3, r6, S2LOMEM #\offsetlo
- it eq
- cmpeq r2, r5
- bne return_25
- ldr r5, [r1], #4
- cmp ip, #0
- eor r3, r2, r3
- S2HIMEM r2, r5, #\offsethi
- it eq
- cmpeq r3, r2
- bne return_32
- b 7b
- .endm /* miscmp_word */
-
-strcmp_unaligned:
- /* r0 is word-aligned, r1 is at offset ip from a word. */
- /* Align r1 to the (previous) word-boundary. */
- bic r1, r1, #3
-
- /* Unaligned comparison word by word using LDRs. */
- cmp ip, #2
- beq miscmp_word_16 /* If ip == 2. */
- bge miscmp_word_24 /* If ip == 3. */
- miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
-miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
-miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
-
-
-return_32:
- setup_return w1=r3, w2=r2
- b do_return
-return_34:
- setup_return w1=r3, w2=r4
- b do_return
-return_25:
- setup_return w1=r2, w2=r5
- b do_return
-return_35:
- setup_return w1=r3, w2=r5
- b do_return
-return_24:
- setup_return w1=r2, w2=r4
-
-do_return:
-
-#ifdef __ARMEB__
- mov r0, ip
-#else /* not __ARMEB__ */
- rev r0, ip
-#endif /* not __ARMEB__ */
-
- /* Restore temporaries early, before computing the return value. */
- ldrd r6, r7, [sp]
- ldrd r4, r5, [sp, #8]
- adds sp, sp, #16
-
- /* There is a zero byte in r1, or r1 and r2 have a differing byte. */
- /* r0 contains a mask of the all-zero bytes in r1. */
- /* Using r0 and not ip here because cbz requires a low register. */
- m_cbz reg=r0, label=compute_return_value
- clz r0, r0
- /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
- rsb r0, r0, #24
- /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
- lsr r1, r1, r0
- lsr r2, r2, r0
-
-compute_return_value:
- movs r0, #1
- cmp r1, r2
- /* The return value is computed as follows.
- If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
- If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
- which means r0:=r0-r0-1 and r0 is #-1 at return.
- If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
- which means r0:=r0-r0 and r0 is #0 at return.
- (C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
- it ls
- sbcls r0, r0, r0
- bx lr
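-
- /* In C terms (sketch): after the unsigned comparison of the shifted
- words, this computes
-        return (r1 > r2) ? 1 : (r1 < r2) ? -1 : 0;
- without a branch, via SBC. */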
-
-
-#else /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) ||
- defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
- (defined (__thumb__) && !defined (__thumb2__))) */
-
- /* Use LDR whenever possible. */
-
-#ifdef __thumb2__
-#define magic1(REG) 0x01010101
-#define magic2(REG) 0x80808080
-#else
-#define magic1(REG) REG
-#define magic2(REG) REG, lsl #7
-#endif
-
- optpld r0
- optpld r1
- eor r2, r0, r1
- tst r2, #3
- /* Strings not at same byte offset from a word boundary. */
- bne strcmp_unaligned
- ands r2, r0, #3
- bic r0, r0, #3
- bic r1, r1, #3
- ldr ip, [r0], #4
- it eq
- ldreq r3, [r1], #4
- beq 1f
- /* Although s1 and s2 have identical initial alignment, they are
- not currently word aligned. Rather than comparing bytes,
- make sure that any bytes fetched from before the addressed
- bytes are forced to 0xff. Then they will always compare
- equal. */
- eor r2, r2, #3
- lsl r2, r2, #3
- mvn r3, MSB
- S2LOMEM r2, r3, r2
- ldr r3, [r1], #4
- orr ip, ip, r2
- orr r3, r3, r2
-1:
-#ifndef __thumb2__
- /* Load the 'magic' constant 0x01010101. */
- str r4, [sp, #-4]!
- mov r4, #1
- orr r4, r4, r4, lsl #8
- orr r4, r4, r4, lsl #16
-#endif
- .p2align 2
-4:
- optpld r0, #8
- optpld r1, #8
- sub r2, ip, magic1(r4)
- cmp ip, r3
- itttt eq
- /* check for any zero bytes in first word */
- biceq r2, r2, ip
- tsteq r2, magic2(r4)
- ldreq ip, [r0], #4
- ldreq r3, [r1], #4
- beq 4b
-2:
- /* There's a zero or a different byte in the word */
- S2HIMEM r0, ip, #24
- S2LOMEM ip, ip, #8
- cmp r0, #1
- it cs
- cmpcs r0, r3, S2HIMEM #24
- it eq
- S2LOMEMEQ r3, r3, #8
- beq 2b
- /* On a big-endian machine, r0 contains the desired byte in bits
- 0-7; on a little-endian machine they are in bits 24-31. In
- both cases the other bits in r0 are all zero. For r3 the
- interesting byte is at the other end of the word, but the
- other bits are not necessarily zero. We need a signed result
- representing the difference in the unsigned bytes, so for the
- little-endian case we can't just shift the interesting bits
- up. */
-#ifdef __ARMEB__
- sub r0, r0, r3, lsr #24
-#else
- and r3, r3, #255
-#ifdef __thumb2__
- /* No RSB instruction in Thumb2 */
- lsr r0, r0, #24
- sub r0, r0, r3
-#else
- rsb r0, r3, r0, lsr #24
-#endif
-#endif
-#ifndef __thumb2__
- ldr r4, [sp], #4
-#endif
- RETURN
-
-
-strcmp_unaligned:
-
-#if 0
- /* The assembly code below is based on the following algorithm. */
-#ifdef __ARMEB__
-#define RSHIFT <<
-#define LSHIFT >>
-#else
-#define RSHIFT >>
-#define LSHIFT <<
-#endif
-
-#define body(shift) \
- mask = 0xffffffffU RSHIFT shift; \
- w1 = *wp1++; \
- w2 = *wp2++; \
- do \
- { \
- t1 = w1 & mask; \
- if (__builtin_expect(t1 != w2 RSHIFT shift, 0)) \
- { \
- w2 RSHIFT= shift; \
- break; \
- } \
- if (__builtin_expect(((w1 - b1) & ~w1) & (b1 << 7), 0)) \
- { \
- /* See comment in assembler below re syndrome on big-endian */\
- if ((((w1 - b1) & ~w1) & (b1 << 7)) & mask) \
- w2 RSHIFT= shift; \
- else \
- { \
- w2 = *wp2; \
- t1 = w1 RSHIFT (32 - shift); \
- w2 = (w2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
- } \
- break; \
- } \
- w2 = *wp2++; \
- t1 ^= w1; \
- if (__builtin_expect(t1 != w2 LSHIFT (32 - shift), 0)) \
- { \
- t1 = w1 >> (32 - shift); \
- w2 = (w2 << (32 - shift)) RSHIFT (32 - shift); \
- break; \
- } \
- w1 = *wp1++; \
- } while (1)
-
- const unsigned* wp1;
- const unsigned* wp2;
- unsigned w1, w2;
- unsigned mask;
- unsigned shift;
- unsigned b1 = 0x01010101;
- char c1, c2;
- unsigned t1;
-
- while (((unsigned) s1) & 3)
- {
- c1 = *s1++;
- c2 = *s2++;
- if (c1 == 0 || c1 != c2)
- return c1 - (int)c2;
- }
- wp1 = (unsigned*) (((unsigned)s1) & ~3);
- wp2 = (unsigned*) (((unsigned)s2) & ~3);
- t1 = ((unsigned) s2) & 3;
- if (t1 == 1)
- {
- body(8);
- }
- else if (t1 == 2)
- {
- body(16);
- }
- else
- {
- body (24);
- }
-
- do
- {
-#ifdef __ARMEB__
- c1 = (char) (t1 >> 24);
- c2 = (char) (w2 >> 24);
-#else /* not __ARMEB__ */
- c1 = (char) t1;
- c2 = (char) w2;
-#endif /* not __ARMEB__ */
- t1 RSHIFT= 8;
- w2 RSHIFT= 8;
- } while (c1 != 0 && c1 == c2);
- return c1 - c2;
-#endif /* 0 */
-
-
- wp1 .req r0
- wp2 .req r1
- b1 .req r2
- w1 .req r4
- w2 .req r5
- t1 .req ip
- @ r3 is scratch
-
- /* First of all, compare bytes until wp1 (s1) is word-aligned. */
-1:
- tst wp1, #3
- beq 2f
- ldrb r2, [wp1], #1
- ldrb r3, [wp2], #1
- cmp r2, #1
- it cs
- cmpcs r2, r3
- beq 1b
- sub r0, r2, r3
- RETURN
-
-2:
- str r5, [sp, #-4]!
- str r4, [sp, #-4]!
- //stmfd sp!, {r4, r5}
- mov b1, #1
- orr b1, b1, b1, lsl #8
- orr b1, b1, b1, lsl #16
-
- and t1, wp2, #3
- bic wp2, wp2, #3
- ldr w1, [wp1], #4
- ldr w2, [wp2], #4
- cmp t1, #2
- beq 2f
- bhi 3f
-
- /* Critical inner Loop: Block with 3 bytes initial overlap */
- .p2align 2
-1:
- bic t1, w1, MSB
- cmp t1, w2, S2LOMEM #8
- sub r3, w1, b1
- bic r3, r3, w1
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, S2HIMEM #24
- bne 6f
- ldr w1, [wp1], #4
- b 1b
-4:
- S2LOMEM w2, w2, #8
- b 8f
-
-5:
-#ifdef __ARMEB__
- /* The syndrome value may contain false ones if the string ends
- with the bytes 0x01 0x00 */
- tst w1, #0xff000000
- itt ne
- tstne w1, #0x00ff0000
- tstne w1, #0x0000ff00
- beq 7f
-#else
- bics r3, r3, #0xff000000
- bne 7f
-#endif
- ldrb w2, [wp2]
- S2LOMEM t1, w1, #24
-#ifdef __ARMEB__
- lsl w2, w2, #24
-#endif
- b 8f
-
-6:
- S2LOMEM t1, w1, #24
- and w2, w2, LSB
- b 8f
-
- /* Critical inner Loop: Block with 2 bytes initial overlap */
- .p2align 2
-2:
- S2HIMEM t1, w1, #16
- sub r3, w1, b1
- S2LOMEM t1, t1, #16
- bic r3, r3, w1
- cmp t1, w2, S2LOMEM #16
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, S2HIMEM #16
- bne 6f
- ldr w1, [wp1], #4
- b 2b
-
-5:
-#ifdef __ARMEB__
- /* The syndrome value may contain false ones if the string ends
- with the bytes 0x01 0x00 */
- tst w1, #0xff000000
- it ne
- tstne w1, #0x00ff0000
- beq 7f
-#else
- lsls r3, r3, #16
- bne 7f
-#endif
- ldrh w2, [wp2]
- S2LOMEM t1, w1, #16
-#ifdef __ARMEB__
- lsl w2, w2, #16
-#endif
- b 8f
-
-6:
- S2HIMEM w2, w2, #16
- S2LOMEM t1, w1, #16
-4:
- S2LOMEM w2, w2, #16
- b 8f
-
- /* Critical inner Loop: Block with 1 byte initial overlap */
- .p2align 2
-3:
- and t1, w1, LSB
- cmp t1, w2, S2LOMEM #24
- sub r3, w1, b1
- bic r3, r3, w1
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, S2HIMEM #8
- bne 6f
- ldr w1, [wp1], #4
- b 3b
-4:
- S2LOMEM w2, w2, #24
- b 8f
-5:
- /* The syndrome value may contain false ones if the string ends
- with the bytes 0x01 0x00 */
- tst w1, LSB
- beq 7f
- ldr w2, [wp2], #4
-6:
- S2LOMEM t1, w1, #8
- bic w2, w2, MSB
- b 8f
-7:
- mov r0, #0
- //ldmfd sp!, {r4, r5}
- ldr r4, [sp], #4
- ldr r5, [sp], #4
- RETURN
-8:
- and r2, t1, LSB
- and r0, w2, LSB
- cmp r0, #1
- it cs
- cmpcs r0, r2
- itt eq
- S2LOMEMEQ t1, t1, #8
- S2LOMEMEQ w2, w2, #8
- beq 8b
- sub r0, r2, r0
- //ldmfd sp!, {r4, r5}
- ldr r4, [sp], #4
- ldr r5, [sp], #4
- RETURN
-
-#endif /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) ||
- defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
- (defined (__thumb__) && !defined (__thumb2__))) */