/*
| 2 | * Copyright (C) 2004 Joakim Tjernlund |
| 3 | * Copyright (C) 2000-2005 Erik Andersen <andersen@uclibc.org> |
| 4 | * |
| 5 | * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. |
| 6 | */ |
| 7 | |
/* These are carefully optimized mem*() functions for PPC written in C.
 * Don't muck around with these functions without checking the generated
 * assembler code.
 * It is possible to optimize these significantly more by using specific
 * data cache instructions (mainly dcbz). However that requires knowledge
 * about the CPU's cache line size.
| 14 | * |
 * BUG ALERT!
 * The cache instructions on MPC8xx CPUs are buggy (they don't update
 * the DAR register when causing a DTLB Miss/Error) and cannot be
 * used on 8xx CPUs without a kernel patch to work around this
 * problem.
| 20 | */ |
| 21 | |
| 22 | #include <string.h> |
| 23 | |
/* PPC can do pre increment and load/store, but not post increment and
   load/store. Therefore use *++ptr instead of *ptr++. */

/* Copy LEN bytes from FROM to TO and return TO; the regions must not
 * overlap (this is memcpy, not memmove).
 *
 * Three phases:
 *   align:       byte copies until TO is 4-byte aligned;
 *   copy_chunks: 8 bytes per iteration as two word loads followed by two
 *                word stores (both loads issued before the stores, see the
 *                comment in the loop);
 *   lessthan8:   one word copy plus a byte-copy loop for the tail.
 *
 * NOTE(review): the word accesses assume a 32-bit unsigned long and that
 * unaligned word loads from FROM are tolerated -- true on the PPC32
 * targets this file is written for; not portable C in general.
 */
void *memcpy(void *to, const void *from, size_t len)
{
	unsigned long rem, chunks, tmp1, tmp2;
	unsigned char *tmp_to;
	unsigned char *tmp_from = (unsigned char *)from;

	chunks = len / 8;
	/* Bias both pointers down by 4 so the loops below can use
	 * pre-increment addressing (see the note above the function). */
	tmp_from -= 4;
	/* Cast before the arithmetic: `to - 4` on a void* is a GCC
	 * extension, not standard C; the cast generates identical code. */
	tmp_to = (unsigned char *)to - 4;
	if (!chunks)
		goto lessthan8;
	rem = (unsigned long)tmp_to % 4;
	if (rem)
		goto align;
 copy_chunks:
	do {
		/* make gcc load all data, then store it */
		tmp1 = *(unsigned long *)(tmp_from+4);
		tmp_from += 8;
		tmp2 = *(unsigned long *)tmp_from;
		*(unsigned long *)(tmp_to+4) = tmp1;
		tmp_to += 8;
		*(unsigned long *)tmp_to = tmp2;
	} while (--chunks);
 lessthan8:
	len = len % 8;
	if (len >= 4) {
		tmp_from += 4;
		tmp_to += 4;
		*(unsigned long *)(tmp_to) = *(unsigned long *)(tmp_from);
		len -= 4;
	}
	if (!len)
		return to;
	/* Byte tail: re-bias by 3 so the pre-increment loop starts at the
	 * first uncopied byte (pointers currently point 4 below it). */
	tmp_from += 3;
	tmp_to += 3;
	do {
		*++tmp_to = *++tmp_from;
	} while (--len);

	return to;
 align:
	/* Copy 4-rem single bytes so tmp_to+4 becomes word aligned, then
	 * re-enter the word loop if at least one 8-byte chunk remains.
	 * ???: Do we really need to generate the carry flag here? If not, then:
	   rem -= 4; */
	rem = 4 - rem;
	len -= rem;
	do {
		*(tmp_to+4) = *(tmp_from+4);
		++tmp_from;
		++tmp_to;
	} while (--rem);
	chunks = len / 8;
	if (chunks)
		goto copy_chunks;
	goto lessthan8;
}
| 82 | libc_hidden_def(memcpy) |