/*
 * Copyright (c) 2013
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __ANDROID__
# include <private/bionic_asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#else
# include <regdef.h>
# include <sys/asm.h>
#endif

/* Check to see if the MIPS architecture we are compiling for supports
   prefetching.  */

#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
#  define USE_PREFETCH
# endif
#endif

#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
#  define USE_DOUBLE
# endif
#endif

#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
#  define DOUBLE_ALIGN
# endif
#endif

/* Some asm.h files do not have the L macro definition.  */
#ifndef L
# if _MIPS_SIM == _ABIO32
#  define L(label) $L ## label
# else
#  define L(label) .L ## label
# endif
#endif

/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
#ifndef PTR_ADDIU
# if _MIPS_SIM == _ABIO32
#  define PTR_ADDIU addiu
# else
#  define PTR_ADDIU daddiu
# endif
#endif

/* New R6 instructions that may not be in asm.h.  */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABIO32
#  define PTR_LSA lsa
# else
#  define PTR_LSA dlsa
# endif
#endif

/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
   or PREFETCH_STORE_STREAMED offers a large performance advantage
   but PREPAREFORSTORE has some special restrictions to consider.

   Prefetch with the 'prepare for store' hint does not copy a memory
   location into the cache; it just allocates a cache line and zeros
   it out.  This means that if you do not write to the entire cache
   line before it is written back to memory, the bytes you did not
   write are replaced with zeros and that data is lost.

   There are ifdef'ed sections of this memset to make sure that it does not
   do prefetches on cache lines that are not going to be completely written.
   This code is only needed and only used when PREFETCH_STORE_HINT is set to
   PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
   less than MAX_PREFETCH_SIZE bytes; it will not work correctly if the
   cache line is larger.  */
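
/* For example, with 32-byte cache lines, issuing a PREPAREFORSTORE prefetch
   for a line that lies partly beyond the end of the destination buffer would
   zero-fill that whole line; the bytes past the buffer would later be written
   back to memory as zeros, corrupting data this routine was never asked to
   touch.  The PREFETCH_LIMIT logic below exists to prevent exactly that.  */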

#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE		1
# define PREFETCH_HINT_STORE_STREAMED	5
# define PREFETCH_HINT_STORE_RETAINED	7
# define PREFETCH_HINT_PREPAREFORSTORE	30

/* If we have not picked out what hints to use at this point use the
   standard load and store prefetch hints.  */
# ifndef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif

/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
   get 64 bytes in that case.  The assumption is that each individual
   prefetch brings in 32 bytes.  */
# ifdef USE_DOUBLE
#  define PREFETCH_CHUNK 64
#  define PREFETCH_FOR_STORE(chunk, reg) \
	pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
	pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
#  define PREFETCH_CHUNK 32
#  define PREFETCH_FOR_STORE(chunk, reg) \
	pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
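
/* For example, PREFETCH_FOR_STORE (1, a0) prefetches at offsets 64 and 96
   from a0 when USE_DOUBLE is set, and at offset 32 otherwise.  */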

/* MAX_PREFETCH_SIZE is the maximum size of a prefetch; it must not be less
   than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
   of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
   hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
   used then MAX_PREFETCH_SIZE does not matter.  */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
   than 5 on a STORE prefetch and that a single prefetch can never be larger
   than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
   we actually do two prefetches in that case, one 32 bytes after the other.  */
# ifdef USE_DOUBLE
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
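
/* With the values above this works out to (5 * 64) + 32 + 128 = 480 bytes
   when USE_DOUBLE is set and (5 * 32) + 128 = 288 bytes otherwise.  */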

# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
   are before the buffer being set.  The loop's prefetches start at chunk
   offset 4, so we avoid this situation when using PREPAREFORSTORE.  */
#  error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif

#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
#  undef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif

/* Allow the routine to be named something else if desired.  */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
#endif

/* We load/store 64 bits at a time when USE_DOUBLE is true.
   The C_ prefix stands for CHUNK and is used to avoid macro name
   conflicts with system header files.  */

#ifdef USE_DOUBLE
# define C_ST	sd
# if __MIPSEB
#  define C_STHI	sdl	/* high part is left in big-endian	*/
# else
#  define C_STHI	sdr	/* high part is right in little-endian	*/
# endif
#else
# define C_ST	sw
# if __MIPSEB
#  define C_STHI	swl	/* high part is left in big-endian	*/
# else
#  define C_STHI	swr	/* high part is right in little-endian	*/
# endif
#endif

/* Bookkeeping values for 32 vs. 64 bit mode.  */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
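
/* For example, UNIT(16) is 64 bytes with 4-byte stores and 128 bytes with
   8-byte stores, which is why the main loop below advances a0 by UNIT(16)
   on every iteration.  */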

#ifdef __ANDROID__
LEAF(MEMSET_NAME,0)
#else
LEAF(MEMSET_NAME)
#endif

	.set	nomips16
	.set	noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
   size, copy dst pointer to v0 for the return value.  */
	slti	t2,a2,(2 * NSIZE)
	bne	t2,zero,L(lastb)
	move	v0,a0

/* If memset value is not zero, we copy it to all the bytes in a 32 or 64
   bit word.  */
	beq	a1,zero,L(set0)		/* If memset value is zero no smear  */
	PTR_SUBU a3,zero,a0
	nop

/* smear byte into 32 or 64 bit word */
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
# ifdef USE_DOUBLE
	dins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	dins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
	dins	a1, a1, 32, 32	/* Replicate fill byte into dbl word.  */
# else
	ins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	ins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
# endif
#else
# ifdef USE_DOUBLE
	and	a1,0xff
	dsll	t2,a1,8
	or	a1,t2
	dsll	t2,a1,16
	or	a1,t2
	dsll	t2,a1,32
	or	a1,t2
# else
	and	a1,0xff
	sll	t2,a1,8
	or	a1,t2
	sll	t2,a1,16
	or	a1,t2
# endif
#endif
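
/* Either way, a fill byte of 0x5a ends up as 0x5a5a5a5a in a1 (or
   0x5a5a5a5a5a5a5a5a when USE_DOUBLE is set), so each (d)word store below
   writes the fill byte to every byte it touches.  */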

/* If the destination address is not aligned do a partial store to get it
   aligned.  If it is already aligned just jump to L(aligned).  */
L(set0):
#ifndef R6_CODE
	andi	t2,a3,(NSIZE-1)		/* word-unaligned address?          */
	beq	t2,zero,L(aligned)	/* t2 is the unalignment count      */
	PTR_SUBU a2,a2,t2
	C_STHI	a1,0(a0)
	PTR_ADDU a0,a0,t2
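/* Because every byte of a1 holds the fill value, the single C_STHI
   (swl/swr or sdl/sdr) above fills exactly the t2 bytes from a0 up to the
   next (d)word boundary on either endianness.  */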
#else /* R6_CODE */
	andi	t2,a0,(NSIZE-1)		/* t2 = bytes a0 is past alignment   */
	lapc	t9,L(atable)		/* t9 = address of the branch table  */
	PTR_LSA	t9,t2,t9,2		/* index the 4-byte bc entries by t2 */
	jrc	t9
L(atable):
	bc	L(aligned)
# ifdef USE_DOUBLE
	bc	L(lb7)
	bc	L(lb6)
	bc	L(lb5)
	bc	L(lb4)
# endif
	bc	L(lb3)
	bc	L(lb2)
	bc	L(lb1)
L(lb7):
	sb	a1,6(a0)
L(lb6):
	sb	a1,5(a0)
L(lb5):
	sb	a1,4(a0)
L(lb4):
	sb	a1,3(a0)
L(lb3):
	sb	a1,2(a0)
L(lb2):
	sb	a1,1(a0)
L(lb1):
	sb	a1,0(a0)

	li	t9,NSIZE
	subu	t2,t9,t2		/* t2 = NSIZE - misalignment = bytes just stored */
	PTR_SUBU a2,a2,t2
	PTR_ADDU a0,a0,t2
#endif /* R6_CODE */

L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on an 8 byte
   boundary instead of a 4 byte boundary to maximize the opportunity of
   proAptiv chips to do memory bonding (combining two sequential 4 byte
   stores into one 8 byte store).  We know there are at least 4 bytes left
   to store or we would have jumped to L(lastb) earlier in the code.  */
#ifdef DOUBLE_ALIGN
	andi	t2,a3,4
	beq	t2,zero,L(double_aligned)
	PTR_SUBU a2,a2,t2
	sw	a1,0(a0)
	PTR_ADDU a0,a0,t2
L(double_aligned):
#endif

/* Now the destination is word (or double word) aligned.  Set t8 to the number
   of bytes that will still need setting after all the full 64-byte/128-byte
   chunks are stored (t8 is moved into a2 once the loop is done), and a3 to
   the dst pointer at which those chunks end.  We will loop, incrementing a0,
   until a0 equals a3.  */
	andi	t8,a2,NSIZEDMASK	/* any whole 64-byte/128-byte chunks?  */
	beq	a2,t8,L(chkw)		/* if a2==t8, no 64-byte/128-byte chunks  */
	PTR_SUBU a3,a2,t8		/* subtract the remainder from a2  */
	PTR_ADDU a3,a0,a3		/* now a3 is the final dst after loop  */
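
/* For example, in a 32-bit build with 200 bytes to set, t8 = 200 & 0x3f = 8,
   so a3 = a0 + 192 and L(loop16w) below runs three times, storing 64 bytes
   per pass; the remaining 8 bytes are handled from L(chkw) onwards.  */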

/* When in the loop we may prefetch with the 'prepare to store' hint; in that
   case a0+x should not be past the "t0-32" address.  This means that for
   x=128 the last "safe" a0 address is "t0-160", and for x=64 the last "safe"
   a0 address is "t0-96".  In the current version we will use
   "prefetch hint,128(a0)", so "t0-160" is the limit.  */
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address  */
	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address  */
#endif
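/* With the PREFETCH_LIMIT defined above this is conservative: the loop stops
   issuing PREPAREFORSTORE prefetches once fewer than 288 (or 480 with
   USE_DOUBLE) bytes remain to be set.  */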
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
#endif

L(loop16w):
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch  */
	bgtz	v1,L(skip_pref)
	nop
#endif
#ifndef R6_CODE
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
#else
	PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	C_ST	a1,UNIT(8)(a0)
	C_ST	a1,UNIT(9)(a0)
	C_ST	a1,UNIT(10)(a0)
	C_ST	a1,UNIT(11)(a0)
	C_ST	a1,UNIT(12)(a0)
	C_ST	a1,UNIT(13)(a0)
	C_ST	a1,UNIT(14)(a0)
	C_ST	a1,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest  */
	bne	a0,a3,L(loop16w)
	nop
	move	a2,t8

/* Here we have dest word-aligned but fewer than 64 (or 128) bytes to go.
   Check for a 32 (or 64) byte chunk and store it if there is one.  Otherwise
   jump down to L(chk1w) to handle the remaining bytes.  */
L(chkw):
	andi	t8,a2,NSIZEMASK		/* is there a 32-byte/64-byte chunk?  */
					/* t8 is the remainder past that chunk  */
	beq	a2,t8,L(chk1w)		/* when a2==t8, no 32-byte/64-byte chunk  */
	nop
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)

/* Here we have fewer than 32 (or 64) bytes to set.  Set up for a loop to
   store one word (or double word) at a time.  Set a2 to count how many
   bytes are left after all the word (or double word) chunks are stored,
   and a3 to the dst pointer at which those chunks end.  We will loop,
   incrementing a0, until a0 equals a3.  */
L(chk1w):
	andi	a2,t8,(NSIZE-1)		/* a2 is the remainder past the (d)word chunks  */
	beq	a2,t8,L(lastb)
	PTR_SUBU a3,t8,a2		/* a3 is the count of bytes in whole (d)word chunks  */
	PTR_ADDU a3,a0,a3		/* a3 is the dst address after loop  */
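
/* For example, if 45 bytes remain on entry to L(chkw) in a 32-bit build, the
   eight stores above write 32 of them and t8 = 13; here a2 becomes 1 and a3
   is set 12 bytes ahead, so L(wordCopy_loop) stores 12 more bytes and
   L(lastb) stores the final byte.  */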

/* storing in words (4-byte or 8-byte chunks)  */
L(wordCopy_loop):
	PTR_ADDIU a0,a0,UNIT(1)
	bne	a0,a3,L(wordCopy_loop)
	C_ST	a1,UNIT(-1)(a0)

/* Store the last 8 (or 16) or fewer bytes, one byte at a time.  */
L(lastb):
	blez	a2,L(leave)
	PTR_ADDU a3,a0,a2		/* a3 is the last dst address  */
L(lastbloop):
	PTR_ADDIU a0,a0,1
	bne	a0,a3,L(lastbloop)
	sb	a1,-1(a0)
L(leave):
	j	ra
	nop

	.set	at
	.set	reorder
END(MEMSET_NAME)
#ifndef __ANDROID__
# ifdef _LIBC
libc_hidden_builtin_def (MEMSET_NAME)
# endif
#endif