/* Optimized version of the standard bzero() function.
   This file is part of the GNU C Library.
   Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Dan Pop for Itanium <Dan.Pop@cern.ch>.
   Rewritten for McKinley by Sverre Jarp, HP Labs/CERN <Sverre.Jarp@cern.ch>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* The algorithm is fairly straightforward: set byte by byte until we
   get to a 16B-aligned address, then loop on 128B chunks using an
   early store as prefetching, then loop on 32B chunks, then clear remaining
   words, finally clear remaining bytes.
   Since a stf.spill f0 can store 16B in one go, we use this instruction
   to get peak speed.  */
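
/* For reference, a minimal C sketch of the same staging (illustrative
   only; `bzero_sketch` is a hypothetical name, and the early-store
   prefetching and predication of the assembly below are not modeled):

       #include <stddef.h>
       #include <stdint.h>

       static void bzero_sketch (void *s, size_t n)
       {
         unsigned char *p = s;
         while (n > 0 && ((uintptr_t) p & 15) != 0)  // bytes up to 16B alignment
           { *p++ = 0; n--; }
         while (n >= 128)                            // 128B (cache line) chunks
           {
             int i;
             for (i = 0; i < 16; i++)
               ((uint64_t *) p)[i] = 0;
             p += 128; n -= 128;
           }
         while (n >= 32)                             // 32B chunks
           {
             int i;
             for (i = 0; i < 4; i++)
               ((uint64_t *) p)[i] = 0;
             p += 32; n -= 32;
           }
         while (n >= 8)                              // remaining words
           { *(uint64_t *) p = 0; p += 8; n -= 8; }
         while (n > 0)                               // remaining bytes
           { *p++ = 0; n--; }
       }  */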
#ifdef __UCLIBC_SUSV3_LEGACY__

/* This routine uses only scratch predicate registers (p6 - p15) */
#define p_scr p6 /* default register for same-cycle branches */

#define LSIZE_SH 7 /* shift amount */
/* The store macro: integer st8 of r0, or floating-point stf8 of f0 */
#if defined(USE_INT)
#define store st8
#define myval r0
#elif defined(USE_FLP)
#define store stf8
#define myval f0
#endif
        alloc tmp = ar.pfs, 2, 0, 0, 0
        mov ret0 = dest /* return value */
        cmp.eq p_scr, p0 = cnt, r0
        and ptr2 = -(MIN1+1), dest /* aligned address */
        and tmp = MIN1, dest /* prepare to check for alignment */
        tbit.nz p_y, p_n = dest, 0 /* Do we have an odd address? (M_B_U) */
(p_scr) br.ret.dpnt.many rp /* return immediately if count = 0 */
        cmp.ne p_unalgn, p0 = tmp, r0
} { .mib
        sub bytecnt = (MIN1+1), tmp /* NB: # of bytes to move is 1 higher than loopcnt */
        cmp.gt p_scr, p0 = 16, cnt /* is it a minimalistic task? */
(p_scr) br.cond.dptk.many .move_bytes_unaligned /* go move just a few (M_B_U) */
(p_unalgn) add ptr1 = (MIN1+1), ptr2 /* after alignment */
(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 /* after alignment */
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 /* should we do a st8 ? */
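
/* The predicated stores below clear the unaligned head, working downward
   from the 16B boundary: an st8 clears the top 8 bytes, an st4 the 4 bytes
   below that, and so on, ending at dest.  ptr2 was pre-biased by MIN1P1HALF
   (8) so that every store address is naturally aligned; the post-increments
   (-4/+4, -2/+2, -1/+1) position ptr2 for the next, smaller store.  */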
(p_y)   add cnt = -8, cnt
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 /* should we do a st4 ? */
(p_y)   st8 [ptr2] = r0,-4
(p_n)   add ptr2 = 4, ptr2
(p_yy)  add cnt = -4, cnt
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 /* should we do a st2 ? */
(p_yy)  st4 [ptr2] = r0,-2
(p_nn)  add ptr2 = 2, ptr2
        mov tmp = LINE_SIZE+1 /* for compare */
(p_y)   add cnt = -2, cnt
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 /* should we do a st1 ? */
(p_y)   st2 [ptr2] = r0,-1
(p_n)   add ptr2 = 1, ptr2
(p_yy)  st1 [ptr2] = r0
        cmp.gt p_scr, p0 = tmp, cnt /* is it a minimalistic task? */
(p_yy)  add cnt = -1, cnt
(p_scr) br.cond.dpnt.many .fraction_of_line /* go move just a few */
        shr.u linecnt = cnt, LSIZE_SH
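
/* Early-store prefetching: ptr9 runs up to PREF_AHEAD cache lines ahead of
   the main store loop, clearing the first 16B of each line with a single
   stf.spill.  The store allocates the line in cache, so it acts as a
   prefetch while also doing useful work; the main loop then only has to
   fill bytes 16-127 of each line.  */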
.l1b:   /* ------------------ L1B: store ahead into cache lines; fill later */
        and tmp = -(LINE_SIZE), cnt /* compute end of range */
        mov ptr9 = ptr1 /* used for prefetching */
        and cnt = (LINE_SIZE-1), cnt /* remainder */
        mov loopcnt = PREF_AHEAD-1 /* default prefetch loop */
        cmp.gt p_scr, p0 = PREF_AHEAD, linecnt /* check against actual value */
(p_scr) add loopcnt = -1, linecnt
        add ptr2 = 16, ptr1 /* start of stores (beyond prefetch stores) */
        add ptr1 = tmp, ptr1 /* first address beyond total range */
        add tmp = -1, linecnt /* next loop count */
        movi0 ar.lc = loopcnt
        stf.spill [ptr9] = f0, 128 /* Do stores one cache line apart */
        br.cloop.dptk.few .pref_l1b
        add ptr0 = 16, ptr2 /* Two stores in parallel */
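
/* Each iteration of the loop below clears bytes 16-127 of one cache line
   with seven 16B spills through ptr2/ptr0; bytes 0-15 of every line are
   covered by the prefetch stores through ptr9 above (and by the predicated
   ptr9 store at the bottom once the prefetch loop has run out).  */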
        stf.spill [ptr2] = f0, 32
        stf.spill [ptr0] = f0, 32
        stf.spill [ptr2] = f0, 32
        stf.spill [ptr0] = f0, 32
        stf.spill [ptr2] = f0, 32
        stf.spill [ptr0] = f0, 64
        cmp.lt p_scr, p0 = ptr9, ptr1 /* do we need more prefetching? */
        stf.spill [ptr2] = f0, 32
(p_scr) stf.spill [ptr9] = f0, 128
        br.cloop.dptk.few .l1bx
        cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment
        shr.u loopcnt = cnt, 5 /* loopcnt = cnt / 32 */
        cmp.eq p_scr, p0 = loopcnt, r0
        add loopcnt = -1, loopcnt
(p_scr) br.cond.dpnt.many .store_words
        and cnt = 0x1f, cnt /* compute the remaining cnt */
        movi0 ar.lc = loopcnt
.l2:    /* ----------------------------- L2A: store 32B in 2 cycles */
        store [ptr1] = myval, 8
        store [ptr2] = myval, 8
        store [ptr1] = myval, 24
        store [ptr2] = myval, 24
        br.cloop.dptk.many .l2
        cmp.gt p_scr, p0 = 8, cnt /* just a few bytes left ? */
(p_scr) br.cond.dpnt.many .move_bytes_from_alignment /* Branch */
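
/* At most three predicated 8-byte stores follow: they reduce the remainder
   below 8 bytes, which .move_bytes_from_alignment then finishes off.  */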
        store [ptr1] = myval, 8 /* store */
        cmp.le p_y, p_n = 16, cnt
        add cnt = -8, cnt /* subtract */
(p_y)   store [ptr1] = myval, 8 /* store */
(p_y)   cmp.le.unc p_yy, p_nn = 16, cnt
(p_y)   add cnt = -8, cnt /* subtract */
(p_yy)  store [ptr1] = myval, 8
(p_yy)  add cnt = -8, cnt /* subtract */
.move_bytes_from_alignment:
        cmp.eq p_scr, p0 = cnt, r0
        tbit.nz.unc p_y, p0 = cnt, 2 /* should we terminate with a st4 ? */
(p_scr) br.cond.dpnt.few .restore_and_exit
(p_y)   st4 [ptr1] = r0,4
        tbit.nz.unc p_yy, p0 = cnt, 1 /* should we terminate with a st2 ? */
(p_yy)  st2 [ptr1] = r0,2
        tbit.nz.unc p_y, p0 = cnt, 0 /* should we terminate with a st1 ? */
(p_y)   st1 [ptr1] = r0
        movi0 ar.lc = save_lc
.move_bytes_unaligned:
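/* Clears up to 15 bytes at an arbitrary alignment: an optional st1 first
   (if dest is odd; p_y comes from the tbit at entry), then up to three
   rounds of paired st2's through ptr1/ptr2 (4 bytes per round), a possible
   single trailing st2, and a final st1 through ptr3, which points at the
   last byte.  */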
        .pred.rel "mutex",p_y, p_n
        .pred.rel "mutex",p_yy, p_nn
(p_n)   cmp.le p_yy, p_nn = 4, cnt
(p_y)   cmp.le p_yy, p_nn = 5, cnt
(p_n)   add ptr2 = 2, ptr1
(p_y)   add ptr2 = 3, ptr1
(p_y)   st1 [ptr1] = r0, 1 /* fill 1 (odd-aligned) byte */
(p_y)   add cnt = -1, cnt /* [15, 14 (or less) left] */
(p_yy)  cmp.le.unc p_y, p0 = 8, cnt
        add ptr3 = ptr1, cnt /* prepare last store */
        movi0 ar.lc = save_lc
(p_yy)  st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
(p_yy)  st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
(p_yy)  add cnt = -4, cnt /* [11, 10 (or less) left] */
(p_y)   cmp.le.unc p_yy, p0 = 8, cnt
        add ptr3 = -1, ptr3 /* last store */
        tbit.nz p_scr, p0 = cnt, 1 /* will there be a st2 at the end ? */
(p_y)   st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
(p_y)   st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
(p_y)   add cnt = -4, cnt /* [7, 6 (or less) left] */
(p_yy)  st2 [ptr1] = r0, 4 /* fill 2 (aligned) bytes */
(p_yy)  st2 [ptr2] = r0, 4 /* fill 2 (aligned) bytes */
        /* [3, 2 (or less) left] */
        tbit.nz p_y, p0 = cnt, 0 /* will there be a st1 at the end ? */
(p_yy)  add cnt = -4, cnt
(p_scr) st2 [ptr1] = r0 /* fill 2 (aligned) bytes */
(p_y)   st1 [ptr3] = r0 /* fill last byte (using ptr3) */