/*
 * rmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 *  This memory manager provides general heap management and arbitrary
 *  alignment for any number of memory segments.
 *
 *  Notes:
 *
 *  Memory blocks are allocated from the start of the first free memory
 *  block large enough to satisfy the request.  Alignment requirements
 *  are satisfied by "sliding" the block forward until its base satisfies
 *  the alignment specification; if this is not possible then the next
 *  free block large enough to hold the request is tried.
 *
 *  Since alignment can cause the creation of a new free block - the
 *  unused memory formed between the start of the original free block
 *  and the start of the allocated block - the memory manager must free
 *  this memory to prevent a memory leak.
 *
 *  Overlay memory is managed by reserving it through rmm_alloc, and
 *  freeing it through rmm_free. The memory manager prevents DSP
 *  code/data that is overlaid from being overwritten as long as the
 *  memory it occupies has been allocated and not yet freed.
 */
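
/*
 *  Worked example (illustrative numbers only, not from any real target
 *  memory map): suppose a segment's free list holds one block at DSP
 *  address 0x90 with size 0x100, and a caller requests size = 0x40 with
 *  align = 0x80. alloc_block() computes the slide as
 *
 *      tmpalign = 0x90 % 0x80 = 0x10  ->  0x80 - 0x10 = 0x70
 *
 *  so the allocation starts at 0x90 + 0x70 = 0x100 and consumes
 *  allocsize = 0x40 + 0x70 = 0xB0 of the block. The 0x70-MAU hole at
 *  0x90 is immediately handed back through free_block(), and the free
 *  block shrinks to 0x50 MAUs starting at 0x140.
 */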

#include <linux/types.h>
#include <linux/kernel.h>       /* max() */
#include <linux/slab.h>         /* kzalloc(), kfree() */

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>

/*  ----------------------------------- This */
#include <dspbridge/rmm.h>

/*
 *  ======== rmm_header ========
 *  This header is used to maintain a list of free memory blocks.
 */
struct rmm_header {
        struct rmm_header *next;        /* forms the free-memory linked list */
        u32 size;               /* size of the free block */
        u32 addr;               /* DSP address of the memory block */
};

/*
 *  ======== rmm_ovly_sect ========
 *  Keeps track of memory occupied by an overlay section.
 */
struct rmm_ovly_sect {
        struct list_head list_elem;
        u32 addr;               /* Start of memory section */
        u32 size;               /* Length (target MAUs) of section */
        s32 page;               /* Memory page */
};

/*
 *  ======== rmm_target_obj ========
 */
struct rmm_target_obj {
        struct rmm_segment *seg_tab;
        struct rmm_header **free_list;
        u32 num_segs;
        struct lst_list *ovly_list;     /* List of overlay memory in use */
};
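
/*
 *  Note: free_list[] holds one header chain per segment. The first
 *  header of each chain doubles as the chain anchor: alloc_block() and
 *  free_block() may shrink it to zero size but never unlink it, so
 *  free_list[i] stays valid until rmm_delete().
 */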

static u32 refs;                /* module reference count */

static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
                        u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
                       u32 size);

/*
 *  ======== rmm_alloc ========
 */
int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
              u32 align, u32 *dsp_address, bool reserve)
{
        struct rmm_ovly_sect *sect;
        struct rmm_ovly_sect *prev_sect = NULL;
        struct rmm_ovly_sect *new_sect;
        u32 addr;
        int status = 0;

        DBC_REQUIRE(target);
        DBC_REQUIRE(dsp_address != NULL);
        DBC_REQUIRE(size > 0);
        DBC_REQUIRE(reserve || (target->num_segs > 0));
        DBC_REQUIRE(refs > 0);

        if (!reserve) {
                if (!alloc_block(target, segid, size, align, dsp_address)) {
                        status = -ENOMEM;
                } else {
                        /* Increment the number of allocated blocks in this
                         * segment */
                        target->seg_tab[segid].number++;
                }
                goto func_end;
        }
        /* An overlay section - see if the block is already in use. If not,
         * insert it into the list in ascending address order. */
        addr = *dsp_address;
        sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
        /*  Find the place to insert the new list element. The list is
         *  sorted from smallest to largest address. */
        while (sect != NULL) {
                if (addr <= sect->addr) {
                        /* Check for overlap with sect */
                        if ((addr + size > sect->addr) || (prev_sect &&
                                                           (prev_sect->addr +
                                                            prev_sect->size >
                                                            addr))) {
                                status = -ENXIO;
                        }
                        break;
                }
                prev_sect = sect;
                sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
                                                        (struct list_head *)
                                                        sect);
        }
        if (!status) {
                /* No overlap - allocate a list element for the new section. */
                new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
                if (new_sect == NULL) {
                        status = -ENOMEM;
                } else {
                        lst_init_elem((struct list_head *)new_sect);
                        new_sect->addr = addr;
                        new_sect->size = size;
                        new_sect->page = segid;
                        if (sect == NULL) {
                                /* Put the new section at the end of the list */
                                lst_put_tail(target->ovly_list,
                                             (struct list_head *)new_sect);
                        } else {
                                /* Put the new section just before sect */
                                lst_insert_before(target->ovly_list,
                                                  (struct list_head *)new_sect,
                                                  (struct list_head *)sect);
                        }
                }
        }
func_end:
        return status;
}
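
/*
 *  Reservation sketch (hypothetical DSP addresses): with reserve = true,
 *  *dsp_address is an input, not an output. To pin an overlay section
 *  that will run at DSP address 0x2000 for 0x400 MAUs:
 *
 *      u32 dsp_addr = 0x2000;
 *      int status = rmm_alloc(target, 0, 0x400, 0, &dsp_addr, true);
 *
 *  A later reservation overlapping [0x2000, 0x2400) fails with -ENXIO
 *  until the section is released with rmm_free(..., true).
 */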

/*
 *  ======== rmm_create ========
 */
int rmm_create(struct rmm_target_obj **target_obj,
               struct rmm_segment seg_tab[], u32 num_segs)
{
        struct rmm_header *hptr;
        struct rmm_segment *sptr, *tmp;
        struct rmm_target_obj *target;
        s32 i;
        int status = 0;

        DBC_REQUIRE(target_obj != NULL);
        DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);

        /* Allocate the target object */
        target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);

        if (target == NULL)
                status = -ENOMEM;

        if (status)
                goto func_cont;

        target->num_segs = num_segs;
        if (num_segs == 0)
                goto func_cont;

        /* Allocate the free-list array from host memory */
        target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
                                    GFP_KERNEL);
        if (target->free_list == NULL) {
                status = -ENOMEM;
        } else {
                /* Allocate a header for each element on the free list */
                for (i = 0; i < (s32) num_segs; i++) {
                        target->free_list[i] =
                                kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
                        if (target->free_list[i] == NULL) {
                                status = -ENOMEM;
                                break;
                        }
                }
                /* Allocate memory for the initial segment table */
                target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
                                          GFP_KERNEL);
                if (target->seg_tab == NULL) {
                        status = -ENOMEM;
                } else {
                        /* Initialize the segment table and free list */
                        sptr = target->seg_tab;
                        for (i = 0, tmp = seg_tab; num_segs > 0;
                             num_segs--, i++) {
                                *sptr = *tmp;
                                hptr = target->free_list[i];
                                hptr->addr = tmp->base;
                                hptr->size = tmp->length;
                                hptr->next = NULL;
                                tmp++;
                                sptr++;
                        }
                }
        }
func_cont:
        /* Initialize the overlay memory list */
        if (!status) {
                target->ovly_list = kzalloc(sizeof(struct lst_list),
                                            GFP_KERNEL);
                if (target->ovly_list == NULL)
                        status = -ENOMEM;
                else
                        INIT_LIST_HEAD(&target->ovly_list->head);
        }

        if (!status) {
                *target_obj = target;
        } else {
                *target_obj = NULL;
                if (target)
                        rmm_delete(target);
        }

        DBC_ENSURE((!status && *target_obj)
                   || (status && *target_obj == NULL));

        return status;
}
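
/*
 *  Usage sketch (hypothetical segment values; rmm_init()/rmm_exit()
 *  bookkeeping and error handling elided). Field names follow
 *  struct rmm_segment from <dspbridge/rmm.h>:
 *
 *      struct rmm_segment segs[] = {
 *              { .base = 0x80000000, .length = 0x10000,
 *                .space = 0, .number = 0 },
 *      };
 *      struct rmm_target_obj *target;
 *      u32 dsp_addr;
 *
 *      if (!rmm_create(&target, segs, 1)) {
 *              if (!rmm_alloc(target, 0, 0x100, 0x10, &dsp_addr, false))
 *                      rmm_free(target, 0, dsp_addr, 0x100, false);
 *              rmm_delete(target);
 *      }
 */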

/*
 *  ======== rmm_delete ========
 */
void rmm_delete(struct rmm_target_obj *target)
{
        struct rmm_ovly_sect *ovly_section;
        struct rmm_header *hptr;
        struct rmm_header *next;
        u32 i;

        DBC_REQUIRE(target);

        kfree(target->seg_tab);

        if (target->ovly_list) {
                while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
                        (target->ovly_list))) {
                        kfree(ovly_section);
                }
                DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
                kfree(target->ovly_list);
        }

        if (target->free_list != NULL) {
                /* Free the headers on each segment's free list */
                for (i = 0; i < target->num_segs; i++) {
                        hptr = next = target->free_list[i];
                        while (next) {
                                hptr = next;
                                next = hptr->next;
                                kfree(hptr);
                        }
                }
                kfree(target->free_list);
        }

        kfree(target);
}

/*
 *  ======== rmm_exit ========
 */
void rmm_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;

        DBC_ENSURE(refs >= 0);
}

/*
 *  ======== rmm_free ========
 */
bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
              bool reserved)
{
        struct rmm_ovly_sect *sect;
        bool ret = true;

        DBC_REQUIRE(target);

        DBC_REQUIRE(reserved || segid < target->num_segs);
        DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
                                 (dsp_addr + size) <=
                                 (target->seg_tab[segid].base +
                                  target->seg_tab[segid].length)));

        /*
         *  Free or unreserve memory.
         */
        if (!reserved) {
                ret = free_block(target, segid, dsp_addr, size);
                if (ret)
                        target->seg_tab[segid].number--;

        } else {
                /* Unreserve overlay memory: find the section by its start
                 * address and remove it from the list. */
                sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
                while (sect != NULL) {
                        if (dsp_addr == sect->addr) {
                                DBC_ASSERT(size == sect->size);
                                /* Remove from the list */
                                lst_remove_elem(target->ovly_list,
                                                (struct list_head *)sect);
                                kfree(sect);
                                break;
                        }
                        sect =
                            (struct rmm_ovly_sect *)lst_next(target->ovly_list,
                                                             (struct list_head
                                                              *)sect);
                }
                if (sect == NULL)
                        ret = false;

        }
        return ret;
}

/*
 *  ======== rmm_init ========
 */
bool rmm_init(void)
{
        DBC_REQUIRE(refs >= 0);

        refs++;

        return true;
}

/*
 *  ======== rmm_stat ========
 */
bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
              struct dsp_memstat *mem_stat_buf)
{
        struct rmm_header *head;
        bool ret = false;
        u32 max_free_size = 0;
        u32 total_free_size = 0;
        u32 free_blocks = 0;

        DBC_REQUIRE(mem_stat_buf != NULL);
        DBC_ASSERT(target != NULL);

        if ((u32) segid < target->num_segs) {
                head = target->free_list[segid];

                /* Collect data from the free list */
                while (head != NULL) {
                        max_free_size = max(max_free_size, head->size);
                        total_free_size += head->size;
                        free_blocks++;
                        head = head->next;
                }

                mem_stat_buf->ul_size = target->seg_tab[segid].length;
                mem_stat_buf->ul_num_free_blocks = free_blocks;
                mem_stat_buf->ul_total_free_size = total_free_size;
                mem_stat_buf->ul_len_max_free_block = max_free_size;
                mem_stat_buf->ul_num_alloc_blocks =
                    target->seg_tab[segid].number;

                ret = true;
        }

        return ret;
}

/*
 *  ======== alloc_block ========
 *  This allocation function allocates memory from the lowest addresses
 *  first.
 */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
                        u32 align, u32 *dsp_address)
{
        struct rmm_header *head;
        struct rmm_header *prevhead = NULL;
        struct rmm_header *next;
        u32 tmpalign;
        u32 alignbytes;
        u32 hsize;
        u32 allocsize;
        u32 addr;

        alignbytes = (align == 0) ? 1 : align;
        prevhead = NULL;
        head = target->free_list[segid];

        do {
                hsize = head->size;
                next = head->next;

                addr = head->addr;      /* alloc from the bottom */

                /* align the allocation */
                tmpalign = (u32) addr % alignbytes;
                if (tmpalign != 0)
                        tmpalign = alignbytes - tmpalign;

                allocsize = size + tmpalign;

                if (hsize >= allocsize) {       /* big enough */
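                        /* An exact fit unlinks and frees this header,
                         * except when it is the segment's first header
                         * (prevhead == NULL), which stays as the chain
                         * anchor and is shrunk to zero size instead. */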
                        if (hsize == allocsize && prevhead != NULL) {
                                prevhead->next = next;
                                kfree(head);
                        } else {
                                head->size = hsize - allocsize;
                                head->addr += allocsize;
                        }

                        /* free up any hole created by alignment */
                        if (tmpalign)
                                free_block(target, segid, addr, tmpalign);

                        *dsp_address = addr + tmpalign;
                        return true;
                }

                prevhead = head;
                head = next;

        } while (head != NULL);

        return false;
}

/*
 *  ======== free_block ========
 *  TODO: free_block() allocates memory, which could result in failure.
 *  Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
 *  free_block() could use an rmm_header from the pool, freeing as blocks
 *  are coalesced.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
                       u32 size)
{
        struct rmm_header *head;
        struct rmm_header *thead;
        struct rmm_header *rhead;
        bool ret = true;

        /* Create a memory header to hold the newly freed block. */
        rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
        if (rhead == NULL) {
                ret = false;
        } else {
                /* Search down the free list to find the right place for addr */
                head = target->free_list[segid];

                if (addr >= head->addr) {
                        while (head->next != NULL && addr > head->next->addr)
                                head = head->next;

                        thead = head->next;

                        head->next = rhead;
                        rhead->next = thead;
                        rhead->addr = addr;
                        rhead->size = size;
                } else {
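                        /* addr lies below the first header, which must
                         * stay where free_list[segid] points: copy the
                         * first header's contents into rhead, link rhead
                         * in behind it, and reuse the first header for
                         * the newly freed block. */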
                        *rhead = *head;
                        head->next = rhead;
                        head->addr = addr;
                        head->size = size;
                        thead = rhead->next;
                }

                /* join with the upper block, if possible */
                if (thead != NULL && (rhead->addr + rhead->size) ==
                    thead->addr) {
                        head->next = rhead->next;
                        thead->size = size + thead->size;
                        thead->addr = addr;
                        kfree(rhead);
                        rhead = thead;
                }

                /* join with the lower block, if possible */
                if ((head->addr + head->size) == rhead->addr) {
                        head->next = rhead->next;
                        head->size = head->size + rhead->size;
                        kfree(rhead);
                }
        }

        return ret;
}