/*
 * hw_mmu.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * API definitions to setup MMU TLB and PTE
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/io.h>
#include "MMURegAcM.h"
#include <hw_defs.h>
#include <hw_mmu.h>
#include <linux/types.h>
#include <linux/err.h>

#define MMU_BASE_VAL_MASK       0xFC00
#define MMU_PAGE_MAX            3
#define MMU_ELEMENTSIZE_MAX     3
#define MMU_ADDR_MASK           0xFFFFF000
#define MMU_TTB_MASK            0xFFFFC000
#define MMU_SECTION_ADDR_MASK   0xFFF00000
#define MMU_SSECTION_ADDR_MASK  0xFF000000
#define MMU_PAGE_TABLE_MASK     0xFFFFFC00
#define MMU_LARGE_PAGE_MASK     0xFFFF0000
#define MMU_SMALL_PAGE_MASK     0xFFFFF000

#define MMU_LOAD_TLB    0x00000001
#define MMU_GFLUSH      0x60

/*
 * hw_mmu_page_size_t: Enumerated type used to specify the MMU page size (SLSS)
 */
enum hw_mmu_page_size_t {
        HW_MMU_SECTION,
        HW_MMU_LARGE_PAGE,
        HW_MMU_SMALL_PAGE,
        HW_MMU_SUPERSECTION
};
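
/*
 * Note: the enumerators above are relied upon to match the 2-bit page-size
 * encoding written into the low bits of the MMU_CAM register by
 * mmu_set_cam_entry() (section = 0, large page = 1, small page = 2,
 * supersection = 3). This mapping is inferred from the way page_sz is OR'd
 * directly into the CAM value below, not from a separate specification.
 */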

/*
 * FUNCTION             : mmu_set_cam_entry
 *
 * INPUTS:
 *
 *       Identifier      : base_address
 *       Type            : void __iomem *
 *       Description     : Base address of the MMU module instance
 *
 *       Identifier      : page_sz
 *       Type            : const u32
 *       Description     : Indicates the page size
 *
 *       Identifier      : preserved_bit
 *       Type            : const u32
 *       Description     : Indicates whether the TLB entry is a preserved
 *                         entry or not
 *
 *       Identifier      : valid_bit
 *       Type            : const u32
 *       Description     : Indicates whether the TLB entry is a valid entry
 *                         or not
 *
 *       Identifier      : virtual_addr_tag
 *       Type            : const u32
 *       Description     : Virtual address tag
 *
 * RETURNS:
 *
 *       Type            : hw_status
 *       Description     : 0                      -- No errors occurred
 *                         RET_BAD_NULL_PARAM     -- A pointer parameter
 *                                                   was set to NULL
 *                         RET_PARAM_OUT_OF_RANGE -- Input parameter out
 *                                                   of range
 *
 * PURPOSE              : Set the MMU_CAM register
 *
 * METHOD               : Check the input parameters and set the CAM entry.
 */
static hw_status mmu_set_cam_entry(void __iomem *base_address,
                                   const u32 page_sz,
                                   const u32 preserved_bit,
                                   const u32 valid_bit,
                                   const u32 virtual_addr_tag);

/*
 * FUNCTION             : mmu_set_ram_entry
 *
 * INPUTS:
 *
 *       Identifier      : base_address
 *       Type            : void __iomem *
 *       Description     : Base address of the MMU module instance
 *
 *       Identifier      : physical_addr
 *       Type            : const u32
 *       Description     : Physical address to which the corresponding
 *                         virtual address should point
 *
 *       Identifier      : endianism
 *       Type            : hw_endianism_t
 *       Description     : Endianism for the given page
 *
 *       Identifier      : element_size
 *       Type            : hw_element_size_t
 *       Description     : The element size (8, 16, 32 or 64 bit)
 *
 *       Identifier      : mixed_size
 *       Type            : hw_mmu_mixed_size_t
 *       Description     : Element size to follow CPU or TLB
 *
 * RETURNS:
 *
 *       Type            : hw_status
 *       Description     : 0                      -- No errors occurred
 *                         RET_BAD_NULL_PARAM     -- A pointer parameter
 *                                                   was set to NULL
 *                         RET_PARAM_OUT_OF_RANGE -- Input parameter
 *                                                   out of range
 *
 * PURPOSE              : Set the MMU_RAM register
 *
 * METHOD               : Check the input parameters and set the RAM entry.
 */
static hw_status mmu_set_ram_entry(void __iomem *base_address,
                                   const u32 physical_addr,
                                   enum hw_endianism_t endianism,
                                   enum hw_element_size_t element_size,
                                   enum hw_mmu_mixed_size_t mixed_size);

/* HW FUNCTIONS */

hw_status hw_mmu_enable(void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);

        return status;
}

hw_status hw_mmu_disable(void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);

        return status;
}

hw_status hw_mmu_num_locked_set(void __iomem *base_address,
                                u32 num_locked_entries)
{
        hw_status status = 0;

        MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);

        return status;
}

hw_status hw_mmu_victim_num_set(void __iomem *base_address,
                                u32 victim_entry_num)
{
        hw_status status = 0;

        MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);

        return status;
}

hw_status hw_mmu_event_ack(void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;

        MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);

        return status;
}

hw_status hw_mmu_event_disable(void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;
        u32 irq_reg;

        irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);

        MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);

        return status;
}

hw_status hw_mmu_event_enable(void __iomem *base_address, u32 irq_mask)
{
        hw_status status = 0;
        u32 irq_reg;

        irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);

        MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);

        return status;
}

hw_status hw_mmu_event_status(void __iomem *base_address, u32 *irq_mask)
{
        hw_status status = 0;

        *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);

        return status;
}

hw_status hw_mmu_fault_addr_read(void __iomem *base_address, u32 *addr)
{
        hw_status status = 0;

        /* read values from register */
        *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);

        return status;
}

hw_status hw_mmu_ttb_set(void __iomem *base_address, u32 ttb_phys_addr)
{
        hw_status status = 0;
        u32 load_ttb;

        load_ttb = ttb_phys_addr & ~0x7FUL;
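        /*
         * The low 7 bits of the TTB physical address are cleared above, so
         * the L1 translation table is assumed to be aligned on a 128-byte
         * boundary (an assumption inferred from the mask, not stated
         * elsewhere in this file).
         */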
        /* write values to register */
        MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);

        return status;
}

hw_status hw_mmu_twl_enable(void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);

        return status;
}

hw_status hw_mmu_twl_disable(void __iomem *base_address)
{
        hw_status status = 0;

        MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);

        return status;
}

262
263 hw_status hw_mmu_tlb_add(void __iomem *base_address,
264                          u32 physical_addr,
265                          u32 virtual_addr,
266                          u32 page_sz,
267                          u32 entry_num,
268                          struct hw_mmu_map_attrs_t *map_attrs,
269                          s8 preserved_bit, s8 valid_bit)
270 {
271         hw_status status = 0;
272         u32 lock_reg;
273         u32 virtual_addr_tag;
274         enum hw_mmu_page_size_t mmu_pg_size;
275
276         /*Check the input Parameters */
277         switch (page_sz) {
278         case HW_PAGE_SIZE4KB:
279                 mmu_pg_size = HW_MMU_SMALL_PAGE;
280                 break;
281
282         case HW_PAGE_SIZE64KB:
283                 mmu_pg_size = HW_MMU_LARGE_PAGE;
284                 break;
285
286         case HW_PAGE_SIZE1MB:
287                 mmu_pg_size = HW_MMU_SECTION;
288                 break;
289
290         case HW_PAGE_SIZE16MB:
291                 mmu_pg_size = HW_MMU_SUPERSECTION;
292                 break;
293
294         default:
295                 return -EINVAL;
296         }
297
298         lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
299
300         /* Generate the 20-bit tag from virtual address */
301         virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
302
303         /* Write the fields in the CAM Entry Register */
304         mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
305                           virtual_addr_tag);
306
307         /* Write the different fields of the RAM Entry Register */
308         /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
309         mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
310                           map_attrs->element_size, map_attrs->mixed_size);
311
312         /* Update the MMU Lock Register */
313         /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
314         MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
315
316         /* Enable loading of an entry in TLB by writing 1
317            into LD_TLB_REG register */
318         MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
319
320         MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
321
322         return status;
323 }
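
/*
 * Illustrative sketch (not part of the driver): a caller that wants to pin a
 * single 4 KB mapping into TLB entry 0 might do roughly the following. The
 * names dsp_mmu_base, l1_table_phys, phys_addr and virt_addr are placeholders
 * (the base address is assumed to come from an ioremap() of the DSP MMU), and
 * attrs is zero-initialised only for brevity:
 *
 *      struct hw_mmu_map_attrs_t attrs = { 0 };
 *
 *      hw_mmu_ttb_set(dsp_mmu_base, l1_table_phys);
 *      hw_mmu_tlb_add(dsp_mmu_base, phys_addr, virt_addr, HW_PAGE_SIZE4KB,
 *                     0, &attrs, 1, 1);
 *      hw_mmu_enable(dsp_mmu_base);
 *
 * where the trailing 1, 1 mark the entry as preserved and valid.
 */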

hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
                         u32 physical_addr,
                         u32 virtual_addr,
                         u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
{
        hw_status status = 0;
        u32 pte_addr, pte_val;
        s32 num_entries = 1;

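        /*
         * Each case below assembles an ARM-style first- or second-level
         * descriptor for the DSP MMU's translation table, merging the
         * endianness, element-size and mixed-size attributes into the
         * descriptor's attribute bits. For 64 KB large pages and 16 MB
         * supersections the same descriptor must be replicated into 16
         * consecutive table slots, which is what num_entries = 16 arranges.
         */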
        switch (page_sz) {
        case HW_PAGE_SIZE4KB:
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SMALL_PAGE_MASK);
                pte_val =
                    ((physical_addr & MMU_SMALL_PAGE_MASK) |
                     (map_attrs->endianism << 9) |
                     (map_attrs->element_size << 4) |
                     (map_attrs->mixed_size << 11) | 2);
                break;

        case HW_PAGE_SIZE64KB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_LARGE_PAGE_MASK);
                pte_val =
                    ((physical_addr & MMU_LARGE_PAGE_MASK) |
                     (map_attrs->endianism << 9) |
                     (map_attrs->element_size << 4) |
                     (map_attrs->mixed_size << 11) | 1);
                break;

        case HW_PAGE_SIZE1MB:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                pte_val =
                    ((((physical_addr & MMU_SECTION_ADDR_MASK) |
                       (map_attrs->endianism << 15) |
                       (map_attrs->element_size << 10) |
                       (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
                break;

        case HW_PAGE_SIZE16MB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SSECTION_ADDR_MASK);
                pte_val =
                    (((physical_addr & MMU_SSECTION_ADDR_MASK) |
                      (map_attrs->endianism << 15) |
                      (map_attrs->element_size << 10) |
                      (map_attrs->mixed_size << 17)) | 0x40000 | 0x2);
                break;

        case HW_MMU_COARSE_PAGE_SIZE:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
                break;

        default:
                return -EINVAL;
        }

        while (--num_entries >= 0)
                ((u32 *) pte_addr)[num_entries] = pte_val;

        return status;
}
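
/*
 * Illustrative sketch (not part of the driver): mapping a 4 KB page through a
 * second-level (coarse) table takes two hw_mmu_pte_set() calls, one to make
 * the L1 entry point at the L2 table and one to fill in the 4 KB small-page
 * descriptor. The l1_table_va, l2_table_va, l2_table_phys, virt_addr and
 * phys_addr names below are placeholders, and attrs is a
 * struct hw_mmu_map_attrs_t set up by the caller:
 *
 *      hw_mmu_pte_set(l1_table_va, l2_table_phys, virt_addr,
 *                     HW_MMU_COARSE_PAGE_SIZE, &attrs);
 *      hw_mmu_pte_set(l2_table_va, phys_addr, virt_addr,
 *                     HW_PAGE_SIZE4KB, &attrs);
 */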

hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
{
        hw_status status = 0;
        u32 pte_addr;
        s32 num_entries = 1;

        switch (page_size) {
        case HW_PAGE_SIZE4KB:
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SMALL_PAGE_MASK);
                break;

        case HW_PAGE_SIZE64KB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
                                              virtual_addr &
                                              MMU_LARGE_PAGE_MASK);
                break;

        case HW_PAGE_SIZE1MB:
        case HW_MMU_COARSE_PAGE_SIZE:
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SECTION_ADDR_MASK);
                break;

        case HW_PAGE_SIZE16MB:
                num_entries = 16;
                pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
                                              virtual_addr &
                                              MMU_SSECTION_ADDR_MASK);
                break;

        default:
                return -EINVAL;
        }

        while (--num_entries >= 0)
                ((u32 *) pte_addr)[num_entries] = 0;

        return status;
}

/* mmu_set_cam_entry */
static hw_status mmu_set_cam_entry(void __iomem *base_address,
                                   const u32 page_sz,
                                   const u32 preserved_bit,
                                   const u32 valid_bit,
                                   const u32 virtual_addr_tag)
{
        hw_status status = 0;
        u32 mmu_cam_reg;

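        /*
         * Assemble the CAM word: the 20-bit virtual address tag occupies
         * bits [31:12], the preserved flag bit 3, the valid flag bit 2 and
         * the 2-bit page-size field bits [1:0].
         */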
        mmu_cam_reg = (virtual_addr_tag << 12);
        mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
            (preserved_bit << 3);

        /* write values to register */
        MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);

        return status;
}

/* mmu_set_ram_entry */
static hw_status mmu_set_ram_entry(void __iomem *base_address,
                                   const u32 physical_addr,
                                   enum hw_endianism_t endianism,
                                   enum hw_element_size_t element_size,
                                   enum hw_mmu_mixed_size_t mixed_size)
{
        hw_status status = 0;
        u32 mmu_ram_reg;

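        /*
         * Assemble the RAM word: the physical page number occupies
         * bits [31:12], endianism bit 9, element size bits [8:7] and the
         * mixed-size (CPU/TLB element size) flag bit 6.
         */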
        mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
        mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
                                       (mixed_size << 6));

        /* write values to register */
        MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);

        return status;
}

void hw_mmu_tlb_flush_all(void __iomem *base)
{
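        /*
         * Writing 1 to the global flush register (offset MMU_GFLUSH) is
         * expected to flush all non-protected TLB entries in one go.
         */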
        __raw_writel(1, base + MMU_GFLUSH);
}