/*
 * arch/arm/mach-tegra/nvos/nvos_page.c
 *
 * Implementation of NvOsPage* APIs using the Linux page allocator
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include "nvcommon.h"
#include "nvos.h"

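/*
 * Trace and debug builds wrap the NvOsPage* entry points in macros; undo
 * the wrappers here so the real implementations below can be defined
 * under their plain names.
 */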
#if NVOS_TRACE || NV_DEBUG
#undef NvOsPageMap
#undef NvOsPageGetPage
#undef NvOsPageAddress
#undef NvOsPageUnmap
#undef NvOsPageAlloc
#undef NvOsPageFree
#undef NvOsPageLock
#undef NvOsPageMapIntoPtr
#endif

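/*
 * L_PTE_MT_INNER_WB encodes an inner-write-back memory type in the Linux
 * PTE memory-type field on ARMv6/ARMv7; pgprot_inner_writeback()
 * substitutes it into an existing pgprot. nv_gfp_pool permits highmem
 * pages and suppresses allocation-failure warnings, since failures are
 * handled by the callers.
 */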
#define L_PTE_MT_INNER_WB	(0x05 << 2)	/* 0101 (armv6, armv7) */
#define pgprot_inner_writeback(prot) \
	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)

#define nv_gfp_pool	(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)

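/*
 * Book-keeping for one allocation: the optional kernel virtual alias
 * created with vm_map_ram(), the page count, and a variable-length array
 * of the pages themselves (the struct is allocated with space for
 * nr_pages pointers via the pages[1] idiom).
 */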
struct nvos_pagemap {
	void		*addr;
	unsigned int	nr_pages;
	struct page	*pages[1];
};

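/*
 * Flush one page out of the CPU caches: the inner (L1) cache via
 * flush_dcache_page() and the outer (L2) cache by physical range.
 * Highmem pages without a kernel mapping are temporarily kmap()ed so
 * that page_address() is valid for the inner flush.
 */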
static void pagemap_flush_page(struct page *page)
{
#ifdef CONFIG_HIGHMEM
	void *km = NULL;

	if (!page_address(page)) {
		km = kmap(page);
		if (!km) {
			pr_err("unable to map high page\n");
			return;
		}
	}
#endif

	/* flush_dcache_page() takes the struct page, not a kernel
	 * virtual address */
	flush_dcache_page(page);
	outer_flush_range(page_to_phys(page), page_to_phys(page) + PAGE_SIZE);
	dsb();

#ifdef CONFIG_HIGHMEM
	if (km)
		kunmap(page);
#endif
}

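/* Undo nv_alloc_pages(): drop the vm_map_ram() alias if one exists,
 * un-reserve and free every page, then free the book-keeping struct. */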
static void nv_free_pages(struct nvos_pagemap *pm)
{
	unsigned int i;

	if (pm->addr)
		vm_unmap_ram(pm->addr, pm->nr_pages);

	for (i = 0; i < pm->nr_pages; i++) {
		ClearPageReserved(pm->pages[i]);
		__free_page(pm->pages[i]);
	}
	kfree(pm);
}

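/*
 * Allocate 'count' pages, physically contiguous if requested (or if a
 * single page suffices). Contiguous allocations grab a high-order block,
 * split it into order-0 pages, and return the unused tail to the
 * allocator. If create_mapping is set, the pages are also aliased into
 * kernel VA space with the caller's pgprot.
 */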
static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
	pgprot_t prot, bool contiguous, int create_mapping)
{
	struct nvos_pagemap *pm;
	size_t size;
	unsigned int i = 0;

	size = sizeof(struct nvos_pagemap) + sizeof(struct page *) * (count - 1);
	pm = kzalloc(size, GFP_KERNEL);
	if (!pm)
		return NULL;

	if (count == 1)
		contiguous = true;

	if (contiguous) {
		size_t order = get_order(count << PAGE_SHIFT);
		struct page *compound_page;

		compound_page = alloc_pages(nv_gfp_pool, order);
		if (!compound_page)
			goto fail;

		/* convert the high-order block into order-0 pages and
		 * return any unneeded tail pages to the allocator */
		split_page(compound_page, order);
		for (i = 0; i < count; i++)
			pm->pages[i] = nth_page(compound_page, i);

		for ( ; i < (1 << order); i++)
			__free_page(nth_page(compound_page, i));
		i = count;
	} else {
		for (i = 0; i < count; i++) {
			pm->pages[i] = alloc_page(nv_gfp_pool);
			if (!pm->pages[i])
				goto fail;
		}
	}

	if (create_mapping) {
		/* since the linear kernel mapping uses sections and super-
		 * sections rather than PTEs, it's not possible to overwrite
		 * it with the correct caching attributes, so use a local
		 * mapping */
		pm->addr = vm_map_ram(pm->pages, count, -1, prot);
		if (!pm->addr) {
			pr_err("nv_alloc_pages failed to vmap contiguous area\n");
			goto fail;
		}
	}

	pm->nr_pages = count;
	for (i = 0; i < count; i++) {
		SetPageReserved(pm->pages[i]);
		pagemap_flush_page(pm->pages[i]);
	}

	return pm;

fail:
	while (i)
		__free_page(pm->pages[--i]);
	kfree(pm);
	return NULL;
}

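/*
 * NvOsPage* entry points. NvOsPageAllocHandle is an opaque handle which,
 * in this implementation, is simply a struct nvos_pagemap pointer.
 */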
NvError NvOsPageMap(NvOsPageAllocHandle desc, size_t offs,
	size_t size, void **ptr)
{
	struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;

	if (!desc || !ptr || !size)
		return NvError_BadParameter;

	if (pm->addr)
		*ptr = (void *)((unsigned long)pm->addr + offs);
	else
		*ptr = NULL;

	return (*ptr) ? NvSuccess : NvError_MemoryMapFailed;
}

struct page *NvOsPageGetPage(NvOsPageAllocHandle desc, size_t offs)
{
	struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;

	if (!pm)
		return NULL;

	offs >>= PAGE_SHIFT;
	return (likely(offs < pm->nr_pages)) ? pm->pages[offs] : NULL;
}

NvOsPhysAddr NvOsPageAddress(NvOsPageAllocHandle desc, size_t offs)
{
	struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
	size_t index;

	if (unlikely(!pm))
		return (NvOsPhysAddr)0;

	index = offs >> PAGE_SHIFT;
	offs &= (PAGE_SIZE - 1);

	return (NvOsPhysAddr)(page_to_phys(pm->pages[index]) + offs);
}

void NvOsPageUnmap(NvOsPageAllocHandle desc, void *ptr, size_t size)
{
	/* nothing to do: the kernel alias created at allocation time lives
	 * for the lifetime of the allocation and is removed by
	 * NvOsPageFree */
}

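/*
 * Allocate 'size' bytes (rounded up to whole pages) and create a kernel
 * alias with the requested cacheability. The 'protect' argument is not
 * used by this implementation.
 */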
NvError NvOsPageAlloc(size_t size, NvOsMemAttribute attrib,
	NvOsPageFlags flags, NvU32 protect, NvOsPageAllocHandle *desc)
{
	struct nvos_pagemap *pm;
	pgprot_t prot = pgprot_kernel;

	/* round up to whole pages and convert the size to a page count */
	size += PAGE_SIZE - 1;
	size >>= PAGE_SHIFT;

	/* writeback is implemented as inner-cacheable only, since these
	 * allocators are only used to allocate buffers for DMA-driven
	 * clients, and the cost of L2 maintenance makes outer cacheability
	 * a net performance loss more often than not */
	if (attrib == NvOsMemAttribute_WriteBack)
		prot = pgprot_inner_writeback(prot);
	else
		prot = pgprot_writecombine(prot);

	pm = nv_alloc_pages(size, prot, (flags == NvOsPageFlags_Contiguous), 1);
	if (!pm)
		return NvError_InsufficientMemory;

	*desc = (NvOsPageAllocHandle)pm;
	return NvSuccess;
}

void NvOsPageFree(NvOsPageAllocHandle desc)
{
	struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;

	if (pm)
		nv_free_pages(pm);
}

NvError NvOsPageLock(void *ptr, size_t size, NvU32 protect,
	NvOsPageAllocHandle *descriptor)
{
	return NvError_NotImplemented;
}

NvError NvOsPageMapIntoPtr(NvOsPageAllocHandle desc, void *ptr,
	size_t offset, size_t size)
{
	return NvError_NotImplemented;
}
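
/*
 * Illustrative usage sketch (not part of this file; NvOS clients live
 * elsewhere, and only names defined or used above are assumed here).
 * A client allocates one contiguous, inner-write-back page, queries its
 * physical address for a DMA engine, touches it through the CPU alias,
 * and releases it. 'protect' is passed as 0 since this implementation
 * ignores it.
 *
 *	NvOsPageAllocHandle h;
 *	void *va;
 *
 *	if (NvOsPageAlloc(PAGE_SIZE, NvOsMemAttribute_WriteBack,
 *			  NvOsPageFlags_Contiguous, 0, &h) == NvSuccess) {
 *		NvOsPhysAddr pa = NvOsPageAddress(h, 0);
 *		if (NvOsPageMap(h, 0, PAGE_SIZE, &va) == NvSuccess)
 *			memset(va, 0, PAGE_SIZE);
 *		NvOsPageFree(h);
 *	}
 */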