Chromium Code Reviews

Side by Side Diff: arch/arm/mach-tegra/nv/nvmap.c

Issue 3256004: [ARM] tegra: add nvos/nvrm/nvmap drivers (Closed) Base URL: ssh://git@gitrw.chromium.org/kernel.git
Patch Set: remove ap15 headers Created 10 years, 3 months ago
1 /*
2 * drivers/char/nvmap.c
3 *
4 * Memory manager for Tegra GPU memory handles
5 *
6 * Copyright (c) 2009-2010, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23 #include <linux/vmalloc.h>
24 #include <linux/module.h>
25 #include <linux/bitmap.h>
26 #include <linux/wait.h>
27 #include <linux/miscdevice.h>
28 #include <linux/platform_device.h>
29 #include <linux/mm.h>
30 #include <linux/mman.h>
31 #include <linux/uaccess.h>
32 #include <linux/backing-dev.h>
33 #include <linux/device.h>
34 #include <linux/highmem.h>
35 #include <linux/smp_lock.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched.h>
38 #include <linux/io.h>
39 #include <linux/rbtree.h>
40 #include <linux/proc_fs.h>
41 #include <linux/ctype.h>
42 #include <asm/tlbflush.h>
43 #include <mach/iovmm.h>
44 #include "linux/nvmem_ioctl.h"
45 #include "nvcommon.h"
46 #include "nvrm_memmgr.h"
47 #include "nvbootargs.h"
48
49 #include <linux/dma-mapping.h>
50 #include "asm/cacheflush.h"
51
52
53 #define NVMAP_BASE (VMALLOC_END + SZ_2M)
54 #define NVMAP_SIZE SZ_2M
55
56 #define L_PTE_MT_INNER_WB (0x05 << 2) /* 0101 (armv6, armv7) */
57 #define pgprot_inner_writeback(prot) \
58 __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)
59
60 static void smp_dma_clean_range(const void *start, const void *end)
61 {
62 dmac_map_area(start, end - start, DMA_TO_DEVICE);
63 }
64
65 static void smp_dma_inv_range(const void *start, const void *end)
66 {
67 dmac_unmap_area(start, end - start, DMA_FROM_DEVICE);
68 }
69
70 static void smp_dma_flush_range(const void *start, const void *end)
71 {
72 dmac_flush_range(start, end);
73 }
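/* Thin wrappers that give nvmap single-range cache maintenance on top of
 * the ARM DMA primitives: dmac_map_area(..., DMA_TO_DEVICE) cleans (writes
 * back) the range so a device sees current data, dmac_unmap_area(...,
 * DMA_FROM_DEVICE) invalidates it so the CPU re-reads device-written data,
 * and dmac_flush_range() does both. Illustrative use only, assuming kaddr
 * and len describe a kernel-mapped region of a handle:
 *
 *	smp_dma_clean_range(kaddr, kaddr + len);  before the device reads
 *	smp_dma_inv_range(kaddr, kaddr + len);    after the device writes
 */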
74
75 int nvmap_add_carveout_heap(unsigned long base, size_t size,
76 const char *name, unsigned int bitmask);
77
78
79 /*#define IOVMM_FIRST*/ /* enable to force most allocations from iovmm */
80
81 static void nvmap_vma_open(struct vm_area_struct *vma);
82
83 static void nvmap_vma_close(struct vm_area_struct *vma);
84
85 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
86
87 static int nvmap_open(struct inode *inode, struct file *filp);
88
89 static int nvmap_release(struct inode *inode, struct file *file);
90
91 static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma);
92
93 static long nvmap_ioctl(struct file *filp,
94 unsigned int cmd, unsigned long arg);
95
96 static int nvmap_ioctl_getid(struct file *filp, void __user *arg);
97
98 static int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
99
100 static int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
101
102 static int nvmap_ioctl_free(struct file *filp, unsigned long arg);
103
104 static int nvmap_ioctl_create(struct file *filp,
105 unsigned int cmd, void __user *arg);
106
107 static int nvmap_ioctl_pinop(struct file *filp,
108 bool is_pin, void __user *arg);
109
110 static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
111
112 static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
113
114 static int nvmap_ioctl_rw_handle(struct file *filp, int is_read,
115 void __user* arg);
116
117 extern void NvRmPrivMemIncrRef(NvRmMemHandle hmem);
118
119 static struct backing_dev_info nvmap_bdi = {
120 .ra_pages = 0,
121 .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
122 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
123 };
124
125
126 #define NVMAP_PTE_OFFSET(x) (((unsigned long)(x) - NVMAP_BASE) >> PAGE_SHIFT)
127 #define NVMAP_PTE_INDEX(x) (((unsigned long)(x) - NVMAP_BASE)>>PGDIR_SHIFT)
128 #define NUM_NVMAP_PTES (NVMAP_SIZE >> PGDIR_SHIFT)
129 #define NVMAP_END (NVMAP_BASE + NVMAP_SIZE)
130 #define NVMAP_PAGES (NVMAP_SIZE >> PAGE_SHIFT)
131
132 static pte_t *nvmap_pte[NUM_NVMAP_PTES];
133 static unsigned long nvmap_ptebits[NVMAP_PAGES/BITS_PER_LONG];
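/* Size sketch, assuming the usual ARM layout of 4 KiB pages (PAGE_SHIFT
 * == 12) and 2 MiB first-level entries (PGDIR_SHIFT == 21): NVMAP_SIZE is
 * 2 MiB, so NVMAP_PAGES == 512, NUM_NVMAP_PTES == 1 (a single PTE table
 * covers the whole window) and nvmap_ptebits needs 512 / 32 == 16 longs
 * on a 32-bit kernel. */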
134
135 static DEFINE_SPINLOCK(nvmap_ptelock);
136 static DECLARE_WAIT_QUEUE_HEAD(nvmap_ptefull);
137
138 /* used to lock the master tree of memory handles */
139 static DEFINE_SPINLOCK(nvmap_handle_lock);
140
141 /* only one task may be performing pin / unpin operations at once, to
142 * prevent deadlocks caused by interleaved IOVMM re-allocations */
143 static DEFINE_MUTEX(nvmap_pin_lock);
144
145 /* queue of tasks which are blocking on pin, for IOVMM room */
146 static DECLARE_WAIT_QUEUE_HEAD(nvmap_pin_wait);
147 static struct rb_root nvmap_handles = RB_ROOT;
148
149 static struct tegra_iovmm_client *nvmap_vm_client = NULL;
150
151 /* first-fit linear allocator carveout heap manager */
152 struct nvmap_mem_block {
153 unsigned long base;
154 size_t size;
155 short next; /* next absolute (address-order) block */
156 short prev; /* previous absolute (address-order) block */
157 short next_free;
158 short prev_free;
159 };
160
161 struct nvmap_carveout {
162 unsigned short num_blocks;
163 short spare_index;
164 short free_index;
165 short block_index;
166 spinlock_t lock;
167 const char *name;
168 struct nvmap_mem_block *blocks;
169 };
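/* The carveout allocator stores all block descriptors in one array and
 * links them with short indices instead of pointers, using -1 as the NULL
 * sentinel: next/prev form the address-ordered list, next_free/prev_free
 * the free list, and retired descriptors sit on a spare list threaded
 * through next/prev. Index-based links let nvmap_grow_blocks() swap in a
 * larger array (kzalloc + memcpy + kfree) without fixing up a single link. */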
170
171 enum {
172 CARVEOUT_STAT_TOTAL_SIZE,
173 CARVEOUT_STAT_FREE_SIZE,
174 CARVEOUT_STAT_NUM_BLOCKS,
175 CARVEOUT_STAT_FREE_BLOCKS,
176 CARVEOUT_STAT_LARGEST_BLOCK,
177 CARVEOUT_STAT_LARGEST_FREE,
178 CARVEOUT_STAT_BASE,
179 };
180
181 static inline pgprot_t _nvmap_flag_to_pgprot(unsigned long flag, pgprot_t base)
182 {
183 switch (flag) {
184 case NVMEM_HANDLE_UNCACHEABLE:
185 base = pgprot_noncached(base);
186 break;
187 case NVMEM_HANDLE_WRITE_COMBINE:
188 base = pgprot_writecombine(base);
189 break;
190 case NVMEM_HANDLE_INNER_CACHEABLE:
191 base = pgprot_inner_writeback(base);
192 break;
193 }
194 return base;
195 }
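/* Illustrative only: the allocation and mapping paths below feed a
 * handle's cache flag through this helper when building kernel or IOVMM
 * mappings, e.g.
 *
 *	pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
 *
 * Any other flag value (the fully-cacheable case) falls through the
 * switch and leaves the base protection untouched. */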
196
197 static unsigned long _nvmap_carveout_blockstat(struct nvmap_carveout *co,
198 int stat)
199 {
200 unsigned long val = 0;
201 short idx;
202 spin_lock(&co->lock);
203
204 if (stat==CARVEOUT_STAT_BASE) {
205 if (co->block_index==-1)
206 val = ~0;
207 else
208 val = co->blocks[co->block_index].base;
209 spin_unlock(&co->lock);
210 return val;
211 }
212
213 if (stat==CARVEOUT_STAT_TOTAL_SIZE ||
214 stat==CARVEOUT_STAT_NUM_BLOCKS ||
215 stat==CARVEOUT_STAT_LARGEST_BLOCK)
216 idx = co->block_index;
217 else
218 idx = co->free_index;
219
220 while (idx!=-1) {
221 switch (stat) {
222 case CARVEOUT_STAT_TOTAL_SIZE:
223 val += co->blocks[idx].size;
224 idx = co->blocks[idx].next;
225 break;
226 case CARVEOUT_STAT_NUM_BLOCKS:
227 val++;
228 idx = co->blocks[idx].next;
229 break;
230 case CARVEOUT_STAT_LARGEST_BLOCK:
231 val = max_t(unsigned long, val, co->blocks[idx].size);
232 idx = co->blocks[idx].next;
233 break;
234 case CARVEOUT_STAT_FREE_SIZE:
235 val += co->blocks[idx].size;
236 idx = co->blocks[idx].next_free;
237 break;
238 case CARVEOUT_STAT_FREE_BLOCKS:
239 val++;
240 idx = co->blocks[idx].next_free;
241 break;
242 case CARVEOUT_STAT_LARGEST_FREE:
243 val = max_t(unsigned long, val, co->blocks[idx].size);
244 idx = co->blocks[idx].next_free;
245 break;
246 }
247 }
248
249 spin_unlock(&co->lock);
250 return val;
251 }
252
253 #define co_is_free(_co, _idx) \
254 ((_co)->free_index==(_idx) || ((_co)->blocks[(_idx)].prev_free!=-1))
255
256 static int _nvmap_init_carveout(struct nvmap_carveout *co,
257 const char *name, unsigned long base_address, size_t len)
258 {
259 const unsigned int min_blocks = 16;
260 struct nvmap_mem_block *blocks = NULL;
261 int i;
262
263 blocks = kzalloc(sizeof(*blocks)*min_blocks, GFP_KERNEL);
264
265 if (!blocks) goto fail;
266 co->name = kstrdup(name, GFP_KERNEL);
267 if (!co->name) goto fail;
268
269 for (i=1; i<min_blocks; i++) {
270 blocks[i].next = i+1;
271 blocks[i].prev = i-1;
272 blocks[i].next_free = -1;
273 blocks[i].prev_free = -1;
274 }
275 blocks[i-1].next = -1;
276 blocks[1].prev = -1;
277
278 blocks[0].next = blocks[0].prev = -1;
279 blocks[0].next_free = blocks[0].prev_free = -1;
280 blocks[0].base = base_address;
281 blocks[0].size = len;
282 co->blocks = blocks;
283 co->num_blocks = min_blocks;
284 spin_lock_init(&co->lock);
285 co->block_index = 0;
286 co->spare_index = 1;
287 co->free_index = 0;
288 return 0;
289
290 fail:
291 if (blocks) kfree(blocks);
292 return -ENOMEM;
293 }
294
295 static int nvmap_grow_blocks(struct nvmap_carveout *co)
296 {
297 struct nvmap_mem_block *blocks;
298 unsigned int i;
299
300 if (co->num_blocks >= 1<<(8*sizeof(co->free_index)-1)) return -ENOMEM;
301 blocks = kzalloc(sizeof(*blocks)*(co->num_blocks*2), GFP_ATOMIC);
302 if (!blocks) {
303 pr_err("NV: %s alloc failed\n", __func__);
304 return -ENOMEM;
305 }
306
307 memcpy(blocks, co->blocks, sizeof(*blocks)*(co->num_blocks));
308 kfree(co->blocks);
309 co->blocks = blocks;
310 for (i=co->num_blocks; i<co->num_blocks*2; i++) {
311 blocks[i].next = i+1;
312 blocks[i].prev = i-1;
313 blocks[i].next_free = -1;
314 blocks[i].prev_free = -1;
315 }
316 blocks[co->num_blocks].prev = -1;
317 blocks[i-1].next = -1;
318 co->spare_index = co->num_blocks;
319 co->num_blocks *= 2;
320 return 0;
321 }
322
323 static int nvmap_get_spare(struct nvmap_carveout *co) {
324 int idx;
325
326 if (co->spare_index == -1)
327 if (nvmap_grow_blocks(co))
328 return -1;
329
330 BUG_ON(co->spare_index == -1);
331 idx = co->spare_index;
332 co->spare_index = co->blocks[idx].next;
333 co->blocks[idx].next = -1;
334 co->blocks[idx].prev = -1;
335 co->blocks[idx].next_free = -1;
336 co->blocks[idx].prev_free = -1;
337 return idx;
338 }
339
340 #define BLOCK(_co, _idx) ((_idx)==-1 ? NULL : &(_co)->blocks[(_idx)])
341
342 static void nvmap_zap_free(struct nvmap_carveout *co, int idx)
343 {
344 struct nvmap_mem_block *block;
345
346 block = BLOCK(co, idx);
347 if (block->prev_free != -1)
348 BLOCK(co, block->prev_free)->next_free = block->next_free;
349 else
350 co->free_index = block->next_free;
351
352 if (block->next_free != -1)
353 BLOCK(co, block->next_free)->prev_free = block->prev_free;
354
355 block->prev_free = -1;
356 block->next_free = -1;
357 }
358
359 static void nvmap_split_block(struct nvmap_carveout *co,
360 int idx, size_t start, size_t size)
361 {
362 if (BLOCK(co, idx)->base < start) {
363 int spare_idx = nvmap_get_spare(co);
364 struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
365 struct nvmap_mem_block *block = BLOCK(co, idx);
366 if (spare) {
367 spare->size = start - block->base;
368 spare->base = block->base;
369 block->size -= (start - block->base);
370 block->base = start;
371 spare->next = idx;
372 spare->prev = block->prev;
373 block->prev = spare_idx;
374 if (spare->prev != -1)
375 co->blocks[spare->prev].next = spare_idx;
376 else
377 co->block_index = spare_idx;
378 spare->prev_free = -1;
379 spare->next_free = co->free_index;
380 if (co->free_index != -1)
381 co->blocks[co->free_index].prev_free = spare_idx;
382 co->free_index = spare_idx;
383 } else {
384 if (block->prev != -1) {
385 spare = BLOCK(co, block->prev);
386 spare->size += start - block->base;
387 block->base = start;
388 }
389 }
390 }
391
392 if (BLOCK(co, idx)->size > size) {
393 int spare_idx = nvmap_get_spare(co);
394 struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
395 struct nvmap_mem_block *block = BLOCK(co, idx);
396 if (spare) {
397 spare->base = block->base + size;
398 spare->size = block->size - size;
399 block->size = size;
400 spare->prev = idx;
401 spare->next = block->next;
402 block->next = spare_idx;
403 if (spare->next != -1)
404 co->blocks[spare->next].prev = spare_idx;
405 spare->prev_free = -1;
406 spare->next_free = co->free_index;
407 if (co->free_index != -1)
408 co->blocks[co->free_index].prev_free = spare_idx;
409 co->free_index = spare_idx;
410 }
411 }
412
413 nvmap_zap_free(co, idx);
414 }
415
416 #define next_spare next
417 #define prev_spare prev
418
419 #define nvmap_insert_block(_list, _co, _idx) \
420 do { \
421 struct nvmap_mem_block *b = BLOCK((_co), (_idx)); \
422 struct nvmap_mem_block *s = BLOCK((_co), (_co)->_list##_index);\
423 if (s) s->prev_##_list = (_idx); \
424 b->prev_##_list = -1; \
425 b->next_##_list = (_co)->_list##_index; \
426 (_co)->_list##_index = (_idx); \
427 } while (0);
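/* The next_spare/prev_spare aliases above let one macro service both
 * lists: nvmap_insert_block(free, co, idx) pushes a block onto the free
 * list, while nvmap_insert_block(spare, co, idx) pushes a retired
 * descriptor onto the spare list by reusing the next/prev fields. Both
 * are head insertions. */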
428
429 static void nvmap_carveout_free(struct nvmap_carveout *co, int idx)
430 {
431 struct nvmap_mem_block *b;
432
433 spin_lock(&co->lock);
434
435 b = BLOCK(co, idx);
436
437 if (b->next!=-1 && co_is_free(co, b->next)) {
438 int zap = b->next;
439 struct nvmap_mem_block *n = BLOCK(co, zap);
440 b->size += n->size;
441
442 b->next = n->next;
443 if (n->next != -1) co->blocks[n->next].prev = idx;
444
445 nvmap_zap_free(co, zap);
446 nvmap_insert_block(spare, co, zap);
447 }
448
449 if (b->prev!=-1 && co_is_free(co, b->prev)) {
450 int zap = b->prev;
451 struct nvmap_mem_block *p = BLOCK(co, zap);
452
453 b->base = p->base;
454 b->size += p->size;
455
456 b->prev = p->prev;
457
458 if (p->prev != -1) co->blocks[p->prev].next = idx;
459 else co->block_index = idx;
460
461 nvmap_zap_free(co, zap);
462 nvmap_insert_block(spare, co, zap);
463 }
464
465 nvmap_insert_block(free, co, idx);
466 spin_unlock(&co->lock);
467 }
468
469 static int nvmap_carveout_alloc(struct nvmap_carveout *co,
470 size_t align, size_t size)
471 {
472 short idx;
473
474 spin_lock(&co->lock);
475
476 idx = co->free_index;
477
478 while (idx != -1) {
479 struct nvmap_mem_block *b = BLOCK(co, idx);
480 /* try to be a bit more clever about generating block-
481 * droppings by comparing the results of a left-justified vs
482 * right-justified block split, and choosing the
483 * justification style which yields the largest remaining
484 * block */
485 size_t end = b->base + b->size;
486 size_t ljust = (b->base + align - 1) & ~(align-1);
487 size_t rjust = (end - size) & ~(align-1);
488 size_t l_max, r_max;
489
490 if (rjust < b->base) rjust = ljust;
491 l_max = max_t(size_t, ljust - b->base, end - (ljust + size));
492 r_max = max_t(size_t, rjust - b->base, end - (rjust + size));
493
494 if (b->base + b->size >= ljust + size) {
495 if (l_max >= r_max)
496 nvmap_split_block(co, idx, ljust, size);
497 else
498 nvmap_split_block(co, idx, rjust, size);
499 break;
500 }
501 idx = b->next_free;
502 }
503
504 spin_unlock(&co->lock);
505 return idx;
506 }
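/* Worked example of the split heuristic above, with hypothetical numbers:
 * a free block at base 0x10400 of size 0x3000 (end 0x13400) and a request
 * of size 0x1000 aligned to 0x1000. ljust == 0x11000 leaves fragments of
 * 0xc00 and 0x1400; rjust == 0x12000 leaves 0x1c00 and 0x400. r_max
 * (0x1c00) beats l_max (0x1400), so the right-justified split is chosen,
 * maximizing the largest leftover fragment. */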
507
508 #undef next_spare
509 #undef prev_spare
510
511 #define NVDA_POISON (('n'<<24) | ('v'<<16) | ('d'<<8) | ('a'))
512
513 struct nvmap_handle {
514 struct rb_node node;
515 atomic_t ref;
516 atomic_t pin;
517 unsigned long flags;
518 size_t size;
519 size_t orig_size;
520 struct task_struct *owner;
521 unsigned int poison;
522 union {
523 struct {
524 struct page **pages;
525 struct tegra_iovmm_area *area;
526 struct list_head mru_list;
527 bool contig;
528 bool dirty; /* IOVMM area allocated since last pin */
529 } pgalloc;
530 struct {
531 struct nvmap_carveout *co_heap;
532 int block_idx;
533 unsigned long base;
534 unsigned int key; /* preserved by bootloader */
535 } carveout;
536 };
537 bool global;
538 bool secure; /* only allocated in IOVM space, zapped on unpin */
539 bool heap_pgalloc;
540 bool alloc;
541 void *kern_map; /* used for RM memmgr backwards compat */
542 };
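/* Handles are identified by their kernel address (the user-visible id is
 * the pointer value), so lookups go through _nvmap_handle_get(), which
 * checks poison against NVDA_POISON ('nvda'); _nvmap_handle_free()
 * rewrites the field to 0xa5a5a5a5 so stale or forged ids fail loudly. */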
543
544 /* handle_ref objects are file-descriptor-local references to nvmap_handle
545 * objects. they track the number of references and pins performed by
546 * the specific caller (since nvmap_handle objects may be global), so that
547 * a client which terminates without properly unwinding all handles (or
548 * all nested pins) can be unwound by nvmap. */
549 struct nvmap_handle_ref {
550 struct nvmap_handle *h;
551 struct rb_node node;
552 atomic_t refs;
553 atomic_t pin;
554 };
555
556 struct nvmap_file_priv {
557 struct rb_root handle_refs;
558 atomic_t iovm_commit;
559 size_t iovm_limit;
560 spinlock_t ref_lock;
561 bool su;
562 };
563
564 struct nvmap_carveout_node {
565 struct device dev;
566 struct list_head heap_list;
567 unsigned int heap_bit;
568 struct nvmap_carveout carveout;
569 };
570
571 /* the master structure for all nvmap-managed carveouts and all handle_ref
572 * objects allocated inside the kernel. heaps are sorted by their heap_bit
573 * (highest heap_bit first) so that carveout allocation will be first
574 * attempted by the heap with the highest heap_bit set in the allocation's
575 * heap mask */
576 static struct {
577 struct nvmap_file_priv init_data;
578 struct rw_semaphore list_sem;
579 struct list_head heaps;
580 } nvmap_context;
581
582 static struct vm_operations_struct nvmap_vma_ops = {
583 .open = nvmap_vma_open,
584 .close = nvmap_vma_close,
585 .fault = nvmap_vma_fault,
586 };
587
588 const struct file_operations nvmap_fops = {
589 .owner = THIS_MODULE,
590 .open = nvmap_open,
591 .release = nvmap_release,
592 .unlocked_ioctl = nvmap_ioctl,
593 .mmap = nvmap_mmap
594 };
595
596 const struct file_operations knvmap_fops = {
597 .owner = THIS_MODULE,
598 .open = nvmap_open,
599 .release = nvmap_release,
600 .unlocked_ioctl = nvmap_ioctl,
601 .mmap = nvmap_mmap
602 };
603
604 struct nvmap_vma_priv {
605 struct nvmap_handle *h;
606 size_t offs;
607 atomic_t ref;
608 };
609
610 static struct proc_dir_entry *nvmap_procfs_root;
611 static struct proc_dir_entry *nvmap_procfs_proc;
612
613 static void _nvmap_handle_free(struct nvmap_handle *h);
614
615 #define NVMAP_CARVEOUT_ATTR_RO(_name) \
616 struct device_attribute nvmap_heap_attr_##_name = \
617 __ATTR(_name, S_IRUGO, _nvmap_sysfs_show_heap_##_name, NULL)
618
619 #define NVMAP_CARVEOUT_ATTR_WO(_name, _mode) \
620 struct device_attribute nvmap_heap_attr_##_name = \
621 __ATTR(_name, _mode, NULL, _nvmap_sysfs_set_heap_##_name)
622
623
624 static ssize_t _nvmap_sysfs_show_heap_usage(struct device *d,
625 struct device_attribute *attr, char *buf)
626 {
627 struct nvmap_carveout_node *c = container_of(d,
628 struct nvmap_carveout_node, dev);
629 return sprintf(buf, "%08x\n", c->heap_bit);
630 }
631
632 static ssize_t _nvmap_sysfs_show_heap_name(struct device *d,
633 struct device_attribute *attr, char *buf)
634 {
635 struct nvmap_carveout_node *c = container_of(d,
636 struct nvmap_carveout_node, dev);
637 return sprintf(buf, "%s\n", c->carveout.name);
638 }
639
640 static ssize_t _nvmap_sysfs_show_heap_base(struct device *d,
641 struct device_attribute *attr, char *buf)
642 {
643 struct nvmap_carveout_node *c = container_of(d,
644 struct nvmap_carveout_node, dev);
645 return sprintf(buf, "%08lx\n",
646 _nvmap_carveout_blockstat(&c->carveout, CARVEOUT_STAT_BASE));
647 }
648
649 static ssize_t _nvmap_sysfs_show_heap_free_size(struct device *d,
650 struct device_attribute *attr, char *buf)
651 {
652 struct nvmap_carveout_node *c = container_of(d,
653 struct nvmap_carveout_node, dev);
654 return sprintf(buf, "%lu\n",
655 _nvmap_carveout_blockstat(&c->carveout,
656 CARVEOUT_STAT_FREE_SIZE));
657 }
658
659 static ssize_t _nvmap_sysfs_show_heap_free_count(struct device *d,
660 struct device_attribute *attr, char *buf)
661 {
662 struct nvmap_carveout_node *c = container_of(d,
663 struct nvmap_carveout_node, dev);
664 return sprintf(buf, "%lu\n",
665 _nvmap_carveout_blockstat(&c->carveout,
666 CARVEOUT_STAT_FREE_BLOCKS));
667 }
668
669 static ssize_t _nvmap_sysfs_show_heap_free_max(struct device *d,
670 struct device_attribute *attr, char *buf)
671 {
672 struct nvmap_carveout_node *c = container_of(d,
673 struct nvmap_carveout_node, dev);
674 return sprintf(buf, "%lu\n",
675 _nvmap_carveout_blockstat(&c->carveout,
676 CARVEOUT_STAT_LARGEST_FREE));
677 }
678
679 static ssize_t _nvmap_sysfs_show_heap_total_count(struct device *d,
680 struct device_attribute *attr, char *buf)
681 {
682 struct nvmap_carveout_node *c = container_of(d,
683 struct nvmap_carveout_node, dev);
684 return sprintf(buf, "%lu\n",
685 _nvmap_carveout_blockstat(&c->carveout,
686 CARVEOUT_STAT_NUM_BLOCKS));
687 }
688
689 static ssize_t _nvmap_sysfs_show_heap_total_max(struct device *d,
690 struct device_attribute *attr, char *buf)
691 {
692 struct nvmap_carveout_node *c = container_of(d,
693 struct nvmap_carveout_node, dev);
694 return sprintf(buf, "%lu\n",
695 _nvmap_carveout_blockstat(&c->carveout,
696 CARVEOUT_STAT_LARGEST_BLOCK));
697 }
698
699 static ssize_t _nvmap_sysfs_show_heap_total_size(struct device *d,
700 struct device_attribute *attr, char *buf)
701 {
702 struct nvmap_carveout_node *c = container_of(d,
703 struct nvmap_carveout_node, dev);
704 return sprintf(buf, "%lu\n",
705 _nvmap_carveout_blockstat(&c->carveout,
706 CARVEOUT_STAT_TOTAL_SIZE));
707 }
708
709 static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
710 const char *name, unsigned int new_bitmask);
711
712 static ssize_t _nvmap_sysfs_set_heap_split(struct device *d,
713 struct device_attribute *attr, const char * buf, size_t count)
714 {
715 struct nvmap_carveout_node *c = container_of(d,
716 struct nvmap_carveout_node, dev);
717 char *tmp, *local = kzalloc(count+1, GFP_KERNEL);
718 char *sizestr = NULL, *bitmaskstr = NULL, *name = NULL;
719 char **format[] = { &sizestr, &bitmaskstr, &name };
720 char ***f_iter = format;
721 unsigned int i;
722 unsigned long size, bitmask;
723 int err;
724
725 if (!local) {
726 pr_err("%s: unable to read string\n", __func__);
727 return -ENOMEM;
728 }
729
730 memcpy(local, buf, count);
731 tmp = local;
732 for (i=0, **f_iter = local; i<count &&
733 (f_iter - format)<ARRAY_SIZE(format)-1; i++) {
734 if (local[i]==',') {
735 local[i] = '\0';
736 f_iter++;
737 **f_iter = &local[i+1];
738 }
739 }
740
741 if (!sizestr || !bitmaskstr || !name) {
742 pr_err("%s: format error\n", __func__);
743 kfree(tmp);
744 return -EINVAL;
745 }
746
747 for (local=name; !isspace(*local); local++);
748
749 if (local==name) {
750 pr_err("%s: invalid name %s\n", __func__, name);
751 kfree(tmp);
752 return -EINVAL;
753 }
754
755 *local=0;
756
757 size = memparse(sizestr, &sizestr);
758 if (!size) {
759 kfree(tmp);
760 return -EINVAL;
761 }
762
763 if (strict_strtoul(bitmaskstr, 0, &bitmask)==-EINVAL) {
764 kfree(tmp);
765 return -EINVAL;
766 }
767
768 err = nvmap_split_carveout_heap(&c->carveout, size, name, bitmask);
769
770 if (err) pr_err("%s: failed to create split heap %s\n", __func__, name);
771 kfree(tmp);
772 return err ? err : count;
773 }
774
775 static NVMAP_CARVEOUT_ATTR_RO(usage);
776 static NVMAP_CARVEOUT_ATTR_RO(name);
777 static NVMAP_CARVEOUT_ATTR_RO(base);
778 static NVMAP_CARVEOUT_ATTR_RO(free_size);
779 static NVMAP_CARVEOUT_ATTR_RO(free_count);
780 static NVMAP_CARVEOUT_ATTR_RO(free_max);
781 static NVMAP_CARVEOUT_ATTR_RO(total_size);
782 static NVMAP_CARVEOUT_ATTR_RO(total_count);
783 static NVMAP_CARVEOUT_ATTR_RO(total_max);
784 static NVMAP_CARVEOUT_ATTR_WO(split, (S_IWUSR | S_IWGRP));
785
786 static struct attribute *nvmap_heap_default_attrs[] = {
787 &nvmap_heap_attr_usage.attr,
788 &nvmap_heap_attr_name.attr,
789 &nvmap_heap_attr_split.attr,
790 &nvmap_heap_attr_base.attr,
791 &nvmap_heap_attr_total_size.attr,
792 &nvmap_heap_attr_free_size.attr,
793 &nvmap_heap_attr_total_count.attr,
794 &nvmap_heap_attr_free_count.attr,
795 &nvmap_heap_attr_total_max.attr,
796 &nvmap_heap_attr_free_max.attr,
797 NULL
798 };
799
800 static struct attribute_group nvmap_heap_defattr_group = {
801 .attrs = nvmap_heap_default_attrs
802 };
803
804 static struct device *__nvmap_heap_parent_dev(void);
805 #define _nvmap_heap_parent_dev __nvmap_heap_parent_dev()
806
807 /* unpinned I/O VMM areas may be reclaimed by nvmap to make room for
808 * new surfaces. unpinned surfaces are stored in segregated linked-lists
809 * sorted in most-recently-unpinned order (i.e., head insertion, head
810 * removal) */
811 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
812 static DEFINE_SPINLOCK(nvmap_mru_vma_lock);
813 static const size_t nvmap_mru_cutoff[] = {
814 262144, 393216, 786432, 1048576, 1572864
815 };
816
817 static struct list_head nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)];
818
819 static inline struct list_head *_nvmap_list(size_t size)
820 {
821 unsigned int i;
822
823 for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
824 if (size <= nvmap_mru_cutoff[i]) return &nvmap_mru_vma_lists[i];
825
826 return &nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)-1];
827 }
828 #endif
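/* With the cutoffs above (256 KiB, 384 KiB, 768 KiB, 1 MiB, 1.5 MiB), a
 * 300 KiB surface, for example, lands in the 384 KiB list, and anything
 * larger than 1.5 MiB falls back to the last list. Eviction in
 * _nvmap_get_vm() starts from the bin matching the request size and wraps
 * around through the remaining bins. */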
829
830 static inline struct nvmap_handle *_nvmap_handle_get(struct nvmap_handle *h)
831 {
832 if (unlikely(h->poison!=NVDA_POISON)) {
833 pr_err("%s: %s getting poisoned handle\n", __func__,
834 current->group_leader->comm);
835 return NULL;
836 } else if (unlikely(atomic_inc_return(&h->ref)<=1)) {
837 pr_err("%s: %s getting a freed handle\n",
838 __func__, current->group_leader->comm);
839 return NULL;
840 }
841 return h;
842 }
843
844 static inline void _nvmap_handle_put(struct nvmap_handle *h)
845 {
846 int cnt = atomic_dec_return(&h->ref);
847
848 if (unlikely(cnt<0)) {
849 pr_err("%s: %s put to negative references\n",
850 __func__, current->comm);
851 dump_stack();
852 } else if (!cnt) _nvmap_handle_free(h);
853 }
854
855 static struct nvmap_handle *_nvmap_claim_preserved(
856 struct task_struct *new_owner, unsigned long key)
857 {
858 struct rb_node *n;
859 struct nvmap_handle *b = NULL;
860
861 if (!key) return NULL;
862
863 spin_lock(&nvmap_handle_lock);
864 n = rb_first(&nvmap_handles);
865
866 while (n) {
867 b = rb_entry(n, struct nvmap_handle, node);
868 if (b->alloc && !b->heap_pgalloc && b->carveout.key == key) {
869 b->carveout.key = 0;
870 b->owner = new_owner;
871 break;
872 }
873 b = NULL;
874 n = rb_next(n);
875 }
876
877 spin_unlock(&nvmap_handle_lock);
878 return b;
879 }
880
881 static struct nvmap_handle *_nvmap_validate_get(unsigned long handle, bool su)
882 {
883 struct nvmap_handle *b = NULL;
884
885 #ifdef CONFIG_DEVNVMAP_PARANOID
886 struct rb_node *n;
887
888 spin_lock(&nvmap_handle_lock);
889
890 n = nvmap_handles.rb_node;
891
892 while (n) {
893 b = rb_entry(n, struct nvmap_handle, node);
894 if ((unsigned long)b == handle) {
895 if (su || b->global || b->owner==current->group_leader)
896 b = _nvmap_handle_get(b);
897 else
898 b = NULL;
899 spin_unlock(&nvmap_handle_lock);
900 return b;
901 }
902 if (handle > (unsigned long)b) n = n->rb_right;
903 else n = n->rb_left;
904 }
905 spin_unlock(&nvmap_handle_lock);
906 return NULL;
907 #else
908 if (!handle) return NULL;
909 b = _nvmap_handle_get((struct nvmap_handle *)handle);
910 return b;
911 #endif
912 }
913
914 static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
915 {
916 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
917 spin_lock(&nvmap_mru_vma_lock);
918 list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length) );
919 spin_unlock(&nvmap_mru_vma_lock);
920 #endif
921 }
922
923 static void _nvmap_remove_mru_vma(struct nvmap_handle *h)
924 {
925 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
926 spin_lock(&nvmap_mru_vma_lock);
927 if (!list_empty(&h->pgalloc.mru_list))
928 list_del(&h->pgalloc.mru_list);
929 spin_unlock(&nvmap_mru_vma_lock);
930 INIT_LIST_HEAD(&h->pgalloc.mru_list);
931 #endif
932 }
933
934 static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
935 {
936 #ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
937 BUG_ON(!h->pgalloc.area);
938 BUG_ON(h->size > h->pgalloc.area->iovm_length);
939 BUG_ON((h->size | h->pgalloc.area->iovm_length) & ~PAGE_MASK);
940 return h->pgalloc.area;
941 #else
942 struct list_head *mru;
943 struct nvmap_handle *evict = NULL;
944 struct tegra_iovmm_area *vm = NULL;
945 unsigned int i, idx;
946
947 spin_lock(&nvmap_mru_vma_lock);
948
949 if (h->pgalloc.area) {
950 BUG_ON(list_empty(&h->pgalloc.mru_list));
951 list_del(&h->pgalloc.mru_list);
952 INIT_LIST_HEAD(&h->pgalloc.mru_list);
953 spin_unlock(&nvmap_mru_vma_lock);
954 return h->pgalloc.area;
955 }
956
957 vm = tegra_iovmm_create_vm(nvmap_vm_client, NULL, h->size,
958 _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
959
960 if (vm) {
961 INIT_LIST_HEAD(&h->pgalloc.mru_list);
962 spin_unlock(&nvmap_mru_vma_lock);
963 return vm;
964 }
965 /* attempt to re-use the most recently unpinned IOVMM area in the
966 * same size bin as the current handle. If that fails, iteratively
967 * evict handles (starting from the current bin) until an allocation
968 * succeeds or no more areas can be evicted */
969
970 mru = _nvmap_list(h->size);
971 if (!list_empty(mru))
972 evict = list_first_entry(mru, struct nvmap_handle,
973 pgalloc.mru_list);
974 if (evict && evict->pgalloc.area->iovm_length >= h->size) {
975 list_del(&evict->pgalloc.mru_list);
976 vm = evict->pgalloc.area;
977 evict->pgalloc.area = NULL;
978 INIT_LIST_HEAD(&evict->pgalloc.mru_list);
979 spin_unlock(&nvmap_mru_vma_lock);
980 return vm;
981 }
982
983 idx = mru - nvmap_mru_vma_lists;
984
985 for (i=0; i<ARRAY_SIZE(nvmap_mru_vma_lists) && !vm; i++, idx++) {
986 if (idx >= ARRAY_SIZE(nvmap_mru_vma_lists))
987 idx -= ARRAY_SIZE(nvmap_mru_vma_lists);
988 mru = &nvmap_mru_vma_lists[idx];
989 while (!list_empty(mru) && !vm) {
990 evict = list_first_entry(mru, struct nvmap_handle,
991 pgalloc.mru_list);
992
993 BUG_ON(atomic_add_return(0, &evict->pin)!=0);
994 BUG_ON(!evict->pgalloc.area);
995 list_del(&evict->pgalloc.mru_list);
996 INIT_LIST_HEAD(&evict->pgalloc.mru_list);
997 tegra_iovmm_free_vm(evict->pgalloc.area);
998 evict->pgalloc.area = NULL;
999 vm = tegra_iovmm_create_vm(nvmap_vm_client,
1000 NULL, h->size,
1001 _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
1002 }
1003 }
1004
1005 spin_unlock(&nvmap_mru_vma_lock);
1006 return vm;
1007 #endif
1008 }
1009
1010 static int _nvmap_do_cache_maint(struct nvmap_handle *h,
1011 unsigned long start, unsigned long end, unsigned long op, bool get);
1012
1013 void _nvmap_handle_free(struct nvmap_handle *h)
1014 {
1015 int e;
1016 spin_lock(&nvmap_handle_lock);
1017
1018 /* if 2 contexts call _get and _put simultaneously, the reference
1019 * count may drop to 0 and then increase to 1 before the handle
1020 * can be freed. */
1021 if (atomic_add_return(0, &h->ref)>0) {
1022 spin_unlock(&nvmap_handle_lock);
1023 return;
1024 }
1025 smp_rmb();
1026 BUG_ON(atomic_read(&h->ref)<0);
1027 BUG_ON(atomic_read(&h->pin)!=0);
1028
1029 rb_erase(&h->node, &nvmap_handles);
1030
1031 spin_unlock(&nvmap_handle_lock);
1032
1033 if (h->owner) put_task_struct(h->owner);
1034
1035 /* remove when NvRmMemMgr compatibility is eliminated */
1036 if (h->kern_map) {
1037 BUG_ON(!h->alloc);
1038 if (h->heap_pgalloc)
1039 vm_unmap_ram(h->kern_map, h->size>>PAGE_SHIFT);
1040 else {
1041 unsigned long addr = (unsigned long)h->kern_map;
1042 addr &= PAGE_MASK; /* strip the sub-page offset before iounmap */
1043 iounmap((void *)addr);
1044 }
1045 }
1046
1047 /* ensure that no stale data remains in the cache for this handle */
1048 e = _nvmap_do_cache_maint(h, 0, h->size, NVMEM_CACHE_OP_WB_INV, false);
1049
1050 if (h->alloc && !h->heap_pgalloc)
1051 nvmap_carveout_free(h->carveout.co_heap, h->carveout.block_idx);
1052 else if (h->alloc) {
1053 unsigned int i;
1054 BUG_ON(h->size & ~PAGE_MASK);
1055 BUG_ON(!h->pgalloc.pages);
1056 _nvmap_remove_mru_vma(h);
1057 if (h->pgalloc.area) tegra_iovmm_free_vm(h->pgalloc.area);
1058 for (i=0; i<h->size>>PAGE_SHIFT; i++) {
1059 ClearPageReserved(h->pgalloc.pages[i]);
1060 __free_page(h->pgalloc.pages[i]);
1061 }
1062 if ((h->size>>PAGE_SHIFT)*sizeof(struct page*)>=PAGE_SIZE)
1063 vfree(h->pgalloc.pages);
1064 else
1065 kfree(h->pgalloc.pages);
1066 }
1067 h->poison = 0xa5a5a5a5;
1068 kfree(h);
1069 }
1070
1071 #define nvmap_gfp (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
1072
1073 static int _nvmap_alloc_do_coalloc(struct nvmap_handle *h,
1074 struct nvmap_carveout *co, size_t align)
1075 {
1076 int idx;
1077
1078 idx = nvmap_carveout_alloc(co, align, h->size);
1079 if (idx != -1) {
1080 h->alloc = true;
1081 h->heap_pgalloc = false;
1082 h->carveout.co_heap = co;
1083 h->carveout.block_idx = idx;
1084 spin_lock(&co->lock);
1085 h->carveout.base = co->blocks[idx].base;
1086 spin_unlock(&co->lock);
1087 }
1088
1089 return (idx==-1) ? -ENOMEM : 0;
1090 }
1091
1092 /* map the backing pages for a heap_pgalloc handle into its IOVMM area */
1093 static void _nvmap_handle_iovmm_map(struct nvmap_handle *h)
1094 {
1095 tegra_iovmm_addr_t va;
1096 unsigned long i;
1097
1098 BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
1099 BUG_ON(h->size & ~PAGE_MASK);
1100 WARN_ON(!h->pgalloc.dirty);
1101
1102 for (va = h->pgalloc.area->iovm_start, i=0;
1103 va < (h->pgalloc.area->iovm_start + h->size);
1104 i++, va+=PAGE_SIZE) {
1105 BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
1106 tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
1107 page_to_pfn(h->pgalloc.pages[i]));
1108 }
1109 h->pgalloc.dirty = false;
1110 }
1111
1112 static int _nvmap_alloc_do_pgalloc(struct nvmap_handle *h,
1113 bool contiguous, bool secure)
1114 {
1115 unsigned int i = 0, cnt = (h->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1116 struct page **pages;
1117
1118 if (cnt*sizeof(*pages)>=PAGE_SIZE)
1119 pages = vmalloc(cnt*sizeof(*pages));
1120 else
1121 pages = kzalloc(sizeof(*pages)*cnt, GFP_KERNEL);
1122
1123 if (!pages) return -ENOMEM;
1124
1125 if (cnt==1 && !secure) contiguous = true;
1126
1127 /* secure surfaces should only be allocated in discontiguous (IOVM-
1128 * managed) space, so that the mapping can be zapped after it is
1129 * unpinned */
1130 WARN_ON(secure && contiguous);
1131
1132 if (contiguous) {
1133 size_t order = get_order(h->size);
1134 struct page *compound_page;
1135 compound_page = alloc_pages(nvmap_gfp, order);
1136 if (!compound_page) goto fail;
1137 split_page(compound_page, order);
1138 for (i=0; i<cnt; i++)
1139 pages[i] = nth_page(compound_page, i);
1140 for (; i<(1<<order); i++)
1141 __free_page(nth_page(compound_page, i));
1142 } else {
1143 for (i=0; i<cnt; i++) {
1144 pages[i] = alloc_page(nvmap_gfp);
1145 if (!pages[i]) {
1146 pr_err("failed to allocate %u pages after %u entries\n",
1147 cnt, i);
1148 goto fail;
1149 }
1150 }
1151 }
1152
1153 h->pgalloc.area = NULL;
1154 #ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
1155 if (!contiguous) {
1156 h->pgalloc.area = tegra_iovmm_create_vm(nvmap_vm_client,
1157 NULL, cnt << PAGE_SHIFT,
1158 _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
1159 if (!h->pgalloc.area) goto fail;
1160 h->pgalloc.dirty = true;
1161 }
1162 #endif
1163
1164 for (i=0; i<cnt; i++) {
1165 void *km;
1166 SetPageReserved(pages[i]);
1167 km = kmap(pages[i]);
1168 if (km) flush_dcache_page(pages[i]);
1169 outer_flush_range(page_to_phys(pages[i]),
1170 page_to_phys(pages[i])+PAGE_SIZE);
1171 kunmap(pages[i]);
1172 }
1173
1174 h->size = cnt<<PAGE_SHIFT;
1175 h->pgalloc.pages = pages;
1176 h->heap_pgalloc = true;
1177 h->pgalloc.contig = contiguous;
1178 INIT_LIST_HEAD(&h->pgalloc.mru_list);
1179 h->alloc = true;
1180 return 0;
1181
1182 fail:
1183 while (i--) __free_page(pages[i]);
1184 if (pages && (cnt*sizeof(*pages)>=PAGE_SIZE)) vfree(pages);
1185 else if (pages) kfree(pages);
1186 return -ENOMEM;
1187 }
1188
1189 static struct nvmap_handle *_nvmap_handle_create(
1190 struct task_struct *owner, size_t size)
1191 {
1192 struct nvmap_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);
1193 struct nvmap_handle *b;
1194 struct rb_node **p;
1195 struct rb_node *parent = NULL;
1196
1197 if (!h) return NULL;
1198 atomic_set(&h->ref, 1);
1199 atomic_set(&h->pin, 0);
1200 h->owner = owner;
1201 h->size = h->orig_size = size;
1202 h->flags = NVMEM_HANDLE_WRITE_COMBINE;
1203 h->poison = NVDA_POISON;
1204
1205 spin_lock(&nvmap_handle_lock);
1206 p = &nvmap_handles.rb_node;
1207 while (*p) {
1208 parent = *p;
1209 b = rb_entry(parent, struct nvmap_handle, node);
1210 if (h > b) p = &parent->rb_right;
1211 else p = &parent->rb_left;
1212 }
1213 rb_link_node(&h->node, parent, p);
1214 rb_insert_color(&h->node, &nvmap_handles);
1215 spin_unlock(&nvmap_handle_lock);
1216 if (owner) get_task_struct(owner);
1217 return h;
1218 }
1219
1220 /* nvmap pte manager */
1221
1222 static void _nvmap_set_pte_at(unsigned long addr, unsigned long pfn,
1223 pgprot_t prot)
1224 {
1225 u32 off;
1226 int idx;
1227 pte_t *pte;
1228
1229 BUG_ON(!addr);
1230 idx = NVMAP_PTE_INDEX(addr);
1231 off = NVMAP_PTE_OFFSET(addr) & (PTRS_PER_PTE-1);
1232
1233 pte = nvmap_pte[idx] + off;
1234 set_pte_ext(pte, pfn_pte(pfn, prot), 0);
1235 flush_tlb_kernel_page(addr);
1236 }
1237
1238 static int _nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **vaddr)
1239 {
1240 static unsigned int last_bit = 0;
1241 unsigned long bit;
1242 unsigned long addr;
1243 unsigned long flags;
1244
1245 spin_lock_irqsave(&nvmap_ptelock, flags);
1246
1247 bit = find_next_zero_bit(nvmap_ptebits, NVMAP_PAGES, last_bit);
1248 if (bit==NVMAP_PAGES) {
1249 bit = find_first_zero_bit(nvmap_ptebits, last_bit);
1250 if (bit == last_bit) bit = NVMAP_PAGES;
1251 }
1252
1253 if (bit==NVMAP_PAGES) {
1254 spin_unlock_irqrestore(&nvmap_ptelock, flags);
1255 return -ENOMEM;
1256 }
1257
1258 last_bit = bit;
1259 set_bit(bit, nvmap_ptebits);
1260 spin_unlock_irqrestore(&nvmap_ptelock, flags);
1261
1262 addr = NVMAP_BASE + bit*PAGE_SIZE;
1263
1264 _nvmap_set_pte_at(addr, pfn, prot);
1265 *vaddr = (void *)addr;
1266 return 0;
1267 }
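/* Rough sketch of the policy above: last_bit makes the bitmap search
 * next-fit, so successive mappings rotate through the 2 MiB NVMAP_BASE
 * window rather than immediately reusing a just-released slot. When the
 * window is full, _nvmap_map_pte() returns -ENOMEM and nvmap_map_pte()
 * below sleeps on nvmap_ptefull until nvmap_unmap_pte() frees a slot. */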
1268
1269 static int nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **addr)
1270 {
1271 int ret;
1272 ret = wait_event_interruptible(nvmap_ptefull,
1273 !_nvmap_map_pte(pfn, prot, addr));
1274
1275 if (ret==-ERESTARTSYS) return -EINTR;
1276 return ret;
1277 }
1278
1279 static void nvmap_unmap_pte(void *addr)
1280 {
1281 unsigned long bit = NVMAP_PTE_OFFSET(addr);
1282 unsigned long flags;
1283
1284 /* the ptes aren't cleared in this function, since the address isn't
1285 * re-used until it is allocated again by nvmap_map_pte. */
1286 BUG_ON(bit >= NVMAP_PAGES);
1287 spin_lock_irqsave(&nvmap_ptelock, flags);
1288 clear_bit(bit, nvmap_ptebits);
1289 spin_unlock_irqrestore(&nvmap_ptelock, flags);
1290 wake_up(&nvmap_ptefull);
1291 }
1292
1293 /* to ensure that the backing store for the VMA isn't freed while a fork'd
1294 * reference still exists, nvmap_vma_open increments the reference count on
1295 * the handle, and nvmap_vma_close decrements it. alternatively, we could
1296 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
1297 */
1298 static void nvmap_vma_open(struct vm_area_struct *vma)
1299 {
1300 struct nvmap_vma_priv *priv;
1301
1302 priv = vma->vm_private_data;
1303
1304 BUG_ON(!priv);
1305
1306 atomic_inc(&priv->ref);
1307 }
1308
1309 static void nvmap_vma_close(struct vm_area_struct *vma) {
1310 struct nvmap_vma_priv *priv = vma->vm_private_data;
1311
1312 if (priv && !atomic_dec_return(&priv->ref)) {
1313 if (priv->h) _nvmap_handle_put(priv->h);
1314 kfree(priv);
1315 }
1316 vma->vm_private_data = NULL;
1317 }
1318
1319 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1320 {
1321 struct nvmap_vma_priv *priv;
1322 unsigned long offs;
1323
1324 offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
1325 priv = vma->vm_private_data;
1326 if (!priv || !priv->h || !priv->h->alloc)
1327 return VM_FAULT_SIGBUS;
1328
1329 offs += priv->offs;
1330 /* if the VMA was split for some reason, vm_pgoff will be the VMA's
1331 * offset from the original VMA */
1332 offs += (vma->vm_pgoff << PAGE_SHIFT);
1333
1334 if (offs >= priv->h->size)
1335 return VM_FAULT_SIGBUS;
1336
1337 if (!priv->h->heap_pgalloc) {
1338 unsigned long pfn;
1339 BUG_ON(priv->h->carveout.base & ~PAGE_MASK);
1340 pfn = ((priv->h->carveout.base + offs) >> PAGE_SHIFT);
1341 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1342 return VM_FAULT_NOPAGE;
1343 } else {
1344 struct page *page;
1345 offs >>= PAGE_SHIFT;
1346 page = priv->h->pgalloc.pages[offs];
1347 if (page) get_page(page);
1348 vmf->page = page;
1349 return (page) ? 0 : VM_FAULT_SIGBUS;
1350 }
1351 }
1352
1353 static long nvmap_ioctl(struct file *filp,
1354 unsigned int cmd, unsigned long arg)
1355 {
1356 int err = 0;
1357 void __user *uarg = (void __user *)arg;
1358
1359 if (_IOC_TYPE(cmd) != NVMEM_IOC_MAGIC)
1360 return -ENOTTY;
1361
1362 if (_IOC_NR(cmd) > NVMEM_IOC_MAXNR)
1363 return -ENOTTY;
1364
1365 if (_IOC_DIR(cmd) & _IOC_READ)
1366 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
1367 if (_IOC_DIR(cmd) & _IOC_WRITE)
1368 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
1369
1370 if (err)
1371 return -EFAULT;
1372
1373 switch (cmd) {
1374 case NVMEM_IOC_CREATE:
1375 case NVMEM_IOC_CLAIM:
1376 case NVMEM_IOC_FROM_ID:
1377 err = nvmap_ioctl_create(filp, cmd, uarg);
1378 break;
1379
1380 case NVMEM_IOC_GET_ID:
1381 err = nvmap_ioctl_getid(filp, uarg);
1382 break;
1383
1384 case NVMEM_IOC_PARAM:
1385 err = nvmap_ioctl_get_param(filp, uarg);
1386 break;
1387
1388 case NVMEM_IOC_UNPIN_MULT:
1389 case NVMEM_IOC_PIN_MULT:
1390 err = nvmap_ioctl_pinop(filp, cmd==NVMEM_IOC_PIN_MULT, uarg);
1391 break;
1392
1393 case NVMEM_IOC_ALLOC:
1394 err = nvmap_ioctl_alloc(filp, uarg);
1395 break;
1396
1397 case NVMEM_IOC_FREE:
1398 err = nvmap_ioctl_free(filp, arg);
1399 break;
1400
1401 case NVMEM_IOC_MMAP:
1402 err = nvmap_map_into_caller_ptr(filp, uarg);
1403 break;
1404
1405 case NVMEM_IOC_WRITE:
1406 case NVMEM_IOC_READ:
1407 err = nvmap_ioctl_rw_handle(filp, cmd==NVMEM_IOC_READ, uarg);
1408 break;
1409
1410 case NVMEM_IOC_CACHE:
1411 err = nvmap_ioctl_cache_maint(filp, uarg);
1412 break;
1413
1414 default:
1415 return -ENOTTY;
1416 }
1417 return err;
1418 }
1419
1420 /* must be called with the ref_lock held - given a user-space handle ID
1421 * ref, validate that the handle_ref object may be used by the caller */
1422 struct nvmap_handle_ref *_nvmap_ref_lookup_locked(
1423 struct nvmap_file_priv *priv, unsigned long ref)
1424 {
1425 struct rb_node *n = priv->handle_refs.rb_node;
1426
1427 while (n) {
1428 struct nvmap_handle_ref *r;
1429 r = rb_entry(n, struct nvmap_handle_ref, node);
1430 if ((unsigned long)r->h == ref) return r;
1431 else if (ref > (unsigned long)r->h) n = n->rb_right;
1432 else n = n->rb_left;
1433 }
1434
1435 return NULL;
1436 }
1437
1438 /* must be called inside nvmap_pin_lock, to ensure that an entire stream
1439 * of pins will complete without competition from a second stream. returns
1440 * 0 if the pin was successful, -ENOMEM on failure */
1441 static int _nvmap_handle_pin_locked(struct nvmap_handle *h)
1442 {
1443 struct tegra_iovmm_area *area;
1444 BUG_ON(!h->alloc);
1445
1446 h = _nvmap_handle_get(h);
1447 if (!h) return -ENOMEM;
1448
1449 if (atomic_inc_return(&h->pin)==1) {
1450 if (h->heap_pgalloc && !h->pgalloc.contig) {
1451 area = _nvmap_get_vm(h);
1452 if (!area) {
1453 /* no race here, inside the pin mutex */
1454 atomic_dec(&h->pin);
1455 _nvmap_handle_put(h);
1456 return -ENOMEM;
1457 }
1458 if (area != h->pgalloc.area)
1459 h->pgalloc.dirty = true;
1460 h->pgalloc.area = area;
1461 }
1462 }
1463 return 0;
1464 }
1465
1466 /* doesn't need to be called inside nvmap_pin_lock, since this will only
1467 * expand the available VM area */
1468 static int _nvmap_handle_unpin(struct nvmap_handle *h)
1469 {
1470 int ret = 0;
1471
1472 if (atomic_add_return(0, &h->pin)==0) {
1473 pr_err("%s: %s attempting to unpin an unpinned handle\n",
1474 __func__, current->comm);
1475 dump_stack();
1476 return 0;
1477 }
1478
1479 BUG_ON(!h->alloc);
1480 if (!atomic_dec_return(&h->pin)) {
1481 if (h->heap_pgalloc && h->pgalloc.area) {
1482 /* if a secure handle is clean (i.e., mapped into
1483 * IOVMM), it needs to be zapped on unpin. */
1484 if (h->secure && !h->pgalloc.dirty) {
1485 tegra_iovmm_zap_vm(h->pgalloc.area);
1486 h->pgalloc.dirty = true;
1487 }
1488 _nvmap_insert_mru_vma(h);
1489 ret=1;
1490 }
1491 }
1492 _nvmap_handle_put(h);
1493 return ret;
1494 }
1495
1496 /* pin a list of handles, mapping IOVMM areas if needed. may sleep, if
1497 * a handle's IOVMM area has been reclaimed and insufficient IOVMM space
1498 * is available to complete the list pin. no intervening pin operations
1499 * will interrupt this, and no validation is performed on the handles
1500 * that are provided. */
1501 static int _nvmap_handle_pin_fast(unsigned int nr, struct nvmap_handle **h)
1502 {
1503 unsigned int i;
1504 int ret = 0;
1505
1506 mutex_lock(&nvmap_pin_lock);
1507 for (i=0; i<nr && !ret; i++) {
1508 ret = wait_event_interruptible(nvmap_pin_wait,
1509 !_nvmap_handle_pin_locked(h[i]));
1510 }
1511 mutex_unlock(&nvmap_pin_lock);
1512
1513 if (ret) {
1514 int do_wake = 0;
1515 while (i--) do_wake |= _nvmap_handle_unpin(h[i]);
1516 if (do_wake) wake_up(&nvmap_pin_wait);
1517 return -EINTR;
1518 } else {
1519 for (i=0; i<nr; i++)
1520 if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
1521 _nvmap_handle_iovmm_map(h[i]);
1522 }
1523
1524 return 0;
1525 }
1526
1527 static int _nvmap_do_global_unpin(unsigned long ref)
1528 {
1529 struct nvmap_handle *h;
1530 int w;
1531
1532 h = _nvmap_validate_get(ref, true);
1533 if (unlikely(!h)) {
1534 pr_err("%s: %s attempting to unpin non-existent handle\n",
1535 __func__, current->group_leader->comm);
1536 return 0;
1537 }
1538
1539 pr_err("%s: %s unpinning %s's %uB %s handle without local context\n",
1540 __func__, current->group_leader->comm,
1541 (h->owner) ? h->owner->comm : "kernel", h->orig_size,
1542 (h->heap_pgalloc && !h->pgalloc.contig) ? "iovmm" :
1543 (h->heap_pgalloc) ? "sysmem" : "carveout");
1544
1545 w = _nvmap_handle_unpin(h);
1546 _nvmap_handle_put(h);
1547 return w;
1548 }
1549
1550 static void _nvmap_do_unpin(struct nvmap_file_priv *priv,
1551 unsigned int nr, unsigned long *refs)
1552 {
1553 struct nvmap_handle_ref *r;
1554 unsigned int i;
1555 int do_wake = 0;
1556
1557 spin_lock(&priv->ref_lock);
1558 for (i=0; i<nr; i++) {
1559 if (!refs[i]) continue;
1560 r = _nvmap_ref_lookup_locked(priv, refs[i]);
1561 if (unlikely(!r)) {
1562 if (priv->su)
1563 do_wake |= _nvmap_do_global_unpin(refs[i]);
1564 else
1565 pr_err("%s: %s unpinning invalid handle\n",
1566 __func__, current->comm);
1567 } else if (unlikely(!atomic_add_unless(&r->pin, -1, 0)))
1568 pr_err("%s: %s unpinning unpinned handle\n",
1569 __func__, current->comm);
1570 else
1571 do_wake |= _nvmap_handle_unpin(r->h);
1572 }
1573 spin_unlock(&priv->ref_lock);
1574 if (do_wake) wake_up(&nvmap_pin_wait);
1575 }
1576
1577 /* pins a list of handle_ref objects; same conditions apply as to
1578 * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
1579 static int _nvmap_do_pin(struct nvmap_file_priv *priv,
1580 unsigned int nr, unsigned long *refs)
1581 {
1582 int ret = 0;
1583 unsigned int i;
1584 struct nvmap_handle **h = (struct nvmap_handle **)refs;
1585 struct nvmap_handle_ref *r;
1586
1587 /* to optimize for the common case (client provided valid handle
1588 * references and the pin succeeds), increment the handle_ref pin
1589 * count during validation. in error cases, the tree will need to
1590 * be re-walked, since the handle_ref is discarded so that an
1591 * allocation isn't required. if a handle_ref is not found,
1592 * locally validate that the caller has permission to pin the handle;
1593 * handle_refs are not created in this case, so it is possible that
1594 * if the caller crashes after pinning a global handle, the handle
1595 * will be permanently leaked. */
1596 spin_lock(&priv->ref_lock);
1597 for (i=0; i<nr && !ret; i++) {
1598 r = _nvmap_ref_lookup_locked(priv, refs[i]);
1599 if (!r && (!(priv->su || h[i]->global ||
1600 current->group_leader == h[i]->owner)))
1601 ret = -EPERM;
1602 else if (r) atomic_inc(&r->pin);
1603 else {
1604 pr_err("%s: %s pinning %s's %uB handle without "
1605 "local context\n", __func__,
1606 current->group_leader->comm,
1607 h[i]->owner->comm, h[i]->orig_size);
1608 }
1609 }
1610
1611 while (ret && i--) {
1612 r = _nvmap_ref_lookup_locked(priv, refs[i]);
1613 if (r) atomic_dec(&r->pin);
1614 }
1615 spin_unlock(&priv->ref_lock);
1616
1617 if (ret) return ret;
1618
1619 mutex_lock(&nvmap_pin_lock);
1620 for (i=0; i<nr && !ret; i++) {
1621 ret = wait_event_interruptible_timeout(nvmap_pin_wait,
1622 !_nvmap_handle_pin_locked(h[i]), 5);
1623 if (ret >= 0) ret = !ret;
1624 BUG_ON(ret > 0);
1625
1626
1627 }
1628 mutex_unlock(&nvmap_pin_lock);
1629
1630 if (ret) {
1631 int do_wake = 0;
1632 spin_lock(&priv->ref_lock);
1633 while (i--) {
1634 r = _nvmap_ref_lookup_locked(priv, refs[i]);
1635 do_wake |= _nvmap_handle_unpin(h[i]);
1636 if (r) atomic_dec(&r->pin);
1637 }
1638 spin_unlock(&priv->ref_lock);
1639 if (do_wake) wake_up(&nvmap_pin_wait);
1640 return -EINTR;
1641 } else {
1642 for (i=0; i<nr; i++) {
1643 if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
1644 _nvmap_handle_iovmm_map(h[i]);
1645 }
1646 }
1647
1648 return 0;
1649 }
1650
1651 static int nvmap_ioctl_pinop(struct file *filp,
1652 bool is_pin, void __user *arg)
1653 {
1654 struct nvmem_pin_handle op;
1655 struct nvmap_handle *h;
1656 unsigned long on_stack[16];
1657 unsigned long *refs;
1658 unsigned long __user *output;
1659 unsigned int i;
1660 int err;
1661
1662 err = copy_from_user(&op, arg, sizeof(op));
1663 if (err) return -EFAULT;
1664
1665 if (!op.count) return -EINVAL;
1666
1667 if (op.count > 1) {
1668 size_t bytes = op.count * sizeof(unsigned long *);
1669 if (!access_ok(VERIFY_READ, (void *)op.handles, bytes))
1670 return -EPERM;
1671 if (is_pin && op.addr &&
1672 !access_ok(VERIFY_WRITE, (void *)op.addr, bytes))
1673 return -EPERM;
1674
1675 if (op.count <= ARRAY_SIZE(on_stack)) refs = on_stack;
1676 else refs = kzalloc(bytes, GFP_KERNEL);
1677
1678 if (!refs) return -ENOMEM;
1679 err = copy_from_user(refs, (void *)op.handles, bytes) ? -EFAULT : 0;
1680 if (err) goto out;
1681 } else {
1682 refs = on_stack;
1683 on_stack[0] = (unsigned long)op.handles;
1684 }
1685
1686 if (is_pin)
1687 err = _nvmap_do_pin(filp->private_data, op.count, refs);
1688 else
1689 _nvmap_do_unpin(filp->private_data, op.count, refs);
1690
1691 /* skip the output stage on unpin */
1692 if (err || !is_pin) goto out;
1693
1694 /* it is guaranteed that if _nvmap_do_pin returns 0 that
1695 * all of the handle_ref objects are valid, so dereferencing directly
1696 * here is safe */
1697 if (op.count > 1)
1698 output = (unsigned long __user *)op.addr;
1699 else {
1700 struct nvmem_pin_handle __user *tmp = arg;
1701 output = (unsigned long __user *)&(tmp->addr);
1702 }
1703
1704 if (!output) goto out;
1705
1706 for (i=0; i<op.count; i++) {
1707 unsigned long addr;
1708 h = (struct nvmap_handle *)refs[i];
1709 if (h->heap_pgalloc && h->pgalloc.contig)
1710 addr = page_to_phys(h->pgalloc.pages[0]);
1711 else if (h->heap_pgalloc)
1712 addr = h->pgalloc.area->iovm_start;
1713 else
1714 addr = h->carveout.base;
1715
1716 __put_user(addr, &output[i]);
1717 }
1718
1719 out:
1720 if (refs != on_stack) kfree(refs);
1721 return err;
1722 }
1723
1724 static int nvmap_release(struct inode *inode, struct file *filp)
1725 {
1726 struct nvmap_file_priv *priv = filp->private_data;
1727 struct rb_node *n;
1728 struct nvmap_handle_ref *r;
1729 int refs;
1730 int do_wake = 0;
1731 int pins;
1732
1733 if (!priv) return 0;
1734
1735 while ((n = rb_first(&priv->handle_refs))) {
1736 r = rb_entry(n, struct nvmap_handle_ref, node);
1737 rb_erase(&r->node, &priv->handle_refs);
1738 smp_rmb();
1739 pins = atomic_read(&r->pin);
1740 atomic_set(&r->pin, 0);
1741 while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
1742 refs = atomic_read(&r->refs);
1743 if (r->h->alloc && r->h->heap_pgalloc && !r->h->pgalloc.contig)
1744 atomic_sub(r->h->size, &priv->iovm_commit);
1745 while (refs--) _nvmap_handle_put(r->h);
1746 kfree(r);
1747 }
1748 if (do_wake) wake_up(&nvmap_pin_wait);
1749 kfree(priv);
1750 return 0;
1751 }
1752
1753 static int nvmap_open(struct inode *inode, struct file *filp)
1754 {
1755 /* eliminate read, write and llseek support on this node */
1756 struct nvmap_file_priv *priv;
1757 int ret;
1758
1759 /* nvmap doesn't track total number of pinned references, so its
1760 * IOVMM client is always locked. */
1761 if (!nvmap_vm_client) {
1762 mutex_lock(&nvmap_pin_lock);
1763 if (!nvmap_vm_client) {
1764 nvmap_vm_client = tegra_iovmm_alloc_client("gpu", NULL);
1765 if (nvmap_vm_client)
1766 tegra_iovmm_client_lock(nvmap_vm_client);
1767 }
1768 mutex_unlock(&nvmap_pin_lock);
1769 }
1770
1771 ret = nonseekable_open(inode, filp);
1772 if (unlikely(ret))
1773 return ret;
1774
1775 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1776 if (!priv) return -ENOMEM;
1777 priv->handle_refs = RB_ROOT;
1778 priv->su = (filp->f_op == &knvmap_fops);
1779
1780 atomic_set(&priv->iovm_commit, 0);
1781
1782 if (nvmap_vm_client)
1783 priv->iovm_limit = tegra_iovmm_get_vm_size(nvmap_vm_client);
1784 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
1785 /* to prevent fragmentation-caused deadlocks, derate the size of
1786 * the IOVM space to 75% */
1787 priv->iovm_limit >>= 2;
1788 priv->iovm_limit *= 3;
1789 #endif
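/* (iovm_limit >> 2) * 3 leaves 75% of the aperture: a hypothetical
 * 128 MiB IOVMM space, for example, yields a 96 MiB iovm_limit per
 * client. */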
1790
1791 spin_lock_init(&priv->ref_lock);
1792
1793 filp->f_mapping->backing_dev_info = &nvmap_bdi;
1794
1795 filp->private_data = priv;
1796 return 0;
1797 }
1798
1799 static int nvmap_ioctl_getid(struct file *filp, void __user *arg)
1800 {
1801 struct nvmem_create_handle op;
1802 struct nvmap_handle *h = NULL;
1803 int err;
1804
1805 err = copy_from_user(&op, arg, sizeof(op));
1806 if (err) return -EFAULT;
1807
1808 if (!op.handle) return -EINVAL;
1809
1810 h = _nvmap_validate_get((unsigned long)op.handle,
1811 filp->f_op==&knvmap_fops);
1812
1813 if (h) {
1814 op.id = (__u32)h;
1815 /* when the owner of a handle gets its ID, this is treated
1816 * as a granting of the handle for use by other processes.
1817 * however, the super-user is not capable of promoting a
1818 * handle to global status if it was created in another
1819 * process. */
1820 if (current->group_leader == h->owner) h->global = true;
1821
1822 /* getid is not supposed to result in a ref count increase */
1823 _nvmap_handle_put(h);
1824
1825 return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
1826 }
1827 return -EPERM;
1828 }
1829
1830 /* attempts to allocate from either contiguous system memory or IOVMM space */
1831 static int _nvmap_do_page_alloc(struct nvmap_file_priv *priv,
1832 struct nvmap_handle *h, unsigned int heap_mask,
1833 size_t align, bool secure)
1834 {
1835 int ret = -ENOMEM;
1836 size_t page_size = (h->size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
1837 #ifdef IOVMM_FIRST
1838 unsigned int fallback[] = { NVMEM_HEAP_IOVMM, NVMEM_HEAP_SYSMEM, 0 };
1839 #else
1840 unsigned int fallback[] = { NVMEM_HEAP_SYSMEM, NVMEM_HEAP_IOVMM, 0 };
1841 #endif
1842 unsigned int *m = fallback;
1843
1844 /* secure allocations must not be performed from sysmem */
1845 if (secure) heap_mask &= ~NVMEM_HEAP_SYSMEM;
1846
1847 if (align > PAGE_SIZE) return -EINVAL;
1848
1849
1850 while (*m && ret) {
1851 if (heap_mask & NVMEM_HEAP_SYSMEM & *m)
1852 ret = _nvmap_alloc_do_pgalloc(h, true, secure);
1853
1854 else if (heap_mask & NVMEM_HEAP_IOVMM & *m) {
1855 /* increment the committed IOVM space prior to
1856 * allocation, to avoid race conditions with other
1857 * threads simultaneously allocating. this is
1858 * conservative, but guaranteed to work */
1859
1860 int oc;
1861 oc = atomic_add_return(page_size, &priv->iovm_commit);
1862
1863 if (oc <= priv->iovm_limit)
1864 ret = _nvmap_alloc_do_pgalloc(h, false, secure);
1865 else
1866 ret = -ENOMEM;
1867 /* on failure, or when do_pgalloc promotes a non-
1868 * contiguous request into a contiguous request,
1869 * release the committed iovm space */
1870 if (ret || h->pgalloc.contig)
1871 atomic_sub(page_size, &priv->iovm_commit);
1872 }
1873 m++;
1874 }
1875 return ret;
1876 }
1877
1878 /* attempts to allocate from the carveout heaps */
1879 static int _nvmap_do_carveout_alloc(struct nvmap_handle *h,
1880 unsigned int heap_mask, size_t align)
1881 {
1882 int ret = -ENOMEM;
1883 struct nvmap_carveout_node *n;
1884
1885 down_read(&nvmap_context.list_sem);
1886 list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
1887 if (heap_mask & n->heap_bit)
1888 ret = _nvmap_alloc_do_coalloc(h, &n->carveout, align);
1889 if (!ret) break;
1890 }
1891 up_read(&nvmap_context.list_sem);
1892 return ret;
1893 }
1894
1895 static int _nvmap_do_alloc(struct nvmap_file_priv *priv,
1896 unsigned long href, unsigned int heap_mask, size_t align,
1897 unsigned int flags, bool secure, bool carveout_first)
1898 {
1899 int ret = -ENOMEM;
1900 struct nvmap_handle_ref *r;
1901 struct nvmap_handle *h;
1902
1903 if (!href) return -EINVAL;
1904
1905 spin_lock(&priv->ref_lock);
1906 r = _nvmap_ref_lookup_locked(priv, href);
1907 spin_unlock(&priv->ref_lock);
1908
1909 if (!r) return -EPERM;
1910
1911 h = r->h;
1912 if (h->alloc) return 0;
1913 h->flags = flags;
1914
1915 align = max_t(size_t, align, L1_CACHE_BYTES);
1916
1917 if (secure) heap_mask &= ~NVMEM_HEAP_CARVEOUT_MASK;
1918
1919 if (carveout_first || (heap_mask & NVMEM_HEAP_CARVEOUT_IRAM)) {
1920 ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
1921 if (ret) ret = _nvmap_do_page_alloc(priv, h,
1922 heap_mask, align, secure);
1923 } else {
1924 ret = _nvmap_do_page_alloc(priv, h, heap_mask, align, secure);
1925 if (ret) ret = _nvmap_do_carveout_alloc(h, heap_mask, align);
1926 }
1927
1928 BUG_ON((!ret && !h->alloc) || (ret && h->alloc));
1929 return ret;
1930 }
1931
1932 static int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
1933 {
1934 struct nvmem_alloc_handle op;
1935 struct nvmap_file_priv *priv = filp->private_data;
1936 bool secure = false;
1937 #ifdef IOVMM_FIRST
1938 bool carveout_first = false;
1939 #else
1940 bool carveout_first = true;
1941 #endif
1942 int err;
1943
1944 err = copy_from_user(&op, arg, sizeof(op));
1945 if (err) return err;
1946
1947 if (op.align & (op.align-1)) return -EINVAL;
1948
1949 /* user-space handles are aligned to page boundaries, to prevent
1950 * data leakage. */
1951 op.align = max_t(size_t, op.align, PAGE_SIZE);
1952
1953 if (op.flags & NVMEM_HANDLE_SECURE) secure = true;
1954
1955 /* TODO: implement a way to specify carveout-first vs
1956 * carveout-second */
1957 return _nvmap_do_alloc(priv, op.handle, op.heap_mask,
1958 op.align, (op.flags & 0x3), secure, carveout_first);
1959 }
1960
1961 static int _nvmap_do_free(struct nvmap_file_priv *priv, unsigned long href)
1962 {
1963 struct nvmap_handle_ref *r;
1964 struct nvmap_handle *h;
1965 int do_wake = 0;
1966
1967 if (!href) return 0;
1968
1969 spin_lock(&priv->ref_lock);
1970 r = _nvmap_ref_lookup_locked(priv, href);
1971
1972 if (!r) {
1973 spin_unlock(&priv->ref_lock);
1974 pr_err("%s attempting to free unrealized handle\n",
1975 current->group_leader->comm);
1976 return -EPERM;
1977 }
1978
1979 h = r->h;
1980
1981 smp_rmb();
1982 if (!atomic_dec_return(&r->refs)) {
1983 int pins = atomic_read(&r->pin);
1984 rb_erase(&r->node, &priv->handle_refs);
1985 spin_unlock(&priv->ref_lock);
1986 if (pins) pr_err("%s: %s freeing %s's pinned %s %s %uB handle\n",
1987 __func__, current->comm,
1988 (r->h->owner) ? r->h->owner->comm : "kernel",
1989 (r->h->global) ? "global" : "private",
1990 (r->h->alloc && r->h->heap_pgalloc)?"page-alloc" :
1991 (r->h->alloc) ? "carveout" : "unallocated",
1992 r->h->orig_size);
1993 while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
1994 kfree(r);
1995 if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
1996 atomic_sub(h->size, &priv->iovm_commit);
1997 if (do_wake) wake_up(&nvmap_pin_wait);
1998 } else
1999 spin_unlock(&priv->ref_lock);
2000
2001 BUG_ON(!atomic_read(&h->ref));
2002 _nvmap_handle_put(h);
2003 return 0;
2004 }
2005
2006 static int nvmap_ioctl_free(struct file *filp, unsigned long arg)
2007 {
2008 return _nvmap_do_free(filp->private_data, arg);
2009 }
2010
2011 /* given a size, pre-existing handle ID, or a preserved handle key, create
2012 * a handle and a reference to the handle in the per-context data */
2013 static int _nvmap_do_create(struct nvmap_file_priv *priv,
2014 unsigned int cmd, unsigned long key, bool su,
2015 struct nvmap_handle_ref **ref)
2016 {
2017 struct nvmap_handle_ref *r = NULL;
2018 struct nvmap_handle *h = NULL;
2019 struct rb_node **p, *parent = NULL;
2020
2021 if (cmd == NVMEM_IOC_FROM_ID) {
2022 /* only ugly corner case to handle with from ID:
2023 *
2024 * normally, if the handle that is being duplicated is IOVMM-
2025 * backed, the handle should fail to duplicate if duping it
2026 * would over-commit IOVMM space. however, if the handle is
2027 * already duplicated in the client process (or the client
2028 * is duplicating a handle it created originally), IOVMM space
2029 * should not be doubly-reserved.
2030 */
2031 h = _nvmap_validate_get(key, priv->su);
2032
2033 if (!h) {
2034 pr_err("%s: %s duplicate handle failed\n", __func__,
2035 current->group_leader->comm);
2036 return -EPERM;
2037 }
2038
2039 if (!h->alloc) {
2040 pr_err("%s: attempting to clone unallocated "
2041 "handle\n", __func__);
2042 _nvmap_handle_put(h);
2043 h = NULL;
2044 return -EINVAL;
2045 }
2046
2047 spin_lock(&priv->ref_lock);
2048 r = _nvmap_ref_lookup_locked(priv, (unsigned long)h);
2049 spin_unlock(&priv->ref_lock);
2050 if (r) {
2051 /* if the client does something strange, like calling CreateFromId
2052 * when it was the original creator, avoid creating two handle refs
2053 * for the same handle */
2054 atomic_inc(&r->refs);
2055 *ref = r;
2056 return 0;
2057 }
2058
2059 /* verify that adding this handle to the process' access list
2060 * won't exceed the IOVM limit */
2061 /* TODO: [ahatala 2010-04-20] let the kernel over-commit for now */
2062 if (h->heap_pgalloc && !h->pgalloc.contig && !su) {
2063 int oc = atomic_add_return(h->size, &priv->iovm_commit);
2064 if (oc > priv->iovm_limit) {
2065 atomic_sub(h->size, &priv->iovm_commit);
2066 _nvmap_handle_put(h);
2067 h = NULL;
2068 pr_err("%s: %s duplicating handle would "
2069 "over-commit iovmm space (%dB / %dB)\n",
2070 __func__, current->group_leader->comm,
2071 oc, priv->iovm_limit);
2072 return -ENOMEM;
2073 }
2074 }
2075 } else if (cmd == NVMEM_IOC_CREATE) {
2076 h = _nvmap_handle_create(current->group_leader, key);
2077 if (!h) return -ENOMEM;
2078 } else {
2079 h = _nvmap_claim_preserved(current->group_leader, key);
2080 if (!h) return -EINVAL;
2081 }
2082
2083 BUG_ON(!h);
2084
2085 r = kzalloc(sizeof(*r), GFP_KERNEL);
2086 spin_lock(&priv->ref_lock);
2087 if (!r) {
2088 spin_unlock(&priv->ref_lock);
2089 if (h) _nvmap_handle_put(h);
2090 return -ENOMEM;
2091 }
2092
2093 atomic_set(&r->refs, 1);
2094 r->h = h;
2095 atomic_set(&r->pin, 0);
2096
2097 p = &priv->handle_refs.rb_node;
2098 while (*p) {
2099 struct nvmap_handle_ref *l;
2100 parent = *p;
2101 l = rb_entry(parent, struct nvmap_handle_ref, node);
2102 if (r->h > l->h) p = &parent->rb_right;
2103 else p = &parent->rb_left;
2104 }
2105 rb_link_node(&r->node, parent, p);
2106 rb_insert_color(&r->node, &priv->handle_refs);
2107
2108 spin_unlock(&priv->ref_lock);
2109 *ref = r;
2110 return 0;
2111 }
2112
2113 static int nvmap_ioctl_create(struct file *filp,
2114 unsigned int cmd, void __user *arg)
2115 {
2116 struct nvmem_create_handle op;
2117 struct nvmap_handle_ref *r = NULL;
2118 struct nvmap_file_priv *priv = filp->private_data;
2119 unsigned long key;
2120 int err = 0;
2121
2122 err = copy_from_user(&op, arg, sizeof(op));
2123 if (err) return err;
2124
2125 if (!priv) return -ENODEV;
2126
2127 /* user-space-created handles are expanded to be page-aligned,
2128 * so that mmap() will not accidentally leak a different allocation */
2129 if (cmd==NVMEM_IOC_CREATE)
2130 key = (op.size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
2131 else if (cmd==NVMEM_IOC_CLAIM)
2132 key = op.key;
2133 else if (cmd==NVMEM_IOC_FROM_ID)
2134 key = op.id;
2135
2136 err = _nvmap_do_create(priv, cmd, key, (filp->f_op==&knvmap_fops), &r);
2137
2138 if (!err) {
2139 op.handle = (uintptr_t)r->h;
2140 /* since the size is spoofed to a page-multiple above,
2141 * clobber the orig_size field back to the requested value for
2142 * debugging. */
2143 if (cmd == NVMEM_IOC_CREATE) r->h->orig_size = op.size;
2144 err = copy_to_user(arg, &op, sizeof(op));
2145 if (err) _nvmap_do_free(priv, op.handle);
2146 }
2147
2148 return err;
2149 }
2150
2151 static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
2152 {
2153 struct nvmem_map_caller op;
2154 struct nvmap_vma_priv *vpriv;
2155 struct vm_area_struct *vma;
2156 struct nvmap_handle *h;
2157 int err = 0;
2158
2159 err = copy_from_user(&op, arg, sizeof(op));
2160 if (err) return err;
2161
2162 if (!op.handle) return -EINVAL;
2163
2164 h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
2165 if (!h) return -EINVAL;
2166
2167 down_read(&current->mm->mmap_sem);
2168
2169 vma = find_vma(current->mm, op.addr);
2170 if (!vma || !vma->vm_private_data) {
2171 err = -ENOMEM;
2172 goto out;
2173 }
2174
2175 if (op.offset & ~PAGE_MASK) {
2176 err = -EFAULT;
2177 goto out;
2178 }
2179
2180 if ((op.offset + op.length) > h->size) {
2181 err = -EADDRNOTAVAIL;
2182 goto out;
2183 }
2184
2185 vpriv = vma->vm_private_data;
2186 BUG_ON(!vpriv);
2187
2188 /* the VMA must exactly match the requested mapping operation, and the
2189 * VMA that is targeted must have been created originally by /dev/nvmap
2190 */
2191 if ((vma->vm_start != op.addr) || (vma->vm_ops != &nvmap_vma_ops) ||
2192 (vma->vm_end-vma->vm_start != op.length)) {
2193 err = -EPERM;
2194 goto out;
2195 }
2196
2197 /* verify that each mmap() system call creates a unique VMA */
2198
2199 if (vpriv->h && h==vpriv->h)
2200 goto out;
2201 else if (vpriv->h) {
2202 err = -EADDRNOTAVAIL;
2203 goto out;
2204 }
2205
2206 if (!h->heap_pgalloc && (h->carveout.base & ~PAGE_MASK)) {
2207 err = -EFAULT;
2208 goto out;
2209 }
2210
2211 vpriv->h = h;
2212 vpriv->offs = op.offset;
2213
2214 /* if the hmem is not writeback-cacheable, drop back to a page mapping
2215 * which will guarantee DMA coherency
2216 */
2217 vma->vm_page_prot = _nvmap_flag_to_pgprot(h->flags,
2218 vma->vm_page_prot);
2219
2220 out:
2221 up_read(&current->mm->mmap_sem);
2222 if (err) _nvmap_handle_put(h);
2223 return err;
2224 }
2225 /* Initially, the nvmap mmap system call is used to allocate an inaccessible
2226 * region of virtual-address space in the client. A subsequent
2227 * NVMAP_IOC_MMAP ioctl will associate each such region with a memory
2228 * handle (see nvmap_map_into_caller_ptr() above). */
2229 static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma)
2230 {
2231 /* FIXME: drivers which do not support COW seem to be split down the
2232 * middle between forcing the VM_SHARED flag and returning an error
2233 * when this flag isn't already set (i.e., MAP_PRIVATE).
2234 */
2235 struct nvmap_vma_priv *priv;
2236
2237 vma->vm_private_data = NULL;
2238
2239 priv = kzalloc(sizeof(*priv),GFP_KERNEL);
2240 if (!priv)
2241 return -ENOMEM;
2242
2243 priv->offs = 0;
2244 priv->h = NULL;
2245 atomic_set(&priv->ref, 1);
2246
2247 vma->vm_flags |= VM_SHARED;
2248 vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
2249 vma->vm_ops = &nvmap_vma_ops;
2250 vma->vm_private_data = priv;
2251
2252 return 0;
2253 }
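For reference, the two-step protocol (an anonymous mmap on /dev/nvmap, then an ioctl that binds the VMA to a handle) looks roughly like this from user space. This is a hypothetical sketch: the NVMEM_IOC_* command names/numbers and the exact structure layouts are assumed from linux/nvmem_ioctl.h, which is not shown in this diff, and len is assumed to be page-aligned so that it matches the VMA length exactly.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/nvmem_ioctl.h>

int nvmap_client_example(size_t len)
{
	struct nvmem_create_handle create;
	struct nvmem_alloc_handle alloc;
	struct nvmem_map_caller map;
	void *va;
	int fd;

	fd = open("/dev/nvmap", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&create, 0, sizeof(create));
	create.size = len;
	if (ioctl(fd, NVMEM_IOC_CREATE, &create) < 0)
		goto fail;

	memset(&alloc, 0, sizeof(alloc));
	alloc.handle = create.handle;
	alloc.heap_mask = NVMEM_HEAP_CARVEOUT_GENERIC;
	alloc.align = 4096;
	alloc.flags = NVMEM_HANDLE_WRITE_COMBINE;
	if (ioctl(fd, NVMEM_IOC_ALLOC, &alloc) < 0)
		goto fail;

	/* step 1: reserve virtual address space; nothing is mapped yet */
	va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (va == MAP_FAILED)
		goto fail;

	/* step 2: bind the whole VMA to the handle at offset 0; pages
	 * are then faulted in on first access */
	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	map.addr = (uintptr_t)va;
	map.offset = 0;
	map.length = len;
	if (ioctl(fd, NVMEM_IOC_MMAP, &map) < 0)
		goto fail;

	return fd;
fail:
	close(fd);
	return -1;
}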
2254
2255 /* perform cache maintenance on a handle; caller's handle must be pre-
2256 * validated. */
2257 static int _nvmap_do_cache_maint(struct nvmap_handle *h,
2258 unsigned long start, unsigned long end, unsigned long op, bool get)
2259 {
2260 pgprot_t prot;
2261 void *addr = NULL;
2262 void (*inner_maint)(const void*, const void*);
2263 void (*outer_maint)(unsigned long, unsigned long);
2264 int err = 0;
2265
2266 if (get) h = _nvmap_handle_get(h);
2267
2268 if (!h) return -EINVAL;
2269
2270 /* don't waste time on cache maintenance if the handle isn't cached */
2271 if (h->flags == NVMEM_HANDLE_UNCACHEABLE ||
2272 h->flags == NVMEM_HANDLE_WRITE_COMBINE)
2273 goto out;
2274
2275 if (op == NVMEM_CACHE_OP_WB) {
2276 inner_maint = smp_dma_clean_range;
2277 if (h->flags == NVMEM_HANDLE_CACHEABLE)
2278 outer_maint = outer_clean_range;
2279 else
2280 outer_maint = NULL;
2281 } else if (op == NVMEM_CACHE_OP_WB_INV) {
2282 inner_maint = smp_dma_flush_range;
2283 if (h->flags == NVMEM_HANDLE_CACHEABLE)
2284 outer_maint = outer_flush_range;
2285 else
2286 outer_maint = NULL;
2287 } else {
2288 inner_maint = smp_dma_inv_range;
2289 if (h->flags == NVMEM_HANDLE_CACHEABLE)
2290 outer_maint = outer_inv_range;
2291 else
2292 outer_maint = NULL;
2293 }
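/* summary of the selections above:
 *   NVMEM_CACHE_OP_WB      -> clean inner cache;     clean outer only if fully CACHEABLE
 *   NVMEM_CACHE_OP_WB_INV  -> clean+inv inner cache; flush outer only if fully CACHEABLE
 *   anything else (OP_INV) -> invalidate inner cache; inv outer only if fully CACHEABLE
 * INNER_CACHEABLE handles never get an outer_maint call here; they rely on
 * the outer_sync() issued at the "out" label below */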
2294
2295 prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
2296
2297 /* for any write-back operation, it is safe to writeback the entire
2298 * cache rather than just the requested region. for large regions, it
2299 * is faster to do this than to iterate over every line.
2300 * only implemented for L1-only cacheable handles currently */
2301 #if 0
2302 if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE &&
2303 end-start >= PAGE_SIZE*3 && op != NVMEM_CACHE_OP_INV) {
2304 flush_cache_all();
2305 goto out;
2306 }
2307 #endif
2308
2309 while (start < end) {
2310 struct page *page = NULL;
2311 unsigned long phys;
2312 void *src;
2313 size_t count;
2314
2315 if (h->heap_pgalloc) {
2316 page = h->pgalloc.pages[start>>PAGE_SHIFT];
2317 BUG_ON(!page);
2318 get_page(page);
2319 phys = page_to_phys(page) + (start & ~PAGE_MASK);
2320 } else {
2321 phys = h->carveout.base + start;
2322 }
2323
2324 if (!addr) {
2325 err = nvmap_map_pte(__phys_to_pfn(phys), prot, &addr);
2326 if (err) {
2327 if (page) put_page(page);
2328 break;
2329 }
2330 } else {
2331 _nvmap_set_pte_at((unsigned long)addr,
2332 __phys_to_pfn(phys), prot);
2333 }
2334
2335 src = addr + (phys & ~PAGE_MASK);
2336 count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
2337
2338 inner_maint(src, src+count);
2339 if (outer_maint) outer_maint(phys, phys+count);
2340 start += count;
2341 if (page) put_page(page);
2342 }
2343
2344 out:
2345 if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE) outer_sync();
2346 if (addr) nvmap_unmap_pte(addr);
2347 if (get) _nvmap_handle_put(h);
2348 return err;
2349 }
2350
2351 static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
2352 {
2353 struct nvmem_cache_op op;
2354 int err = 0;
2355 struct vm_area_struct *vma;
2356 struct nvmap_vma_priv *vpriv;
2357 unsigned long start;
2358 unsigned long end;
2359
2360 err = copy_from_user(&op, arg, sizeof(op));
2361 if (err) return err;
2362
2363 if (!op.handle || !op.addr || op.op<NVMEM_CACHE_OP_WB ||
2364 op.op>NVMEM_CACHE_OP_WB_INV)
2365 return -EINVAL;
2366
2367 vma = find_vma(current->active_mm, (unsigned long)op.addr);
2368 if (!vma || vma->vm_ops!=&nvmap_vma_ops ||
2369 (unsigned long)op.addr + op.len > vma->vm_end)
2370 return -EADDRNOTAVAIL;
2371
2372 vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
2373
2374 if ((unsigned long)vpriv->h != op.handle)
2375 return -EFAULT;
2376
2377 start = (unsigned long)op.addr - vma->vm_start;
2378 end = start + op.len;
2379
2380 return _nvmap_do_cache_maint(vpriv->h, start, end, op.op, true);
2381 }
2382
2383 /* copies a single element from the pre-get()'ed handle h, returns
2384 * the number of bytes copied, and the address in the nvmap mapping range
2385 * which was used (to eliminate re-allocation when copying multiple
2386 * elements). */
2387 static ssize_t _nvmap_do_one_rw_handle(struct nvmap_handle *h, int is_read,
2388 int is_user, unsigned long start, unsigned long rw_addr,
2389 unsigned long bytes, void **nvmap_addr)
2390 {
2391 pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
2392 unsigned long end = start + bytes;
2393 unsigned long orig_start = start;
2394
2395 if (is_user) {
2396 if (is_read && !access_ok(VERIFY_WRITE, (void*)rw_addr, bytes))
2397 return -EPERM;
2398 if (!is_read && !access_ok(VERIFY_READ, (void*)rw_addr, bytes))
2399 return -EPERM;
2400 }
2401
2402 while (start < end) {
2403 struct page *page = NULL;
2404 unsigned long phys;
2405 size_t count;
2406 void *src;
2407
2408 if (h->heap_pgalloc) {
2409 page = h->pgalloc.pages[start >> PAGE_SHIFT];
2410 BUG_ON(!page);
2411 get_page(page);
2412 phys = page_to_phys(page) + (start & ~PAGE_MASK);
2413 } else {
2414 phys = h->carveout.base + start;
2415 }
2416
2417 if (!*nvmap_addr) {
2418 int err = nvmap_map_pte(__phys_to_pfn(phys),
2419 prot, nvmap_addr);
2420 if (err) {
2421 if (page) put_page(page);
2422 count = start - orig_start;
2423 return (count) ? count : err;
2424 }
2425 } else {
2426 _nvmap_set_pte_at((unsigned long)*nvmap_addr,
2427 __phys_to_pfn(phys), prot);
2428
2429 }
2430
2431 src = *nvmap_addr + (phys & ~PAGE_MASK);
2432 count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
2433
2434 if (is_user && is_read)
2435 copy_to_user((void*)rw_addr, src, count);
2436 else if (is_user)
2437 copy_from_user(src, (void*)rw_addr, count);
2438 else if (is_read)
2439 memcpy((void*)rw_addr, src, count);
2440 else
2441 memcpy(src, (void*)rw_addr, count);
2442
2443 rw_addr += count;
2444 start += count;
2445 if (page) put_page(page);
2446 }
2447
2448 return (ssize_t)start - orig_start;
2449 }
2450
2451 static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
2452 int is_user, unsigned long h_offs, unsigned long sys_addr,
2453 unsigned long h_stride, unsigned long sys_stride,
2454 unsigned long elem_size, unsigned long count)
2455 {
2456 ssize_t bytes_copied = 0;
2457 void *addr = NULL;
2458
2459 h = _nvmap_handle_get(h);
2460 if (!h) return -EINVAL;
2461
2462 if (elem_size == h_stride &&
2463 elem_size == sys_stride) {
2464 elem_size *= count;
2465 h_stride = elem_size;
2466 sys_stride = elem_size;
2467 count = 1;
2468 }
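/* e.g. (hypothetical sizes) elem_size = 16, h_stride = 16, sys_stride = 16,
 * count = 64 collapses into a single contiguous 1024-byte transfer below */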
2469
2470 while (count--) {
2471 ssize_t ret = _nvmap_do_one_rw_handle(h, is_read,
2472 is_user, h_offs, sys_addr, elem_size, &addr);
2473 if (ret < 0) {
2474 if (!bytes_copied) bytes_copied = ret;
2475 break;
2476 }
2477 bytes_copied += ret;
2478 if (ret < elem_size) break;
2479 sys_addr += sys_stride;
2480 h_offs += h_stride;
2481 }
2482
2483 if (addr) nvmap_unmap_pte(addr);
2484 _nvmap_handle_put(h);
2485 return bytes_copied;
2486 }
2487
2488 static int nvmap_ioctl_rw_handle(struct file *filp,
2489 int is_read, void __user* arg)
2490 {
2491 struct nvmem_rw_handle __user *uarg = arg;
2492 struct nvmem_rw_handle op;
2493 struct nvmap_handle *h;
2494 ssize_t copied;
2495 int err = 0;
2496
2497 err = copy_from_user(&op, arg, sizeof(op));
2498 if (err) return err;
2499
2500 if (!op.handle || !op.addr || !op.count || !op.elem_size)
2501 return -EINVAL;
2502
2503 h = _nvmap_validate_get(op.handle, (filp->f_op == &knvmap_fops));
2504 if (!h) return -EINVAL; /* -EPERM? */
2505
2506 copied = _nvmap_do_rw_handle(h, is_read, 1, op.offset,
2507 (unsigned long)op.addr, op.hmem_stride,
2508 op.user_stride, op.elem_size, op.count);
2509
2510 if (copied < 0) { err = copied; copied = 0; }
2511 else if (copied < (op.count*op.elem_size)) err = -EINTR;
2512
2513 __put_user(copied, &uarg->count);
2514
2515 _nvmap_handle_put(h);
2516
2517 return err;
2518 }
2519
2520 static unsigned int _nvmap_do_get_param(struct nvmap_handle *h,
2521 unsigned int param)
2522 {
2523 if (param==NVMEM_HANDLE_PARAM_SIZE)
2524 return h->orig_size;
2525
2526 else if (param==NVMEM_HANDLE_PARAM_ALIGNMENT) {
2527 if (!h->alloc) return 0;
2528
2529 if (h->heap_pgalloc) return PAGE_SIZE;
2530 else {
2531 unsigned int i=1;
2532 if (!h->carveout.base) return SZ_4M;
2533 while (!(i & h->carveout.base)) i<<=1;
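/* i ends up as the lowest set bit of the carveout base; e.g. a hypothetical
 * base of 0x1e180000 reports an alignment of 0x80000 (512 KB) */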
2534 return i;
2535 }
2536 } else if (param==NVMEM_HANDLE_PARAM_BASE) {
2537
2538 if (!h->alloc || !atomic_add_return(0, &h->pin)){
2539 WARN_ON(1);
2540 return ~0ul;
2541 }
2542
2543 if (!h->heap_pgalloc)
2544 return h->carveout.base;
2545
2546 if (h->pgalloc.contig)
2547 return page_to_phys(h->pgalloc.pages[0]);
2548
2549 if (h->pgalloc.area)
2550 return h->pgalloc.area->iovm_start;
2551
2552 return ~0ul;
2553 } else if (param==NVMEM_HANDLE_PARAM_HEAP) {
2554
2555 if (!h->alloc) return 0;
2556
2557 if (!h->heap_pgalloc) {
2558 /* FIXME: hard-coded physical address */
2559 if ((h->carveout.base & 0xf0000000ul)==0x40000000ul)
2560 return NVMEM_HEAP_CARVEOUT_IRAM;
2561 else
2562 return NVMEM_HEAP_CARVEOUT_GENERIC;
2563 }
2564
2565 if (!h->pgalloc.contig)
2566 return NVMEM_HEAP_IOVMM;
2567
2568 return NVMEM_HEAP_SYSMEM;
2569 }
2570
2571 return 0;
2572 }
2573
2574 static int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
2575 {
2576 struct nvmem_handle_param op;
2577 struct nvmap_handle *h;
2578 int err;
2579
2580 err = copy_from_user(&op, arg, sizeof(op));
2581 if (err) return err;
2582
2583 if (op.param < NVMEM_HANDLE_PARAM_SIZE ||
2584 op.param > NVMEM_HANDLE_PARAM_HEAP)
2585 return -EINVAL;
2586
2587 h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
2588 if (!h) return -EINVAL;
2589
2590 op.result = _nvmap_do_get_param(h, op.param);
2591 err = copy_to_user(arg, &op, sizeof(op));
2592
2593 _nvmap_handle_put(h);
2594 return err;
2595 }
2596
2597 static struct miscdevice misc_nvmap_dev = {
2598 .minor = MISC_DYNAMIC_MINOR,
2599 .name = "nvmap",
2600 .fops = &nvmap_fops
2601 };
2602
2603 static struct miscdevice misc_knvmap_dev = {
2604 .minor = MISC_DYNAMIC_MINOR,
2605 .name = "knvmap",
2606 .fops = &knvmap_fops
2607 };
2608
2609 static struct device *__nvmap_heap_parent_dev(void)
2610 {
2611 return misc_nvmap_dev.this_device;
2612 }
2613
2614 /* creates the sysfs attribute files for a carveout heap; if called
2615 * before fs initialization, silently returns.
2616 */
2617 static void _nvmap_create_heap_attrs(struct nvmap_carveout_node *n)
2618 {
2619 if (!_nvmap_heap_parent_dev) return;
2620 dev_set_name(&n->dev, "heap-%s", n->carveout.name);
2621 n->dev.parent = _nvmap_heap_parent_dev;
2622 n->dev.driver = NULL;
2623 n->dev.release = NULL;
2624 if (device_register(&n->dev)) {
2625 pr_err("%s: failed to create heap-%s device\n",
2626 __func__, n->carveout.name);
2627 return;
2628 }
2629 if (sysfs_create_group(&n->dev.kobj, &nvmap_heap_defattr_group))
2630 pr_err("%s: failed to create attribute group for heap-%s "
2631 "device\n", __func__, n->carveout.name);
2632 }
2633
2634 static int __init nvmap_dev_init(void)
2635 {
2636 struct nvmap_carveout_node *n;
2637
2638 if (misc_register(&misc_nvmap_dev))
2639 pr_err("%s error registering %s\n", __func__,
2640 misc_nvmap_dev.name);
2641
2642 if (misc_register(&misc_knvmap_dev))
2643 pr_err("%s error registering %s\n", __func__,
2644 misc_knvmap_dev.name);
2645
2646 /* create sysfs attribute entries for all the heaps which were
2647 * created prior to nvmap_dev_init */
2648 down_read(&nvmap_context.list_sem);
2649 list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
2650 _nvmap_create_heap_attrs(n);
2651 }
2652 up_read(&nvmap_context.list_sem);
2653
2654 nvmap_procfs_root = proc_mkdir("nvmap", NULL);
2655 if (nvmap_procfs_root) {
2656 nvmap_procfs_proc = proc_mkdir("proc", nvmap_procfs_root);
2657 }
2658 return 0;
2659 }
2660 fs_initcall(nvmap_dev_init);
2661
2662 /* initialization of core data structures split out to earlier in the
2663 * init sequence, to allow kernel drivers access to nvmap before devfs
2664 * is initialized */
2665 #define NR_CARVEOUTS 2
2666 static unsigned int nvmap_carveout_cmds = 0;
2667 static unsigned long nvmap_carveout_cmd_base[NR_CARVEOUTS];
2668 static unsigned long nvmap_carveout_cmd_size[NR_CARVEOUTS];
2669
2670 static int __init nvmap_core_init(void)
2671 {
2672 u32 base = NVMAP_BASE;
2673 pgd_t *pgd;
2674 pmd_t *pmd;
2675 pte_t *pte;
2676 unsigned int i;
2677
2678 init_rwsem(&nvmap_context.list_sem);
2679 nvmap_context.init_data.handle_refs = RB_ROOT;
2680 atomic_set(&nvmap_context.init_data.iovm_commit, 0);
2681 /* no IOVMM allocations for kernel-created handles */
2682 spin_lock_init(&nvmap_context.init_data.ref_lock);
2683 nvmap_context.init_data.su = true;
2684 nvmap_context.init_data.iovm_limit = 0;
2685 INIT_LIST_HEAD(&nvmap_context.heaps);
2686
2687 #ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
2688 for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
2689 INIT_LIST_HEAD(&nvmap_mru_vma_lists[i]);
2690 #endif
2691
2692 i = 0;
2693 do {
2694 pgd = pgd_offset(&init_mm, base);
2695 pmd = pmd_alloc(&init_mm, pgd, base);
2696 if (!pmd) {
2697 pr_err("%s: no pmd tables\n", __func__);
2698 return -ENOMEM;
2699 }
2700 pte = pte_alloc_kernel(pmd, base);
2701 if (!pte) {
2702 pr_err("%s: no pte tables\n", __func__);
2703 return -ENOMEM;
2704 }
2705 nvmap_pte[i++] = pte;
2706 base += (1<<PGDIR_SHIFT);
2707 } while (base < NVMAP_END);
2708
2709 for (i=0; i<nvmap_carveout_cmds; i++) {
2710 char tmp[16];
2711 snprintf(tmp, sizeof(tmp), "generic-%u", i);
2712 nvmap_add_carveout_heap(nvmap_carveout_cmd_base[i],
2713 nvmap_carveout_cmd_size[i], tmp, 0x1);
2714 }
2715
2716 return 0;
2717 }
2718 core_initcall(nvmap_core_init);
2719
2720 static int __init nvmap_heap_arg(char *options)
2721 {
2722 unsigned long start, size;
2723 char *p = options;
2724
2725 start = -1;
2726 size = memparse(p, &p);
2727 if (*p == '@')
2728 start = memparse(p + 1, &p);
2729
2730 if (nvmap_carveout_cmds < ARRAY_SIZE(nvmap_carveout_cmd_size)) {
2731 nvmap_carveout_cmd_base[nvmap_carveout_cmds] = start;
2732 nvmap_carveout_cmd_size[nvmap_carveout_cmds] = size;
2733 nvmap_carveout_cmds++;
2734 }
2735 return 0;
2736 }
2737 __setup("nvmem=", nvmap_heap_arg);
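/* usage sketch (hypothetical addresses): booting with
 *   nvmem=64M@0x18000000 nvmem=32M@0x1C000000
 * records two regions here, and nvmap_core_init() then registers them as
 * the carveout heaps "generic-0" and "generic-1" */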
2738
2739 static int _nvmap_try_create_preserved(struct nvmap_carveout *co,
2740 struct nvmap_handle *h, unsigned long base,
2741 size_t size, unsigned int key)
2742 {
2743 unsigned long end = base + size;
2744 short idx;
2745
2746 h->carveout.base = ~0;
2747 h->carveout.key = key;
2748 h->carveout.co_heap = NULL;
2749
2750 spin_lock(&co->lock);
2751 idx = co->free_index;
2752 while (idx != -1) {
2753 struct nvmap_mem_block *b = BLOCK(co, idx);
2754 unsigned long blk_end = b->base + b->size;
2755 if (b->base <= base && blk_end >= end) {
2756 nvmap_split_block(co, idx, base, size);
2757 h->carveout.block_idx = idx;
2758 h->carveout.base = co->blocks[idx].base;
2759 h->carveout.co_heap = co;
2760 h->alloc = true;
2761 break;
2762 }
2763 idx = b->next_free;
2764 }
2765 spin_unlock(&co->lock);
2766
2767 return (h->carveout.co_heap == NULL) ? -ENXIO : 0;
2768 }
2769
2770 static void _nvmap_create_nvos_preserved(struct nvmap_carveout *co)
2771 {
2772 unsigned int i, key;
2773 NvBootArgsPreservedMemHandle mem;
2774 static int was_created[NvBootArgKey_PreservedMemHandle_Num -
2775 NvBootArgKey_PreservedMemHandle_0] = { 0 };
2776
2777 for (i=0, key=NvBootArgKey_PreservedMemHandle_0;
2778 i<ARRAY_SIZE(was_created); i++, key++) {
2779 struct nvmap_handle *h;
2780
2781 if (was_created[i]) continue;
2782
2783 if (NvOsBootArgGet(key, &mem, sizeof(mem))!=NvSuccess) continue;
2784 if (!mem.Address || !mem.Size) continue;
2785
2786 h = _nvmap_handle_create(NULL, mem.Size);
2787 if (!h) continue;
2788
2789 if (!_nvmap_try_create_preserved(co, h, mem.Address,
2790 mem.Size, key))
2791 was_created[i] = 1;
2792 else
2793 _nvmap_handle_put(h);
2794 }
2795 }
2796
2797 int nvmap_add_carveout_heap(unsigned long base, size_t size,
2798 const char *name, unsigned int bitmask)
2799 {
2800 struct nvmap_carveout_node *n;
2801 struct nvmap_carveout_node *l;
2802
2803
2804 n = kzalloc(sizeof(*n), GFP_KERNEL);
2805 if (!n) return -ENOMEM;
2806
2807 BUG_ON(bitmask & ~NVMEM_HEAP_CARVEOUT_MASK);
2808 n->heap_bit = bitmask;
2809
2810 if (_nvmap_init_carveout(&n->carveout, name, base, size)) {
2811 kfree(n);
2812 return -ENOMEM;
2813 }
2814
2815 down_write(&nvmap_context.list_sem);
2816
2817 /* called inside the list_sem lock to ensure that the was_created
2818 * array is protected against simultaneous access */
2819 _nvmap_create_nvos_preserved(&n->carveout);
2820 _nvmap_create_heap_attrs(n);
2821
2822 list_for_each_entry(l, &nvmap_context.heaps, heap_list) {
2823 if (n->heap_bit > l->heap_bit) {
2824 list_add_tail(&n->heap_list, &l->heap_list);
2825 up_write(&nvmap_context.list_sem);
2826 return 0;
2827 }
2828 }
2829 list_add_tail(&n->heap_list, &nvmap_context.heaps);
2830 up_write(&nvmap_context.list_sem);
2831 return 0;
2832 }
2833
2834 int nvmap_create_preserved_handle(unsigned long base, size_t size,
2835 unsigned int key)
2836 {
2837 struct nvmap_carveout_node *i;
2838 struct nvmap_handle *h;
2839
2840 h = _nvmap_handle_create(NULL, size);
2841 if (!h) return -ENOMEM;
2842
2843 down_read(&nvmap_context.list_sem);
2844 list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
2845 struct nvmap_carveout *co = &i->carveout;
2846 if (!_nvmap_try_create_preserved(co, h, base, size, key))
2847 break;
2848 }
2849 up_read(&nvmap_context.list_sem);
2850
2851 /* the base may not be correct if block splitting fails */
2852 if (!h->carveout.co_heap || h->carveout.base != base) {
2853 _nvmap_handle_put(h);
2854 return -ENOMEM;
2855 }
2856
2857 return 0;
2858 }
2859
2860 /* attempts to create a new carveout heap with a new usage bitmask by
2861 * taking an allocation from a previous carveout with a different bitmask */
2862 static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
2863 const char *name, unsigned int new_bitmask)
2864 {
2865 struct nvmap_carveout_node *i, *n;
2866 int idx = -1;
2867 unsigned int blkbase, blksize;
2868
2869
2870 n = kzalloc(sizeof(*n), GFP_KERNEL);
2871 if (!n) return -ENOMEM;
2872 n->heap_bit = new_bitmask;
2873
2874 /* align split carveouts to 1M */
2875 idx = nvmap_carveout_alloc(co, SZ_1M, size);
2876 if (idx != -1) {
2877 /* take the spin lock to avoid race conditions with
2878 * intervening allocations triggering grow_block operations */
2879 spin_lock(&co->lock);
2880 blkbase = co->blocks[idx].base;
2881 blksize = co->blocks[idx].size;
2882 spin_unlock(&co->lock);
2883
2884 if (_nvmap_init_carveout(&n->carveout, name, blkbase, blksize)) {
2885 nvmap_carveout_free(co, idx);
2886 idx = -1;
2887 } else {
2888 spin_lock(&co->lock);
2889 if (co->blocks[idx].prev) {
2890 co->blocks[co->blocks[idx].prev].next =
2891 co->blocks[idx].next;
2892 }
2893 if (co->blocks[idx].next) {
2894 co->blocks[co->blocks[idx].next].prev =
2895 co->blocks[idx].prev;
2896 }
2897 if (co->block_index==idx)
2898 co->block_index = co->blocks[idx].next;
2899 co->blocks[idx].next_free = -1;
2900 co->blocks[idx].prev_free = -1;
2901 co->blocks[idx].next = co->spare_index;
2902 if (co->spare_index!=-1)
2903 co->blocks[co->spare_index].prev = idx;
2904 co->spare_index = idx;
2905 spin_unlock(&co->lock);
2906 }
2907 }
2908
2909 if (idx==-1) {
2910 kfree(n);
2911 return -ENOMEM;
2912 }
2913
2914 down_write(&nvmap_context.list_sem);
2915 _nvmap_create_heap_attrs(n);
2916 list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
2917 if (n->heap_bit > i->heap_bit) {
2918 list_add_tail(&n->heap_list, &i->heap_list);
2919 up_write(&nvmap_context.list_sem);
2920 return 0;
2921 }
2922 }
2923 list_add_tail(&n->heap_list, &nvmap_context.heaps);
2924 up_write(&nvmap_context.list_sem);
2925 return 0;
2926 }
2927
2928 /* NvRmMemMgr APIs implemented on top of nvmap */
2929
2930 #include <linux/freezer.h>
2931
2932 NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
2933 {
2934 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
2935 unsigned long addr;
2936
2937 if (unlikely(!atomic_add_return(0, &h->pin) || !h->alloc ||
2938 Offset >= h->orig_size)) {
2939 WARN_ON(1);
2940 return ~0ul;
2941 }
2942
2943 if (h->heap_pgalloc && h->pgalloc.contig)
2944 addr = page_to_phys(h->pgalloc.pages[0]);
2945 else if (h->heap_pgalloc) {
2946 BUG_ON(!h->pgalloc.area);
2947 addr = h->pgalloc.area->iovm_start;
2948 } else
2949 addr = h->carveout.base;
2950
2951 return (NvU32)addr+Offset;
2952
2953 }
2954
2955 void NvRmMemPinMult(NvRmMemHandle *hMems, NvU32 *addrs, NvU32 Count)
2956 {
2957 struct nvmap_handle **h = (struct nvmap_handle **)hMems;
2958 unsigned int i;
2959 int ret;
2960
2961 do {
2962 ret = _nvmap_handle_pin_fast(Count, h);
2963 if (ret && !try_to_freeze()) {
2964 pr_err("%s: failed to pin handles\n", __func__);
2965 dump_stack();
2966 }
2967 } while (ret);
2968
2969 for (i=0; i<Count; i++) {
2970 addrs[i] = NvRmMemGetAddress(hMems[i], 0);
2971 BUG_ON(addrs[i]==~0ul);
2972 }
2973 }
2974
2975 void NvRmMemUnpinMult(NvRmMemHandle *hMems, NvU32 Count)
2976 {
2977 int do_wake = 0;
2978 unsigned int i;
2979
2980 for (i=0; i<Count; i++) {
2981 struct nvmap_handle *h = (struct nvmap_handle *)hMems[i];
2982 if (h) {
2983 BUG_ON(atomic_add_return(0, &h->pin)==0);
2984 do_wake |= _nvmap_handle_unpin(h);
2985 }
2986 }
2987
2988 if (do_wake) wake_up(&nvmap_pin_wait);
2989 }
2990
2991 NvU32 NvRmMemPin(NvRmMemHandle hMem)
2992 {
2993 NvU32 addr;
2994 NvRmMemPinMult(&hMem, &addr, 1);
2995 return addr;
2996 }
2997
2998 void NvRmMemUnpin(NvRmMemHandle hMem)
2999 {
3000 NvRmMemUnpinMult(&hMem, 1);
3001 }
3002
3003 void NvRmMemHandleFree(NvRmMemHandle hMem)
3004 {
3005 _nvmap_do_free(&nvmap_context.init_data, (unsigned long)hMem);
3006 }
3007
3008 NvError NvRmMemMap(NvRmMemHandle hMem, NvU32 Offset, NvU32 Size,
3009 NvU32 Flags, void **pVirtAddr)
3010 {
3011 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3012 pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
3013
3014 BUG_ON(!h->alloc);
3015
3016 if (Offset+Size > h->size)
3017 return NvError_BadParameter;
3018
3019 if (!h->kern_map && h->heap_pgalloc) {
3020 BUG_ON(h->size & ~PAGE_MASK);
3021 h->kern_map = vm_map_ram(h->pgalloc.pages,
3022 h->size>>PAGE_SHIFT, -1, prot);
3023 } else if (!h->kern_map) {
3024 unsigned int size;
3025 unsigned long addr;
3026
3027 addr = h->carveout.base;
3028 size = h->size + (addr & ~PAGE_MASK);
3029 addr &= PAGE_MASK;
3030 size = (size + PAGE_SIZE - 1) & PAGE_MASK;
3031
3032 h->kern_map = ioremap_wc(addr, size);
3033 if (h->kern_map) {
3034 addr = h->carveout.base - addr;
3035 h->kern_map += addr;
3036 }
3037 }
3038
3039 if (h->kern_map) {
3040 *pVirtAddr = (h->kern_map + Offset);
3041 return NvSuccess;
3042 }
3043
3044 return NvError_InsufficientMemory;
3045 }
3046
3047 void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 Size)
3048 {
3049 return;
3050 }
3051
3052 NvU32 NvRmMemGetId(NvRmMemHandle hMem)
3053 {
3054 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3055 if (!h->owner) h->global = true;
3056 return (NvU32)h;
3057 }
3058
3059 NvError NvRmMemHandleFromId(NvU32 id, NvRmMemHandle *hMem)
3060 {
3061 struct nvmap_handle_ref *r;
3062
3063 int err = _nvmap_do_create(&nvmap_context.init_data,
3064 NVMEM_IOC_FROM_ID, id, true, &r);
3065
3066 if (err || !r) return NvError_NotInitialized;
3067
3068 *hMem = (NvRmMemHandle)r->h;
3069 return NvSuccess;
3070 }
3071
3072 NvError NvRmMemHandleClaimPreservedHandle(NvRmDeviceHandle hRm,
3073 NvU32 Key, NvRmMemHandle *hMem)
3074 {
3075 struct nvmap_handle_ref *r;
3076
3077 int err = _nvmap_do_create(&nvmap_context.init_data,
3078 NVMEM_IOC_CLAIM, (unsigned long)Key, true, &r);
3079
3080 if (err || !r) return NvError_NotInitialized;
3081
3082 *hMem = (NvRmMemHandle)r->h;
3083 return NvSuccess;
3084 }
3085
3086 NvError NvRmMemHandleCreate(NvRmDeviceHandle hRm,
3087 NvRmMemHandle *hMem, NvU32 Size)
3088 {
3089 struct nvmap_handle_ref *r;
3090 int err = _nvmap_do_create(&nvmap_context.init_data,
3091 NVMEM_IOC_CREATE, (unsigned long)Size, true, &r);
3092
3093 if (err || !r) return NvError_InsufficientMemory;
3094 *hMem = (NvRmMemHandle)r->h;
3095 return NvSuccess;
3096 }
3097
3098 NvError NvRmMemAlloc(NvRmMemHandle hMem, const NvRmHeap *Heaps,
3099 NvU32 NumHeaps, NvU32 Alignment, NvOsMemAttribute Coherency)
3100 {
3101 unsigned int heap_mask = 0;
3102 unsigned int flags;
3103 int err;
3104
3105 BUG_ON(Alignment & (Alignment-1));
3106
3107 if (Coherency == NvOsMemAttribute_WriteBack)
3108 flags = NVMEM_HANDLE_INNER_CACHEABLE;
3109 else
3110 flags = NVMEM_HANDLE_WRITE_COMBINE;
3111
3112 if (!NumHeaps || !Heaps)
3113 heap_mask = (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC);
3114 else {
3115 unsigned int i;
3116 for (i=0; i<NumHeaps; i++) {
3117 switch (Heaps[i]) {
3118 case NvRmHeap_GART:
3119 heap_mask |= NVMEM_HEAP_IOVMM;
3120 break;
3121 case NvRmHeap_External:
3122 heap_mask |= NVMEM_HEAP_SYSMEM;
3123 break;
3124 case NvRmHeap_ExternalCarveOut:
3125 heap_mask |= NVMEM_HEAP_CARVEOUT_GENERIC;
3126 break;
3127 case NvRmHeap_IRam:
3128 heap_mask |= NVMEM_HEAP_CARVEOUT_IRAM;
3129 break;
3130 default:
3131 break;
3132 }
3133 }
3134 }
3135 if (!heap_mask) return NvError_InsufficientMemory;
3136
3137 err = _nvmap_do_alloc(&nvmap_context.init_data, (unsigned long)hMem,
3138 heap_mask, (size_t)Alignment, flags, false, true);
3139
3140 if (err) return NvError_InsufficientMemory;
3141 return NvSuccess;
3142 }
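Putting the wrappers together, a kernel client would typically create, allocate, pin, and map a handle as below. This is a minimal sketch with error handling abbreviated; hRm is assumed to be a valid NvRmDeviceHandle obtained elsewhere, the function name is invented for the example, and the usual headers already included by this file are assumed.

static int nvrm_mem_example(NvRmDeviceHandle hRm)
{
	NvRmMemHandle hMem;
	void *va = NULL;
	NvU32 phys;

	if (NvRmMemHandleCreate(hRm, &hMem, SZ_4K) != NvSuccess)
		return -ENOMEM;

	/* NULL heap list: fall back to sysmem or the generic carveout */
	if (NvRmMemAlloc(hMem, NULL, 0, 32,
			 NvOsMemAttribute_WriteBack) != NvSuccess) {
		NvRmMemHandleFree(hMem);
		return -ENOMEM;
	}

	phys = NvRmMemPin(hMem);	/* device-visible address */
	pr_debug("nvrm_mem_example: buffer pinned at %08x\n", phys);

	if (NvRmMemMap(hMem, 0, SZ_4K, 0, &va) == NvSuccess) {
		memset(va, 0, SZ_4K);
		/* write back the CPU cache before handing phys to hardware */
		NvRmMemCacheMaint(hMem, va, SZ_4K, 1, 0);
	}

	/* ... program the hardware with phys, wait for completion ... */

	NvRmMemUnpin(hMem);
	NvRmMemHandleFree(hMem);
	return 0;
}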
3143
3144 void NvRmMemReadStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 SrcStride,
3145 void *pDst, NvU32 DstStride, NvU32 ElementSize, NvU32 Count)
3146 {
3147 ssize_t bytes = 0;
3148
3149 bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, true,
3150 false, Offset, (unsigned long)pDst, SrcStride,
3151 DstStride, ElementSize, Count);
3152
3153 BUG_ON(bytes != (ssize_t)(Count*ElementSize));
3154 }
3155
3156 void NvRmMemWriteStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 DstStride,
3157 const void *pSrc, NvU32 SrcStride, NvU32 ElementSize, NvU32 Count)
3158 {
3159 ssize_t bytes = 0;
3160
3161 bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, false,
3162 false, Offset, (unsigned long)pSrc, DstStride,
3163 SrcStride, ElementSize, Count);
3164
3165 BUG_ON(bytes != (ssize_t)(Count*ElementSize));
3166 }
3167
3168 NvU32 NvRmMemGetSize(NvRmMemHandle hMem)
3169 {
3170 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3171 return h->orig_size;
3172 }
3173
3174 NvRmHeap NvRmMemGetHeapType(NvRmMemHandle hMem, NvU32 *BaseAddr)
3175 {
3176 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3177 NvRmHeap heap;
3178
3179 if (!h->alloc) {
3180 *BaseAddr = ~0ul;
3181 return (NvRmHeap)0;
3182 }
3183
3184 if (h->heap_pgalloc && !h->pgalloc.contig)
3185 heap = NvRmHeap_GART;
3186 else if (h->heap_pgalloc)
3187 heap = NvRmHeap_External;
3188 else if ((h->carveout.base & 0xf0000000ul) == 0x40000000ul)
3189 heap = NvRmHeap_IRam;
3190 else
3191 heap = NvRmHeap_ExternalCarveOut;
3192
3193 if (h->heap_pgalloc && h->pgalloc.contig)
3194 *BaseAddr = (NvU32)page_to_phys(h->pgalloc.pages[0]);
3195 else if (h->heap_pgalloc && atomic_add_return(0, &h->pin))
3196 *BaseAddr = h->pgalloc.area->iovm_start;
3197 else if (h->heap_pgalloc)
3198 *BaseAddr = ~0ul;
3199 else
3200 *BaseAddr = (NvU32)h->carveout.base;
3201
3202 return heap;
3203 }
3204
3205 void NvRmMemCacheMaint(NvRmMemHandle hMem, void *pMapping,
3206 NvU32 Size, NvBool Writeback, NvBool Inv)
3207 {
3208 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3209 unsigned long start;
3210 unsigned int op;
3211
3212 if (!h->kern_map || h->flags==NVMEM_HANDLE_UNCACHEABLE ||
3213 h->flags==NVMEM_HANDLE_WRITE_COMBINE) return;
3214
3215 if (!Writeback && !Inv) return;
3216
3217 if (Writeback && Inv) op = NVMEM_CACHE_OP_WB_INV;
3218 else if (Writeback) op = NVMEM_CACHE_OP_WB;
3219 else op = NVMEM_CACHE_OP_INV;
3220
3221 start = (unsigned long)pMapping - (unsigned long)h->kern_map;
3222
3223 _nvmap_do_cache_maint(h, start, start+Size, op, true);
3224 return;
3225 }
3226
3227 NvU32 NvRmMemGetAlignment(NvRmMemHandle hMem)
3228 {
3229 struct nvmap_handle *h = (struct nvmap_handle *)hMem;
3230 return _nvmap_do_get_param(h, NVMEM_HANDLE_PARAM_ALIGNMENT);
3231 }
3232
3233 NvError NvRmMemGetStat(NvRmMemStat Stat, NvS32 *Result)
3234 {
3235 unsigned long total_co = 0;
3236 unsigned long free_co = 0;
3237 unsigned long max_free = 0;
3238 struct nvmap_carveout_node *n;
3239
3240 down_read(&nvmap_context.list_sem);
3241 list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
3242
3243 if (!(n->heap_bit & NVMEM_HEAP_CARVEOUT_GENERIC)) continue;
3244 total_co += _nvmap_carveout_blockstat(&n->carveout,
3245 CARVEOUT_STAT_TOTAL_SIZE);
3246 free_co += _nvmap_carveout_blockstat(&n->carveout,
3247 CARVEOUT_STAT_FREE_SIZE);
3248 max_free = max(max_free,
3249 _nvmap_carveout_blockstat(&n->carveout,
3250 CARVEOUT_STAT_LARGEST_FREE));
3251 }
3252 up_read(&nvmap_context.list_sem);
3253
3254 if (Stat==NvRmMemStat_TotalCarveout) {
3255 *Result = (NvU32)total_co;
3256 return NvSuccess;
3257 } else if (Stat==NvRmMemStat_UsedCarveout) {
3258 *Result = (NvU32)total_co - (NvU32)free_co;
3259 return NvSuccess;
3260 } else if (Stat==NvRmMemStat_LargestFreeCarveoutBlock) {
3261 *Result = (NvU32)max_free;
3262 return NvSuccess;
3263 }
3264
3265 return NvError_BadParameter;
3266 }
3267
3268 NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset)
3269 {
3270 NvU8 val;
3271 NvRmMemRead(hMem, Offset, &val, sizeof(val));
3272 return val;
3273 }
3274
3275 NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset)
3276 {
3277 NvU16 val;
3278 NvRmMemRead(hMem, Offset, &val, sizeof(val));
3279 return val;
3280 }
3281
3282 NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset)
3283 {
3284 NvU32 val;
3285 NvRmMemRead(hMem, Offset, &val, sizeof(val));
3286 return val;
3287 }
3288
3289 void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data)
3290 {
3291 NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
3292 }
3293
3294 void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data)
3295 {
3296 NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
3297 }
3298
3299 void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data)
3300 {
3301 NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
3302 }
3303
3304 void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size)
3305 {
3306 NvRmMemReadStrided(hMem, Offset, Size, pDst, Size, Size, 1);
3307 }
3308
3309 void NvRmMemWrite(NvRmMemHandle hMem, NvU32 Offset,
3310 const void *pSrc, NvU32 Size)
3311 {
3312 NvRmMemWriteStrided(hMem, Offset, Size, pSrc, Size, Size, 1);
3313 }
3314
3315 void NvRmMemMove(NvRmMemHandle dstHMem, NvU32 dstOffset,
3316 NvRmMemHandle srcHMem, NvU32 srcOffset, NvU32 Size)
3317 {
3318 while (Size--) {
3319 NvU8 tmp = NvRmMemRd08(srcHMem, srcOffset);
3320 NvRmMemWr08(dstHMem, dstOffset, tmp);
3321 dstOffset++;
3322 srcOffset++;
3323 }
3324 }
3325
3326 NvU32 NvRmMemGetCacheLineSize(void)
3327 {
3328 return 32;
3329 }
3330
3331 void *NvRmHostAlloc(size_t size)
3332 {
3333 return NvOsAlloc(size);
3334 }
3335
3336 void NvRmHostFree(void *ptr)
3337 {
3338 NvOsFree(ptr);
3339 }
3340
3341 NvError NvRmMemMapIntoCallerPtr(NvRmMemHandle hMem, void *pCallerPtr,
3342 NvU32 Offset, NvU32 Size)
3343 {
3344 return NvError_NotSupported;
3345 }
3346
3347 NvError NvRmMemHandlePreserveHandle(NvRmMemHandle hMem, NvU32 *pKey)
3348 {
3349 return NvError_NotSupported;
3350 }