Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(77)

Side by Side Diff: arch/arm/mach-tegra/nv/nvos/nvos.c

Issue 3256004: [ARM] tegra: add nvos/nvrm/nvmap drivers (Closed) Base URL: ssh://git@gitrw.chromium.org/kernel.git
Patch Set: remove ap15 headers Created 10 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « arch/arm/mach-tegra/nv/nvos/Makefile ('k') | arch/arm/mach-tegra/nv/nvos/nvos_exports.c » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 /*
2 * Copyright (c) 2008-2009 NVIDIA Corporation.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * Neither the name of the NVIDIA Corporation nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 #include "nvos.h"
34 #include "nvos_trace.h"
35 #include "nvutil.h"
36 #include "nverror.h"
37 #include "nvassert.h"
38 #include "nvbootargs.h"
39 #include "nvio.h"
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/irq.h>
43 #include <linux/jiffies.h>
44 #include <linux/time.h>
45 #include <linux/kernel.h>
46 #include <linux/kthread.h>
47 #include <linux/mm.h>
48 #include <linux/mutex.h>
49 #include <linux/sched.h>
50 #include <linux/semaphore.h>
51 #include <linux/spinlock.h>
52 #include <linux/string.h>
53 #include <linux/uaccess.h>
54 #include <linux/vmalloc.h>
55 #include <linux/pagemap.h>
56 #include <linux/dma-mapping.h>
57 #include <asm/atomic.h>
58 #include <asm/io.h>
59 #include <asm/page.h>
60 #include <asm/div64.h>
61 #include <asm/setup.h>
62 #include <asm/cacheflush.h>
63 #include <mach/irqs.h>
64 #include <linux/freezer.h>
65 #include <linux/slab.h>
66
/* In tracing (NVOS_TRACE) or debug (NV_DEBUG) builds, nvos_trace.h wraps
 * the NvOs entry points in macros.  Undo those wrappers here so that this
 * translation unit defines the real functions rather than recursively
 * expanding the traced variants. */
#if NVOS_TRACE || NV_DEBUG
#undef NvOsAlloc
#undef NvOsFree
#undef NvOsRealloc
#undef NvOsSharedMemAlloc
#undef NvOsSharedMemMap
#undef NvOsSharedMemUnmap
#undef NvOsSharedMemFree
#undef NvOsMutexCreate
#undef NvOsExecAlloc
#undef NvOsExecFree
#undef NvOsPageAlloc
#undef NvOsPageLock
#undef NvOsPageFree
#undef NvOsPageMap
#undef NvOsPageMapIntoPtr
#undef NvOsPageUnmap
#undef NvOsPageAddress
#undef NvOsIntrMutexCreate
#undef NvOsIntrMutexLock
#undef NvOsIntrMutexUnlock
#undef NvOsIntrMutexDestroy
#undef NvOsInterruptRegister
#undef NvOsInterruptUnregister
#undef NvOsInterruptEnable
#undef NvOsInterruptDone
#undef NvOsPhysicalMemMapIntoCaller
#undef NvOsMutexLock
#undef NvOsMutexUnlock
#undef NvOsMutexDestroy
#undef NvOsPhysicalMemMap
#undef NvOsPhysicalMemUnmap
#undef NvOsSemaphoreCreate
#undef NvOsSemaphoreWait
#undef NvOsSemaphoreWaitTimeout
#undef NvOsSemaphoreSignal
#undef NvOsSemaphoreDestroy
#undef NvOsSemaphoreClone
#undef NvOsSemaphoreUnmarshal
#undef NvOsThreadCreate
#undef NvOsInterruptPriorityThreadCreate
#undef NvOsThreadJoin
#undef NvOsThreadYield
#endif
111
/* Real-time priority used for the per-IRQ kernel threads (half of MAX_RT_PRIO). */
#define KTHREAD_IRQ_PRIO (MAX_RT_PRIO>>1)

/* Size of s_pIrqList: one slot per Linux IRQ number. */
#define NVOS_MAX_SYSTEM_IRQS NR_IRQS

/* Bit 0 of NvOsInterruptBlock::Flags: set when the block's IRQs are enabled. */
#define NVOS_IRQ_IS_ENABLED 0x1

/* NVOS_IRQ_IS_ flags are mutually exclusive.
 * IS_TASKLET executes the handler in a tasklet (used for kernel drivers)
 * IS_KERNEL_THREAD executes in a kernel thread (used for kernel GPIOs)
 * IS_USER simply signals an NvOs semaphore (used for user-mode interrupts)
 *
 * Currently the choice is based on the IRQ number and if the requester is
 * an IOCTL.  Later this can be modified to be exposed in the public APIs.
 *
 * If no flag is set, the IRQ is handled in the interrupt handler itself.
 */

/* Dispatch-type field: 2 bits above the enable bit. */
#define NVOS_IRQ_TYPE_SHIFT 1
#define NVOS_IRQ_TYPE_MASK (0x3 << NVOS_IRQ_TYPE_SHIFT)

#define NVOS_IRQ_IS_IRQ (0)
#define NVOS_IRQ_IS_TASKLET (0x1 << NVOS_IRQ_TYPE_SHIFT)
#define NVOS_IRQ_IS_KERNEL_THREAD (0x2 << NVOS_IRQ_TYPE_SHIFT)
#define NVOS_IRQ_IS_USER (0x3 << NVOS_IRQ_TYPE_SHIFT)

/* Global lock; currently not referenced in this file's visible code. */
static DEFINE_SPINLOCK(gs_NvOsSpinLock);
138
/* Per-IRQ bookkeeping for one entry of an interrupt block. */
typedef struct NvOsIrqHandlerRec
{
    union
    {
        NvOsInterruptHandler pHandler; /* kernel-mode callback */
        NvOsSemaphoreHandle pSem;      /* user-mode semaphore to signal */
    };
    NvU32 Irq;                     /* Linux IRQ number */
    char IrqName[16];              /* name passed to request_irq() */
    struct semaphore sem;          /* wakes the IRQ thread (KERNEL_THREAD mode) */
    struct task_struct *task;      /* the IRQ kernel thread (KERNEL_THREAD mode) */
    struct tasklet_struct Tasklet; /* deferred handler (TASKLET mode) */
} NvOsIrqHandler;

/* One registration made through NvOsInterruptRegister*: shared flags plus a
 * variable-length array of per-IRQ entries (pre-C99 trailing-array idiom,
 * sized via INTBLOCK_SIZE). */
typedef struct NvOsInterruptBlockRec
{
    void *pArg;     /* context passed to every handler */
    NvU32 Flags;    /* NVOS_IRQ_IS_* dispatch type | NVOS_IRQ_IS_ENABLED */
    NvU32 NumIrqs;  /* number of valid entries in IrqList */
    NvU32 Shutdown; /* set nonzero to make IRQ threads exit */
    NvOsIrqHandler IrqList[1]; /* actually NumIrqs entries */
} NvOsInterruptBlock;

/* Allocation size for a block with NUMIRQS entries (IrqList[1] already
 * accounts for one entry, hence the -1). */
#define INTBLOCK_SIZE(NUMIRQS) \
    (sizeof(NvOsInterruptBlock) + ((NUMIRQS)-1)*sizeof(NvOsIrqHandler))

/* Maps a Linux IRQ number to the block that owns it (NULL when unclaimed). */
static NvOsInterruptBlock *s_pIrqList[NVOS_MAX_SYSTEM_IRQS] = { NULL };

/* Boot arguments; not referenced in the code visible here. */
static NvBootArgs s_BootArgs = { {0}, {0}, {0}, {0}, {0}, {0}, {{0}} };
168
/* The tasklet "data" parameter is a munging of the s_pIrqList index
 * (just the IRQ number, in the low 16 bits) and the InterruptBlock's
 * IrqList index (in the high 16 bits), making the interrupt handler
 * lookup O(1).  (The original comment said O(n), which does not match
 * the direct array indexing below.)
 */
static void NvOsTaskletWrapper(
    unsigned long data)
{
    NvOsInterruptBlock *pBlock = s_pIrqList[(data&0xffff)];
    /* pBlock may be NULL if the IRQ was unregistered after scheduling. */
    if (pBlock)
        (*pBlock->IrqList[data>>16].pHandler)(pBlock->pArg);
}
180
/* The thread "pdata" parameter is a munging of the s_pIrqList index
 * (IRQ number, low 16 bits) and the InterruptBlock's IrqList index
 * (high 16 bits) — same encoding as the tasklet wrapper, giving an O(1)
 * handler lookup.  This is the main loop of a per-IRQ kernel thread:
 * it sleeps on the entry's semaphore until NvOsIrqWrapper up()s it,
 * then invokes the registered handler.
 */
static int NvOsInterruptThreadWrapper(
    void *pdata)
{
    unsigned long data = (unsigned long)pdata;
    NvOsInterruptBlock *pBlock = s_pIrqList[(data&0xffff)];

    if (!pBlock)
    {
        return 0;
    }
    while (!pBlock->Shutdown)
    {
        int t;

        /* Is the timeout large enough? */
        /* NOTE(review): down_interruptible has no timeout; the comment
         * above appears stale. */
        t = down_interruptible(&pBlock->IrqList[data>>16].sem);

        /* Shutdown is re-checked after waking: NvOsInterruptUnregister
         * sets it and then up()s the semaphore to release this thread. */
        if (pBlock->Shutdown)
            break;

        /* Interrupted by a signal, not a real IRQ wakeup — retry. */
        if (t)
            continue;

        (*pBlock->IrqList[data>>16].pHandler)(pBlock->pArg);
    }

    return 0;
}
213
/* Top-half IRQ handler installed by request_irq for every NvOs interrupt.
 * dev_id carries the IrqList index within the owning block.  The IRQ is
 * disabled here and, per the NvOs contract, re-enabled later by the client
 * calling NvOsInterruptDone. */
static irqreturn_t NvOsIrqWrapper(
    int irq,
    void *dev_id)
{
    unsigned long data = (unsigned long)dev_id;
    NvOsInterruptBlock *pBlock = s_pIrqList[irq];

    disable_irq_nosync(irq);
    /* Dispatch according to the block's configured deferral mode. */
    switch (pBlock->Flags & NVOS_IRQ_TYPE_MASK)
    {
    case NVOS_IRQ_IS_TASKLET:
        tasklet_schedule(&pBlock->IrqList[data].Tasklet);
        break;
    case NVOS_IRQ_IS_KERNEL_THREAD:
        up(&(pBlock->IrqList[data].sem));
        break;
    case NVOS_IRQ_IS_USER:
        NvOsSemaphoreSignal(pBlock->IrqList[data].pSem);
        break;
    case NVOS_IRQ_IS_IRQ:
        /* No deferral: run the handler in interrupt context. */
        (*pBlock->IrqList[data].pHandler)(pBlock->pArg);
        break;
    }

    return IRQ_HANDLED;
}
240
/* File-stream printf is not supported in the kernel-side NvOs port. */
NvError NvOsFprintf(NvOsFileHandle stream, const char *format, ...)
{
    return NvError_NotImplemented;
}
245
246 NvS32 NvOsSnprintf(char *str, size_t size, const char *format, ...)
247 {
248 va_list ap;
249 va_start( ap, format );
250 return vsnprintf( str, size, format, ap );
251 va_end( ap );
252 }
253
/* File-stream vprintf is not supported in the kernel-side NvOs port. */
NvError NvOsVfprintf(NvOsFileHandle stream, const char *format, va_list ap)
{
    return NvError_NotImplemented;
}

/* Bounded formatted print with caller-managed va_list; thin wrapper over
 * the kernel's vsnprintf. */
NvS32 NvOsVsnprintf(char *str, size_t size, const char *format, va_list ap)
{
    return vsnprintf( str, size, format, ap );
}
263
/* Debug output routed to the kernel log via vprintk. */
void NvOsDebugPrintf(const char *format, ...)
{
    va_list ap;
    va_start( ap, format );
    vprintk( format, ap );
    va_end( ap );
}

/* As NvOsDebugPrintf, but with a caller-managed va_list. */
void
NvOsDebugVprintf( const char *format, va_list ap )
{
    vprintk( format, ap );
}

/* As NvOsDebugPrintf, but returns vprintk's character count. */
NvS32 NvOsDebugNprintf(const char *format, ...)
{
    NvS32 r;
    va_list ap;
    va_start( ap, format );
    r = vprintk( format, ap );
    va_end( ap );
    return r;
}
287
288
289 NvError NvOsGetOsInformation(NvOsOsInfo *pOsInfo)
290 {
291 if (pOsInfo)
292 {
293 NvOsMemset(pOsInfo, 0, sizeof(NvOsOsInfo));
294 pOsInfo->OsType = NvOsOs_Linux;
295 }
296 else
297 {
298 return NvError_BadParameter;
299 }
300 return NvError_Success;
301 }
302
/* Thin wrappers over the kernel's string/memory routines so NvOs clients
 * need not include <linux/string.h> directly. */

/* NOTE(review): inherits strncpy semantics — dest is NOT NUL-terminated
 * when strlen(src) >= size. */
void NvOsStrncpy(char *dest, const char *src, size_t size)
{
    strncpy( dest, src, size );
}

/* Code pages are not meaningful in the kernel port; returns 0. */
NvOsCodePage NvOsStrGetSystemCodePage(void)
{
    return (NvOsCodePage)0;
}

size_t NvOsStrlen(const char *s)
{
    return strlen(s);
}

int NvOsStrcmp(const char *s1, const char *s2)
{
    return strcmp(s1, s2);
}

int NvOsStrncmp(const char *s1, const char *s2, size_t size)
{
    return strncmp(s1, s2, size);
}

/* Regions must not overlap; use NvOsMemmove if they may. */
void NvOsMemcpy(void *dest, const void *src, size_t size)
{
    memcpy(dest, src, size);
}

int NvOsMemcmp(const void *s1, const void *s2, size_t size)
{
    return memcmp(s1, s2, size);
}

void NvOsMemset(void *s, NvU8 c, size_t size)
{
    memset(s, c, size);
}

void NvOsMemmove(void *dest, const void *src, size_t size)
{
    memmove(dest, src, size);
}
347
348 NvError NvOsCopyIn(void *pDst, const void *pSrc, size_t Bytes)
349 {
350 if (!Bytes)
351 return NvSuccess;
352
353 if( access_ok( VERIFY_READ, pSrc, Bytes ) )
354 {
355 __copy_from_user(pDst, pSrc, Bytes);
356 return NvSuccess;
357 }
358
359 return NvError_InvalidAddress;
360 }
361
362 NvError NvOsCopyOut(void *pDst, const void *pSrc, size_t Bytes)
363 {
364 if (!Bytes)
365 return NvSuccess;
366
367 if( access_ok( VERIFY_WRITE, pDst, Bytes ) )
368 {
369 __copy_to_user(pDst, pSrc, Bytes);
370 return NvSuccess;
371 }
372
373 return NvError_InvalidAddress;
374 }
375
/* File, directory, ioctl, and config operations are not supported in the
 * kernel-side NvOs port; all of the following are stubs that either return
 * NvError_NotImplemented or do nothing. */

NvError NvOsFopen(const char *path, NvU32 flags, NvOsFileHandle *file)
{
    return NvError_NotImplemented;
}

void NvOsFclose(NvOsFileHandle stream)
{
}

NvError NvOsFwrite(NvOsFileHandle stream, const void *ptr, size_t size)
{
    return NvError_NotImplemented;
}

NvError NvOsFread(
    NvOsFileHandle stream,
    void *ptr,
    size_t size,
    size_t *bytes)
{
    return NvError_NotImplemented;
}

NvError NvOsFreadTimeout(
    NvOsFileHandle stream,
    void *ptr,
    size_t size,
    size_t *bytes,
    NvU32 timeout_msec)
{
    return NvError_NotImplemented;
}

NvError NvOsFgetc(NvOsFileHandle stream, NvU8 *c)
{
    return NvError_NotImplemented;
}

NvError NvOsFseek(NvOsFileHandle file, NvS64 offset, NvOsSeekEnum whence)
{
    return NvError_NotImplemented;
}

NvError NvOsFtell(NvOsFileHandle file, NvU64 *position)
{
    return NvError_NotImplemented;
}

NvError NvOsStat(const char *filename, NvOsStatType *stat)
{
    return NvError_NotImplemented;
}

NvError NvOsFstat(NvOsFileHandle file, NvOsStatType *stat)
{
    return NvError_NotImplemented;
}

NvError NvOsFflush(NvOsFileHandle stream)
{
    return NvError_NotImplemented;
}

NvError NvOsFsync(NvOsFileHandle stream)
{
    return NvError_NotImplemented;
}

NvError NvOsIoctl(
    NvOsFileHandle hFile,
    NvU32 IoctlCode,
    void *pBuffer,
    NvU32 InBufferSize,
    NvU32 InOutBufferSize,
    NvU32 OutBufferSize)
{
    return NvError_NotImplemented;
}

NvError NvOsOpendir(const char *path, NvOsDirHandle *dir)
{
    return NvError_NotImplemented;
}

NvError NvOsReaddir(NvOsDirHandle dir, char *name, size_t size)
{
    return NvError_NotImplemented;
}

void NvOsClosedir(NvOsDirHandle dir)
{
}

/* File hooks are not supported; always returns NULL. */
const NvOsFileHooks *NvOsSetFileHooks(NvOsFileHooks *newHooks)
{
    return 0;
}

NvError NvOsGetConfigU32(const char *name, NvU32 *value)
{
    return NvError_NotImplemented;
}

NvError NvOsGetConfigString(const char *name, char *value, NvU32 size)
{
    return NvError_NotImplemented;
}
483
484 void *NvOsAlloc(size_t size)
485 {
486 size_t AllocSize = size + sizeof(size_t);
487 size_t* ptr = NULL;
488 ptr = vmalloc(AllocSize);
489 if (!ptr)
490 return ptr;
491 *ptr = size;
492 ptr++;
493 return (ptr);
494 }
495
/* Resizes an NvOsAlloc allocation.  Because vmalloc has no realloc, this
 * allocates a new block, copies min(old, new) bytes, and frees the old
 * block.  NULL ptr degenerates to NvOsAlloc; size 0 frees and returns NULL.
 * Returns NULL (leaving ptr valid) when the new allocation fails. */
void *NvOsRealloc(void *ptr, size_t size)
{
    size_t* NewPtr = NULL;
    size_t OldSize = 0;
    size_t SmallerSize = 0;

    if( !ptr )
    {
        return NvOsAlloc(size);
    }
    if (!size)
    {
        NvOsFree(ptr);
        return NULL;
    }

    // Get the size of the memory allocated for ptr.
    // (NvOsAlloc stores the requested size in a header just before ptr.)
    NewPtr = (size_t*)ptr;
    NewPtr--;
    OldSize = *NewPtr;
    if (size == OldSize)
        return ptr;
    SmallerSize = (OldSize > size) ? size : OldSize;

    NewPtr = NvOsAlloc(size);
    if(!NewPtr)
        return NULL;
    NvOsMemcpy(NewPtr, ptr, SmallerSize);
    NvOsFree(ptr);
    return NewPtr;
}
527
528 void NvOsFree(void *ptr)
529 {
530 size_t* AllocPtr = NULL;
531 if (ptr)
532 {
533 AllocPtr = (size_t*)ptr;
534 AllocPtr--;
535 }
536 else
537 return;
538 vfree(AllocPtr);
539 }
540
/* Allocates executable memory (no size header — must NOT be passed to
 * NvOsFree; no NvOsExecFree is visible in this chunk). */
void *NvOsExecAlloc(size_t size)
{
    return vmalloc_exec( size );
}
545
/* Shared memory is not supported in the kernel-side NvOs port; these are
 * stubs. */

NvError NvOsSharedMemAlloc(
    const char *key,
    size_t size,
    NvOsSharedMemHandle *descriptor)
{
    return NvError_NotImplemented;
}

NvError NvOsSharedMemMap(
    NvOsSharedMemHandle descriptor,
    size_t offset,
    size_t size,
    void **ptr)
{
    return NvError_NotImplemented;
}


void NvOsSharedMemUnmap(void *ptr, size_t size)
{
}

void NvOsSharedMemFree(NvOsSharedMemHandle descriptor)
{
}
571
/* Maps a physical range into kernel virtual space.  Ranges that fall
 * entirely inside one of the statically-mapped Tegra apertures are
 * translated arithmetically (tegra_munge_pa) without creating a new
 * mapping; everything else goes through ioremap with caching attributes
 * chosen from attrib.  Returns NvError_InsufficientMemory if ioremap
 * fails. */
NvError NvOsPhysicalMemMap(
    NvOsPhysAddr phys,
    size_t size,
    NvOsMemAttribute attrib,
    NvU32 flags,
    void **ptr)
{
    /* For apertures in the static kernel mapping, just return the
     * static VA rather than creating a new mapping
     * FIXME: Eventually, the static phyiscal apertures should be
     * registered with NvOs when mapped, since they could be
     * chip-dependent
     */
    /* Expanded once per aperture by tegra_apertures(); returns early when
     * [phys, phys+size) is contained in the aperture. */
#define aperture_comp_map(_name, _pa, _len) \
    if ((phys >= (NvOsPhysAddr)(_pa)) && \
        ((NvOsPhysAddr)(phys+size)<=(NvOsPhysAddr)((_pa)+(_len)))) { \
        *ptr = (void *)tegra_munge_pa(phys); \
        return NvSuccess; \
    }

    tegra_apertures(aperture_comp_map);

    if (attrib == NvOsMemAttribute_WriteCombined)
    {
        *ptr = ioremap_wc(phys, size);
    }
    else if (attrib == NvOsMemAttribute_WriteBack)
    {
        *ptr = ioremap_cached(phys, size);
    }
    else
    {
        /* Default: strongly-ordered / uncached device mapping. */
        *ptr = ioremap_nocache(phys, size);
    }

    if (*ptr == 0)
        return NvError_InsufficientMemory;

    return NvSuccess;
}
612
/* Mapping physical memory into another caller's address space is not
 * supported in the kernel port. */
NvError NvOsPhysicalMemMapIntoCaller(
    void *pCallerPtr,
    NvOsPhysAddr phys,
    size_t size,
    NvOsMemAttribute attrib,
    NvU32 flags)
{
    return NvError_NotImplemented;
}

/* Undoes NvOsPhysicalMemMap.  VAs inside a statically-mapped aperture
 * were never ioremapped, so they are simply ignored; anything else is
 * iounmapped. */
void NvOsPhysicalMemUnmap(void *ptr, size_t size)
{
    NvUPtr va = (NvUPtr)ptr;

    /* No unmapping required for statically mapped I/O space */
#define aperture_comp_unmap(_name, _pa, _len) \
    if ((tegra_munge_pa((_pa)) <= va) && \
        (tegra_munge_pa((_pa))+(_len) >= (va+size))) \
        return;


    tegra_apertures(aperture_comp_unmap);
    iounmap(ptr);
}
637
/* Dynamic library loading is not supported in the kernel port. */
NvError NvOsLibraryLoad(const char *name, NvOsLibraryHandle *library)
{
    return NvError_NotImplemented;
}

void* NvOsLibraryGetSymbol(NvOsLibraryHandle library, const char *symbol)
{
    return 0;
}

void NvOsLibraryUnload(NvOsLibraryHandle library)
{
}

/* Sleeps (schedules away) for at least msec milliseconds. */
void NvOsSleepMS(NvU32 msec)
{
    msleep( msec );
}

/* Busy-waits for usec microseconds; does not sleep, safe in atomic context. */
void NvOsWaitUS(NvU32 usec)
{
    udelay( usec );
}
661
/* Recursive mutex built on a kernel mutex: owner/count implement the
 * recursion on top of the non-recursive struct mutex. */
typedef struct NvOsMutexRec
{
    struct mutex mutex;
    volatile NvU32 count;              /* recursion depth of the owner */
    volatile struct thread_info *owner; /* thread_info of the holder, or 0 */
} NvOsMutex;

/**
 * nvos mutexes are recursive.
 */
NvError NvOsMutexCreate(NvOsMutexHandle *mutex)
{
    NvOsMutex *m;

    m = kzalloc( sizeof(NvOsMutex), GFP_KERNEL );
    if( !m )
        return NvError_InsufficientMemory;

    mutex_init( &m->mutex );
    m->count = 0;
    m->owner = 0;

    *mutex = m;
    return NvSuccess;
}
687
/* Acquires the recursive mutex.  Re-entry by the owning thread just bumps
 * the count; otherwise the underlying kernel mutex is taken, retrying if a
 * signal interrupts the sleep. */
void NvOsMutexLock(NvOsMutexHandle mutex)
{
    struct task_struct *task = current;
    struct thread_info *info = task_thread_info(task);
    int ret;

    NV_ASSERT( mutex );

    /* if we own the lock, increment the count and bail out */
    if( mutex->owner == info )
    {
        mutex->count++;
        return;
    }

    /* lock as normal, then setup the recursive stuff */
    do
    {
        /* FIXME: interruptible mutexes may not be necessary, since this
         * implementation is only used by the kernel tasks. */
        ret = mutex_lock_interruptible( &mutex->mutex );
        // If a signal arrives while the task is sleeping,
        // re-schedule it and attempt to reacquire the mutex
        if (ret && !try_to_freeze())
            schedule();
    } while (ret);
    /* Only set owner/count after the kernel mutex is held. */
    mutex->owner = info;
    mutex->count = 1;
}
717
/* Releases one level of the recursive mutex; the kernel mutex is dropped
 * only when the recursion count reaches zero.
 * NOTE(review): no check that the caller is the owner — unlocking from a
 * non-owning thread corrupts count/owner. */
void NvOsMutexUnlock(NvOsMutexHandle mutex)
{
    NV_ASSERT( mutex );

    mutex->count--;
    if( mutex->count == 0 )
    {
        /* prevent the same thread from unlocking, then doing a recursive
         * lock (skip mutex_lock).
         */
        mutex->owner = 0;

        mutex_unlock( &mutex->mutex );
    }
}

/* Frees the mutex storage.  Assumes the mutex is not held. */
void NvOsMutexDestroy(NvOsMutexHandle mutex)
{

    if( !mutex )
        return;
    kfree( mutex );
}
741
/* Interrupt-safe mutex: a spinlock taken with IRQs disabled.
 * NOTE(review): the saved IRQ flags live in the shared struct, not on the
 * caller's stack — assumes no nested/concurrent Lock calls on the same
 * handle between Lock and Unlock. */
typedef struct NvOsIntrMutexRec
{
    spinlock_t lock;
    unsigned long flags; /* IRQ state saved by the most recent Lock */
} NvOsIntrMutex;

NvError NvOsIntrMutexCreate(NvOsIntrMutexHandle *mutex)
{
    NvOsIntrMutex *m;

    m = kzalloc( sizeof(NvOsIntrMutex), GFP_KERNEL );
    if( !m )
        return NvError_InsufficientMemory;

    spin_lock_init( &m->lock );
    *mutex = m;
    return NvSuccess;
}

/* Disables local IRQs and takes the spinlock. */
void NvOsIntrMutexLock(NvOsIntrMutexHandle mutex)
{
    NV_ASSERT( mutex );
    spin_lock_irqsave( &mutex->lock, mutex->flags );
}

/* Releases the spinlock and restores the IRQ state saved by Lock. */
void NvOsIntrMutexUnlock(NvOsIntrMutexHandle mutex)
{
    NV_ASSERT( mutex );
    spin_unlock_irqrestore( &mutex->lock, mutex->flags );
}

void NvOsIntrMutexDestroy(NvOsIntrMutexHandle mutex)
{
    if (mutex)
        kfree(mutex);
}
778
/* Plain spinlock-backed mutex (no IRQ masking — see NvOsIntrMutex for
 * the interrupt-safe variant). */
typedef struct NvOsSpinMutexRec
{
    spinlock_t lock;
} NvOsSpinMutex;

NvError NvOsSpinMutexCreate(NvOsSpinMutexHandle *mutex)
{
    NvOsSpinMutex *m;

    m = kzalloc( sizeof(NvOsSpinMutex), GFP_KERNEL );
    if( !m )
        return NvError_InsufficientMemory;

    spin_lock_init( &m->lock );
    *mutex = m;
    return NvSuccess;
}

void NvOsSpinMutexLock(NvOsSpinMutexHandle mutex)
{
    NV_ASSERT( mutex );
    spin_lock( &mutex->lock );
}

void NvOsSpinMutexUnlock(NvOsSpinMutexHandle mutex)
{
    NV_ASSERT( mutex );
    spin_unlock( &mutex->lock );
}

void NvOsSpinMutexDestroy(NvOsSpinMutexHandle mutex)
{
    if (mutex)
        kfree(mutex);
}
814
/* Reference-counted counting semaphore; Clone/Unmarshal share one object
 * and Destroy frees it only when the last reference drops. */
typedef struct NvOsSemaphoreRec
{
    struct semaphore sem;
    atomic_t refcount; /* starts at 1; incremented by Clone/Unmarshal */
} NvOsSemaphore;

/* Creates a semaphore with the given initial count. */
NvError NvOsSemaphoreCreate(
    NvOsSemaphoreHandle *semaphore,
    NvU32 value)
{
    NvOsSemaphore *s;

    s = kzalloc( sizeof(NvOsSemaphore), GFP_KERNEL );
    if( !s )
        return NvError_InsufficientMemory;

    sema_init( &s->sem, value );
    atomic_set( &s->refcount, 1 );

    *semaphore = s;

    return NvSuccess;
}

/* "Clones" by taking an extra reference on the same object. */
NvError NvOsSemaphoreClone(
    NvOsSemaphoreHandle orig,
    NvOsSemaphoreHandle *semaphore)
{
    NV_ASSERT( orig );
    NV_ASSERT( semaphore );

    atomic_inc( &orig->refcount );
    *semaphore = orig;

    return NvSuccess;
}

/* Kernel-side unmarshal of a client handle: same object, extra reference.
 * NOTE(review): hClientSema is used without validation here. */
NvError NvOsSemaphoreUnmarshal(
    NvOsSemaphoreHandle hClientSema,
    NvOsSemaphoreHandle *phDriverSema)
{
    NV_ASSERT( hClientSema );
    NV_ASSERT( phDriverSema );

    atomic_inc( &hClientSema->refcount );
    *phDriverSema = hClientSema;

    return NvSuccess;
}
864
/* Waits on the semaphore but lets a signal abort the wait; returns
 * down_interruptible's result (0 on acquire, -EINTR on signal). */
int NvOsSemaphoreWaitInterruptible(NvOsSemaphoreHandle semaphore);
int NvOsSemaphoreWaitInterruptible(NvOsSemaphoreHandle semaphore)
{
    NV_ASSERT(semaphore);

    return down_interruptible(&semaphore->sem);
}

/* Uninterruptible-by-contract wait: loops on down_interruptible,
 * rescheduling after each signal, until the semaphore is acquired. */
void NvOsSemaphoreWait(NvOsSemaphoreHandle semaphore)
{
    int ret;

    NV_ASSERT(semaphore);

    do
    {
        /* FIXME: We should split the implementation into two parts -
         * one for semaphore that were created by users ioctl'ing into
         * the nvos device (which need down_interruptible), and others that
         * are created and used by the kernel drivers, which do not */
        ret = down_interruptible(&semaphore->sem);
        /* The kernel doesn't reschedule tasks
         * that have pending signals. If a signal
         * is pending, forcibly reschedule the task.
         */
        if (ret && !try_to_freeze())
            schedule();
    } while (ret);
}
894
/* Waits up to msec milliseconds for the semaphore.
 * msec == NV_WAIT_INFINITE waits forever; msec == 0 is a try-lock.
 * Returns NvSuccess on acquire, NvError_Timeout on timeout, and
 * NvError_AccessDenied for any other down_timeout failure. */
NvError NvOsSemaphoreWaitTimeout(
    NvOsSemaphoreHandle semaphore,
    NvU32 msec)
{
    int t;

    NV_ASSERT( semaphore );

    /* Release builds: tolerate a NULL handle instead of crashing. */
    if (!semaphore)
        return NvError_Timeout;

    if (msec==NV_WAIT_INFINITE)
    {
        NvOsSemaphoreWait(semaphore);
        return NvSuccess;
    }
    else if (msec==0)
    {
        /* down_trylock returns 0 on success. */
        t = down_trylock(&semaphore->sem);
        if (!t)
            return NvSuccess;
        else
            return NvError_Timeout;
    }

    /* FIXME: The kernel doesn't provide an interruptible timed
     * semaphore wait, which would be preferable for our the ioctl'd
     * NvOs sempahores. */
    t = down_timeout(&semaphore->sem, (long)msecs_to_jiffies( msec ));

    if (t == -ETIME)
        return NvError_Timeout;
    else if (!t)
        return NvSuccess;

    return NvError_AccessDenied;
}
932
/* Increments the semaphore, waking one waiter if any. */
void NvOsSemaphoreSignal(NvOsSemaphoreHandle semaphore)
{
    NV_ASSERT( semaphore );

    up( &semaphore->sem );
}

/* Drops one reference; frees the object when the count reaches zero
 * (references are added by Clone/Unmarshal). */
void NvOsSemaphoreDestroy(NvOsSemaphoreHandle semaphore)
{
    if (!semaphore)
        return;

    if( atomic_dec_return( &semaphore->refcount ) == 0 )
        kfree( semaphore );
}
948
/* Cooperative threading mode is not supported in the kernel port. */
NvError NvOsThreadMode(int coop)
{
    return NvError_NotImplemented;
}

/* An NvOs thread: the backing kthread plus the client entry point/arg. */
typedef struct NvOsThreadRec
{
    struct task_struct *task;
    NvOsThreadFunction func; /* client entry point */
    void *arg;               /* argument passed to func */
} NvOsThread;

/* kthread entry point: unpacks the NvOsThread and runs the client func. */
static int thread_wrapper( void *arg )
{
    NvOsThread *t = (NvOsThread *)arg;
    t->func(t->arg);
    return 0;
}
967
/* Common implementation for NvOsThreadCreate and
 * NvOsInterruptPriorityThreadCreate.  Creates a kthread running
 * thread_wrapper, optionally elevates it to SCHED_FIFO just above the
 * IRQ-thread priority, then wakes it.  On kthread_create failure the
 * NvOsThread is freed and NvError_InsufficientMemory returned. */
static NvError NvOsThreadCreateInternal(
    NvOsThreadFunction function,
    void *args,
    NvOsThreadHandle *thread,
    NvBool elevatedPriority)
{
    NvError e;
    NvOsThread *t = 0;
    /* Monotonic counter used only to give each kthread a unique name. */
    static NvU32 NvOsKernelThreadIndex = 0;
    struct sched_param sched;
    int scheduler;
    NvU32 ThreadId;

    t = kzalloc( sizeof(NvOsThread), GFP_KERNEL );
    if( !t )
    {
        return NvError_InsufficientMemory;
    }

    t->func = function;
    t->arg = args;

    /* Atomic post-increment: returns the pre-add value. */
    ThreadId = (NvU32)NvOsAtomicExchangeAdd32((NvS32*)&NvOsKernelThreadIndex,1);
    t->task =
        kthread_create(thread_wrapper, t, "NvOsKernelThread/%d", ThreadId);

    if(IS_ERR(t->task))
    {
        e = NvError_InsufficientMemory;
        goto fail;
    }

    if (elevatedPriority)
    {
        scheduler = SCHED_FIFO;
        sched.sched_priority = KTHREAD_IRQ_PRIO+1;
    }
    else
    {
        scheduler = SCHED_NORMAL;
        sched.sched_priority = 0;
    }

    /* Priority-set failure is non-fatal: the thread still runs. */
    if (sched_setscheduler_nocheck( t->task, scheduler, &sched ) < 0)
        NvOsDebugPrintf("Failed to set task priority to %d\n",
            sched.sched_priority);

    /* Publish the handle before the thread starts running. */
    *thread = t;
    wake_up_process( t->task );
    e = NvSuccess;
    goto clean;

fail:
    kfree( t );

clean:
    return e;
}
1026
1027
/* Creates a thread at elevated (SCHED_FIFO, above-IRQ-thread) priority. */
NvError NvOsInterruptPriorityThreadCreate(
    NvOsThreadFunction function,
    void *args,
    NvOsThreadHandle *thread)
{
    return NvOsThreadCreateInternal(function, args, thread, NV_TRUE);
}

/* Creates a thread at normal scheduler priority. */
NvError NvOsThreadCreate(
    NvOsThreadFunction function,
    void *args,
    NvOsThreadHandle *thread)
{
    return NvOsThreadCreateInternal(function, args, thread, NV_FALSE);
}
1043
/* Demotes the calling thread to the SCHED_IDLE class.  Returns
 * NvError_NotSupported if the scheduler change fails. */
NvError NvOsThreadSetLowPriority(void)
{
    struct sched_param sched;
    struct task_struct *curr;

    curr = get_current();
    sched.sched_priority = 0;

    if (unlikely(!curr))
        return NvError_NotInitialized;

    if (sched_setscheduler_nocheck( curr, SCHED_IDLE, &sched )<0)
    {
        NvOsDebugPrintf("Failed to set low priority for thread %p\n", curr);
        return NvError_NotSupported;
    }

    return NvSuccess;
}

/* Stops the kthread, waits for it to exit, and frees the handle.
 * NOTE(review): kthread_stop only returns once the thread function has
 * exited; the thread function itself never checks kthread_should_stop(),
 * so this relies on t->func having already returned. */
void NvOsThreadJoin(NvOsThreadHandle thread)
{
    if (!thread)
        return;

    (void)kthread_stop(thread->task);
    kfree(thread);
}

/* Yields the CPU to the scheduler. */
void NvOsThreadYield(void)
{
    schedule();
}
1077
/* Atomic compare-and-swap: writes NewValue iff *pTarget == OldValue;
 * returns the previous value either way.
 * NOTE(review): casting an arbitrary NvS32* to atomic_t* assumes matching
 * layout/alignment — true on this ARM kernel, but not portable. */
NvS32 NvOsAtomicCompareExchange32(
    NvS32 *pTarget,
    NvS32 OldValue,
    NvS32 NewValue)
{
    return atomic_cmpxchg( (atomic_t *)pTarget, OldValue, NewValue );
}

/* Atomically stores Value and returns the previous value. */
NvS32 NvOsAtomicExchange32(NvS32 *pTarget, NvS32 Value)
{
    return atomic_xchg( (atomic_t *)pTarget, Value );
}
1090
1091 NvS32 NvOsAtomicExchangeAdd32(NvS32 *pTarget, NvS32 Value)
1092 {
1093 NvS32 new;
1094 new = atomic_add_return( Value, (atomic_t *)pTarget );
1095 return new + (-Value);
1096 }
1097
/* Thread-local storage is not supported in the kernel port; these are
 * stubs. */

NvU32 NvOsTlsAlloc(void)
{
    return 0;
}

void NvOsTlsFree(NvU32 TlsIndex)
{
}

void *NvOsTlsGet(NvU32 TlsIndex)
{
    return 0;
}

void NvOsTlsSet(NvU32 TlsIndex, void *Value)
{
}
1115
/* Wall-clock time in milliseconds, truncated to 32 bits (wraps roughly
 * every 49.7 days). */
NvU32 NvOsGetTimeMS(void)
{
    struct timespec ts;
    s64 nsec;
    getnstimeofday(&ts);
    nsec = timespec_to_ns(&ts);
    /* do_div: 64-bit divide-in-place, required on 32-bit ARM. */
    do_div(nsec, 1000000);
    return (NvU32)nsec;
}
1125
1126 NvU64 NvOsGetTimeUS(void)
1127 {
1128 struct timespec ts;
1129 s64 nsec;
1130 getnstimeofday(&ts);
1131 nsec = timespec_to_ns(&ts);
1132 do_div(nsec, 1000);
1133 return (NvU32)nsec;
1134 }
1135
/* Cleans (writes back) the data cache over [start, start+length) so a
 * device sees CPU writes. */
void NvOsDataCacheWritebackRange(
    void *start,
    NvU32 length)
{
    dmac_map_area(start, length, DMA_TO_DEVICE);
}

/* Cleans and invalidates the data cache over the range: device sees CPU
 * writes, and the CPU will re-read device writes. */
void NvOsDataCacheWritebackInvalidateRange(
    void *start,
    NvU32 length)
{
    dmac_flush_range(start, (NvU8*)start+length);
}

/* Whole-I-cache invalidate: not implemented in this port. */
void NvOsInstrCacheInvalidate(void)
{
}

/* Makes the range coherent between D- and I-caches (e.g. after writing
 * code into memory). */
void NvOsInstrCacheInvalidateRange(
    void *start,
    NvU32 length)
{
    __cpuc_coherent_kern_range((unsigned long)start,
        (unsigned long)start+length);
}

/* Drains the CPU write buffer and syncs the outer (L2) cache so
 * write-combined writes reach memory. */
void NvOsFlushWriteCombineBuffer( void )
{
    dsb();
    outer_sync();
}
1167
/* Registers a set of IRQs as one NvOsInterruptBlock.
 *
 * pList is interpreted per IsUser: an array of NvOsSemaphoreHandle for
 * user-mode registrations (the IRQ just signals the semaphore) or an
 * array of NvOsInterruptHandler for kernel registrations (dispatched via
 * a kernel thread for GPIO IRQs, a tasklet otherwise).  Each IRQ slot in
 * s_pIrqList is claimed atomically; on any failure the loop-so-far is
 * unwound (threads stopped, tasklets killed, IRQs freed, slots released).
 *
 * If InterruptEnable is false, the caller must later call
 * NvOsInterruptEnable; note the reuse of `i` after the loop: when
 * InterruptEnable is set, i is reset to 0 so the trailing loop enables
 * every IRQ, otherwise i == IrqListSize and the trailing loop is a no-op.
 *
 * NOTE(review): Flags is shared across all entries of the block, so the
 * per-IRQ thread/tasklet choice effectively ORs together; the cleanup
 * path also keys off the shared Flags for every entry.
 */
NvError NvOsInterruptRegisterInternal(
    NvU32 IrqListSize,
    const NvU32 *pIrqList,
    const void *pList,
    void* context,
    NvOsInterruptHandle *handle,
    NvBool InterruptEnable,
    NvBool IsUser)
{
    const NvOsSemaphoreHandle *pSemList = NULL;
    const NvOsInterruptHandler *pFnList = NULL;
    NvError e = NvSuccess;
    NvOsInterruptBlock *pNewBlock;
    NvU32 i;

    if (!IrqListSize)
        return NvError_BadValue;

    if (IsUser)
        pSemList = (const NvOsSemaphoreHandle *)pList;
    else
        pFnList = (const NvOsInterruptHandler *)pList;

    *handle = (NvOsInterruptHandle) 0;
    pNewBlock = (NvOsInterruptBlock *)NvOsAlloc(INTBLOCK_SIZE(IrqListSize));
    if (!pNewBlock)
        return NvError_InsufficientMemory;

    NvOsMemset(pNewBlock, 0, INTBLOCK_SIZE(IrqListSize));

    pNewBlock->pArg = context;
    pNewBlock->NumIrqs = IrqListSize;
    pNewBlock->Shutdown = 0;
    for (i=0; i<IrqListSize; i++)
    {
        if (pIrqList[i] >= NVOS_MAX_SYSTEM_IRQS)
        {
            BUG();
            e = NvError_InsufficientMemory;
            goto clean_fail;
        }

        /* Atomically claim the global slot for this IRQ number. */
        if (NvOsAtomicCompareExchange32((NvS32*)&s_pIrqList[pIrqList[i]], 0,
                (NvS32)pNewBlock)!=0)
        {
            e = NvError_AlreadyAllocated;
            goto clean_fail;
        }
        snprintf(pNewBlock->IrqList[i].IrqName,
            sizeof(pNewBlock->IrqList[i].IrqName),
            "NvOsIrq%s%04d", (IsUser)?"User":"Kern", pIrqList[i]);

        pNewBlock->IrqList[i].Irq = pIrqList[i];

        /* HACK use threads for GPIO and tasklets for all other interrupts. */
        if (IsUser)
        {
            pNewBlock->IrqList[i].pSem = pSemList[i];
            pNewBlock->Flags |= NVOS_IRQ_IS_USER;
        }
        else
        {
            pNewBlock->IrqList[i].pHandler = pFnList[i];
            if (pIrqList[i] >= INT_GPIO_BASE)
                pNewBlock->Flags |= NVOS_IRQ_IS_KERNEL_THREAD;
            else
                pNewBlock->Flags |= NVOS_IRQ_IS_TASKLET;
        }

        if ((pNewBlock->Flags & NVOS_IRQ_TYPE_MASK)==NVOS_IRQ_IS_KERNEL_THREAD)
        {
            struct sched_param p;
            p.sched_priority = KTHREAD_IRQ_PRIO;
            sema_init(&(pNewBlock->IrqList[i].sem), 0);
            /* Thread data packs IRQ number (low 16) and list index
             * (high 16) — see NvOsInterruptThreadWrapper. */
            pNewBlock->IrqList[i].task =
                kthread_create(NvOsInterruptThreadWrapper,
                    (void *)((pIrqList[i]&0xffff) | ((i&0xffff)<<16)),
                    pNewBlock->IrqList[i].IrqName);
            if (sched_setscheduler(pNewBlock->IrqList[i].task,
                    SCHED_FIFO, &p)<0)
                NvOsDebugPrintf("Failed to elevate priority for IRQ %u\n",
                    pIrqList[i]);
            wake_up_process( pNewBlock->IrqList[i].task );
        }

        if ((pNewBlock->Flags & NVOS_IRQ_TYPE_MASK)==NVOS_IRQ_IS_TASKLET)
        {
            /* Same IRQ/index packing as the thread case. */
            tasklet_init(&pNewBlock->IrqList[i].Tasklet, NvOsTaskletWrapper,
                (pIrqList[i]&0xffff) | ((i&0xffff)<<16));
        }

        /* NvOs specifies that the interrupt handler is responsible for
         * re-enabling the interrupt. This is not the standard behavior
         * for Linux IRQs, so only interrupts which are installed through
         * NvOs will have the no-auto-enable flag specified
         */
        set_irq_flags(pIrqList[i], IRQF_VALID | IRQF_NOAUTOEN);

        /* dev_id is the list index, recovered in NvOsIrqWrapper. */
        if (request_irq(pIrqList[i], NvOsIrqWrapper,
                0, pNewBlock->IrqList[i].IrqName, (void*)i)!=0)
        {
            e = NvError_ResourceError;
            goto clean_fail;
        }
    }
    *handle = (NvOsInterruptHandle)pNewBlock;
    if (InterruptEnable)
    {
        pNewBlock->Flags |= NVOS_IRQ_IS_ENABLED;
        i = 0;
    }
    for ( ; i<IrqListSize; i++)
        enable_irq(pIrqList[i]);

    return NvSuccess;

clean_fail:
    /* Unwind every fully-initialized entry (indices < i). */
    while (i)
    {
        --i;
        if ((pNewBlock->Flags & NVOS_IRQ_TYPE_MASK)==NVOS_IRQ_IS_KERNEL_THREAD)
        {
            /* Wake the thread so kthread_stop can reap it. */
            up(&pNewBlock->IrqList[i].sem);
            (void)kthread_stop(pNewBlock->IrqList[i].task);
        }
        if ((pNewBlock->Flags & NVOS_IRQ_TYPE_MASK) == NVOS_IRQ_IS_TASKLET)
        {
            tasklet_kill(&pNewBlock->IrqList[i].Tasklet);
        }
        free_irq(pIrqList[i], (void*)i);
        set_irq_flags(pIrqList[i], IRQF_VALID);
        NvOsAtomicCompareExchange32((NvS32*)&s_pIrqList[pIrqList[i]],
            (NvS32)pNewBlock, 0);
    }
    *handle = NULL;
    NvOsFree(pNewBlock);

    return e;
}
1307
1308 NvError NvOsInterruptRegister(
1309 NvU32 IrqListSize,
1310 const NvU32 *pIrqList,
1311 const NvOsInterruptHandler *pIrqHandlerList,
1312 void *context,
1313 NvOsInterruptHandle *handle,
1314 NvBool InterruptEnable)
1315 {
1316 return NvOsInterruptRegisterInternal(IrqListSize, pIrqList,
1317 (const void*)pIrqHandlerList, context, handle,
1318 InterruptEnable, NV_FALSE);
1319 }
1320
1321 void NvOsInterruptUnregister(NvOsInterruptHandle handle)
1322 {
1323 NvOsInterruptBlock *pBlock = (NvOsInterruptBlock *)handle;
1324 NvU32 i;
1325
1326 if (!pBlock)
1327 return;
1328
1329 pBlock->Shutdown = 1;
1330
1331 for (i=0; i<pBlock->NumIrqs; i++)
1332 {
1333 free_irq(pBlock->IrqList[i].Irq, (void*)i);
1334 NvOsAtomicCompareExchange32(
1335 (NvS32*)&s_pIrqList[pBlock->IrqList[i].Irq], (NvS32)pBlock, 0);
1336
1337 if ((pBlock->Flags & NVOS_IRQ_TYPE_MASK) == NVOS_IRQ_IS_KERNEL_THREAD)
1338 {
1339 up(&pBlock->IrqList[i].sem);
1340 (void)kthread_stop(pBlock->IrqList[i].task);
1341 }
1342 if ((pBlock->Flags & NVOS_IRQ_TYPE_MASK) == NVOS_IRQ_IS_TASKLET)
1343 {
1344 tasklet_kill(&pBlock->IrqList[i].Tasklet);
1345 }
1346 set_irq_flags(pBlock->IrqList[i].Irq, IRQF_VALID);
1347 }
1348
1349 NvOsFree(pBlock);
1350 }
1351
1352 NvError NvOsInterruptEnable(NvOsInterruptHandle handle)
1353 {
1354 NvOsInterruptBlock *pBlock = (NvOsInterruptBlock *)handle;
1355 NvU32 i;
1356
1357 if (pBlock == NULL)
1358 BUG();
1359
1360 if (!(pBlock->Flags & NVOS_IRQ_IS_ENABLED))
1361 {
1362 pBlock->Flags |= NVOS_IRQ_IS_ENABLED;
1363 for (i=0; i<pBlock->NumIrqs; i++)
1364 enable_irq(pBlock->IrqList[i].Irq);
1365 }
1366
1367 return NvSuccess;
1368 }
1369
1370 void NvOsInterruptDone(NvOsInterruptHandle handle)
1371 {
1372 NvOsInterruptBlock *pBlock = (NvOsInterruptBlock *)handle;
1373 NvU32 i;
1374
1375 if (pBlock == NULL)
1376 BUG();
1377
1378 for (i=0; i<pBlock->NumIrqs; i++)
1379 enable_irq(pBlock->IrqList[i].Irq);
1380 }
1381
1382 void NvOsInterruptMask(NvOsInterruptHandle handle, NvBool mask)
1383 {
1384 NvOsInterruptBlock *pBlock = (NvOsInterruptBlock *)handle;
1385 NvU32 i;
1386
1387 if (pBlock == NULL)
1388 BUG();
1389
1390 if (mask)
1391 {
1392 for (i=0; i<pBlock->NumIrqs; i++)
1393 disable_irq(pBlock->IrqList[i].Irq);
1394 }
1395 else
1396 {
1397 for (i=0; i<pBlock->NumIrqs; i++)
1398 enable_irq(pBlock->IrqList[i].Irq);
1399 }
1400 }
1401
/* Profiling is not supported in the kernel build; intentional no-op
 * stub kept so the NvOs API surface stays complete. */
void NvOsProfileApertureSizes(NvU32 *apertures, NvU32 *sizes)
{
}
1405
/* Intentional no-op: kernel build does not implement NvOs profiling. */
void NvOsProfileStart(void **apertures)
{
}
1409
/* Intentional no-op: kernel build does not implement NvOs profiling. */
void NvOsProfileStop(void **apertures)
{
}
1413
/* Profiling data cannot be written in the kernel build; always reports
 * NvError_NotImplemented to the caller. */
NvError NvOsProfileWrite(
    NvOsFileHandle file, NvU32 index,
    void *aperture)
{
    return NvError_NotImplemented;
}
1420
/* Boot arguments are read-only once the kernel is running (they are
 * captured from ATAGs at boot); setting them is not implemented. */
NvError NvOsBootArgSet(NvU32 key, void *arg, NvU32 size)
{
    return NvError_NotImplemented;
}
1425
1426 NvError NvOsBootArgGet(NvU32 key, void *arg, NvU32 size)
1427 {
1428 const void *src;
1429 NvU32 size_src;
1430
1431 if (key>=NvBootArgKey_PreservedMemHandle_0 &&
1432 key<NvBootArgKey_PreservedMemHandle_Num)
1433 {
1434 int Index = key - NvBootArgKey_PreservedMemHandle_0;
1435
1436 src = &s_BootArgs.MemHandleArgs[Index];
1437 size_src = sizeof(NvBootArgsPreservedMemHandle);
1438 }
1439 else
1440 {
1441 switch (key)
1442 {
1443 case NvBootArgKey_ChipShmoo:
1444 src = &s_BootArgs.ChipShmooArgs;
1445 size_src = sizeof(NvBootArgsChipShmoo);
1446 break;
1447 case NvBootArgKey_Framebuffer:
1448 src = &s_BootArgs.FramebufferArgs;
1449 size_src = sizeof(NvBootArgsFramebuffer);
1450 break;
1451 case NvBootArgKey_Display:
1452 src = &s_BootArgs.DisplayArgs;
1453 size_src = sizeof(NvBootArgsDisplay);
1454 break;
1455 case NvBootArgKey_Rm:
1456 src = &s_BootArgs.RmArgs;
1457 size_src = sizeof(NvBootArgsRm);
1458 break;
1459 case NvBootArgKey_ChipShmooPhys:
1460 src = &s_BootArgs.ChipShmooPhysArgs;
1461 size_src = sizeof(NvBootArgsChipShmooPhys);
1462 break;
1463 case NvBootArgKey_WarmBoot:
1464 src = &s_BootArgs.WarmbootArgs;
1465 size_src = sizeof(NvBootArgsWarmboot);
1466 break;
1467 default:
1468 src = NULL;
1469 size_src = 0;
1470 break;
1471 }
1472 }
1473
1474 if (!arg || !src || (size_src!=size))
1475 return NvError_BadParameter;
1476
1477 NvOsMemcpy(arg, src, size_src);
1478 return NvSuccess;
1479 }
1480
1481 /** nvassert functions */
1482
1483 void NvOsBreakPoint(const char* file, NvU32 line, const char* condition)
1484 {
1485 printk( "assert: %s:%d: %s\n", file, line, (condition) ? condition : " " );
1486 dump_stack();
1487 }
1488
1489 /** trace functions */
1490
/* Trace logging is not implemented in the kernel build; no-op stub. */
void NvOsTraceLogPrintf( const char *format, ... )
{

}
1495
/* Intentional no-op: trace logging is unsupported in this build. */
void NvOsTraceLogStart(void)
{
}
1499
/* Intentional no-op: trace logging is unsupported in this build. */
void NvOsTraceLogEnd(void)
{
}
1503
1504 /* resource tracking */
1505
#if NV_DEBUG
/* Debug-build leak-tracking variants of the allocator API.  The file
 * (f) and line (l) parameters exist for API compatibility with builds
 * that record allocation call sites; here they are ignored and each
 * call simply forwards to the plain allocator. */
void *NvOsAllocLeak( size_t size, const char *f, int l )
{
    return NvOsAlloc( size );
}

void *NvOsReallocLeak( void *ptr, size_t size, const char *f, int l )
{
    return NvOsRealloc( ptr, size );
}

void NvOsFreeLeak( void *ptr, const char *f, int l )
{
    NvOsFree( ptr );
}
#endif
1522
/* Format a short identification string for the current execution
 * context into buf (at most len bytes).  In the kernel build this is
 * just the pid of the current task. */
void NvOsGetProcessInfo(char* buf, NvU32 len)
{
    NvOsSnprintf(buf,len, "(kernel pid=%d)", current->pid);
}
1527
#if (NVOS_TRACE || NV_DEBUG)
/* Resource-tracking hook: records the file/line that allocated
 * userptr in builds that support it.  Not implemented here; stub kept
 * so trace/debug builds link. */
void NvOsSetResourceAllocFileLine(void* userptr, const char* file, int line)
{
}
#endif
1533
1534 #ifdef GHACK
1535
/* Early-boot ATAG parser for NVIDIA-specific boot arguments.
 *
 * The bootloader hands the kernel a series of ATAG_NVIDIA_TEGRA tags,
 * each carrying a key, a payload length and a payload.  Recognized
 * payloads are copied into the file-scope s_BootArgs structure so that
 * NvOsBootArgGet can hand them out later.  Every payload copy is
 * guarded by an exact sizeof() check so a bootloader/kernel struct
 * mismatch is logged rather than corrupting s_BootArgs.  The function
 * always returns 0 (tag handled) — on a length mismatch or unknown key
 * the corresponding argument simply stays zero-initialized.
 */
static int __init parse_tegra_tag(const struct tag *tag)
{
    const struct tag_nvidia_tegra *nvtag = &tag->u.tegra;

    /* Preserved memory handles occupy a contiguous key range and index
     * directly into the MemHandleArgs array. */
    if (nvtag->bootarg_key >= NvBootArgKey_PreservedMemHandle_0 &&
        nvtag->bootarg_key < NvBootArgKey_PreservedMemHandle_Num)
    {
        int Index = nvtag->bootarg_key - NvBootArgKey_PreservedMemHandle_0;
        NvBootArgsPreservedMemHandle *dst = &s_BootArgs.MemHandleArgs[Index];
        const NvBootArgsPreservedMemHandle *src =
            (const NvBootArgsPreservedMemHandle *)nvtag->bootarg;

        if (nvtag->bootarg_len != sizeof(NvBootArgsPreservedMemHandle))
            printk("Unexpected preserved memory handle tag length!\n");
        else
            *dst = *src;
        return 0;
    }

    switch (nvtag->bootarg_key)
    {
    case NvBootArgKey_ChipShmoo:
        {
            NvBootArgsChipShmoo *dst = &s_BootArgs.ChipShmooArgs;
            const NvBootArgsChipShmoo *src =
                (const NvBootArgsChipShmoo *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsChipShmoo))
                printk("Unexpected shmoo tag length!\n");
            else
            {
                printk("Shmoo tag with %u handle\n",
                       src->MemHandleKey);
                *dst = *src;
            }
            return 0;
        }
    case NvBootArgKey_Display:
        {
            NvBootArgsDisplay *dst = &s_BootArgs.DisplayArgs;
            const NvBootArgsDisplay *src =
                (const NvBootArgsDisplay *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsDisplay))
                printk("Unexpected display tag length!\n");
            else
                *dst = *src;
            return 0;
        }
    case NvBootArgKey_Framebuffer:
        {
            NvBootArgsFramebuffer *dst = &s_BootArgs.FramebufferArgs;
            const NvBootArgsFramebuffer *src =
                (const NvBootArgsFramebuffer *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsFramebuffer))
                printk("Unexpected framebuffer tag length!\n");
            else
            {
                printk("Framebuffer tag with %u handle\n",
                       src->MemHandleKey);
                *dst = *src;
            }
            return 0;
        }
    case NvBootArgKey_Rm:
        {
            NvBootArgsRm *dst = &s_BootArgs.RmArgs;
            const NvBootArgsRm *src =
                (const NvBootArgsRm *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsRm))
                printk("Unexpected RM tag length!\n");
            else
                *dst = *src;
            return 0;
        }
    case NvBootArgKey_ChipShmooPhys:
        {
            NvBootArgsChipShmooPhys *dst = &s_BootArgs.ChipShmooPhysArgs;
            const NvBootArgsChipShmooPhys *src =
                (const NvBootArgsChipShmooPhys *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsChipShmooPhys))
                printk("Unexpected phys shmoo tag length!\n");
            else
            {
                printk("Phys shmoo tag with pointer 0x%X and length %u\n",
                       src->PhysShmooPtr, src->Size);
                *dst = *src;
            }
            return 0;
        }
    case NvBootArgKey_WarmBoot:
        {
            NvBootArgsWarmboot *dst = &s_BootArgs.WarmbootArgs;
            const NvBootArgsWarmboot *src =
                (const NvBootArgsWarmboot *)nvtag->bootarg;

            if (nvtag->bootarg_len != sizeof(NvBootArgsWarmboot))
                printk("Unexpected warmboot tag length!\n");
            else
            {
                printk("Found a warmboot tag!\n");
                *dst = *src;
            }
            return 0;
        }

    default:
        /* Unknown key: ignore silently so newer bootloaders can pass
         * tags this kernel does not understand. */
        return 0;
    }
}
1649 __tagtable(ATAG_NVIDIA_TEGRA, parse_tegra_tag);
1650
/* Forward declaration so the definition below has a visible prototype
 * (keeps -Wmissing-prototypes quiet). */
void __init tegra_nvos_kernel_init(void);

/* One-time NvOs kernel setup, called during early boot: initializes
 * the global spinlock used by the NvOs primitives in this file. */
void __init tegra_nvos_kernel_init(void)
{
    spin_lock_init(&gs_NvOsSpinLock);
}
1657 #endif
OLDNEW
« no previous file with comments | « arch/arm/mach-tegra/nv/nvos/Makefile ('k') | arch/arm/mach-tegra/nv/nvos/nvos_exports.c » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698