OLD | NEW |
(Empty) | |
| 1 //------------------------------------------------------------------------------ |
| 2 // <copyright file="ar6k.c" company="Atheros"> |
| 3 // Copyright (c) 2007-2008 Atheros Corporation. All rights reserved. |
| 4 // |
| 5 // This program is free software; you can redistribute it and/or modify |
| 6 // it under the terms of the GNU General Public License version 2 as |
| 7 // published by the Free Software Foundation; |
| 8 // |
| 9 // Software distributed under the License is distributed on an "AS |
| 10 // IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or |
| 11 // implied. See the License for the specific language governing |
| 12 // rights and limitations under the License. |
| 13 // |
| 14 // |
| 15 //------------------------------------------------------------------------------ |
| 16 //============================================================================== |
| 17 // AR6K device layer that handles register level I/O |
| 18 // |
| 19 // Author(s): ="Atheros" |
| 20 //============================================================================== |
| 21 #include "a_config.h" |
| 22 #include "athdefs.h" |
| 23 #include "a_types.h" |
| 24 #include "AR6002/hw2.0/hw/mbox_host_reg.h" |
| 25 #include "a_osapi.h" |
| 26 #include "../htc_debug.h" |
| 27 #include "hif.h" |
| 28 #include "htc_packet.h" |
| 29 #include "ar6k.h" |
| 30 |
| 31 #define MAILBOX_FOR_BLOCK_SIZE 1 |
| 32 |
| 33 A_STATUS DevEnableInterrupts(AR6K_DEVICE *pDev); |
| 34 A_STATUS DevDisableInterrupts(AR6K_DEVICE *pDev); |
| 35 |
| 36 static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev); |
| 37 |
| 38 void AR6KFreeIOPacket(AR6K_DEVICE *pDev, HTC_PACKET *pPacket) |
| 39 { |
| 40 LOCK_AR6K(pDev); |
| 41 HTC_PACKET_ENQUEUE(&pDev->RegisterIOList,pPacket); |
| 42 UNLOCK_AR6K(pDev); |
| 43 } |
| 44 |
| 45 HTC_PACKET *AR6KAllocIOPacket(AR6K_DEVICE *pDev) |
| 46 { |
| 47 HTC_PACKET *pPacket; |
| 48 |
| 49 LOCK_AR6K(pDev); |
| 50 pPacket = HTC_PACKET_DEQUEUE(&pDev->RegisterIOList); |
| 51 UNLOCK_AR6K(pDev); |
| 52 |
| 53 return pPacket; |
| 54 } |
| 55 |
/* Tear down the AR6K device layer (reverse of DevSetup()).
 * Releases GMBOX state, detaches the HTC callbacks from the HIF layer,
 * frees any virtual-scatter requests, and deletes the device lock.
 * Each step is guarded, so this is safe on a partially set up device. */
void DevCleanup(AR6K_DEVICE *pDev)
{
    DevCleanupGMbox(pDev);

    if (pDev->HifAttached) {
        /* unhook our completion/DSR callbacks from the HIF layer */
        HIFDetachHTC(pDev->HIFDevice);
        pDev->HifAttached = FALSE;
    }

    DevCleanupVirtualScatterSupport(pDev);

    if (A_IS_MUTEX_VALID(&pDev->Lock)) {
        A_MUTEX_DELETE(&pDev->Lock);
    }
}
| 71 |
/* One-time setup of the AR6K device layer.
 *
 * Initializes the lock, free lists and register-I/O packet pool, attaches
 * the device-layer callbacks to the HIF layer, then queries the HIF for
 * mailbox addresses, block sizes and optional capability overrides
 * (pending-events function, IRQ processing mode, recv mask/unmask function).
 * Finishes by disabling target interrupts and setting up the GMBOX.
 *
 * Returns A_OK on success; on any failure the HIF attach is undone so the
 * caller can retry cleanly (other partial state is torn down by DevCleanup). */
A_STATUS DevSetup(AR6K_DEVICE *pDev)
{
    A_UINT32 blocksizes[AR6K_MAILBOXES];
    A_STATUS status = A_OK;
    int      i;
    HTC_CALLBACKS htcCallbacks;

    do {

        DL_LIST_INIT(&pDev->ScatterReqHead);
        /* initialize our free list of IO packets */
        INIT_HTC_PACKET_QUEUE(&pDev->RegisterIOList);
        A_MUTEX_INIT(&pDev->Lock);

        A_MEMZERO(&htcCallbacks, sizeof(HTC_CALLBACKS));
        /* the device layer handles these */
        htcCallbacks.rwCompletionHandler = DevRWCompletionHandler;
        htcCallbacks.dsrHandler = DevDsrHandler;
        htcCallbacks.context = pDev;

        status = HIFAttachHTC(pDev->HIFDevice, &htcCallbacks);

        if (A_FAILED(status)) {
            break;
        }

        pDev->HifAttached = TRUE;

        /* get the addresses for all 4 mailboxes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR,
                                    &pDev->MailBoxInfo, sizeof(pDev->MailBoxInfo));

        if (status != A_OK) {
            A_ASSERT(FALSE);
            break;
        }

        /* carve up register I/O packets (these are for ASYNC register I/O ) */
        for (i = 0; i < AR6K_MAX_REG_IO_BUFFERS; i++) {
            HTC_PACKET *pIOPacket;
            pIOPacket = &pDev->RegIOBuffers[i].HtcPacket;
            SET_HTC_PACKET_INFO_RX_REFILL(pIOPacket,
                                          pDev,
                                          pDev->RegIOBuffers[i].Buffer,
                                          AR6K_REG_IO_BUFFER_SIZE,
                                          0); /* don't care */
            /* seed the free pool with this pre-allocated packet */
            AR6KFreeIOPacket(pDev,pIOPacket);
        }

        /* get the block sizes */
        status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
                                    blocksizes, sizeof(blocksizes));

        if (status != A_OK) {
            A_ASSERT(FALSE);
            break;
        }

        /* note: we actually get the block size of a mailbox other than 0, for SDIO the block
         * size on mailbox 0 is artificially set to 1.  So we use the block size that is set
         * for the other 3 mailboxes */
        pDev->BlockSize = blocksizes[MAILBOX_FOR_BLOCK_SIZE];
        /* must be a power of 2 */
        A_ASSERT((pDev->BlockSize & (pDev->BlockSize - 1)) == 0);

        /* assemble mask, used for padding to a block */
        pDev->BlockMask = pDev->BlockSize - 1;

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("BlockSize: %d, MailboxAddress:0x%X \n",
                    pDev->BlockSize, pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX]));

        pDev->GetPendingEventsFunc = NULL;
        /* see if the HIF layer implements the get pending events function */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
                           &pDev->GetPendingEventsFunc,
                           sizeof(pDev->GetPendingEventsFunc));

        /* assume we can process HIF interrupt events asynchronously */
        pDev->HifIRQProcessingMode = HIF_DEVICE_IRQ_ASYNC_SYNC;

        /* see if the HIF layer overrides this assumption */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_IRQ_PROC_MODE,
                           &pDev->HifIRQProcessingMode,
                           sizeof(pDev->HifIRQProcessingMode));

        switch (pDev->HifIRQProcessingMode) {
            case HIF_DEVICE_IRQ_SYNC_ONLY:
                AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("HIF Interrupt processing is SYNC ONLY\n"));
                /* see if HIF layer wants HTC to yield */
                HIFConfigureDevice(pDev->HIFDevice,
                                   HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
                                   &pDev->HifIRQYieldParams,
                                   sizeof(pDev->HifIRQYieldParams));

                if (pDev->HifIRQYieldParams.RecvPacketYieldCount > 0) {
                    AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
                        ("HIF requests that DSR yield per %d RECV packets \n",
                        pDev->HifIRQYieldParams.RecvPacketYieldCount));
                    pDev->DSRCanYield = TRUE;
                }
                break;
            case HIF_DEVICE_IRQ_ASYNC_SYNC:
                AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF Interrupt processing is ASYNC and SYNC\n"));
                break;
            default:
                /* unknown IRQ processing mode from the HIF layer */
                A_ASSERT(FALSE);
        }

        pDev->HifMaskUmaskRecvEvent = NULL;

        /* see if the HIF layer implements the mask/unmask recv events function */
        HIFConfigureDevice(pDev->HIFDevice,
                           HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
                           &pDev->HifMaskUmaskRecvEvent,
                           sizeof(pDev->HifMaskUmaskRecvEvent));

        AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("HIF special overrides : 0x%X , 0x%X\n",
                 (A_UINT32)pDev->GetPendingEventsFunc, (A_UINT32)pDev->HifMaskUmaskRecvEvent));

        /* start with target interrupts off until HTC is ready for them */
        status = DevDisableInterrupts(pDev);

        if (A_FAILED(status)) {
            break;
        }

        status = DevSetupGMbox(pDev);

    } while (FALSE);

    if (A_FAILED(status)) {
        /* undo the HIF attach so callbacks cannot fire into a dead device */
        if (pDev->HifAttached) {
            HIFDetachHTC(pDev->HIFDevice);
            pDev->HifAttached = FALSE;
        }
    }

    return status;

}
| 213 |
/* Enable the AR6K interrupt sources by programming the target's interrupt
 * enable registers (error, CPU, counter and - normally - mailbox data).
 *
 * The shadow copy in pDev->IrqEnableRegisters is updated under the device
 * lock, then copied to a stack buffer so the (potentially slow) bus write
 * happens outside the lock.
 *
 * Returns the HIFReadWrite status of the synchronous register write. */
A_STATUS DevEnableInterrupts(AR6K_DEVICE *pDev)
{
    A_STATUS status;
    AR6K_IRQ_ENABLE_REGISTERS regs;

    LOCK_AR6K(pDev);

    /* Enable all the interrupts except for the internal AR6000 CPU interrupt */
    pDev->IrqEnableRegisters.int_status_enable = INT_STATUS_ENABLE_ERROR_SET(0x01) |
                                                 INT_STATUS_ENABLE_CPU_SET(0x01) |
                                                 INT_STATUS_ENABLE_COUNTER_SET(0x01);

    if (NULL == pDev->GetPendingEventsFunc) {
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        /* The HIF layer provided us with a pending events function which means that
         * the detection of pending mbox messages is handled in the HIF layer.
         * This is the case for the SPI2 interface.
         * In the normal case we enable MBOX interrupts, for the case
         * with HIFs that offer this mechanism, we keep these interrupts
         * masked */
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }


    /* Set up the CPU Interrupt Status Register */
    pDev->IrqEnableRegisters.cpu_int_status_enable = CPU_INT_STATUS_ENABLE_BIT_SET(0x00);

    /* Set up the Error Interrupt Status Register */
    pDev->IrqEnableRegisters.error_status_enable =
        ERROR_STATUS_ENABLE_RX_UNDERFLOW_SET(0x01) |
        ERROR_STATUS_ENABLE_TX_OVERFLOW_SET(0x01);

    /* Set up the Counter Interrupt Status Register (only for debug interrupt to catch fatal errors) */
    pDev->IrqEnableRegisters.counter_int_status_enable =
        COUNTER_INT_STATUS_ENABLE_BIT_SET(AR6K_TARGET_DEBUG_INTR_MASK);

    /* copy into our temp area so the bus write runs without the lock held */
    A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

    /* always synchronous */
    status = HIFReadWrite(pDev->HIFDevice,
                          INT_STATUS_ENABLE_ADDRESS,
                          &regs.int_status_enable,
                          AR6K_IRQ_ENABLE_REGS_SIZE,
                          HIF_WR_SYNC_BYTE_INC,
                          NULL);

    if (status != A_OK) {
        /* Can't write it for some reason */
        AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
                        ("Failed to update interrupt control registers err: %d\n", status));

    }

    return status;
}
| 273 |
/* Disable all AR6K interrupt sources by zeroing every interrupt enable
 * register on the target.
 *
 * As in DevEnableInterrupts(), the shadow registers are updated under the
 * device lock and a stack copy is written out after the lock is dropped.
 *
 * Returns the HIFReadWrite status of the synchronous register write. */
A_STATUS DevDisableInterrupts(AR6K_DEVICE *pDev)
{
    AR6K_IRQ_ENABLE_REGISTERS regs;

    LOCK_AR6K(pDev);
    /* Disable all interrupts */
    pDev->IrqEnableRegisters.int_status_enable = 0;
    pDev->IrqEnableRegisters.cpu_int_status_enable = 0;
    pDev->IrqEnableRegisters.error_status_enable = 0;
    pDev->IrqEnableRegisters.counter_int_status_enable = 0;
    /* copy into our temp area so the bus write runs without the lock held */
    A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);

    UNLOCK_AR6K(pDev);

    /* always synchronous */
    return HIFReadWrite(pDev->HIFDevice,
                        INT_STATUS_ENABLE_ADDRESS,
                        &regs.int_status_enable,
                        AR6K_IRQ_ENABLE_REGS_SIZE,
                        HIF_WR_SYNC_BYTE_INC,
                        NULL);
}
| 297 |
/* enable device interrupts */
A_STATUS DevUnmaskInterrupts(AR6K_DEVICE *pDev)
{
    /* for good measure, make sure interrupt are disabled before unmasking at the HIF
     * layer.
     * The rationale here is that between device insertion (where we clear the interrupts the first time)
     * and when HTC is finally ready to handle interrupts, other software can perform target "soft" resets.
     * The AR6K interrupt enables reset back to an "enabled" state when this happens.
     * */
    DevDisableInterrupts(pDev);

    /* Unmask the host controller interrupts */
    HIFUnMaskInterrupt(pDev->HIFDevice);

    /* finally enable the target-side interrupt sources; its status is the
     * one reported to the caller */
    return DevEnableInterrupts(pDev);
}
| 314 |
/* disable all device interrupts */
A_STATUS DevMaskInterrupts(AR6K_DEVICE *pDev)
{
    /* mask the interrupt at the HIF layer, we don't want a stray interrupt taken while
     * we zero out our shadow registers in DevDisableInterrupts()*/
    HIFMaskInterrupt(pDev->HIFDevice);

    /* then clear the target-side enables; its status is returned */
    return DevDisableInterrupts(pDev);
}
| 324 |
| 325 /* callback when our fetch to enable/disable completes */ |
| 326 static void DevDoEnableDisableRecvAsyncHandler(void *Context, HTC_PACKET *pPacke
t) |
| 327 { |
| 328 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context; |
| 329 |
| 330 AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDoEnableDisableRecvAsyncHandler: (dev: 0
x%X)\n", (A_UINT32)pDev)); |
| 331 |
| 332 if (A_FAILED(pPacket->Status)) { |
| 333 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, |
| 334 (" Failed to disable receiver, status:%d \n", pPacket->Status)); |
| 335 } |
| 336 /* free this IO packet */ |
| 337 AR6KFreeIOPacket(pDev,pPacket); |
| 338 AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDoEnableDisableRecvAsyncHandler \n")); |
| 339 } |
| 340 |
| 341 /* disable packet reception (used in case the host runs out of buffers) |
| 342 * this is the "override" method when the HIF reports another methods to |
| 343 * disable recv events */ |
| 344 static A_STATUS DevDoEnableDisableRecvOverride(AR6K_DEVICE *pDev, A_BOOL EnableR
ecv, A_BOOL AsyncMode) |
| 345 { |
| 346 A_STATUS status = A_OK; |
| 347 HTC_PACKET *pIOPacket = NULL; |
| 348 |
| 349 AR_DEBUG_PRINTF(ATH_DEBUG_TRC,("DevDoEnableDisableRecvOverride: Enable:%d Mo
de:%d\n", |
| 350 EnableRecv,AsyncMode)); |
| 351 |
| 352 do { |
| 353 |
| 354 if (AsyncMode) { |
| 355 |
| 356 pIOPacket = AR6KAllocIOPacket(pDev); |
| 357 |
| 358 if (NULL == pIOPacket) { |
| 359 status = A_NO_MEMORY; |
| 360 A_ASSERT(FALSE); |
| 361 break; |
| 362 } |
| 363 |
| 364 /* stick in our completion routine when the I/O operation comple
tes */ |
| 365 pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler; |
| 366 pIOPacket->pContext = pDev; |
| 367 |
| 368 /* call the HIF layer override and do this asynchronously */ |
| 369 status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice, |
| 370 EnableRecv ? HIF_UNMASK_RECV :
HIF_MASK_RECV, |
| 371 pIOPacket); |
| 372 break; |
| 373 } |
| 374 |
| 375 /* if we get here we are doing it synchronously */ |
| 376 status = pDev->HifMaskUmaskRecvEvent(pDev->HIFDevice, |
| 377 EnableRecv ? HIF_UNMASK_RECV : HIF_
MASK_RECV, |
| 378 NULL); |
| 379 |
| 380 } while (FALSE); |
| 381 |
| 382 if (A_FAILED(status) && (pIOPacket != NULL)) { |
| 383 AR6KFreeIOPacket(pDev,pIOPacket); |
| 384 } |
| 385 |
| 386 return status; |
| 387 } |
| 388 |
/* disable packet reception (used in case the host runs out of buffers)
 * this is the "normal" method using the interrupt enable registers through
 * the host I/F */
static A_STATUS DevDoEnableDisableRecvNormal(AR6K_DEVICE *pDev, A_BOOL EnableRecv, A_BOOL AsyncMode)
{
    A_STATUS                  status = A_OK;
    HTC_PACKET                *pIOPacket = NULL;
    AR6K_IRQ_ENABLE_REGISTERS regs;

    /* take the lock to protect interrupt enable shadows */
    LOCK_AR6K(pDev);

    if (EnableRecv) {
        /* turn mailbox-data interrupts back on in the shadow copy */
        pDev->IrqEnableRegisters.int_status_enable |= INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    } else {
        pDev->IrqEnableRegisters.int_status_enable &= ~INT_STATUS_ENABLE_MBOX_DATA_SET(0x01);
    }

    /* copy into our temp area; the bus write happens outside the lock */
    A_MEMCPY(&regs,&pDev->IrqEnableRegisters,AR6K_IRQ_ENABLE_REGS_SIZE);
    UNLOCK_AR6K(pDev);

    do {

        if (AsyncMode) {

            pIOPacket = AR6KAllocIOPacket(pDev);

            if (NULL == pIOPacket) {
                status = A_NO_MEMORY;
                A_ASSERT(FALSE);
                break;
            }

            /* copy values to write to our async I/O buffer */
            A_MEMCPY(pIOPacket->pBuffer,&regs,AR6K_IRQ_ENABLE_REGS_SIZE);

            /* stick in our completion routine when the I/O operation completes */
            pIOPacket->Completion = DevDoEnableDisableRecvAsyncHandler;
            pIOPacket->pContext = pDev;

            /* write it out asynchronously; the return value is not captured
             * here - errors are reported through the completion callback.
             * NOTE(review): an immediate HIFReadWrite failure is assumed to
             * still invoke the completion - confirm against the HIF layer. */
            HIFReadWrite(pDev->HIFDevice,
                         INT_STATUS_ENABLE_ADDRESS,
                         pIOPacket->pBuffer,
                         AR6K_IRQ_ENABLE_REGS_SIZE,
                         HIF_WR_ASYNC_BYTE_INC,
                         pIOPacket);
            break;
        }

        /* if we get here we are doing it synchronously */

        status = HIFReadWrite(pDev->HIFDevice,
                              INT_STATUS_ENABLE_ADDRESS,
                              &regs.int_status_enable,
                              AR6K_IRQ_ENABLE_REGS_SIZE,
                              HIF_WR_SYNC_BYTE_INC,
                              NULL);

    } while (FALSE);

    /* on immediate failure, reclaim the async packet */
    if (A_FAILED(status) && (pIOPacket != NULL)) {
        AR6KFreeIOPacket(pDev,pIOPacket);
    }

    return status;
}
| 457 |
| 458 |
| 459 A_STATUS DevStopRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode) |
| 460 { |
| 461 if (NULL == pDev->HifMaskUmaskRecvEvent) { |
| 462 return DevDoEnableDisableRecvNormal(pDev,FALSE,AsyncMode); |
| 463 } else { |
| 464 return DevDoEnableDisableRecvOverride(pDev,FALSE,AsyncMode); |
| 465 } |
| 466 } |
| 467 |
| 468 A_STATUS DevEnableRecv(AR6K_DEVICE *pDev, A_BOOL AsyncMode) |
| 469 { |
| 470 if (NULL == pDev->HifMaskUmaskRecvEvent) { |
| 471 return DevDoEnableDisableRecvNormal(pDev,TRUE,AsyncMode); |
| 472 } else { |
| 473 return DevDoEnableDisableRecvOverride(pDev,TRUE,AsyncMode); |
| 474 } |
| 475 } |
| 476 |
/* Debug helper: print a snapshot of the interrupt processing and interrupt
 * enable registers.  Either pointer may be NULL to skip that group.
 * Output goes to the ATH_DEBUG_ANY debug stream; no state is modified. */
void DevDumpRegisters(AR6K_DEVICE               *pDev,
                      AR6K_IRQ_PROC_REGISTERS   *pIrqProcRegs,
                      AR6K_IRQ_ENABLE_REGISTERS *pIrqEnableRegs)
{

    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("\n<------- Register Table -------->\n"));

    if (pIrqProcRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Host Int Status:           0x%x\n",pIrqProcRegs->host_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("CPU Int Status:            0x%x\n",pIrqProcRegs->cpu_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Error Int Status:          0x%x\n",pIrqProcRegs->error_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status:        0x%x\n",pIrqProcRegs->counter_int_status));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Mbox Frame:                0x%x\n",pIrqProcRegs->mbox_frame));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead Valid:        0x%x\n",pIrqProcRegs->rx_lookahead_valid));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 0:            0x%x\n",pIrqProcRegs->rx_lookahead[0]));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Rx Lookahead 1:            0x%x\n",pIrqProcRegs->rx_lookahead[1]));

        if (pDev->MailBoxInfo.GMboxAddress != 0) {
            /* if the target supports GMBOX hardware, dump some additional state */
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX Host Int Status 2:   0x%x\n",pIrqProcRegs->host_int_status2));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX RX Avail:            0x%x\n",pIrqProcRegs->gmbox_rx_avail));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 0:   0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[0]));
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("GMBOX lookahead alias 1:   0x%x\n",pIrqProcRegs->rx_gmbox_lookahead_alias[1]));
        }

    }

    if (pIrqEnableRegs != NULL) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Int Status Enable:         0x%x\n",pIrqEnableRegs->int_status_enable));
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("Counter Int Status Enable: 0x%x\n",pIrqEnableRegs->counter_int_status_enable));
    }
    AR_DEBUG_PRINTF(ATH_DEBUG_ANY, ("<------------------------------->\n"));
}
| 524 |
| 525 |
| 526 #define DEV_GET_VIRT_DMA_INFO(p) ((DEV_SCATTER_DMA_VIRTUAL_INFO *)((p)->HIFPriv
ate[0])) |
| 527 |
| 528 static HIF_SCATTER_REQ *DevAllocScatterReq(HIF_DEVICE *Context) |
| 529 { |
| 530 DL_LIST *pItem; |
| 531 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context; |
| 532 LOCK_AR6K(pDev); |
| 533 pItem = DL_ListRemoveItemFromHead(&pDev->ScatterReqHead); |
| 534 UNLOCK_AR6K(pDev); |
| 535 if (pItem != NULL) { |
| 536 return A_CONTAINING_STRUCT(pItem, HIF_SCATTER_REQ, ListLink); |
| 537 } |
| 538 return NULL; |
| 539 } |
| 540 |
| 541 static void DevFreeScatterReq(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq) |
| 542 { |
| 543 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context; |
| 544 LOCK_AR6K(pDev); |
| 545 DL_ListInsertTail(&pDev->ScatterReqHead, &pReq->ListLink); |
| 546 UNLOCK_AR6K(pDev); |
| 547 } |
| 548 |
| 549 A_STATUS DevCopyScatterListToFromDMABuffer(HIF_SCATTER_REQ *pReq, A_BOOL FromDMA
) |
| 550 { |
| 551 A_UINT8 *pDMABuffer = NULL; |
| 552 int i, remaining; |
| 553 A_UINT32 length; |
| 554 |
| 555 pDMABuffer = pReq->pScatterBounceBuffer; |
| 556 |
| 557 if (pDMABuffer == NULL) { |
| 558 A_ASSERT(FALSE); |
| 559 return A_EINVAL; |
| 560 } |
| 561 |
| 562 remaining = (int)pReq->TotalLength; |
| 563 |
| 564 for (i = 0; i < pReq->ValidScatterEntries; i++) { |
| 565 |
| 566 length = min((int)pReq->ScatterList[i].Length, remaining); |
| 567 |
| 568 if (length != (int)pReq->ScatterList[i].Length) { |
| 569 A_ASSERT(FALSE); |
| 570 /* there is a problem with the scatter list */ |
| 571 return A_EINVAL; |
| 572 } |
| 573 |
| 574 if (FromDMA) { |
| 575 /* from DMA buffer */ |
| 576 A_MEMCPY(pReq->ScatterList[i].pBuffer, pDMABuffer , length); |
| 577 } else { |
| 578 /* to DMA buffer */ |
| 579 A_MEMCPY(pDMABuffer, pReq->ScatterList[i].pBuffer, length); |
| 580 } |
| 581 |
| 582 pDMABuffer += length; |
| 583 remaining -= length; |
| 584 } |
| 585 |
| 586 return A_OK; |
| 587 } |
| 588 |
| 589 static void DevReadWriteScatterAsyncHandler(void *Context, HTC_PACKET *pPacket) |
| 590 { |
| 591 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context; |
| 592 HIF_SCATTER_REQ *pReq = (HIF_SCATTER_REQ *)pPacket->pPktContext; |
| 593 |
| 594 AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevReadWriteScatterAsyncHandler: (dev: 0x%
X)\n", (A_UINT32)pDev)); |
| 595 |
| 596 pReq->CompletionStatus = pPacket->Status; |
| 597 |
| 598 AR6KFreeIOPacket(pDev,pPacket); |
| 599 |
| 600 pReq->CompletionRoutine(pReq); |
| 601 |
| 602 AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevReadWriteScatterAsyncHandler \n")); |
| 603 } |
| 604 |
| 605 static A_STATUS DevReadWriteScatter(HIF_DEVICE *Context, HIF_SCATTER_REQ *pReq) |
| 606 { |
| 607 AR6K_DEVICE *pDev = (AR6K_DEVICE *)Context; |
| 608 A_STATUS status = A_OK; |
| 609 HTC_PACKET *pIOPacket = NULL; |
| 610 A_UINT32 request = pReq->Request; |
| 611 |
| 612 do { |
| 613 |
| 614 if (pReq->TotalLength > AR6K_MAX_TRANSFER_SIZE_PER_SCATTER) { |
| 615 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, |
| 616 ("Invalid length: %d \n", pReq->TotalLength)); |
| 617 break; |
| 618 } |
| 619 |
| 620 if (pReq->TotalLength == 0) { |
| 621 A_ASSERT(FALSE); |
| 622 break; |
| 623 } |
| 624 |
| 625 if (request & HIF_ASYNCHRONOUS) { |
| 626 /* use an I/O packet to carry this request */ |
| 627 pIOPacket = AR6KAllocIOPacket(pDev); |
| 628 if (NULL == pIOPacket) { |
| 629 status = A_NO_MEMORY; |
| 630 break; |
| 631 } |
| 632 |
| 633 /* save the request */ |
| 634 pIOPacket->pPktContext = pReq; |
| 635 /* stick in our completion routine when the I/O operation comple
tes */ |
| 636 pIOPacket->Completion = DevReadWriteScatterAsyncHandler; |
| 637 pIOPacket->pContext = pDev; |
| 638 } |
| 639 |
| 640 if (request & HIF_WRITE) { |
| 641 /* in virtual DMA, we are issuing the requests through the legacy HI
FReadWrite API |
| 642 * this API will adjust the address automatically for the last byte
to fall on the mailbox |
| 643 * EOM. */ |
| 644 |
| 645 /* if the address is an extended address, we can adjust the address
here since the extended |
| 646 * address will bypass the normal checks in legacy HIF layers */ |
| 647 if (pReq->Address == pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].Extende
dAddress) { |
| 648 pReq->Address += pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].Extende
dSize - pReq->TotalLength; |
| 649 } |
| 650 } |
| 651 |
| 652 /* use legacy readwrite */ |
| 653 status = HIFReadWrite(pDev->HIFDevice, |
| 654 pReq->Address, |
| 655 DEV_GET_VIRT_DMA_INFO(pReq)->pVirtDmaBuffer, |
| 656 pReq->TotalLength, |
| 657 request, |
| 658 (request & HIF_ASYNCHRONOUS) ? pIOPacket : NULL); |
| 659 |
| 660 } while (FALSE); |
| 661 |
| 662 if ((status != A_PENDING) && A_FAILED(status) && (request & HIF_ASYNCHRONOUS
)) { |
| 663 if (pIOPacket != NULL) { |
| 664 AR6KFreeIOPacket(pDev,pIOPacket); |
| 665 } |
| 666 pReq->CompletionStatus = status; |
| 667 pReq->CompletionRoutine(pReq); |
| 668 status = A_OK; |
| 669 } |
| 670 |
| 671 return status; |
| 672 } |
| 673 |
| 674 |
| 675 static void DevCleanupVirtualScatterSupport(AR6K_DEVICE *pDev) |
| 676 { |
| 677 HIF_SCATTER_REQ *pReq; |
| 678 |
| 679 while (1) { |
| 680 pReq = DevAllocScatterReq((HIF_DEVICE *)pDev); |
| 681 if (NULL == pReq) { |
| 682 break; |
| 683 } |
| 684 A_FREE(pReq); |
| 685 } |
| 686 |
| 687 } |
| 688 |
/* function to set up virtual scatter support if HIF layer has not implemented the interface */
static A_STATUS DevSetupVirtualScatterSupport(AR6K_DEVICE *pDev)
{
    A_STATUS                     status = A_OK;
    int                          bufferSize, sgreqSize;
    int                          i;
    DEV_SCATTER_DMA_VIRTUAL_INFO *pVirtualInfo;
    HIF_SCATTER_REQ              *pReq;

    /* per-request bounce area: info struct + cache-line alignment slack +
     * the maximum transfer size */
    bufferSize = sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO) +
                 2 * (A_GET_CACHE_LINE_BYTES()) + AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;

    /* HIF_SCATTER_REQ already contains one HIF_SCATTER_ITEM, hence "- 1" */
    sgreqSize = sizeof(HIF_SCATTER_REQ) +
                (AR6K_SCATTER_ENTRIES_PER_REQ - 1) * (sizeof(HIF_SCATTER_ITEM));

    for (i = 0; i < AR6K_SCATTER_REQS; i++) {
        /* allocate the scatter request, buffer info and the actual virtual buffer itself
         * as one block, freed with a single A_FREE in cleanup */
        pReq = (HIF_SCATTER_REQ *)A_MALLOC(sgreqSize + bufferSize);

        if (NULL == pReq) {
            status = A_NO_MEMORY;
            break;
        }

        A_MEMZERO(pReq, sgreqSize);

        /* the virtual DMA starts after the scatter request struct */
        pVirtualInfo = (DEV_SCATTER_DMA_VIRTUAL_INFO *)((A_UINT8 *)pReq + sgreqSize);
        A_MEMZERO(pVirtualInfo, sizeof(DEV_SCATTER_DMA_VIRTUAL_INFO));

        pVirtualInfo->pVirtDmaBuffer = &pVirtualInfo->DataArea[0];
        /* align buffer to cache line in case host controller can actually DMA this */
        pVirtualInfo->pVirtDmaBuffer = A_ALIGN_TO_CACHE_LINE(pVirtualInfo->pVirtDmaBuffer);
        /* store the structure in the private area */
        pReq->HIFPrivate[0] = pVirtualInfo;
        /* we emulate a DMA bounce interface */
        pReq->ScatterMethod = HIF_SCATTER_DMA_BOUNCE;
        pReq->pScatterBounceBuffer = pVirtualInfo->pVirtDmaBuffer;
        /* free request to the list */
        DevFreeScatterReq((HIF_DEVICE *)pDev,pReq);
    }

    if (A_FAILED(status)) {
        /* release any requests that made it onto the free list */
        DevCleanupVirtualScatterSupport(pDev);
    } else {
        /* publish the emulated scatter interface; note the alloc/free/rw
         * functions take the AR6K_DEVICE as their context */
        pDev->HifScatterInfo.pAllocateReqFunc = DevAllocScatterReq;
        pDev->HifScatterInfo.pFreeReqFunc = DevFreeScatterReq;
        pDev->HifScatterInfo.pReadWriteScatterFunc = DevReadWriteScatter;
        pDev->HifScatterInfo.MaxScatterEntries = AR6K_SCATTER_ENTRIES_PER_REQ;
        pDev->HifScatterInfo.MaxTransferSizePerScatterReq = AR6K_MAX_TRANSFER_SIZE_PER_SCATTER;
        pDev->ScatterIsVirtual = TRUE;
    }

    return status;
}
| 744 |
| 745 |
/* Configure message bundling (scatter-gather transfers).
 *
 * First asks the HIF layer whether it natively supports scatter requests;
 * if not, falls back to the virtual (bounce-buffer) scatter emulation.
 * On success, computes the maximum recv and send bundle sizes from the
 * HIF transfer limit and the extended mailbox range.
 *
 * Note: MaxMsgsPerTransfer is not referenced in this function body. */
A_STATUS DevSetupMsgBundling(AR6K_DEVICE *pDev, int MaxMsgsPerTransfer)
{
    A_STATUS status;

    status = HIFConfigureDevice(pDev->HIFDevice,
                                HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
                                &pDev->HifScatterInfo,
                                sizeof(pDev->HifScatterInfo));

    if (A_FAILED(status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN,
            ("AR6K: ** HIF layer does not support scatter requests (%d) \n",status));

        /* we can try to use a virtual DMA scatter mechanism using legacy HIFReadWrite() */
        status = DevSetupVirtualScatterSupport(pDev);

        if (A_SUCCESS(status)) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
                ("AR6K: virtual scatter transfers enabled (max scatter items:%d: maxlen:%d) \n",
                    DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
        }

    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: HIF layer supports scatter requests (max scatter items:%d: maxlen:%d) \n",
                DEV_GET_MAX_MSG_PER_BUNDLE(pDev), DEV_GET_MAX_BUNDLE_LENGTH(pDev)));
    }

    if (A_SUCCESS(status)) {
        /* for the recv path, the maximum number of bytes per recv bundle is just limited
         * by the maximum transfer size at the HIF layer */
        pDev->MaxRecvBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;

        /* for the send path, the max transfer size is limited by the existence and size of
         * the extended mailbox address range */
        if (pDev->MailBoxInfo.MboxProp[0].ExtendedAddress != 0) {
            pDev->MaxSendBundleSize = pDev->MailBoxInfo.MboxProp[0].ExtendedSize;
        } else {
            /* legacy */
            pDev->MaxSendBundleSize = AR6K_LEGACY_MAX_WRITE_LENGTH;
        }

        if (pDev->MaxSendBundleSize > pDev->HifScatterInfo.MaxTransferSizePerScatterReq) {
            /* limit send bundle size to what the HIF can support for scatter requests */
            pDev->MaxSendBundleSize = pDev->HifScatterInfo.MaxTransferSizePerScatterReq;
        }

        AR_DEBUG_PRINTF(ATH_DEBUG_ANY,
            ("AR6K: max recv: %d max send: %d \n",
                DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev), DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev)));

    }
    return status;
}
| 800 |
/* Submit a scatter request to the target mailbox.
 *
 * Fills in the request type and mailbox address (extended address for
 * large writes; for native-scatter HIFs the write address is adjusted so
 * the last byte falls on the mailbox EOM), then dispatches to the
 * registered scatter read/write function - the virtual emulation when
 * ScatterIsVirtual is set, the native HIF path otherwise.
 *
 * Sync mode: the HIF status is stored in the request and returned.
 * Async mode: on a prepare failure the completion routine is invoked and
 * A_OK is returned; an A_PENDING dispatch result is also reported as A_OK. */
A_STATUS DevSubmitScatterRequest(AR6K_DEVICE *pDev, HIF_SCATTER_REQ *pScatterReq, A_BOOL Read, A_BOOL Async)
{
    A_STATUS status;

    if (Read) {
        /* read operation */
        pScatterReq->Request = (Async) ? HIF_RD_ASYNC_BLOCK_FIX : HIF_RD_SYNC_BLOCK_FIX;
        pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
        A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_RECV_LENGTH(pDev));
    } else {
        A_UINT32 mailboxWidth;

        /* write operation */
        pScatterReq->Request = (Async) ? HIF_WR_ASYNC_BLOCK_INC : HIF_WR_SYNC_BLOCK_INC;
        A_ASSERT(pScatterReq->TotalLength <= (A_UINT32)DEV_GET_MAX_BUNDLE_SEND_LENGTH(pDev));
        if (pScatterReq->TotalLength > AR6K_LEGACY_MAX_WRITE_LENGTH) {
            /* for large writes use the extended address */
            pScatterReq->Address = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedAddress;
            mailboxWidth = pDev->MailBoxInfo.MboxProp[HTC_MAILBOX].ExtendedSize;
        } else {
            pScatterReq->Address = pDev->MailBoxInfo.MboxAddresses[HTC_MAILBOX];
            mailboxWidth = AR6K_LEGACY_MAX_WRITE_LENGTH;
        }

        if (!pDev->ScatterIsVirtual) {
            /* we are passing this scatter list down to the HIF layer' scatter request handler, fixup the address
             * so that the last byte falls on the EOM, we do this for those HIFs that support the
             * scatter API */
            pScatterReq->Address += (mailboxWidth - pScatterReq->TotalLength);
        }

    }

    AR_DEBUG_PRINTF(ATH_DEBUG_RECV | ATH_DEBUG_SEND,
                ("DevSubmitScatterRequest, Entries: %d, Total Length: %d Mbox:0x%X (mode: %s : %s)\n",
                pScatterReq->ValidScatterEntries,
                pScatterReq->TotalLength,
                pScatterReq->Address,
                Async ? "ASYNC" : "SYNC",
                (Read) ? "RD" : "WR"));

    /* e.g. copy scatter entries into the bounce buffer for virtual DMA */
    status = DEV_PREPARE_SCATTER_OPERATION(pScatterReq);

    if (A_FAILED(status)) {
        if (Async) {
            /* async callers expect the error via the completion routine */
            pScatterReq->CompletionStatus = status;
            pScatterReq->CompletionRoutine(pScatterReq);
            return A_OK;
        }
        return status;
    }

    /* virtual scatter functions take the device itself as context,
     * native HIF scatter functions take the HIF device */
    status = pDev->HifScatterInfo.pReadWriteScatterFunc(pDev->ScatterIsVirtual ? pDev : pDev->HIFDevice,
                                                        pScatterReq);
    if (!Async) {
        /* in sync mode, we can touch the scatter request */
        pScatterReq->CompletionStatus = status;
        DEV_FINISH_SCATTER_OPERATION(pScatterReq);
    } else {
        if (status == A_PENDING) {
            status = A_OK;
        }
    }

    return status;
}
| 867 |
| 868 |
| 869 #ifdef MBOXHW_UNIT_TEST |
| 870 |
| 871 |
| 872 /* This is a mailbox hardware unit test that must be called in a schedulable con
text |
| 873 * This test is very simple, it will send a list of buffers with a counting patt
ern |
| 874 * and the target will invert the data and send the message back |
| 875 * |
| 876 * the unit test has the following constraints: |
| 877 * |
| 878 * The target has at least 8 buffers of 256 bytes each. The host will send |
| 879 * the following pattern of buffers in rapid succession : |
| 880 * |
| 881 * 1 buffer - 128 bytes |
| 882 * 1 buffer - 256 bytes |
| 883 * 1 buffer - 512 bytes |
| 884 * 1 buffer - 1024 bytes |
| 885 * |
| 886 * The host will send the buffers to one mailbox and wait for buffers to be refl
ected |
| 887 * back from the same mailbox. The target sends the buffers FIFO order. |
| 888 * Once the final buffer has been received for a mailbox, the next mailbox is te
sted. |
| 889 * |
| 890 * |
| 891  *        Note: To simplify the test, we assume that the chosen buffer sizes
| 892 * will fall on a nice block pad |
| 893 * |
| 894 * It is expected that higher-order tests will be written to stress the mailboxe
s using |
| 895 * a message-based protocol (with some performance timming) that can create more |
| 896 * randomness in the packets sent over mailboxes. |
| 897 * |
| 898 * */ |
| 899 |
| 900 #define A_ROUND_UP_PWR2(x, align) (((int) (x) + ((align)-1)) & ~((align)-1)) |
| 901 |
| 902 #define BUFFER_BLOCK_PAD 128 |
| 903 |
| 904 #if 0 |
| 905 #define BUFFER1 128 |
| 906 #define BUFFER2 256 |
| 907 #define BUFFER3 512 |
| 908 #define BUFFER4 1024 |
| 909 #endif |
| 910 |
| 911 #if 1 |
| 912 #define BUFFER1 80 |
| 913 #define BUFFER2 200 |
| 914 #define BUFFER3 444 |
| 915 #define BUFFER4 800 |
| 916 #endif |
| 917 |
| 918 #define TOTAL_BYTES (A_ROUND_UP_PWR2(BUFFER1,BUFFER_BLOCK_PAD) + \ |
| 919 A_ROUND_UP_PWR2(BUFFER2,BUFFER_BLOCK_PAD) + \ |
| 920 A_ROUND_UP_PWR2(BUFFER3,BUFFER_BLOCK_PAD) + \ |
| 921 A_ROUND_UP_PWR2(BUFFER4,BUFFER_BLOCK_PAD) ) |
| 922 |
| 923 #define TEST_BYTES (BUFFER1 + BUFFER2 + BUFFER3 + BUFFER4) |
| 924 |
| 925 #define TEST_CREDITS_RECV_TIMEOUT 100 |
| 926 |
| 927 static A_UINT8 g_Buffer[TOTAL_BYTES]; |
| 928 static A_UINT32 g_MailboxAddrs[AR6K_MAILBOXES]; |
| 929 static A_UINT32 g_BlockSizes[AR6K_MAILBOXES]; |
| 930 |
| 931 #define BUFFER_PROC_LIST_DEPTH 4 |
| 932 |
| 933 typedef struct _BUFFER_PROC_LIST{ |
| 934 A_UINT8 *pBuffer; |
| 935 A_UINT32 length; |
| 936 }BUFFER_PROC_LIST; |
| 937 |
| 938 |
| 939 #define PUSH_BUFF_PROC_ENTRY(pList,len,pCurrpos) \ |
| 940 { \ |
| 941 (pList)->pBuffer = (pCurrpos); \ |
| 942 (pList)->length = (len); \ |
| 943 (pCurrpos) += (len); \ |
| 944 (pList)++; \ |
| 945 } |
| 946 |
| 947 /* a simple and crude way to send different "message" sizes */ |
| 948 static void AssembleBufferList(BUFFER_PROC_LIST *pList) |
| 949 { |
| 950 A_UINT8 *pBuffer = g_Buffer; |
| 951 |
| 952 #if BUFFER_PROC_LIST_DEPTH < 4 |
| 953 #error "Buffer processing list depth is not deep enough!!" |
| 954 #endif |
| 955 |
| 956 PUSH_BUFF_PROC_ENTRY(pList,BUFFER1,pBuffer); |
| 957 PUSH_BUFF_PROC_ENTRY(pList,BUFFER2,pBuffer); |
| 958 PUSH_BUFF_PROC_ENTRY(pList,BUFFER3,pBuffer); |
| 959 PUSH_BUFF_PROC_ENTRY(pList,BUFFER4,pBuffer); |
| 960 |
| 961 } |
| 962 |
| 963 #define FILL_ZERO TRUE |
| 964 #define FILL_COUNTING FALSE |
| 965 static void InitBuffers(A_BOOL Zero) |
| 966 { |
| 967 A_UINT16 *pBuffer16 = (A_UINT16 *)g_Buffer; |
| 968 int i; |
| 969 |
| 970 /* fill buffer with 16 bit counting pattern or zeros */ |
| 971 for (i = 0; i < (TOTAL_BYTES / 2) ; i++) { |
| 972 if (!Zero) { |
| 973 pBuffer16[i] = (A_UINT16)i; |
| 974 } else { |
| 975 pBuffer16[i] = 0; |
| 976 } |
| 977 } |
| 978 } |
| 979 |
| 980 |
| 981 static A_BOOL CheckOneBuffer(A_UINT16 *pBuffer16, int Length) |
| 982 { |
| 983 int i; |
| 984 A_UINT16 startCount; |
| 985 A_BOOL success = TRUE; |
| 986 |
| 987 /* get the starting count */ |
| 988 startCount = pBuffer16[0]; |
| 989 /* invert it, this is the expected value */ |
| 990 startCount = ~startCount; |
| 991 /* scan the buffer and verify */ |
| 992 for (i = 0; i < (Length / 2) ; i++,startCount++) { |
| 993 /* target will invert all the data */ |
| 994 if ((A_UINT16)pBuffer16[i] != (A_UINT16)~startCount) { |
| 995 success = FALSE; |
| 996 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid Data Got:0x%X, Expecting:0x
%X (offset:%d, total:%d) \n", |
| 997 pBuffer16[i], ((A_UINT16)~startCount), i, Length)); |
| 998 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("0x%X 0x%X 0x%X 0x%X \n", |
| 999 pBuffer16[i], pBuffer16[i + 1], pBuffer16[i + 2],pBuffer
16[i+3])); |
| 1000 break; |
| 1001 } |
| 1002 } |
| 1003 |
| 1004 return success; |
| 1005 } |
| 1006 |
| 1007 static A_BOOL CheckBuffers(void) |
| 1008 { |
| 1009 int i; |
| 1010 A_BOOL success = TRUE; |
| 1011 BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH]; |
| 1012 |
| 1013 /* assemble the list */ |
| 1014 AssembleBufferList(checkList); |
| 1015 |
| 1016 /* scan the buffers and verify */ |
| 1017 for (i = 0; i < BUFFER_PROC_LIST_DEPTH ; i++) { |
| 1018 success = CheckOneBuffer((A_UINT16 *)checkList[i].pBuffer, checkList[i].
length); |
| 1019 if (!success) { |
| 1020 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer : 0x%X, Length:%d failed ver
ify \n", |
| 1021 (A_UINT32)checkList[i].pBuffer, checkList[i].length)); |
| 1022 break; |
| 1023 } |
| 1024 } |
| 1025 |
| 1026 return success; |
| 1027 } |
| 1028 |
| 1029 /* find the end marker for the last buffer we will be sending */ |
| 1030 static A_UINT16 GetEndMarker(void) |
| 1031 { |
| 1032 A_UINT8 *pBuffer; |
| 1033 BUFFER_PROC_LIST checkList[BUFFER_PROC_LIST_DEPTH]; |
| 1034 |
| 1035 /* fill up buffers with the normal counting pattern */ |
| 1036 InitBuffers(FILL_COUNTING); |
| 1037 |
| 1038 /* assemble the list we will be sending down */ |
| 1039 AssembleBufferList(checkList); |
| 1040 /* point to the last 2 bytes of the last buffer */ |
| 1041 pBuffer = &(checkList[BUFFER_PROC_LIST_DEPTH - 1].pBuffer[(checkList[BUFFER_
PROC_LIST_DEPTH - 1].length) - 2]); |
| 1042 |
| 1043 /* the last count in the last buffer is the marker */ |
| 1044 return (A_UINT16)pBuffer[0] | ((A_UINT16)pBuffer[1] << 8); |
| 1045 } |
| 1046 |
| 1047 #define ATH_PRINT_OUT_ZONE ATH_DEBUG_ERR |
| 1048 |
| 1049 /* send the ordered buffers to the target */ |
| 1050 static A_STATUS SendBuffers(AR6K_DEVICE *pDev, int mbox) |
| 1051 { |
| 1052 A_STATUS status = A_OK; |
| 1053 A_UINT32 request = HIF_WR_SYNC_BLOCK_INC; |
| 1054 BUFFER_PROC_LIST sendList[BUFFER_PROC_LIST_DEPTH]; |
| 1055 int i; |
| 1056 int totalBytes = 0; |
| 1057 int paddedLength; |
| 1058 int totalwPadding = 0; |
| 1059 |
| 1060 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sending buffers on mailbox : %d \n",mb
ox)); |
| 1061 |
| 1062 /* fill buffer with counting pattern */ |
| 1063 InitBuffers(FILL_COUNTING); |
| 1064 |
| 1065 /* assemble the order in which we send */ |
| 1066 AssembleBufferList(sendList); |
| 1067 |
| 1068 for (i = 0; i < BUFFER_PROC_LIST_DEPTH; i++) { |
| 1069 |
| 1070 /* we are doing block transfers, so we need to pad everything to a b
lock size */ |
| 1071 paddedLength = (sendList[i].length + (g_BlockSizes[mbox] - 1)) & |
| 1072 (~(g_BlockSizes[mbox] - 1)); |
| 1073 |
| 1074 /* send each buffer synchronously */ |
| 1075 status = HIFReadWrite(pDev->HIFDevice, |
| 1076 g_MailboxAddrs[mbox], |
| 1077 sendList[i].pBuffer, |
| 1078 paddedLength, |
| 1079 request, |
| 1080 NULL); |
| 1081 if (status != A_OK) { |
| 1082 break; |
| 1083 } |
| 1084 totalBytes += sendList[i].length; |
| 1085 totalwPadding += paddedLength; |
| 1086 } |
| 1087 |
| 1088 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Sent %d bytes (%d padded bytes) to mai
lbox : %d \n",totalBytes,totalwPadding,mbox)); |
| 1089 |
| 1090 return status; |
| 1091 } |
| 1092 |
| 1093 /* poll the mailbox credit counter until we get a credit or timeout */ |
| 1094 static A_STATUS GetCredits(AR6K_DEVICE *pDev, int mbox, int *pCredits) |
| 1095 { |
| 1096 A_STATUS status = A_OK; |
| 1097 int timeout = TEST_CREDITS_RECV_TIMEOUT; |
| 1098 A_UINT8 credits = 0; |
| 1099 A_UINT32 address; |
| 1100 |
| 1101 while (TRUE) { |
| 1102 |
| 1103 /* Read the counter register to get credits, this auto-decrements *
/ |
| 1104 address = COUNT_DEC_ADDRESS + (AR6K_MAILBOXES + mbox) * 4; |
| 1105 status = HIFReadWrite(pDev->HIFDevice, address, &credits, sizeof(credits
), |
| 1106 HIF_RD_SYNC_BYTE_FIX, NULL); |
| 1107 if (status != A_OK) { |
| 1108 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, |
| 1109 ("Unable to decrement the command credit count register (mbox=%d
)\n",mbox)); |
| 1110 status = A_ERROR; |
| 1111 break; |
| 1112 } |
| 1113 |
| 1114 if (credits) { |
| 1115 break; |
| 1116 } |
| 1117 |
| 1118 timeout--; |
| 1119 |
| 1120 if (timeout <= 0) { |
| 1121 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, |
| 1122 (" Timeout reading credit registers (mbox=%d, address:0x%X) \n",
mbox,address)); |
| 1123 status = A_ERROR; |
| 1124 break; |
| 1125 } |
| 1126 |
| 1127 /* delay a little, target may not be ready */ |
| 1128 A_MDELAY(1000); |
| 1129 |
| 1130 } |
| 1131 |
| 1132 if (status == A_OK) { |
| 1133 *pCredits = credits; |
| 1134 } |
| 1135 |
| 1136 return status; |
| 1137 } |
| 1138 |
| 1139 |
| 1140 /* wait for the buffers to come back */ |
| 1141 static A_STATUS RecvBuffers(AR6K_DEVICE *pDev, int mbox) |
| 1142 { |
| 1143 A_STATUS status = A_OK; |
| 1144 A_UINT32 request = HIF_RD_SYNC_BLOCK_INC; |
| 1145 BUFFER_PROC_LIST recvList[BUFFER_PROC_LIST_DEPTH]; |
| 1146 int curBuffer; |
| 1147 int credits; |
| 1148 int i; |
| 1149 int totalBytes = 0; |
| 1150 int paddedLength; |
| 1151 int totalwPadding = 0; |
| 1152 |
| 1153 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for buffers on mailbox : %d \n
",mbox)); |
| 1154 |
| 1155 /* zero the buffers */ |
| 1156 InitBuffers(FILL_ZERO); |
| 1157 |
| 1158 /* assemble the order in which we should receive */ |
| 1159 AssembleBufferList(recvList); |
| 1160 |
| 1161 curBuffer = 0; |
| 1162 |
| 1163 while (curBuffer < BUFFER_PROC_LIST_DEPTH) { |
| 1164 |
| 1165 /* get number of buffers that have been completed, this blocks |
| 1166 * until we get at least 1 credit or it times out */ |
| 1167 status = GetCredits(pDev, mbox, &credits); |
| 1168 |
| 1169 if (status != A_OK) { |
| 1170 break; |
| 1171 } |
| 1172 |
| 1173 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got %d messages on mailbox : %d \n
",credits, mbox)); |
| 1174 |
| 1175 /* get all the buffers that are sitting on the queue */ |
| 1176 for (i = 0; i < credits; i++) { |
| 1177 A_ASSERT(curBuffer < BUFFER_PROC_LIST_DEPTH); |
| 1178 /* recv the current buffer synchronously, the buffers should com
e back in |
| 1179 * order... with padding applied by the target */ |
| 1180 paddedLength = (recvList[curBuffer].length + (g_BlockSizes[mbox] - 1
)) & |
| 1181 (~(g_BlockSizes[mbox] - 1)); |
| 1182 |
| 1183 status = HIFReadWrite(pDev->HIFDevice, |
| 1184 g_MailboxAddrs[mbox], |
| 1185 recvList[curBuffer].pBuffer, |
| 1186 paddedLength, |
| 1187 request, |
| 1188 NULL); |
| 1189 if (status != A_OK) { |
| 1190 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to read %d bytes on mail
box:%d : address:0x%X \n", |
| 1191 recvList[curBuffer].length, mbox, g_MailboxAddrs[mbox]))
; |
| 1192 break; |
| 1193 } |
| 1194 |
| 1195 totalwPadding += paddedLength; |
| 1196 totalBytes += recvList[curBuffer].length; |
| 1197 curBuffer++; |
| 1198 } |
| 1199 |
| 1200 if (status != A_OK) { |
| 1201 break; |
| 1202 } |
| 1203 /* go back and get some more */ |
| 1204 credits = 0; |
| 1205 } |
| 1206 |
| 1207 if (totalBytes != TEST_BYTES) { |
| 1208 A_ASSERT(FALSE); |
| 1209 } else { |
| 1210 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Got all buffers on mbox:%d total r
ecv :%d (w/Padding : %d) \n", |
| 1211 mbox, totalBytes, totalwPadding)); |
| 1212 } |
| 1213 |
| 1214 return status; |
| 1215 |
| 1216 |
| 1217 } |
| 1218 |
| 1219 static A_STATUS DoOneMboxHWTest(AR6K_DEVICE *pDev, int mbox) |
| 1220 { |
| 1221 A_STATUS status; |
| 1222 |
| 1223 do { |
| 1224 /* send out buffers */ |
| 1225 status = SendBuffers(pDev,mbox); |
| 1226 |
| 1227 if (status != A_OK) { |
| 1228 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Sending buffers Failed : %d mbox:%d
\n",status,mbox)); |
| 1229 break; |
| 1230 } |
| 1231 |
| 1232 /* go get them, this will block */ |
| 1233 status = RecvBuffers(pDev, mbox); |
| 1234 |
| 1235 if (status != A_OK) { |
| 1236 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Recv buffers Failed : %d mbox:%d\n"
,status,mbox)); |
| 1237 break; |
| 1238 } |
| 1239 |
| 1240 /* check the returned data patterns */ |
| 1241 if (!CheckBuffers()) { |
| 1242 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Buffer Verify Failed : mbox:%d\n",m
box)); |
| 1243 status = A_ERROR; |
| 1244 break; |
| 1245 } |
| 1246 |
| 1247 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" Send/Recv success! mailbox : %d \
n",mbox)); |
| 1248 |
| 1249 } while (FALSE); |
| 1250 |
| 1251 return status; |
| 1252 } |
| 1253 |
| 1254 /* here is where the test starts */ |
| 1255 A_STATUS DoMboxHWTest(AR6K_DEVICE *pDev) |
| 1256 { |
| 1257 int i; |
| 1258 A_STATUS status; |
| 1259 int credits = 0; |
| 1260 A_UINT8 params[4]; |
| 1261 int numBufs; |
| 1262 int bufferSize; |
| 1263 A_UINT16 temp; |
| 1264 |
| 1265 |
| 1266 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest START - \n")); |
| 1267 |
| 1268 do { |
| 1269 /* get the addresses for all 4 mailboxes */ |
| 1270 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_ADDR, |
| 1271 g_MailboxAddrs, sizeof(g_MailboxAddrs)); |
| 1272 |
| 1273 if (status != A_OK) { |
| 1274 A_ASSERT(FALSE); |
| 1275 break; |
| 1276 } |
| 1277 |
| 1278 /* get the block sizes */ |
| 1279 status = HIFConfigureDevice(pDev->HIFDevice, HIF_DEVICE_GET_MBOX_BLOCK_S
IZE, |
| 1280 g_BlockSizes, sizeof(g_BlockSizes)); |
| 1281 |
| 1282 if (status != A_OK) { |
| 1283 A_ASSERT(FALSE); |
| 1284 break; |
| 1285 } |
| 1286 |
| 1287 /* note, the HIF layer usually reports mbox 0 to have a block size o
f |
| 1288 * 1, but our test wants to run in block-mode for all mailboxes, so
we treat all mailboxes |
| 1289 * the same. */ |
| 1290 g_BlockSizes[0] = g_BlockSizes[1]; |
| 1291 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Block Size to use: %d \n",g_BlockS
izes[0])); |
| 1292 |
| 1293 if (g_BlockSizes[1] > BUFFER_BLOCK_PAD) { |
| 1294 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("%d Block size is too large for
buffer pad %d\n", |
| 1295 g_BlockSizes[1], BUFFER_BLOCK_PAD)); |
| 1296 break; |
| 1297 } |
| 1298 |
| 1299 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Waiting for target.... \n")); |
| 1300 |
| 1301 /* the target lets us know it is ready by giving us 1 credit on |
| 1302 * mailbox 0 */ |
| 1303 status = GetCredits(pDev, 0, &credits); |
| 1304 |
| 1305 if (status != A_OK) { |
| 1306 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait for target ready \n"
)); |
| 1307 break; |
| 1308 } |
| 1309 |
| 1310 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Target is ready ...\n")); |
| 1311 |
| 1312 /* read the first 4 scratch registers */ |
| 1313 status = HIFReadWrite(pDev->HIFDevice, |
| 1314 SCRATCH_ADDRESS, |
| 1315 params, |
| 1316 4, |
| 1317 HIF_RD_SYNC_BYTE_INC, |
| 1318 NULL); |
| 1319 |
| 1320 if (status != A_OK) { |
| 1321 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to wait get parameters \n"))
; |
| 1322 break; |
| 1323 } |
| 1324 |
| 1325 numBufs = params[0]; |
| 1326 bufferSize = (int)(((A_UINT16)params[2] << 8) | (A_UINT16)params[1]); |
| 1327 |
| 1328 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, |
| 1329 ("Target parameters: bufs per mailbox:%d, buffer size:%d bytes (tota
l space: %d, minimum required space (w/padding): %d) \n", |
| 1330 numBufs, bufferSize, (numBufs * bufferSize), TOTAL_BYTES)); |
| 1331 |
| 1332 if ((numBufs * bufferSize) < TOTAL_BYTES) { |
| 1333 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Not Enough buffer space to run test
! need:%d, got:%d \n", |
| 1334 TOTAL_BYTES, (numBufs*bufferSize))); |
| 1335 status = A_ERROR; |
| 1336 break; |
| 1337 } |
| 1338 |
| 1339 temp = GetEndMarker(); |
| 1340 |
| 1341 status = HIFReadWrite(pDev->HIFDevice, |
| 1342 SCRATCH_ADDRESS + 4, |
| 1343 (A_UINT8 *)&temp, |
| 1344 2, |
| 1345 HIF_WR_SYNC_BYTE_INC, |
| 1346 NULL); |
| 1347 |
| 1348 if (status != A_OK) { |
| 1349 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write end marker \n")); |
| 1350 break; |
| 1351 } |
| 1352 |
| 1353 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("End Marker: 0x%X \n",temp)); |
| 1354 |
| 1355 temp = (A_UINT16)g_BlockSizes[1]; |
| 1356 /* convert to a mask */ |
| 1357 temp = temp - 1; |
| 1358 status = HIFReadWrite(pDev->HIFDevice, |
| 1359 SCRATCH_ADDRESS + 6, |
| 1360 (A_UINT8 *)&temp, |
| 1361 2, |
| 1362 HIF_WR_SYNC_BYTE_INC, |
| 1363 NULL); |
| 1364 |
| 1365 if (status != A_OK) { |
| 1366 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to write block mask \n")); |
| 1367 break; |
| 1368 } |
| 1369 |
| 1370 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, ("Set Block Mask: 0x%X \n",temp)); |
| 1371 |
| 1372 /* execute the test on each mailbox */ |
| 1373 for (i = 0; i < AR6K_MAILBOXES; i++) { |
| 1374 status = DoOneMboxHWTest(pDev, i); |
| 1375 if (status != A_OK) { |
| 1376 break; |
| 1377 } |
| 1378 } |
| 1379 |
| 1380 } while (FALSE); |
| 1381 |
| 1382 if (status == A_OK) { |
| 1383 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - SUCCESS! - \
n")); |
| 1384 } else { |
| 1385 AR_DEBUG_PRINTF(ATH_PRINT_OUT_ZONE, (" DoMboxHWTest DONE - FAILED! - \n
")); |
| 1386 } |
| 1387 /* don't let HTC_Start continue, the target is actually not running any
HTC code */ |
| 1388 return A_ERROR; |
| 1389 } |
| 1390 #endif |
| 1391 |
| 1392 |
| 1393 |
OLD | NEW |