OLD | NEW |
(Empty) | |
| 1 /* qcusbnet.c - gobi network device |
| 2 * Copyright (c) 2010, Code Aurora Forum. All rights reserved. |
| 3 |
| 4 * This program is free software; you can redistribute it and/or modify |
| 5 * it under the terms of the GNU General Public License version 2 and |
| 6 * only version 2 as published by the Free Software Foundation. |
| 7 |
| 8 * This program is distributed in the hope that it will be useful, |
| 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 11 * GNU General Public License for more details. |
| 12 |
| 13 * You should have received a copy of the GNU General Public License |
| 14 * along with this program; if not, write to the Free Software |
| 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
| 16 * 02110-1301, USA. |
| 17 */ |
| 18 |
| 19 #include "structs.h" |
| 20 #include "qmidevice.h" |
| 21 #include "qmi.h" |
| 22 #include "qcusbnet.h" |
| 23 |
/* Driver identity strings reported via modinfo and the boot banner. */
#define DRIVER_VERSION "1.0.110+google"
#define DRIVER_AUTHOR "Qualcomm Innovation Center"
#define DRIVER_DESC "gobi"

/* All live qcusbnet devices; guarded by qcusbnet_lock.  Each entry holds
 * the list's reference, taken in qcnet_probe() and dropped in
 * qcnet_unbind() (see free_dev()). */
static LIST_HEAD(qcusbnet_list);
static DEFINE_MUTEX(qcusbnet_lock);

/* Debug flag, exposed as a writable module parameter (bottom of file). */
int qcusbnet_debug;
/* Device class for the QCQMI control nodes, created in modinit(). */
static struct class *devclass;
| 33 |
/* Final kref release callback: unlink the device from the global list and
 * free it.  Runs with qcusbnet_lock held (kref_put() is always invoked
 * under the lock in qcusbnet_put()), which makes the list_del() atomic
 * with respect to lookups in qcusbnet_get(). */
static void free_dev(struct kref *ref)
{
	struct qcusbnet *dev = container_of(ref, struct qcusbnet, refcount);
	list_del(&dev->node);
	kfree(dev);
}
| 40 |
/* Drop a reference to dev.  The list mutex is taken so that dropping the
 * last ref (which unlinks and frees the device via free_dev()) cannot
 * race with a concurrent lookup in qcusbnet_get(). */
void qcusbnet_put(struct qcusbnet *dev)
{
	mutex_lock(&qcusbnet_lock);
	kref_put(&dev->refcount, free_dev);
	mutex_unlock(&qcusbnet_lock);
}
| 47 |
struct qcusbnet *qcusbnet_get(struct qcusbnet *key)
{
	/* Given a putative qcusbnet struct, return either the struct itself
	 * (with a ref taken) if the struct is still visible, or NULL if it's
	 * not. This prevents object-visibility races where someone is looking
	 * up an object as the last ref gets dropped; dropping the last ref and
	 * removing the object from the list are atomic with respect to getting
	 * a new ref. */
	struct qcusbnet *entry;
	mutex_lock(&qcusbnet_lock);
	/* Only pointer identity is compared; key may already be stale, so it
	 * is never dereferenced unless it is found on the list. */
	list_for_each_entry(entry, &qcusbnet_list, node) {
		if (entry == key) {
			kref_get(&entry->refcount);
			mutex_unlock(&qcusbnet_lock);
			return entry;
		}
	}
	mutex_unlock(&qcusbnet_lock);
	return NULL;
}
| 68 |
| 69 int qc_suspend(struct usb_interface *iface, pm_message_t event) |
| 70 { |
| 71 struct usbnet *usbnet; |
| 72 struct qcusbnet *dev; |
| 73 |
| 74 if (!iface) |
| 75 return -ENOMEM; |
| 76 |
| 77 usbnet = usb_get_intfdata(iface); |
| 78 |
| 79 if (!usbnet || !usbnet->net) { |
| 80 DBG("failed to get netdevice\n"); |
| 81 return -ENXIO; |
| 82 } |
| 83 |
| 84 dev = (struct qcusbnet *)usbnet->data[0]; |
| 85 if (!dev) { |
| 86 DBG("failed to get QMIDevice\n"); |
| 87 return -ENXIO; |
| 88 } |
| 89 |
| 90 if (!(event.event & PM_EVENT_AUTO)) { |
| 91 DBG("device suspended to power level %d\n", |
| 92 event.event); |
| 93 qc_setdown(dev, DOWN_DRIVER_SUSPENDED); |
| 94 } else { |
| 95 DBG("device autosuspend\n"); |
| 96 } |
| 97 |
| 98 if (event.event & PM_EVENT_SUSPEND) { |
| 99 qc_stopread(dev); |
| 100 usbnet->udev->reset_resume = 0; |
| 101 iface->dev.power.power_state.event = event.event; |
| 102 } else { |
| 103 usbnet->udev->reset_resume = 1; |
| 104 } |
| 105 |
| 106 return usbnet_suspend(iface, event); |
| 107 } |
| 108 |
| 109 static int qc_resume(struct usb_interface *iface) |
| 110 { |
| 111 struct usbnet *usbnet; |
| 112 struct qcusbnet *dev; |
| 113 int ret; |
| 114 int oldstate; |
| 115 |
| 116 if (iface == 0) |
| 117 return -ENOMEM; |
| 118 |
| 119 usbnet = usb_get_intfdata(iface); |
| 120 |
| 121 if (!usbnet || !usbnet->net) { |
| 122 DBG("failed to get netdevice\n"); |
| 123 return -ENXIO; |
| 124 } |
| 125 |
| 126 dev = (struct qcusbnet *)usbnet->data[0]; |
| 127 if (!dev) { |
| 128 DBG("failed to get QMIDevice\n"); |
| 129 return -ENXIO; |
| 130 } |
| 131 |
| 132 oldstate = iface->dev.power.power_state.event; |
| 133 iface->dev.power.power_state.event = PM_EVENT_ON; |
| 134 DBG("resuming from power mode %d\n", oldstate); |
| 135 |
| 136 if (oldstate & PM_EVENT_SUSPEND) { |
| 137 qc_cleardown(dev, DOWN_DRIVER_SUSPENDED); |
| 138 |
| 139 ret = usbnet_resume(iface); |
| 140 if (ret) { |
| 141 DBG("usbnet_resume error %d\n", ret); |
| 142 return ret; |
| 143 } |
| 144 |
| 145 ret = qc_startread(dev); |
| 146 if (ret) { |
| 147 DBG("qc_startread error %d\n", ret); |
| 148 return ret; |
| 149 } |
| 150 |
| 151 complete(&dev->worker.work); |
| 152 } else { |
| 153 DBG("nothing to resume\n"); |
| 154 return 0; |
| 155 } |
| 156 |
| 157 return ret; |
| 158 } |
| 159 |
| 160 static int qcnet_bind(struct usbnet *usbnet, struct usb_interface *iface) |
| 161 { |
| 162 int numends; |
| 163 int i; |
| 164 struct usb_host_endpoint *endpoint = NULL; |
| 165 struct usb_host_endpoint *in = NULL; |
| 166 struct usb_host_endpoint *out = NULL; |
| 167 |
| 168 if (iface->num_altsetting != 1) { |
| 169 DBG("invalid num_altsetting %u\n", iface->num_altsetting); |
| 170 return -EINVAL; |
| 171 } |
| 172 |
| 173 if (iface->cur_altsetting->desc.bInterfaceNumber != 0 |
| 174 && iface->cur_altsetting->desc.bInterfaceNumber != 5) { |
| 175 DBG("invalid interface %d\n", |
| 176 iface->cur_altsetting->desc.bInterfaceNumber); |
| 177 return -EINVAL; |
| 178 } |
| 179 |
| 180 numends = iface->cur_altsetting->desc.bNumEndpoints; |
| 181 for (i = 0; i < numends; i++) { |
| 182 endpoint = iface->cur_altsetting->endpoint + i; |
| 183 if (!endpoint) { |
| 184 DBG("invalid endpoint %u\n", i); |
| 185 return -EINVAL; |
| 186 } |
| 187 |
| 188 if (usb_endpoint_dir_in(&endpoint->desc) |
| 189 && !usb_endpoint_xfer_int(&endpoint->desc)) { |
| 190 in = endpoint; |
| 191 } else if (!usb_endpoint_dir_out(&endpoint->desc)) { |
| 192 out = endpoint; |
| 193 } |
| 194 } |
| 195 |
| 196 if (!in || !out) { |
| 197 DBG("invalid endpoints\n"); |
| 198 return -EINVAL; |
| 199 } |
| 200 |
| 201 if (usb_set_interface(usbnet->udev, |
| 202 iface->cur_altsetting->desc.bInterfaceNumber, 0))
{ |
| 203 DBG("unable to set interface\n"); |
| 204 return -EINVAL; |
| 205 } |
| 206 |
| 207 usbnet->in = usb_rcvbulkpipe(usbnet->udev, in->desc.bEndpointAddress & U
SB_ENDPOINT_NUMBER_MASK); |
| 208 usbnet->out = usb_sndbulkpipe(usbnet->udev, out->desc.bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK); |
| 209 |
| 210 DBG("in %x, out %x\n", |
| 211 in->desc.bEndpointAddress, |
| 212 out->desc.bEndpointAddress); |
| 213 |
| 214 return 0; |
| 215 } |
| 216 |
/* usbnet unbind callback: take the carrier down, tear down the QMI control
 * device, free the netdev_ops table allocated in qcnet_probe(), and drop
 * the global list's reference to the qcusbnet struct. */
static void qcnet_unbind(struct usbnet *usbnet, struct usb_interface *iface)
{
	struct qcusbnet *dev = (struct qcusbnet *)usbnet->data[0];

	netif_carrier_off(usbnet->net);
	qc_deregister(dev);

	/* netdev_ops was kmalloc'd in qcnet_probe(); give it back. */
	kfree(usbnet->net->netdev_ops);
	usbnet->net->netdev_ops = NULL;
	/* drop the list's ref */
	qcusbnet_put(dev);
}
| 229 |
/* Completion handler for the TX URBs submitted by qcnet_worker().
 * Runs in interrupt context.  Parks the sentinel ERR_PTR(-EAGAIN) in
 * worker->active: the worker thread decodes that as "previous URB done"
 * and drops the autopm reference it took before submitting.  Then wakes
 * the worker and frees the URB. */
static void qcnet_urbhook(struct urb *urb)
{
	unsigned long flags;
	struct worker *worker = urb->context;
	if (!worker) {
		DBG("bad context\n");
		return;
	}

	if (urb->status) {
		DBG("urb finished with error %d\n", urb->status);
	}

	spin_lock_irqsave(&worker->active_lock, flags);
	worker->active = ERR_PTR(-EAGAIN);
	spin_unlock_irqrestore(&worker->active_lock, flags);
	/* XXX-fix race against qcnet_stop()? */
	complete(&worker->work);
	usb_free_urb(urb);
}
| 250 |
| 251 static void qcnet_txtimeout(struct net_device *netdev) |
| 252 { |
| 253 struct list_head *node, *tmp; |
| 254 struct qcusbnet *dev; |
| 255 struct worker *worker; |
| 256 struct urbreq *req; |
| 257 unsigned long activeflags, listflags; |
| 258 struct usbnet *usbnet = netdev_priv(netdev); |
| 259 |
| 260 if (!usbnet || !usbnet->net) { |
| 261 DBG("failed to get usbnet device\n"); |
| 262 return; |
| 263 } |
| 264 |
| 265 dev = (struct qcusbnet *)usbnet->data[0]; |
| 266 if (!dev) { |
| 267 DBG("failed to get QMIDevice\n"); |
| 268 return; |
| 269 } |
| 270 worker = &dev->worker; |
| 271 |
| 272 DBG("\n"); |
| 273 |
| 274 spin_lock_irqsave(&worker->active_lock, activeflags); |
| 275 if (worker->active) |
| 276 usb_kill_urb(worker->active); |
| 277 spin_unlock_irqrestore(&worker->active_lock, activeflags); |
| 278 |
| 279 spin_lock_irqsave(&worker->urbs_lock, listflags); |
| 280 list_for_each_safe(node, tmp, &worker->urbs) { |
| 281 req = list_entry(node, struct urbreq, node); |
| 282 usb_free_urb(req->urb); |
| 283 list_del(&req->node); |
| 284 kfree(req); |
| 285 } |
| 286 spin_unlock_irqrestore(&worker->urbs_lock, listflags); |
| 287 |
| 288 complete(&worker->work); |
| 289 } |
| 290 |
/* TX traffic thread.  Sleeps on worker->work and, on each wake-up:
 *   - exits cleanly (killing the active URB and discarding the queue) when
 *     kthread_stop() has been requested;
 *   - converts the ERR_PTR(-EAGAIN) sentinel left by qcnet_urbhook() back
 *     to NULL and releases the autopm reference taken for that URB;
 *   - otherwise dequeues the next queued URB, autoresumes the interface,
 *     and submits it, becoming the new worker->active URB.
 * Only one URB is in flight at a time.  worker->active is protected by
 * active_lock, the queue by urbs_lock; the queue lock is acquired while
 * active_lock is held (dequeue path), never the other way around. */
static int qcnet_worker(void *arg)
{
	struct list_head *node, *tmp;
	unsigned long activeflags, listflags;
	struct urbreq *req;
	int status;
	struct usb_device *usbdev;
	struct worker *worker = arg;
	if (!worker) {
		DBG("passed null pointer\n");
		return -EINVAL;
	}

	usbdev = interface_to_usbdev(worker->iface);

	DBG("traffic thread started\n");

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible(&worker->work);

		if (kthread_should_stop()) {
			/* Shutdown: kill whatever is in flight and drop the
			 * whole pending queue.
			 * NOTE(review): worker->active may be the
			 * ERR_PTR(-EAGAIN) sentinel here, in which case
			 * usb_kill_urb() is handed a poison pointer --
			 * confirm and guard with !IS_ERR(). */
			spin_lock_irqsave(&worker->active_lock, activeflags);
			if (worker->active) {
				usb_kill_urb(worker->active);
			}
			spin_unlock_irqrestore(&worker->active_lock, activeflags);

			spin_lock_irqsave(&worker->urbs_lock, listflags);
			list_for_each_safe(node, tmp, &worker->urbs) {
				req = list_entry(node, struct urbreq, node);
				usb_free_urb(req->urb);
				list_del(&req->node);
				kfree(req);
			}
			spin_unlock_irqrestore(&worker->urbs_lock, listflags);

			break;
		}

		/* Previous URB completed: clear the sentinel and balance the
		 * autopm get taken before its submission.  The lock is
		 * dropped around usb_autopm_put_interface() since it can
		 * sleep. */
		spin_lock_irqsave(&worker->active_lock, activeflags);
		if (IS_ERR(worker->active) && PTR_ERR(worker->active) == -EAGAIN) {
			worker->active = NULL;
			spin_unlock_irqrestore(&worker->active_lock, activeflags);
			usb_autopm_put_interface(worker->iface);
			spin_lock_irqsave(&worker->active_lock, activeflags);
		}

		/* A URB is still in flight: wait for its completion. */
		if (worker->active) {
			spin_unlock_irqrestore(&worker->active_lock, activeflags);
			continue;
		}

		spin_lock_irqsave(&worker->urbs_lock, listflags);
		if (list_empty(&worker->urbs)) {
			spin_unlock_irqrestore(&worker->urbs_lock, listflags);
			spin_unlock_irqrestore(&worker->active_lock, activeflags);
			continue;
		}

		/* Dequeue the oldest pending URB and make it active. */
		req = list_first_entry(&worker->urbs, struct urbreq, node);
		list_del(&req->node);
		spin_unlock_irqrestore(&worker->urbs_lock, listflags);

		worker->active = req->urb;
		spin_unlock_irqrestore(&worker->active_lock, activeflags);

		status = usb_autopm_get_interface(worker->iface);
		if (status < 0) {
			DBG("unable to autoresume interface: %d\n", status);
			if (status == -EPERM) {
				/* Autoresume forbidden: force a suspend so
				 * the device state stays consistent. */
				qc_suspend(worker->iface, PMSG_SUSPEND);
			}

			/* Re-queue the URB at the head for a later retry. */
			spin_lock_irqsave(&worker->urbs_lock, listflags);
			list_add(&req->node, &worker->urbs);
			spin_unlock_irqrestore(&worker->urbs_lock, listflags);

			spin_lock_irqsave(&worker->active_lock, activeflags);
			worker->active = NULL;
			spin_unlock_irqrestore(&worker->active_lock, activeflags);

			continue;
		}

		status = usb_submit_urb(worker->active, GFP_KERNEL);
		if (status < 0) {
			/* Submission failed: free the URB, drop the autopm
			 * ref, and wake ourselves to process the next one. */
			DBG("Failed to submit URB: %d. Packet dropped\n", status);
			spin_lock_irqsave(&worker->active_lock, activeflags);
			usb_free_urb(worker->active);
			worker->active = NULL;
			spin_unlock_irqrestore(&worker->active_lock, activeflags);
			usb_autopm_put_interface(worker->iface);
			complete(&worker->work);
		}

		/* The list node is no longer needed once the URB is owned by
		 * the USB core (or has been freed above). */
		kfree(req);
	}

	DBG("traffic thread exiting\n");
	worker->thread = NULL;
	return 0;
}
| 393 |
| 394 static int qcnet_startxmit(struct sk_buff *skb, struct net_device *netdev) |
| 395 { |
| 396 unsigned long listflags; |
| 397 struct qcusbnet *dev; |
| 398 struct worker *worker; |
| 399 struct urbreq *req; |
| 400 void *data; |
| 401 struct usbnet *usbnet = netdev_priv(netdev); |
| 402 |
| 403 DBG("\n"); |
| 404 |
| 405 if (!usbnet || !usbnet->net) { |
| 406 DBG("failed to get usbnet device\n"); |
| 407 return NETDEV_TX_BUSY; |
| 408 } |
| 409 |
| 410 dev = (struct qcusbnet *)usbnet->data[0]; |
| 411 if (!dev) { |
| 412 DBG("failed to get QMIDevice\n"); |
| 413 return NETDEV_TX_BUSY; |
| 414 } |
| 415 worker = &dev->worker; |
| 416 |
| 417 if (qc_isdown(dev, DOWN_DRIVER_SUSPENDED)) { |
| 418 DBG("device is suspended\n"); |
| 419 dump_stack(); |
| 420 return NETDEV_TX_BUSY; |
| 421 } |
| 422 |
| 423 req = kmalloc(sizeof(*req), GFP_ATOMIC); |
| 424 if (!req) { |
| 425 DBG("unable to allocate URBList memory\n"); |
| 426 return NETDEV_TX_BUSY; |
| 427 } |
| 428 |
| 429 req->urb = usb_alloc_urb(0, GFP_ATOMIC); |
| 430 |
| 431 if (!req->urb) { |
| 432 kfree(req); |
| 433 DBG("unable to allocate URB\n"); |
| 434 return NETDEV_TX_BUSY; |
| 435 } |
| 436 |
| 437 data = kmalloc(skb->len, GFP_ATOMIC); |
| 438 if (!data) { |
| 439 usb_free_urb(req->urb); |
| 440 kfree(req); |
| 441 DBG("unable to allocate URB data\n"); |
| 442 return NETDEV_TX_BUSY; |
| 443 } |
| 444 memcpy(data, skb->data, skb->len); |
| 445 |
| 446 usb_fill_bulk_urb(req->urb, dev->usbnet->udev, dev->usbnet->out, |
| 447 data, skb->len, qcnet_urbhook, worker); |
| 448 |
| 449 spin_lock_irqsave(&worker->urbs_lock, listflags); |
| 450 list_add_tail(&req->node, &worker->urbs); |
| 451 spin_unlock_irqrestore(&worker->urbs_lock, listflags); |
| 452 |
| 453 complete(&worker->work); |
| 454 |
| 455 netdev->trans_start = jiffies; |
| 456 dev_kfree_skb_any(skb); |
| 457 |
| 458 return NETDEV_TX_OK; |
| 459 } |
| 460 |
/* ndo_open handler: (re)initialize the TX worker state, start the worker
 * thread, clear the interface-stopped flag, then chain to usbnet's
 * original open handler (saved in qcnet_probe()).
 * Returns 0 on success or a negative errno. */
static int qcnet_open(struct net_device *netdev)
{
	int status = 0;
	struct qcusbnet *dev;
	struct usbnet *usbnet = netdev_priv(netdev);

	if (!usbnet) {
		DBG("failed to get usbnet device\n");
		return -ENXIO;
	}

	dev = (struct qcusbnet *)usbnet->data[0];
	if (!dev) {
		DBG("failed to get QMIDevice\n");
		return -ENXIO;
	}

	DBG("\n");

	/* Fresh worker state for each open/stop cycle. */
	dev->worker.iface = dev->iface;
	INIT_LIST_HEAD(&dev->worker.urbs);
	dev->worker.active = NULL;
	spin_lock_init(&dev->worker.urbs_lock);
	spin_lock_init(&dev->worker.active_lock);
	init_completion(&dev->worker.work);

	dev->worker.thread = kthread_run(qcnet_worker, &dev->worker, "qcnet_worker");
	if (IS_ERR(dev->worker.thread)) {
		DBG("AutoPM thread creation error\n");
		return PTR_ERR(dev->worker.thread);
	}

	qc_cleardown(dev, DOWN_NET_IFACE_STOPPED);
	if (dev->open) {
		status = dev->open(netdev);
		if (status == 0) {
			/* Open succeeded: allow the interface to autosuspend
			 * (the worker autoresumes it per-URB). */
			usb_autopm_put_interface(dev->iface);
		}
	} else {
		DBG("no USBNetOpen defined\n");
	}

	return status;
}
| 505 |
/* ndo_stop handler: flag the interface stopped, wake and stop the TX
 * worker thread, then chain to usbnet's original stop handler (saved in
 * qcnet_probe()).
 * NOTE(review): qcnet_worker() sets worker->thread = NULL when it exits on
 * its own; kthread_stop() on a NULL/already-exited thread would oops --
 * confirm that race cannot occur in practice. */
int qcnet_stop(struct net_device *netdev)
{
	struct qcusbnet *dev;
	struct usbnet *usbnet = netdev_priv(netdev);

	if (!usbnet || !usbnet->net) {
		DBG("failed to get netdevice\n");
		return -ENXIO;
	}

	dev = (struct qcusbnet *)usbnet->data[0];
	if (!dev) {
		DBG("failed to get QMIDevice\n");
		return -ENXIO;
	}

	qc_setdown(dev, DOWN_NET_IFACE_STOPPED);
	/* Wake the worker so it notices kthread_should_stop(). */
	complete(&dev->worker.work);
	kthread_stop(dev->worker.thread);
	DBG("thread stopped\n");

	if (dev->stop != NULL)
		return dev->stop(netdev);
	return 0;
}
| 531 |
/* usbnet driver_info: plain Ethernet framing; bind/unbind hook up the
 * Gobi-specific endpoint discovery and QMI teardown. */
static const struct driver_info qc_netinfo = {
	.description = "QCUSBNet Ethernet Device",
	.flags = FLAG_ETHER,
	.bind = qcnet_bind,
	.unbind = qcnet_unbind,
	.data = 0,
};
| 539 |
/* Build a usb_device_id table entry that routes matching devices to
 * qc_netinfo. */
#define MKVIDPID(v, p) \
{ \
	USB_DEVICE(v, p), \
	.driver_info = (unsigned long)&qc_netinfo, \
}

/* VID/PID table of supported Gobi 2000/3000 devices. */
static const struct usb_device_id qc_vidpids[] = {
	MKVIDPID(0x05c6, 0x9215),	/* Acer Gobi 2000 */
	MKVIDPID(0x05c6, 0x9265),	/* Asus Gobi 2000 */
	MKVIDPID(0x16d8, 0x8002),	/* CMOTech Gobi 2000 */
	MKVIDPID(0x413c, 0x8186),	/* Dell Gobi 2000 */
	MKVIDPID(0x1410, 0xa010),	/* Entourage Gobi 2000 */
	MKVIDPID(0x1410, 0xa011),	/* Entourage Gobi 2000 */
	MKVIDPID(0x1410, 0xa012),	/* Entourage Gobi 2000 */
	MKVIDPID(0x1410, 0xa013),	/* Entourage Gobi 2000 */
	MKVIDPID(0x03f0, 0x251d),	/* HP Gobi 2000 */
	MKVIDPID(0x05c6, 0x9205),	/* Lenovo Gobi 2000 */
	MKVIDPID(0x05c6, 0x920b),	/* Generic Gobi 2000 */
	MKVIDPID(0x04da, 0x250f),	/* Panasonic Gobi 2000 */
	MKVIDPID(0x05c6, 0x9245),	/* Samsung Gobi 2000 */
	MKVIDPID(0x1199, 0x9001),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9002),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9003),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9004),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9005),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9006),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9007),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9008),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x9009),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x1199, 0x900a),	/* Sierra Wireless Gobi 2000 */
	MKVIDPID(0x05c6, 0x9225),	/* Sony Gobi 2000 */
	MKVIDPID(0x05c6, 0x9235),	/* Top Global Gobi 2000 */
	MKVIDPID(0x05c6, 0x9275),	/* iRex Technologies Gobi 2000 */

	MKVIDPID(0x05c6, 0x920d),	/* Qualcomm Gobi 3000 */
	MKVIDPID(0x1410, 0xa021),	/* Novatel Gobi 3000 */
	{ }				/* terminator */
};

MODULE_DEVICE_TABLE(usb, qc_vidpids);
| 580 |
| 581 int qcnet_probe(struct usb_interface *iface, const struct usb_device_id *vidpids
) |
| 582 { |
| 583 int status; |
| 584 struct usbnet *usbnet; |
| 585 struct qcusbnet *dev; |
| 586 struct net_device_ops *netdevops; |
| 587 |
| 588 status = usbnet_probe(iface, vidpids); |
| 589 if (status < 0) { |
| 590 DBG("usbnet_probe failed %d\n", status); |
| 591 return status; |
| 592 } |
| 593 |
| 594 usbnet = usb_get_intfdata(iface); |
| 595 |
| 596 if (!usbnet || !usbnet->net) { |
| 597 DBG("failed to get netdevice\n"); |
| 598 return -ENXIO; |
| 599 } |
| 600 |
| 601 dev = kmalloc(sizeof(struct qcusbnet), GFP_KERNEL); |
| 602 if (!dev) { |
| 603 DBG("failed to allocate device buffers\n"); |
| 604 return -ENOMEM; |
| 605 } |
| 606 |
| 607 usbnet->data[0] = (unsigned long)dev; |
| 608 |
| 609 dev->usbnet = usbnet; |
| 610 |
| 611 netdevops = kmalloc(sizeof(struct net_device_ops), GFP_KERNEL); |
| 612 if (!netdevops) { |
| 613 DBG("failed to allocate net device ops\n"); |
| 614 return -ENOMEM; |
| 615 } |
| 616 memcpy(netdevops, usbnet->net->netdev_ops, sizeof(struct net_device_ops)
); |
| 617 |
| 618 dev->open = netdevops->ndo_open; |
| 619 netdevops->ndo_open = qcnet_open; |
| 620 dev->stop = netdevops->ndo_stop; |
| 621 netdevops->ndo_stop = qcnet_stop; |
| 622 netdevops->ndo_start_xmit = qcnet_startxmit; |
| 623 netdevops->ndo_tx_timeout = qcnet_txtimeout; |
| 624 |
| 625 usbnet->net->netdev_ops = netdevops; |
| 626 |
| 627 memset(&(dev->usbnet->net->stats), 0, sizeof(struct net_device_stats)); |
| 628 |
| 629 dev->iface = iface; |
| 630 memset(&(dev->meid), '0', 14); |
| 631 |
| 632 DBG("Mac Address: %pM\n", dev->usbnet->net->dev_addr); |
| 633 |
| 634 dev->valid = false; |
| 635 memset(&dev->qmi, 0, sizeof(dev->qmi)); |
| 636 |
| 637 dev->qmi.devclass = devclass; |
| 638 |
| 639 kref_init(&dev->refcount); |
| 640 INIT_LIST_HEAD(&dev->node); |
| 641 INIT_LIST_HEAD(&dev->qmi.clients); |
| 642 init_completion(&dev->worker.work); |
| 643 spin_lock_init(&dev->qmi.clients_lock); |
| 644 |
| 645 dev->down = 0; |
| 646 qc_setdown(dev, DOWN_NO_NDIS_CONNECTION); |
| 647 qc_setdown(dev, DOWN_NET_IFACE_STOPPED); |
| 648 |
| 649 status = qc_register(dev); |
| 650 if (status) { |
| 651 qc_deregister(dev); |
| 652 } else { |
| 653 mutex_lock(&qcusbnet_lock); |
| 654 /* Give our initial ref to the list */ |
| 655 list_add(&dev->node, &qcusbnet_list); |
| 656 mutex_unlock(&qcusbnet_lock); |
| 657 } |
| 658 |
| 659 return status; |
| 660 } |
| 661 EXPORT_SYMBOL_GPL(qcnet_probe); |
| 662 |
/* USB driver glue.  suspend/resume wrap usbnet's handlers so the QMI read
 * machinery is quiesced/restarted across power transitions; disconnect is
 * usbnet's, which invokes qcnet_unbind() for our teardown. */
static struct usb_driver qcusbnet = {
	.name = "gobi",
	.id_table = qc_vidpids,
	.probe = qcnet_probe,
	.disconnect = usbnet_disconnect,
	.suspend = qc_suspend,
	.resume = qc_resume,
	.supports_autosuspend = true,
};
| 672 |
| 673 static int __init modinit(void) |
| 674 { |
| 675 devclass = class_create(THIS_MODULE, "QCQMI"); |
| 676 if (IS_ERR(devclass)) { |
| 677 DBG("error at class_create %ld\n", PTR_ERR(devclass)); |
| 678 return -ENOMEM; |
| 679 } |
| 680 printk(KERN_INFO "%s: %s\n", DRIVER_DESC, DRIVER_VERSION); |
| 681 return usb_register(&qcusbnet); |
| 682 } |
| 683 module_init(modinit); |
| 684 |
/* Module exit: unregister the USB driver, then destroy the QCQMI class
 * created in modinit(). */
static void __exit modexit(void)
{
	usb_deregister(&qcusbnet);
	class_destroy(devclass);
}
module_exit(modexit);
| 691 |
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
/* NOTE(review): the file header states GPL v2 only, but this declares
 * Dual BSD/GPL -- confirm which license is intended. */
MODULE_LICENSE("Dual BSD/GPL");

/* NOTE(review): qcusbnet_debug is declared as int but registered with
 * param type 'bool'; kernels >= 3.3 require the variable itself to be
 * bool -- confirm the target kernel version. */
module_param(qcusbnet_debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(qcusbnet_debug, "Debugging enabled or not");
OLD | NEW |