commit ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0
Author: Linus Walleij <linus.walleij@linaro.org>
Date:   Wed Feb 22 10:05:38 2012 +0100

spi: create a message queueing infrastructure

This rips the message queue in the PL022 driver out and pushes
it into (optional) common infrastructure. Drivers that want to
use the message pumping thread will need to define the new
per-message transfer methods and leave the deprecated transfer()
method as NULL.
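
As a rough sketch of what that means for a converted driver (the
foo_* names below are hypothetical, not part of this patch), probe()
fills in the queued hooks and simply never assigns transfer():

    static int foo_spi_probe(struct platform_device *pdev)
    {
            struct spi_master *master;

            master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
            if (!master)
                    return -ENOMEM;

            /* Queued API: per-message hooks, transfer() stays NULL */
            master->prepare_transfer_hardware = foo_spi_prepare_hw;
            master->transfer_one_message = foo_spi_transfer_one_message;
            master->unprepare_transfer_hardware = foo_spi_unprepare_hw;

            return spi_register_master(master);
    }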

Most of the design is described in the documentation changes that
are included in this patch.

Since there is a queue that needs to be stopped when the system
is suspending/resuming, two new calls are implemented for the
device drivers to call in their suspend()/resume() functions:
spi_master_suspend() and spi_master_resume().
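
For example, in a (hypothetical) controller driver's system PM
callbacks the pairing would look like this, assuming the driver
stored the master with dev_set_drvdata() at probe time:

    static int foo_spi_suspend(struct device *dev)
    {
            struct spi_master *master = dev_get_drvdata(dev);
            int ret;

            /* Drain and stop the message queue before powering down */
            ret = spi_master_suspend(master);
            if (ret)
                    return ret;

            /* ... quiesce the controller hardware here ... */
            return 0;
    }

    static int foo_spi_resume(struct device *dev)
    {
            struct spi_master *master = dev_get_drvdata(dev);

            /* ... re-initialize the controller hardware here ... */

            /* Restart the message queue */
            return spi_master_resume(master);
    }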

ChangeLog v1->v2:
- Remove Kconfig entry and do not make the queue support optional
  at all, instead be more aggressive and have it as part of the
  compulsory infrastructure.
- If the .transfer() method is implemented, print a small
  deprecation notice and do not start the transfer pump.
- Fix a bitrotted comment.
ChangeLog v2->v3:
- Fix up a problematic sequence courtesy of Chris Blair.
- Stop rather than destroy the queue on suspend() courtesy of
  Chris Blair.

Signed-off-by: Chris Blair <chris.blair@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>

[Florian: dropped the changes on drivers/spi/spi-pl022.c, removed
the dev_info() about unqueued drivers still using the master function]

--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
Overview of Linux kernel SPI support
====================================

-21-May-2007
+02-Feb-2012

What is SPI?
------------
@@ -483,9 +483,9 @@ also initialize its own internal state.
and those methods.)

After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.

If you need to remove your SPI controller driver, spi_unregister_master()
will reverse the effect of spi_register_master().

@@ -521,21 +521,53 @@ SPI MASTER METHODS
        ** When you code setup(), ASSUME that the controller
        ** is actively processing transfers for another device.

-    master->transfer(struct spi_device *spi, struct spi_message *message)
-        This must not sleep. Its responsibility is arrange that the
-        transfer happens and its complete() callback is issued. The two
-        will normally happen later, after other transfers complete, and
-        if the controller is idle it will need to be kickstarted.
-
    master->cleanup(struct spi_device *spi)
        Your controller driver may use spi_device.controller_state to hold
        state it dynamically associates with that device. If you do that,
        be sure to provide the cleanup() method to free that state.

+    master->prepare_transfer_hardware(struct spi_master *master)
+        This will be called by the queue mechanism to signal to the driver
+        that a message is coming in soon, so the subsystem requests the
+        driver to prepare the transfer hardware by issuing this call.
+        This may sleep.
+
+    master->unprepare_transfer_hardware(struct spi_master *master)
+        This will be called by the queue mechanism to signal to the driver
+        that there are no more messages pending in the queue and it may
+        relax the hardware (e.g. by power management calls). This may sleep.
+
+    master->transfer_one_message(struct spi_master *master,
+                                 struct spi_message *mesg)
+        The subsystem calls the driver to transfer a single message while
+        queuing transfers that arrive in the meantime. When the driver is
+        finished with this message, it must call
+        spi_finalize_current_message() so the subsystem can issue the next
+        transfer. This may sleep.
+
+    DEPRECATED METHODS
+
+    master->transfer(struct spi_device *spi, struct spi_message *message)
+        This must not sleep. Its responsibility is to arrange that the
+        transfer happens and its complete() callback is issued. The two
+        will normally happen later, after other transfers complete, and
+        if the controller is idle it will need to be kickstarted. This
+        method is not used on queued controllers and must be NULL if
+        transfer_one_message() and (un)prepare_transfer_hardware() are
+        implemented.
+

SPI MESSAGE QUEUE

-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().

That queue could be purely conceptual. For example, a driver used only
for low-frequency sensor access might be fine using synchronous PIO.
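
As an illustration of the queued flow described above, a
transfer_one_message() implementation might look roughly like the
sketch below (foo_spi_do_one_transfer() is a hypothetical helper that
shifts out a single spi_transfer):

    static int foo_spi_transfer_one_message(struct spi_master *master,
                                            struct spi_message *msg)
    {
            struct spi_transfer *xfer;
            int ret = 0;

            list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                    ret = foo_spi_do_one_transfer(master, msg->spi, xfer);
                    if (ret)
                            break;
                    msg->actual_length += xfer->len;
            }

            msg->status = ret;
            /* Hand the message back so the core can pump the next one */
            spi_finalize_current_message(master);

            return ret;
    }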
@@ -561,4 +593,6 @@ Stephen Street
Mark Underwood
Andrew Victor
Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
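
The (un)prepare_transfer_hardware() pair is a natural place for power
handling; a hedged sketch (the foo_* names and the clock handling are
assumptions, not something this patch mandates):

    static int foo_spi_prepare_transfer_hardware(struct spi_master *master)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);

            /* A message is coming in soon, wake the hardware up */
            return clk_enable(fs->clk);
    }

    static int foo_spi_unprepare_transfer_hardware(struct spi_master *master)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);

            /* The queue ran empty, relax the hardware */
            clk_disable(fs->clk);
            return 0;
    }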
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>

static void spidev_release(struct device *dev)
{
@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board

/*-------------------------------------------------------------------------*/

+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so calls out to the driver to initialize hardware
+ * and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+        struct spi_master *master =
+                container_of(work, struct spi_master, pump_messages);
+        unsigned long flags;
+        bool was_busy = false;
+        int ret;
+
+        /* Lock queue and check for queue work */
+        spin_lock_irqsave(&master->queue_lock, flags);
+        if (list_empty(&master->queue) || !master->running) {
+                if (master->busy) {
+                        ret = master->unprepare_transfer_hardware(master);
+                        if (ret) {
+                                dev_err(&master->dev,
+                                        "failed to unprepare transfer hardware\n");
+                                return;
+                        }
+                }
+                master->busy = false;
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return;
+        }
+
+        /* Make sure we are not already running a message */
+        if (master->cur_msg) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return;
+        }
+        /* Extract head of queue */
+        master->cur_msg =
+                list_entry(master->queue.next, struct spi_message, queue);
+
+        list_del_init(&master->cur_msg->queue);
+        if (master->busy)
+                was_busy = true;
+        else
+                master->busy = true;
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        if (!was_busy) {
+                ret = master->prepare_transfer_hardware(master);
+                if (ret) {
+                        dev_err(&master->dev,
+                                "failed to prepare transfer hardware\n");
+                        return;
+                }
+        }
+
+        ret = master->transfer_one_message(master, master->cur_msg);
+        if (ret) {
+                dev_err(&master->dev,
+                        "failed to transfer one message from queue\n");
+                return;
+        }
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+        INIT_LIST_HEAD(&master->queue);
+        spin_lock_init(&master->queue_lock);
+
+        master->running = false;
+        master->busy = false;
+
+        init_kthread_worker(&master->kworker);
+        master->kworker_task = kthread_run(kthread_worker_fn,
+                                           &master->kworker,
+                                           dev_name(&master->dev));
+        if (IS_ERR(master->kworker_task)) {
+                dev_err(&master->dev, "failed to create message pump task\n");
+                return -ENOMEM;
+        }
+        init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+        /*
+         * Master config will indicate if this controller should run the
+         * message pump with high (realtime) priority to reduce the transfer
+         * latency on the bus by minimising the delay between a transfer
+         * request and the scheduling of the message pump thread. Without this
+         * setting the message pump thread will remain at default priority.
+         */
+        if (master->rt) {
+                dev_info(&master->dev,
+                         "will run message pump with realtime priority\n");
+                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+        }
+
+        return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+        struct spi_message *next;
+        unsigned long flags;
+
+        /* get a pointer to the next message, if any */
+        spin_lock_irqsave(&master->queue_lock, flags);
+        if (list_empty(&master->queue))
+                next = NULL;
+        else
+                next = list_entry(master->queue.next,
+                                  struct spi_message, queue);
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+        struct spi_message *mesg;
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+        mesg = master->cur_msg;
+        master->cur_msg = NULL;
+
+        queue_kthread_work(&master->kworker, &master->pump_messages);
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        mesg->state = NULL;
+        if (mesg->complete)
+                mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        if (master->running || master->busy) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return -EBUSY;
+        }
+
+        master->running = true;
+        master->cur_msg = NULL;
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        queue_kthread_work(&master->kworker, &master->pump_messages);
+
+        return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+        unsigned long flags;
+        unsigned limit = 500;
+        int ret = 0;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        /*
+         * This is a bit lame, but is optimized for the common execution path.
+         * A wait_queue on the master->busy could be used, but then the common
+         * execution path (pump_messages) would be required to call wake_up or
+         * friends on every SPI message. Do this instead.
+         */
+        while ((!list_empty(&master->queue) || master->busy) && limit--) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                msleep(10);
+                spin_lock_irqsave(&master->queue_lock, flags);
+        }
+
+        if (!list_empty(&master->queue) || master->busy)
+                ret = -EBUSY;
+        else
+                master->running = false;
+
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        if (ret) {
+                dev_warn(&master->dev,
+                         "could not stop message queue\n");
+                return ret;
+        }
+        return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+        int ret;
+
+        ret = spi_stop_queue(master);
+
+        /*
+         * flush_kthread_worker will block until all work is done.
+         * If the reason that stop_queue timed out is that the work will never
+         * finish, then it does no good to call flush/stop thread, so
+         * return anyway.
+         */
+        if (ret) {
+                dev_err(&master->dev, "problem destroying queue\n");
+                return ret;
+        }
+
+        flush_kthread_worker(&master->kworker);
+        kthread_stop(master->kworker_task);
+
+        return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be queued to the driver's message queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+        struct spi_master *master = spi->master;
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        if (!master->running) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return -ESHUTDOWN;
+        }
+        msg->actual_length = 0;
+        msg->status = -EINPROGRESS;
+
+        list_add_tail(&msg->queue, &master->queue);
+        if (master->running && !master->busy)
+                queue_kthread_work(&master->kworker, &master->pump_messages);
+
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+        return 0;
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+        int ret;
+
+        master->queued = true;
+        master->transfer = spi_queued_transfer;
+
+        /* Initialize and start queue */
+        ret = spi_init_queue(master);
+        if (ret) {
+                dev_err(&master->dev, "problem initializing queue\n");
+                goto err_init_queue;
+        }
+        ret = spi_start_queue(master);
+        if (ret) {
+                dev_err(&master->dev, "problem starting queue\n");
+                goto err_start_queue;
+        }
+
+        return 0;
+
+err_start_queue:
+err_init_queue:
+        spi_destroy_queue(master);
+        return ret;
+}
+
+/*-------------------------------------------------------------------------*/

static void spi_master_release(struct device *dev)
{
        struct spi_master *master;
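
The spi_get_next_queued_message() helper added above lets a driver
peek ahead in the queue, e.g. to decide whether it is worth powering
down between messages. A hedged sketch (the foo_* names are
hypothetical):

    static void foo_spi_idle_check(struct spi_master *master)
    {
            struct spi_message *next = spi_get_next_queued_message(master);

            if (!next)
                    foo_spi_relax_hardware(master); /* nothing pending */
            /* else more traffic is imminent, stay powered up */
    }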
@@ -522,6 +812,7 @@ static struct class spi_master_class = {
};


+
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
@@ -621,6 +912,15 @@ int spi_register_master(struct spi_maste
        dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
                dynamic ? " (dynamic)" : "");

+        /* If we're using a queued driver, start the queue */
+        if (!master->transfer) {
+                status = spi_master_initialize_queue(master);
+                if (status) {
+                        device_unregister(&master->dev);
+                        goto done;
+                }
+        }
+
        mutex_lock(&board_lock);
        list_add_tail(&master->list, &spi_master_list);
        list_for_each_entry(bi, &board_list, list)
@@ -636,7 +936,6 @@ done:
}
EXPORT_SYMBOL_GPL(spi_register_master);

-
static int __unregister(struct device *dev, void *null)
{
        spi_unregister_device(to_spi_device(dev));
@@ -657,6 +956,11 @@ void spi_unregister_master(struct spi_ma
{
        int dummy;

+        if (master->queued) {
+                if (spi_destroy_queue(master))
+                        dev_err(&master->dev, "queue remove failed\n");
+        }
+
        mutex_lock(&board_lock);
        list_del(&master->list);
        mutex_unlock(&board_lock);
@@ -666,6 +970,37 @@ void spi_unregister_master(struct spi_ma
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

+int spi_master_suspend(struct spi_master *master)
+{
+        int ret;
+
+        /* Basically no-ops for non-queued masters */
+        if (!master->queued)
+                return 0;
+
+        ret = spi_stop_queue(master);
+        if (ret)
+                dev_err(&master->dev, "queue stop failed\n");
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+        int ret;
+
+        if (!master->queued)
+                return 0;
+
+        ret = spi_start_queue(master);
+        if (ret)
+                dev_err(&master->dev, "queue restart failed\n");
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
+
static int __spi_master_match(struct device *dev, void *data)
{
        struct spi_master *m;
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
+#include <linux/kthread.h>

/*
 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver
 * the device whose settings are being modified.
 * @transfer: adds a message to the controller's transfer queue.
 * @cleanup: frees controller-specific state
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @cur_msg: the currently in-flight message
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ *    so the subsystem requests the driver to prepare the transfer hardware
+ *    by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ *    message while queuing transfers that arrive in the meantime. When the
+ *    driver is finished with this message, it must call
+ *    spi_finalize_current_message() so the subsystem can issue the next
+ *    transfer
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ *    queue so the subsystem notifies the driver that it may relax the
+ *    hardware by issuing this call
 *
 * Each SPI master controller can communicate with one or more @spi_device
 * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {

        /* called on release() to free memory provided by spi_master */
        void (*cleanup)(struct spi_device *spi);
+
+        /*
+         * These hooks are for drivers that want to use the generic
+         * master transfer queueing mechanism. If these are used, the
+         * transfer() function above must NOT be specified by the driver.
+         * Over time we expect SPI drivers to be phased over to this API.
+         */
+        bool queued;
+        struct kthread_worker kworker;
+        struct task_struct *kworker_task;
+        struct kthread_work pump_messages;
+        spinlock_t queue_lock;
+        struct list_head queue;
+        struct spi_message *cur_msg;
+        bool busy;
+        bool running;
+        bool rt;
+
+        int (*prepare_transfer_hardware)(struct spi_master *master);
+        int (*transfer_one_message)(struct spi_master *master,
+                                    struct spi_message *mesg);
+        int (*unprepare_transfer_hardware)(struct spi_master *master);
};

static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct
        put_device(&master->dev);
}

+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver makes to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);

/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *

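Finally, a driver that wants the message pump elevated to realtime
priority only needs to set the new rt flag before registering; a short
sketch (probe context assumed, error handling elided):

    master->rt = true; /* spi_init_queue() will switch the pump to SCHED_FIFO */
    ret = spi_register_master(master);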