target/linux/generic/patches-2.6.32/910-backport-spi-bus-locking-api.patch

From cf32b71e981ca63e8f349d8585ca2a3583b556e0 Mon Sep 17 00:00:00 2001
From: Ernst Schwab <eschwab@online.de>
Date: Mon, 28 Jun 2010 17:49:29 -0700
Subject: [PATCH] spi/mmc_spi: SPI bus locking API, using mutex

SPI bus locking API to allow exclusive access to the SPI bus, especially,
but not limited to, the mmc_spi driver.

Coded according to an outline from Grant Likely; here is his
specification (accidentally swapped function names corrected):
11
It requires 3 things to be added to struct spi_master.
- 1 mutex
- 1 spinlock
- 1 flag

The mutex protects spi_sync(), and provides sleeping "for free".
The spinlock protects the atomic spi_async() call.
The flag is set when the lock is obtained, and checked while holding
the spinlock in spi_async(). If the flag is set, then spi_async()
must fail immediately.
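
Concretely, that check-under-the-spinlock ends up looking roughly like
this (it is what the reworked spi_async() in the diff below does):

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	if (master->bus_lock_flag)
		ret = -EBUSY;	/* bus locked for exclusive use: fail now */
	else
		ret = __spi_async(spi, message);
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);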

The current runtime API looks like this:
spi_async(struct spi_device*, struct spi_message*);
spi_sync(struct spi_device*, struct spi_message*);

The API needs to be extended to this:
spi_async(struct spi_device*, struct spi_message*)
spi_sync(struct spi_device*, struct spi_message*)
spi_bus_lock(struct spi_master*)  /* although struct spi_device* might be easier */
spi_bus_unlock(struct spi_master*)
spi_async_locked(struct spi_device*, struct spi_message*)
spi_sync_locked(struct spi_device*, struct spi_message*)

Drivers can only call the last two if they already hold the bus lock,
taken via spi_bus_lock(); a usage sketch follows.
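
For illustration only, a driver needing its messages to reach the bus
without interleaving might use the API like this; the helper name is
hypothetical and not part of this patch:

	#include <linux/spi/spi.h>

	/* sketch: run one message with the bus held exclusively */
	static int run_exclusive(struct spi_device *spi,
				 struct spi_message *msg)
	{
		int status;

		spi_bus_lock(spi->master);		/* may sleep */
		status = spi_sync_locked(spi, msg);	/* bus is held */
		spi_bus_unlock(spi->master);		/* wake waiters */

		return status;
	}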
37
spi_bus_lock() obtains the mutex, obtains the spin lock, sets the
flag, and releases the spin lock before returning. It doesn't even
need to sleep while waiting for "in-flight" spi_transactions to
complete because its purpose is to guarantee no additional
transactions are added. It does not guarantee that the bus is idle.

spi_bus_unlock() clears the flag and releases the mutex, which will
wake up any waiters.

The difference between spi_async() and spi_async_locked() is that the
locked version bypasses the check of the lock flag. Both versions
need to obtain the spinlock.

The difference between spi_sync() and spi_sync_locked() is that
spi_sync() must hold the mutex while enqueuing a new transfer.
spi_sync_locked() doesn't because the mutex is already held. Note
however that spi_sync() must *not* continue to hold the mutex while
waiting for the transfer to complete, otherwise only one transfer
could be queued up at a time!
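
Put differently, the sync path only needs the mutex around the enqueue
step, and must drop it before sleeping; this is what __spi_sync() in
the diff below does:

	mutex_lock(&master->bus_lock_mutex);	 /* serialize vs. bus lock */
	status = spi_async_locked(spi, message); /* enqueue only */
	mutex_unlock(&master->bus_lock_mutex);	 /* drop before waiting */
	if (status == 0)
		wait_for_completion(&done);	 /* others may queue now */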

Almost no code needs to be written. The current spi_async() and
spi_sync() can probably be renamed to __spi_async() and __spi_sync()
so that spi_async(), spi_sync(), spi_async_locked() and
spi_sync_locked() can just become wrappers around the common code.

spi_sync() is protected by a mutex because it can sleep;
spi_async() needs to be protected with a flag and a spinlock because
it can be called atomically and must not sleep.

Signed-off-by: Ernst Schwab <eschwab@online.de>
[grant.likely@secretlab.ca: use spin_lock_irqsave()]
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Antonio Ospite <ospite@studenti.unina.it>
---
 drivers/spi/spi.c       |  225 ++++++++++++++++++++++++++++++++++++++++-------
 include/linux/spi/spi.h |   12 +++
 2 files changed, 204 insertions(+), 33 deletions(-)

--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -524,6 +524,10 @@ int spi_register_master(struct spi_maste
 		dynamic = 1;
 	}
 
+	spin_lock_init(&master->bus_lock_spinlock);
+	mutex_init(&master->bus_lock_mutex);
+	master->bus_lock_flag = 0;
+
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
 	 */
@@ -663,6 +667,35 @@ int spi_setup(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_setup);
 
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+
+	/* Half-duplex links include original MicroWire, and ones with
+	 * only one data pin like SPI_3WIRE (switches direction) or where
+	 * either MOSI or MISO is missing.  They can also be caused by
+	 * software limitations.
+	 */
+	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+			|| (spi->mode & SPI_3WIRE)) {
+		struct spi_transfer *xfer;
+		unsigned flags = master->flags;
+
+		list_for_each_entry(xfer, &message->transfers, transfer_list) {
+			if (xfer->rx_buf && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+				return -EINVAL;
+			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+				return -EINVAL;
+		}
+	}
+
+	message->spi = spi;
+	message->status = -EINPROGRESS;
+	return master->transfer(spi, message);
+}
+
 /**
  * spi_async - asynchronous SPI transfer
  * @spi: device with which data will be exchanged
@@ -695,33 +728,68 @@ EXPORT_SYMBOL_GPL(spi_setup);
 int spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
 
-	/* Half-duplex links include original MicroWire, and ones with
-	 * only one data pin like SPI_3WIRE (switches direction) or where
-	 * either MOSI or MISO is missing.  They can also be caused by
-	 * software limitations.
-	 */
-	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
-			|| (spi->mode & SPI_3WIRE)) {
-		struct spi_transfer *xfer;
-		unsigned flags = master->flags;
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
 
-		list_for_each_entry(xfer, &message->transfers, transfer_list) {
-			if (xfer->rx_buf && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
-				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
-				return -EINVAL;
-		}
-	}
+	if (master->bus_lock_flag)
+		ret = -EBUSY;
+	else
+		ret = __spi_async(spi, message);
 
-	message->spi = spi;
-	message->status = -EINPROGRESS;
-	return master->transfer(spi, message);
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_async);
 
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code.  After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+	struct spi_master *master = spi->master;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+	ret = __spi_async(spi, message);
+
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
 
 /*-------------------------------------------------------------------------*/
 
@@ -735,6 +803,32 @@ static void spi_complete(void *arg)
 	complete(arg);
 }
 
+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
+		      int bus_locked)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	int status;
+	struct spi_master *master = spi->master;
+
+	message->complete = spi_complete;
+	message->context = &done;
+
+	if (!bus_locked)
+		mutex_lock(&master->bus_lock_mutex);
+
+	status = spi_async_locked(spi, message);
+
+	if (!bus_locked)
+		mutex_unlock(&master->bus_lock_mutex);
+
+	if (status == 0) {
+		wait_for_completion(&done);
+		status = message->status;
+	}
+	message->context = NULL;
+	return status;
+}
+
 /**
  * spi_sync - blocking/synchronous SPI data transfers
  * @spi: device with which data will be exchanged
@@ -758,21 +852,86 @@ static void spi_complete(void *arg)
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	DECLARE_COMPLETION_ONSTACK(done);
-	int status;
-
-	message->complete = spi_complete;
-	message->context = &done;
-	status = spi_async(spi, message);
-	if (status == 0) {
-		wait_for_completion(&done);
-		status = message->status;
-	}
-	message->context = NULL;
-	return status;
+	return __spi_sync(spi, message, 0);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.  Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+	return __spi_sync(spi, message, 1);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @master: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over.  Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_lock(struct spi_master *master)
+{
+	unsigned long flags;
+
+	mutex_lock(&master->bus_lock_mutex);
+
+	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	master->bus_lock_flag = 1;
+	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+	/* mutex remains locked until spi_bus_unlock is called */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @master: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_unlock(struct spi_master *master)
+{
+	master->bus_lock_flag = 0;
+
+	mutex_unlock(&master->bus_lock_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
 /* portable code must never pass more than 32 bytes */
 #define SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
 
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -261,6 +261,13 @@ struct spi_master {
 #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
 
+	/* lock and mutex for SPI bus locking */
+	spinlock_t		bus_lock_spinlock;
+	struct mutex		bus_lock_mutex;
+
+	/* flag indicating that the SPI bus is locked for exclusive use */
+	bool			bus_lock_flag;
+
 	/* Setup mode and clock, etc (spi driver may call many times).
 	 *
 	 * IMPORTANT: this may be called when transfers to another
@@ -541,6 +548,8 @@ static inline void spi_message_free(stru
 
 extern int spi_setup(struct spi_device *spi);
 extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int spi_async_locked(struct spi_device *spi,
+			    struct spi_message *message);
 
 /*---------------------------------------------------------------------------*/
 
@@ -550,6 +559,9 @@ extern int spi_async(struct spi_device *
  */
 
 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_master *master);
+extern int spi_bus_unlock(struct spi_master *master);
 
 /**
  * spi_write - SPI synchronous write