#ifndef _RDS_IW_H
#define _RDS_IW_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE 20
#define RDS_FASTREG_POOL_SIZE 2048

#define RDS_IW_MAX_SGE 8
#define RDS_IW_RECV_SGE 2

#define RDS_IW_DEFAULT_RECV_WR 1024
#define RDS_IW_DEFAULT_SEND_WR 256

#define RDS_IW_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

extern struct list_head rds_iw_devices;

/*
 * The transport posts RDS_FRAG_SIZE fragments of pages to the receive
 * queues, to try to minimize the amount of memory tied up in both the
 * device and socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
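
/*
 * Worked example (editorial; the real values come from PAGE_SIZE and
 * RDS_FRAG_SIZE at build time): with PAGE_SIZE = 4096 and
 * RDS_FRAG_SIZE = 2048 a page holds two full frags, so
 * RDS_PAGE_LAST_OFF = ((4096 / 2048) - 1) * 2048 = 2048, i.e. the last
 * full fragment starts at offset 2048. A frag whose f_offset equals
 * RDS_PAGE_LAST_OFF is the final one that can be carved out of f_page.
 */
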
struct rds_page_frag {
	struct list_head f_item;
	struct page *f_page;
	unsigned long f_offset;
	dma_addr_t f_mapped;
};

struct rds_iw_incoming {
	struct list_head ii_frags;
	struct rds_incoming ii_inc;
};

struct rds_iw_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask; /* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit; /* non-zero enables flow ctl */
};
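
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): each peer advertises the minor versions it speaks in
 * dp_protocol_minor_mask, bit n meaning "minor version n is supported".
 * Under that assumption, the highest minor both sides share could be
 * picked like this:
 */
static inline u8 rds_iw_common_minor(__be16 local_mask, __be16 remote_mask)
{
	u16 common = be16_to_cpu(local_mask) & be16_to_cpu(remote_mask);

	/* fls() returns the 1-based index of the highest set bit,
	 * or 0 when no bits are set, i.e. no version in common. */
	return common ? fls(common) - 1 : 0;
}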

struct rds_iw_scatterlist {
	struct scatterlist *list;
	unsigned int len;
	int dma_len;
	unsigned int dma_npages;
	unsigned int bytes;
};

struct rds_iw_mapping {
	spinlock_t m_lock;	/* protect the mapping struct */
	struct list_head m_list;
	struct rds_iw_mr *m_mr;
	uint32_t m_rkey;
	struct rds_iw_scatterlist m_sg;
};

struct rds_iw_send_work {
	struct rds_message *s_rm;

	/* We should really put these into a union: */
	struct rm_rdma_op *s_op;
	struct rds_iw_mapping *s_mapping;
	struct ib_mr *s_mr;
	struct ib_fast_reg_page_list *s_page_list;
	unsigned char s_remap_count;

	struct ib_send_wr s_wr;
	struct ib_sge s_sge[RDS_IW_MAX_SGE];
	unsigned long s_queued;
};

struct rds_iw_recv_work {
	struct rds_iw_incoming *r_iwinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_iw_work_ring {
	u32 w_nr;		/* number of entries in the ring */
	u32 w_alloc_ptr;	/* index of the next entry to allocate */
	u32 w_alloc_ctr;	/* running count of allocated entries */
	u32 w_free_ptr;		/* index of the oldest outstanding entry */
	atomic_t w_free_ctr;	/* running count of freed entries; atomic
				 * because completions free from the CQ path */
};

struct rds_iw_device;

struct rds_iw_connection {

	struct list_head iw_node;
	struct rds_iw_device *rds_iwdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_mr *i_mr;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;

	/* tx */
	struct rds_iw_work_ring i_send_ring;
	struct rds_message *i_rm;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_iw_send_work *i_sends;

	/* rx */
	struct tasklet_struct i_recv_tasklet;
	struct mutex i_recv_mutex;
	struct rds_iw_work_ring i_recv_ring;
	struct rds_iw_incoming *i_iwinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag i_frag;
	u64 i_ack_recv;		/* last ACK received */

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;		/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them with a slow spinlock, we pack both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */
	unsigned int i_dma_local_lkey:1;
	unsigned int i_fastreg_posted:1; /* fastreg posted on this connection */
	/* Batched completions */
	unsigned int i_unsignaled_wrs;
	long i_unsignaled_bytes;
};
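
/*
 * Illustrative sketch (editorial, not from the original header): with
 * batched completions the send path requests a signaled completion only
 * every few work requests, along the lines of
 *
 *	if (ic->i_unsignaled_wrs-- == 0) {
 *		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
 *		send->s_wr.send_flags |= IB_SEND_SIGNALED;
 *	}
 *
 * so the send CQ is polled once per batch rather than once per WR.
 */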

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
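
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): consume one send credit from i_credits without a lock, using
 * the cmpxchg scheme described above.
 */
static inline int rds_iw_try_take_send_credit(atomic_t *credits)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return 0;	/* no send credits available */
		/* send credits live in the low 16 bits, so a plain
		 * decrement only touches that half of the word */
		newval = oldval - 1;
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 1;
}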

struct rds_iw_cm_id {
	struct list_head list;
	struct rdma_cm_id *cm_id;
};

struct rds_iw_device {
	struct list_head list;
	struct list_head cm_id_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct rds_iw_mr_pool *mr_pool;
	int max_sge;
	unsigned int max_wrs;
	unsigned int dma_local_lkey:1;
	spinlock_t spinlock;	/* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1
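
/*
 * Illustrative use (editorial sketch): since these are bit numbers in
 * i_ack_flags, the ACK path can rely on the atomic bitops, e.g.
 *
 *	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags))
 *		return;		(an ACK WR is already posted)
 *
 * which guarantees at most one ACK work request is in flight at a time.
 */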

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
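
/*
 * Illustrative sketch (editorial): a completion handler can use these
 * sentinels to tell "special" work requests apart from ring entries:
 *
 *	if (wc->wr_id == RDS_IW_ACK_WR_ID)
 *		rds_iw_ack_send_complete(ic);
 *	else
 *		(look the entry up in the send ring by wr_id)
 *
 * Ordinary send/recv WRs typically carry small ring indices as wr_id,
 * so these large constants cannot collide with them.
 */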

struct rds_iw_statistics {
	uint64_t s_iw_connect_raced;
	uint64_t s_iw_listen_closed_stale;
	uint64_t s_iw_tx_cq_call;
	uint64_t s_iw_tx_cq_event;
	uint64_t s_iw_tx_ring_full;
	uint64_t s_iw_tx_throttle;
	uint64_t s_iw_tx_sg_mapping_failure;
	uint64_t s_iw_tx_stalled;
	uint64_t s_iw_tx_credit_updates;
	uint64_t s_iw_rx_cq_call;
	uint64_t s_iw_rx_cq_event;
	uint64_t s_iw_rx_ring_empty;
	uint64_t s_iw_rx_refill_from_cq;
	uint64_t s_iw_rx_refill_from_thread;
	uint64_t s_iw_rx_alloc_limit;
	uint64_t s_iw_rx_credit_updates;
	uint64_t s_iw_ack_sent;
	uint64_t s_iw_ack_send_failure;
	uint64_t s_iw_ack_send_delayed;
	uint64_t s_iw_ack_send_piggybacked;
	uint64_t s_iw_ack_received;
	uint64_t s_iw_rdma_mr_alloc;
	uint64_t s_iw_rdma_mr_free;
	uint64_t s_iw_rdma_mr_used;
	uint64_t s_iw_rdma_mr_pool_flush;
	uint64_t s_iw_rdma_mr_pool_wait;
	uint64_t s_iw_rdma_mr_pool_depleted;
};

extern struct workqueue_struct *rds_iw_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} for as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu

static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device
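
/*
 * Usage note (editorial): with the defines above, callers use the
 * standard verbs names and get the per-entry loop, e.g.
 *
 *	ib_dma_sync_sg_for_cpu(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
 *
 * which syncs every scatterlist entry individually through
 * ib_dma_sync_single_for_cpu().
 */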

static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}

/* iw.c */
extern struct rds_transport rds_iw_transport;
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;

/* iw_cm.c */
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)

/* iw_rdma.c */
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_iw_destroy_nodev_conns(void)
{
	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
}
static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
{
	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
}
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);

/* iw_recv.c */
int rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);

/* iw_ring.c */
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;

/* iw_send.c */
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);

/* iw_stats.c */
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* iw_sysctl.c */
int rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;

/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}
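
/*
 * Illustrative sketch (editorial; the variable names are hypothetical):
 * a send path fills the two SGEs of a work request via these helpers:
 *
 *	struct ib_sge *sge = rds_iw_header_sge(ic, send->s_sge);
 *	sge->addr   = header_dma_addr;
 *	sge->length = sizeof(struct rds_header);
 *	sge->lkey   = rds_iw_local_dma_lkey(ic);
 *
 *	sge = rds_iw_data_sge(ic, send->s_sge);
 *	sge->addr   = frag_dma_addr;
 *	sge->length = frag_len;
 *	sge->lkey   = rds_iw_local_dma_lkey(ic);
 */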

#endif