Root/
1 | #ifndef __LINUX_PKT_SCHED_H |
2 | #define __LINUX_PKT_SCHED_H |
3 | |
4 | #include <linux/types.h> |
5 | |
6 | /* Logical priority bands not depending on specific packet scheduler. |
7 | Every scheduler will map them to real traffic classes, if it has |
8 | no more precise mechanism to classify packets. |
9 | |
10 | These numbers have no special meaning, though their coincidence |
11 | with obsolete IPv6 values is not occasional :-). New IPv6 drafts |
12 | preferred full anarchy inspired by diffserv group. |
13 | |
   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class; actually, as a rule it will be handled with more care than
   filler or even bulk.
17 | */ |
18 | |
/* Logical priority bands; schedulers map them to their own classes. */
#define TC_PRIO_BESTEFFORT		0	/* Default best-effort traffic */
#define TC_PRIO_FILLER			1	/* Lowest-care background filler */
#define TC_PRIO_BULK			2	/* Bulk transfers */
#define TC_PRIO_INTERACTIVE_BULK	4	/* Bulk with interactive component */
#define TC_PRIO_INTERACTIVE		6	/* Latency-sensitive traffic */
#define TC_PRIO_CONTROL			7	/* Network control traffic */

#define TC_PRIO_MAX			15	/* Highest valid TC_PRIO_* value */
27 | |
28 | /* Generic queue statistics, available for all the elements. |
29 | Particular schedulers may have also their private records. |
30 | */ |
31 | |
struct tc_stats {
	__u64	bytes;			/* Number of enqueued bytes */
	__u32	packets;		/* Number of enqueued packets */
	__u32	drops;			/* Packets dropped because of lack of resources */
	__u32	overlimits;		/* Number of throttle events when this
					 * flow goes out of allocated bandwidth */
	__u32	bps;			/* Current flow byte rate */
	__u32	pps;			/* Current flow packet rate */
	__u32	qlen;			/* Current queue length, in packets */
	__u32	backlog;		/* Current queue backlog, in bytes */
};
43 | |
/* Configuration of the rate estimator feeding tc_stats.bps/pps. */
struct tc_estimator {
	signed char	interval;	/* Sampling interval selector (log scale);
					 * NOTE(review): exact units defined by the
					 * kernel estimator — confirm against gen_estimator */
	unsigned char	ewma_log;	/* log2 of the EWMA smoothing constant */
};
48 | |
49 | /* "Handles" |
50 | --------- |
51 | |
52 | All the traffic control objects have 32bit identifiers, or "handles". |
53 | |
54 | They can be considered as opaque numbers from user API viewpoint, |
55 | but actually they always consist of two fields: major and |
56 | minor numbers, which are interpreted by kernel specially, |
57 | that may be used by applications, though not recommended. |
58 | |
59 | F.e. qdisc handles always have minor number equal to zero, |
60 | classes (or flows) have major equal to parent qdisc major, and |
61 | minor uniquely identifying class inside qdisc. |
62 | |
63 | Macros to manipulate handles: |
64 | */ |
65 | |
/* A handle is a 32-bit value: major number in the upper 16 bits,
 * minor number in the lower 16 bits. */
#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)	/* Extract major part */
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)	/* Extract minor part */
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

#define TC_H_UNSPEC	(0U)		/* Handle not specified */
#define TC_H_ROOT	(0xFFFFFFFFU)	/* Pseudo-handle of the root qdisc */
#define TC_H_INGRESS	(0xFFFFFFF1U)	/* Pseudo-handle of the ingress qdisc */
75 | |
/* Rate specification shared by token-bucket style schedulers (TBF, HTB, CBQ). */
struct tc_ratespec {
	unsigned char	cell_log;	/* log2 of the rate-table cell size */
	unsigned char	__reserved;
	unsigned short	overhead;	/* Per-packet overhead added before table lookup */
	short		cell_align;	/* Alignment of packet size for the lookup —
					 * NOTE(review): confirm sign convention */
	unsigned short	mpu;		/* Minimum packet unit (smallest billable size) */
	__u32		rate;		/* Rate in bytes per second */
};

#define TC_RTAB_SIZE	1024	/* Size in bytes of a rate conversion table */
86 | |
/* Parameters for a size table translating packet length to time slots. */
struct tc_sizespec {
	unsigned char	cell_log;	/* log2 of table cell size */
	unsigned char	size_log;	/* log2 of size granularity */
	short		cell_align;	/* Cell alignment adjustment */
	int		overhead;	/* Per-packet size overhead */
	unsigned int	linklayer;	/* Link layer type (e.g. ATM vs ethernet) */
	unsigned int	mpu;		/* Minimum packet unit */
	unsigned int	mtu;		/* Maximum transmission unit */
	unsigned int	tsize;		/* Number of entries in the table */
};
97 | |
/* Netlink attributes for configuring a size table (stab). */
enum {
	TCA_STAB_UNSPEC,
	TCA_STAB_BASE,		/* struct tc_sizespec */
	TCA_STAB_DATA,		/* Raw table data */
	__TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
106 | |
107 | /* FIFO section */ |
108 | |
/* Options for the bfifo/pfifo qdiscs. */
struct tc_fifo_qopt {
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};
112 | |
113 | /* PRIO section */ |
114 | |
#define TCQ_PRIO_BANDS	16	/* Maximum number of PRIO bands */
#define TCQ_MIN_PRIO_BANDS 2	/* Minimum number of PRIO bands */

/* Options for the prio qdisc. */
struct tc_prio_qopt {
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};
122 | |
123 | /* MULTIQ section */ |
124 | |
/* Options for the multiq qdisc. */
struct tc_multiq_qopt {
	__u16	bands;		/* Number of bands */
	__u16	max_bands;	/* Maximum number of queues */
};
129 | |
130 | /* TBF section */ |
131 | |
/* Options for the Token Bucket Filter qdisc. */
struct tc_tbf_qopt {
	struct tc_ratespec rate;	/* Sustained token rate */
	struct tc_ratespec peakrate;	/* Peak token rate (0 = unused) */
	__u32		limit;		/* Queue limit, in bytes */
	__u32		buffer;		/* Bucket depth — NOTE(review): expressed in
					 * internal time units, confirm against kernel */
	__u32		mtu;		/* Peakrate bucket size */
};
139 | |
/* Netlink attributes for TBF configuration. */
enum {
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,		/* struct tc_tbf_qopt */
	TCA_TBF_RTAB,		/* Rate conversion table */
	TCA_TBF_PTAB,		/* Peakrate conversion table */
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
149 | |
150 | |
151 | /* TEQL section */ |
152 | |
153 | /* TEQL does not require any parameters */ |
154 | |
155 | /* SFQ section */ |
156 | |
/* Options for the Stochastic Fairness Queueing qdisc. */
struct tc_sfq_qopt {
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor */
	unsigned	flows;		/* Maximal number of flows */
};
164 | |
/* Extended per-class SFQ statistics. */
struct tc_sfq_xstats {
	__s32	allot;	/* Remaining allotment of the flow */
};
168 | |
169 | /* |
170 | * NOTE: limit, divisor and flows are hardwired to code at the moment. |
171 | * |
172 | * limit=flows=128, divisor=1024; |
173 | * |
174 | * The only reason for this is efficiency, it is possible |
175 | * to change these parameters in compile time. |
176 | */ |
177 | |
178 | /* RED section */ |
179 | |
/* Netlink attributes for RED configuration. */
enum {
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,		/* struct tc_red_qopt */
	TCA_RED_STAB,		/* Idle-time damping table */
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)
188 | |
/* Options for the Random Early Detection qdisc. */
struct tc_red_qopt {
	__u32		limit;		/* HARD maximal queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W) — averaging weight */
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;		/* Bitwise OR of TC_RED_* below */
#define TC_RED_ECN	1		/* Mark with ECN instead of dropping */
#define TC_RED_HARDDROP	2		/* Drop even ECN-capable packets when forced */
};
200 | |
/* Extended RED statistics. */
struct tc_red_xstats {
	__u32	early;	/* Early drops */
	__u32	pdrop;	/* Drops due to queue limits */
	__u32	other;	/* Drops due to drop() calls */
	__u32	marked;	/* Marked packets */
};
207 | |
208 | /* GRED section */ |
209 | |
#define MAX_DPs 16	/* Maximum number of GRED virtual queues (DPs) */

/* Netlink attributes for GRED configuration. */
enum {
	TCA_GRED_UNSPEC,
	TCA_GRED_PARMS,		/* struct tc_gred_qopt */
	TCA_GRED_STAB,		/* Idle-time damping table */
	TCA_GRED_DPS,		/* struct tc_gred_sopt */
	__TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
221 | |
/* Per-virtual-queue GRED parameters and statistics. */
struct tc_gred_qopt {
	__u32	limit;		/* HARD maximal queue length (bytes) */
	__u32	qth_min;	/* Min average length threshold (bytes) */
	__u32	qth_max;	/* Max average length threshold (bytes) */
	__u32	DP;		/* upto 2^32 DPs */
	__u32	backlog;	/* Current backlog, in bytes */
	__u32	qave;		/* Average queue length estimate */
	__u32	forced;		/* Forced drops/marks (above qth_max) */
	__u32	early;		/* Early (probabilistic) drops */
	__u32	other;		/* Other drops (cf. tc_red_xstats.other) */
	__u32	pdrop;		/* Drops due to queue limits */
	__u8	Wlog;		/* log(W) */
	__u8	Plog;		/* log(P_max/(qth_max-qth_min)) */
	__u8	Scell_log;	/* cell size for idle damping */
	__u8	prio;		/* prio of this VQ */
	__u32	packets;	/* Packets seen on this VQ */
	__u32	bytesin;	/* Bytes seen on this VQ */
};
240 | |
/* GRED global setup: table size and default virtual queue. */
struct tc_gred_sopt {
	__u32	DPs;	/* Number of virtual queues in use (<= MAX_DPs) */
	__u32	def_DP;	/* Default DP for unclassified traffic */
	__u8	grio;	/* Nonzero enables priority (grio) mode —
			 * NOTE(review): confirm semantics against kernel */
	__u8	flags;
	__u16	pad1;	/* Padding for alignment */
};
249 | |
250 | /* HTB section */ |
/* HTB section */
#define TC_HTB_NUMPRIO		8	/* Number of HTB priority levels */
#define TC_HTB_MAXDEPTH		8	/* Maximum class hierarchy depth */
#define TC_HTB_PROTOVER		3	/* the same as HTB and TC's major */

/* Per-class HTB parameters. */
struct tc_htb_opt {
	struct tc_ratespec	rate;	/* Guaranteed rate */
	struct tc_ratespec	ceil;	/* Ceiling (maximum borrowing) rate */
	__u32	buffer;			/* Burst buffer for rate */
	__u32	cbuffer;		/* Burst buffer for ceil */
	__u32	quantum;		/* Bytes served per round when borrowing */
	__u32	level;			/* out only */
	__u32	prio;			/* Class priority */
};
/* Global HTB qdisc parameters. */
struct tc_htb_glob {
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts;	/* count of non shaped packets sent directly */
};
/* Netlink attributes for HTB configuration. */
enum {
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,		/* struct tc_htb_opt */
	TCA_HTB_INIT,		/* struct tc_htb_glob */
	TCA_HTB_CTAB,		/* Ceil rate table */
	TCA_HTB_RTAB,		/* Rate table */
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
283 | |
/* Extended per-class HTB statistics. */
struct tc_htb_xstats {
	__u32	lends;		/* Times this class lent bandwidth */
	__u32	borrows;	/* Times this class borrowed bandwidth */
	__u32	giants;		/* too big packets (rate will not be accurate) */
	__u32	tokens;		/* Current rate tokens */
	__u32	ctokens;	/* Current ceil tokens */
};
291 | |
292 | /* HFSC section */ |
293 | |
/* Global HFSC qdisc options. */
struct tc_hfsc_qopt {
	__u16	defcls;	/* default class */
};
297 | |
/* Two-piece linear HFSC service curve. */
struct tc_service_curve {
	__u32	m1;	/* slope of the first segment in bps */
	__u32	d;	/* x-projection of the first segment in us */
	__u32	m2;	/* slope of the second segment in bps */
};
303 | |
/* Extended per-class HFSC statistics. */
struct tc_hfsc_stats {
	__u64	work;	/* total work done */
	__u64	rtwork;	/* work done by real-time criteria */
	__u32	period;	/* current period */
	__u32	level;	/* class level in hierarchy */
};
310 | |
/* Netlink attributes for HFSC configuration, each a struct tc_service_curve. */
enum {
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,		/* Real-time service curve */
	TCA_HFSC_FSC,		/* Fair-share (link-sharing) service curve */
	TCA_HFSC_USC,		/* Upper-limit service curve */
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
320 | |
321 | |
322 | /* CBQ section */ |
323 | |
#define TC_CBQ_MAXPRIO		8	/* Number of CBQ priority levels */
#define TC_CBQ_MAXLEVEL		8	/* Maximum class hierarchy depth */
#define TC_CBQ_DEF_EWMA		5	/* Default ewma_log value */

/* CBQ link-sharing scheduler options. */
struct tc_cbq_lssopt {
	unsigned char	change;		/* Bitmask of TCF_CBQ_LSS_* fields to change —
					 * NOTE(review): confirm against cbq code */
	unsigned char	flags;		/* Bitwise OR of the two flags below */
#define TCF_CBQ_LSS_BOUNDED	1	/* Class may not borrow bandwidth */
#define TCF_CBQ_LSS_ISOLATED	2	/* Class does not lend bandwidth */
	unsigned char	ewma_log;	/* log2 of EWMA smoothing constant */
	unsigned char	level;		/* Level in the class hierarchy */
/* Bits of the 'change' mask selecting which fields below apply: */
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;	/* Maximum idle time */
	__u32		minidle;	/* Minimum (most negative) idle time */
	__u32		offtime;	/* Time to wait when overlimit */
	__u32		avpkt;		/* Average packet size */
};
346 | |
/* CBQ weighted round-robin options. */
struct tc_cbq_wrropt {
	unsigned char	flags;
	unsigned char	priority;	/* Class priority */
	unsigned char	cpriority;	/* Current (possibly demoted) priority */
	unsigned char	__reserved;
	__u32		allot;		/* Bytes allotted per round */
	__u32		weight;		/* Round-robin weight */
};
355 | |
/* CBQ overlimit strategy: what to do when a class exceeds its rate. */
struct tc_cbq_ovl {
	unsigned char	strategy;	/* One of TC_CBQ_OVL_* below */
#define	TC_CBQ_OVL_CLASSIC	0	/* Classic delay */
#define	TC_CBQ_OVL_DELAY	1	/* Explicit delay */
#define	TC_CBQ_OVL_LOWPRIO	2	/* Demote to priority2 */
#define	TC_CBQ_OVL_DROP		3	/* Drop packets */
#define	TC_CBQ_OVL_RCLASSIC	4	/* Restricted classic */
	unsigned char	priority2;	/* Priority to demote to for LOWPRIO */
	__u16		pad;		/* Padding for alignment */
	__u32		penalty;	/* Penalty time applied when overlimit */
};
367 | |
/* CBQ policing options. */
struct tc_cbq_police {
	unsigned char	police;	/* Policing mode selector */
	unsigned char	__res1;	/* Reserved */
	unsigned short	__res2;	/* Reserved */
};
373 | |
/* CBQ filter options: default class mapping on a split node. */
struct tc_cbq_fopt {
	__u32	split;		/* Handle of the split node */
	__u32	defmap;		/* Priority -> default class bitmap */
	__u32	defchange;	/* Mask of defmap bits to change */
};
379 | |
/* Extended per-class CBQ statistics. */
struct tc_cbq_xstats {
	__u32	borrows;	/* Times bandwidth was borrowed */
	__u32	overactions;	/* Times overlimit action was invoked */
	__s32	avgidle;	/* Current average idle time */
	__s32	undertime;	/* Time until the class is no longer overlimit */
};
386 | |
/* Netlink attributes for CBQ configuration. */
enum {
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,		/* struct tc_cbq_lssopt */
	TCA_CBQ_WRROPT,		/* struct tc_cbq_wrropt */
	TCA_CBQ_FOPT,		/* struct tc_cbq_fopt */
	TCA_CBQ_OVL_STRATEGY,	/* struct tc_cbq_ovl */
	TCA_CBQ_RATE,		/* struct tc_ratespec */
	TCA_CBQ_RTAB,		/* Rate conversion table */
	TCA_CBQ_POLICE,		/* struct tc_cbq_police */
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
400 | |
401 | /* dsmark section */ |
402 | |
/* Netlink attributes for the dsmark (DiffServ marking) qdisc. */
enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,		/* Number of class indices */
	TCA_DSMARK_DEFAULT_INDEX,	/* Index for unclassified packets */
	TCA_DSMARK_SET_TC_INDEX,	/* Copy DS field into skb tc_index */
	TCA_DSMARK_MASK,		/* Mask applied to the DS field */
	TCA_DSMARK_VALUE,		/* Value ORed into the DS field */
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
414 | |
415 | /* ATM section */ |
416 | |
/* Netlink attributes for the ATM qdisc. */
enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
429 | |
430 | /* Network emulator */ |
431 | |
/* Netlink attributes for the netem (network emulator) qdisc. */
enum {
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,		/* struct tc_netem_corr */
	TCA_NETEM_DELAY_DIST,	/* Custom delay distribution table */
	TCA_NETEM_REORDER,	/* struct tc_netem_reorder */
	TCA_NETEM_CORRUPT,	/* struct tc_netem_corrupt */
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
442 | |
/* Basic netem options. */
struct tc_netem_qopt {
	__u32	latency;	/* added delay (us) */
	__u32	limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32	duplicate;	/* random packet dup  (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};
451 | |
/* Correlation parameters for netem's random processes. */
struct tc_netem_corr {
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation */
};
457 | |
/* Packet re-ordering parameters for netem. */
struct tc_netem_reorder {
	__u32	probability;	/* Re-order probability (0=none ~0=100%) */
	__u32	correlation;	/* Correlation with previous decision */
};
462 | |
/* Packet corruption parameters for netem. */
struct tc_netem_corrupt {
	__u32	probability;	/* Corruption probability (0=none ~0=100%) */
	__u32	correlation;	/* Correlation with previous decision */
};

#define NETEM_DIST_SCALE	8192	/* Fixed-point scale of delay distribution tables */
469 | |
470 | /* DRR */ |
471 | |
/* Netlink attributes for the DRR (deficit round robin) qdisc. */
enum {
	TCA_DRR_UNSPEC,
	TCA_DRR_QUANTUM,	/* Bytes served per round (u32) */
	__TCA_DRR_MAX
};

#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
479 | |
/* Extended per-class DRR statistics. */
struct tc_drr_stats {
	__u32	deficit;	/* Current deficit counter, in bytes */
};
483 | |
484 | #endif |
485 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9