// L2TPNS: token bucket filters

char const *cvs_id_tbf = "$Id: tbf.c,v 1.4 2004-07-08 16:54:35 bodea Exp $";

#define _GNU_SOURCE		// for MREMAP_MAYMOVE and MAP_ANONYMOUS

#include <string.h>
#include <sys/mman.h>

// Project headers (exact set assumed): sessionidt, u8, u32, session[],
// config, log(), TIME, MAXTBFS and the CLI glue come from these.
#include "l2tpns.h"
#include "util.h"
#include "tbf.h"

// Needs a regular time interval: TIME advances in 1/10th second ticks and
// drives the credit calculations below.

#define TBF_MAX_QUEUE	2	// Maximum of 2 queued packets per filter.
#define TBF_MAX_SIZE	3000	// Maximum queued packet size is 3000 bytes.

#define TBF_MAX_CREDIT	6000	// Maximum 6000 bytes of credit.
#define TBF_RATE	360	// 360 bytes per 1/10th of a second.
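// Rough arithmetic for the constants above: TBF_RATE works out to 3600 bytes
// per second (about 28.8 kbit/s sustained), and TBF_MAX_CREDIT permits a burst
// of 6000 bytes, i.e. a little over 1.6 seconds of accumulated credit at that
// rate.  new_tbf() below takes its own max_credit and rate arguments, so these
// two values appear to serve only as reference defaults.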
typedef struct {
	int		credit;		// Bytes we are currently allowed to send.
	int		lasttime;	// Last time (in 1/10th second ticks) credit was recalculated.
	int		queued;		// Number of packets currently queued.
	int		oldest;		// Position of the oldest packet in the ring buffer.
	sessionidt	sid;		// associated session ID.
	int		max_credit;	// Maximum amount of credit available (burst size).
	int		rate;		// How many bytes of credit per second we get (sustained rate).
	void		(*send)(sessionidt s, u8 *, int);	// Routine to actually send out the data.
	int		prev;		// Timer chain position.
	int		next;		// Timer chain position.

	u32		b_queued;	// Total bytes queued through this TBF.
	u32		b_sent;		// Total bytes that successfully made it to the network.
	u32		p_queued;	// ditto packets.
	u32		p_sent;		// ditto packets.
	u32		b_dropped;	// Total bytes dropped.
	u32		p_dropped;	// Total packets dropped.
	u32		p_delayed;	// Total packets not sent immediately.

	int		sizes[TBF_MAX_QUEUE];
	char		packets[TBF_MAX_QUEUE][TBF_MAX_SIZE];
} tbft;
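/*
 * A note on the queue layout: packets[] and sizes[] form a small ring buffer
 * per filter.  `oldest` indexes the packet that has waited longest and
 * `queued` counts the slots in use, so
 *
 *     enqueue slot:  (oldest + queued) % TBF_MAX_QUEUE
 *     dequeue slot:  oldest, then oldest = (oldest + 1) % TBF_MAX_QUEUE
 *
 * which is exactly how tbf_queue_packet() and tbf_run_queue() use them below.
 */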
static tbft *filter_list = NULL;
static int filter_list_size = 0;

static int timer_chain = -1;	// Head of timer chain.

static void tbf_run_queue(int tbf_id);

void init_tbf(void)
{
	filter_list = mmap(NULL, sizeof(*filter_list) * MAXTBFS, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS, 0, 0);

	if (filter_list == MAP_FAILED) {	// Defensive: mmap reports failure as MAP_FAILED, not NULL.
		filter_list = NULL;		// Keep the !filter_list checks elsewhere meaningful.
		return;
	}

	filter_list_size = MAXTBFS;
	filter_list[0].sid = -1;	// Reserved.
}
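// Entry 0 is deliberately kept out of circulation: a tbf id of 0 can then
// mean "no filter" (the session tbf_in/tbf_out fields are tested for truth),
// and next/prev values of 0 mean "not on the timer chain" (see del_from_timer()
// and the !f->next tests below).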
//
// Put a TBF on the timer list.
// This is a doubly linked list..
// We put ourselves on the tail of the list.
//
static void add_to_timer(int id)
{
	if (timer_chain == -1) {	// Chain was empty; we become its only element.
		filter_list[id].next = filter_list[id].prev = id;
		timer_chain = id;
		return;
	}

	filter_list[id].next = timer_chain;
	filter_list[id].prev = filter_list[timer_chain].prev;
	filter_list[filter_list[timer_chain].prev].next = id;
	filter_list[timer_chain].prev = id;
}
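/*
 * Example of the chain shape (illustrative ids): with filters 3, 7 and 9
 * queued and timer_chain == 3, the links are
 *
 *     3.next = 7   7.next = 9   9.next = 3      (circular)
 *     3.prev = 9   7.prev = 3   9.prev = 7
 *
 * add_to_timer(5) splices 5 in just before the head, i.e. onto the tail:
 * 5.next = 3, 5.prev = 9, 9.next = 5 and 3.prev = 5.
 */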
//
// Remove a TBF from the timer list.
// This is a doubly linked list.
//
static void del_from_timer(int id)
{
	if (filter_list[id].next == id) {	// Last element in chain?
		if (timer_chain != id) { // WTF?
			log(0,0,0,0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
		} else
			timer_chain = -1;	// The chain is now empty.

		filter_list[id].next = filter_list[id].prev = 0;
		return;
	}

	filter_list[filter_list[id].next].prev = filter_list[id].prev;
	filter_list[filter_list[id].prev].next = filter_list[id].next;
	if (timer_chain == id)
		timer_chain = filter_list[id].next;

	filter_list[id].next = filter_list[id].prev = 0;	// Mark as off the timer chain.
}
//
// Free a token bucket filter structure for re-use.
//
int free_tbf(int tid)
{
	if (tid < 1)		// Make sure we don't free id # 0
		return -1;

	if (!filter_list)	// WTF?
		return -1;

	if (filter_list[tid].next)	// Still on the timer chain?
		del_from_timer(tid);

	filter_list[tid].sid = 0;	// Mark the slot as unused.

	return 0;
}
//
// Allocate a new token bucket filter.
//
int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, u8 *, int))
{
	int i;
	static int p = 0;	// Rotating search start so we don't always scan from slot 0.

	log(3,0,0,0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);

	if (!filter_list)
		return 0;	// Couldn't alloc memory!

	for (i = 0; i < filter_list_size; ++i, p = (p+1)%filter_list_size) {
		if (filter_list[p].sid)		// Slot in use; try the next one.
			continue;

		memset((void*) &filter_list[p], 0, sizeof(filter_list[p]));	// Clear counters and data.
		filter_list[p].sid = sid;
		filter_list[p].credit = max_credit;
		filter_list[p].queued = 0;
		filter_list[p].max_credit = max_credit;
		filter_list[p].rate = rate;
		filter_list[p].oldest = 0;
		filter_list[p].send = f;

		return p;	// Return the id of the filter we just allocated.
	}

	// All allocated filters are used! Increase the size of the allocated
	// filter table so that later allocations can succeed.
	{
		int new_size = filter_list_size * 2;
		tbft *new = mremap(filter_list, filter_list_size * sizeof(*new), new_size * sizeof(*new), MREMAP_MAYMOVE);

		if (new == MAP_FAILED) {
			log(0,0,0,0, "Ran out of token bucket filters and mremap failed! Sess %d will be un-throttled\n", sid);
			return 0;
		}

		filter_list = new;	// mremap may have moved the mapping.
		i = filter_list_size;
		filter_list_size = new_size;

		for (; i < filter_list_size; ++i)
			filter_list[i].sid = 0;	// Mark the newly grown slots as free.
	}

	log(0,0,0,0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
	return 0;
}
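/*
 * Usage sketch (hypothetical caller, not taken from this file): a session
 * being throttled to `bps` bits per second might set up its egress filter as
 *
 *     session[s].tbf_out = new_tbf(s, bps / 4, bps / 8, send_helper);
 *
 * i.e. a sustained rate of bps/8 bytes per second with roughly two seconds'
 * worth of burst credit.  Both `send_helper` and the burst policy here are
 * assumptions for illustration only.
 */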
//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
void fsck_tbfs(void)
{
	int i, sid;

	if (!filter_list)
		return;

	// Free any filter that no session claims as its input or output filter.
	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid)	// Is it used??
			continue;

		sid = filter_list[i].sid;
		if (i != session[sid].tbf_in &&
		    i != session[sid].tbf_out) {	// Ooops.
			free_tbf(i);			// Mark it as free...
		}
	}

	// Make sure every filter a session points at points back at that session.
	for (i = 0; i < config->cluster_highest_sessionid; ++i) {
		if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
			filter_list[session[i].tbf_in].sid = i;		// Ouch!? FIXME. What to do here?
		}

		if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
			filter_list[session[i].tbf_out].sid = i;	// Ouch!? FIXME. What to do here?
		}
	}
}
//
// Run a packet through a token bucket filter.
// If we can send it right away, we do. Else we
// try and queue it to send later. Else we drop it.
//
int tbf_queue_packet(int tbf_id, char *data, int size)
{
	int i;
	tbft *f;

	if (!filter_list)
		return -1;

	if (tbf_id >= filter_list_size || tbf_id < 1) {	// Out of range ID??
		// Very bad. Just drop it.
		return -1;
	}

	f = &filter_list[tbf_id];

	if (!f->sid)		// Is this a real structure??
		return -1;

	tbf_run_queue(tbf_id);	// Calculate credit and send any queued packets if possible..

	f->b_queued += size;
	f->p_queued++;

	if (!f->queued && f->credit > size) {	// If the queue is empty, and we have
						// enough credit, just send it now.
		f->credit -= size;
		if (f->send) {
			f->send(f->sid, data, size);
			f->b_sent += size;
			f->p_sent++;
		} else {
			f->b_dropped += size;
			f->p_dropped++;
		}
		return size;
	}

		// Not enough credit. Do we have room in the queue?
	if (f->queued >= TBF_MAX_QUEUE) {
		f->p_dropped++;
		f->b_dropped += size;
		return -1;	// No, just drop it.
	}

		// Is it too big to fit into a queue slot?
	if (size >= TBF_MAX_SIZE) {
		f->p_dropped++;
		f->b_dropped += size;
		return -1;	// Yes, just drop it.
	}

		// Ok. We have a slot, and it's big enough to
		// contain the packet, so queue the packet!
	i = (f->oldest + f->queued) % TBF_MAX_QUEUE;
	memcpy(f->packets[i], data, size);

	f->sizes[i] = size;
	f->queued++;
	f->p_delayed++;			// Not sent immediately.

	if (!f->next)			// Are we off the timer chain?
		add_to_timer(tbf_id);	// Put ourselves on the timer chain.

	return 0;	// All done.
}
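/*
 * Worked example (illustrative numbers): a filter with credit 4000 and an
 * empty queue receives a 1500 byte packet; it is sent immediately and credit
 * drops to 2500.  Two more 1500 byte packets arrive before any new credit
 * accrues: the first is sent (credit 1000), the second no longer beats the
 * credit check and is queued.  Packets arriving while the queue already holds
 * TBF_MAX_QUEUE entries are dropped and counted in p_dropped/b_dropped.
 */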
//
// Send queued packets from the filter if possible.
// (We're normally only called if this is possible.. )
//
static void tbf_run_queue(int tbf_id)
{
	tbft *f;

	if (!filter_list)
		return;

	f = &filter_list[tbf_id];

		// Calculate available credit...
	f->credit += (TIME - f->lasttime) * f->rate / 10;	// current time is in 1/10ths of a second.
	if (f->credit > f->max_credit)
		f->credit = f->max_credit;
	f->lasttime = TIME;

	while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) {	// While we have enough credit..

		if (f->send) {
			f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
			f->b_sent += f->sizes[f->oldest];
			f->p_sent++;
		} else {
			f->b_dropped += f->sizes[f->oldest];
			f->p_dropped++;
		}

		f->credit -= f->sizes[f->oldest];

		f->oldest = (f->oldest + 1) % TBF_MAX_QUEUE;
		f->queued--;		// One less queued packet..
	}

	if (f->queued)		// Still more to do. Hang around on the timer list.
		return;

	if (f->next)			// Are we on the timer list??
		del_from_timer(tbf_id);	// Nothing more to do. Get off the timer list.
}
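/*
 * Credit arithmetic, spelled out (illustrative): TIME advances in 1/10th
 * second ticks and rate is in bytes per second, so a filter with rate 3600
 * that last ran 3 ticks (0.3 s) ago gains 3 * 3600 / 10 = 1080 bytes of
 * credit, clamped to max_credit so an idle session cannot hoard an
 * arbitrarily large burst.
 */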
//
// Periodically walk the timer list..
//
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1;	// Safety check.
	int last;
	int tbf_id;				// structure being processed.

	if (timer_chain < 0)
		return 0;	// Nothing to do...

	if (!filter_list)	// No structures built yet.
		return 0;

	last = filter_list[i].prev;	// last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next;	// Get the next in the queue.

		tbf_run_queue(tbf_id);		// Run the timer queue..
	} while (timer_chain > 0 && i && tbf_id != last && --count > 0);

	// Debugging: look for filters that are on the timer chain but somehow
	// didn't get run this tick.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)		// Not on the timer chain.
			continue;

		if (filter_list[i].lasttime == TIME)	// Did we just run it?
			continue;

		log(1,0,0,0, "Missed tbf %d! Not on the timer chain?(n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
	}

	return 1;
}
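// A note on the loop guards above: tbf_run_queue() may call del_from_timer(),
// which re-links the chain underneath the walker and can empty it entirely
// (timer_chain back to -1).  The `count` ceiling and the extra timer_chain/i
// tests in the while() condition keep a chain mutated mid-walk from turning
// this into an endless loop.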
int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
{
	int i;
	int count = 0;

	if (CLI_HELP_REQUESTED)
		return CLI_HELP_NO_ARGS;

	if (!config->cluster_iam_master) {
		cli_print(cli, "Can't do this on a slave. Do it on %s", inet_toa(config->cluster_master_address));
		return CLI_OK;
	}

	if (!filter_list)
		return CLI_OK;

	cli_print(cli, "%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
		"ByteIn", "PackIn", "ByteSent", "PackSent", "PackDrop", "PackDelay");

	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid)	// Is it used?
			continue;

		cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
			i, (filter_list[i].next ? "*" : " "),	// '*' means it is on the timer chain.
			filter_list[i].sid,
			filter_list[i].rate * 8,		// rate is bytes/s; show bits/s.
			filter_list[i].credit,
			filter_list[i].queued,

			filter_list[i].b_queued,
			filter_list[i].p_queued,
			filter_list[i].b_sent,
			filter_list[i].p_sent,
			filter_list[i].p_dropped,
			filter_list[i].p_delayed);

		++count;
	}

	cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);

	return CLI_OK;
}