// L2TPNS: token bucket filters

char const *cvs_id_tbf = "$Id: tbf.c,v 1.4 2004-07-08 16:54:35 bodea Exp $";

#define _GNU_SOURCE

#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include "l2tpns.h"
#include "util.h"
#include "tbf.h"

// Need a time interval.

#define TBF_MAX_QUEUE	2	// Maximum of 2 queued packets per filter.
#define TBF_MAX_SIZE	3000	// Maximum queued packet size is 3000 bytes.

#define TBF_MAX_CREDIT	6000	// Maximum 6000 bytes of credit.
#define TBF_RATE	360	// 360 bytes per 1/10th of a second.
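
// A token bucket filter accumulates "credit" (bytes it is allowed to send)
// over time at its configured rate, up to a burst limit of max_credit bytes.
// A packet is transmitted only when enough credit is available; otherwise it
// is held in a small per-filter queue (at most TBF_MAX_QUEUE packets), and
// anything beyond that is dropped.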

typedef struct {
	int credit;
	int lasttime;
	int queued;
	int oldest;		// Position of the oldest packet in the ring buffer.
	sessionidt sid;		// Associated session ID.
	int max_credit;		// Maximum amount of credit available (burst size).
	int rate;		// How many bytes of credit per second do we get? (sustained rate)
	void (*send)(sessionidt s, u8 *, int);	// Routine to actually send out the data.
	int prev;		// Timer chain position.
	int next;		// Timer chain position.

	u32 b_queued;		// Total bytes presented to this TBF.
	u32 b_sent;		// Total bytes that successfully made it to the network.
	u32 p_queued;		// Ditto, packets.
	u32 p_sent;		// Ditto, packets.
	u32 b_dropped;		// Total bytes dropped.
	u32 p_dropped;		// Total packets dropped.
	u32 p_delayed;		// Total packets not sent immediately.

	int sizes[TBF_MAX_QUEUE];
	char packets[TBF_MAX_QUEUE][TBF_MAX_SIZE];
} tbft;
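
// The packets[] / sizes[] arrays form a small ring buffer: "oldest" indexes
// the first queued packet, "queued" counts how many are held, and new packets
// go into slot (oldest + queued) % TBF_MAX_QUEUE.  The prev/next fields are
// indices into filter_list[] forming the timer chain; 0 means "not on the
// chain", which works because entry 0 of filter_list is reserved.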


static tbft *filter_list = NULL;
static int filter_list_size = 0;

static int timer_chain = -1;	// Head of timer chain.

static void tbf_run_queue(int tbf_id);

void init_tbf(void)
{
	filter_list = mmap(NULL, sizeof(*filter_list) * MAXTBFS, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
	if (filter_list == MAP_FAILED) {	// mmap() signals failure with MAP_FAILED, not NULL.
		filter_list = NULL;		// Leave the "no filters" state the rest of the code tests for.
		return;
	}

	filter_list_size = MAXTBFS;
	filter_list[0].sid = -1;	// Reserved.
}
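
// Note: entry 0 of filter_list is reserved so that a TBF id of 0 can mean
// "no filter", and a next/prev index of 0 can mean "not on the timer chain".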

//
// Put a TBF on the timer list.
// This is a doubly linked list..
// We put ourselves on the tail of the list.
//
static void add_to_timer(int id)
{
	if (!filter_list)
		return;

	if (timer_chain == -1) {
		filter_list[id].next = filter_list[id].prev = id;
		timer_chain = id;
		return;
	}

	filter_list[id].next = timer_chain;
	filter_list[id].prev = filter_list[timer_chain].prev;
	filter_list[filter_list[timer_chain].prev].next = id;
	filter_list[timer_chain].prev = id;
}
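
// For example, if the circular chain is currently A <-> B <-> A (with
// timer_chain == A), adding C gives A <-> B <-> C <-> A and timer_chain
// still points at A, so new entries always end up at the tail.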

//
// Remove a TBF from the timer list.
// This is a doubly linked list.
//
static void del_from_timer(int id)
{
	if (!filter_list)
		return;

	if (filter_list[id].next == id) {	// Last element in chain?
		if (timer_chain != id) {	// WTF?
			log(0,0,0,0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
		} else
			timer_chain = -1;
		filter_list[id].next = filter_list[id].prev = 0;
		return;
	}

	filter_list[filter_list[id].next].prev = filter_list[id].prev;
	filter_list[filter_list[id].prev].next = filter_list[id].next;
	if (timer_chain == id)
		timer_chain = filter_list[id].next;

	filter_list[id].next = filter_list[id].prev = 0;	// Mark as off the timer chain.
}

//
// Free a token bucket filter structure for re-use.
//

int free_tbf(int tid)
{
	if (tid < 1)		// Make sure we don't free id # 0
		return -1;

	if (!filter_list)	// WTF?
		return -1;

	if (filter_list[tid].next)
		del_from_timer(tid);
	filter_list[tid].sid = 0;

	return 0;	// Done!
}

//
// Allocate a new token bucket filter.
//
int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, u8 *, int))
{
	int i;
	static int p = 0;

	log(3,0,0,0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);

	if (!filter_list)
		return 0;	// Couldn't alloc memory!

//	again:

	for (i = 0 ; i < filter_list_size ; ++i, p = (p+1)%filter_list_size ) {
		if (filter_list[p].sid)
			continue;

		memset((void*) &filter_list[p], 0, sizeof(filter_list[p]) );	// Clear counters and data.
		filter_list[p].sid = sid;
		filter_list[p].credit = max_credit;
		filter_list[p].queued = 0;
		filter_list[p].max_credit = max_credit;
		filter_list[p].rate = rate;
		filter_list[p].oldest = 0;
		filter_list[p].send = f;
		return p;
	}

#if 0
	// All allocated filters are used! Increase the size of the allocated
	// filters.

	{
		int new_size = filter_list_size * 2;
		tbft *new = mremap(filter_list, filter_list_size * sizeof(*new), new_size * sizeof(*new), MREMAP_MAYMOVE);

		if (new == MAP_FAILED)
		{
			log(0,0,0,0, "Ran out of token bucket filters and mremap failed!  Sess %d will be un-throttled\n", sid);
			return 0;
		}

		i = filter_list_size;
		filter_list_size = new_size;
		filter_list = new;
	}

	for (; i < filter_list_size; ++i)
		filter_list[i].sid = 0;

	goto again;
#else
	log(0,0,0,0, "Ran out of token bucket filters!  Sess %d will be un-throttled\n", sid);
	return 0;
#endif
}
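
// Illustrative usage (hypothetical caller, not part of this file): a session
// being throttled to 8000 bytes/s with a 6000 byte burst might do
//
//	int t = new_tbf(s, 6000, 8000, send_fn);	// send_fn: any routine with the
//							// (sessionidt, u8 *, int) signature
//	...
//	tbf_queue_packet(t, buf, len);			// send now, or queue for later
//
// with tbf_run_timer() called periodically to drain whatever is still queued,
// and free_tbf(t) called when the session goes away.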

//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
void fsck_tbfs(void)
{
	int i, sid;

	if (!filter_list)
		return;

	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid)	// Is it used??
			continue;

		sid = filter_list[i].sid;
		if (i != session[sid].tbf_in &&
			i != session[sid].tbf_out) {	// Ooops.

			free_tbf(i);	// Mark it as free...
		}
	}

	for (i = 0; i < config->cluster_highest_sessionid ; ++i) {
		if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
			filter_list[session[i].tbf_in].sid = i;	// Ouch!? FIXME. What to do here?
		}
		if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
			filter_list[session[i].tbf_out].sid = i;	// Ouch!? FIXME. What to do here?
		}
	}
}


//
// Run a packet through a token bucket filter.
// If we can send it right away, we do; otherwise we try to
// queue it to send later, and failing that we drop it.
//
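// Returns the packet size if it was sent immediately, 0 if it was queued,
// or -1 if it was dropped.
//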
int tbf_queue_packet(int tbf_id, char * data, int size)
{
	int i;
	tbft * f;

	if (!filter_list)
		return -1;

	if (tbf_id >= filter_list_size || tbf_id < 1) {	// Out of range ID?? (valid ids are 1 .. filter_list_size-1)
		// Very bad. Just drop it.
		return -1;
	}

	f = &filter_list[tbf_id];

	if (!f->sid)		// Is this a real structure??
		return -1;

	tbf_run_queue(tbf_id);	// Calculate credit and send any queued packets if possible..

	f->b_queued += size;
	f->p_queued ++;

	if (!f->queued && f->credit > size) {	// If the queue is empty, and we have
						// enough credit, just send it now.
		f->credit -= size;
		if (f->send) {
			f->send(f->sid, data, size);
			f->b_sent += size;
			f->p_sent ++;
		} else {
			f->b_dropped += size;
			f->p_dropped ++;
		}
		return size;
	}

	// Not enough credit. Do we have room in the queue?
	if (f->queued >= TBF_MAX_QUEUE) {
		f->p_dropped ++;
		f->b_dropped += size;
		return -1;	// No, just drop it.
	}

	// Is it too big to fit into a queue slot?
	if (size >= TBF_MAX_SIZE) {
		f->p_dropped ++;
		f->b_dropped += size;
		return -1;	// Yes, just drop it.
	}

	// Ok. We have a slot, and it's big enough to
	// contain the packet, so queue the packet!
	i = ( f->oldest + f->queued ) % TBF_MAX_QUEUE;
	memcpy(f->packets[i], data, size);

	f->sizes[i] = size;
	f->queued ++;
	f->p_delayed ++;

	if (!f->next)			// Are we off the timer chain?
		add_to_timer(tbf_id);	// Put ourselves on the timer chain.

	return 0;	// All done.
}

//
// Send queued packets from the filter if possible.
// (We're normally only called if this is possible.. )
//
static void tbf_run_queue(int tbf_id)
{
	tbft * f;

	if (!filter_list)
		return;

	f = &filter_list[tbf_id];

	// Calculate available credit...
	f->credit += (TIME - f->lasttime) * f->rate / 10;	// TIME is in 1/10ths of a second.
	if (f->credit > f->max_credit)
		f->credit = f->max_credit;
	f->lasttime = TIME;
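	// For example (illustrative numbers only): with rate = 8000 bytes/s and
	// 3 ticks (0.3s) since the last run, 8000 * 3 / 10 = 2400 bytes of credit
	// are added, then capped at max_credit.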

	while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) {	// While we have enough credit..

		if (f->send) {
			f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
			f->b_sent += f->sizes[f->oldest];
			f->p_sent ++;
		} else {
			f->b_dropped += f->sizes[f->oldest];
			f->p_dropped ++;
		}

		f->credit -= f->sizes[f->oldest];

		f->oldest = (f->oldest + 1 ) % TBF_MAX_QUEUE;
		f->queued--;	// One less queued packet..
	}

	if (f->queued)		// Still more to do. Hang around on the timer list.
		return;

	if (f->next)			// Are we on the timer list??
		del_from_timer(tbf_id);	// Nothing more to do. Get off the timer list.
}

//
// Periodically walk the timer list..
//
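// Returns 1 if the chain was walked, 0 if there was nothing to do.
//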
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1;	// Safety check.
	int last = -1;
	int tbf_id;				// Structure being processed.

	if (timer_chain < 0)
		return 0;	// Nothing to do...

	if (!filter_list)	// No structures built yet.
		return 0;

	last = filter_list[i].prev;	// Last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next;	// Get the next in the queue.

		tbf_run_queue(tbf_id);		// Run the timer queue..
	} while ( timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0	// Debugging.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)
			continue;
		if (filter_list[i].lasttime == TIME)	// Did we just run it?
			continue;

		log(1,0,0,0, "Missed tbf %d! Not on the timer chain? (n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
		tbf_run_queue(i);
	}
#endif

	return 1;
}

int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
{
	int i;
	int count = 0;

	if (CLI_HELP_REQUESTED)
		return CLI_HELP_NO_ARGS;

	if (!config->cluster_iam_master) {
		cli_print(cli, "Can't do this on a slave. Do it on %s", inet_toa(config->cluster_master_address));
		return CLI_OK;
	}

	if (!filter_list)
		return CLI_OK;

	cli_print(cli, "%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s",
		"TBF#", "Sid", "Rate", "Credit", "Queued",
		"ByteIn", "PackIn", "ByteSent", "PackSent", "PackDrop", "PackDelay");

	for (i = 1; i < filter_list_size; ++i) {
		if (!filter_list[i].sid)	// Is it used?
			continue;		// No.

		cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
			i, (filter_list[i].next ? "*" : " "),
			filter_list[i].sid,
			filter_list[i].rate * 8,
			filter_list[i].credit,
			filter_list[i].queued,

			filter_list[i].b_queued,
			filter_list[i].p_queued,
			filter_list[i].b_sent,
			filter_list[i].p_sent,
			filter_list[i].p_dropped,
			filter_list[i].p_delayed);
		++count;
	}
	cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
	return CLI_OK;
}