Add configurable hostname
[l2tpns.git] / tbf.c
1 // L2TPNS: token bucket filters
2
3 char const *cvs_id_tbf = "$Id: tbf.c,v 1.6 2004/09/02 04:18:07 fred_nerk Exp $";
4
5 #define _GNU_SOURCE
6
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/mman.h>
10
11 #include "l2tpns.h"
12 #include "util.h"
13 #include "tbf.h"
14
// Shared table of token bucket filters, indexed by TBF id.
// Allocated in init_tbf(); every entry point below guards on it being
// non-NULL before touching it.
tbft *filter_list = NULL;
static int filter_list_size = 0;	// Number of entries allocated in filter_list.

// Head index of the timer chain: a circular doubly linked list threaded
// through filter_list[].next/.prev.  -1 means the chain is empty.
static int timer_chain = -1; // Head of timer chain.

static void tbf_run_queue(int tbf_id);
21
22 void init_tbf(void)
23 {
24 if ((filter_list = shared_malloc(sizeof(*filter_list) * MAXTBFS)) == MAP_FAILED)
25 return;
26
27 filter_list_size = MAXTBFS;
28 filter_list[0].sid = -1; // Reserved.
29 }
30 //
31 // Put a TBF on the timer list.
32 // This is a doubly linked list..
33 // We put ourselves on the tail of the list.
34 //
35 static void add_to_timer(int id)
36 {
37 if (!filter_list)
38 return;
39
40 if (timer_chain == -1) {
41 filter_list[id].next = filter_list[id].prev = id;
42 timer_chain = id;
43 return;
44 }
45
46 filter_list[id].next = timer_chain;
47 filter_list[id].prev = filter_list[timer_chain].prev;
48 filter_list[filter_list[timer_chain].prev].next = id;
49 filter_list[timer_chain].prev = id;
50 }
51
//
// Remove a TBF from the timer list.
// This is a doubly linked list.
//
// Afterwards next == prev == 0 marks the entry as off the chain
// (slot 0 is reserved, so 0 is never a valid link target).
static void del_from_timer(int id)
{
	if (!filter_list)
		return;

	if (filter_list[id].next == id) { // Last element in chain?
		if (timer_chain != id) { // WTF?  A singleton must also be the head.
			log(0,0,0,0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
		} else
			timer_chain = -1;	// Chain is now empty.
		filter_list[id].next = filter_list[id].prev = 0;
		return;
	}

	// Unlink from the ring, moving the head along if we were it.
	filter_list[filter_list[id].next].prev = filter_list[id].prev;
	filter_list[filter_list[id].prev].next = filter_list[id].next;
	if (timer_chain == id)
		timer_chain = filter_list[id].next;

	filter_list[id].next = filter_list[id].prev = 0; // Mark as off the timer chain.
}
76
77 //
78 // Free a token bucket filter structure for re-use.
79 //
80
81 int free_tbf(int tid)
82 {
83 if (tid < 1) // Make sure we don't free id # 0
84 return -1;
85
86 if (!filter_list) // WTF?
87 return -1;
88
89 if (filter_list[tid].next)
90 del_from_timer(tid);
91 filter_list[tid].sid = 0;
92
93 return 0; // Done!
94 }
95
//
// Allocate a new token bucket filter.
//
// sid:        session the filter belongs to.
// max_credit: bucket depth in bytes; also the initial credit.
// rate:       credit added per second (consumed in 1/10 s ticks by
//             tbf_run_queue()).
// f:          callback used to actually transmit released packets.
//
// Returns the new TBF id, or 0 on failure (id 0 is reserved, so 0
// safely means "un-throttled").
//
int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, u8 *, int))
{
	int i;
	static int p = 0;	// Rotating scan position so successive allocations spread out.

	log(3,0,0,0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);

	if (!filter_list)
		return 0; // Couldn't alloc memory!

//	again:	// Target of the disabled grow-and-retry path below.

	for (i = 0 ; i < filter_list_size ; ++i, p = (p+1)%filter_list_size ) {
		if (filter_list[p].sid)	// In use (slot 0 stays reserved: sid == -1).
			continue;

		memset((void*) &filter_list[p], 0, sizeof(filter_list[p]) ); // Clear counters and data.
		filter_list[p].sid = sid;
		filter_list[p].credit = max_credit;
		filter_list[p].queued = 0;
		filter_list[p].max_credit = max_credit;
		filter_list[p].rate = rate;
		filter_list[p].oldest = 0;
		filter_list[p].send = f;
		return p;
	}

#if 0	// Disabled.  NOTE(review): presumably because filter_list comes from
	// shared_malloc() (shared memory) and growing it with mremap() here
	// would not be visible to other processes -- confirm before enabling.

	// All allocated filters are used! Increase the size of the allocated
	// filters.

	{
		int new_size = filter_list_size * 2;
		tbft *new = mremap(filter_list, filter_list_size * sizeof(*new), new_size * sizeof(*new), MREMAP_MAYMOVE);

		if (new == MAP_FAILED)
		{
			log(0,0,0,0, "Ran out of token bucket filters and mremap failed! Sess %d will be un-throttled\n", sid);
			return 0;
		}

		i = filter_list_size;
		filter_list_size = new_size;
		filter_list = new;
	}

	// Mark the newly mapped tail of the table as free.
	for (; i < filter_list_size; ++i)
		filter_list[i].sid = 0;

	goto again;
#else
	log(0,0,0,0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
	return 0;
#endif
}
154
//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
// Pass 1: free any allocated filter that its own session does not
// reference as tbf_in or tbf_out.
// Pass 2: for each session that references a filter, force that
// filter's sid to match the session (the FIXMEs below mark this
// repair as a best-effort guess).
//
void fsck_tbfs(void)
{
	int i , sid;

	if (!filter_list)
		return;

	for (i = 1; i < filter_list_size; ++i) {	// Slot 0 is reserved; skip it.
		if (!filter_list[i].sid) // Is it used??
			continue;

		sid = filter_list[i].sid;
		if (i != session[sid].tbf_in &&
			i != session[sid].tbf_out) { // Ooops.

			free_tbf(i); // Mark it as free...
		}
	}

	for (i = 0; i < config->cluster_highest_sessionid ; ++i) {
		if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
			filter_list[session[i].tbf_in].sid = i; // Ouch!? FIXME. What to do here?
		}
		if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
			filter_list[session[i].tbf_out].sid = i; // Ouch!? FIXME. What to do here?
		}
	}
}
187
188
189 //
190 // Run a packet through a token bucket filter.
191 // If we can send it right away, we do. Else we
192 // try and queue it to send later. Else we drop it.
193 //
194 int tbf_queue_packet(int tbf_id, char * data, int size)
195 {
196 int i;
197 tbft * f;
198
199 if (!filter_list)
200 return -1;
201
202 if (tbf_id > filter_list_size || tbf_id < 1) { // Out of range ID??
203 // Very bad. Just drop it.
204 return -1;
205 }
206
207 f = &filter_list[tbf_id];
208
209 if (!f->sid) // Is this a real structure??
210 return -1;
211
212 tbf_run_queue(tbf_id); // Caculate credit and send any queued packets if possible..
213
214 f->b_queued += size;
215 f->p_queued ++;
216
217 if (!f->queued && f->credit > size) { // If the queue is empty, and we have
218 // enough credit, just send it now.
219 f->credit -= size;
220 if (f->send) {
221 f->send(f->sid, data, size);
222 f->b_sent += size;
223 f->p_sent ++;
224 } else {
225 f->b_dropped += size;
226 f->p_dropped ++;
227 }
228 return size;
229 }
230
231 // Not enough credit. Can we have room in the queue?
232 if (f->queued >= TBF_MAX_QUEUE) {
233 f->p_dropped ++;
234 f->b_dropped += size;
235 return -1; // No, just drop it.
236 }
237
238 // Is it too big to fit into a queue slot?
239 if (size >= TBF_MAX_SIZE) {
240 f->p_dropped ++;
241 f->b_dropped += size;
242 return -1; // Yes, just drop it.
243 }
244
245 // Ok. We have a slot, and it's big enough to
246 // contain the packet, so queue the packet!
247 i = ( f->oldest + f->queued ) % TBF_MAX_QUEUE;
248 memcpy(f->packets[i], data, size);
249
250 f->sizes[i] = size;
251 f->queued ++;
252 f->p_delayed ++;
253
254 if (!f->next) // Are we off the timer chain?
255 add_to_timer(tbf_id); // Put ourselves on the timer chain.
256
257 return 0; // All done.
258 }
259
//
// Send queued packets from the filter if possible.
// (We're normally only called if this is possible.. )
//
// Tops up the bucket's credit from the elapsed time, drains the FIFO
// of delayed packets while credit lasts, and takes the filter off the
// timer chain once its queue is empty.
static void tbf_run_queue(int tbf_id)
{
	tbft * f;

	if (!filter_list)
		return;

	f = &filter_list[tbf_id];

	// Calculate available credit...
	f->credit += (TIME - f->lasttime) * f->rate / 10; // current time is 1/10th of a second.
	if (f->credit > f->max_credit)	// Clamp to the bucket depth.
		f->credit = f->max_credit;
	f->lasttime = TIME;

	while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) { // While we have enough credit..

		if (f->send) {
			f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
			f->b_sent += f->sizes[f->oldest];
			f->p_sent ++;
		} else {	// No transmit helper: account the packet as dropped.
			f->b_dropped += f->sizes[f->oldest];
			f->p_dropped ++;
		}

		f->credit -= f->sizes[f->oldest];

		f->oldest = (f->oldest + 1 ) % TBF_MAX_QUEUE;	// Advance the FIFO ring.
		f->queued--; // One less queued packet..
	}

	if (f->queued) // Still more to do. Hang around on the timer list.
		return;

	if (f->next) // Are we on the timer list??
		del_from_timer(tbf_id); // Nothing more to do. Get off the timer list.
}
301
//
// Periodically walk the timer list..
//
// Gives every filter on the timer chain a chance to send its queued
// packets.  tbf_run_queue() may unlink entries (including the head)
// while we iterate, so 'last' is snapshotted up front and the walk is
// additionally bounded by a safety counter.
//
// Returns 1 if the chain was walked, 0 if there was nothing to do.
int tbf_run_timer(void)
{
	int i = timer_chain;
	int count = filter_list_size + 1; // Safety check.
	int last = -1;
	int tbf_id; // structure being processed.

	if (timer_chain < 0)
		return 0; // Nothing to do...

	if (!filter_list) // No structures built yet.
		return 0;

	last = filter_list[i].prev; // last element to process.

	do {
		tbf_id = i;
		i = filter_list[i].next; // Get the next in the queue.

		tbf_run_queue(tbf_id); // Run the timer queue..
	} while ( timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0 // Debugging: report any on-chain filter the walk above missed.
	for (i = 0; i < filter_list_size; ++i) {
		if (!filter_list[i].next)	// Off the timer chain.
			continue;
		if (filter_list[i].lasttime == TIME) // Did we just run it?
			continue;

		log(1,0,0,0, "Missed tbf %d! Not on the timer chain?(n %d, p %d, tc %d)\n", i,
			filter_list[i].next, filter_list[i].prev, timer_chain);
		tbf_run_queue(i);
	}
#endif

	return 1;
}
343
//
// CLI handler: dump every allocated token bucket filter with its
// counters.  Only runs on the cluster master, where live TBF state
// is maintained; slaves are redirected to the master's address.
//
int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
{
	int i;
	int count = 0;

	if (CLI_HELP_REQUESTED)
		return CLI_HELP_NO_ARGS;

	if (!config->cluster_iam_master) {
		cli_print(cli, "Can't do this on a slave. Do it on %s", inet_toa(config->cluster_master_address));
		return CLI_OK;
	}

	if (!filter_list)
		return CLI_OK;

	cli_print(cli,"%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
		"ByteIn","PackIn","ByteSent","PackSent", "PackDrop", "PackDelay");

	for (i = 1; i < filter_list_size; ++i) {	// Slot 0 is reserved; skip it.
		if (!filter_list[i].sid) // Is it used?
			continue; // No.

		// The "*" after the id flags entries currently on the timer chain.
		cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
			i, (filter_list[i].next ? "*" : " "),
			filter_list[i].sid,
			filter_list[i].rate * 8,	// NOTE(review): presumably bytes/s -> bits/s; confirm units.
			filter_list[i].credit,
			filter_list[i].queued,

			filter_list[i].b_queued,
			filter_list[i].p_queued,
			filter_list[i].b_sent,
			filter_list[i].p_sent,
			filter_list[i].p_dropped,
			filter_list[i].p_delayed);
		++count;
	}
	cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
	return CLI_OK;
}