// L2TPNS: token bucket filters

char const *cvs_id_tbf = "$Id: tbf.c,v 1.8 2004/10/29 04:01:11 bodea Exp $";

#include <string.h>
#include "l2tpns.h"
#include "util.h"
#include "tbf.h"

tbft *filter_list = NULL;
static int filter_list_size = 0;

static int timer_chain = -1;    // Head of timer chain.

static void tbf_run_queue(int tbf_id);

void init_tbf(int num_tbfs)
{
    if (!(filter_list = shared_malloc(sizeof(*filter_list) * num_tbfs)))
        return;

    filter_list_size = num_tbfs;
    filter_list[0].sid = -1;    // Reserved.
}
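
// Note (illustrative, not from the original source): init_tbf() would
// typically be called once at startup, sized for two filters per session
// (one inbound, one outbound) plus the reserved entry 0; the exact sizing
// below is an assumption for the sketch only.
#if 0
    init_tbf(1 + 2 * MAXSESSION);
#endif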
//
// Put a TBF on the timer list.
// This is a doubly linked list..
// We put ourselves on the tail of the list.
//
static void add_to_timer(int id)
{
    if (!filter_list)
        return;

    if (timer_chain == -1) {
        filter_list[id].next = filter_list[id].prev = id;
        timer_chain = id;
        return;
    }

    filter_list[id].next = timer_chain;
    filter_list[id].prev = filter_list[timer_chain].prev;
    filter_list[filter_list[timer_chain].prev].next = id;
    filter_list[timer_chain].prev = id;
}
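
// The timer chain is circular: timer_chain indexes the head,
// filter_list[timer_chain].prev is the tail, and a lone entry points at
// itself.  A .next/.prev of 0 means "not on the chain", which is one
// reason entry 0 of filter_list is kept reserved.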

//
// Remove a TBF from the timer list.
// This is a doubly linked list.
static void del_from_timer(int id)
{
    if (!filter_list)
        return;

    if (filter_list[id].next == id) {   // Last element in chain?
        if (timer_chain != id) {        // WTF?
            log(0,0,0,0, "Removed a singleton element from TBF, but tc didn't point to it!\n");
        } else
            timer_chain = -1;
        filter_list[id].next = filter_list[id].prev = 0;
        return;
    }

    filter_list[filter_list[id].next].prev = filter_list[id].prev;
    filter_list[filter_list[id].prev].next = filter_list[id].next;
    if (timer_chain == id)
        timer_chain = filter_list[id].next;

    filter_list[id].next = filter_list[id].prev = 0;    // Mark as off the timer chain.
}

//
// Free a token bucket filter structure for re-use.
//

int free_tbf(int tid)
{
    if (tid < 1)        // Make sure we don't free id # 0
        return -1;

    if (!filter_list)   // WTF?
        return -1;

    if (filter_list[tid].next)
        del_from_timer(tid);
    filter_list[tid].sid = 0;

    return 0;           // Done!
}

//
// Allocate a new token bucket filter.
//
int new_tbf(int sid, int max_credit, int rate, void (*f)(sessionidt, u8 *, int))
{
    int i;
    static int p = 0;

    log(4,0,0,0, "Allocating new TBF (sess %d, rate %d, helper %p)\n", sid, rate, f);

    if (!filter_list)
        return 0;       // Couldn't alloc memory!

    for (i = 0; i < filter_list_size; ++i, p = (p + 1) % filter_list_size) {
        if (filter_list[p].sid)
            continue;

        memset((void *) &filter_list[p], 0, sizeof(filter_list[p]));    // Clear counters and data.
        filter_list[p].sid = sid;
        filter_list[p].credit = max_credit;
        filter_list[p].queued = 0;
        filter_list[p].max_credit = max_credit;
        filter_list[p].rate = rate;
        filter_list[p].oldest = 0;
        filter_list[p].send = f;
        return p;
    }

    log(0,0,0,0, "Ran out of token bucket filters! Sess %d will be un-throttled\n", sid);
    return 0;
}
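
//
// Usage sketch (illustrative, not part of the original source): a throttled
// session would typically get one filter per direction and remember the
// returned indices in session[s].tbf_in / session[s].tbf_out.  The helper
// names send_ipin/send_ipout, the rate variable and the max_credit choice
// are assumptions for the example.
//
#if 0
    session[s].tbf_in  = new_tbf(s, rate * 2, rate, send_ipin);
    session[s].tbf_out = new_tbf(s, rate * 2, rate, send_ipout);
#endif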

//
// Sanity check all the TBF records. This is
// typically done when we become a master..
//
void fsck_tbfs(void)
{
    int i, sid;

    if (!filter_list)
        return;

    for (i = 1; i < filter_list_size; ++i) {
        if (!filter_list[i].sid)        // Is it used??
            continue;

        sid = filter_list[i].sid;
        if (i != session[sid].tbf_in &&
            i != session[sid].tbf_out) {        // Ooops.

            free_tbf(i);        // Mark it as free...
        }
    }

    for (i = 0; i < config->cluster_highest_sessionid; ++i) {
        if (session[i].tbf_in && filter_list[session[i].tbf_in].sid != i) {
            filter_list[session[i].tbf_in].sid = i;     // Ouch!? FIXME. What to do here?
        }
        if (session[i].tbf_out && filter_list[session[i].tbf_out].sid != i) {
            filter_list[session[i].tbf_out].sid = i;    // Ouch!? FIXME. What to do here?
        }
    }
}


//
// Run a packet through a token bucket filter.
// If we can send it right away, we do. Else we
// try and queue it to send later. Else we drop it.
//
int tbf_queue_packet(int tbf_id, char *data, int size)
{
    int i;
    tbft *f;

    if (!filter_list)
        return -1;

    if (tbf_id >= filter_list_size || tbf_id < 1) {     // Out of range ID??
        // Very bad. Just drop it.
        return -1;
    }

    f = &filter_list[tbf_id];

    if (!f->sid)        // Is this a real structure??
        return -1;

    tbf_run_queue(tbf_id);      // Calculate credit and send any queued packets if possible..

    f->b_queued += size;
    f->p_queued ++;

    if (!f->queued && f->credit > size) {       // If the queue is empty, and we have
                                                // enough credit, just send it now.
        f->credit -= size;
        if (f->send) {
            f->send(f->sid, data, size);
            f->b_sent += size;
            f->p_sent ++;
        } else {
            f->b_dropped += size;
            f->p_dropped ++;
        }
        return size;
    }

    // Not enough credit. Is there room in the queue?
    if (f->queued >= TBF_MAX_QUEUE) {
        f->p_dropped ++;
        f->b_dropped += size;
        return -1;      // No, just drop it.
    }

    // Is it too big to fit into a queue slot?
    if (size >= TBF_MAX_SIZE) {
        f->p_dropped ++;
        f->b_dropped += size;
        return -1;      // Yes, just drop it.
    }

    // Ok. We have a slot, and it's big enough to
    // contain the packet, so queue the packet!
    i = (f->oldest + f->queued) % TBF_MAX_QUEUE;
    memcpy(f->packets[i], data, size);

    f->sizes[i] = size;
    f->queued ++;
    f->p_delayed ++;

    if (!f->next)               // Are we off the timer chain?
        add_to_timer(tbf_id);   // Put ourselves on the timer chain.

    return 0;   // All done.
}
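
//
// Data-path sketch (illustrative, not part of the original source): code
// forwarding packets for a throttled session would hand them to the filter
// instead of sending directly.  The s/buf/len names are assumptions.
//
#if 0
    if (session[s].tbf_out) {
        tbf_queue_packet(session[s].tbf_out, buf, len); // Sent now, queued, or dropped.
        return;
    }
#endif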

//
// Send queued packets from the filter if possible.
// (We're normally only called if this is possible..)
static void tbf_run_queue(int tbf_id)
{
    tbft *f;

    if (!filter_list)
        return;

    f = &filter_list[tbf_id];

    // Calculate available credit...
    f->credit += (TIME - f->lasttime) * f->rate / 10;   // TIME is in 1/10ths of a second.
    if (f->credit > f->max_credit)
        f->credit = f->max_credit;
    f->lasttime = TIME;
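    // Worked example: TIME counts 1/10ths of a second, so with
    // f->rate = 4000 (4000 bytes per second) and 2 ticks since lasttime,
    // the filter earns 2 * 4000 / 10 = 800 bytes of credit, capped at
    // max_credit.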

    while (f->queued > 0 && f->credit >= f->sizes[f->oldest]) {        // While we have enough credit..

        if (f->send) {
            f->send(f->sid, f->packets[f->oldest], f->sizes[f->oldest]);
            f->b_sent += f->sizes[f->oldest];
            f->p_sent ++;
        } else {
            f->b_dropped += f->sizes[f->oldest];
            f->p_dropped ++;
        }

        f->credit -= f->sizes[f->oldest];

        f->oldest = (f->oldest + 1) % TBF_MAX_QUEUE;
        f->queued--;    // One less queued packet..
    }

    if (f->queued)      // Still more to do. Hang around on the timer list.
        return;

    if (f->next)                        // Are we on the timer list??
        del_from_timer(tbf_id);         // Nothing more to do. Get off the timer list.
}

//
// Periodically walk the timer list..
//
int tbf_run_timer(void)
{
    int i = timer_chain;
    int count = filter_list_size + 1;   // Safety check.
    int last = -1;
    int tbf_id;                         // Structure being processed.

    if (timer_chain < 0)
        return 0;       // Nothing to do...

    if (!filter_list)   // No structures built yet.
        return 0;

    last = filter_list[i].prev;         // Last element to process.

    do {
        tbf_id = i;
        i = filter_list[i].next;        // Get the next in the queue.

        tbf_run_queue(tbf_id);          // Run the timer queue..
    } while (timer_chain > 0 && i && tbf_id != last && --count > 0);


#if 0   // Debugging.
    for (i = 0; i < filter_list_size; ++i) {
        if (!filter_list[i].next)
            continue;
        if (filter_list[i].lasttime == TIME)    // Did we just run it?
            continue;

        log(1,0,0,0, "Missed tbf %d! Not on the timer chain? (n %d, p %d, tc %d)\n", i,
            filter_list[i].next, filter_list[i].prev, timer_chain);
        tbf_run_queue(i);
    }
#endif

    return 1;
}
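
//
// Usage sketch (illustrative, not part of the original source): the main
// loop would call tbf_run_timer() periodically -- once per 1/10th-second
// tick fits the credit arithmetic above.  The surrounding loop and the
// running flag are assumptions.
//
#if 0
    while (running) {
        // ... service sockets, sessions, etc ...
        tbf_run_timer();        // Flush queued, throttled packets as credit allows.
    }
#endif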

int cmd_show_tbf(struct cli_def *cli, char *command, char **argv, int argc)
{
    int i;
    int count = 0;

    if (CLI_HELP_REQUESTED)
        return CLI_HELP_NO_ARGS;

    if (!config->cluster_iam_master) {
        cli_print(cli, "Can't do this on a slave. Do it on %s", inet_toa(config->cluster_master_address));
        return CLI_OK;
    }

    if (!filter_list)
        return CLI_OK;

    cli_print(cli, "%6s %5s %5s %6s %6s | %7s %7s %8s %8s %8s %8s", "TBF#", "Sid", "Rate", "Credit", "Queued",
        "ByteIn", "PackIn", "ByteSent", "PackSent", "PackDrop", "PackDelay");

    for (i = 1; i < filter_list_size; ++i) {
        if (!filter_list[i].sid)        // Is it used?
            continue;                   // No.

        cli_print(cli, "%5d%1s %5d %5d %6d %6d | %7d %7d %8d %8d %8d %8d",
            i, (filter_list[i].next ? "*" : " "),
            filter_list[i].sid,
            filter_list[i].rate * 8,
            filter_list[i].credit,
            filter_list[i].queued,

            filter_list[i].b_queued,
            filter_list[i].p_queued,
            filter_list[i].b_sent,
            filter_list[i].p_sent,
            filter_list[i].p_dropped,
            filter_list[i].p_delayed);
        ++count;
    }
    cli_print(cli, "%d tbf entries used, %d total", count, filter_list_size);
    return CLI_OK;
}