1 // L2TPNS Clustering Stuff
2
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <stdarg.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <sys/file.h>
9 #include <sys/stat.h>
10 #include <sys/socket.h>
11 #include <netinet/in.h>
12 #include <arpa/inet.h>
13 #include <sys/ioctl.h>
14 #include <net/if.h>
15 #include <string.h>
16 #include <malloc.h>
17 #include <errno.h>
18 #include <libcli.h>
19
20 #include "l2tpns.h"
21 #include "cluster.h"
22 #include "util.h"
23 #include "tbf.h"
24 #include "pppoe.h"
25
26 #ifdef BGP
27 #include "bgp.h"
28 #endif
29 /*
30 * All cluster packets have the same format.
31 *
32 * One or more instances of
33 * a 32 bit 'type' id.
34 * a 32 bit 'extra' value whose meaning is dependent on the 'type'.
35 * zero or more bytes of structure data, dependent on the type.
36 *
37 */
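//
// Illustrative sketch (not part of the original source): the shape of the
// loop that walks these elements.  The real parsing is done inline in
// processcluster() and cluster_process_heartbeat() below; the 'type' word
// decides how many payload bytes (if any) follow the two 32 bit words.
//
#if 0
static void walk_elements_example(uint8_t *p, int s)
{
	while (s >= 2 * (int) sizeof(uint32_t))
	{
		uint32_t type = *(uint32_t *) p;			// 32 bit 'type' id
		uint32_t more = *(uint32_t *) (p + sizeof(uint32_t));	// 32 bit 'extra' word
		p += 2 * sizeof(uint32_t);
		s -= 2 * sizeof(uint32_t);

		// ...dispatch on 'type': e.g. a C_SESSION element is followed by
		// sizeof(sessiont) bytes of payload, a C_PING by a pingt, etc.
		(void) type; (void) more;
	}
}
#endif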
38
39 // Module variables.
40 extern int cluster_sockfd; // The file descriptor for the cluster communications port.
41
42 in_addr_t my_address = 0; // The network address of my ethernet port.
43 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
44 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
45 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
46 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
47
48 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
49 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
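// Worked example (sizes are hypothetical; sizeof(sessiont) depends on the build):
// with a 1500 byte sessiont and a 4 byte int, MAX_CHANGES = 8192 / 1504 - 2 = 3.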
50
51 static struct {
52 int type;
53 int id;
54 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
55
56 static struct {
57 int seq;
58 int size;
59 uint8_t data[MAX_HEART_SIZE];
60 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Needed so
61 // we can re-transmit if needed.
62
63 static struct {
64 in_addr_t peer;
65 uint32_t basetime;
66 clockt timestamp;
67 int uptodate;
68 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
69 static int num_peers; // Number of peers in list.
70
71 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
72 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
73
74 //
75 // Create a listening socket
76 //
77 // This joins the cluster multi-cast group.
78 //
79 int cluster_init()
80 {
81 struct sockaddr_in addr;
82 struct sockaddr_in interface_addr;
83 struct ip_mreq mreq;
84 struct ifreq ifr;
85 int opt = 1;
86
87 config->cluster_undefined_sessions = MAXSESSION-1;
88 config->cluster_undefined_bundles = MAXBUNDLE-1;
89 config->cluster_undefined_tunnels = MAXTUNNEL-1;
90
91 if (!config->cluster_address)
92 return 0;
93 if (!*config->cluster_interface)
94 return 0;
95
96 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
97
98 memset(&addr, 0, sizeof(addr));
99 addr.sin_family = AF_INET;
100 addr.sin_port = htons(CLUSTERPORT);
101 addr.sin_addr.s_addr = INADDR_ANY;
102 setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); // SO_REUSEADDR takes an int flag, not a sockaddr.
103
104 opt = fcntl(cluster_sockfd, F_GETFL, 0);
105 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
106
107 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
108 {
109 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
110 return -1;
111 }
112
113 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", config->cluster_interface); // don't overflow the fixed-size ifr_name
114 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
115 {
116 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
117 return -1;
118 }
119
120 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
121 my_address = interface_addr.sin_addr.s_addr;
122
123 // Join multicast group.
124 mreq.imr_multiaddr.s_addr = config->cluster_address;
125 mreq.imr_interface = interface_addr.sin_addr;
126
127
128 opt = 0; // Turn off multicast loopback.
129 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
130
131 if (config->cluster_mcast_ttl != 1)
132 {
133 uint8_t ttl = 0;
134 if (config->cluster_mcast_ttl > 0)
135 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
136
137 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
138 }
139
140 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
141 {
142 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
143 return -1;
144 }
145
146 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
147 {
148 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
149 return -1;
150 }
151
152 config->cluster_last_hb = TIME;
153 config->cluster_seq_number = -1;
154
155 return cluster_sockfd;
156 }
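//
// Usage sketch (illustrative only; the real event loop lives outside this
// file): the descriptor returned above is polled non-blocking, and each
// datagram read from it is handed to processcluster() with the sender's
// address.
//
#if 0
static void cluster_read_example(void)
{
	uint8_t buf[64 * 1024];
	struct sockaddr_in src;
	socklen_t len = sizeof(src);
	int n = recvfrom(cluster_sockfd, buf, sizeof(buf), 0, (struct sockaddr *) &src, &len);

	if (n > 0)
		processcluster(buf, n, src.sin_addr.s_addr);
}
#endif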
157
158
159 //
160 // Send a chunk of data to the entire cluster (usually via the multicast
161 // address).
162 //
163
164 static int cluster_send_data(void *data, int datalen)
165 {
166 struct sockaddr_in addr = {0};
167
168 if (!cluster_sockfd) return -1;
169 if (!config->cluster_address) return 0;
170
171 addr.sin_addr.s_addr = config->cluster_address;
172 addr.sin_port = htons(CLUSTERPORT);
173 addr.sin_family = AF_INET;
174
175 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
176
177 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
178 {
179 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
180 return -1;
181 }
182
183 return 0;
184 }
185
186 //
187 // Add a chunk of data to a heartbeat packet.
188 // Maintains the format. Assumes that the caller
189 // has passed in a big enough buffer!
190 //
191 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
192 {
193 *((uint32_t *) (*p)) = type;
194 *p += sizeof(uint32_t);
195
196 *((uint32_t *)(*p)) = more;
197 *p += sizeof(uint32_t);
198
199 if (data && size > 0) {
200 memcpy(*p, data, size);
201 *p += size;
202 }
203 }
204
205 // advertise our presence via BGP or gratuitous ARP
206 static void advertise_routes(void)
207 {
208 #ifdef BGP
209 if (bgp_configured)
210 bgp_enable_routing(1);
211 else
212 #endif /* BGP */
213 if (config->send_garp)
214 send_garp(config->bind_address); // Start taking traffic.
215 }
216
217 // withdraw our routes (BGP only)
218 static void withdraw_routes(void)
219 {
220 #ifdef BGP
221 if (bgp_configured)
222 bgp_enable_routing(0);
223 #endif /* BGP */
224 }
225
226 static void cluster_uptodate(void)
227 {
228 if (config->cluster_iam_uptodate)
229 return;
230
231 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles)
232 return;
233
234 config->cluster_iam_uptodate = 1;
235
236 LOG(0, 0, 0, "Now uptodate with master.\n");
237 advertise_routes();
238 }
239
240 //
241 // Send a unicast UDP packet to a peer with 'data' as the
242 // contents.
243 //
244 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
245 {
246 struct sockaddr_in addr = {0};
247
248 if (!cluster_sockfd) return -1;
249 if (!config->cluster_address) return 0;
250
251 if (!peer) // Odd??
252 return -1;
253
254 addr.sin_addr.s_addr = peer;
255 addr.sin_port = htons(CLUSTERPORT);
256 addr.sin_family = AF_INET;
257
258 LOG_HEX(5, "Peer send", data, size);
259
260 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
261 {
262 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
263 return -1;
264 }
265
266 return 0;
267 }
268
269 //
270 // Send a structured message to a peer with a single element of type 'type'.
271 //
272 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
273 {
274 uint8_t buf[65536]; // Vast overkill.
275 uint8_t *p = buf;
276
277 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
278 add_type(&p, type, more, data, size);
279
280 return peer_send_data(peer, buf, (p-buf) );
281 }
282
283 // send a packet to the master
284 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
285 {
286 uint8_t buf[65536]; // Vast overkill.
287 uint8_t *p = buf;
288
289 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
290 return -1;
291
292 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
293
294 STAT(c_forwarded);
295 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
296 memcpy(p, data, size);
297 p += size;
298
299 return peer_send_data(config->cluster_master_address, buf, (p - buf));
300 }
301
302 //
303 // Forward a state changing packet to the master.
304 //
305 // The master just processes the payload as if it had
306 // received it off the tun device.
307 // (note: this routine writes to data[-12] .. data[-1]; the caller must leave 3 * sizeof(uint32_t) bytes of headroom before 'data').
308 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, int port)
309 {
310 uint8_t *p = data - (3 * sizeof(uint32_t));
311 uint8_t *psave = p;
312
313 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
314 return -1;
315
316 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
317
318 STAT(c_forwarded);
319 add_type(&p, C_FORWARD, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
320
321 return peer_send_data(config->cluster_master_address, psave, size + (3 * sizeof(uint32_t)));
322 }
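//
// Caller-side sketch (illustrative only; names here are made up): because
// master_forward_packet() builds its C_FORWARD header in place, the packet
// must sit at least 3 * sizeof(uint32_t) bytes into its buffer.
//
#if 0
static void forward_example(in_addr_t from, int port)
{
	uint8_t buf[4096];
	uint8_t *pkt = buf + 3 * sizeof(uint32_t);	// headroom for the C_FORWARD header
	int len = 0;					// ...fill pkt with the received payload, set len...

	master_forward_packet(pkt, len, from, port);	// writes the header into pkt[-12..-1]
}
#endif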
323
324 // Forward PPPOE packet to the master.
325 // (note: this routine writes to data[-8] .. data[-1]; the caller must leave 2 * sizeof(uint32_t) bytes of headroom before 'data').
326 int master_forward_pppoe_packet(uint8_t *data, int size, uint8_t codepad)
327 {
328 uint8_t *p = data - (2 * sizeof(uint32_t));
329 uint8_t *psave = p;
330
331 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
332 return -1;
333
334 LOG(4, 0, 0, "Forward PPPOE packet to master, code %s (size %d)\n", get_string_codepad(codepad), size);
335
336 STAT(c_forwarded);
337 add_type(&p, C_PPPOE_FORWARD, codepad, NULL, 0);
338
339 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
340 }
341
342 // Forward a DAE RADIUS packet to the master.
343 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
344 {
345 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
346 }
347
348 //
349 // Forward a throttled packet to the master for handling.
350 //
351 // The master just drops the packet into the appropriate
352 // token bucket queue, and lets normal processing take care
353 // of it.
354 //
355 int master_throttle_packet(int tbfid, uint8_t *data, int size)
356 {
357 uint8_t buf[65536]; // Vast overkill.
358 uint8_t *p = buf;
359
360 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
361 return -1;
362
363 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
364
365 add_type(&p, C_THROTTLE, tbfid, data, size);
366
367 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
368
369 }
370
371 //
372 // Forward a walled garden packet to the master for handling.
373 //
374 // The master just writes the packet straight to the tun
375 // device (where is will normally loop through the
376 // firewall rules, and come back in on the tun device)
377 //
378 // (Note that this must be called with the tun header
379 // as the start of the data).
380 int master_garden_packet(sessionidt s, uint8_t *data, int size)
381 {
382 uint8_t buf[65536]; // Vast overkill.
383 uint8_t *p = buf;
384
385 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
386 return -1;
387
388 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
389
390 add_type(&p, C_GARDEN, s, data, size);
391
392 return peer_send_data(config->cluster_master_address, buf, (p-buf));
393
394 }
395
396 //
397 // Forward an MPPP packet to the master for handling.
398 //
399 // (Note that this must be called with the tun header
400 // as the start of the data).
401 // (i.e. this routine writes to data[-8]).
402 int master_forward_mppp_packet(sessionidt s, uint8_t *data, int size)
403 {
404 uint8_t *p = data - (2 * sizeof(uint32_t));
405 uint8_t *psave = p;
406
407 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
408 return -1;
409
410 LOG(4, 0, 0, "Forward MPPP packet to master (size %d)\n", size);
411
412 add_type(&p, C_MPPP_FORWARD, s, NULL, 0);
413
414 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
415
416 }
417
418 //
419 // Send a chunk of data as a heartbeat..
420 // We save it in the history buffer as we do so.
421 //
422 static void send_heartbeat(int seq, uint8_t *data, int size)
423 {
424 int i;
425
426 if (size > sizeof(past_hearts[0].data))
427 {
428 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
429 kill(0, SIGTERM);
430 exit(1);
431 }
432 i = seq % HB_HISTORY_SIZE;
433 past_hearts[i].seq = seq;
434 past_hearts[i].size = size;
435 memcpy(&past_hearts[i].data, data, size); // Save it.
436 cluster_send_data(data, size);
437 }
438
439 //
440 // Send an 'i am alive' message to every machine in the cluster.
441 //
442 void cluster_send_ping(time_t basetime)
443 {
444 uint8_t buff[100 + sizeof(pingt)];
445 uint8_t *p = buff;
446 pingt x;
447
448 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
449 return;
450
451 LOG(5, 0, 0, "Sending cluster ping...\n");
452
453 x.ver = 1;
454 x.addr = config->bind_address;
455 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels + config->cluster_undefined_bundles;
456 x.basetime = basetime;
457
458 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
459 cluster_send_data(buff, (p-buff) );
460 }
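// On the wire (per the add_type() call above) a ping is a single element:
// a 32 bit C_PING, a 32 bit 'extra' word carrying basetime, then the pingt
// structure (ver, addr, undef, basetime).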
461
462 //
463 // Walk the session counters looking for non-zero ones to send
464 // to the master. We send up to 600 of them at one time.
465 // We examine a maximum of 3000 sessions.
466 // (with 50k max sessions this should mean that we normally
467 // examine the entire session table every 25 seconds).
468
469 #define MAX_B_RECS (600)
470 void master_update_counts(void)
471 {
472 int i, c;
473 bytest b[MAX_B_RECS+1];
474
475 if (config->cluster_iam_master) // Only happens on the slaves.
476 return;
477
478 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
479 return;
480
481 i = MAX_B_RECS * 5; // Examine max 3000 sessions;
482 if (config->cluster_highest_sessionid > i)
483 i = config->cluster_highest_sessionid;
484
485 for ( c = 0; i > 0 ; --i) {
486 // Next session to look at.
487 walk_session_number++;
488 if ( walk_session_number > config->cluster_highest_sessionid)
489 walk_session_number = 1;
490
491 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
492 continue; // Unchanged. Skip it.
493
494 b[c].sid = walk_session_number;
495 b[c].pin = sess_local[walk_session_number].pin;
496 b[c].pout = sess_local[walk_session_number].pout;
497 b[c].cin = sess_local[walk_session_number].cin;
498 b[c].cout = sess_local[walk_session_number].cout;
499
500 // Reset counters.
501 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
502 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
503
504 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
505 break;
506 }
507
508 if (!c) // Didn't find any that changed. Get out of here!
509 return;
510
511
512 // Forward the data to the master.
513 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
514 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
515 return;
516 }
517
518 //
519 // On the master, check how our slaves are going. If
520 // one of them's not up-to-date we'll heartbeat faster.
521 // If we don't have any of them, then we need to turn
522 // on our own packet handling!
523 //
524 void cluster_check_slaves(void)
525 {
526 int i;
527 static int have_peers = 0;
528 int had_peers = have_peers;
529 clockt t = TIME;
530
531 if (!config->cluster_iam_master)
532 return; // Only runs on the master...
533
534 config->cluster_iam_uptodate = 1; // cleared in loop below
535
536 for (i = have_peers = 0; i < num_peers; i++)
537 {
538 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
539 continue; // Stale peer! Skip them.
540
541 if (!peers[i].basetime)
542 continue; // Shutdown peer! Skip them.
543
544 if (peers[i].uptodate)
545 have_peers++;
546 else
547 config->cluster_iam_uptodate = 0; // Start fast heartbeats
548 }
549
550 // in a cluster, withdraw/add routes when we get a peer/lose peers
551 if (have_peers != had_peers)
552 {
553 if (had_peers < config->cluster_master_min_adv &&
554 have_peers >= config->cluster_master_min_adv)
555 withdraw_routes();
556
557 else if (had_peers >= config->cluster_master_min_adv &&
558 have_peers < config->cluster_master_min_adv)
559 advertise_routes();
560 }
561 }
562
563 //
564 // Check that we have a master. If it's been too
565 // long since we heard from a master then hold an election.
566 //
567 void cluster_check_master(void)
568 {
569 int i, count, high_unique_id = 0;
570 int last_free = 0;
571 clockt t = TIME;
572 static int probed = 0;
573 int have_peers;
574
575 if (config->cluster_iam_master)
576 return; // Only runs on the slaves...
577
578 // If the master is late (missed 2 heartbeats by a second and a
579 // hair) it may be that the switch has dropped us from the
580 // multicast group, try unicasting probes to the master
581 // which will hopefully respond with a unicast heartbeat that
582 // will allow us to limp along until the querier next runs.
583 if (config->cluster_master_address
584 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
585 {
586 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
587 {
588 probed = TIME;
589 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
590 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
591
592 peer_send_message(config->cluster_master_address,
593 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
594 }
595 } else { // We got a recent heartbeat; reset the probe flag.
596 probed = 0;
597 }
598
599 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
600 return; // Everything's ok!
601
602 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
603 config->cluster_master_address = 0;
604
605 LOG(0, 0, 0, "Master timed out! Holding election...\n");
606
607 // In the process of shutting down, can't be master
608 if (main_quit)
609 return;
610
611 for (i = have_peers = 0; i < num_peers; i++)
612 {
613 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
614 continue; // Stale peer! Skip them.
615
616 if (!peers[i].basetime)
617 continue; // Shutdown peer! Skip them.
618
619 if (peers[i].basetime < basetime) {
620 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
621 return; // They'll win the election. Get out of here.
622 }
623
624 if (peers[i].basetime == basetime &&
625 peers[i].peer > my_address) {
626 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
627 return; // They'll win the election. Wait for them to come up.
628 }
629
630 if (peers[i].uptodate)
631 have_peers++;
632 }
633
634 // Wow, it's been ages since I last heard a heartbeat
635 // and I'm better than any of my peers, so it's time
636 // to become the master!!!
637
638 config->cluster_iam_master = 1;
639 pppoe_send_garp(); // gratuitous arp of the pppoe interface
640
641 LOG(0, 0, 0, "I am declaring myself the master!\n");
642
643 if (have_peers < config->cluster_master_min_adv)
644 advertise_routes();
645 else
646 withdraw_routes();
647
648 if (config->cluster_seq_number == -1)
649 config->cluster_seq_number = 0;
650
651 //
652 // Go through and mark all the tunnels as defined.
653 // Count the highest used tunnel number as well.
654 //
655 config->cluster_highest_tunnelid = 0;
656 for (i = 0; i < MAXTUNNEL; ++i) {
657 if (tunnel[i].state == TUNNELUNDEF)
658 tunnel[i].state = TUNNELFREE;
659
660 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
661 config->cluster_highest_tunnelid = i;
662 }
663
664 //
665 // Go through and mark all the bundles as defined.
666 // Count the highest used bundle number as well.
667 //
668 config->cluster_highest_bundleid = 0;
669 for (i = 0; i < MAXBUNDLE; ++i) {
670 if (bundle[i].state == BUNDLEUNDEF)
671 bundle[i].state = BUNDLEFREE;
672
673 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
674 config->cluster_highest_bundleid = i;
675 }
676
677 //
678 // Go through and mark all the sessions as being defined.
679 // reset the idle timeouts.
680 // add temporary byte counters to permanent ones.
681 // Re-string the free list.
682 // Find the ID of the highest session.
683 last_free = 0;
684 high_unique_id = 0;
685 config->cluster_highest_sessionid = 0;
686 for (i = 0, count = 0; i < MAXSESSION; ++i) {
687 if (session[i].tunnel == T_UNDEF) {
688 session[i].tunnel = T_FREE;
689 ++count;
690 }
691
692 if (!session[i].opened) { // Unused session. Add to free list.
693 memset(&session[i], 0, sizeof(session[i]));
694 session[i].tunnel = T_FREE;
695 session[last_free].next = i;
696 session[i].next = 0;
697 last_free = i;
698 continue;
699 }
700
701 // Reset idle timeouts..
702 session[i].last_packet = session[i].last_data = time_now;
703
704 // Reset die relative to our uptime rather than the old master's
705 if (session[i].die) session[i].die = TIME;
706
707 // Accumulate un-sent byte/packet counters.
708 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
709 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
710 session[i].cin_delta += sess_local[i].cin;
711 session[i].cout_delta += sess_local[i].cout;
712
713 session[i].pin += sess_local[i].pin;
714 session[i].pout += sess_local[i].pout;
715
716 sess_local[i].cin = sess_local[i].cout = 0;
717 sess_local[i].pin = sess_local[i].pout = 0;
718
719 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
720
721 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
722 high_unique_id = session[i].unique_id+1;
723
724 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
725 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
726
727 config->cluster_highest_sessionid = i;
728 }
729
730 session[last_free].next = 0; // End of chain.
731 last_id = high_unique_id; // Keep track of the highest used session ID.
732
733 become_master();
734
735 rebuild_address_pool();
736
737 // If we're not the very first master, this is a big issue!
738 if (count > 0)
739 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
740
741 config->cluster_undefined_sessions = 0;
742 config->cluster_undefined_bundles = 0;
743 config->cluster_undefined_tunnels = 0;
744 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
745
746 // FIXME. We need to fix up the tunnel control message
747 // queue here! There's a number of other variables we
748 // should also update.
749 }
750
751
752 //
753 // Check that our session table matches what the
754 // master has in mind.
755 //
756 // In particular, if we have too many sessions marked 'undefined'
757 // we fix it up here, and we ensure that the 'first free session'
758 // pointer is valid.
759 //
760 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel)
761 {
762 int i;
763
764 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
765
766 if (config->cluster_iam_uptodate)
767 return;
768
769 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles && hightunnel > config->cluster_undefined_tunnels)
770 return;
771
772 // Clear out defined sessions, counting the number of
773 // undefs remaining.
774 config->cluster_undefined_sessions = 0;
775 for (i = 1 ; i < MAXSESSION; ++i) {
776 if (i > highsession) {
777 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
778 continue;
779 }
780
781 if (session[i].tunnel == T_UNDEF)
782 ++config->cluster_undefined_sessions;
783 }
784
785 // Clear out defined bundles, counting the number of
786 // undefs remaining.
787 config->cluster_undefined_bundles = 0;
788 for (i = 1 ; i < MAXBUNDLE; ++i) {
789 if (i > highbundle) {
790 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
791 continue;
792 }
793
794 if (bundle[i].state == BUNDLEUNDEF)
795 ++config->cluster_undefined_bundles;
796 }
797
798 // Clear out defined tunnels, counting the number of
799 // undefs remaining.
800 config->cluster_undefined_tunnels = 0;
801 for (i = 1 ; i < MAXTUNNEL; ++i) {
802 if (i > hightunnel) {
803 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
804 continue;
805 }
806
807 if (tunnel[i].state == TUNNELUNDEF)
808 ++config->cluster_undefined_tunnels;
809 }
810
811
812 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles) {
813 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d tunn (high %d)\n",
814 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle, config->cluster_undefined_tunnels, hightunnel);
815 return;
816 }
817
818 // Are we up to date?
819
820 if (!config->cluster_iam_uptodate)
821 cluster_uptodate();
822 }
823
824 static int hb_add_type(uint8_t **p, int type, int id)
825 {
826 switch (type) {
827 case C_CSESSION: { // Compressed C_SESSION.
828 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
829 uint8_t *d = (uint8_t *) &session[id];
830 uint8_t *orig = d;
831 int size;
832
833 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
834
835 // Did we compress the full structure, and is the size actually
836 // reduced??
837 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
838 add_type(p, C_CSESSION, id, c, size);
839 break;
840 }
841 // Failed to compress : Fall through.
842 }
843 case C_SESSION:
844 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
845 break;
846
847 case C_CBUNDLE: { // Compressed C_BUNDLE
848 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
849 uint8_t *d = (uint8_t *) &bundle[id];
850 uint8_t *orig = d;
851 int size;
852
853 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
854
855 // Did we compress the full structure, and is the size actually
856 // reduced??
857 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
858 add_type(p, C_CBUNDLE, id, c, size);
859 break;
860 }
861 // Failed to compress : Fall through.
862 }
863
864 case C_BUNDLE:
865 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
866 break;
867
868 case C_CTUNNEL: { // Compressed C_TUNNEL
869 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
870 uint8_t *d = (uint8_t *) &tunnel[id];
871 uint8_t *orig = d;
872 int size;
873
874 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
875
876 // Did we compress the full structure, and is the size actually
877 // reduced??
878 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
879 add_type(p, C_CTUNNEL, id, c, size);
880 break;
881 }
882 // Failed to compress : Fall through.
883 }
884 case C_TUNNEL:
885 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
886 break;
887 default:
888 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
889 kill(0, SIGTERM);
890 exit(1);
891 }
892 return 0;
893 }
894
895 //
896 // Send a heartbeat, incidentally sending out any queued changes.
897 //
898 void cluster_heartbeat()
899 {
900 int i, count = 0, tcount = 0, bcount = 0;
901 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
902 heartt h;
903 uint8_t *p = buff;
904
905 if (!config->cluster_iam_master) // Only the master does this.
906 return;
907
908 config->cluster_table_version += config->cluster_num_changes;
909
910 // Fill out the heartbeat header.
911 memset(&h, 0, sizeof(h));
912
913 h.version = HB_VERSION;
914 h.seq = config->cluster_seq_number;
915 h.basetime = basetime;
916 h.clusterid = config->bind_address; // Will this do??
918 h.highsession = config->cluster_highest_sessionid;
919 h.freesession = sessionfree;
920 h.hightunnel = config->cluster_highest_tunnelid;
921 h.highbundle = config->cluster_highest_bundleid;
922 h.size_sess = sizeof(sessiont); // Just in case.
923 h.size_bund = sizeof(bundlet);
924 h.size_tunn = sizeof(tunnelt);
925 h.interval = config->cluster_hb_interval;
926 h.timeout = config->cluster_hb_timeout;
927 h.table_version = config->cluster_table_version;
928
929 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
930
931 for (i = 0; i < config->cluster_num_changes; ++i) {
932 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
933 }
934
935 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
936 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
937 kill(0, SIGTERM);
938 exit(1);
939 }
940
941 //
942 // Fill out the packet with sessions from the session table...
943 // (not forgetting to leave space so we can get some tunnels in too )
944 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
945
946 if (!walk_session_number) // session #0 isn't valid.
947 ++walk_session_number;
948
949 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
950 break;
951
952 hb_add_type(&p, C_CSESSION, walk_session_number);
953 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
954
955 ++count; // Count the number of extra sessions we're sending.
956 }
957
958 //
959 // Fill out the packet with tunnels from the tunnel table...
960 // This effectively means we walk the tunnel table more quickly
961 // than the session table. This is good because stuffing up a
962 // tunnel is a much bigger deal than stuffing up a session.
963 //
964 while ( (p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE) ) {
965
966 if (!walk_tunnel_number) // tunnel #0 isn't valid.
967 ++walk_tunnel_number;
968
969 if (tcount >= config->cluster_highest_tunnelid)
970 break;
971
972 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
973 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
974
975 ++tcount;
976 }
977
978 //
979 // Fill out the packet with bundles from the bundle table...
980 while ( (p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE) ) {
981
982 if (!walk_bundle_number) // bundle #0 isn't valid.
983 ++walk_bundle_number;
984
985 if (bcount >= config->cluster_highest_bundleid)
986 break;
987
988 hb_add_type(&p, C_CBUNDLE, walk_bundle_number);
989 walk_bundle_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
990 ++bcount;
991 }
992
993 //
994 // Did we do something wrong?
995 if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
996 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
997 kill(0, SIGTERM);
998 exit(1);
999 }
1000
1001 LOG(4, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
1002 "(%d x-sess, %d x-bundles, %d x-tunnels, %d highsess, %d highbund, %d hightun, size %d)\n",
1003 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
1004 count, bcount, tcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
1005 config->cluster_highest_tunnelid, (int) (p - buff));
1006
1007 config->cluster_num_changes = 0;
1008
1009 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
1010
1011 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
1012 }
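//
// For reference, the heartbeat assembled above is laid out as a sequence of
// elements in the usual packet format:
//
//	[C_HEARTBEAT][HB_VERSION][heartt h]
//	[C_CSESSION or C_SESSION][id][RLE or raw sessiont]	(queued changes, then table walk)
//	[C_CTUNNEL  or C_TUNNEL ][id][RLE or raw tunnelt]
//	[C_CBUNDLE  or C_BUNDLE ][id][RLE or raw bundlet]
//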
1013
1014 //
1015 // A structure of type 'type' has changed; Add it to the queue to send.
1016 //
1017 static int type_changed(int type, int id)
1018 {
1019 int i;
1020
1021 for (i = 0 ; i < config->cluster_num_changes ; ++i)
1022 {
1023 if ( cluster_changes[i].id == id && cluster_changes[i].type == type)
1024 {
1025 // Already marked for change, remove it
1026 --config->cluster_num_changes;
1027 memmove(&cluster_changes[i],
1028 &cluster_changes[i+1],
1029 (config->cluster_num_changes - i) * sizeof(cluster_changes[i]));
1030 break;
1031 }
1032 }
1033
1034 cluster_changes[config->cluster_num_changes].type = type;
1035 cluster_changes[config->cluster_num_changes].id = id;
1036 ++config->cluster_num_changes;
1037
1038 if (config->cluster_num_changes >= MAX_CHANGES) // queue only holds MAX_CHANGES entries
1039 cluster_heartbeat(); // flush now
1040
1041 return 1;
1042 }
1043
1044 // A particular session has been changed!
1045 int cluster_send_session(int sid)
1046 {
1047 if (!config->cluster_iam_master) {
1048 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
1049 return -1;
1050 }
1051
1052 if (forked) {
1053 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1054 return -1;
1055 }
1056
1057 return type_changed(C_CSESSION, sid);
1058 }
1059
1060 // A particular bundle has been changed!
1061 int cluster_send_bundle(int bid)
1062 {
1063 if (!config->cluster_iam_master) {
1064 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1065 return -1;
1066 }
1067
1068 return type_changed(C_CBUNDLE, bid);
1069 }
1070
1071 // A particular tunnel has been changed!
1072 int cluster_send_tunnel(int tid)
1073 {
1074 if (!config->cluster_iam_master) {
1075 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1076 return -1;
1077 }
1078
1079 return type_changed(C_CTUNNEL, tid);
1080 }
1081
1082
1083 //
1084 // We're a master, and a slave has just told us that it's
1085 // missed a packet. We'll resend every packet since
1086 // the last one it has seen.
1087 //
1088 static int cluster_catchup_slave(int seq, in_addr_t slave)
1089 {
1090 int s;
1091 int diff;
1092
1093 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1094 if (!config->cluster_iam_master) {
1095 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1096 fmtaddr(config->cluster_master_address, 0));
1097
1098 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1099 return 0;
1100 }
1101
1102 diff = config->cluster_seq_number - seq; // How many packets do we need to send?
1103 if (diff < 0)
1104 diff += HB_MAX_SEQ;
1105
1106 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packet to send it!
1107 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1108 seq, config->cluster_seq_number);
1109 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1110 }
1111
1112 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1113
1114 // Now resend every packet that it missed, in order.
1115 while (seq != config->cluster_seq_number) {
1116 s = seq % HB_HISTORY_SIZE;
1117 if (seq != past_hearts[s].seq) {
1118 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1119 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1120 return -1; // What to do here!?
1121 }
1122 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1123 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1124 }
1125 return 0; // All good!
1126 }
1127
1128 //
1129 // We've heard from another peer! Add it to the list
1130 // that we select from at election time.
1131 //
1132 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1133 {
1134 int i;
1135 in_addr_t clusterid;
1136 pingt p;
1137
1138 // Allow for backward compatibility.
1139 // Copy the ping packet into a new structure to allow
1140 // for the possibility that we might have received
1141 // more or fewer elements than we were expecting.
1142 if (size > sizeof(p))
1143 size = sizeof(p);
1144
1145 memset( (void *) &p, 0, sizeof(p) );
1146 memcpy( (void *) &p, (void *) pp, size);
1147
1148 clusterid = p.addr;
1149 if (clusterid != config->bind_address)
1150 {
1151 // Is this for us?
1152 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1153 return 0;
1154 }
1155
1156 for (i = 0; i < num_peers ; ++i)
1157 {
1158 if (peers[i].peer != peer)
1159 continue;
1160
1161 // This peer already exists. Just update the timestamp.
1162 peers[i].basetime = basetime;
1163 peers[i].timestamp = TIME;
1164 peers[i].uptodate = !p.undef;
1165 break;
1166 }
1167
1168 // Is this the master shutting down??
1169 if (peer == config->cluster_master_address) {
1170 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1171 basetime ? "has restarted!" : "shutting down...");
1172
1173 config->cluster_master_address = 0;
1174 config->cluster_last_hb = 0; // Force an election.
1175 cluster_check_master();
1176 }
1177
1178 if (i >= num_peers)
1179 {
1180 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1181
1182 // Not found. Is there a stale slot to re-use?
1183 for (i = 0; i < num_peers ; ++i)
1184 {
1185 if (!peers[i].basetime) // Shutdown
1186 break;
1187
1188 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1189 break;
1190 }
1191
1192 if (i >= CLUSTER_MAX_SIZE)
1193 {
1194 // Too many peers!!
1195 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1196 return -1;
1197 }
1198
1199 peers[i].peer = peer;
1200 peers[i].basetime = basetime;
1201 peers[i].timestamp = TIME;
1202 peers[i].uptodate = !p.undef;
1203 if (i == num_peers)
1204 ++num_peers;
1205
1206 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1207 }
1208
1209 return 1;
1210 }
1211
1212 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1213 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1214 {
1215 if (config->cluster_iam_master) // Sanity...
1216 return 0;
1217
1218 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1219 fmtaddr(master, 1));
1220
1221 config->cluster_master_address = master;
1222 if (master)
1223 {
1224 // catchup with new master
1225 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1226
1227 // delay next election
1228 config->cluster_last_hb = TIME;
1229 }
1230
1231 // run election (or reset "probed" if master was set)
1232 cluster_check_master();
1233 return 0;
1234 }
1235
1236 /* Handle the slave updating the byte counters for the master. */
1237 //
1238 // Note that we don't mark the session as dirty; We rely on
1239 // the slow table walk to propagate this back out to the slaves.
1240 //
1241 static int cluster_handle_bytes(uint8_t *data, int size)
1242 {
1243 bytest *b;
1244
1245 b = (bytest *) data;
1246
1247 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1248
1249 /* Loop around, adding the byte
1250 counts to each of the sessions. */
1251
1252 while (size >= sizeof(*b) ) {
1253 if (b->sid >= MAXSESSION) {
1254 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1255 return -1; /* Abort processing */
1256 }
1257
1258 session[b->sid].pin += b->pin;
1259 session[b->sid].pout += b->pout;
1260
1261 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1262 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1263
1264 session[b->sid].cin_delta += b->cin;
1265 session[b->sid].cout_delta += b->cout;
1266
1267 if (b->cin)
1268 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1269 else if (b->cout)
1270 session[b->sid].last_data = time_now;
1271
1272 size -= sizeof(*b);
1273 ++b;
1274 }
1275
1276 if (size != 0)
1277 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1278
1279 return size;
1280 }
1281
1282 //
1283 // Handle receiving a session structure in a heartbeat packet.
1284 //
1285 static int cluster_recv_session(int more, uint8_t *p)
1286 {
1287 if (more >= MAXSESSION) {
1288 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1289 return -1;
1290 }
1291
1292 if (session[more].tunnel == T_UNDEF) {
1293 if (config->cluster_iam_uptodate) { // Sanity.
1294 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1295 } else {
1296 --config->cluster_undefined_sessions;
1297 }
1298 }
1299
1300 load_session(more, (sessiont *) p); // Copy session into session table..
1301
1302 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1303
1304 if (!config->cluster_iam_uptodate)
1305 cluster_uptodate(); // Check to see if we're up to date.
1306
1307 return 0;
1308 }
1309
1310 static int cluster_recv_bundle(int more, uint8_t *p)
1311 {
1312 if (more >= MAXBUNDLE) {
1313 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1314 return -1;
1315 }
1316
1317 if (bundle[more].state == BUNDLEUNDEF) {
1318 if (config->cluster_iam_uptodate) { // Sanity.
1319 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1320 } else {
1321 --config->cluster_undefined_bundles;
1322 }
1323 }
1324
1325 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1326
1327 LOG(5, 0, more, "Received bundle update\n");
1328
1329 if (!config->cluster_iam_uptodate)
1330 cluster_uptodate(); // Check to see if we're up to date.
1331
1332 return 0;
1333 }
1334
1335 static int cluster_recv_tunnel(int more, uint8_t *p)
1336 {
1337 if (more >= MAXTUNNEL) {
1338 LOG(0, 0, 0, "DANGER: Received a tunnel session id > MAXTUNNEL!\n");
1339 return -1;
1340 }
1341
1342 if (tunnel[more].state == TUNNELUNDEF) {
1343 if (config->cluster_iam_uptodate) { // Sanity.
1344 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1345 } else {
1346 --config->cluster_undefined_tunnels;
1347 }
1348 }
1349
1350 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1351
1352 //
1353 // Clear tunnel control messages. These are dynamically allocated.
1354 // If we get unlucky, this may cause the tunnel to drop!
1355 //
1356 tunnel[more].controls = tunnel[more].controle = NULL;
1357 tunnel[more].controlc = 0;
1358
1359 LOG(5, 0, more, "Received tunnel update\n");
1360
1361 if (!config->cluster_iam_uptodate)
1362 cluster_uptodate(); // Check to see if we're up to date.
1363
1364 return 0;
1365 }
1366
1367
1368 // pre v6 heartbeat session structure
1369 struct oldsession {
1370 sessionidt next;
1371 sessionidt far;
1372 tunnelidt tunnel;
1373 uint8_t flags;
1374 struct {
1375 uint8_t phase;
1376 uint8_t lcp:4;
1377 uint8_t ipcp:4;
1378 uint8_t ipv6cp:4;
1379 uint8_t ccp:4;
1380 } ppp;
1381 char reserved_1[2];
1382 in_addr_t ip;
1383 int ip_pool_index;
1384 uint32_t unique_id;
1385 char reserved_2[4];
1386 uint32_t magic;
1387 uint32_t pin, pout;
1388 uint32_t cin, cout;
1389 uint32_t cin_wrap, cout_wrap;
1390 uint32_t cin_delta, cout_delta;
1391 uint16_t throttle_in;
1392 uint16_t throttle_out;
1393 uint8_t filter_in;
1394 uint8_t filter_out;
1395 uint16_t mru;
1396 clockt opened;
1397 clockt die;
1398 uint32_t session_timeout;
1399 uint32_t idle_timeout;
1400 time_t last_packet;
1401 time_t last_data;
1402 in_addr_t dns1, dns2;
1403 routet route[MAXROUTE];
1404 uint16_t tbf_in;
1405 uint16_t tbf_out;
1406 int random_vector_length;
1407 uint8_t random_vector[MAXTEL];
1408 char user[MAXUSER];
1409 char called[MAXTEL];
1410 char calling[MAXTEL];
1411 uint32_t tx_connect_speed;
1412 uint32_t rx_connect_speed;
1413 clockt timeout;
1414 uint32_t mrru;
1415 uint8_t mssf;
1416 epdist epdis;
1417 bundleidt bundle;
1418 in_addr_t snoop_ip;
1419 uint16_t snoop_port;
1420 uint8_t walled_garden;
1421 uint8_t ipv6prefixlen;
1422 struct in6_addr ipv6route;
1423 char reserved_3[11];
1424 };
1425
1426 static uint8_t *convert_session(struct oldsession *old)
1427 {
1428 static sessiont new;
1429 int i;
1430
1431 memset(&new, 0, sizeof(new));
1432
1433 new.next = old->next;
1434 new.far = old->far;
1435 new.tunnel = old->tunnel;
1436 new.flags = old->flags;
1437 new.ppp.phase = old->ppp.phase;
1438 new.ppp.lcp = old->ppp.lcp;
1439 new.ppp.ipcp = old->ppp.ipcp;
1440 new.ppp.ipv6cp = old->ppp.ipv6cp;
1441 new.ppp.ccp = old->ppp.ccp;
1442 new.ip = old->ip;
1443 new.ip_pool_index = old->ip_pool_index;
1444 new.unique_id = old->unique_id;
1445 new.magic = old->magic;
1446 new.pin = old->pin;
1447 new.pout = old->pout;
1448 new.cin = old->cin;
1449 new.cout = old->cout;
1450 new.cin_wrap = old->cin_wrap;
1451 new.cout_wrap = old->cout_wrap;
1452 new.cin_delta = old->cin_delta;
1453 new.cout_delta = old->cout_delta;
1454 new.throttle_in = old->throttle_in;
1455 new.throttle_out = old->throttle_out;
1456 new.filter_in = old->filter_in;
1457 new.filter_out = old->filter_out;
1458 new.mru = old->mru;
1459 new.opened = old->opened;
1460 new.die = old->die;
1461 new.session_timeout = old->session_timeout;
1462 new.idle_timeout = old->idle_timeout;
1463 new.last_packet = old->last_packet;
1464 new.last_data = old->last_data;
1465 new.dns1 = old->dns1;
1466 new.dns2 = old->dns2;
1467 new.tbf_in = old->tbf_in;
1468 new.tbf_out = old->tbf_out;
1469 new.random_vector_length = old->random_vector_length;
1470 new.tx_connect_speed = old->tx_connect_speed;
1471 new.rx_connect_speed = old->rx_connect_speed;
1472 new.timeout = old->timeout;
1473 new.mrru = old->mrru;
1474 new.mssf = old->mssf;
1475 new.epdis = old->epdis;
1476 new.bundle = old->bundle;
1477 new.snoop_ip = old->snoop_ip;
1478 new.snoop_port = old->snoop_port;
1479 new.walled_garden = old->walled_garden;
1480 new.ipv6prefixlen = old->ipv6prefixlen;
1481 new.ipv6route = old->ipv6route;
1482
1483 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1484 memcpy(new.user, old->user, sizeof(new.user));
1485 memcpy(new.called, old->called, sizeof(new.called));
1486 memcpy(new.calling, old->calling, sizeof(new.calling));
1487
1488 for (i = 0; i < MAXROUTE; i++)
1489 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1490
1491 return (uint8_t *) &new;
1492 }
1493
1494 //
1495 // Process a heartbeat..
1496 //
1497 // v6: added RADIUS class attribute, re-ordered session structure
1498 // v7: added tunnelt attribute at the end of struct (tunnelt size change)
1499 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1500 {
1501 heartt *h;
1502 int s = size - (p-data);
1503 int i, type;
1504 int hb_ver = more;
1505
1506 #ifdef LAC
1507 #if HB_VERSION != 7
1508 # error "need to update cluster_process_heartbeat()"
1509 #endif
1510 #else
1511 #if HB_VERSION != 6
1512 # error "need to update cluster_process_heartbeat()"
1513 #endif
1514 #endif
1515
1516
1517 // we handle heartbeat versions 5 through HB_VERSION (6, or 7 when built with LAC)
1518 if (hb_ver < 5 || hb_ver > HB_VERSION) {
1519 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1520 return -1; // Ignore it??
1521 }
1522
1523 if (size > sizeof(past_hearts[0].data)) {
1524 LOG(0, 0, 0, "Received an oversize heartbeat from %s (%d)!\n", fmtaddr(addr, 0), size);
1525 return -1;
1526 }
1527
1528 if (s < sizeof(*h))
1529 goto shortpacket;
1530
1531 h = (heartt *) p;
1532 p += sizeof(*h);
1533 s -= sizeof(*h);
1534
1535 if (h->clusterid != config->bind_address)
1536 return -1; // It's not part of our cluster.
1537
1538 if (config->cluster_iam_master) { // Sanity...
1539 // Note that this MUST match the election process above!
1540
1541 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1542 if (!h->basetime) {
1543 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1544 return -1; // Skip it.
1545 }
1546
1547 if (h->table_version > config->cluster_table_version) {
1548 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1549 h->table_version, config->cluster_table_version);
1550
1551 kill(0, SIGTERM);
1552 exit(1);
1553 }
1554
1555 if (h->table_version < config->cluster_table_version)
1556 return -1;
1557
1558 if (basetime > h->basetime) {
1559 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1560 kill(0, SIGTERM);
1561 exit(1);
1562 }
1563
1564 if (basetime < h->basetime)
1565 return -1;
1566
1567 if (my_address < addr) { // Tie breaker.
1568 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1569 kill(0, SIGTERM);
1570 exit(1);
1571 }
1572
1573 //
1574 // Send it a unicast heartbeat to give it a chance to die.
1575 // NOTE: It's actually safe to do seq-number - 1 without checking
1576 // for wrap around.
1577 //
1578 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1579
1580 return -1; // Skip it.
1581 }
1582
1583 //
1584 // Try and guard against a stray master appearing.
1585 //
1586 // Ignore heartbeats received from another master before the
1587 // timeout (less a smidgen) for the old master has elapsed.
1588 //
1589 // Note that after a clean failover, the cluster_master_address
1590 // is cleared, so this doesn't run.
1591 //
1592 if (config->cluster_master_address && addr != config->cluster_master_address) {
1593 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1594 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1595 0.1 * (TIME - config->cluster_last_hb));
1596 return -1; // ignore
1597 }
1598
1599 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1600 config->cluster_seq_number = h->seq;
1601
1602 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1603 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1604
1605 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1606 static int lastseen_seq = 0;
1607 static time_t lastseen_time = 0;
1608
1609 // limit to once per second for a particular seq#
1610 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1611
1612 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1613 h->seq, config->cluster_seq_number,
1614 ask ? "Asking for resend" : "Ignoring");
1615
1616 if (ask)
1617 {
1618 lastseen_seq = config->cluster_seq_number;
1619 lastseen_time = time_now;
1620 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1621 }
1622
1623 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1624
1625 // Just drop the packet. The master will resend it as part of the catchup.
1626
1627 return 0;
1628 }
1629 // Save the packet in our buffer.
1630 // This is needed in case we become the master.
1631 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1632 i = h->seq % HB_HISTORY_SIZE;
1633 past_hearts[i].seq = h->seq;
1634 past_hearts[i].size = size;
1635 memcpy(&past_hearts[i].data, data, size); // Save it.
1636
1637
1638 // Check that we don't have too many undefined sessions, and
1639 // that the free session pointer is correct.
1640 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel);
1641
1642 if (h->interval != config->cluster_hb_interval)
1643 {
1644 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1645 h->interval, config->cluster_hb_interval);
1646
1647 config->cluster_hb_interval = h->interval;
1648 }
1649
1650 if (h->timeout != config->cluster_hb_timeout)
1651 {
1652 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1653 h->timeout, config->cluster_hb_timeout);
1654
1655 config->cluster_hb_timeout = h->timeout;
1656 }
1657
1658 // Ok. process the packet...
1659 while ( s > 0) {
1660
1661 type = *((uint32_t *) p);
1662 p += sizeof(uint32_t);
1663 s -= sizeof(uint32_t);
1664
1665 more = *((uint32_t *) p);
1666 p += sizeof(uint32_t);
1667 s -= sizeof(uint32_t);
1668
1669 switch (type) {
1670 case C_CSESSION: { // Compressed session structure.
1671 uint8_t c[ sizeof(sessiont) + 2];
1672 int size;
1673 uint8_t *orig_p = p;
1674
1675 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1676 s -= (p - orig_p);
1677
1678 // session struct changed with v6
1679 if (hb_ver < 6)
1680 {
1681 if (size != sizeof(struct oldsession)) {
1682 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1683 // Now what? Should exit! No-longer up to date!
1684 break;
1685 }
1686 cluster_recv_session(more, convert_session((struct oldsession *) c));
1687 break;
1688 }
1689
1690 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1691 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1692 // Now what? Should exit! No-longer up to date!
1693 break;
1694 }
1695
1696 cluster_recv_session(more, c);
1697 break;
1698 }
1699 case C_SESSION:
1700 if (hb_ver < 6)
1701 {
1702 if (s < sizeof(struct oldsession))
1703 goto shortpacket;
1704
1705 cluster_recv_session(more, convert_session((struct oldsession *) p));
1706
1707 p += sizeof(struct oldsession);
1708 s -= sizeof(struct oldsession);
1709 break;
1710 }
1711
1712 if ( s < sizeof(session[more]))
1713 goto shortpacket;
1714
1715 cluster_recv_session(more, p);
1716
1717 p += sizeof(session[more]);
1718 s -= sizeof(session[more]);
1719 break;
1720
1721 case C_CTUNNEL: { // Compressed tunnel structure.
1722 uint8_t c[ sizeof(tunnelt) + 2];
1723 int size;
1724 uint8_t *orig_p = p;
1725
1726 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1727 s -= (p - orig_p);
1728
1729 #ifdef LAC
1730 if ( ((hb_ver >= HB_VERSION) && (size != sizeof(tunnelt))) ||
1731 ((hb_ver < HB_VERSION) && (size > sizeof(tunnelt))) )
1732 #else
1733 if (size != sizeof(tunnelt) )
1734 #endif
1735 { // Ouch! Very very bad!
1736 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1737 // Now what? Should exit! No-longer up to date!
1738 break;
1739 }
1740
1741 cluster_recv_tunnel(more, c);
1742 break;
1743
1744 }
1745 case C_TUNNEL:
1746 if ( s < sizeof(tunnel[more]))
1747 goto shortpacket;
1748
1749 cluster_recv_tunnel(more, p);
1750
1751 p += sizeof(tunnel[more]);
1752 s -= sizeof(tunnel[more]);
1753 break;
1754
1755 case C_CBUNDLE: { // Compressed bundle structure.
1756 uint8_t c[ sizeof(bundlet) + 2];
1757 int size;
1758 uint8_t *orig_p = p;
1759
1760 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1761 s -= (p - orig_p);
1762
1763 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1764 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1765 // Now what? Should exit! No-longer up to date!
1766 break;
1767 }
1768
1769 cluster_recv_bundle(more, c);
1770 break;
1771
1772 }
1773 case C_BUNDLE:
1774 if ( s < sizeof(bundle[more]))
1775 goto shortpacket;
1776
1777 cluster_recv_bundle(more, p);
1778
1779 p += sizeof(bundle[more]);
1780 s -= sizeof(bundle[more]);
1781 break;
1782 default:
1783 LOG(0, 0, 0, "DANGER: I received a heartbeat element where I didn't understand the type! (%d)\n", type);
1784 return -1; // can't process any more of the packet!!
1785 }
1786 }
1787
1788 if (config->cluster_master_address != addr)
1789 {
1790 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1791 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1792
1793 config->cluster_master_address = addr;
1794 }
1795
1796 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1797 config->cluster_table_version = h->table_version;
1798 return 0;
1799
1800 shortpacket:
1801 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1802 return -1;
1803 }
1804
1805 //
1806 // We got a packet on the cluster port!
1807 // Handle pings, lastseens, and heartbeats!
1808 //
1809 int processcluster(uint8_t *data, int size, in_addr_t addr)
1810 {
1811 int type, more;
1812 uint8_t *p = data;
1813 int s = size;
1814
1815 if (addr == my_address)
1816 return -1; // Ignore it. Something looped back the multicast!
1817
1818 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1819
1820 if (s <= 0) // Any data there??
1821 return -1;
1822
1823 if (s < 8)
1824 goto shortpacket;
1825
1826 type = *((uint32_t *) p);
1827 p += sizeof(uint32_t);
1828 s -= sizeof(uint32_t);
1829
1830 more = *((uint32_t *) p);
1831 p += sizeof(uint32_t);
1832 s -= sizeof(uint32_t);
1833
1834 switch (type)
1835 {
1836 case C_PING: // Update the peers table.
1837 return cluster_add_peer(addr, more, (pingt *) p, s);
1838
1839 case C_MASTER: // Our master is wrong
1840 return cluster_set_master(addr, more);
1841
1842 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1843 return cluster_catchup_slave(more, addr);
1844
1845 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1846 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1847 if (!config->cluster_iam_master)
1848 {
1849 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1850 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1851
1852 return -1;
1853 }
1854 else
1855 {
1856 struct sockaddr_in a;
1857 a.sin_addr.s_addr = more;
1858
1859 a.sin_port = *(int *) p;
1860 s -= sizeof(int);
1861 p += sizeof(int);
1862
1863 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
1864 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
1865
1866 STAT(recv_forward);
1867 if (type == C_FORWARD_DAE)
1868 {
1869 struct in_addr local;
1870 local.s_addr = config->bind_address ? config->bind_address : my_address;
1871 processdae(p, s, &a, sizeof(a), &local);
1872 }
1873 else
1874 processudp(p, s, &a);
1875
1876 return 0;
1877 }
1878 case C_PPPOE_FORWARD:
1879 if (!config->cluster_iam_master)
1880 {
1881 LOG(0, 0, 0, "I'm not the master, but I got a C_PPPOE_FORWARD from %s?\n", fmtaddr(addr, 0));
1882 return -1;
1883 }
1884 else
1885 {
1886 pppoe_process_forward(p, s, addr);
1887 return 0;
1888 }
1889
1890 case C_MPPP_FORWARD:
1891 // Receive an MPPP packet from a slave.
1892 if (!config->cluster_iam_master) {
1893 LOG(0, 0, 0, "I'm not the master, but I got a C_MPPP_FORWARD from %s?\n", fmtaddr(addr, 0));
1894 return -1;
1895 }
1896
1897 processipout(p, s);
1898 return 0;
1899
1900 case C_THROTTLE: { // Receive a forwarded packet from a slave.
1901 if (!config->cluster_iam_master) {
1902 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
1903 return -1;
1904 }
1905
1906 tbf_queue_packet(more, p, s); // The TBF id tells whether it goes in or out.
1907 return 0;
1908 }
1909 case C_GARDEN:
1910 // Receive a walled garden packet from a slave.
1911 if (!config->cluster_iam_master) {
1912 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
1913 return -1;
1914 }
1915
1916 tun_write(p, s);
1917 return 0;
1918
1919 case C_BYTES:
1920 if (!config->cluster_iam_master) {
1921 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
1922 return -1;
1923 }
1924
1925 return cluster_handle_bytes(p, s);
1926
1927 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
1928 if (config->cluster_iam_master) {
1929 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
1930 return -1;
1931 }
1932 if (more != config->cluster_seq_number) {
1933 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
1934 return -1;
1935 }
1936
1937 if (addr != config->cluster_master_address) {
1938 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
1939 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
1940 // We can only warn about it. The master might really have switched!
1941 }
1942
1943 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
1944 kill(0, SIGTERM);
1945 exit(0); // Let's be paranoid;
1946 return -1; // Not reached; just keeps the compiler happy.
1947
1948 case C_HEARTBEAT:
1949 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
1950 return cluster_process_heartbeat(data, size, more, p, addr);
1951
1952 default:
1953 LOG(0, 0, 0, "Strange type packet received on cluster socket (%d)\n", type);
1954 return -1;
1955 }
1956 return 0;
1957
1958 shortpacket:
1959 LOG(0, 0, 0, "I got a _short_ cluster heartbeat packet! This means I'm probably out of sync!!\n");
1960 return -1;
1961 }
1962
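/*
 * Illustration only (not compiled): a sketch of the C_FORWARD datagram that
 * the parser above expects from a slave.  The layout is inferred purely from
 * the receive path in processcluster() -- a 32-bit type, the 32-bit peer
 * address in the second word, an int-sized port, then the raw UDP payload.
 * The helper name build_c_forward() is invented for this sketch; the real
 * sender lives elsewhere in this file.
 */
#if 0
static int build_c_forward(uint8_t *buf, int bufsize,
	in_addr_t peer_ip, int peer_port, uint8_t const *pkt, int pktlen)
{
	uint8_t *q = buf;

	if (bufsize < (int) (2 * sizeof(uint32_t) + sizeof(int)) + pktlen)
		return -1;			// Not enough room for the headers plus payload.

	*(uint32_t *) q = C_FORWARD;		// 'type', read first by processcluster().
	q += sizeof(uint32_t);
	*(uint32_t *) q = peer_ip;		// Second word: becomes a.sin_addr.s_addr on the master.
	q += sizeof(uint32_t);
	*(int *) q = peer_port;			// Read back with *(int *) p above.
	q += sizeof(int);
	memcpy(q, pkt, pktlen);			// Original UDP payload, handed on to processudp().
	q += pktlen;

	return q - buf;				// Number of bytes to send to the master.
}
#endif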
1963 //====================================================================================================
1964
1965 int cmd_show_cluster(struct cli_def *cli, char *command, char **argv, int argc)
1966 {
1967 int i;
1968
1969 if (CLI_HELP_REQUESTED)
1970 return CLI_HELP_NO_ARGS;
1971
1972 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
1973 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
1974 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
1975 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
1976 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
1977
1978 if (!config->cluster_iam_master) {
1979 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
1980 config->cluster_master_address
1981 ? fmtaddr(config->cluster_master_address, 0)
1982 : "Not defined",
1983 0.1 * (TIME - config->cluster_last_hb));
1984 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
1985 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1986 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
1987 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
1988 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
1989 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
1990 } else {
1991 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1992 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
1993 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
1994 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
1995 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
1996 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
1997 }
1998 cli_print(cli, "%d peers.", num_peers);
1999
2000 if (num_peers)
2001 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
2002 for (i = 0; i < num_peers; ++i) {
2003 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
2004 peers[i].basetime, TIME - peers[i].timestamp);
2005 }
2006 return CLI_OK;
2007 }
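/*
 * Illustrative only -- every value below is a placeholder, not real output.
 * On a slave, the cli_print() calls above produce output shaped like:
 *
 *   Cluster status : Slave
 *   My address : 192.0.2.2
 *   VIP address : 192.0.2.1
 *   Multicast address: 239.255.0.1
 *   Multicast i'face : eth0
 *   My master : 192.0.2.3 (last heartbeat 0.3 seconds old)
 *   Uptodate : Yes
 *   ...
 *   2 peers.
 *                Address   Basetime      Age
 *              192.0.2.3  123456789        2
 *              192.0.2.4  123456790        5
 */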
2008
2009 //
2010 // Simple run-length-encoding compression.
2011 // Format is
2012 // 1 byte < 128 = count of non-zero bytes following. // Not legal to be zero.
2013 // n non-zero bytes;
2014 // or
2015 // 1 byte > 128 = a run of (count - 128) zero bytes.
2016 // repeat.
2017 // count == 0 indicates end of compressed stream.
2018 //
2019 // Compress from 'src' into 'dst'. return number of bytes
2020 // used from 'dst'.
2021 // Updates *src_p to point one past the last byte consumed.
2022 //
2023 // We could get an extra byte in the zero runs by storing (count-1)
2024 // but I'm playing it safe.
2025 //
2026 // Worst case is a 50% expansion in space required (trying to
2027 // compress { 0x00, 0x01 } * N )
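//
// Worked example (illustrative): the six input bytes
//     05 06 00 00 00 07
// compress to the seven bytes
//     02 05 06   <- literal run: count 2, then the two non-zero bytes
//     83         <- 0x80 | 3: a run of three zero bytes
//     01 07      <- literal run: count 1, then the byte 0x07
//     00         <- stop marker
// and rle_decompress() rebuilds the original six bytes from them.
//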
2028 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2029 {
2030 int count;
2031 int orig_dsize = dsize;
2032 uint8_t *x, *src;
2033 src = *src_p;
2034
2035 while (ssize > 0 && dsize > 2) {
2036 count = 0;
2037 x = dst++; --dsize; // Reserve space for count byte..
2038
2039 if (*src) { // Copy a run of non-zero bytes.
2040 while (ssize > 0 && dsize > 1 && count < 127 && *src) { // Copy non-zero bytes while counting; bounds are checked before *src is read.
2041 *dst++ = *src++;
2042 --dsize; --ssize;
2043 ++count;
2044 }
2045 *x = count; // Store number of non-zero bytes. Guaranteed to be non-zero!
2046
2047 } else { // Compress a run of zero bytes.
2048 while (ssize > 0 && count < 127 && *src == 0) {
2049 ++src;
2050 --ssize;
2051 ++count;
2052 }
2053 *x = count | 0x80;
2054 }
2055 }
2056
2057 *dst++ = 0x0; // Add Stop byte.
2058 --dsize;
2059
2060 *src_p = src;
2061 return (orig_dsize - dsize);
2062 }
2063
2064 //
2065 // Decompress an RLE stream from '*src_p' into 'dst'.
2066 // 'dsize' is the size of the decompression buffer available.
2067 //
2068 // Returns the number of 'dst' bytes used, i.e. the number
2069 // of bytes decompressed.
2070 //
2071 // Updates the 'src_p' pointer to point to the first
2072 // source byte that was not consumed (one past the end
2073 // of the compressed stream).
2074 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2075 {
2076 int count;
2077 int orig_dsize = dsize;
2078 uint8_t *src = *src_p;
2079
2080 while (ssize > 0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
2081 count = *src++; --ssize; // get the count byte from the source.
2082 if (count == 0x0) // End marker reached? If so, finish.
2083 break;
2084
2085 if (count & 0x80) { // Decompress a run of zeros
2086 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
2087 *dst++ = 0x0;
2088 --dsize;
2089 }
2090 } else { // Copy run of non-zero bytes.
2091 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
2092 *dst++ = *src++;
2093 --ssize; --dsize;
2094 }
2095 }
2096 }
2097 *src_p = src;
2098 return (orig_dsize - dsize);
2099 }
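
/*
 * Round-trip sketch (not compiled): roughly how the heartbeat code uses the
 * two helpers above for a C_CTUNNEL element -- compress a whole tunnelt,
 * then decompress into a buffer two bytes larger than the struct so that a
 * corrupt stream shows up as a size mismatch.  The function and variable
 * names here are invented for the example.
 */
#if 0
static void ctunnel_roundtrip_example(int t)
{
	uint8_t wire[sizeof(tunnelt) * 2];	// Worst case is ~50% expansion, so 2x is ample.
	uint8_t back[sizeof(tunnelt) + 2];	// Mirrors the C_CTUNNEL receive buffer above.
	uint8_t *src = (uint8_t *) &tunnel[t];
	uint8_t *p = wire;
	int wsize, rsize;

	wsize = rle_compress(&src, sizeof(tunnelt), wire, sizeof(wire));
	rsize = rle_decompress(&p, wsize, back, sizeof(back));

	if (rsize != sizeof(tunnelt))
		LOG(0, 0, 0, "CTUNNEL round-trip did not restore the full struct!\n");
}
#endif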