1 // L2TPNS Clustering Stuff
2
3 #include <stdio.h>
4 #include <stdlib.h>
5 #include <stdarg.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <sys/file.h>
9 #include <sys/stat.h>
10 #include <sys/socket.h>
11 #include <netinet/in.h>
12 #include <arpa/inet.h>
13 #include <sys/ioctl.h>
14 #include <net/if.h>
15 #include <string.h>
16 #include <malloc.h>
17 #include <errno.h>
18 #include <libcli.h>
19 #include <linux/rtnetlink.h>
20
21 #include "l2tpns.h"
22 #include "cluster.h"
23 #include "util.h"
24 #include "tbf.h"
25 #include "pppoe.h"
26
27 #ifdef BGP
28 #include "bgp.h"
29 #endif
30 /*
31  * All cluster packets have the same format.
32  *
33  * One or more instances of:
34  *	a 32 bit 'type' id.
35  *	a 32 bit 'extra' value, dependent on the 'type'.
36  *	zero or more bytes of structure data, dependent on the type.
37  *
38  */
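//
// Wire-format sketch (illustrative only; this struct is not used anywhere
// in the code, and the name is made up):
//
//	struct cluster_element {
//		uint32_t type;		// C_HEARTBEAT, C_SESSION, C_PING, ...
//		uint32_t extra;		// meaning depends on 'type' (e.g. a session id)
//		uint8_t  data[];	// optional payload; length implied by 'type'
//	};
//
// Values are written in host byte order by add_type(), so all cluster
// members are assumed to share the same endianness.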
39
40 // Module variables.
41 extern int cluster_sockfd;		// The file descriptor for the cluster communications port.
42
43 in_addr_t my_address = 0; // The network address of my ethernet port.
44 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
45 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
46 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
47 static int walk_groupe_number = 0; // The next groupe to send when doing the slow table walk.
48 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
49
50 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
51 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
52
53 static struct {
54 int type;
55 int id;
56 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
57
58 static struct {
59 int seq;
60 int size;
61 uint8_t data[MAX_HEART_SIZE];
62 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Needed so
63 // we can re-transmit if needed.
64
65 static struct {
66 in_addr_t peer;
67 uint32_t basetime;
68 clockt timestamp;
69 int uptodate;
70 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
71 static int num_peers; // Number of peers in list.
72
73 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
74 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
75
76 //
77 // Create a listening socket
78 //
79 // This joins the cluster multicast group.
80 //
81 int cluster_init()
82 {
83 struct sockaddr_in addr;
84 struct sockaddr_in interface_addr;
85 struct ip_mreq mreq;
86 struct ifreq ifr;
87 int opt;
88
89 config->cluster_undefined_sessions = MAXSESSION-1;
90 config->cluster_undefined_bundles = MAXBUNDLE-1;
91 config->cluster_undefined_tunnels = MAXTUNNEL-1;
92 config->cluster_undefined_groupes = MAXGROUPE-1;
93
94 if (!config->cluster_address)
95 return 0;
96 if (!*config->cluster_interface)
97 return 0;
98
99 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
100
101 memset(&addr, 0, sizeof(addr));
102 addr.sin_family = AF_INET;
103 addr.sin_port = htons(CLUSTERPORT);
104 addr.sin_addr.s_addr = INADDR_ANY;
105 	opt = 1;
105 	setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));	// SO_REUSEADDR takes an int flag.
106
107 opt = fcntl(cluster_sockfd, F_GETFL, 0);
108 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
109
110 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
111 {
112 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
113 return -1;
114 }
115
116 strcpy(ifr.ifr_name, config->cluster_interface);
117 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
118 {
119 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
120 return -1;
121 }
122
123 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
124 my_address = interface_addr.sin_addr.s_addr;
125
126 // Join multicast group.
127 mreq.imr_multiaddr.s_addr = config->cluster_address;
128 mreq.imr_interface = interface_addr.sin_addr;
129
130
131 opt = 0; // Turn off multicast loopback.
132 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
133
134 if (config->cluster_mcast_ttl != 1)
135 {
136 uint8_t ttl = 0;
137 if (config->cluster_mcast_ttl > 0)
138 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
139
140 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
141 }
142
143 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
144 {
145 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
146 return -1;
147 }
148
149 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
150 {
151 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
152 return -1;
153 }
154
155 config->cluster_last_hb = TIME;
156 config->cluster_seq_number = -1;
157
158 return cluster_sockfd;
159 }
160
161
162 //
163 // Send a chunk of data to the entire cluster (usually via the multicast
164 // address ).
165 //
166
167 static int cluster_send_data(void *data, int datalen)
168 {
169 struct sockaddr_in addr = {0};
170
171 if (!cluster_sockfd) return -1;
172 if (!config->cluster_address) return 0;
173
174 addr.sin_addr.s_addr = config->cluster_address;
175 addr.sin_port = htons(CLUSTERPORT);
176 addr.sin_family = AF_INET;
177
178 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
179
180 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
181 {
182 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
183 return -1;
184 }
185
186 return 0;
187 }
188
189 //
190 // Add a chunk of data to a heartbeat packet.
191 // Maintains the format. Assumes that the caller
192 // has passed in a big enough buffer!
193 //
194 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
195 {
196 *((uint32_t *) (*p)) = type;
197 *p += sizeof(uint32_t);
198
199 *((uint32_t *)(*p)) = more;
200 *p += sizeof(uint32_t);
201
202 if (data && size > 0) {
203 memcpy(*p, data, size);
204 *p += size;
205 }
206 }
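//
// Usage sketch (this is what cluster_send_ping() below does):
//
//	uint8_t buff[100 + sizeof(pingt)], *p = buff;
//	pingt x;
//	/* ... fill in x ... */
//	add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
//	cluster_send_data(buff, p - buff);
//
// The caller is responsible for sizing the buffer; add_type() does no
// bounds checking.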
207
208 // advertise our presence via BGP or gratuitous ARP
209 static void advertise_routes(void)
210 {
211 #ifdef BGP
212 if (bgp_configured)
213 bgp_enable_routing(1);
214 else
215 #endif /* BGP */
216 if (config->send_garp)
217 send_garp(config->bind_address); // Start taking traffic.
218 }
219
220 // withdraw our routes (BGP only)
221 static void withdraw_routes(void)
222 {
223 #ifdef BGP
224 if (bgp_configured)
225 bgp_enable_routing(0);
226 #endif /* BGP */
227 }
228
229 static void cluster_uptodate(void)
230 {
231 if (config->cluster_iam_uptodate)
232 return;
233
234 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels ||
235 config->cluster_undefined_bundles || config->cluster_undefined_groupes)
236 return;
237
238 config->cluster_iam_uptodate = 1;
239
240 LOG(0, 0, 0, "Now uptodate with master.\n");
241 advertise_routes();
242 }
243
244 //
245 // Send a unicast UDP packet to a peer with 'data' as the
246 // contents.
247 //
248 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
249 {
250 struct sockaddr_in addr = {0};
251
252 if (!cluster_sockfd) return -1;
253 if (!config->cluster_address) return 0;
254
255 if (!peer) // Odd??
256 return -1;
257
258 addr.sin_addr.s_addr = peer;
259 addr.sin_port = htons(CLUSTERPORT);
260 addr.sin_family = AF_INET;
261
262 LOG_HEX(5, "Peer send", data, size);
263
264 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
265 {
266 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
267 return -1;
268 }
269
270 return 0;
271 }
272
273 //
274 // Send a structured message to a peer with a single element of type 'type'.
275 //
276 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
277 {
278 uint8_t buf[65536]; // Vast overkill.
279 uint8_t *p = buf;
280
281 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
282 add_type(&p, type, more, data, size);
283
284 return peer_send_data(peer, buf, (p-buf) );
285 }
286
287 // send a packet to the master
288 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
289 {
290 uint8_t buf[65536]; // Vast overkill.
291 uint8_t *p = buf;
292
293 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
294 return -1;
295
296 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
297
298 STAT(c_forwarded);
299 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
300 memcpy(p, data, size);
301 p += size;
302
303 return peer_send_data(config->cluster_master_address, buf, (p - buf));
304 }
305
306 //
307 // Forward a state changing packet to the master.
308 //
309 // The master just processes the payload as if it had
310 // received it off the tun device.
311 //(note: THIS ROUTINE WRITES TO data[-12]; it prepends 3 x uint32_t).
312 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, uint16_t port, uint16_t indexudp)
313 {
314 uint8_t *p = data - (3 * sizeof(uint32_t));
315 uint8_t *psave = p;
316 uint32_t indexandport = port | ((indexudp << 16) & 0xFFFF0000);
317
318 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
319 return -1;
320
321 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
322
323 STAT(c_forwarded);
324 add_type(&p, C_FORWARD, addr, (uint8_t *) &indexandport, sizeof(indexandport));
325
326 return peer_send_data(config->cluster_master_address, psave, size + (3 * sizeof(uint32_t)));
327 }
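//
// Layout of the forwarded packet, as written by the add_type() call above
// (offsets relative to the original 'data' pointer):
//
//	data[-12..-9]	C_FORWARD (the type word)
//	data[ -8..-5]	source address (the 'more' word)
//	data[ -4..-1]	port | (indexudp << 16)
//	data[  0..  ]	the original packet, unchanged
//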
328
329 // Forward a PPPoE packet to the master.
330 //(note: THIS ROUTINE WRITES TO data[-8]; it prepends 2 x uint32_t).
331 int master_forward_pppoe_packet(uint8_t *data, int size, uint8_t codepad)
332 {
333 uint8_t *p = data - (2 * sizeof(uint32_t));
334 uint8_t *psave = p;
335
336 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
337 return -1;
338
339 LOG(4, 0, 0, "Forward PPPOE packet to master, code %s (size %d)\n", get_string_codepad(codepad), size);
340
341 STAT(c_forwarded);
342 add_type(&p, C_PPPOE_FORWARD, codepad, NULL, 0);
343
344 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
345 }
346
347 // Forward a DAE RADIUS packet to the master.
348 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
349 {
350 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
351 }
352
353 //
354 // Forward a throttled packet to the master for handling.
355 //
356 // The master just drops the packet into the appropriate
357 // token bucket queue, and lets normal processing take care
358 // of it.
359 //
360 int master_throttle_packet(int tbfid, uint8_t *data, int size)
361 {
362 uint8_t buf[65536]; // Vast overkill.
363 uint8_t *p = buf;
364
365 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
366 return -1;
367
368 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
369
370 add_type(&p, C_THROTTLE, tbfid, data, size);
371
372 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
373
374 }
375
376 //
377 // Forward a walled garden packet to the master for handling.
378 //
379 // The master just writes the packet straight to the tun
380 // device (where it will normally loop through the
381 // firewall rules, and come back in on the tun device)
382 //
383 // (Note that this must be called with the tun header
384 // as the start of the data).
385 int master_garden_packet(sessionidt s, uint8_t *data, int size)
386 {
387 uint8_t buf[65536]; // Vast overkill.
388 uint8_t *p = buf;
389
390 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
391 return -1;
392
393 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
394
395 add_type(&p, C_GARDEN, s, data, size);
396
397 return peer_send_data(config->cluster_master_address, buf, (p-buf));
398
399 }
400
401 //
402 // Forward a MPPP packet to the master for handling.
403 //
404 // (Note that this must be called with the tun header
405 // as the start of the data).
406 // (i.e. this routine writes to data[-8]).
407 int master_forward_mppp_packet(sessionidt s, uint8_t *data, int size)
408 {
409 uint8_t *p = data - (2 * sizeof(uint32_t));
410 uint8_t *psave = p;
411
412 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
413 return -1;
414
415 LOG(4, 0, 0, "Forward MPPP packet to master (size %d)\n", size);
416
417 add_type(&p, C_MPPP_FORWARD, s, NULL, 0);
418
419 return peer_send_data(config->cluster_master_address, psave, size + (2 * sizeof(uint32_t)));
420
421 }
422
423 //
424 // Send a chunk of data as a heartbeat..
425 // We save it in the history buffer as we do so.
426 //
427 static void send_heartbeat(int seq, uint8_t *data, int size)
428 {
429 int i;
430
431 if (size > sizeof(past_hearts[0].data))
432 {
433 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
434 kill(0, SIGTERM);
435 exit(1);
436 }
437 i = seq % HB_HISTORY_SIZE;
438 past_hearts[i].seq = seq;
439 past_hearts[i].size = size;
440 memcpy(&past_hearts[i].data, data, size); // Save it.
441 cluster_send_data(data, size);
442 }
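//
// Note on the history buffer: heartbeat 'seq' lands in slot
// seq % HB_HISTORY_SIZE, so it can be resent by cluster_catchup_slave()
// until it is overwritten HB_HISTORY_SIZE heartbeats later.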
443
444 //
445 // Send an 'i am alive' message to every machine in the cluster.
446 //
447 void cluster_send_ping(time_t basetime)
448 {
449 uint8_t buff[100 + sizeof(pingt)];
450 uint8_t *p = buff;
451 pingt x;
452
453 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
454 return;
455
456 LOG(5, 0, 0, "Sending cluster ping...\n");
457
458 x.ver = 1;
459 x.addr = config->bind_address;
460 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels +
461 config->cluster_undefined_groupes + config->cluster_undefined_bundles;
462 x.basetime = basetime;
463
464 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
465 cluster_send_data(buff, (p-buff) );
466 }
467
468 //
469 // Walk the session counters looking for non-zero ones to send
470 // to the master. We send up to 600 of them at one time.
471 // We examine a maximum of 3000 sessions.
472 // (50k max sessions should mean that we normally
473 // examine the entire session table every 25 seconds).
474
475 #define MAX_B_RECS (600)
476 void master_update_counts(void)
477 {
478 int i, c;
479 bytest b[MAX_B_RECS+1];
480
481 if (config->cluster_iam_master) // Only happens on the slaves.
482 return;
483
484 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
485 return;
486
487 	i = MAX_B_RECS * 5;	// Examine up to 3000 sessions (or the whole table if it's larger);
488 if (config->cluster_highest_sessionid > i)
489 i = config->cluster_highest_sessionid;
490
491 for ( c = 0; i > 0 ; --i) {
492 // Next session to look at.
493 walk_session_number++;
494 if ( walk_session_number > config->cluster_highest_sessionid)
495 walk_session_number = 1;
496
497 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
498 continue; // Unchanged. Skip it.
499
500 b[c].sid = walk_session_number;
501 b[c].pin = sess_local[walk_session_number].pin;
502 b[c].pout = sess_local[walk_session_number].pout;
503 b[c].cin = sess_local[walk_session_number].cin;
504 b[c].cout = sess_local[walk_session_number].cout;
505
506 // Reset counters.
507 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
508 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
509
510 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
511 break;
512 }
513
514 	if (!c) // Didn't find any that changed. Get out of here!
515 return;
516
517
518 // Forward the data to the master.
519 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
520 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
521 return;
522 }
523
524 //
525 // On the master, check how our slaves are going. If
526 // one of them's not up-to-date we'll heartbeat faster.
527 // If we don't have any of them, then we need to turn
528 // on our own packet handling!
529 //
530 void cluster_check_slaves(void)
531 {
532 int i;
533 static int have_peers = 0;
534 int had_peers = have_peers;
535 clockt t = TIME;
536
537 if (!config->cluster_iam_master)
538 return; // Only runs on the master...
539
540 config->cluster_iam_uptodate = 1; // cleared in loop below
541
542 for (i = have_peers = 0; i < num_peers; i++)
543 {
544 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
545 continue; // Stale peer! Skip them.
546
547 if (!peers[i].basetime)
548 continue; // Shutdown peer! Skip them.
549
550 if (peers[i].uptodate)
551 have_peers++;
552 else
553 config->cluster_iam_uptodate = 0; // Start fast heartbeats
554 }
555
556 // in a cluster, withdraw/add routes when we get a peer/lose peers
557 if (have_peers != had_peers)
558 {
559 if (had_peers < config->cluster_master_min_adv &&
560 have_peers >= config->cluster_master_min_adv)
561 withdraw_routes();
562
563 else if (had_peers >= config->cluster_master_min_adv &&
564 have_peers < config->cluster_master_min_adv)
565 advertise_routes();
566 }
567 }
568
569 //
570 // Check that we have a master. If it's been too
571 // long since we heard from a master then hold an election.
572 //
573 void cluster_check_master(void)
574 {
575 int i, count, high_unique_id = 0;
576 int last_free = 0;
577 clockt t = TIME;
578 static int probed = 0;
579 int have_peers;
580
581 if (config->cluster_iam_master)
582 return; // Only runs on the slaves...
583
584 	// If the master is late (missed 2 heartbeats by a second and a
585 // hair) it may be that the switch has dropped us from the
586 // multicast group, try unicasting probes to the master
587 // which will hopefully respond with a unicast heartbeat that
588 // will allow us to limp along until the querier next runs.
589 if (config->cluster_master_address
590 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
591 {
592 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
593 {
594 probed = TIME;
595 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
596 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
597
598 peer_send_message(config->cluster_master_address,
599 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
600 }
601 } else { // We got a recent heartbeat; reset the probe flag.
602 probed = 0;
603 }
604
605 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
606 return; // Everything's ok!
607
608 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
609 config->cluster_master_address = 0;
610
611 LOG(0, 0, 0, "Master timed out! Holding election...\n");
612
613 // In the process of shutting down, can't be master
614 if (main_quit)
615 return;
616
617 for (i = have_peers = 0; i < num_peers; i++)
618 {
619 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
620 continue; // Stale peer! Skip them.
621
622 if (!peers[i].basetime)
623 continue; // Shutdown peer! Skip them.
624
625 if (peers[i].basetime < basetime) {
626 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
627 return; // They'll win the election. Get out of here.
628 }
629
630 if (peers[i].basetime == basetime &&
631 peers[i].peer > my_address) {
632 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
633 return; // They'll win the election. Wait for them to come up.
634 }
635
636 if (peers[i].uptodate)
637 have_peers++;
638 }
639
640 	// Wow. It's been ages since I last heard a heartbeat
641 	// and I'm better than any of my peers, so it's time
642 	// to become the master!!!
643
644 config->cluster_iam_master = 1;
645 pppoe_send_garp(); // gratuitous arp of the pppoe interface
646
647 LOG(0, 0, 0, "I am declaring myself the master!\n");
648
649 if (have_peers < config->cluster_master_min_adv)
650 advertise_routes();
651 else
652 withdraw_routes();
653
654 if (config->cluster_seq_number == -1)
655 config->cluster_seq_number = 0;
656
657 //
658 // Go through and mark all the tunnels as defined.
659 // Count the highest used tunnel number as well.
660 //
661 config->cluster_highest_tunnelid = 0;
662 for (i = 0; i < MAXTUNNEL; ++i) {
663 if (tunnel[i].state == TUNNELUNDEF)
664 tunnel[i].state = TUNNELFREE;
665
666 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
667 config->cluster_highest_tunnelid = i;
668 }
669
670 //
671 // Go through and mark all the bundles as defined.
672 // Count the highest used bundle number as well.
673 //
674 config->cluster_highest_bundleid = 0;
675 for (i = 0; i < MAXBUNDLE; ++i) {
676 if (bundle[i].state == BUNDLEUNDEF)
677 bundle[i].state = BUNDLEFREE;
678
679 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
680 config->cluster_highest_bundleid = i;
681 }
682
683 //
684 // Go through and mark all the groupes as defined.
685 // Count the highest used groupe number as well.
686 //
687 config->cluster_highest_groupeid = 0;
688 for (i = 0; i < MAXGROUPE; ++i)
689 {
690 if (grpsession[i].state == GROUPEUNDEF)
691 grpsession[i].state = GROUPEFREE;
692
693 if (grpsession[i].state != GROUPEFREE && i > config->cluster_highest_groupeid)
694 config->cluster_highest_groupeid = i;
695 }
696
697 //
698 	// Go through and mark all the sessions as being defined.
699 	// Reset the idle timeouts.
700 	// Add temporary byte counters to permanent ones.
701 	// Re-string the free list.
702 	// Find the ID of the highest session.
703 last_free = 0;
704 high_unique_id = 0;
705 config->cluster_highest_sessionid = 0;
706 for (i = 0, count = 0; i < MAXSESSION; ++i) {
707 if (session[i].tunnel == T_UNDEF) {
708 session[i].tunnel = T_FREE;
709 ++count;
710 }
711
712 if (!session[i].opened) { // Unused session. Add to free list.
713 memset(&session[i], 0, sizeof(session[i]));
714 session[i].tunnel = T_FREE;
715 session[last_free].next = i;
716 session[i].next = 0;
717 last_free = i;
718 continue;
719 }
720
721 // Reset idle timeouts..
722 session[i].last_packet = session[i].last_data = time_now;
723
724 // Reset die relative to our uptime rather than the old master's
725 if (session[i].die) session[i].die = TIME;
726
727 // Accumulate un-sent byte/packet counters.
728 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
729 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
730 session[i].cin_delta += sess_local[i].cin;
731 session[i].cout_delta += sess_local[i].cout;
732
733 session[i].pin += sess_local[i].pin;
734 session[i].pout += sess_local[i].pout;
735
736 sess_local[i].cin = sess_local[i].cout = 0;
737 sess_local[i].pin = sess_local[i].pout = 0;
738
739 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
740
741 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
742 high_unique_id = session[i].unique_id+1;
743
744 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
745 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
746
747 config->cluster_highest_sessionid = i;
748 }
749
750 session[last_free].next = 0; // End of chain.
751 last_id = high_unique_id; // Keep track of the highest used session ID.
752
753 become_master();
754
755 rebuild_address_pool();
756
757 // If we're not the very first master, this is a big issue!
758 if (count > 0)
759 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
760
761 config->cluster_undefined_sessions = 0;
762 config->cluster_undefined_bundles = 0;
763 config->cluster_undefined_tunnels = 0;
764 config->cluster_undefined_groupes = 0;
765 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
766
767 // FIXME. We need to fix up the tunnel control message
768 	// queue here! There are a number of other variables we
769 // should also update.
770 }
771
772
773 //
774 // Check that our session table matches what the
775 // master has in mind.
776 //
777 // In particular, if we have too many sessions marked 'undefined'
778 // we fix it up here, and we ensure that the 'first free session'
779 // pointer is valid.
780 //
781 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel, int highgroupe)
782 {
783 int i;
784
785 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
786
787 if (config->cluster_iam_uptodate)
788 return;
789
790 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles &&
791 highgroupe > config->cluster_undefined_groupes && hightunnel > config->cluster_undefined_tunnels)
792 return;
793
794 // Clear out defined sessions, counting the number of
795 // undefs remaining.
796 config->cluster_undefined_sessions = 0;
797 for (i = 1 ; i < MAXSESSION; ++i) {
798 if (i > highsession) {
799 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
800 continue;
801 }
802
803 if (session[i].tunnel == T_UNDEF)
804 ++config->cluster_undefined_sessions;
805 }
806
807 // Clear out defined bundles, counting the number of
808 // undefs remaining.
809 config->cluster_undefined_bundles = 0;
810 for (i = 1 ; i < MAXBUNDLE; ++i) {
811 if (i > highbundle) {
812 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
813 continue;
814 }
815
816 if (bundle[i].state == BUNDLEUNDEF)
817 ++config->cluster_undefined_bundles;
818 }
819
820 // Clear out defined tunnels, counting the number of
821 // undefs remaining.
822 config->cluster_undefined_tunnels = 0;
823 for (i = 1 ; i < MAXTUNNEL; ++i) {
824 if (i > hightunnel) {
825 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
826 continue;
827 }
828
829 if (tunnel[i].state == TUNNELUNDEF)
830 ++config->cluster_undefined_tunnels;
831 }
832
833 	// Clear out defined groupes, counting the number of
834 // undefs remaining.
835 config->cluster_undefined_groupes = 0;
836 for (i = 1 ; i < MAXGROUPE; ++i) {
837 if (i > highgroupe) {
838 if (grpsession[i].state == GROUPEUNDEF) grpsession[i].state = GROUPEFREE; // Defined.
839 continue;
840 }
841
842 if (grpsession[i].state == GROUPEUNDEF)
843 ++config->cluster_undefined_groupes;
844 }
845
846 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles || config->cluster_undefined_groupes) {
847 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d grp (high %d), %d tunn (high %d)\n",
848 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle,
849 config->cluster_undefined_groupes, highgroupe, config->cluster_undefined_tunnels, hightunnel);
850 return;
851 }
852
853 // Are we up to date?
854
855 if (!config->cluster_iam_uptodate)
856 cluster_uptodate();
857 }
858
859 static int hb_add_type(uint8_t **p, int type, int id)
860 {
861 switch (type) {
862 case C_CSESSION: { // Compressed C_SESSION.
863 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
864 uint8_t *d = (uint8_t *) &session[id];
865 uint8_t *orig = d;
866 int size;
867
868 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
869
870 // Did we compress the full structure, and is the size actually
871 // reduced??
872 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
873 add_type(p, C_CSESSION, id, c, size);
874 break;
875 }
876 // Failed to compress : Fall through.
877 }
878 case C_SESSION:
879 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
880 break;
881
882 case C_CBUNDLE: { // Compressed C_BUNDLE
883 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
884 uint8_t *d = (uint8_t *) &bundle[id];
885 uint8_t *orig = d;
886 int size;
887
888 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
889
890 // Did we compress the full structure, and is the size actually
891 // reduced??
892 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
893 add_type(p, C_CBUNDLE, id, c, size);
894 break;
895 }
896 // Failed to compress : Fall through.
897 }
898
899 case C_BUNDLE:
900 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
901 break;
902
903 case C_CGROUPE: { // Compressed C_GROUPE
904 uint8_t c[sizeof(groupsesst) * 2]; // Bigger than worst case.
905 uint8_t *d = (uint8_t *) &grpsession[id];
906 uint8_t *orig = d;
907 int size;
908
909 size = rle_compress( &d, sizeof(groupsesst), c, sizeof(c) );
910
911 // Did we compress the full structure, and is the size actually
912 // reduced??
913 if ( (d - orig) == sizeof(groupsesst) && size < sizeof(groupsesst) )
914 {
915 add_type(p, C_CGROUPE, id, c, size);
916 break;
917 }
918 // Failed to compress : Fall through.
919 }
920 case C_GROUPE:
921 add_type(p, C_GROUPE, id, (uint8_t *) &grpsession[id], sizeof(groupsesst));
922 break;
923
924 case C_CTUNNEL: { // Compressed C_TUNNEL
925 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
926 uint8_t *d = (uint8_t *) &tunnel[id];
927 uint8_t *orig = d;
928 int size;
929
930 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
931
932 // Did we compress the full structure, and is the size actually
933 // reduced??
934 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
935 add_type(p, C_CTUNNEL, id, c, size);
936 break;
937 }
938 // Failed to compress : Fall through.
939 }
940 case C_TUNNEL:
941 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
942 break;
943 default:
944 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
945 kill(0, SIGTERM);
946 exit(1);
947 }
948 return 0;
949 }
950
951 //
952 // Send a heartbeat, incidentally sending out any queued changes.
953 //
954 void cluster_heartbeat()
955 {
956 int i, count = 0, tcount = 0, bcount = 0, gcount = 0;
957 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
958 heartt h;
959 uint8_t *p = buff;
960
961 if (!config->cluster_iam_master) // Only the master does this.
962 return;
963
964 config->cluster_table_version += config->cluster_num_changes;
965
966 // Fill out the heartbeat header.
967 memset(&h, 0, sizeof(h));
968
969 h.version = HB_VERSION;
970 h.seq = config->cluster_seq_number;
971 h.basetime = basetime;
972 h.clusterid = config->bind_address; // Will this do??
974 h.highsession = config->cluster_highest_sessionid;
975 h.freesession = sessionfree;
976 h.hightunnel = config->cluster_highest_tunnelid;
977 h.highbundle = config->cluster_highest_bundleid;
978 h.highgroupe = config->cluster_highest_groupeid;
979 h.size_sess = sizeof(sessiont); // Just in case.
980 h.size_bund = sizeof(bundlet);
981 h.size_tunn = sizeof(tunnelt);
982 h.nextgrpid = gnextgrpid;
983 h.interval = config->cluster_hb_interval;
984 h.timeout = config->cluster_hb_timeout;
985 h.table_version = config->cluster_table_version;
986
987 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
988
989 for (i = 0; i < config->cluster_num_changes; ++i) {
990 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
991 }
992
993 	if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
994 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
995 kill(0, SIGTERM);
996 exit(1);
997 }
998
999 //
1000 // Fill out the packet with sessions from the session table...
1001 	// (not forgetting to leave space so we can get some tunnels, bundles and groupes in too)
1002 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
1003
1004 if (!walk_session_number) // session #0 isn't valid.
1005 ++walk_session_number;
1006
1007 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
1008 break;
1009
1010 hb_add_type(&p, C_CSESSION, walk_session_number);
1011 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
1012
1013 ++count; // Count the number of extra sessions we're sending.
1014 }
1015
1016 //
1017 // Fill out the packet with tunnels from the tunnel table...
1018 // This effectively means we walk the tunnel table more quickly
1019 // than the session table. This is good because stuffing up a
1020 // tunnel is a much bigger deal than stuffing up a session.
1021 //
1022 int maxsize = (sizeof(tunnelt) < sizeof(bundlet)) ? sizeof(bundlet):sizeof(tunnelt);
1023 maxsize = (sizeof(groupsesst) < maxsize) ? maxsize:sizeof(groupsesst);
1024 maxsize += (sizeof(uint32_t) * 2);
1025
1026 	// Fill out the packet with tunnels, bundles and groupes from the tables...
1027 while ( (p + maxsize) < (buff + MAX_HEART_SIZE) )
1028 {
1029 if ((tcount >= config->cluster_highest_tunnelid) &&
1030 (bcount >= config->cluster_highest_bundleid) &&
1031 (gcount >= config->cluster_highest_groupeid))
1032 break;
1033
1034 if ( ((p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE)) &&
1035 (tcount < config->cluster_highest_tunnelid))
1036 {
1037 if (!walk_tunnel_number) // tunnel #0 isn't valid.
1038 ++walk_tunnel_number;
1039
1040 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
1041 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
1042
1043 ++tcount;
1044 }
1045
1046 if ( ((p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE)) &&
1047 (bcount < config->cluster_highest_bundleid))
1048 {
1049 if (!walk_bundle_number) // bundle #0 isn't valid.
1050 ++walk_bundle_number;
1051
1052 hb_add_type(&p, C_CBUNDLE, walk_bundle_number);
1053 walk_bundle_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
1054
1055 ++bcount;
1056 }
1057
1058 if ( ((p + sizeof(uint32_t) * 2 + sizeof(groupsesst) ) < (buff + MAX_HEART_SIZE)) &&
1059 (gcount < config->cluster_highest_groupeid))
1060 {
1061 if (!walk_groupe_number) // groupe #0 isn't valid.
1062 ++walk_groupe_number;
1063
1064 hb_add_type(&p, C_CGROUPE, walk_groupe_number);
1065 walk_groupe_number = (1+walk_groupe_number)%(config->cluster_highest_groupeid+1); // +1 avoids divide by zero.
1066 ++gcount;
1067 }
1068 }
1069
1070 //
1071 // Did we do something wrong?
1072 	if (p > (buff + sizeof(buff))) { // Did we somehow manage to overrun the buffer?
1073 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
1074 kill(0, SIGTERM);
1075 exit(1);
1076 }
1077
1078 LOG(4, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
1079 "(%d x-sess, %d x-bundles, %d x-tunnels, %d x-groupes, %d highsess, %d highbund, %d hightun, %d highgrp, size %d)\n",
1080 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
1081 count, bcount, tcount, gcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
1082 config->cluster_highest_tunnelid, config->cluster_highest_groupeid, (int) (p - buff));
1083
1084 config->cluster_num_changes = 0;
1085
1086 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
1087
1088 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
1089 }
1090
1091 //
1092 // A structure of type 'type' has changed; Add it to the queue to send.
1093 //
1094 static int type_changed(int type, int id)
1095 {
1096 int i;
1097
1098 for (i = 0 ; i < config->cluster_num_changes ; ++i)
1099 {
1100 if ( cluster_changes[i].id == id && cluster_changes[i].type == type)
1101 {
1102 // Already marked for change, remove it
1103 --config->cluster_num_changes;
1104 memmove(&cluster_changes[i],
1105 &cluster_changes[i+1],
1106 (config->cluster_num_changes - i) * sizeof(cluster_changes[i]));
1107 break;
1108 }
1109 }
1110
1111 cluster_changes[config->cluster_num_changes].type = type;
1112 cluster_changes[config->cluster_num_changes].id = id;
1113 ++config->cluster_num_changes;
1114
1115 if (config->cluster_num_changes > MAX_CHANGES)
1116 cluster_heartbeat(); // flush now
1117
1118 return 1;
1119 }
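//
// Note: if the same (type, id) pair is already queued, it is moved to the
// end of the queue rather than being queued twice; and once the queue
// exceeds MAX_CHANGES a heartbeat is flushed immediately.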
1120
1121 // A particular session has been changed!
1122 int cluster_send_session(int sid)
1123 {
1124 if (!config->cluster_iam_master) {
1125 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
1126 return -1;
1127 }
1128
1129 if (forked) {
1130 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1131 return -1;
1132 }
1133
1134 return type_changed(C_CSESSION, sid);
1135 }
1136
1137 // A particular bundle has been changed!
1138 int cluster_send_bundle(int bid)
1139 {
1140 if (!config->cluster_iam_master) {
1141 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1142 return -1;
1143 }
1144
1145 return type_changed(C_CBUNDLE, bid);
1146 }
1147
1148 // A particular groupe has been changed!
1149 int cluster_send_groupe(int gid)
1150 {
1151 if (!config->cluster_iam_master)
1152 {
1153 LOG(0, 0, gid, "I'm not a master, but I just tried to change a groupe!\n");
1154 return -1;
1155 }
1156
1157 return type_changed(C_CGROUPE, gid);
1158 }
1159
1160 // A particular tunnel has been changed!
1161 int cluster_send_tunnel(int tid)
1162 {
1163 if (!config->cluster_iam_master) {
1164 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1165 return -1;
1166 }
1167
1168 return type_changed(C_CTUNNEL, tid);
1169 }
1170
1171
1172 //
1173 // We're a master, and a slave has just told us that it's
1174 // missed a packet. We'll resend every packet since
1175 // the last one it saw.
1176 //
1177 static int cluster_catchup_slave(int seq, in_addr_t slave)
1178 {
1179 int s;
1180 int diff;
1181
1182 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1183 if (!config->cluster_iam_master) {
1184 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1185 fmtaddr(config->cluster_master_address, 0));
1186
1187 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1188 return 0;
1189 }
1190
1191 	diff = config->cluster_seq_number - seq; // How many packets do we need to send?
1192 if (diff < 0)
1193 diff += HB_MAX_SEQ;
1194
1195 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packet to send it!
1196 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1197 seq, config->cluster_seq_number);
1198 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1199 }
1200
1201 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1202
1203 // Now resend every packet that it missed, in order.
1204 while (seq != config->cluster_seq_number) {
1205 s = seq % HB_HISTORY_SIZE;
1206 if (seq != past_hearts[s].seq) {
1207 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1208 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1209 return -1; // What to do here!?
1210 }
1211 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1212 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1213 }
1214 return 0; // All good!
1215 }
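//
// Worked example of the catch-up arithmetic above: if the master is at
// seq 10 and the slave reports LASTSEEN 4, diff = 6 and heartbeats 4..9
// are resent from past_hearts[]. If the sequence has wrapped (master at
// seq 3, slave at HB_MAX_SEQ - 3), diff comes out negative and HB_MAX_SEQ
// is added back before it is compared against HB_HISTORY_SIZE.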
1216
1217 //
1218 // We've heard from another peer! Add it to the list
1219 // that we select from at election time.
1220 //
1221 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1222 {
1223 int i;
1224 in_addr_t clusterid;
1225 pingt p;
1226
1227 	// Allow for backward compatibility.
1228 	// Copy the ping packet into a new structure to allow
1229 // for the possibility that we might have received
1230 // more or fewer elements than we were expecting.
1231 if (size > sizeof(p))
1232 size = sizeof(p);
1233
1234 memset( (void *) &p, 0, sizeof(p) );
1235 memcpy( (void *) &p, (void *) pp, size);
1236
1237 clusterid = p.addr;
1238 if (clusterid != config->bind_address)
1239 {
1240 // Is this for us?
1241 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1242 return 0;
1243 }
1244
1245 for (i = 0; i < num_peers ; ++i)
1246 {
1247 if (peers[i].peer != peer)
1248 continue;
1249
1250 // This peer already exists. Just update the timestamp.
1251 peers[i].basetime = basetime;
1252 peers[i].timestamp = TIME;
1253 peers[i].uptodate = !p.undef;
1254 break;
1255 }
1256
1257 // Is this the master shutting down??
1258 if (peer == config->cluster_master_address) {
1259 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1260 basetime ? "has restarted!" : "shutting down...");
1261
1262 config->cluster_master_address = 0;
1263 config->cluster_last_hb = 0; // Force an election.
1264 cluster_check_master();
1265 }
1266
1267 if (i >= num_peers)
1268 {
1269 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1270
1271 // Not found. Is there a stale slot to re-use?
1272 for (i = 0; i < num_peers ; ++i)
1273 {
1274 if (!peers[i].basetime) // Shutdown
1275 break;
1276
1277 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1278 break;
1279 }
1280
1281 if (i >= CLUSTER_MAX_SIZE)
1282 {
1283 // Too many peers!!
1284 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1285 return -1;
1286 }
1287
1288 peers[i].peer = peer;
1289 peers[i].basetime = basetime;
1290 peers[i].timestamp = TIME;
1291 peers[i].uptodate = !p.undef;
1292 if (i == num_peers)
1293 ++num_peers;
1294
1295 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1296 }
1297
1298 return 1;
1299 }
1300
1301 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1302 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1303 {
1304 if (config->cluster_iam_master) // Sanity...
1305 return 0;
1306
1307 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1308 fmtaddr(master, 1));
1309
1310 config->cluster_master_address = master;
1311 if (master)
1312 {
1313 // catchup with new master
1314 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1315
1316 // delay next election
1317 config->cluster_last_hb = TIME;
1318 }
1319
1320 // run election (or reset "probed" if master was set)
1321 cluster_check_master();
1322 return 0;
1323 }
1324
1325 /* Handle the slave updating the byte counters for the master. */
1326 //
1327 // Note that we don't mark the session as dirty; we rely on
1328 // the slow table walk to propagate this back out to the slaves.
1329 //
1330 static int cluster_handle_bytes(uint8_t *data, int size)
1331 {
1332 bytest *b;
1333
1334 b = (bytest *) data;
1335
1336 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1337
1338 /* Loop around, adding the byte
1339 counts to each of the sessions. */
1340
1341 while (size >= sizeof(*b) ) {
1342 		if (b->sid >= MAXSESSION) {
1343 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1344 return -1; /* Abort processing */
1345 }
1346
1347 session[b->sid].pin += b->pin;
1348 session[b->sid].pout += b->pout;
1349
1350 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1351 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1352
1353 session[b->sid].cin_delta += b->cin;
1354 session[b->sid].cout_delta += b->cout;
1355
1356 if (b->cin)
1357 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1358 else if (b->cout)
1359 session[b->sid].last_data = time_now;
1360
1361 size -= sizeof(*b);
1362 ++b;
1363 }
1364
1365 if (size != 0)
1366 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1367
1368 return size;
1369 }
1370
1371 //
1372 // Handle receiving a session structure in a heartbeat packet.
1373 //
1374 static int cluster_recv_session(int more, uint8_t *p)
1375 {
1376 if (more >= MAXSESSION) {
1377 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1378 return -1;
1379 }
1380
1381 if (session[more].tunnel == T_UNDEF) {
1382 if (config->cluster_iam_uptodate) { // Sanity.
1383 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1384 } else {
1385 --config->cluster_undefined_sessions;
1386 }
1387 }
1388
1389 load_session(more, (sessiont *) p); // Copy session into session table..
1390
1391 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1392
1393 if (!config->cluster_iam_uptodate)
1394 cluster_uptodate(); // Check to see if we're up to date.
1395
1396 return 0;
1397 }
1398
1399 static int cluster_recv_bundle(int more, uint8_t *p)
1400 {
1401 if (more >= MAXBUNDLE) {
1402 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1403 return -1;
1404 }
1405
1406 if (bundle[more].state == BUNDLEUNDEF) {
1407 if (config->cluster_iam_uptodate) { // Sanity.
1408 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1409 } else {
1410 --config->cluster_undefined_bundles;
1411 }
1412 }
1413
1414 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1415
1416 LOG(5, 0, more, "Received bundle update\n");
1417
1418 if (!config->cluster_iam_uptodate)
1419 cluster_uptodate(); // Check to see if we're up to date.
1420
1421 return 0;
1422 }
1423
1424 static int cluster_recv_groupe(int more, uint8_t *p)
1425 {
1426 if (more >= MAXGROUPE) {
1427 LOG(0, 0, 0, "DANGER: Received a group id > MAXGROUPE!\n");
1428 return -1;
1429 }
1430
1431 if (grpsession[more].state == GROUPEUNDEF) {
1432 if (config->cluster_iam_uptodate) { // Sanity.
1433 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined group!\n");
1434 } else {
1435 --config->cluster_undefined_groupes;
1436 }
1437 }
1438
1439 grp_cluster_load_groupe(more, (groupsesst *) p); // Copy groupe into groupe table..
1440
1441 LOG(5, 0, more, "Received group update (%d undef)\n", config->cluster_undefined_groupes);
1442
1443 if (!config->cluster_iam_uptodate)
1444 cluster_uptodate(); // Check to see if we're up to date.
1445
1446 return 0;
1447 }
1448
1449 static int cluster_recv_tunnel(int more, uint8_t *p)
1450 {
1451 if (more >= MAXTUNNEL) {
1452 LOG(0, 0, 0, "DANGER: Received a tunnel session id > MAXTUNNEL!\n");
1453 return -1;
1454 }
1455
1456 if (tunnel[more].state == TUNNELUNDEF) {
1457 if (config->cluster_iam_uptodate) { // Sanity.
1458 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1459 } else {
1460 --config->cluster_undefined_tunnels;
1461 }
1462 }
1463
1464 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1465
1466 //
1467 // Clear tunnel control messages. These are dynamically allocated.
1468 // If we get unlucky, this may cause the tunnel to drop!
1469 //
1470 tunnel[more].controls = tunnel[more].controle = NULL;
1471 tunnel[more].controlc = 0;
1472
1473 LOG(5, 0, more, "Received tunnel update\n");
1474
1475 if (!config->cluster_iam_uptodate)
1476 cluster_uptodate(); // Check to see if we're up to date.
1477
1478 return 0;
1479 }
1480
1481
1482 // pre v6 heartbeat session structure
1483 struct oldsession {
1484 sessionidt next;
1485 sessionidt far;
1486 tunnelidt tunnel;
1487 uint8_t flags;
1488 struct {
1489 uint8_t phase;
1490 uint8_t lcp:4;
1491 uint8_t ipcp:4;
1492 uint8_t ipv6cp:4;
1493 uint8_t ccp:4;
1494 } ppp;
1495 char reserved_1[2];
1496 in_addr_t ip;
1497 int ip_pool_index;
1498 uint32_t unique_id;
1499 char reserved_2[4];
1500 uint32_t magic;
1501 uint32_t pin, pout;
1502 uint32_t cin, cout;
1503 uint32_t cin_wrap, cout_wrap;
1504 uint32_t cin_delta, cout_delta;
1505 uint16_t throttle_in;
1506 uint16_t throttle_out;
1507 uint8_t filter_in;
1508 uint8_t filter_out;
1509 uint16_t mru;
1510 clockt opened;
1511 clockt die;
1512 uint32_t session_timeout;
1513 uint32_t idle_timeout;
1514 time_t last_packet;
1515 time_t last_data;
1516 in_addr_t dns1, dns2;
1517 routet route[MAXROUTE];
1518 uint16_t tbf_in;
1519 uint16_t tbf_out;
1520 int random_vector_length;
1521 uint8_t random_vector[MAXTEL];
1522 char user[MAXUSER];
1523 char called[MAXTEL];
1524 char calling[MAXTEL];
1525 uint32_t tx_connect_speed;
1526 uint32_t rx_connect_speed;
1527 clockt timeout;
1528 uint32_t mrru;
1529 uint8_t mssf;
1530 epdist epdis;
1531 bundleidt bundle;
1532 in_addr_t snoop_ip;
1533 uint16_t snoop_port;
1534 uint8_t walled_garden;
1535 uint8_t ipv6prefixlen;
1536 struct in6_addr ipv6route;
1537 char reserved_3[11];
1538 };
1539
1540 static uint8_t *convert_session(struct oldsession *old)
1541 {
1542 static sessiont new;
1543 int i;
1544
1545 memset(&new, 0, sizeof(new));
1546
1547 new.next = old->next;
1548 new.far = old->far;
1549 new.tunnel = old->tunnel;
1550 new.flags = old->flags;
1551 new.ppp.phase = old->ppp.phase;
1552 new.ppp.lcp = old->ppp.lcp;
1553 new.ppp.ipcp = old->ppp.ipcp;
1554 new.ppp.ipv6cp = old->ppp.ipv6cp;
1555 new.ppp.ccp = old->ppp.ccp;
1556 new.ip = old->ip;
1557 new.ip_pool_index = old->ip_pool_index;
1558 new.unique_id = old->unique_id;
1559 new.magic = old->magic;
1560 new.pin = old->pin;
1561 new.pout = old->pout;
1562 new.cin = old->cin;
1563 new.cout = old->cout;
1564 new.cin_wrap = old->cin_wrap;
1565 new.cout_wrap = old->cout_wrap;
1566 new.cin_delta = old->cin_delta;
1567 new.cout_delta = old->cout_delta;
1568 new.throttle_in = old->throttle_in;
1569 new.throttle_out = old->throttle_out;
1570 new.filter_in = old->filter_in;
1571 new.filter_out = old->filter_out;
1572 new.mru = old->mru;
1573 new.opened = old->opened;
1574 new.die = old->die;
1575 new.session_timeout = old->session_timeout;
1576 new.idle_timeout = old->idle_timeout;
1577 new.last_packet = old->last_packet;
1578 new.last_data = old->last_data;
1579 new.dns1 = old->dns1;
1580 new.dns2 = old->dns2;
1581 new.tbf_in = old->tbf_in;
1582 new.tbf_out = old->tbf_out;
1583 new.random_vector_length = old->random_vector_length;
1584 new.tx_connect_speed = old->tx_connect_speed;
1585 new.rx_connect_speed = old->rx_connect_speed;
1586 new.timeout = old->timeout;
1587 new.mrru = old->mrru;
1588 new.mssf = old->mssf;
1589 new.epdis = old->epdis;
1590 new.bundle = old->bundle;
1591 new.snoop_ip = old->snoop_ip;
1592 new.snoop_port = old->snoop_port;
1593 new.walled_garden = old->walled_garden;
1594 new.ipv6prefixlen = old->ipv6prefixlen;
1595 new.ipv6route = old->ipv6route;
1596
1597 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1598 memcpy(new.user, old->user, sizeof(new.user));
1599 memcpy(new.called, old->called, sizeof(new.called));
1600 memcpy(new.calling, old->calling, sizeof(new.calling));
1601
1602 for (i = 0; i < MAXROUTE; i++)
1603 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1604
1605 return (uint8_t *) &new;
1606 }
1607
1608 //
1609 // Process a heartbeat..
1610 //
1611 // v6: added RADIUS class attribute, re-ordered session structure
1612 // v7: added tunnelt attribute at the end of struct (tunnelt size change)
1613 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1614 {
1615 heartt *h;
1616 int s = size - (p-data);
1617 int i, type;
1618 int hb_ver = more;
1619
1620 #if HB_VERSION != 7
1621 # error "need to update cluster_process_heartbeat()"
1622 #endif
1623
1624 // we handle versions 5 through 7
1625 if (hb_ver < 5 || hb_ver > HB_VERSION) {
1626 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1627 return -1; // Ignore it??
1628 }
1629
1630 if (size > sizeof(past_hearts[0].data)) {
1631 LOG(0, 0, 0, "Received an oversize heartbeat from %s (%d)!\n", fmtaddr(addr, 0), size);
1632 return -1;
1633 }
1634
1635 if (s < sizeof(*h))
1636 goto shortpacket;
1637
1638 h = (heartt *) p;
1639 p += sizeof(*h);
1640 s -= sizeof(*h);
1641
1642 if (h->clusterid != config->bind_address)
1643 return -1; // It's not part of our cluster.
1644
1645 if (config->cluster_iam_master) { // Sanity...
1646 // Note that this MUST match the election process above!
1647
1648 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1649 if (!h->basetime) {
1650 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1651 return -1; // Skip it.
1652 }
1653
1654 if (h->table_version > config->cluster_table_version) {
1655 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1656 h->table_version, config->cluster_table_version);
1657
1658 kill(0, SIGTERM);
1659 exit(1);
1660 }
1661
1662 if (h->table_version < config->cluster_table_version)
1663 return -1;
1664
1665 if (basetime > h->basetime) {
1666 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1667 kill(0, SIGTERM);
1668 exit(1);
1669 }
1670
1671 if (basetime < h->basetime)
1672 return -1;
1673
1674 if (my_address < addr) { // Tie breaker.
1675 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1676 kill(0, SIGTERM);
1677 exit(1);
1678 }
1679
1680 //
1681 		// Send it a unicast heartbeat to give it a chance to die.
1682 // NOTE: It's actually safe to do seq-number - 1 without checking
1683 // for wrap around.
1684 //
1685 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1686
1687 return -1; // Skip it.
1688 }
1689
1690 //
1691 // Try and guard against a stray master appearing.
1692 //
1693 // Ignore heartbeats received from another master before the
1694 // timeout (less a smidgen) for the old master has elapsed.
1695 //
1696 // Note that after a clean failover, the cluster_master_address
1697 // is cleared, so this doesn't run.
1698 //
1699 if (config->cluster_master_address && addr != config->cluster_master_address) {
1700 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1701 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1702 0.1 * (TIME - config->cluster_last_hb));
1703 return -1; // ignore
1704 }
1705
1706 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1707 config->cluster_seq_number = h->seq;
1708
1709 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1710 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1711
1712 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1713 static int lastseen_seq = 0;
1714 static time_t lastseen_time = 0;
1715
1716 // limit to once per second for a particular seq#
1717 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1718
1719 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1720 h->seq, config->cluster_seq_number,
1721 ask ? "Asking for resend" : "Ignoring");
1722
1723 if (ask)
1724 {
1725 lastseen_seq = config->cluster_seq_number;
1726 lastseen_time = time_now;
1727 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1728 }
1729
1730 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1731
1732 // Just drop the packet. The master will resend it as part of the catchup.
1733
1734 return 0;
1735 }
1736 // Save the packet in our buffer.
1737 // This is needed in case we become the master.
1738 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1739 i = h->seq % HB_HISTORY_SIZE;
1740 past_hearts[i].seq = h->seq;
1741 past_hearts[i].size = size;
1742 memcpy(&past_hearts[i].data, data, size); // Save it.
1743
1744
1745 // Check that we don't have too many undefined sessions, and
1746 // that the free session pointer is correct.
1747 gnextgrpid = h->nextgrpid;
1748 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel, h->highgroupe);
1749
1750 if (h->interval != config->cluster_hb_interval)
1751 {
1752 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1753 h->interval, config->cluster_hb_interval);
1754
1755 config->cluster_hb_interval = h->interval;
1756 }
1757
1758 if (h->timeout != config->cluster_hb_timeout)
1759 {
1760 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1761 h->timeout, config->cluster_hb_timeout);
1762
1763 config->cluster_hb_timeout = h->timeout;
1764 }
1765
1766 // Ok. process the packet...
1767 while ( s > 0) {
1768
1769 type = *((uint32_t *) p);
1770 p += sizeof(uint32_t);
1771 s -= sizeof(uint32_t);
1772
1773 more = *((uint32_t *) p);
1774 p += sizeof(uint32_t);
1775 s -= sizeof(uint32_t);
1776
1777 switch (type) {
1778 case C_CSESSION: { // Compressed session structure.
1779 uint8_t c[ sizeof(sessiont) + 2];
1780 int size;
1781 uint8_t *orig_p = p;
1782
1783 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1784 s -= (p - orig_p);
1785
1786 				// session struct changed with v6
1787 if (hb_ver < 6)
1788 {
1789 if (size != sizeof(struct oldsession)) {
1790 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1791 // Now what? Should exit! No longer up to date!
1792 break;
1793 }
1794 cluster_recv_session(more, convert_session((struct oldsession *) c));
1795 break;
1796 }
1797
1798 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1799 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1800 // Now what? Should exit! No longer up to date!
1801 break;
1802 }
1803
1804 cluster_recv_session(more, c);
1805 break;
1806 }
1807 case C_SESSION:
1808 if (hb_ver < 6)
1809 {
1810 if (s < sizeof(struct oldsession))
1811 goto shortpacket;
1812
1813 cluster_recv_session(more, convert_session((struct oldsession *) p));
1814
1815 p += sizeof(struct oldsession);
1816 s -= sizeof(struct oldsession);
1817 break;
1818 }
1819
1820 if ( s < sizeof(session[more]))
1821 goto shortpacket;
1822
1823 cluster_recv_session(more, p);
1824
1825 p += sizeof(session[more]);
1826 s -= sizeof(session[more]);
1827 break;
1828
1829 case C_CTUNNEL: { // Compressed tunnel structure.
1830 uint8_t c[ sizeof(tunnelt) + 2];
1831 int size;
1832 uint8_t *orig_p = p;
1833
1834 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1835 s -= (p - orig_p);
1836
1837 if ( ((hb_ver >= HB_VERSION) && (size != sizeof(tunnelt))) ||
1838 ((hb_ver < HB_VERSION) && (size > sizeof(tunnelt))) )
1839 { // Ouch! Very very bad!
1840 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1841 // Now what? Should exit! No longer up to date!
1842 break;
1843 }
1844
1845 cluster_recv_tunnel(more, c);
1846 break;
1847
1848 }
1849 case C_TUNNEL:
1850 if ( s < sizeof(tunnel[more]))
1851 goto shortpacket;
1852
1853 cluster_recv_tunnel(more, p);
1854
1855 p += sizeof(tunnel[more]);
1856 s -= sizeof(tunnel[more]);
1857 break;
1858
1859 case C_CBUNDLE: { // Compressed bundle structure.
1860 uint8_t c[ sizeof(bundlet) + 2];
1861 int size;
1862 uint8_t *orig_p = p;
1863
1864 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1865 s -= (p - orig_p);
1866
1867 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1868 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1869 // Now what? Should exit! No longer up to date!
1870 break;
1871 }
1872
1873 cluster_recv_bundle(more, c);
1874 break;
1875
1876 }
1877 case C_BUNDLE:
1878 if ( s < sizeof(bundle[more]))
1879 goto shortpacket;
1880
1881 cluster_recv_bundle(more, p);
1882
1883 p += sizeof(bundle[more]);
1884 s -= sizeof(bundle[more]);
1885 break;
1886
1887 case C_CGROUPE:
1888 { // Compressed Groupe structure.
1889 uint8_t c[ sizeof(groupsesst) + 2];
1890 int size;
1891 uint8_t *orig_p = p;
1892
1893 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1894 s -= (p - orig_p);
1895
1896 if (size != sizeof(groupsesst) )
1897 { // Ouch! Very very bad!
1898 LOG(0, 0, 0, "DANGER: Received a C_CGROUPE that didn't decompress correctly!\n");
1899 // Now what? Should exit! No longer up to date!
1900 break;
1901 }
1902
1903 cluster_recv_groupe(more, c);
1904 break;
1905 }
1906 case C_GROUPE:
1907 if ( s < sizeof(grpsession[more]))
1908 goto shortpacket;
1909
1910 cluster_recv_groupe(more, p);
1911
1912 p += sizeof(grpsession[more]);
1913 s -= sizeof(grpsession[more]);
1914 break;
1915
1916 default:
1917 LOG(0, 0, 0, "DANGER: Received a heartbeat element with a type I don't understand! (%d)\n", type);
1918 return -1; // can't process any more of the packet!!
1919 }
1920 }
1921
1922 if (config->cluster_master_address != addr)
1923 {
1924 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1925 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1926
1927 config->cluster_master_address = addr;
1928 }
1929
1930 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1931 config->cluster_table_version = h->table_version;
1932 return 0;
1933
1934 shortpacket:
1935 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1936 return -1;
1937 }
1938
1939 //
1940 // We got a packet on the cluster port!
1941 // Handle pings, lastseens, and heartbeats!
1942 //
1943 int processcluster(uint8_t *data, int size, in_addr_t addr)
1944 {
1945 int type, more;
1946 uint8_t *p = data;
1947 int s = size;
1948
1949 if (addr == my_address)
1950 return -1; // Ignore it. Something looped back the multicast!
1951
1952 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1953
1954 if (s <= 0) // Any data there??
1955 return -1;
1956
1957 if (s < 8)
1958 goto shortpacket;
1959
1960 type = *((uint32_t *) p);
1961 p += sizeof(uint32_t);
1962 s -= sizeof(uint32_t);
1963
1964 more = *((uint32_t *) p);
1965 p += sizeof(uint32_t);
1966 s -= sizeof(uint32_t);
1967
1968 switch (type)
1969 {
1970 case C_PING: // Update the peers table.
1971 return cluster_add_peer(addr, more, (pingt *) p, s);
1972
1973 case C_MASTER: // Our master is wrong
1974 return cluster_set_master(addr, more);
1975
1976 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1977 return cluster_catchup_slave(more, addr);
1978
1979 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1980 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1981 if (!config->cluster_iam_master)
1982 {
1983 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1984 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1985
1986 return -1;
1987 }
1988 else
1989 {
1990 struct sockaddr_in a;
1991 uint16_t indexudp;
1992 a.sin_addr.s_addr = more;
1993
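// The first int of the payload packs the sender's UDP port in its low
// 16 bits and the UDP socket index ('indexudp', needed by processudp())
// in its high 16 bits; unpack both before passing on the rest.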
1994 a.sin_port = (*(int *) p) & 0xFFFF;
1995 indexudp = ((*(int *) p) >> 16) & 0xFFFF;
1996 s -= sizeof(int);
1997 p += sizeof(int);
1998
1999 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
2000 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
2001
2002 STAT(recv_forward);
2003 if (type == C_FORWARD_DAE)
2004 {
2005 struct in_addr local;
2006 local.s_addr = config->bind_address ? config->bind_address : my_address;
2007 processdae(p, s, &a, sizeof(a), &local);
2008 }
2009 else
2010 processudp(p, s, &a, indexudp);
2011
2012 return 0;
2013 }
2014 case C_PPPOE_FORWARD:
2015 if (!config->cluster_iam_master)
2016 {
2017 LOG(0, 0, 0, "I'm not the master, but I got a C_PPPOE_FORWARD from %s?\n", fmtaddr(addr, 0));
2018 return -1;
2019 }
2020 else
2021 {
2022 pppoe_process_forward(p, s, addr);
2023 return 0;
2024 }
2025
2026 case C_MPPP_FORWARD:
2027 // Receive a MPPP packet from a slave.
2028 if (!config->cluster_iam_master) {
2029 LOG(0, 0, 0, "I'm not the master, but I got a C_MPPP_FORWARD from %s?\n", fmtaddr(addr, 0));
2030 return -1;
2031 }
2032
2033 processipout(p, s);
2034 return 0;
2035
2036 case C_THROTTLE: { // Receive a forwarded packet from a slave.
2037 if (!config->cluster_iam_master) {
2038 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
2039 return -1;
2040 }
2041
2042 tbf_queue_packet(more, p, s); // The TBF id tells whether it goes in or out.
2043 return 0;
2044 }
2045 case C_GARDEN:
2046 // Receive a walled garden packet from a slave.
2047 if (!config->cluster_iam_master) {
2048 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
2049 return -1;
2050 }
2051
2052 tun_write(p, s);
2053 return 0;
2054
2055 case C_BYTES:
2056 if (!config->cluster_iam_master) {
2057 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
2058 return -1;
2059 }
2060
2061 return cluster_handle_bytes(p, s);
2062
2063 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
2064 if (config->cluster_iam_master) {
2065 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
2066 return -1;
2067 }
2068 if (more != config->cluster_seq_number) {
2069 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
2070 return -1;
2071 }
2072
2073 if (addr != config->cluster_master_address) {
2074 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
2075 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
2076 // We can only warn about it. The master might really have switched!
2077 }
2078
2079 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
2080 kill(0, SIGTERM);
2081 exit(0); // Let's be paranoid.
2082 return -1; // Just signalling the compiler.
2083
2084 case C_HEARTBEAT:
2085 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
2086 return cluster_process_heartbeat(data, size, more, p, addr);
2087
2088 default:
2089 LOG(0, 0, 0, "Received a packet of unknown type (%d) on the cluster socket\n", type);
2090 return -1;
2091 }
2092 return 0;
2093
2094 shortpacket:
2095 LOG(0, 0, 0, "I got a _short_ cluster packet! This means I'm probably out of sync!!\n");
2096 return -1;
2097 }
2098
2099 //====================================================================================================
2100
2101 int cmd_show_cluster(struct cli_def *cli, const char *command, char **argv, int argc)
2102 {
2103 int i;
2104
2105 if (CLI_HELP_REQUESTED)
2106 return CLI_HELP_NO_ARGS;
2107
2108 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
2109 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
2110 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
2111 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
2112 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
2113
2114 if (!config->cluster_iam_master) {
2115 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
2116 config->cluster_master_address
2117 ? fmtaddr(config->cluster_master_address, 0)
2118 : "Not defined",
2119 0.1 * (TIME - config->cluster_last_hb));
2120 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
2121 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
2122 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
2123 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
2124 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
2125 cli_print(cli, "%d groupes undefined of %d", config->cluster_undefined_groupes, config->cluster_highest_groupeid);
2126 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
2127 } else {
2128 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
2129 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
2130 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
2131 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
2132 cli_print(cli, "Highest groupe : %d", config->cluster_highest_groupeid);
2133 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
2134 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
2135 }
2136 cli_print(cli, "%d peers.", num_peers);
2137
2138 if (num_peers)
2139 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
2140 for (i = 0; i < num_peers; ++i) {
2141 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
2142 peers[i].basetime, TIME - peers[i].timestamp);
2143 }
2144 return CLI_OK;
2145 }
2146
2147 //
2148 // Simple run-length-encoding compression.
2149 // Format is:
2150 //   1 byte < 128 = count of non-zero bytes following (never zero);
2151 //   followed by that many non-zero bytes;
2152 // or
2153 //   1 byte > 128 = (count - 128) zero bytes;
2154 // repeat.
2155 // A count byte of 0 marks the end of the compressed stream.
2156 //
2157 // Compresses from '*src_p' into 'dst'.  Returns the number
2158 // of bytes of 'dst' used.
2159 // Updates *src_p to point one past the last source byte consumed.
2160 //
2161 // We could get an extra byte in the zero runs by storing (count-1)
2162 // but I'm playing it safe.
2163 //
2164 // Worst case is a 50% expansion in space required (trying to
2165 // compress { 0x00, 0x01 } * N )
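//
// Illustrative example (not from the original source): the 8-byte input
//   00 00 00 05 06 00 00 00
// compresses to the 6 bytes
//   83           (0x80 | 3 -> run of 3 zero bytes)
//   02 05 06     (2 literal non-zero bytes)
//   83           (run of 3 zero bytes)
//   00           (stop byte)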
2166 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2167 {
2168 int count;
2169 int orig_dsize = dsize;
2170 uint8_t *x, *src;
2171 src = *src_p;
2172
2173 while (ssize > 0 && dsize > 2) {
2174 count = 0;
2175 x = dst++; --dsize; // Reserve space for count byte..
2176
2177 if (*src) { // Copy a run of non-zero bytes.
2178 while (*src && count < 127 && ssize > 0 && dsize > 1) { // Count number of non-zero bytes.
2179 *dst++ = *src++;
2180 --dsize; --ssize;
2181 ++count;
2182 }
2183 *x = count; // Store number of non-zero bytes. Guaranteed to be non-zero!
2184
2185 } else { // Compress a run of zero bytes.
2186 while (*src == 0 && count < 127 && ssize > 0) {
2187 ++src;
2188 --ssize;
2189 ++count;
2190 }
2191 *x = count | 0x80 ;
2192 }
2193 }
2194
2195 *dst++ = 0x0; // Add Stop byte.
2196 --dsize;
2197
2198 *src_p = src;
2199 return (orig_dsize - dsize);
2200 }
2201
2202 //
2203 // Decompresses from '*src_p' into 'dst'.
2204 // 'dsize' is the size of the destination buffer available.
2205 //
2206 // Returns the number of bytes written to 'dst'.
2207 //
2208 // Updates '*src_p' to point to the
2209 // first unused source byte, so the caller can keep
2210 // parsing the heartbeat payload immediately after
2211 // the compressed blob.
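//
// Typical use (see the C_CSESSION handler above): pass the current read
// pointer and the remaining packet size plus a buffer of sizeof(sessiont) + 2;
// on return the read pointer has advanced past the compressed blob, and the
// return value must equal sizeof(sessiont) before the data is trusted.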
2212 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
2213 {
2214 int count;
2215 int orig_dsize = dsize;
2216 uint8_t *src = *src_p;
2217
2218 while (ssize >0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
2219 count = *src++; --ssize; // get the count byte from the source.
2220 if (count == 0x0) // End marker reached? If so, finish.
2221 break;
2222
2223 if (count & 0x80) { // Decompress a run of zeros
2224 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
2225 *dst++ = 0x0;
2226 --dsize;
2227 }
2228 } else { // Copy run of non-zero bytes.
2229 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
2230 *dst++ = *src++;
2231 --ssize; --dsize;
2232 }
2233 }
2234 }
2235 *src_p = src;
2236 return (orig_dsize - dsize);
2237 }