1 // L2TPNS Clustering Stuff
2
3 char const *cvs_id_cluster = "$Id: cluster.c,v 1.53 2006/07/17 07:53:08 bodea Exp $";
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdarg.h>
8 #include <unistd.h>
9 #include <inttypes.h>
10 #include <sys/file.h>
11 #include <sys/stat.h>
12 #include <sys/socket.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <sys/ioctl.h>
16 #include <net/if.h>
17 #include <string.h>
18 #include <malloc.h>
19 #include <errno.h>
20 #include <libcli.h>
21
22 #include "l2tpns.h"
23 #include "cluster.h"
24 #include "util.h"
25 #include "tbf.h"
26
27 #ifdef BGP
28 #include "bgp.h"
29 #endif
30 /*
31 * All cluster packets have the same format.
32 *
33 * One or more instances of
34 * a 32 bit 'type' id.
35  *	a 32 bit 'extra' value, dependent on the 'type'.
36  *	zero or more bytes of structure data, dependent on the type.
37 *
38 */
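/*
 * Illustrative sketch only (not compiled in): the per-element layout that
 * add_type() below writes, assuming the host byte order used on the wire.
 */
#if 0
struct cluster_element {
	uint32_t type;		// C_PING, C_HEARTBEAT, C_SESSION, ...
	uint32_t extra;		// meaning depends on 'type' (e.g. session id, sequence number)
	uint8_t data[];		// zero or more bytes of type-specific structure data
};
#endif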
39
40 // Module variables.
41 extern int cluster_sockfd;		// The file descriptor for the cluster communications port.
42
43 in_addr_t my_address = 0; // The network address of my ethernet port.
44 static int walk_session_number = 0; // The next session to send when doing the slow table walk.
45 static int walk_bundle_number = 0; // The next bundle to send when doing the slow table walk.
46 static int walk_tunnel_number = 0; // The next tunnel to send when doing the slow table walk.
47 int forked = 0; // Sanity check: CLI must not diddle with heartbeat table
48
49 #define MAX_HEART_SIZE (8192) // Maximum size of heartbeat packet. Must be less than max IP packet size :)
50 #define MAX_CHANGES (MAX_HEART_SIZE/(sizeof(sessiont) + sizeof(int) ) - 2) // Assumes a session is the biggest type!
51
52 static struct {
53 int type;
54 int id;
55 } cluster_changes[MAX_CHANGES]; // Queue of changed structures that need to go out when next heartbeat.
56
57 static struct {
58 int seq;
59 int size;
60 uint8_t data[MAX_HEART_SIZE];
61 } past_hearts[HB_HISTORY_SIZE]; // Ring buffer of heartbeats that we've recently sent out. Needed so
62 // we can re-transmit if needed.
63
64 static struct {
65 in_addr_t peer;
66 uint32_t basetime;
67 clockt timestamp;
68 int uptodate;
69 } peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
70 static int num_peers; // Number of peers in list.
71
72 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
73 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize);
74
75 //
76 // Create a listening socket
77 //
78 // This joins the cluster multi-cast group.
79 //
80 int cluster_init()
81 {
82 struct sockaddr_in addr;
83 struct sockaddr_in interface_addr;
84 struct ip_mreq mreq;
85 struct ifreq ifr;
86 int opt;
87
88 config->cluster_undefined_sessions = MAXSESSION-1;
89 config->cluster_undefined_bundles = MAXBUNDLE-1;
90 config->cluster_undefined_tunnels = MAXTUNNEL-1;
91
92 if (!config->cluster_address)
93 return 0;
94 if (!*config->cluster_interface)
95 return 0;
96
97 cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
98
99 memset(&addr, 0, sizeof(addr));
100 addr.sin_family = AF_INET;
101 addr.sin_port = htons(CLUSTERPORT);
102 addr.sin_addr.s_addr = INADDR_ANY;
103 	opt = 1; setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); // SO_REUSEADDR takes an int flag
104
105 opt = fcntl(cluster_sockfd, F_GETFL, 0);
106 fcntl(cluster_sockfd, F_SETFL, opt | O_NONBLOCK);
107
108 if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
109 {
110 LOG(0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
111 return -1;
112 }
113
114 strcpy(ifr.ifr_name, config->cluster_interface);
115 if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
116 {
117 LOG(0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
118 return -1;
119 }
120
121 memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
122 my_address = interface_addr.sin_addr.s_addr;
123
124 // Join multicast group.
125 mreq.imr_multiaddr.s_addr = config->cluster_address;
126 mreq.imr_interface = interface_addr.sin_addr;
127
128
129 opt = 0; // Turn off multicast loopback.
130 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
131
132 if (config->cluster_mcast_ttl != 1)
133 {
134 uint8_t ttl = 0;
135 if (config->cluster_mcast_ttl > 0)
136 ttl = config->cluster_mcast_ttl < 256 ? config->cluster_mcast_ttl : 255;
137
138 setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
139 }
140
141 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
142 {
143 LOG(0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
144 return -1;
145 }
146
147 if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
148 {
149 LOG(0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
150 return -1;
151 }
152
153 config->cluster_last_hb = TIME;
154 config->cluster_seq_number = -1;
155
156 return cluster_sockfd;
157 }
158
159
160 //
161 // Send a chunk of data to the entire cluster (usually via the multicast
162 // address ).
163 //
164
165 static int cluster_send_data(void *data, int datalen)
166 {
167 struct sockaddr_in addr = {0};
168
169 if (!cluster_sockfd) return -1;
170 if (!config->cluster_address) return 0;
171
172 addr.sin_addr.s_addr = config->cluster_address;
173 addr.sin_port = htons(CLUSTERPORT);
174 addr.sin_family = AF_INET;
175
176 LOG(5, 0, 0, "Cluster send data: %d bytes\n", datalen);
177
178 if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
179 {
180 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
181 return -1;
182 }
183
184 return 0;
185 }
186
187 //
188 // Add a chunk of data to a heartbeat packet.
189 // Maintains the format. Assumes that the caller
190 // has passed in a big enough buffer!
191 //
192 static void add_type(uint8_t **p, int type, int more, uint8_t *data, int size)
193 {
194 *((uint32_t *) (*p)) = type;
195 *p += sizeof(uint32_t);
196
197 *((uint32_t *)(*p)) = more;
198 *p += sizeof(uint32_t);
199
200 if (data && size > 0) {
201 memcpy(*p, data, size);
202 *p += size;
203 }
204 }
205
206 // advertise our presence via BGP or gratuitous ARP
207 static void advertise_routes(void)
208 {
209 #ifdef BGP
210 if (bgp_configured)
211 bgp_enable_routing(1);
212 else
213 #endif /* BGP */
214 if (config->send_garp)
215 send_garp(config->bind_address); // Start taking traffic.
216 }
217
218 // withdraw our routes (BGP only)
219 static void withdraw_routes(void)
220 {
221 #ifdef BGP
222 if (bgp_configured)
223 bgp_enable_routing(0);
224 #endif /* BGP */
225 }
226
227 static void cluster_uptodate(void)
228 {
229 if (config->cluster_iam_uptodate)
230 return;
231
232 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles)
233 return;
234
235 config->cluster_iam_uptodate = 1;
236
237 LOG(0, 0, 0, "Now uptodate with master.\n");
238 advertise_routes();
239 }
240
241 //
242 // Send a unicast UDP packet to a peer with 'data' as the
243 // contents.
244 //
245 static int peer_send_data(in_addr_t peer, uint8_t *data, int size)
246 {
247 struct sockaddr_in addr = {0};
248
249 if (!cluster_sockfd) return -1;
250 if (!config->cluster_address) return 0;
251
252 if (!peer) // Odd??
253 return -1;
254
255 addr.sin_addr.s_addr = peer;
256 addr.sin_port = htons(CLUSTERPORT);
257 addr.sin_family = AF_INET;
258
259 LOG_HEX(5, "Peer send", data, size);
260
261 if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
262 {
263 LOG(0, 0, 0, "sendto: %s\n", strerror(errno));
264 return -1;
265 }
266
267 return 0;
268 }
269
270 //
271 // Send a structured message to a peer with a single element of type 'type'.
272 //
273 static int peer_send_message(in_addr_t peer, int type, int more, uint8_t *data, int size)
274 {
275 uint8_t buf[65536]; // Vast overkill.
276 uint8_t *p = buf;
277
278 LOG(4, 0, 0, "Sending message to peer (type %d, more %d, size %d)\n", type, more, size);
279 add_type(&p, type, more, data, size);
280
281 return peer_send_data(peer, buf, (p-buf) );
282 }
283
284 // send a packet to the master
285 static int _forward_packet(uint8_t *data, int size, in_addr_t addr, int port, int type)
286 {
287 uint8_t buf[65536]; // Vast overkill.
288 uint8_t *p = buf;
289
290 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
291 return -1;
292
293 LOG(4, 0, 0, "Forwarding packet from %s to master (size %d)\n", fmtaddr(addr, 0), size);
294
295 STAT(c_forwarded);
296 add_type(&p, type, addr, (uint8_t *) &port, sizeof(port)); // ick. should be uint16_t
297 memcpy(p, data, size);
298 p += size;
299
300 return peer_send_data(config->cluster_master_address, buf, (p - buf));
301 }
302
303 //
304 // Forward a state changing packet to the master.
305 //
306 // The master just processes the payload as if it had
307 // received it off the tun device.
308 //
309 int master_forward_packet(uint8_t *data, int size, in_addr_t addr, int port)
310 {
311 return _forward_packet(data, size, addr, port, C_FORWARD);
312 }
313
314 // Forward a DAE RADIUS packet to the master.
315 int master_forward_dae_packet(uint8_t *data, int size, in_addr_t addr, int port)
316 {
317 return _forward_packet(data, size, addr, port, C_FORWARD_DAE);
318 }
319
320 //
321 // Forward a throttled packet to the master for handling.
322 //
323 // The master just drops the packet into the appropriate
324 // token bucket queue, and lets normal processing take care
325 // of it.
326 //
327 int master_throttle_packet(int tbfid, uint8_t *data, int size)
328 {
329 uint8_t buf[65536]; // Vast overkill.
330 uint8_t *p = buf;
331
332 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
333 return -1;
334
335 LOG(4, 0, 0, "Throttling packet master (size %d, tbfid %d)\n", size, tbfid);
336
337 add_type(&p, C_THROTTLE, tbfid, data, size);
338
339 return peer_send_data(config->cluster_master_address, buf, (p-buf) );
340
341 }
342
343 //
344 // Forward a walled garden packet to the master for handling.
345 //
346 // The master just writes the packet straight to the tun
347 // device (where it will normally loop through the
348 // firewall rules, and come back in on the tun device)
349 //
350 // (Note that this must be called with the tun header
351 // as the start of the data).
352 int master_garden_packet(sessionidt s, uint8_t *data, int size)
353 {
354 uint8_t buf[65536]; // Vast overkill.
355 uint8_t *p = buf;
356
357 if (!config->cluster_master_address) // No election has been held yet. Just skip it.
358 return -1;
359
360 LOG(4, 0, 0, "Walled garden packet to master (size %d)\n", size);
361
362 add_type(&p, C_GARDEN, s, data, size);
363
364 return peer_send_data(config->cluster_master_address, buf, (p-buf));
365
366 }
367
368 //
369 // Send a chunk of data as a heartbeat..
370 // We save it in the history buffer as we do so.
371 //
372 static void send_heartbeat(int seq, uint8_t *data, int size)
373 {
374 int i;
375
376 if (size > sizeof(past_hearts[0].data))
377 {
378 LOG(0, 0, 0, "Tried to heartbeat something larger than the maximum packet!\n");
379 kill(0, SIGTERM);
380 exit(1);
381 }
382 i = seq % HB_HISTORY_SIZE;
383 past_hearts[i].seq = seq;
384 past_hearts[i].size = size;
385 memcpy(&past_hearts[i].data, data, size); // Save it.
386 cluster_send_data(data, size);
387 }
388
389 //
390 // Send an 'i am alive' message to every machine in the cluster.
391 //
392 void cluster_send_ping(time_t basetime)
393 {
394 uint8_t buff[100 + sizeof(pingt)];
395 uint8_t *p = buff;
396 pingt x;
397
398 if (config->cluster_iam_master && basetime) // We're heartbeating so no need to ping.
399 return;
400
401 LOG(5, 0, 0, "Sending cluster ping...\n");
402
403 x.ver = 1;
404 x.addr = config->bind_address;
405 x.undef = config->cluster_undefined_sessions + config->cluster_undefined_tunnels;
406 x.basetime = basetime;
407
408 add_type(&p, C_PING, basetime, (uint8_t *) &x, sizeof(x));
409 cluster_send_data(buff, (p-buff) );
410 }
411
412 //
413 // Walk the session counters looking for non-zero ones to send
414 // to the master. We send up to 600 of them at one time.
415 // We examine a maximum of 3000 sessions.
416 // (50k max session should mean that we normally
417 // examine the entire session table every 25 seconds).
418
419 #define MAX_B_RECS (600)
420 void master_update_counts(void)
421 {
422 int i, c;
423 bytest b[MAX_B_RECS+1];
424
425 if (config->cluster_iam_master) // Only happens on the slaves.
426 return;
427
428 if (!config->cluster_master_address) // If we don't have a master, skip it for a while.
429 return;
430
431 // C_BYTES format changed in 2.1.0 (cluster version 5)
432 // during upgrade from previous versions, hang onto our counters
433 // for a bit until the new master comes up
434 if (config->cluster_last_hb_ver < 5)
435 return;
436
437 i = MAX_B_RECS * 5; // Examine max 3000 sessions;
438 if (config->cluster_highest_sessionid > i)
439 i = config->cluster_highest_sessionid;
440
441 for ( c = 0; i > 0 ; --i) {
442 // Next session to look at.
443 walk_session_number++;
444 if ( walk_session_number > config->cluster_highest_sessionid)
445 walk_session_number = 1;
446
447 if (!sess_local[walk_session_number].cin && !sess_local[walk_session_number].cout)
448 continue; // Unchanged. Skip it.
449
450 b[c].sid = walk_session_number;
451 b[c].pin = sess_local[walk_session_number].pin;
452 b[c].pout = sess_local[walk_session_number].pout;
453 b[c].cin = sess_local[walk_session_number].cin;
454 b[c].cout = sess_local[walk_session_number].cout;
455
456 // Reset counters.
457 sess_local[walk_session_number].pin = sess_local[walk_session_number].pout = 0;
458 sess_local[walk_session_number].cin = sess_local[walk_session_number].cout = 0;
459
460 if (++c > MAX_B_RECS) // Send a max of 600 elements in a packet.
461 break;
462 }
463
464 	if (!c)		// Didn't find any that changed. Get out of here!
465 return;
466
467
468 // Forward the data to the master.
469 LOG(4, 0, 0, "Sending byte counters to master (%d elements)\n", c);
470 peer_send_message(config->cluster_master_address, C_BYTES, c, (uint8_t *) &b, sizeof(b[0]) * c);
471 return;
472 }
473
474 //
475 // On the master, check how our slaves are going. If
476 // one of them's not up-to-date we'll heartbeat faster.
477 // If we don't have any of them, then we need to turn
478 // on our own packet handling!
479 //
480 void cluster_check_slaves(void)
481 {
482 int i;
483 static int have_peers = 0;
484 int had_peers = have_peers;
485 clockt t = TIME;
486
487 if (!config->cluster_iam_master)
488 return; // Only runs on the master...
489
490 config->cluster_iam_uptodate = 1; // cleared in loop below
491
492 for (i = have_peers = 0; i < num_peers; i++)
493 {
494 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
495 continue; // Stale peer! Skip them.
496
497 if (!peers[i].basetime)
498 continue; // Shutdown peer! Skip them.
499
500 if (peers[i].uptodate)
501 have_peers++;
502 else
503 config->cluster_iam_uptodate = 0; // Start fast heartbeats
504 }
505
506 // in a cluster, withdraw/add routes when we get a peer/lose peers
507 if (have_peers != had_peers)
508 {
509 if (had_peers < config->cluster_master_min_adv &&
510 have_peers >= config->cluster_master_min_adv)
511 withdraw_routes();
512
513 else if (had_peers >= config->cluster_master_min_adv &&
514 have_peers < config->cluster_master_min_adv)
515 advertise_routes();
516 }
517 }
518
519 //
520 // Check that we have a master. If it's been too
521 // long since we heard from a master then hold an election.
522 //
523 void cluster_check_master(void)
524 {
525 int i, count, tcount, bcount, high_unique_id = 0;
526 int last_free = 0;
527 clockt t = TIME;
528 static int probed = 0;
529 int have_peers;
530
531 if (config->cluster_iam_master)
532 return; // Only runs on the slaves...
533
534 	// If the master is late (missed 2 heartbeats by a second and a
535 	// hair) it may be that the switch has dropped us from the
536 	// multicast group; try unicasting probes to the master
537 // which will hopefully respond with a unicast heartbeat that
538 // will allow us to limp along until the querier next runs.
539 if (config->cluster_master_address
540 && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
541 {
542 if (!probed || (TIME > (probed + 2 * config->cluster_hb_interval)))
543 {
544 probed = TIME;
545 LOG(1, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
546 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
547
548 peer_send_message(config->cluster_master_address,
549 C_LASTSEEN, config->cluster_seq_number, NULL, 0);
550 }
551 } else { // We got a recent heartbeat; reset the probe flag.
552 probed = 0;
553 }
554
555 if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
556 return; // Everything's ok!
557
558 config->cluster_last_hb = TIME + 1; // Just the one election thanks.
559 config->cluster_master_address = 0;
560
561 LOG(0, 0, 0, "Master timed out! Holding election...\n");
562
563 // In the process of shutting down, can't be master
564 if (main_quit)
565 return;
566
567 for (i = have_peers = 0; i < num_peers; i++)
568 {
569 if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
570 continue; // Stale peer! Skip them.
571
572 if (!peers[i].basetime)
573 continue; // Shutdown peer! Skip them.
574
575 if (peers[i].basetime < basetime) {
576 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
577 return; // They'll win the election. Get out of here.
578 }
579
580 if (peers[i].basetime == basetime &&
581 peers[i].peer > my_address) {
582 LOG(1, 0, 0, "Expecting %s to become master\n", fmtaddr(peers[i].peer, 0));
583 return; // They'll win the election. Wait for them to come up.
584 }
585
586 if (peers[i].uptodate)
587 have_peers++;
588 }
589
590 	// Wow. It's been ages since I last heard a heartbeat
591 	// and I'm better than any of my peers, so it's time
592 // to become a master!!!
593
594 config->cluster_iam_master = 1;
595
596 LOG(0, 0, 0, "I am declaring myself the master!\n");
597
598 if (have_peers < config->cluster_master_min_adv)
599 advertise_routes();
600 else
601 withdraw_routes();
602
603 if (config->cluster_seq_number == -1)
604 config->cluster_seq_number = 0;
605
606 //
607 // Go through and mark all the tunnels as defined.
608 // Count the highest used tunnel number as well.
609 //
610 config->cluster_highest_tunnelid = 0;
611 for (i = 0, tcount = 0; i < MAXTUNNEL; ++i) {
612 if (tunnel[i].state == TUNNELUNDEF)
613 tunnel[i].state = TUNNELFREE;
614
615 if (tunnel[i].state != TUNNELFREE && i > config->cluster_highest_tunnelid)
616 config->cluster_highest_tunnelid = i;
617 }
618
619 //
620 // Go through and mark all the bundles as defined.
621 // Count the highest used bundle number as well.
622 //
623 config->cluster_highest_bundleid = 0;
624 for (i = 0, bcount = 0; i < MAXBUNDLE; ++i) {
625 if (bundle[i].state == BUNDLEUNDEF)
626 bundle[i].state = BUNDLEFREE;
627
628 if (bundle[i].state != BUNDLEFREE && i > config->cluster_highest_bundleid)
629 config->cluster_highest_bundleid = i;
630 }
631
632 //
633 // Go through and mark all the sessions as being defined.
634 // reset the idle timeouts.
635 // add temporary byte counters to permanent ones.
636 // Re-string the free list.
637 // Find the ID of the highest session.
638 last_free = 0;
639 high_unique_id = 0;
640 config->cluster_highest_sessionid = 0;
641 for (i = 0, count = 0; i < MAXSESSION; ++i) {
642 if (session[i].tunnel == T_UNDEF) {
643 session[i].tunnel = T_FREE;
644 ++count;
645 }
646
647 if (!session[i].opened) { // Unused session. Add to free list.
648 memset(&session[i], 0, sizeof(session[i]));
649 session[i].tunnel = T_FREE;
650 session[last_free].next = i;
651 session[i].next = 0;
652 last_free = i;
653 continue;
654 }
655
656 // Reset idle timeouts..
657 session[i].last_packet = session[i].last_data = time_now;
658
659 // Reset die relative to our uptime rather than the old master's
660 if (session[i].die) session[i].die = TIME;
661
662 // Accumulate un-sent byte/packet counters.
663 increment_counter(&session[i].cin, &session[i].cin_wrap, sess_local[i].cin);
664 increment_counter(&session[i].cout, &session[i].cout_wrap, sess_local[i].cout);
665 session[i].cin_delta += sess_local[i].cin;
666 session[i].cout_delta += sess_local[i].cout;
667
668 session[i].pin += sess_local[i].pin;
669 session[i].pout += sess_local[i].pout;
670
671 sess_local[i].cin = sess_local[i].cout = 0;
672 sess_local[i].pin = sess_local[i].pout = 0;
673
674 sess_local[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
675
676 if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
677 high_unique_id = session[i].unique_id+1;
678
679 session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
680 throttle_session(i, session[i].throttle_in, session[i].throttle_out);
681
682 config->cluster_highest_sessionid = i;
683 }
684
685 session[last_free].next = 0; // End of chain.
686 last_id = high_unique_id; // Keep track of the highest used session ID.
687
688 become_master();
689
690 rebuild_address_pool();
691
692 // If we're not the very first master, this is a big issue!
693 if (count > 0)
694 LOG(0, 0, 0, "Warning: Fixed %d uninitialized sessions in becoming master!\n", count);
695
696 config->cluster_undefined_sessions = 0;
697 config->cluster_undefined_bundles = 0;
698 config->cluster_undefined_tunnels = 0;
699 config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
700
701 // FIXME. We need to fix up the tunnel control message
702 // queue here! There's a number of other variables we
703 // should also update.
704 }
705
706
707 //
708 // Check that our session table is validly matching what the
709 // master has in mind.
710 //
711 // In particular, if we have too many sessions marked 'undefined'
712 // we fix it up here, and we ensure that the 'first free session'
713 // pointer is valid.
714 //
715 static void cluster_check_sessions(int highsession, int freesession_ptr, int highbundle, int hightunnel)
716 {
717 int i;
718
719 sessionfree = freesession_ptr; // Keep the freesession ptr valid.
720
721 if (config->cluster_iam_uptodate)
722 return;
723
724 if (highsession > config->cluster_undefined_sessions && highbundle > config->cluster_undefined_bundles && hightunnel > config->cluster_undefined_tunnels)
725 return;
726
727 // Clear out defined sessions, counting the number of
728 // undefs remaining.
729 config->cluster_undefined_sessions = 0;
730 for (i = 1 ; i < MAXSESSION; ++i) {
731 if (i > highsession) {
732 if (session[i].tunnel == T_UNDEF) session[i].tunnel = T_FREE; // Defined.
733 continue;
734 }
735
736 if (session[i].tunnel == T_UNDEF)
737 ++config->cluster_undefined_sessions;
738 }
739
740 // Clear out defined bundles, counting the number of
741 // undefs remaining.
742 config->cluster_undefined_bundles = 0;
743 for (i = 1 ; i < MAXBUNDLE; ++i) {
744 if (i > highbundle) {
745 if (bundle[i].state == BUNDLEUNDEF) bundle[i].state = BUNDLEFREE; // Defined.
746 continue;
747 }
748
749 if (bundle[i].state == BUNDLEUNDEF)
750 ++config->cluster_undefined_bundles;
751 }
752
753 // Clear out defined tunnels, counting the number of
754 // undefs remaining.
755 config->cluster_undefined_tunnels = 0;
756 for (i = 1 ; i < MAXTUNNEL; ++i) {
757 if (i > hightunnel) {
758 if (tunnel[i].state == TUNNELUNDEF) tunnel[i].state = TUNNELFREE; // Defined.
759 continue;
760 }
761
762 if (tunnel[i].state == TUNNELUNDEF)
763 ++config->cluster_undefined_tunnels;
764 }
765
766
767 if (config->cluster_undefined_sessions || config->cluster_undefined_tunnels || config->cluster_undefined_bundles) {
768 LOG(2, 0, 0, "Cleared undefined sessions/bundles/tunnels. %d sess (high %d), %d bund (high %d), %d tunn (high %d)\n",
769 config->cluster_undefined_sessions, highsession, config->cluster_undefined_bundles, highbundle, config->cluster_undefined_tunnels, hightunnel);
770 return;
771 }
772
773 // Are we up to date?
774
775 if (!config->cluster_iam_uptodate)
776 cluster_uptodate();
777 }
778
779 static int hb_add_type(uint8_t **p, int type, int id)
780 {
781 switch (type) {
782 case C_CSESSION: { // Compressed C_SESSION.
783 uint8_t c[sizeof(sessiont) * 2]; // Bigger than worst case.
784 uint8_t *d = (uint8_t *) &session[id];
785 uint8_t *orig = d;
786 int size;
787
788 size = rle_compress( &d, sizeof(sessiont), c, sizeof(c) );
789
790 // Did we compress the full structure, and is the size actually
791 // reduced??
792 if ( (d - orig) == sizeof(sessiont) && size < sizeof(sessiont) ) {
793 add_type(p, C_CSESSION, id, c, size);
794 break;
795 }
796 // Failed to compress : Fall through.
797 }
798 case C_SESSION:
799 add_type(p, C_SESSION, id, (uint8_t *) &session[id], sizeof(sessiont));
800 break;
801
802 case C_CBUNDLE: { // Compressed C_BUNDLE
803 uint8_t c[sizeof(bundlet) * 2]; // Bigger than worst case.
804 uint8_t *d = (uint8_t *) &bundle[id];
805 uint8_t *orig = d;
806 int size;
807
808 size = rle_compress( &d, sizeof(bundlet), c, sizeof(c) );
809
810 // Did we compress the full structure, and is the size actually
811 // reduced??
812 if ( (d - orig) == sizeof(bundlet) && size < sizeof(bundlet) ) {
813 add_type(p, C_CBUNDLE, id, c, size);
814 break;
815 }
816 // Failed to compress : Fall through.
817 }
818
819 case C_BUNDLE:
820 add_type(p, C_BUNDLE, id, (uint8_t *) &bundle[id], sizeof(bundlet));
821 break;
822
823 case C_CTUNNEL: { // Compressed C_TUNNEL
824 uint8_t c[sizeof(tunnelt) * 2]; // Bigger than worst case.
825 uint8_t *d = (uint8_t *) &tunnel[id];
826 uint8_t *orig = d;
827 int size;
828
829 size = rle_compress( &d, sizeof(tunnelt), c, sizeof(c) );
830
831 // Did we compress the full structure, and is the size actually
832 // reduced??
833 if ( (d - orig) == sizeof(tunnelt) && size < sizeof(tunnelt) ) {
834 add_type(p, C_CTUNNEL, id, c, size);
835 break;
836 }
837 // Failed to compress : Fall through.
838 }
839 case C_TUNNEL:
840 add_type(p, C_TUNNEL, id, (uint8_t *) &tunnel[id], sizeof(tunnelt));
841 break;
842 default:
843 LOG(0, 0, 0, "Found an invalid type in heart queue! (%d)\n", type);
844 kill(0, SIGTERM);
845 exit(1);
846 }
847 return 0;
848 }
849
850 //
851 // Send a heartbeat, incidentally sending out any queued changes.
852 //
853 void cluster_heartbeat()
854 {
855 int i, count = 0, tcount = 0, bcount = 0;
856 uint8_t buff[MAX_HEART_SIZE + sizeof(heartt) + sizeof(int) ];
857 heartt h;
858 uint8_t *p = buff;
859
860 if (!config->cluster_iam_master) // Only the master does this.
861 return;
862
863 config->cluster_table_version += config->cluster_num_changes;
864
865 // Fill out the heartbeat header.
866 memset(&h, 0, sizeof(h));
867
868 h.version = HB_VERSION;
869 h.seq = config->cluster_seq_number;
870 h.basetime = basetime;
871 h.clusterid = config->bind_address; // Will this do??
873 h.highsession = config->cluster_highest_sessionid;
874 h.freesession = sessionfree;
875 h.hightunnel = config->cluster_highest_tunnelid;
876 h.highbundle = config->cluster_highest_bundleid;
877 h.size_sess = sizeof(sessiont); // Just in case.
878 h.size_bund = sizeof(bundlet);
879 h.size_tunn = sizeof(tunnelt);
880 h.interval = config->cluster_hb_interval;
881 h.timeout = config->cluster_hb_timeout;
882 h.table_version = config->cluster_table_version;
883
884 add_type(&p, C_HEARTBEAT, HB_VERSION, (uint8_t *) &h, sizeof(h));
885
886 for (i = 0; i < config->cluster_num_changes; ++i) {
887 hb_add_type(&p, cluster_changes[i].type, cluster_changes[i].id);
888 }
889
890 	if (p > (buff + sizeof(buff))) {	// Did we somehow manage to overrun the buffer?
891 LOG(0, 0, 0, "FATAL: Overran the heartbeat buffer! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
892 kill(0, SIGTERM);
893 exit(1);
894 }
895
896 //
897 // Fill out the packet with sessions from the session table...
898 // (not forgetting to leave space so we can get some tunnels in too )
899 while ( (p + sizeof(uint32_t) * 2 + sizeof(sessiont) * 2 ) < (buff + MAX_HEART_SIZE) ) {
900
901 if (!walk_session_number) // session #0 isn't valid.
902 ++walk_session_number;
903
904 if (count >= config->cluster_highest_sessionid) // If we're a small cluster, don't go wild.
905 break;
906
907 hb_add_type(&p, C_CSESSION, walk_session_number);
908 walk_session_number = (1+walk_session_number)%(config->cluster_highest_sessionid+1); // +1 avoids divide by zero.
909
910 ++count; // Count the number of extra sessions we're sending.
911 }
912
913 //
914 // Fill out the packet with tunnels from the tunnel table...
915 // This effectively means we walk the tunnel table more quickly
916 // than the session table. This is good because stuffing up a
917 // tunnel is a much bigger deal than stuffing up a session.
918 //
919 while ( (p + sizeof(uint32_t) * 2 + sizeof(tunnelt) ) < (buff + MAX_HEART_SIZE) ) {
920
921 if (!walk_tunnel_number) // tunnel #0 isn't valid.
922 ++walk_tunnel_number;
923
924 if (tcount >= config->cluster_highest_tunnelid)
925 break;
926
927 hb_add_type(&p, C_CTUNNEL, walk_tunnel_number);
928 walk_tunnel_number = (1+walk_tunnel_number)%(config->cluster_highest_tunnelid+1); // +1 avoids divide by zero.
929
930 ++tcount;
931 }
932
933 //
934 // Fill out the packet with bundles from the bundle table...
935 while ( (p + sizeof(uint32_t) * 2 + sizeof(bundlet) ) < (buff + MAX_HEART_SIZE) ) {
936
937 if (!walk_bundle_number) // bundle #0 isn't valid.
938 ++walk_bundle_number;
939
940 if (bcount >= config->cluster_highest_bundleid)
941 break;
942
943 hb_add_type(&p, C_CTUNNEL, walk_bundle_number);
944 walk_tunnel_number = (1+walk_bundle_number)%(config->cluster_highest_bundleid+1); // +1 avoids divide by zero.
945
946 ++bcount;
947 }
948
949 //
950 // Did we do something wrong?
951 	if (p > (buff + sizeof(buff))) {	// Did we somehow manage to overrun the buffer?
952 LOG(0, 0, 0, "Overran the heartbeat buffer now! This is fatal. Exiting. (size %d)\n", (int) (p - buff));
953 kill(0, SIGTERM);
954 exit(1);
955 }
956
957 LOG(3, 0, 0, "Sending v%d heartbeat #%d, change #%" PRIu64 " with %d changes "
958 "(%d x-sess, %d x-bundles, %d x-tunnels, %d highsess, %d highbund, %d hightun, size %d)\n",
959 HB_VERSION, h.seq, h.table_version, config->cluster_num_changes,
960 count, bcount, tcount, config->cluster_highest_sessionid, config->cluster_highest_bundleid,
961 config->cluster_highest_tunnelid, (int) (p - buff));
962
963 config->cluster_num_changes = 0;
964
965 send_heartbeat(h.seq, buff, (p-buff) ); // Send out the heartbeat to the cluster, keeping a copy of it.
966
967 config->cluster_seq_number = (config->cluster_seq_number+1)%HB_MAX_SEQ; // Next seq number to use.
968 }
969
970 //
971 // A structure of type 'type' has changed; Add it to the queue to send.
972 //
973 static int type_changed(int type, int id)
974 {
975 int i;
976
977 for (i = 0 ; i < config->cluster_num_changes ; ++i)
978 if ( cluster_changes[i].id == id &&
979 cluster_changes[i].type == type)
980 return 0; // Already marked for change.
981
982 cluster_changes[i].type = type;
983 cluster_changes[i].id = id;
984 ++config->cluster_num_changes;
985
986 	if (config->cluster_num_changes >= MAX_CHANGES)
987 cluster_heartbeat(); // flush now
988
989 return 1;
990 }
991
992
993 // A particular session has been changed!
994 int cluster_send_session(int sid)
995 {
996 if (!config->cluster_iam_master) {
997 LOG(0, sid, 0, "I'm not a master, but I just tried to change a session!\n");
998 return -1;
999 }
1000
1001 if (forked) {
1002 LOG(0, sid, 0, "cluster_send_session called from child process!\n");
1003 return -1;
1004 }
1005
1006 return type_changed(C_CSESSION, sid);
1007 }
1008
1009 // A particular bundle has been changed!
1010 int cluster_send_bundle(int bid)
1011 {
1012 if (!config->cluster_iam_master) {
1013 LOG(0, 0, bid, "I'm not a master, but I just tried to change a bundle!\n");
1014 return -1;
1015 }
1016
1017 return type_changed(C_CBUNDLE, bid);
1018 }
1019
1020 // A particular tunnel has been changed!
1021 int cluster_send_tunnel(int tid)
1022 {
1023 if (!config->cluster_iam_master) {
1024 LOG(0, 0, tid, "I'm not a master, but I just tried to change a tunnel!\n");
1025 return -1;
1026 }
1027
1028 return type_changed(C_CTUNNEL, tid);
1029 }
1030
1031
1032 //
1033 // We're a master, and a slave has just told us that it's
1034 // missed a packet. We'll resend it every packet since
1035 // the last one it's seen.
1036 //
1037 static int cluster_catchup_slave(int seq, in_addr_t slave)
1038 {
1039 int s;
1040 int diff;
1041
1042 LOG(1, 0, 0, "Slave %s sent LASTSEEN with seq %d\n", fmtaddr(slave, 0), seq);
1043 if (!config->cluster_iam_master) {
1044 LOG(1, 0, 0, "Got LASTSEEN but I'm not a master! Redirecting it to %s.\n",
1045 fmtaddr(config->cluster_master_address, 0));
1046
1047 peer_send_message(slave, C_MASTER, config->cluster_master_address, NULL, 0);
1048 return 0;
1049 }
1050
1051 diff = config->cluster_seq_number - seq; // How many packet do we need to send?
1052 if (diff < 0)
1053 diff += HB_MAX_SEQ;
1054
1055 if (diff >= HB_HISTORY_SIZE) { // Ouch. We don't have the packet to send it!
1056 LOG(0, 0, 0, "A slave asked for message %d when our seq number is %d. Killing it.\n",
1057 seq, config->cluster_seq_number);
1058 return peer_send_message(slave, C_KILL, seq, NULL, 0);// Kill the slave. Nothing else to do.
1059 }
1060
1061 LOG(1, 0, 0, "Sending %d catchup packets to slave %s\n", diff, fmtaddr(slave, 0) );
1062
1063 // Now resend every packet that it missed, in order.
1064 while (seq != config->cluster_seq_number) {
1065 s = seq % HB_HISTORY_SIZE;
1066 if (seq != past_hearts[s].seq) {
1067 LOG(0, 0, 0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
1068 fmtaddr(slave, 0), seq, past_hearts[s].seq, s, config->cluster_seq_number);
1069 return -1; // What to do here!?
1070 }
1071 peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
1072 seq = (seq+1)%HB_MAX_SEQ; // Increment to next seq number.
1073 }
1074 return 0; // All good!
1075 }
1076
1077 //
1078 // We've heard from another peer! Add it to the list
1079 // that we select from at election time.
1080 //
1081 static int cluster_add_peer(in_addr_t peer, time_t basetime, pingt *pp, int size)
1082 {
1083 int i;
1084 in_addr_t clusterid;
1085 pingt p;
1086
1087 	// Allow for backward compatibility.
1088 	// Copy the ping packet into a new structure to allow
1089 // for the possibility that we might have received
1090 // more or fewer elements than we were expecting.
1091 if (size > sizeof(p))
1092 size = sizeof(p);
1093
1094 memset( (void *) &p, 0, sizeof(p) );
1095 memcpy( (void *) &p, (void *) pp, size);
1096
1097 clusterid = p.addr;
1098 if (clusterid != config->bind_address)
1099 {
1100 // Is this for us?
1101 LOG(4, 0, 0, "Skipping ping from %s (different cluster)\n", fmtaddr(peer, 0));
1102 return 0;
1103 }
1104
1105 for (i = 0; i < num_peers ; ++i)
1106 {
1107 if (peers[i].peer != peer)
1108 continue;
1109
1110 // This peer already exists. Just update the timestamp.
1111 peers[i].basetime = basetime;
1112 peers[i].timestamp = TIME;
1113 peers[i].uptodate = !p.undef;
1114 break;
1115 }
1116
1117 // Is this the master shutting down??
1118 if (peer == config->cluster_master_address) {
1119 LOG(3, 0, 0, "Master %s %s\n", fmtaddr(config->cluster_master_address, 0),
1120 basetime ? "has restarted!" : "shutting down...");
1121
1122 config->cluster_master_address = 0;
1123 config->cluster_last_hb = 0; // Force an election.
1124 cluster_check_master();
1125 }
1126
1127 if (i >= num_peers)
1128 {
1129 LOG(4, 0, 0, "Adding %s as a peer\n", fmtaddr(peer, 0));
1130
1131 // Not found. Is there a stale slot to re-use?
1132 for (i = 0; i < num_peers ; ++i)
1133 {
1134 if (!peers[i].basetime) // Shutdown
1135 break;
1136
1137 if ((peers[i].timestamp + config->cluster_hb_timeout * 10) < TIME) // Stale.
1138 break;
1139 }
1140
1141 if (i >= CLUSTER_MAX_SIZE)
1142 {
1143 // Too many peers!!
1144 LOG(0, 0, 0, "Tried to add %s as a peer, but I already have %d of them!\n", fmtaddr(peer, 0), i);
1145 return -1;
1146 }
1147
1148 peers[i].peer = peer;
1149 peers[i].basetime = basetime;
1150 peers[i].timestamp = TIME;
1151 peers[i].uptodate = !p.undef;
1152 if (i == num_peers)
1153 ++num_peers;
1154
1155 LOG(1, 0, 0, "Added %s as a new peer. Now %d peers\n", fmtaddr(peer, 0), num_peers);
1156 }
1157
1158 return 1;
1159 }
1160
1161 // A slave responds with C_MASTER when it gets a message which should have gone to a master.
1162 static int cluster_set_master(in_addr_t peer, in_addr_t master)
1163 {
1164 if (config->cluster_iam_master) // Sanity...
1165 return 0;
1166
1167 LOG(3, 0, 0, "Peer %s set the master to %s...\n", fmtaddr(peer, 0),
1168 fmtaddr(master, 1));
1169
1170 config->cluster_master_address = master;
1171 if (master)
1172 {
1173 // catchup with new master
1174 peer_send_message(master, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1175
1176 // delay next election
1177 config->cluster_last_hb = TIME;
1178 }
1179
1180 // run election (or reset "probed" if master was set)
1181 cluster_check_master();
1182 return 0;
1183 }
1184
1185 /* Handle the slave updating the byte counters for the master. */
1186 //
1187 // Note that we don't mark the session as dirty; We rely on
1188 // the slow table walk to propogate this back out to the slaves.
1189 //
1190 static int cluster_handle_bytes(uint8_t *data, int size)
1191 {
1192 bytest *b;
1193
1194 b = (bytest *) data;
1195
1196 LOG(3, 0, 0, "Got byte counter update (size %d)\n", size);
1197
1198 /* Loop around, adding the byte
1199 counts to each of the sessions. */
1200
1201 while (size >= sizeof(*b) ) {
1202 		if (b->sid >= MAXSESSION) {
1203 LOG(0, 0, 0, "Got C_BYTES with session #%d!\n", b->sid);
1204 return -1; /* Abort processing */
1205 }
1206
1207 session[b->sid].pin += b->pin;
1208 session[b->sid].pout += b->pout;
1209
1210 increment_counter(&session[b->sid].cin, &session[b->sid].cin_wrap, b->cin);
1211 increment_counter(&session[b->sid].cout, &session[b->sid].cout_wrap, b->cout);
1212
1213 session[b->sid].cin_delta += b->cin;
1214 session[b->sid].cout_delta += b->cout;
1215
1216 if (b->cin)
1217 session[b->sid].last_packet = session[b->sid].last_data = time_now;
1218 else if (b->cout)
1219 session[b->sid].last_data = time_now;
1220
1221 size -= sizeof(*b);
1222 ++b;
1223 }
1224
1225 if (size != 0)
1226 LOG(0, 0, 0, "Got C_BYTES with %d bytes of trailing junk!\n", size);
1227
1228 return size;
1229 }
1230
1231 //
1232 // Handle receiving a session structure in a heartbeat packet.
1233 //
1234 static int cluster_recv_session(int more, uint8_t *p)
1235 {
1236 if (more >= MAXSESSION) {
1237 LOG(0, 0, 0, "DANGER: Received a heartbeat session id > MAXSESSION!\n");
1238 return -1;
1239 }
1240
1241 if (session[more].tunnel == T_UNDEF) {
1242 if (config->cluster_iam_uptodate) { // Sanity.
1243 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined session!\n");
1244 } else {
1245 --config->cluster_undefined_sessions;
1246 }
1247 }
1248
1249 load_session(more, (sessiont *) p); // Copy session into session table..
1250
1251 LOG(5, more, 0, "Received session update (%d undef)\n", config->cluster_undefined_sessions);
1252
1253 if (!config->cluster_iam_uptodate)
1254 cluster_uptodate(); // Check to see if we're up to date.
1255
1256 return 0;
1257 }
1258
1259 static int cluster_recv_bundle(int more, uint8_t *p)
1260 {
1261 if (more >= MAXBUNDLE) {
1262 LOG(0, 0, 0, "DANGER: Received a bundle id > MAXBUNDLE!\n");
1263 return -1;
1264 }
1265
1266 if (bundle[more].state == BUNDLEUNDEF) {
1267 if (config->cluster_iam_uptodate) { // Sanity.
1268 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined bundle!\n");
1269 } else {
1270 --config->cluster_undefined_bundles;
1271 }
1272 }
1273
1274 memcpy(&bundle[more], p, sizeof(bundle[more]) );
1275
1276 LOG(5, 0, more, "Received bundle update\n");
1277
1278 if (!config->cluster_iam_uptodate)
1279 cluster_uptodate(); // Check to see if we're up to date.
1280
1281 return 0;
1282 }
1283
1284 static int cluster_recv_tunnel(int more, uint8_t *p)
1285 {
1286 if (more >= MAXTUNNEL) {
1287 LOG(0, 0, 0, "DANGER: Received a tunnel session id > MAXTUNNEL!\n");
1288 return -1;
1289 }
1290
1291 if (tunnel[more].state == TUNNELUNDEF) {
1292 if (config->cluster_iam_uptodate) { // Sanity.
1293 LOG(0, 0, 0, "I thought I was uptodate but I just found an undefined tunnel!\n");
1294 } else {
1295 --config->cluster_undefined_tunnels;
1296 }
1297 }
1298
1299 memcpy(&tunnel[more], p, sizeof(tunnel[more]) );
1300
1301 //
1302 // Clear tunnel control messages. These are dynamically allocated.
1303 // If we get unlucky, this may cause the tunnel to drop!
1304 //
1305 tunnel[more].controls = tunnel[more].controle = NULL;
1306 tunnel[more].controlc = 0;
1307
1308 LOG(5, 0, more, "Received tunnel update\n");
1309
1310 if (!config->cluster_iam_uptodate)
1311 cluster_uptodate(); // Check to see if we're up to date.
1312
1313 return 0;
1314 }
1315
1316
1317 // pre v5 heartbeat session structure
1318 struct oldsession {
1319 sessionidt next;
1320 sessionidt far;
1321 tunnelidt tunnel;
1322 in_addr_t ip;
1323 int ip_pool_index;
1324 unsigned long unique_id;
1325 uint16_t nr;
1326 uint16_t ns;
1327 uint32_t magic;
1328 uint32_t cin, cout;
1329 uint32_t pin, pout;
1330 uint32_t total_cin;
1331 uint32_t total_cout;
1332 uint32_t id;
1333 uint16_t throttle_in;
1334 uint16_t throttle_out;
1335 clockt opened;
1336 clockt die;
1337 time_t last_packet;
1338 in_addr_t dns1, dns2;
1339 routet route[MAXROUTE];
1340 uint16_t radius;
1341 uint16_t mru;
1342 uint16_t tbf_in;
1343 uint16_t tbf_out;
1344 uint8_t l2tp_flags;
1345 uint8_t reserved_old_snoop;
1346 uint8_t walled_garden;
1347 uint8_t flags1;
1348 char random_vector[MAXTEL];
1349 int random_vector_length;
1350 char user[129];
1351 char called[MAXTEL];
1352 char calling[MAXTEL];
1353 uint32_t tx_connect_speed;
1354 uint32_t rx_connect_speed;
1355 uint32_t flags;
1356 #define SF_IPCP_ACKED 1 // Has this session seen an IPCP Ack?
1357 #define SF_LCP_ACKED 2 // LCP negotiated
1358 #define SF_CCP_ACKED 4 // CCP negotiated
1359 in_addr_t snoop_ip;
1360 uint16_t snoop_port;
1361 uint16_t sid;
1362 uint8_t filter_in;
1363 uint8_t filter_out;
1364 char reserved[18];
1365 };
1366
1367 static uint8_t *convert_session(struct oldsession *old)
1368 {
1369 static sessiont new;
1370 int i;
1371
1372 memset(&new, 0, sizeof(new));
1373
1374 new.next = old->next;
1375 new.far = old->far;
1376 new.tunnel = old->tunnel;
1377 new.flags = old->l2tp_flags;
1378 new.ip = old->ip;
1379 new.ip_pool_index = old->ip_pool_index;
1380 new.unique_id = old->unique_id;
1381 new.magic = old->magic;
1382 new.pin = old->pin;
1383 new.pout = old->pout;
1384 new.cin = old->total_cin;
1385 new.cout = old->total_cout;
1386 new.cin_delta = old->cin;
1387 new.cout_delta = old->cout;
1388 new.throttle_in = old->throttle_in;
1389 new.throttle_out = old->throttle_out;
1390 new.filter_in = old->filter_in;
1391 new.filter_out = old->filter_out;
1392 new.mru = old->mru;
1393 new.opened = old->opened;
1394 new.die = old->die;
1395 new.last_packet = old->last_packet;
1396 new.dns1 = old->dns1;
1397 new.dns2 = old->dns2;
1398 new.tbf_in = old->tbf_in;
1399 new.tbf_out = old->tbf_out;
1400 new.random_vector_length = old->random_vector_length;
1401 new.tx_connect_speed = old->tx_connect_speed;
1402 new.rx_connect_speed = old->rx_connect_speed;
1403 new.snoop_ip = old->snoop_ip;
1404 new.snoop_port = old->snoop_port;
1405 new.walled_garden = old->walled_garden;
1406
1407 memcpy(new.random_vector, old->random_vector, sizeof(new.random_vector));
1408 memcpy(new.user, old->user, sizeof(new.user));
1409 memcpy(new.called, old->called, sizeof(new.called));
1410 memcpy(new.calling, old->calling, sizeof(new.calling));
1411
1412 for (i = 0; i < MAXROUTE; i++)
1413 memcpy(&new.route[i], &old->route[i], sizeof(new.route[i]));
1414
1415 if (new.opened)
1416 {
1417 new.ppp.phase = Establish;
1418 if (old->flags & (SF_IPCP_ACKED|SF_LCP_ACKED))
1419 {
1420 new.ppp.phase = Network;
1421 new.ppp.lcp = Opened;
1422 new.ppp.ipcp = (old->flags & SF_IPCP_ACKED) ? Opened : Starting;
1423 new.ppp.ccp = (old->flags & SF_CCP_ACKED) ? Opened : Stopped;
1424 }
1425
1426 // no PPPv6 in old session
1427 new.ppp.ipv6cp = Stopped;
1428 }
1429
1430 return (uint8_t *) &new;
1431 }
1432
1433 //
1434 // Process a heartbeat..
1435 //
1436 // v3: added interval, timeout
1437 // v4: added table_version
1438 // v5: added ipv6, re-ordered session structure
1439 static int cluster_process_heartbeat(uint8_t *data, int size, int more, uint8_t *p, in_addr_t addr)
1440 {
1441 heartt *h;
1442 int s = size - (p-data);
1443 int i, type;
1444 int hb_ver = more;
1445
1446 #if HB_VERSION != 5
1447 # error "need to update cluster_process_heartbeat()"
1448 #endif
1449
1450 // we handle versions 3 through 5
1451 if (hb_ver < 3 || hb_ver > HB_VERSION) {
1452 LOG(0, 0, 0, "Received a heartbeat version that I don't support (%d)!\n", hb_ver);
1453 return -1; // Ignore it??
1454 }
1455
1456 // Ok. It's a heartbeat packet from a cluster master!
1457 if (s < sizeof(*h))
1458 goto shortpacket;
1459
1460 h = (heartt *) p;
1461 p += sizeof(*h);
1462 s -= sizeof(*h);
1463
1464 if (h->clusterid != config->bind_address)
1465 return -1; // It's not part of our cluster.
1466
1467 if (config->cluster_iam_master) { // Sanity...
1468 // Note that this MUST match the election process above!
1469
1470 LOG(0, 0, 0, "I just got a heartbeat from master %s, but _I_ am the master!\n", fmtaddr(addr, 0));
1471 if (!h->basetime) {
1472 LOG(0, 0, 0, "Heartbeat with zero basetime! Ignoring\n");
1473 return -1; // Skip it.
1474 }
1475
1476 if (hb_ver >= 4) {
1477 if (h->table_version > config->cluster_table_version) {
1478 LOG(0, 0, 0, "They've seen more state changes (%" PRIu64 " vs my %" PRIu64 ") so I'm gone!\n",
1479 h->table_version, config->cluster_table_version);
1480
1481 kill(0, SIGTERM);
1482 exit(1);
1483 }
1484 if (h->table_version < config->cluster_table_version)
1485 return -1;
1486 }
1487
1488 if (basetime > h->basetime) {
1489 LOG(0, 0, 0, "They're an older master than me so I'm gone!\n");
1490 kill(0, SIGTERM);
1491 exit(1);
1492 }
1493
1494 if (basetime < h->basetime)
1495 return -1;
1496
1497 if (my_address < addr) { // Tie breaker.
1498 LOG(0, 0, 0, "They're a higher IP address than me, so I'm gone!\n");
1499 kill(0, SIGTERM);
1500 exit(1);
1501 }
1502
1503 //
1504 		// Send it a unicast heartbeat to give it a chance to die.
1505 // NOTE: It's actually safe to do seq-number - 1 without checking
1506 // for wrap around.
1507 //
1508 cluster_catchup_slave(config->cluster_seq_number - 1, addr);
1509
1510 return -1; // Skip it.
1511 }
1512
1513 //
1514 // Try and guard against a stray master appearing.
1515 //
1516 // Ignore heartbeats received from another master before the
1517 // timeout (less a smidgen) for the old master has elapsed.
1518 //
1519 // Note that after a clean failover, the cluster_master_address
1520 // is cleared, so this doesn't run.
1521 //
1522 if (config->cluster_master_address && addr != config->cluster_master_address) {
1523 LOG(0, 0, 0, "Ignoring stray heartbeat from %s, current master %s has not yet timed out (last heartbeat %.1f seconds ago).\n",
1524 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1),
1525 0.1 * (TIME - config->cluster_last_hb));
1526 return -1; // ignore
1527 }
1528
1529 if (config->cluster_seq_number == -1) // Don't have one. Just align to the master...
1530 config->cluster_seq_number = h->seq;
1531
1532 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1533 config->cluster_last_hb_ver = hb_ver; // remember what cluster version the master is using
1534
1535 if (config->cluster_seq_number != h->seq) { // Out of sequence heartbeat!
1536 static int lastseen_seq = 0;
1537 static time_t lastseen_time = 0;
1538
1539 // limit to once per second for a particular seq#
1540 int ask = (config->cluster_seq_number != lastseen_seq || time_now != lastseen_time);
1541
1542 LOG(1, 0, 0, "HB: Got seq# %d but was expecting %d. %s.\n",
1543 h->seq, config->cluster_seq_number,
1544 ask ? "Asking for resend" : "Ignoring");
1545
1546 if (ask)
1547 {
1548 lastseen_seq = config->cluster_seq_number;
1549 lastseen_time = time_now;
1550 peer_send_message(addr, C_LASTSEEN, config->cluster_seq_number, NULL, 0);
1551 }
1552
1553 config->cluster_last_hb = TIME; // Reset to ensure that we don't become master!!
1554
1555 // Just drop the packet. The master will resend it as part of the catchup.
1556
1557 return 0;
1558 }
1559 // Save the packet in our buffer.
1560 // This is needed in case we become the master.
1561 config->cluster_seq_number = (h->seq+1)%HB_MAX_SEQ;
1562 i = h->seq % HB_HISTORY_SIZE;
1563 past_hearts[i].seq = h->seq;
1564 past_hearts[i].size = size;
1565 memcpy(&past_hearts[i].data, data, size); // Save it.
1566
1567
1568 // Check that we don't have too many undefined sessions, and
1569 // that the free session pointer is correct.
1570 cluster_check_sessions(h->highsession, h->freesession, h->highbundle, h->hightunnel);
1571
1572 if (h->interval != config->cluster_hb_interval)
1573 {
1574 LOG(2, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
1575 h->interval, config->cluster_hb_interval);
1576
1577 config->cluster_hb_interval = h->interval;
1578 }
1579
1580 if (h->timeout != config->cluster_hb_timeout)
1581 {
1582 LOG(2, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
1583 h->timeout, config->cluster_hb_timeout);
1584
1585 config->cluster_hb_timeout = h->timeout;
1586 }
1587
1588 // Ok. process the packet...
1589 while ( s > 0) {
1590
1591 type = *((uint32_t *) p);
1592 p += sizeof(uint32_t);
1593 s -= sizeof(uint32_t);
1594
1595 more = *((uint32_t *) p);
1596 p += sizeof(uint32_t);
1597 s -= sizeof(uint32_t);
1598
1599 switch (type) {
1600 case C_CSESSION: { // Compressed session structure.
1601 uint8_t c[ sizeof(sessiont) + 2];
1602 int size;
1603 uint8_t *orig_p = p;
1604
1605 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c) );
1606 s -= (p - orig_p);
1607
1608 // session struct changed with v5
1609 if (hb_ver < 5)
1610 {
1611 if (size != sizeof(struct oldsession)) {
1612 LOG(0, 0, 0, "DANGER: Received a v%d CSESSION that didn't decompress correctly!\n", hb_ver);
1613 // Now what? Should exit! No-longer up to date!
1614 break;
1615 }
1616 cluster_recv_session(more, convert_session((struct oldsession *) c));
1617 break;
1618 }
1619
1620 if (size != sizeof(sessiont) ) { // Ouch! Very very bad!
1621 LOG(0, 0, 0, "DANGER: Received a CSESSION that didn't decompress correctly!\n");
1622 // Now what? Should exit! No-longer up to date!
1623 break;
1624 }
1625
1626 cluster_recv_session(more, c);
1627 break;
1628 }
1629 case C_SESSION:
1630 if (hb_ver < 5)
1631 {
1632 if (s < sizeof(struct oldsession))
1633 goto shortpacket;
1634
1635 cluster_recv_session(more, convert_session((struct oldsession *) p));
1636
1637 p += sizeof(struct oldsession);
1638 s -= sizeof(struct oldsession);
1639 break;
1640 }
1641
1642 if ( s < sizeof(session[more]))
1643 goto shortpacket;
1644
1645 cluster_recv_session(more, p);
1646
1647 p += sizeof(session[more]);
1648 s -= sizeof(session[more]);
1649 break;
1650
1651 case C_CTUNNEL: { // Compressed tunnel structure.
1652 uint8_t c[ sizeof(tunnelt) + 2];
1653 int size;
1654 uint8_t *orig_p = p;
1655
1656 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1657 s -= (p - orig_p);
1658
1659 if (size != sizeof(tunnelt) ) { // Ouch! Very very bad!
1660 LOG(0, 0, 0, "DANGER: Received a CTUNNEL that didn't decompress correctly!\n");
1661 // Now what? Should exit! No-longer up to date!
1662 break;
1663 }
1664
1665 cluster_recv_tunnel(more, c);
1666 break;
1667
1668 }
1669 case C_TUNNEL:
1670 if ( s < sizeof(tunnel[more]))
1671 goto shortpacket;
1672
1673 cluster_recv_tunnel(more, p);
1674
1675 p += sizeof(tunnel[more]);
1676 s -= sizeof(tunnel[more]);
1677 break;
1678
1679 case C_CBUNDLE: { // Compressed bundle structure.
1680 uint8_t c[ sizeof(bundlet) + 2];
1681 int size;
1682 uint8_t *orig_p = p;
1683
1684 size = rle_decompress((uint8_t **) &p, s, c, sizeof(c));
1685 s -= (p - orig_p);
1686
1687 if (size != sizeof(bundlet) ) { // Ouch! Very very bad!
1688 LOG(0, 0, 0, "DANGER: Received a CBUNDLE that didn't decompress correctly!\n");
1689 // Now what? Should exit! No-longer up to date!
1690 break;
1691 }
1692
1693 cluster_recv_bundle(more, c);
1694 break;
1695
1696 }
1697 case C_BUNDLE:
1698 if ( s < sizeof(bundle[more]))
1699 goto shortpacket;
1700
1701 cluster_recv_bundle(more, p);
1702
1703 p += sizeof(bundle[more]);
1704 s -= sizeof(bundle[more]);
1705 break;
1706 default:
1707 LOG(0, 0, 0, "DANGER: I received a heartbeat element where I didn't understand the type! (%d)\n", type);
1708 return -1; // can't process any more of the packet!!
1709 }
1710 }
1711
1712 if (config->cluster_master_address != addr)
1713 {
1714 LOG(0, 0, 0, "My master just changed from %s to %s!\n",
1715 fmtaddr(config->cluster_master_address, 0), fmtaddr(addr, 1));
1716
1717 config->cluster_master_address = addr;
1718 }
1719
1720 config->cluster_last_hb = TIME; // Successfully received a heartbeat!
1721 config->cluster_table_version = h->table_version;
1722 return 0;
1723
1724 shortpacket:
1725 LOG(0, 0, 0, "I got an incomplete heartbeat packet! This means I'm probably out of sync!!\n");
1726 return -1;
1727 }
1728
1729 //
1730 // We got a packet on the cluster port!
1731 // Handle pings, lastseens, and heartbeats!
1732 //
1733 int processcluster(uint8_t *data, int size, in_addr_t addr)
1734 {
1735 int type, more;
1736 uint8_t *p = data;
1737 int s = size;
1738
1739 if (addr == my_address)
1740 return -1; // Ignore it. Something looped back the multicast!
1741
1742 LOG(5, 0, 0, "Process cluster: %d bytes from %s\n", size, fmtaddr(addr, 0));
1743
1744 if (s <= 0) // Any data there??
1745 return -1;
1746
1747 if (s < 8)
1748 goto shortpacket;
1749
1750 type = *((uint32_t *) p);
1751 p += sizeof(uint32_t);
1752 s -= sizeof(uint32_t);
1753
1754 more = *((uint32_t *) p);
1755 p += sizeof(uint32_t);
1756 s -= sizeof(uint32_t);
1757
1758 switch (type)
1759 {
1760 case C_PING: // Update the peers table.
1761 return cluster_add_peer(addr, more, (pingt *) p, s);
1762
1763 case C_MASTER: // Our master is wrong
1764 return cluster_set_master(addr, more);
1765
1766 case C_LASTSEEN: // Catch up a slave (slave missed a packet).
1767 return cluster_catchup_slave(more, addr);
1768
1769 case C_FORWARD: // Forwarded control packet. pass off to processudp.
1770 case C_FORWARD_DAE: // Forwarded DAE packet. pass off to processdae.
1771 if (!config->cluster_iam_master)
1772 {
1773 LOG(0, 0, 0, "I'm not the master, but I got a C_FORWARD%s from %s?\n",
1774 type == C_FORWARD_DAE ? "_DAE" : "", fmtaddr(addr, 0));
1775
1776 return -1;
1777 }
1778 else
1779 {
1780 struct sockaddr_in a;
1781 a.sin_addr.s_addr = more;
1782
1783 a.sin_port = *(int *) p;
1784 s -= sizeof(int);
1785 p += sizeof(int);
1786
1787 LOG(4, 0, 0, "Got a forwarded %spacket... (%s:%d)\n",
1788 type == C_FORWARD_DAE ? "DAE " : "", fmtaddr(more, 0), a.sin_port);
1789
1790 STAT(recv_forward);
1791 if (type == C_FORWARD_DAE)
1792 {
1793 struct in_addr local;
1794 local.s_addr = config->bind_address ? config->bind_address : my_address;
1795 processdae(p, s, &a, sizeof(a), &local);
1796 }
1797 else
1798 processudp(p, s, &a);
1799
1800 return 0;
1801 }
1802
1803 case C_THROTTLE: { // Receive a forwarded packet from a slave.
1804 if (!config->cluster_iam_master) {
1805 LOG(0, 0, 0, "I'm not the master, but I got a C_THROTTLE from %s?\n", fmtaddr(addr, 0));
1806 return -1;
1807 }
1808
1809 		tbf_queue_packet(more, p, s);	// The TBF id tells whether it goes in or out.
1810 return 0;
1811 }
1812 case C_GARDEN:
1813 // Receive a walled garden packet from a slave.
1814 if (!config->cluster_iam_master) {
1815 LOG(0, 0, 0, "I'm not the master, but I got a C_GARDEN from %s?\n", fmtaddr(addr, 0));
1816 return -1;
1817 }
1818
1819 tun_write(p, s);
1820 return 0;
1821
1822 case C_BYTES:
1823 if (!config->cluster_iam_master) {
1824 LOG(0, 0, 0, "I'm not the master, but I got a C_BYTES from %s?\n", fmtaddr(addr, 0));
1825 return -1;
1826 }
1827
1828 return cluster_handle_bytes(p, s);
1829
1830 case C_KILL: // The master asked us to die!? (usually because we're too out of date).
1831 if (config->cluster_iam_master) {
1832 LOG(0, 0, 0, "_I_ am master, but I received a C_KILL from %s! (Seq# %d)\n", fmtaddr(addr, 0), more);
1833 return -1;
1834 }
1835 if (more != config->cluster_seq_number) {
1836 LOG(0, 0, 0, "The master asked us to die but the seq number didn't match!?\n");
1837 return -1;
1838 }
1839
1840 if (addr != config->cluster_master_address) {
1841 LOG(0, 0, 0, "Received a C_KILL from %s which doesn't match config->cluster_master_address (%s)\n",
1842 fmtaddr(addr, 0), fmtaddr(config->cluster_master_address, 1));
1843 // We can only warn about it. The master might really have switched!
1844 }
1845
1846 LOG(0, 0, 0, "Received a valid C_KILL: I'm going to die now.\n");
1847 kill(0, SIGTERM);
1848 		exit(0);	// Let's be paranoid.
1849 return -1; // Just signalling the compiler.
1850
1851 case C_HEARTBEAT:
1852 LOG(4, 0, 0, "Got a heartbeat from %s\n", fmtaddr(addr, 0));
1853 return cluster_process_heartbeat(data, size, more, p, addr);
1854
1855 default:
1856 LOG(0, 0, 0, "Strange type packet received on cluster socket (%d)\n", type);
1857 return -1;
1858 }
1859 return 0;
1860
1861 shortpacket:
1862 LOG(0, 0, 0, "I got a _short_ cluster heartbeat packet! This means I'm probably out of sync!!\n");
1863 return -1;
1864 }
1865
1866 //====================================================================================================
1867
1868 int cmd_show_cluster(struct cli_def *cli, char *command, char **argv, int argc)
1869 {
1870 int i;
1871
1872 if (CLI_HELP_REQUESTED)
1873 return CLI_HELP_NO_ARGS;
1874
1875 cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
1876 cli_print(cli, "My address : %s", fmtaddr(my_address, 0));
1877 cli_print(cli, "VIP address : %s", fmtaddr(config->bind_address, 0));
1878 cli_print(cli, "Multicast address: %s", fmtaddr(config->cluster_address, 0));
1879 cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
1880
1881 if (!config->cluster_iam_master) {
1882 cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
1883 config->cluster_master_address
1884 ? fmtaddr(config->cluster_master_address, 0)
1885 : "Not defined",
1886 0.1 * (TIME - config->cluster_last_hb));
1887 cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
1888 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1889 cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
1890 cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
1891 cli_print(cli, "%d bundles undefined of %d", config->cluster_undefined_bundles, config->cluster_highest_bundleid);
1892 cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);
1893 } else {
1894 cli_print(cli, "Table version # : %" PRIu64, config->cluster_table_version);
1895 cli_print(cli, "Next heartbeat # : %d", config->cluster_seq_number);
1896 cli_print(cli, "Highest session : %d", config->cluster_highest_sessionid);
1897 cli_print(cli, "Highest bundle : %d", config->cluster_highest_bundleid);
1898 cli_print(cli, "Highest tunnel : %d", config->cluster_highest_tunnelid);
1899 cli_print(cli, "%d changes queued for sending", config->cluster_num_changes);
1900 }
1901 cli_print(cli, "%d peers.", num_peers);
1902
1903 if (num_peers)
1904 cli_print(cli, "%20s %10s %8s", "Address", "Basetime", "Age");
1905 for (i = 0; i < num_peers; ++i) {
1906 cli_print(cli, "%20s %10u %8d", fmtaddr(peers[i].peer, 0),
1907 peers[i].basetime, TIME - peers[i].timestamp);
1908 }
1909 return CLI_OK;
1910 }
1911
1912 //
1913 // Simple run-length-encoding compression.
1914 // Format is
1915 // 1 byte < 128 = count of non-zero bytes following. // Not legal to be zero.
1916 // n non-zero bytes;
1917 // or
1918 //	1 byte > 128 = (count - 128) run of zero bytes.
1919 // repeat.
1920 // count == 0 indicates end of compressed stream.
1921 //
1922 // Compress from 'src' into 'dst'. return number of bytes
1923 // used from 'dst'.
1924 // Updates *src_p to indicate 1 past last bytes used.
1925 //
1926 // We could get an extra byte in the zero runs by storing (count-1)
1927 // but I'm playing it safe.
1928 //
1929 // Worst case is a 50% expansion in space required (trying to
1930 // compress { 0x00, 0x01 } * N )
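//
// Worked example (illustrative, not from the original source): the six
// input bytes { 0x05, 0x05, 0x00, 0x00, 0x00, 0x07 } encode as
// { 0x02, 0x05, 0x05, 0x83, 0x01, 0x07, 0x00 }: a 2-byte literal run,
// a 3-byte zero run (0x80 | 3), a 1-byte literal run, then the stop byte.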
1931 static int rle_compress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
1932 {
1933 int count;
1934 int orig_dsize = dsize;
1935 uint8_t *x, *src;
1936 src = *src_p;
1937
1938 while (ssize > 0 && dsize > 2) {
1939 count = 0;
1940 x = dst++; --dsize; // Reserve space for count byte..
1941
1942 if (*src) { // Copy a run of non-zero bytes.
1943 while (*src && count < 127 && ssize > 0 && dsize > 1) { // Count number of non-zero bytes.
1944 *dst++ = *src++;
1945 --dsize; --ssize;
1946 ++count;
1947 }
1948 			*x = count;	// Store number of non-zero bytes. Guaranteed to be non-zero!
1949
1950 } else { // Compress a run of zero bytes.
1951 while (*src == 0 && count < 127 && ssize > 0) {
1952 ++src;
1953 --ssize;
1954 ++count;
1955 }
1956 *x = count | 0x80 ;
1957 }
1958 }
1959
1960 *dst++ = 0x0; // Add Stop byte.
1961 --dsize;
1962
1963 *src_p = src;
1964 return (orig_dsize - dsize);
1965 }
1966
1967 //
1968 // Decompresses from '*src_p' into 'dst'.
1969 // 'dsize' is the size of the decompression buffer available.
1970 //
1971 // Returns the number of 'dst' bytes used.
1972 // Updates '*src_p' to point to the first un-used source byte.
1977 static int rle_decompress(uint8_t **src_p, int ssize, uint8_t *dst, int dsize)
1978 {
1979 int count;
1980 int orig_dsize = dsize;
1981 uint8_t *src = *src_p;
1982
1983 while (ssize >0 && dsize > 0) { // While there's more to decompress, and there's room in the decompress buffer...
1984 count = *src++; --ssize; // get the count byte from the source.
1985 if (count == 0x0) // End marker reached? If so, finish.
1986 break;
1987
1988 if (count & 0x80) { // Decompress a run of zeros
1989 for (count &= 0x7f ; count > 0 && dsize > 0; --count) {
1990 *dst++ = 0x0;
1991 --dsize;
1992 }
1993 } else { // Copy run of non-zero bytes.
1994 for ( ; count > 0 && ssize && dsize; --count) { // Copy non-zero bytes across.
1995 *dst++ = *src++;
1996 --ssize; --dsize;
1997 }
1998 }
1999 }
2000 *src_p = src;
2001 return (orig_dsize - dsize);
2002 }
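/*
 * Minimal round-trip sketch of the RLE helpers above (illustrative only,
 * not compiled in; the zeroed sessiont is just an assumed test input).
 */
#if 0
static void rle_example(void)
{
	sessiont s;
	uint8_t compressed[sizeof(s) * 2];	// allow for the ~50% worst-case expansion
	uint8_t restored[sizeof(s)];
	uint8_t *src = (uint8_t *) &s;
	int csize, dsize;

	memset(&s, 0, sizeof(s));

	csize = rle_compress(&src, sizeof(s), compressed, sizeof(compressed));
	// 'src' now points one past the last source byte consumed

	src = compressed;
	dsize = rle_decompress(&src, csize, restored, sizeof(restored));
	// dsize == sizeof(s) and restored[] holds the original zeroed structure
}
#endif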