// L2TPNS Clustering Stuff
-char const *cvs_id_cluster = "$Id: cluster.c,v 1.7 2004-07-07 09:09:53 bodea Exp $";
+char const *cvs_id_cluster = "$Id: cluster.c,v 1.14 2004-10-30 07:35:00 bodea Exp $";
#include <stdio.h>
#include <sys/file.h>
int uptodate;
} peers[CLUSTER_MAX_SIZE]; // List of all the peers we've heard from.
static int num_peers; // Number of peers in list.
-static int have_peers; // At least one up to date peer
int rle_decompress(u8 ** src_p, int ssize, u8 *dst, int dsize);
int rle_compress(u8 ** src_p, int ssize, u8 *dst, int dsize);
//
int cluster_init()
{
- struct sockaddr_in addr;
- struct sockaddr_in interface_addr;
- struct ip_mreq mreq;
- struct ifreq ifr;
- int opt = 0;
+ struct sockaddr_in addr;
+ struct sockaddr_in interface_addr;
+ struct ip_mreq mreq;
+ struct ifreq ifr;
+ int opt = 0;
- config->cluster_undefined_sessions = MAXSESSION-1;
- config->cluster_undefined_tunnels = MAXTUNNEL-1;
+ config->cluster_undefined_sessions = MAXSESSION-1;
+ config->cluster_undefined_tunnels = MAXTUNNEL-1;
- if (!config->cluster_address)
- return 0;
- if (!*config->cluster_interface)
- return 0;
+ if (!config->cluster_address)
+ return 0;
+ if (!*config->cluster_interface)
+ return 0;
- cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ cluster_sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_port = htons(CLUSTERPORT);
- addr.sin_addr.s_addr = INADDR_ANY;
- setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &addr, sizeof(addr));
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_addr.s_addr = INADDR_ANY;
+ opt = 1; // SO_REUSEADDR takes an int flag
+ setsockopt(cluster_sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
- if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
- {
- log(0, 0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
- return -1;
- }
+ if (bind(cluster_sockfd, (void *) &addr, sizeof(addr)) < 0)
+ {
+ log(0, 0, 0, 0, "Failed to bind cluster socket: %s\n", strerror(errno));
+ return -1;
+ }
- strcpy(ifr.ifr_name, config->cluster_interface);
- if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0) {
- log(0, 0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
- return -1;
- }
+ strcpy(ifr.ifr_name, config->cluster_interface);
+ if (ioctl(cluster_sockfd, SIOCGIFADDR, &ifr) < 0)
+ {
+ log(0, 0, 0, 0, "Failed to get interface address for (%s): %s\n", config->cluster_interface, strerror(errno));
+ return -1;
+ }
- memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr) );
- my_address = interface_addr.sin_addr.s_addr;
+ memcpy(&interface_addr, &ifr.ifr_addr, sizeof(interface_addr));
+ my_address = interface_addr.sin_addr.s_addr;
- // Join multicast group.
- mreq.imr_multiaddr.s_addr = config->cluster_address;
- mreq.imr_interface = interface_addr.sin_addr;
+ // Join multicast group.
+ mreq.imr_multiaddr.s_addr = config->cluster_address;
+ mreq.imr_interface = interface_addr.sin_addr;
- opt = 0; // Turn off multicast loopback.
- setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
+ opt = 0; // Turn off multicast loopback.
+ setsockopt(cluster_sockfd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
- if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) {
- log(0, 0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
- return -1;
- }
+ if (setsockopt(cluster_sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
+ {
+ log(0, 0, 0, 0, "Failed to setsockopt (join mcast group): %s\n", strerror(errno));
+ return -1;
+ }
- if (setsockopt (cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0) {
- log(0, 0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
- return -1;
- }
+ if (setsockopt (cluster_sockfd, IPPROTO_IP, IP_MULTICAST_IF, &interface_addr, sizeof(interface_addr)) < 0)
+ {
+ log(0, 0, 0, 0, "Failed to setsockopt (set mcast interface): %s\n", strerror(errno));
+ return -1;
+ }
- config->cluster_last_hb = TIME;
- config->cluster_seq_number = -1;
+ config->cluster_last_hb = TIME;
+ config->cluster_seq_number = -1;
- return cluster_sockfd;
+ return cluster_sockfd;
}
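//
// For reference, a minimal standalone version of the join sequence above
// (bind, join the group, disable loopback, pin the outgoing interface).
// This is an illustrative sketch, not part of cluster.c: the function name,
// the placeholder group address and the bare return codes are made up for
// the example (needs <string.h>, <sys/socket.h>, <netinet/in.h>, <arpa/inet.h>).
//
static int join_cluster_group_example(char const *group, int port, struct in_addr ifaddr)
{
	struct sockaddr_in addr;
	struct ip_mreq mreq;
	int opt;
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

	if (fd < 0) return -1;

	opt = 1; // allow quick restarts on the well-known port
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = INADDR_ANY;
	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) return -1;

	mreq.imr_multiaddr.s_addr = inet_addr(group); // e.g. "239.192.13.13" (placeholder)
	mreq.imr_interface = ifaddr; // address of the cluster interface
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) return -1;

	opt = 0; // don't hear our own transmissions
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &opt, sizeof(opt));
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &ifaddr, sizeof(ifaddr));

	return fd;
}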
int cluster_send_data(void *data, int datalen)
{
- struct sockaddr_in addr = {0};
+ struct sockaddr_in addr = {0};
- if (!cluster_sockfd) return -1;
- if (!config->cluster_address) return 0;
+ if (!cluster_sockfd) return -1;
+ if (!config->cluster_address) return 0;
- addr.sin_addr.s_addr = config->cluster_address;
- addr.sin_port = htons(CLUSTERPORT);
- addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = config->cluster_address;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_family = AF_INET;
- log(5,0,0,0, "Cluster send data: %d bytes\n", datalen);
+ log(5,0,0,0, "Cluster send data: %d bytes\n", datalen);
- if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
- {
- log(0, 0, 0, 0, "sendto: %s\n", strerror(errno));
- return -1;
- }
+ if (sendto(cluster_sockfd, data, datalen, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
+ {
+ log(0, 0, 0, 0, "sendto: %s\n", strerror(errno));
+ return -1;
+ }
- return 0;
+ return 0;
}
//
//
int peer_send_data(u32 peer, char * data, int size)
{
- struct sockaddr_in addr = {0};
+ struct sockaddr_in addr = {0};
- if (!cluster_sockfd) return -1;
- if (!config->cluster_address) return 0;
+ if (!cluster_sockfd) return -1;
+ if (!config->cluster_address) return 0;
- if (!peer) // Odd??
- return -1;
+ if (!peer) // Odd??
+ return -1;
- addr.sin_addr.s_addr = peer;
- addr.sin_port = htons(CLUSTERPORT);
- addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = peer;
+ addr.sin_port = htons(CLUSTERPORT);
+ addr.sin_family = AF_INET;
- log_hex(5, "Peer send", data, size);
+ log_hex(5, "Peer send", data, size);
- if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
- {
- log(0, 0, 0, 0, "sendto: %s\n", strerror(errno));
- return -1;
- }
+ if (sendto(cluster_sockfd, data, size, MSG_NOSIGNAL, (void *) &addr, sizeof(addr)) < 0)
+ {
+ log(0, 0, 0, 0, "sendto: %s\n", strerror(errno));
+ return -1;
+ }
- return 0;
+ return 0;
}
//
// Forward a state changing packet to the master.
//
// The master just processes the payload as if it had
-// received it off the tap device.
+// received it off the tun device.
//
-int master_forward_packet(char * data, int size, u32 addr, int port)
+int master_forward_packet(char *data, int size, u32 addr, int port)
{
char buf[65536]; // Vast overkill.
- char * p = buf;
+ char *p = buf;
if (!config->cluster_master_address) // No election has been held yet. Just skip it.
return -1;
// token bucket queue, and lets normal processing take care
// of it.
//
-int master_throttle_packet(int tbfid, char * data, int size)
+int master_throttle_packet(int tbfid, char *data, int size)
{
char buf[65536]; // Vast overkill.
- char * p = buf;
+ char *p = buf;
if (!config->cluster_master_address) // No election has been held yet. Just skip it.
return -1;
//
static void send_heartbeat(int seq, char * data, int size)
{
- int i;
- static int last_seq = -1;
-
- if (last_seq != -1 && (seq != (last_seq+1)%HB_MAX_SEQ) ) {
- log(0,0,0,0, "FATAL: Sequence number skipped! (%d != %d)\n",
- seq, last_seq);
- }
- last_seq = seq;
-
- if (size > sizeof(past_hearts[0].data)) {
- log(0,0,0,0, "Tried to heartbeat something larger than the maximum packet!\n");
- kill(0, SIGTERM);
- exit(1);
- }
- i = seq % HB_HISTORY_SIZE;
- past_hearts[i].seq = seq;
- past_hearts[i].size = size;
- memcpy(&past_hearts[i].data, data, size); // Save it.
- cluster_send_data(data, size);
+ int i;
+
+ if (size > sizeof(past_hearts[0].data))
+ {
+ log(0,0,0,0, "Tried to heartbeat something larger than the maximum packet!\n");
+ kill(0, SIGTERM);
+ exit(1);
+ }
+ i = seq % HB_HISTORY_SIZE;
+ past_hearts[i].seq = seq;
+ past_hearts[i].size = size;
+ memcpy(&past_hearts[i].data, data, size); // Save it.
+ cluster_send_data(data, size);
}
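//
// The history above is a ring keyed by sequence number: heartbeat seq lands
// in slot seq % HB_HISTORY_SIZE, and a resend is only possible while that
// slot still records the same seq. A self-contained sketch of the scheme
// (the names and sizes below are illustrative, not the ones in this file;
// needs <string.h>):
//
#define HB_RING_SIZE 64
#define HB_RING_MTU 1500

struct hb_ring_slot {
	int seq;
	int size;
	char data[HB_RING_MTU];
};

static struct hb_ring_slot hb_ring[HB_RING_SIZE];

static void hb_ring_save(int seq, char const *data, int size)
{
	struct hb_ring_slot *s = &hb_ring[seq % HB_RING_SIZE];
	s->seq = seq;
	s->size = size;
	memcpy(s->data, data, size); // caller guarantees size <= HB_RING_MTU
}

static struct hb_ring_slot *hb_ring_find(int seq)
{
	struct hb_ring_slot *s = &hb_ring[seq % HB_RING_SIZE];
	return s->seq == seq ? s : 0; // 0: slot recycled, the slave can't be caught up
}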
//
//
// Walk the session counters looking for non-zero ones to send
-// to the master. We send up to 100 of them at one time.
-// We examine a maximum of 2000 sessions.
+// to the master. We send up to 600 of them at one time.
+// We examine a maximum of 3000 sessions.
// (50k max session should mean that we normally
// examine the entire session table every 25 seconds).
-#define MAX_B_RECS (400)
+#define MAX_B_RECS (600)
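//
// Rough arithmetic on the pass size: at the old limit of 2000 sessions per
// pass a ~50k session table took about 25 passes to cover (hence the "25
// seconds" figure retained above); at 3000 per pass the same table is
// covered in roughly 17 passes.
//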
void master_update_counts(void)
{
int i, c;
return;
}
+//
+// On the master, check how our slaves are going. If
+// one of them's not up-to-date we'll heartbeat faster.
+// If we don't have any of them, then we need to turn
+// on our own packet handling!
+//
+void cluster_check_slaves(void)
+{
+ int i;
+ static int have_peers = 0;
+ int had_peers = have_peers;
+ clockt t = TIME;
+
+ if (!config->cluster_iam_master)
+ return; // Only runs on the master...
+
+ config->cluster_iam_uptodate = 1; // cleared in loop below
+
+ for (i = have_peers = 0; i < num_peers; i++)
+ {
+ if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
+ continue; // Stale peer! Skip them.
+
+ if (!peers[i].basetime)
+ continue; // Shutdown peer! Skip them.
+
+ if (peers[i].uptodate)
+ have_peers = 1;
+
+ if (!peers[i].uptodate)
+ config->cluster_iam_uptodate = 0; // Start fast heartbeats
+ }
+
+#ifdef BGP
+ // master lost all slaves, need to handle traffic ourself
+ if (bgp_configured && had_peers && !have_peers)
+ bgp_enable_routing(1);
+ else if (bgp_configured && !had_peers && have_peers)
+ bgp_enable_routing(0);
+#endif /* BGP */
+}
+
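//
// The liveness test used in the loop above, written out as a predicate.
// This is a sketch, not a helper that exists in this file; times are in
// the clockt units used by TIME (tenths of a second, going by the 0.1 *
// conversions elsewhere in this file).
//
static int peer_is_live(clockt last_seen, time_t peer_basetime, clockt now, int hb_timeout)
{
	if (!peer_basetime)
		return 0; // peer announced a shutdown
	return (last_seen + hb_timeout) >= now; // heard from it within the timeout window
}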
//
// Check that we have a master. If it's been too
// long since we heard from a master then hold an election.
//
void cluster_check_master(void)
{
- int i, count, tcount, high_sid = 0;
+ int i, count, tcount, high_unique_id = 0;
int last_free = 0;
- int had_peers = have_peers;
clockt t = TIME;
static int probed = 0;
- if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
+ if (config->cluster_iam_master)
+ return; // Only runs on the slaves...
+
+ // If the master is late (missed 2 heartbeats by a second and a
+ // hair) it may be that the switch has dropped us from the
+ // multicast group, try unicasting one probe to the master
+ // which will hopefully respond with a unicast heartbeat that
+ // will allow us to limp along until the querier next runs.
+ if (TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
{
- // If the master is late (missed 2 hearbeats by a second and a
- // hair) it may be that the switch has dropped us from the
- // multicast group, try unicasting one probe to the master
- // which will hopefully respond with a unicast heartbeat that
- // will allow us to limp along until the querier next runs.
- if (config->cluster_master_address
- && TIME > (config->cluster_last_hb + 2 * config->cluster_hb_interval + 11))
+ if (!probed && config->cluster_master_address)
{
- if (!probed)
- {
- probed = 1;
- log(1, 0, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
- TIME - (config->cluster_last_hb + config->cluster_hb_interval));
-
- peer_send_message(config->cluster_master_address,
- C_LASTSEEN, config->cluster_seq_number, NULL, 0);
- }
- } else { // We got a recent heartbeat; reset the probe flag.
- probed = 0;
- }
+ probed = 1;
+ log(1, 0, 0, 0, "Heartbeat from master %.1fs late, probing...\n",
+ 0.1 * (TIME - (config->cluster_last_hb + config->cluster_hb_interval)));
- if (!config->cluster_iam_master)
- return; // Everything's ok. return.
-
- // Master needs to check peer state
+ peer_send_message(config->cluster_master_address,
+ C_LASTSEEN, config->cluster_seq_number, NULL, 0);
+ }
+ } else { // We got a recent heartbeat; reset the probe flag.
+ probed = 0;
}
- config->cluster_last_hb = TIME + 1;
+ if (TIME < (config->cluster_last_hb + config->cluster_hb_timeout))
+ return; // Everything's ok!
- if (config->cluster_iam_master)
- config->cluster_iam_uptodate = 1; // cleared in loop below
- else
- log(0,0,0,0, "Master timed out! Holding election...\n");
+ config->cluster_last_hb = TIME + 1; // Just the one election thanks.
+ log(0,0,0,0, "Master timed out! Holding election...\n");
- for (i = have_peers = 0; i < num_peers; i++)
+ for (i = 0; i < num_peers; i++)
{
if ((peers[i].timestamp + config->cluster_hb_timeout) < t)
continue; // Stale peer! Skip them.
if (!peers[i].basetime)
continue; // Shutdown peer! Skip them.
- if (peers[i].uptodate)
- have_peers = 1;
-
- if (config->cluster_iam_master)
- {
- if (!peers[i].uptodate)
- config->cluster_iam_uptodate = 0; // Start fast heartbeats
-
- continue;
- }
-
if (peers[i].basetime < basetime) {
log(1,0,0,0, "Expecting %s to become master\n", inet_toa(peers[i].peer) );
return; // They'll win the election. Get out of here.
}
}
- if (config->cluster_iam_master) // If we're the master, we've already won
- {
-#ifdef BGP
- // master lost all slaves, need to handle traffic ourself
- if (bgp_configured && had_peers && !have_peers)
- bgp_enable_routing(1);
-#endif /* BGP */
- return;
- }
-
// Wow, it's been ages since I last heard a heartbeat
// and I'm better than any of my peers, so it's time
// to become a master!!!
log(0,0,0,0, "I am declaring myself the master!\n");
-#ifdef BGP
- if (bgp_configured && have_peers)
- bgp_enable_routing(0); /* stop handling traffic */
-#endif /* BGP */
-
if (config->cluster_seq_number == -1)
config->cluster_seq_number = 0;
// Re-string the free list.
// Find the ID of the highest session.
last_free = 0;
- high_sid = 0;
+ high_unique_id = 0;
config->cluster_highest_sessionid = 0;
for (i = 0, count = 0; i < MAXSESSION; ++i) {
if (session[i].tunnel == T_UNDEF) {
session[i].radius = 0; // Reset authentication as the radius blocks aren't up to date.
- if (session[i].sid >= high_sid) // This is different to the index into the session table!!!
- high_sid = session[i].sid+1;
+ if (session[i].unique_id >= high_unique_id) // This is different to the index into the session table!!!
+ high_unique_id = session[i].unique_id+1;
session[i].tbf_in = session[i].tbf_out = 0; // Remove stale pointers from old master.
throttle_session(i, session[i].throttle);
-// I'm unsure about this. --mo
-// It's potentially a good thing, but it could send a
-// LOT of packets.
-// if (session[i].throttle)
-// cluster_send_session(s); // Tell the slaves about the new tbf indexes.
-
if (session[i].tunnel != T_FREE && i > config->cluster_highest_sessionid)
config->cluster_highest_sessionid = i;
-
}
session[last_free].next = 0; // End of chain.
- last_sid = high_sid; // Keep track of the highest used session ID.
+ last_id = high_unique_id; // Keep track of the highest used session ID.
become_master();
config->cluster_undefined_sessions = 0;
config->cluster_undefined_tunnels = 0;
- config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
+ config->cluster_iam_uptodate = 1; // assume all peers are up-to-date
// FIXME. We need to fix up the tunnel control message
// queue here! There's a number of other variables we
exit(1);
}
- log(3,0,0,0, "Sending heartbeat #%d with %d changes (%d x-sess, %d x-tunnels, %d highsess, %d hightun size %d)\n",
+ log(3,0,0,0, "Sending heartbeat #%d with %d changes (%d x-sess, %d x-tunnels, %d highsess, %d hightun, size %d)\n",
h.seq, config->cluster_num_changes, count, tcount, config->cluster_highest_sessionid,
config->cluster_highest_tunnelid, (p-buff));
while (seq != config->cluster_seq_number) {
s = seq%HB_HISTORY_SIZE;
if (seq != past_hearts[s].seq) {
- int i;
log(0,0,0,0, "Tried to re-send heartbeat for %s but %d doesn't match %d! (%d,%d)\n",
inet_toa(slave), seq, past_hearts[s].seq, s, config->cluster_seq_number);
-
- for (i = 0; i < HB_HISTORY_SIZE; ++i) {
- log(0,0,0,0, "\tentry %3d: seq %d (size %d)\n", i, past_hearts[s].seq, past_hearts[s].size);
- }
return -1; // What to do here!?
}
peer_send_data(slave, past_hearts[s].data, past_hearts[s].size);
// We've heard from another peer! Add it to the list
// that we select from at election time.
//
-int cluster_add_peer(u32 peer, time_t basetime, pingt *p)
+int cluster_add_peer(u32 peer, time_t basetime, pingt *pp, int size)
{
int i;
u32 clusterid;
+ pingt p;
+
+ // Allow for backward compatibility.
+ // Just copy the ping packet into a new structure to allow
+ // for the possibility that we might have received
+ // more or fewer elements than we were expecting.
+ if (size > sizeof(p))
+ size = sizeof(p);
- clusterid = p->addr;
+ memset( (void*) &p, 0, sizeof(p) );
+ memcpy( (void*) &p, (void*) pp, size);
+
+ clusterid = p.addr;
if (clusterid != config->bind_address)
{
// Is this for us?
// This peer already exists. Just update the timestamp.
peers[i].basetime = basetime;
peers[i].timestamp = TIME;
- peers[i].uptodate = !p->undef;
+ peers[i].uptodate = !p.undef;
break;
}
- // Is this the master shutting down??
+ // Is this the master shutting down??
if (peer == config->cluster_master_address && !basetime) {
+ log(3,0,0,0, "Master %s shutting down...\n", inet_toa(config->cluster_master_address));
config->cluster_master_address = 0;
config->cluster_last_hb = 0; // Force an election.
cluster_check_master();
peers[i].peer = peer;
peers[i].basetime = basetime;
peers[i].timestamp = TIME;
- peers[i].uptodate = !p->undef;
+ peers[i].uptodate = !p.undef;
if (i == num_peers)
++num_peers;
log(1,0,0,0, "Added %s as a new peer. Now %d peers\n", inet_toa(peer), num_peers);
}
- if (peers[i].uptodate)
- {
-#ifdef BGP
- /* drop routes if we've now got a peer */
- if (config->cluster_iam_master && bgp_configured && !have_peers)
- bgp_enable_routing(0);
-#endif /* BGP */
- have_peers = 1;
- }
- else if (config->cluster_iam_master)
- {
- config->cluster_iam_uptodate = 0; // increase heart-rate...
- }
-
-
return 1;
}
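//
// The size-clamped copy above in general form: zero a local structure, then
// copy at most sizeof(struct) bytes off the wire, so a shorter ping from an
// older peer leaves the newer fields zeroed and a longer ping from a newer
// peer is safely truncated. A sketch with an illustrative layout (not the
// real pingt; needs <stdint.h> and <string.h>):
//
struct ping_example {
	uint32_t addr; // cluster (VIP) address, used as the cluster id
	uint32_t undef; // count of undefined sessions/tunnels
	uint32_t basetime; // peer's start time; newer versions may append fields
};

static void decode_ping_example(struct ping_example *out, void const *wire, int wire_len)
{
	memset(out, 0, sizeof(*out)); // fields the sender didn't know about read as zero
	if (wire_len > (int) sizeof(*out))
		wire_len = sizeof(*out); // fields we don't know about are ignored
	memcpy(out, wire, wire_len);
}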
if (h->interval != config->cluster_hb_interval)
{
log(2, 0, 0, 0, "Master set ping/heartbeat interval to %u (was %u)\n",
- h->interval, config->cluster_hb_interval);
+ h->interval, config->cluster_hb_interval);
- config->cluster_hb_interval = h->interval;
+ config->cluster_hb_interval = h->interval;
}
if (h->timeout != config->cluster_hb_timeout)
{
log(2, 0, 0, 0, "Master set heartbeat timeout to %u (was %u)\n",
- h->timeout, config->cluster_hb_timeout);
+ h->timeout, config->cluster_hb_timeout);
- config->cluster_hb_timeout = h->timeout;
+ config->cluster_hb_timeout = h->timeout;
}
}
switch (type) {
case C_PING: // Update the peers table.
- return cluster_add_peer(addr, more, (pingt*)p);
+ return cluster_add_peer(addr, more, (pingt*)p, s);
case C_LASTSEEN: // Catch up a slave (slave missed a packet).
return cluster_catchup_slave(more, addr);
return 0;
shortpacket:
- log(0,0,0,0, "I got an cluster heartbeat packet! This means I'm probably out of sync!!\n");
+ log(0,0,0,0, "I got a _short_ cluster heartbeat packet! This means I'm probably out of sync!!\n");
return -1;
}
if (CLI_HELP_REQUESTED)
return CLI_HELP_NO_ARGS;
- cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
+ cli_print(cli, "Cluster status : %s", config->cluster_iam_master ? "Master" : "Slave" );
cli_print(cli, "My address : %s", inet_toa(my_address));
cli_print(cli, "VIP address : %s", inet_toa(config->bind_address));
cli_print(cli, "Multicast address: %s", inet_toa(config->cluster_address));
cli_print(cli, "Multicast i'face : %s", config->cluster_interface);
- if (!config->cluster_iam_master) {
+ if (!config->cluster_iam_master) {
cli_print(cli, "My master : %s (last heartbeat %.1f seconds old)",
config->cluster_master_address ? inet_toa(config->cluster_master_address) : "Not defined",
0.1 * (TIME - config->cluster_last_hb));
- cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
+ cli_print(cli, "Uptodate : %s", config->cluster_iam_uptodate ? "Yes" : "No");
cli_print(cli, "Next sequence number expected: %d", config->cluster_seq_number);
cli_print(cli, "%d sessions undefined of %d", config->cluster_undefined_sessions, config->cluster_highest_sessionid);
cli_print(cli, "%d tunnels undefined of %d", config->cluster_undefined_tunnels, config->cluster_highest_tunnelid);