+ default:
+ {
+ static char e[] = "unknown AVP 0xXXXX";
+ LOG(2, s, t, " Unknown AVP type %u\n", mtype);
+ fatal = flags;
+ result = 2; // general error
+ error = 8; // unknown mandatory AVP
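+ // overwrite the "XXXX" placeholder in e (offset 14, just after "0x") with the AVP type in hex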
+ sprintf((msg = e) + 14, "%04x", mtype);
+ continue; // next
+ }
+ }
+ }
+ // process message
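+ // fatal holds the saved AVP flags; 0x80 is the Mandatory (M) bit, so an unhandled mandatory AVP aborts the tunnel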
+ if (fatal & 0x80)
+ tunnelshutdown(t, "Invalid mandatory AVP", result, error, msg);
+ else
+ switch (message)
+ {
+ case 1: // SCCRQ - Start Control Connection Request
+ tunnel[t].state = TUNNELOPENING;
+ LOG(3, s, t, "Received SCCRQ\n");
+ if (main_quit != QUIT_SHUTDOWN)
+ {
+ LOG(3, s, t, "sending SCCRP\n");
+ controlt *c = controlnew(2); // sending SCCRP
+ control16(c, 2, version, 1); // protocol version
+ control32(c, 3, 3, 1); // framing
+ controls(c, 7, config->multi_n_hostname[tunnel[t].indexudp][0]?config->multi_n_hostname[tunnel[t].indexudp]:hostname, 1); // host name
+ if (sendchalresponse) controlb(c, 13, sendchalresponse, 16, 1); // Send Challenge response
+ control16(c, 9, t, 1); // assigned tunnel
+ controladd(c, 0, t); // send the reply
+ }
+ else
+ {
+ tunnelshutdown(t, "Shutting down", 6, 0, 0);
+ }
+ break;
+ case 2: // SCCRP
+ tunnel[t].state = TUNNELOPEN;
+ tunnel[t].lastrec = time_now;
+ LOG(3, s, t, "Received SCCRP\n");
+ if (main_quit != QUIT_SHUTDOWN)
+ {
+ if (tunnel[t].isremotelns && recvchalresponse)
+ {
+ hasht hash;
+
+ lac_calc_rlns_auth(t, 2, hash); // id = 2 (SCCRP)
+ // check authenticator
+ if (memcmp(hash, recvchalresponse, 16) == 0)
+ {
+ LOG(3, s, t, "sending SCCCN to REMOTE LNS\n");
+ controlt *c = controlnew(3); // sending SCCCN
+ controls(c, 7, config->multi_n_hostname[tunnel[t].indexudp][0]?config->multi_n_hostname[tunnel[t].indexudp]:hostname, 1); // host name
+ controls(c, 8, Vendor_name, 1); // Vendor name
+ control16(c, 2, version, 1); // protocol version
+ control32(c, 3, 3, 1); // framing Capabilities
+ if (sendchalresponse) controlb(c, 13, sendchalresponse, 16, 1); // Challenge response
+ control16(c, 9, t, 1); // assigned tunnel
+ controladd(c, 0, t); // send
+ }
+ else
+ {
+ tunnelshutdown(t, "Bad chap response from REMOTE LNS", 4, 0, 0);
+ }
+ }
+ }
+ else
+ {
+ tunnelshutdown(t, "Shutting down", 6, 0, 0);
+ }
+ break;
+ case 3: // SCCCN (Start-Control-Connection-Connected)
+ LOG(3, s, t, "Received SCCCN\n");
+ tunnel[t].state = TUNNELOPEN;
+ tunnel[t].lastrec = time_now;
+ controlnull(t); // ack
+ break;
+ case 4: // StopCCN
+ LOG(3, s, t, "Received StopCCN\n");
+ controlnull(t); // ack
+ tunnelshutdown(t, "Stopped", 0, 0, 0); // Shut down cleanly
+ break;
+ case 6: // HELLO
+ LOG(3, s, t, "Received HELLO\n");
+ controlnull(t); // simply ACK
+ break;
+ case 7: // OCRQ
+ // TBA
+ LOG(3, s, t, "Received OCRQ\n");
+ break;
+ case 8: // OCRP
+ // TBA
+ LOG(3, s, t, "Received OCRP\n");
+ break;
+ case 9: // OCCN
+ // TBA
+ LOG(3, s, t, "Received OCCN\n");
+ break;
+ case 10: // ICRQ
+ LOG(3, s, t, "Received ICRQ\n");
+ if (sessionfree && main_quit != QUIT_SHUTDOWN)
+ {
+ controlt *c = controlnew(11); // ICRP
+
+ LOG(3, s, t, "Sending ICRP\n");
+
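+ // claim the next free session slot and zero it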
+ s = sessionfree;
+ sessionfree = session[s].next;
+ memset(&session[s], 0, sizeof(session[s]));
+
+ if (s > config->cluster_highest_sessionid)
+ config->cluster_highest_sessionid = s;
+
+ session[s].opened = time_now;
+ session[s].tunnel = t;
+ session[s].far = asession;
+ session[s].last_packet = session[s].last_data = time_now;
+ LOG(3, s, t, "New session (%u/%u)\n", tunnel[t].far, session[s].far);
+ control16(c, 14, s, 1); // assigned session
+ controladd(c, asession, t); // send the reply
+
+ strncpy(session[s].called, called, sizeof(session[s].called) - 1);
+ strncpy(session[s].calling, calling, sizeof(session[s].calling) - 1);
+
+ session[s].ppp.phase = Establish;
+ session[s].ppp.lcp = Starting;
+
+ STAT(session_created);
+ break;
+ }
+
+ {
+ controlt *c = controlnew(14); // CDN
+ LOG(3, s, t, "Sending CDN\n");
+ if (!sessionfree)
+ {
+ STAT(session_overflow);
+ LOG(1, 0, t, "No free sessions\n");
+ control16(c, 1, 4, 0); // temporary lack of resources
+ }
+ else
+ control16(c, 1, 2, 7); // shutting down, try another
+
+ controladd(c, asession, t); // send the message
+ }
+ return;
+ case 11: // ICRP
+ LOG(3, s, t, "Received ICRP\n");
+ if (session[s].forwardtosession)
+ {
+ controlt *c = controlnew(12); // ICCN
+
+ session[s].opened = time_now;
+ session[s].tunnel = t;
+ session[s].far = asession;
+ session[s].last_packet = session[s].last_data = time_now;
+
+ control32(c, 19, 1, 1); // Framing Type
+ control32(c, 24, 10000000, 1); // Tx Connect Speed
+ controladd(c, asession, t); // send the message
+ LOG(3, s, t, "Sending ICCN\n");
+ }
+ break;
+ case 12: // ICCN
+ LOG(3, s, t, "Received ICCN\n");
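+ // if the peer supplied no magic number, synthesise one from the current time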
+ if (amagic == 0) amagic = time_now;
+ session[s].magic = amagic; // set magic number
+ session[s].flags = aflags; // set flags received
+ session[s].mru = PPPoE_MRU; // default
+ controlnull(t); // ack
+
+ // start LCP
+ sess_local[s].lcp_authtype = config->radius_authprefer;
+ sess_local[s].ppp_mru = MRU;
+
+ // Set multilink options before sending initial LCP packet
+ sess_local[s].mp_mrru = 1614;
+ sess_local[s].mp_epdis = ntohl(config->iftun_address ? config->iftun_address : my_address);
+
+ sendlcp(s, t);
+ change_state(s, lcp, RequestSent);
+ break;
+
+ case 14: // CDN
+ LOG(3, s, t, "Received CDN\n");
+ controlnull(t); // ack
+ sessionshutdown(s, disc_reason, CDN_NONE, disc_cause);
+ break;
+ case 0xFFFF:
+ LOG(1, s, t, "Missing message type\n");
+ break;
+ default:
+ STAT(tunnel_rx_errors);
+ if (mandatory)
+ tunnelshutdown(t, "Unknown message type", 2, 6, "unknown message type");
+ else
+ LOG(1, s, t, "Unknown message type %u\n", message);
+ break;
+ }
+ if (sendchalresponse) free(sendchalresponse);
+ if (recvchalresponse) free(recvchalresponse);
+ cluster_send_tunnel(t);
+ }
+ else
+ {
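+ // a ZLB (Zero Length Body) message is just an acknowledgement; nothing more to do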
+ LOG(4, s, t, " Got a ZLB ack\n");
+ }
+ }
+ else
+ { // data
+ uint16_t proto;
+
+ LOG_HEX(5, "Receive Tunnel Data", p, l);
+ if (l > 2 && p[0] == 0xFF && p[1] == 0x03)
+ { // HDLC address/control bytes (0xFF 0x03), discard
+ p += 2;
+ l -= 2;
+ }
+ if (l < 2)
+ {
+ LOG(1, s, t, "Short PPP length %d\n", l);
+ STAT(tunnel_rx_errors);
+ return;
+ }
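+ // PPP protocol field: an odd first byte means the protocol number was compressed to a single byte (PFC); otherwise it is two bytes in network order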
+ if (*p & 1)
+ {
+ proto = *p++;
+ l--;
+ }
+ else
+ {
+ proto = ntohs(*(uint16_t *) p);
+ p += 2;
+ l -= 2;
+ }
+
+ if (session[s].forwardtosession)
+ {
+ LOG(5, s, t, "Forwarding session data to session %u\n", session[s].forwardtosession);
+ // Forward to LAC/BAS or Remote LNS session
+ lac_session_forward(buf, len, s, proto, addr->sin_addr.s_addr, addr->sin_port, indexudpfd);
+ return;
+ }
+ else if (config->auth_tunnel_change_addr_src)
+ {
+ if (tunnel[t].ip != ntohl(addr->sin_addr.s_addr) &&
+ tunnel[t].port == ntohs(addr->sin_port))
+ {
+ // the remote BAS is a clustered l2tpns server and the tunnel source IP has changed
+ LOG(5, s, t, "Tunnel source IP changed from %s to %s\n",
+ fmtaddr(htonl(tunnel[t].ip), 0), fmtaddr(addr->sin_addr.s_addr, 0));
+
+ tunnel[t].ip = ntohl(addr->sin_addr.s_addr);
+ }
+ }
+
+ if (s && !session[s].opened) // data for a session that was never opened?
+ {
+ if (!config->cluster_iam_master)
+ {
+ // Pass it off to the master to deal with..
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd);
+ return;
+ }
+
+ LOG(1, s, t, "UDP packet refers to a session which is not open. Dropping packet.\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+
+ if (proto == PPPPAP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processpap(s, t, p, l);
+ }
+ else if (proto == PPPCHAP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processchap(s, t, p, l);
+ }
+ else if (proto == PPPLCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processlcp(s, t, p, l);
+ }
+ else if (proto == PPPIPCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processipcp(s, t, p, l);
+ }
+ else if (proto == PPPIPV6CP && config->ipv6_prefix.s6_addr[0])
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processipv6cp(s, t, p, l);
+ }
+ else if (proto == PPPCCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ processccp(s, t, p, l);
+ }
+ else if (proto == PPPIP)
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (session[s].walled_garden && !config->cluster_iam_master)
+ {
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd);
+ return;
+ }
+
+ processipin(s, t, p, l);
+ }
+ else if (proto == PPPMP)
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (!config->cluster_iam_master)
+ {
+ // The fragments reconstruction is managed by the Master.
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd);
+ return;
+ }
+
+ processmpin(s, t, p, l);
+ }
+ else if (proto == PPPIPV6 && config->ipv6_prefix.s6_addr[0])
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (session[s].walled_garden && !config->cluster_iam_master)
+ {
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd);
+ return;
+ }
+
+ processipv6in(s, t, p, l);
+ }
+ else if (session[s].ppp.lcp == Opened)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port, indexudpfd); return; }
+ protoreject(s, t, p, l, proto);
+ }
+ else
+ {
+ LOG(2, s, t, "Unknown PPP protocol 0x%04X received in LCP %s state\n",
+ proto, ppp_state(session[s].ppp.lcp));
+ }
+ }
+}
+
+// read and process a packet from the tun device
+// (the encapsulation path prepends headers, i.e. this routine writes to buf[-8])
+static void processtun(uint8_t * buf, int len)
+{
+ LOG_HEX(5, "Receive TUN Data", buf, len);
+ STAT(tun_rx_packets);
+ INC_STAT(tun_rx_bytes, len);
+
+ CSTAT(processtun);
+
+ eth_rx_pkt++;
+ eth_rx += len;
+ if (len < 22)
+ {
+ LOG(1, 0, 0, "Short tun packet %d bytes\n", len);
+ STAT(tun_rx_errors);
+ return;
+ }
+
+ if (*(uint16_t *) (buf + 2) == htons(PKTIP)) // IPv4
+ processipout(buf, len);
+ else if (*(uint16_t *) (buf + 2) == htons(PKTIPV6) // IPV6
+ && config->ipv6_prefix.s6_addr[0])
+ processipv6out(buf, len);
+
+ // Else discard.
+}
+
+// Handle retries and timeouts. Runs every 1/10th sec; we want to ensure
+// that we look at the whole of the tunnel, radius and session tables
+// every second
+static void regular_cleanups(double period)
+{
+ // Next tunnel, radius and session to check for actions on.
+ static tunnelidt t = 0;
+ static int r = 0;
+ static sessionidt s = 0;
+
+ int t_actions = 0;
+ int r_actions = 0;
+ int s_actions = 0;
+
+ int t_slice;
+ int r_slice;
+ int s_slice;
+
+ int i;
+ int a;
+
+ // divide up tables into slices based on the last run
+ t_slice = config->cluster_highest_tunnelid * period;
+ r_slice = (MAXRADIUS - 1) * period;
+ s_slice = config->cluster_highest_sessionid * period;
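+ // e.g. a 0.1s period yields slices of ~10% of each table, so the whole table is covered roughly once per second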
+
+ if (t_slice < 1)
+ t_slice = 1;
+ else if (t_slice > config->cluster_highest_tunnelid)
+ t_slice = config->cluster_highest_tunnelid;
+
+ if (r_slice < 1)
+ r_slice = 1;
+ else if (r_slice > (MAXRADIUS - 1))
+ r_slice = MAXRADIUS - 1;
+
+ if (s_slice < 1)
+ s_slice = 1;
+ else if (s_slice > config->cluster_highest_sessionid)
+ s_slice = config->cluster_highest_sessionid;
+
+ LOG(4, 0, 0, "Begin regular cleanup (last %f seconds ago)\n", period);
+
+ for (i = 0; i < t_slice; i++)
+ {
+ t++;
+ if (t > config->cluster_highest_tunnelid)
+ t = 1;
+
+ if (t == TUNNEL_ID_PPPOE)
+ continue;
+
+ // check for expired tunnels
+ if (tunnel[t].die && tunnel[t].die <= TIME)
+ {
+ STAT(tunnel_timeout);
+ tunnelkill(t, "Expired");
+ t_actions++;
+ continue;
+ }
+ // check for message resend
+ if (tunnel[t].retry && tunnel[t].controlc)
+ {
+ // resend pending messages if the reply has timed out
+ if (tunnel[t].retry <= TIME)
+ {
+ controlt *c = tunnel[t].controls;
+ uint16_t w = tunnel[t].window;
+ tunnel[t].try++; // another try
+ if (tunnel[t].try > 5)
+ tunnelkill(t, "Timeout on control message"); // game over
+ else
+ while (c && w--)
+ {
+ tunnelsend(c->buf, c->length, t);
+ c = c->next;
+ }
+
+ t_actions++;
+ }
+ }
+ // Send hello
+ if (tunnel[t].state == TUNNELOPEN && !tunnel[t].controlc && (time_now - tunnel[t].lastrec) > 60)
+ {
+ if (!config->disable_sending_hello)
+ {
+ controlt *c = controlnew(6); // sending HELLO
+ controladd(c, 0, t); // send the message
+ LOG(3, 0, t, "Sending HELLO message\n");
+ t_actions++;
+ }
+ }
+
+ // Check for tunnel changes requested from the CLI
+ if ((a = cli_tunnel_actions[t].action))
+ {
+ cli_tunnel_actions[t].action = 0;
+ if (a & CLI_TUN_KILL)
+ {
+ LOG(2, 0, t, "Dropping tunnel by CLI\n");
+ tunnelshutdown(t, "Requested by administrator", 1, 0, 0);
+ t_actions++;
+ }
+ }
+ }
+
+ for (i = 0; i < r_slice; i++)
+ {
+ r++;
+ if (r >= MAXRADIUS)
+ r = 1;
+
+ if (!radius[r].state)
+ continue;
+
+ if (radius[r].retry <= TIME)
+ {
+ radiusretry(r);
+ r_actions++;
+ }
+ }
+
+ for (i = 0; i < s_slice; i++)
+ {
+ s++;
+ if (s > config->cluster_highest_sessionid)
+ s = 1;
+
+ if (!session[s].opened) // Session isn't in use
+ continue;
+
+ // check for expired sessions
+ if (session[s].die)
+ {
+ if (session[s].die <= TIME)
+ {
+ sessionkill(s, "Expired");
+ s_actions++;
+ }
+ continue;
+ }
+
+ // PPP timeouts
+ if (sess_local[s].lcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.lcp;
+ switch (session[s].ppp.lcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
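+ // fall through: the retry logic below applies to these states too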
+
+ case AckSent:
+ if (sess_local[s].lcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for LCP ConfigReq... resending\n");
+ sendlcp(s, session[s].tunnel);
+ change_state(s, lcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to LCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipcp;
+ switch (session[s].ppp.ipcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
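+ // fall through: the retry logic below applies to these states too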
+
+ case AckSent:
+ if (sess_local[s].ipcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPCP ConfigReq... resending\n");
+ sendipcp(s, session[s].tunnel);
+ change_state(s, ipcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to IPCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipv6cp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipv6cp;
+ switch (session[s].ppp.ipv6cp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
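+ // fall through: the retry logic below applies to these states too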
+
+ case AckSent:
+ if (sess_local[s].ipv6cp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq... resending\n");
+ sendipv6cp(s, session[s].tunnel);
+ change_state(s, ipv6cp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq\n");
+ change_state(s, ipv6cp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ if (sess_local[s].ccp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ccp;
+ switch (session[s].ppp.ccp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
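+ // fall through: the retry logic below applies to these states too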
+
+ case AckSent:
+ if (sess_local[s].ccp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq... resending\n");
+ sendccp(s, session[s].tunnel);
+ change_state(s, ccp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq\n");
+ change_state(s, ccp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ // Drop sessions that have not responded within idle_echo_timeout seconds
+ if (session[s].last_packet && (time_now - session[s].last_packet >= config->idle_echo_timeout))
+ {
+ sessionshutdown(s, "No response to LCP ECHO requests.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // No data for echo_timeout seconds: send an LCP echo request
+ if (session[s].ppp.phase >= Establish && (time_now - session[s].last_packet >= config->echo_timeout) &&
+ (time_now - sess_local[s].last_echo >= config->echo_timeout))
+ {
+ uint8_t b[MAXETHER];
+
+ uint8_t *q = makeppp(b, sizeof(b), 0, 0, s, session[s].tunnel, PPPLCP, 1, 0, 0);
+ if (!q) continue;
+
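+ // build the 8-byte LCP Echo-Request: code, id, length and magic number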
+ *q = EchoReq;
+ *(uint8_t *)(q + 1) = (time_now % 255); // ID
+ *(uint16_t *)(q + 2) = htons(8); // Length
+ *(uint32_t *)(q + 4) = session[s].ppp.lcp == Opened ? htonl(session[s].magic) : 0; // Magic Number
+
+ LOG(4, s, session[s].tunnel, "No data in %d seconds, sending LCP ECHO\n",
+ (int)(time_now - session[s].last_packet));
+
+ tunnelsend(b, (q - b) + 8, session[s].tunnel); // send it
+ sess_local[s].last_echo = time_now;
+ s_actions++;
+ }
+
+ // Drop sessions that have reached session_timeout seconds
+ if (session[s].session_timeout)
+ {
+ bundleidt bid = session[s].bundle;
+ if (bid)
+ {
+ if (time_now - bundle[bid].last_check >= 1)
+ {
+ bundle[bid].online_time += (time_now - bundle[bid].last_check) * bundle[bid].num_of_links;
+ bundle[bid].last_check = time_now;
+ if (bundle[bid].online_time >= session[s].session_timeout)
+ {
+ int ses;
+ for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
+ {
+ sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+ }
+ }
+ else if (time_now - session[s].opened >= session[s].session_timeout)
+ {
+ sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+
+ // Drop sessions that have reached idle_timeout seconds
+ if (session[s].last_data && session[s].idle_timeout && (time_now - session[s].last_data >= session[s].idle_timeout))
+ {
+ sessionshutdown(s, "Idle Timeout Reached", CDN_ADMIN_DISC, TERM_IDLE_TIMEOUT);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // Check for actions requested from the CLI
+ if ((a = cli_session_actions[s].action))
+ {
+ int send = 0;
+
+ cli_session_actions[s].action = 0;
+ if (a & CLI_SESS_KILL)
+ {
+ LOG(2, s, session[s].tunnel, "Dropping session by CLI\n");
+ sessionshutdown(s, "Requested by administrator.", CDN_ADMIN_DISC, TERM_ADMIN_RESET);
+ a = 0; // dead, no need to check for other actions
+ s_actions++;
+ }
+
+ if (a & CLI_SESS_NOSNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Unsnooping session by CLI\n");
+ session[s].snoop_ip = 0;
+ session[s].snoop_port = 0;
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_SNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Snooping session by CLI (to %s:%u)\n",
+ fmtaddr(cli_session_actions[s].snoop_ip, 0),
+ cli_session_actions[s].snoop_port);
+
+ session[s].snoop_ip = cli_session_actions[s].snoop_ip;
+ session[s].snoop_port = cli_session_actions[s].snoop_port;
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOTHROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Un-throttling session by CLI\n");
+ throttle_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_THROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Throttling session by CLI (to %dkb/s up and %dkb/s down)\n",
+ cli_session_actions[s].throttle_in,
+ cli_session_actions[s].throttle_out);
+
+ throttle_session(s, cli_session_actions[s].throttle_in, cli_session_actions[s].throttle_out);
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOFILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Un-filtering session by CLI\n");
+ filter_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_FILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Filtering session by CLI (in=%d, out=%d)\n",
+ cli_session_actions[s].filter_in,
+ cli_session_actions[s].filter_out);
+
+ filter_session(s, cli_session_actions[s].filter_in, cli_session_actions[s].filter_out);
+ s_actions++;
+ send++;
+ }
+
+ if (send)
+ cluster_send_session(s);
+ }
+
+ // RADIUS interim accounting
+ if (config->radius_accounting && config->radius_interim > 0
+ && session[s].ip && !session[s].walled_garden
+ && !sess_local[s].radius // RADIUS already in progress
+ && time_now - sess_local[s].last_interim >= config->radius_interim
+ && session[s].flags & SESSION_STARTED)
+ {
+ int rad = radiusnew(s);
+ if (!rad)
+ {
+ LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Interim message\n");
+ STAT(radius_overflow);
+ continue;
+ }
+
+ LOG(3, s, session[s].tunnel, "Sending RADIUS Interim for %s (%u)\n",
+ session[s].user, session[s].unique_id);
+
+ radiussend(rad, RADIUSINTERIM);
+ sess_local[s].last_interim = time_now;
+ s_actions++;
+ }
+ }
+
+ LOG(4, 0, 0, "End regular cleanup: checked %d/%d/%d tunnels/radius/sessions; %d/%d/%d actions\n",
+ t_slice, r_slice, s_slice, t_actions, r_actions, s_actions);
+}
+
+//
+// Are we still in the middle of a tunnel update or
+// outstanding RADIUS requests?
+//
+static int still_busy(void)
+{
+ int i;
+ static clockt last_talked = 0;
+ static clockt start_busy_wait = 0;
+
+#ifdef BGP
+ static time_t stopped_bgp = 0;
+ if (bgp_configured)
+ {
+ if (!stopped_bgp)
+ {
+ LOG(1, 0, 0, "Shutting down in %d seconds, stopping BGP...\n", QUIT_DELAY);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ if (bgp_peers[i].state == Established)
+ bgp_stop(&bgp_peers[i]);
+
+ stopped_bgp = time_now;
+
+ if (!config->cluster_iam_master)
+ {
+ // we don't want to become master
+ cluster_send_ping(0);
+
+ return 1;
+ }
+ }
+
+ if (!config->cluster_iam_master && time_now < (stopped_bgp + QUIT_DELAY))
+ return 1;
+ }
+#endif /* BGP */
+
+ if (!config->cluster_iam_master)
+ return 0;
+
+ if (main_quit == QUIT_SHUTDOWN)
+ {
+ static int dropped = 0;
+ if (!dropped)
+ {
+ int i;
+
+ LOG(1, 0, 0, "Dropping sessions and tunnels\n");
+ for (i = 1; i < MAXTUNNEL; i++)
+ if (tunnel[i].ip || tunnel[i].state)
+ tunnelshutdown(i, "L2TPNS Closing", 6, 0, 0);
+
+ dropped = 1;
+ }
+ }
+
+ if (start_busy_wait == 0)
+ start_busy_wait = TIME;
+
+ for (i = config->cluster_highest_tunnelid ; i > 0 ; --i)
+ {
+ if (!tunnel[i].controlc)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Tunnel %u still has un-acked control messages.\n", i);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ // Stop waiting for RADIUS after BUSY_WAIT_TIME tenths of a second
+ if (abs(TIME - start_busy_wait) > BUSY_WAIT_TIME)
+ {
+ LOG(1, 0, 0, "Giving up waiting for RADIUS to be empty. Shutting down anyway.\n");
+ return 0;
+ }
+
+ for (i = 1; i < MAXRADIUS; i++)
+ {
+ if (radius[i].state == RADIUSNULL)
+ continue;
+ if (radius[i].state == RADIUSWAIT)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Radius session %u is still busy (sid %u)\n", i, radius[i].session);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_EPOLL
+# include <sys/epoll.h>
+#else
+# define FAKE_EPOLL_IMPLEMENTATION /* include the functions */
+# include "fake_epoll.h"
+#endif
+
+// the base set of fds polled: cli, cluster, tun, udp (MAX_UDPFD), control, dae, netlink, udplac, pppoedisc, pppoesess
+#define BASE_FDS (9 + MAX_UDPFD)
+
+// additional polled fds
+#ifdef BGP
+# define EXTRA_FDS BGP_NUM_PEERS
+#else
+# define EXTRA_FDS 0
+#endif
+
+// main loop - gets packets on tun or udp and processes them
+static void mainloop(void)
+{
+ int i, j;
+ uint8_t buf[65536];
+ uint8_t *p = buf + 32; // headroom for the header of a forwarded MPPP packet (see C_MPPP_FORWARD)
+ // and for forwarded pppoe sessions
+ int size_bufp = sizeof(buf) - 32;
+ clockt next_cluster_ping = 0; // send initial ping immediately
+ struct epoll_event events[BASE_FDS + RADIUS_FDS + EXTRA_FDS];
+ int maxevent = sizeof(events)/sizeof(*events);
+
+ if ((epollfd = epoll_create(maxevent)) < 0)
+ {
+ LOG(0, 0, 0, "epoll_create failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ LOG(4, 0, 0, "Beginning of main loop. clifd=%d, cluster_sockfd=%d, tunfd=%d, udpfd=%d, controlfd=%d, daefd=%d, nlfd=%d, udplacfd=%d, pppoefd=%d, pppoesessfd=%d\n",
+ clifd, cluster_sockfd, tunfd, udpfd[0], controlfd, daefd, nlfd, udplacfd, pppoediscfd, pppoesessfd);
+
+ /* setup our fds to poll for input */
+ {
+ static struct event_data d[BASE_FDS];
+ struct epoll_event e;
+
+ e.events = EPOLLIN;
+ i = 0;
+
+ if (clifd >= 0)
+ {
+ d[i].type = FD_TYPE_CLI;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, clifd, &e);
+ }
+
+ d[i].type = FD_TYPE_CLUSTER;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, cluster_sockfd, &e);
+
+ d[i].type = FD_TYPE_TUN;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, tunfd, &e);
+
+ d[i].type = FD_TYPE_CONTROL;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, controlfd, &e);
+
+ d[i].type = FD_TYPE_DAE;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, daefd, &e);
+
+ d[i].type = FD_TYPE_NETLINK;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, nlfd, &e);
+
+ d[i].type = FD_TYPE_PPPOEDISC;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, pppoediscfd, &e);
+
+ d[i].type = FD_TYPE_PPPOESESS;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, pppoesessfd, &e);
+
+ for (j = 0; j < config->nbudpfd; j++)
+ {
+ d[i].type = FD_TYPE_UDP;
+ d[i].index = j;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, udpfd[j], &e);
+ }
+ }
+
+#ifdef BGP
+ signal(SIGPIPE, SIG_IGN);
+ bgp_setup(config->as_number);
+ if (config->bind_address)
+ bgp_add_route(config->bind_address, 0xffffffff);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ if (config->neighbour[i].name[0])
+ bgp_start(&bgp_peers[i], config->neighbour[i].name,
+ config->neighbour[i].as, config->neighbour[i].keepalive,
+ config->neighbour[i].hold, config->neighbour[i].update_source,
+ 0); /* 0 = routing disabled */
+ }
+#endif /* BGP */
+
+ while (!main_quit || still_busy())
+ {
+ int more = 0;
+ int n;
+
+
+ if (main_reload)
+ {
+ main_reload = 0;
+ read_config_file();
+ config->reload_config++;
+ }
+
+ if (config->reload_config)
+ {
+ config->reload_config = 0;
+ update_config();
+ }
+
+#ifdef BGP
+ bgp_set_poll();
+#endif /* BGP */
+
+ n = epoll_wait(epollfd, events, maxevent, 100); // timeout 100ms (1/10th sec)
+ STAT(select_called);
+
+ TIME = now(NULL);
+ if (n < 0)
+ {
+ if (errno == EINTR ||
+ errno == ECHILD) // EINTR was clobbered by sigchild_handler()
+ continue;
+
+ LOG(0, 0, 0, "Error returned from epoll_wait(): %s\n", strerror(errno));
+ break; // exit
+ }
+
+ if (n)
+ {
+ struct sockaddr_in addr;
+ struct in_addr local;
+ socklen_t alen;
+ int c, s;
+ int udp_ready[MAX_UDPFD + 1] = INIT_TABUDPVAR;
+ int pppoesess_ready = 0;
+ int pppoesess_pkts = 0;
+ int tun_ready = 0;
+ int cluster_ready = 0;
+ int udp_pkts[MAX_UDPFD + 1] = INIT_TABUDPVAR;
+ int tun_pkts = 0;
+ int cluster_pkts = 0;
+#ifdef BGP
+ uint32_t bgp_events[BGP_NUM_PEERS];
+ memset(bgp_events, 0, sizeof(bgp_events));
+#endif /* BGP */
+
+ for (c = n, i = 0; i < c; i++)
+ {
+ struct event_data *d = events[i].data.ptr;
+
+ switch (d->type)
+ {
+ case FD_TYPE_CLI: // CLI connections
+ {
+ int cli;
+
+ alen = sizeof(addr);
+ if ((cli = accept(clifd, (struct sockaddr *)&addr, &alen)) >= 0)