+ udp_tx += len;
+}
+
+// process outgoing (to tunnel) IPv6
+//
+static void processipv6out(uint8_t * buf, int len)
+{
+ sessionidt s;
+ sessiont *sp;
+ tunnelidt t;
+ in_addr_t ip;
+ struct in6_addr ip6;
+
+ uint8_t *data = buf; // Keep a copy of the originals.
+ int size = len;
+
+ uint8_t b[MAXETHER + 20];
+
+ CSTAT(processipv6out);
+
+ if (len < MIN_IP_SIZE)
+ {
+ LOG(1, 0, 0, "Short IPv6, %d bytes\n", len);
+ STAT(tunnel_tx_errors);
+ return;
+ }
+ if (len >= MAXETHER)
+ {
+ LOG(1, 0, 0, "Oversize IPv6 packet %d bytes\n", len);
+ STAT(tunnel_tx_errors);
+ return;
+ }
+
+ // Skip the tun header
+ buf += 4;
+ len -= 4;
+
+ // Got an IP header now
+ if (*(uint8_t *)(buf) >> 4 != 6)
+ {
+ LOG(1, 0, 0, "IP: Don't understand anything except IPv6\n");
+ return;
+ }
+
+ ip6 = *(struct in6_addr *)(buf+24);
+ s = sessionbyipv6(ip6);
+
+ if (s == 0)
+ {
+ ip = *(uint32_t *)(buf + 32);
+ s = sessionbyip(ip);
+ }
+
+ if (s == 0)
+ {
+ // Is this a packet for a session that doesn't exist?
+ static int rate = 0; // Number of ICMP packets we've sent this second.
+ static int last = 0; // Last time we reset the ICMP packet counter 'rate'.
+
+ if (last != time_now)
+ {
+ last = time_now;
+ rate = 0;
+ }
+
+ if (rate++ < config->icmp_rate) // Only send a max of icmp_rate per second.
+ {
+ // FIXME: Should send icmp6 host unreachable
+ }
+ return;
+ }
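+ // For multilink (MPPP) bundles with more than one member link,
+ // rotate outgoing packets round-robin across the member sessions.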
+ if (session[s].bundle && bundle[session[s].bundle].num_of_links > 1)
+ {
+ bundleidt bid = session[s].bundle;
+ bundlet *b = &bundle[bid];
+
+ b->current_ses = (b->current_ses + 1) % b->num_of_links;
+ s = b->members[b->current_ses];
+ LOG(3, s, session[s].tunnel, "MPPP: Session number becomes: %u\n", s);
+ }
+ t = session[s].tunnel;
+ sp = &session[s];
+ sp->last_data = time_now;
+
+ // FIXME: add DoS prevention/filters?
+
+ if (sp->tbf_out)
+ {
+ // Are we throttling this session?
+ if (config->cluster_iam_master)
+ tbf_queue_packet(sp->tbf_out, data, size);
+ else
+ master_throttle_packet(sp->tbf_out, data, size);
+ return;
+ }
+ else if (sp->walled_garden && !config->cluster_iam_master)
+ {
+ // We are walled-gardening this
+ master_garden_packet(s, data, size);
+ return;
+ }
+
+ LOG(5, s, t, "Ethernet -> Tunnel (%d bytes)\n", len);
+
+ // Add on L2TP header
+ {
+ uint8_t *p = makeppp(b, sizeof(b), buf, len, s, t, PPPIPV6, 0, 0, 0);
+ if (!p) return;
+ tunnelsend(b, len + (p-b), t); // send it...
+ }
+
+ // Snooping this session, send it to intercept box
+ if (sp->snoop_ip && sp->snoop_port)
+ snoop_send_packet(buf, len, sp->snoop_ip, sp->snoop_port);
+
+ increment_counter(&sp->cout, &sp->cout_wrap, len); // byte count
+ sp->cout_delta += len;
+ sp->pout++;
+ udp_tx += len;
+
+ sess_local[s].cout += len; // To send to master..
+ sess_local[s].pout++;
+}
+
+//
+// Helper routine for the TBF filters.
+// Used to send queued data in to the user!
+//
+static void send_ipout(sessionidt s, uint8_t *buf, int len)
+{
+ sessiont *sp;
+ tunnelidt t;
+
+ uint8_t b[MAXETHER + 20];
+
+ if (len < 0 || len > MAXETHER)
+ {
+ LOG(1, 0, 0, "Odd size IP packet: %d bytes\n", len);
+ return;
+ }
+
+ // Skip the tun header
+ buf += 4;
+ len -= 4;
+
+ if (!session[s].ip)
+ return;
+
+ t = session[s].tunnel;
+ sp = &session[s];
+
+ LOG(5, s, t, "Ethernet -> Tunnel (%d bytes)\n", len);
+
+ // Add on L2TP header
+ {
+ uint8_t *p = makeppp(b, sizeof(b), buf, len, s, t, PPPIP, 0, 0, 0);
+ if (!p) return;
+ tunnelsend(b, len + (p-b), t); // send it...
+ }
+
+ // Snooping this session.
+ if (sp->snoop_ip && sp->snoop_port)
+ snoop_send_packet(buf, len, sp->snoop_ip, sp->snoop_port);
+
+ increment_counter(&sp->cout, &sp->cout_wrap, len); // byte count
+ sp->cout_delta += len;
+ sp->pout++;
+ udp_tx += len;
+
+ sess_local[s].cout += len; // To send to master..
+ sess_local[s].pout++;
+}
+
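+// The AVP helpers below each write a 6-byte header of three 16-bit
+// words (M-bit plus length, vendor ID 0, attribute type) followed by
+// the attribute value, matching the L2TP AVP layout.
+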
+// add an AVP (16 bit)
+static void control16(controlt * c, uint16_t avp, uint16_t val, uint8_t m)
+{
+ uint16_t l = (m ? 0x8008 : 0x0008);
+ c->buf16[c->length/2 + 0] = htons(l);
+ c->buf16[c->length/2 + 1] = htons(0);
+ c->buf16[c->length/2 + 2] = htons(avp);
+ c->buf16[c->length/2 + 3] = htons(val);
+ c->length += 8;
+}
+
+// add an AVP (32 bit)
+static void control32(controlt * c, uint16_t avp, uint32_t val, uint8_t m)
+{
+ uint16_t l = (m ? 0x800A : 0x000A);
+ c->buf16[c->length/2 + 0] = htons(l);
+ c->buf16[c->length/2 + 1] = htons(0);
+ c->buf16[c->length/2 + 2] = htons(avp);
+ *(uint32_t *) &c->buf[c->length + 6] = htonl(val);
+ c->length += 10;
+}
+
+// add an AVP (string)
+static void controls(controlt * c, uint16_t avp, char *val, uint8_t m)
+{
+ uint16_t l = ((m ? 0x8000 : 0) + strlen(val) + 6);
+ c->buf16[c->length/2 + 0] = htons(l);
+ c->buf16[c->length/2 + 1] = htons(0);
+ c->buf16[c->length/2 + 2] = htons(avp);
+ memcpy(&c->buf[c->length + 6], val, strlen(val));
+ c->length += 6 + strlen(val);
+}
+
+// add a binary AVP
+static void controlb(controlt * c, uint16_t avp, uint8_t *val, unsigned int len, uint8_t m)
+{
+ uint16_t l = ((m ? 0x8000 : 0) + len + 6);
+ c->buf16[c->length/2 + 0] = htons(l);
+ c->buf16[c->length/2 + 1] = htons(0);
+ c->buf16[c->length/2 + 2] = htons(avp);
+ memcpy(&c->buf[c->length + 6], val, len);
+ c->length += 6 + len;
+}
+
+// new control connection
+static controlt *controlnew(uint16_t mtype)
+{
+ controlt *c;
+ if (!controlfree)
+ c = malloc(sizeof(controlt));
+ else
+ {
+ c = controlfree;
+ controlfree = c->next;
+ }
+ assert(c);
+ c->next = 0;
+ c->buf16[0] = htons(0xC802); // flags/ver
+ c->length = 12;
+ control16(c, 0, mtype, 1);
+ return c;
+}
+
+// send zero block if nothing is waiting
+// (ZLB send).
+static void controlnull(tunnelidt t)
+{
+ uint16_t buf[6];
+ if (tunnel[t].controlc) // Messages queued; They will carry the ack.
+ return;
+
+ buf[0] = htons(0xC802); // flags/ver
+ buf[1] = htons(12); // length
+ buf[2] = htons(tunnel[t].far); // tunnel
+ buf[3] = htons(0); // session
+ buf[4] = htons(tunnel[t].ns); // sequence
+ buf[5] = htons(tunnel[t].nr); // sequence
+ tunnelsend((uint8_t *)buf, 12, t);
+}
+
+// add a control message to a tunnel, and send if within window
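+// Messages beyond the peer's receive window stay queued on the tunnel
+// and are transmitted later, as acknowledgements advance the window
+// (see the resend and window handling in processudp and regular_cleanups).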
+static void controladd(controlt *c, sessionidt far, tunnelidt t)
+{
+ c->buf16[1] = htons(c->length); // length
+ c->buf16[2] = htons(tunnel[t].far); // tunnel
+ c->buf16[3] = htons(far); // session
+ c->buf16[4] = htons(tunnel[t].ns); // sequence
+ tunnel[t].ns++; // advance sequence
+ // link in message in to queue
+ if (tunnel[t].controlc)
+ tunnel[t].controle->next = c;
+ else
+ tunnel[t].controls = c;
+
+ tunnel[t].controle = c;
+ tunnel[t].controlc++;
+
+ // send now if space in window
+ if (tunnel[t].controlc <= tunnel[t].window)
+ {
+ tunnel[t].try = 0; // first send
+ tunnelsend(c->buf, c->length, t);
+ }
+}
+
+//
+// Throttle or Unthrottle a session
+//
+// Throttle the data from/to through a session to no more than
+// 'rate_in' kbit/sec in (from user) or 'rate_out' kbit/sec out (to
+// user).
+//
+// If either value is -1, the current value is retained for that
+// direction.
+//
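+// Rates are converted from kbit/s to bytes per second; each token
+// bucket is created with a burst size of twice the per-second byte
+// rate.
+//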
+void throttle_session(sessionidt s, int rate_in, int rate_out)
+{
+ if (!session[s].opened)
+ return; // No-one home.
+
+ if (!*session[s].user)
+ return; // User not logged in
+
+ if (rate_in >= 0)
+ {
+ int bytes = rate_in * 1024 / 8; // kbits to bytes
+ if (session[s].tbf_in)
+ free_tbf(session[s].tbf_in);
+
+ if (rate_in > 0)
+ session[s].tbf_in = new_tbf(s, bytes * 2, bytes, send_ipin);
+ else
+ session[s].tbf_in = 0;
+
+ session[s].throttle_in = rate_in;
+ }
+
+ if (rate_out >= 0)
+ {
+ int bytes = rate_out * 1024 / 8;
+ if (session[s].tbf_out)
+ free_tbf(session[s].tbf_out);
+
+ if (rate_out > 0)
+ session[s].tbf_out = new_tbf(s, bytes * 2, bytes, send_ipout);
+ else
+ session[s].tbf_out = 0;
+
+ session[s].throttle_out = rate_out;
+ }
+}
+
+// add/remove filters from session (-1 = no change)
+void filter_session(sessionidt s, int filter_in, int filter_out)
+{
+ if (!session[s].opened)
+ return; // No-one home.
+
+ if (!*session[s].user)
+ return; // User not logged in
+
+ // paranoia
+ if (filter_in > MAXFILTER) filter_in = -1;
+ if (filter_out > MAXFILTER) filter_out = -1;
+ if (session[s].filter_in > MAXFILTER) session[s].filter_in = 0;
+ if (session[s].filter_out > MAXFILTER) session[s].filter_out = 0;
+
+ if (filter_in >= 0)
+ {
+ if (session[s].filter_in)
+ ip_filters[session[s].filter_in - 1].used--;
+
+ if (filter_in > 0)
+ ip_filters[filter_in - 1].used++;
+
+ session[s].filter_in = filter_in;
+ }
+
+ if (filter_out >= 0)
+ {
+ if (session[s].filter_out)
+ ip_filters[session[s].filter_out - 1].used--;
+
+ if (filter_out > 0)
+ ip_filters[filter_out - 1].used++;
+
+ session[s].filter_out = filter_out;
+ }
+}
+
+// start tidy shutdown of session
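+// (sends a RADIUS Stop record, removes routes and frees the IP,
+// detaches the session from any MPPP bundle, and sends a CDN to the
+// peer unless cdn_result is zero)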
+void sessionshutdown(sessionidt s, char const *reason, int cdn_result, int cdn_error, int term_cause)
+{
+ int walled_garden = session[s].walled_garden;
+ bundleidt b = session[s].bundle;
+ // delete routes only for the last session in the bundle (MPPP case)
+ int del_routes = !b || (bundle[b].num_of_links == 1);
+
+ CSTAT(sessionshutdown);
+
+ if (!session[s].opened)
+ {
+ LOG(3, s, session[s].tunnel, "Called sessionshutdown on an unopened session.\n");
+ return; // not a live session
+ }
+
+ if (!session[s].die)
+ {
+ struct param_kill_session data = { &tunnel[session[s].tunnel], &session[s] };
+ LOG(2, s, session[s].tunnel, "Shutting down session %u: %s\n", s, reason);
+ run_plugins(PLUGIN_KILL_SESSION, &data);
+ }
+
+ if (session[s].ip && !walled_garden && !session[s].die)
+ {
+ // RADIUS Stop message
+ uint16_t r = radiusnew(s);
+ if (r)
+ {
+ // stop, if not already trying
+ if (radius[r].state != RADIUSSTOP)
+ {
+ radius[r].term_cause = term_cause;
+ radius[r].term_msg = reason;
+ radiussend(r, RADIUSSTOP);
+ }
+ }
+ else
+ LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Stop message\n");
+
+ // Save counters to dump to accounting file
+ if (*config->accounting_dir && shut_acct_n < sizeof(shut_acct) / sizeof(*shut_acct))
+ memcpy(&shut_acct[shut_acct_n++], &session[s], sizeof(session[s]));
+ }
+
+ if (!session[s].die)
+ session[s].die = TIME + 150; // Clean up in 15 seconds
+
+ if (session[s].ip)
+ { // IP allocated, clear and unroute
+ int r;
+ int routed = 0;
+ for (r = 0; r < MAXROUTE && session[s].route[r].ip; r++)
+ {
+ if ((session[s].ip >> (32-session[s].route[r].prefixlen)) ==
+ (session[s].route[r].ip >> (32-session[s].route[r].prefixlen)))
+ routed++;
+
+ if (del_routes) routeset(s, session[s].route[r].ip, session[s].route[r].prefixlen, 0, 0);
+ session[s].route[r].ip = 0;
+ }
+
+ if (session[s].ip_pool_index == -1) // static ip
+ {
+ if (!routed && del_routes) routeset(s, session[s].ip, 0, 0, 0);
+ session[s].ip = 0;
+ }
+ else
+ free_ip_address(s);
+
+ // unroute IPv6, if setup
+ if (session[s].ppp.ipv6cp == Opened && session[s].ipv6prefixlen && del_routes)
+ route6set(s, session[s].ipv6route, session[s].ipv6prefixlen, 0);
+
+ if (b)
+ {
+ // This session was part of a bundle
+ bundle[b].num_of_links--;
+ LOG(3, s, session[s].tunnel, "MPPP: Dropping member link: %d from bundle %d\n",s,b);
+ if(bundle[b].num_of_links == 0)
+ {
+ bundleclear(b);
+ LOG(3, s, session[s].tunnel, "MPPP: Kill bundle: %d (No remaining member links)\n",b);
+ }
+ else
+ {
+ // Adjust the members array to accommodate the change
+ uint8_t mem_num = 0;
+ // Use num_of_links here (not num_of_links-1), since it was just decremented above
+ if(bundle[b].members[bundle[b].num_of_links] != s)
+ {
+ uint8_t ml;
+ for(ml = 0; ml<bundle[b].num_of_links; ml++)
+ if(bundle[b].members[ml] == s)
+ {
+ mem_num = ml;
+ break;
+ }
+ bundle[b].members[mem_num] = bundle[b].members[bundle[b].num_of_links];
+ LOG(3, s, session[s].tunnel, "MPPP: Adjusted member links array\n");
+
+ // If the killed session was the first member of the bundle,
+ // the new first session must be stored in cache_ipmap,
+ // otherwise sessionbyip() returns 0 and sending stops working (see processipout).
+ if (mem_num == 0)
+ {
+ sessionidt new_s = bundle[b].members[0];
+
+ routed = 0;
+ // Add the route for this session.
+ for (r = 0; r < MAXROUTE && session[new_s].route[r].ip; r++)
+ {
+ int i, prefixlen;
+ in_addr_t ip;
+
+ prefixlen = session[new_s].route[r].prefixlen;
+ ip = session[new_s].route[r].ip;
+
+ if (!prefixlen) prefixlen = 32;
+ ip &= 0xffffffff << (32 - prefixlen); // Force the ip to be the first one in the route.
+
+ for (i = ip; i < ip+(1<<(32-prefixlen)) ; ++i)
+ cache_ipmap(i, new_s);
+ }
+ cache_ipmap(session[new_s].ip, new_s);
+
+ // IPV6 route
+ if (session[new_s].ipv6prefixlen)
+ cache_ipv6map(session[new_s].ipv6route, session[new_s].ipv6prefixlen, new_s);
+ }
+ }
+ }
+
+ cluster_send_bundle(b);
+ }
+ }
+
+ if (session[s].throttle_in || session[s].throttle_out) // Unthrottle if throttled.
+ throttle_session(s, 0, 0);
+
+ if (cdn_result)
+ {
+ if (session[s].tunnel == TUNNEL_ID_PPPOE)
+ {
+ pppoe_shutdown_session(s);
+ }
+ else
+ {
+ // Send CDN
+ controlt *c = controlnew(14); // sending CDN
+ if (cdn_error)
+ {
+ uint16_t buf[2];
+ buf[0] = htons(cdn_result);
+ buf[1] = htons(cdn_error);
+ controlb(c, 1, (uint8_t *)buf, 4, 1);
+ }
+ else
+ control16(c, 1, cdn_result, 1);
+
+ control16(c, 14, s, 1); // assigned session (our end)
+ controladd(c, session[s].far, session[s].tunnel); // send the message
+ }
+ }
+
+ // update filter refcounts
+ if (session[s].filter_in) ip_filters[session[s].filter_in - 1].used--;
+ if (session[s].filter_out) ip_filters[session[s].filter_out - 1].used--;
+
+ // clear PPP state
+ memset(&session[s].ppp, 0, sizeof(session[s].ppp));
+ sess_local[s].lcp.restart = 0;
+ sess_local[s].ipcp.restart = 0;
+ sess_local[s].ipv6cp.restart = 0;
+ sess_local[s].ccp.restart = 0;
+
+ cluster_send_session(s);
+}
+
+void sendipcp(sessionidt s, tunnelidt t)
+{
+ uint8_t buf[MAXETHER];
+ uint8_t *q;
+
+ CSTAT(sendipcp);
+ LOG(3, s, t, "IPCP: send ConfigReq\n");
+
+ if (!session[s].unique_id)
+ {
+ if (!++last_id) ++last_id; // skip zero
+ session[s].unique_id = last_id;
+ }
+
+ q = makeppp(buf, sizeof(buf), 0, 0, s, t, PPPIPCP, 0, 0, 0);
+ if (!q) return;
+
+ *q = ConfigReq;
+ q[1] = session[s].unique_id & 0xf; // ID, don't care, we only send one type of request
+ *(uint16_t *) (q + 2) = htons(10); // packet length
+ q[4] = 3; // ip address option
+ q[5] = 6; // option length
+ *(in_addr_t *) (q + 6) = config->peer_address ? config->peer_address :
+ config->iftun_address ? config->iftun_address :
+ my_address; // send my IP
+
+ tunnelsend(buf, 10 + (q - buf), t); // send it
+ restart_timer(s, ipcp);
+}
+
+void sendipv6cp(sessionidt s, tunnelidt t)
+{
+ uint8_t buf[MAXETHER];
+ uint8_t *q;
+
+ CSTAT(sendipv6cp);
+ LOG(3, s, t, "IPV6CP: send ConfigReq\n");
+
+ q = makeppp(buf, sizeof(buf), 0, 0, s, t, PPPIPV6CP, 0, 0, 0);
+ if (!q) return;
+
+ *q = ConfigReq;
+ q[1] = session[s].unique_id & 0xf; // ID, don't care, we
+ // only send one type
+ // of request
+ *(uint16_t *) (q + 2) = htons(14);
+ q[4] = 1; // interface identifier option
+ q[5] = 10; // option length
+ *(uint32_t *) (q + 6) = 0; // We'll be prefix::1
+ *(uint32_t *) (q + 10) = 0;
+ q[13] = 1;
+
+ tunnelsend(buf, 14 + (q - buf), t); // send it
+ restart_timer(s, ipv6cp);
+}
+
+static void sessionclear(sessionidt s)
+{
+ memset(&session[s], 0, sizeof(session[s]));
+ memset(&sess_local[s], 0, sizeof(sess_local[s]));
+ memset(&cli_session_actions[s], 0, sizeof(cli_session_actions[s]));
+
+ session[s].tunnel = T_FREE; // Mark it as free.
+ session[s].next = sessionfree;
+ sessionfree = s;
+}
+
+// kill a session now
+void sessionkill(sessionidt s, char *reason)
+{
+ CSTAT(sessionkill);
+
+ if (!session[s].opened) // not alive
+ return;
+
+ if (session[s].next)
+ {
+ LOG(0, s, session[s].tunnel, "Tried to kill a session with next pointer set (%u)\n", session[s].next);
+ return;
+ }
+
+ if (!session[s].die)
+ sessionshutdown(s, reason, CDN_ADMIN_DISC, TERM_ADMIN_RESET); // close radius/routes, etc.
+
+ if (sess_local[s].radius)
+ radiusclear(sess_local[s].radius, s); // cant send clean accounting data, session is killed
+
+#ifdef LAC
+ if (session[s].forwardtosession)
+ {
+ sessionidt sess = session[s].forwardtosession;
+ if (session[sess].forwardtosession == s)
+ {
+ // Shutdown the linked session also.
+ sessionshutdown(sess, reason, CDN_ADMIN_DISC, TERM_ADMIN_RESET);
+ }
+ }
+#endif
+
+ LOG(2, s, session[s].tunnel, "Kill session %d (%s): %s\n", s, session[s].user, reason);
+ sessionclear(s);
+ cluster_send_session(s);
+}
+
+static void tunnelclear(tunnelidt t)
+{
+ if (!t) return;
+ memset(&tunnel[t], 0, sizeof(tunnel[t]));
+ tunnel[t].state = TUNNELFREE;
+}
+
+static void bundleclear(bundleidt b)
+{
+ if (!b) return;
+ memset(&bundle[b], 0, sizeof(bundle[b]));
+ bundle[b].state = BUNDLEFREE;
+}
+
+// kill a tunnel now
+static void tunnelkill(tunnelidt t, char *reason)
+{
+ sessionidt s;
+ controlt *c;
+
+ CSTAT(tunnelkill);
+
+ tunnel[t].state = TUNNELDIE;
+
+ // free control messages
+ while ((c = tunnel[t].controls))
+ {
+ controlt * n = c->next;
+ tunnel[t].controls = n;
+ tunnel[t].controlc--;
+ c->next = controlfree;
+ controlfree = c;
+ }
+ // kill sessions
+ for (s = 1; s <= config->cluster_highest_sessionid ; ++s)
+ if (session[s].tunnel == t)
+ sessionkill(s, reason);
+
+ // free tunnel
+ tunnelclear(t);
+ LOG(1, 0, t, "Kill tunnel %u: %s\n", t, reason);
+ cli_tunnel_actions[t].action = 0;
+ cluster_send_tunnel(t);
+}
+
+// shut down a tunnel cleanly
+static void tunnelshutdown(tunnelidt t, char *reason, int result, int error, char *msg)
+{
+ sessionidt s;
+
+ CSTAT(tunnelshutdown);
+
+ if (!tunnel[t].last || !tunnel[t].far || tunnel[t].state == TUNNELFREE)
+ {
+ // never set up, can immediately kill
+ tunnelkill(t, reason);
+ return;
+ }
+ LOG(1, 0, t, "Shutting down tunnel %u (%s)\n", t, reason);
+
+ // close session
+ for (s = 1; s <= config->cluster_highest_sessionid ; ++s)
+ if (session[s].tunnel == t)
+ sessionshutdown(s, reason, CDN_NONE, TERM_ADMIN_RESET);
+
+ tunnel[t].state = TUNNELDIE;
+ tunnel[t].die = TIME + 700; // Clean up in 70 seconds
+ cluster_send_tunnel(t);
+ // TBA - should we wait for sessions to stop?
+ if (result)
+ {
+ controlt *c = controlnew(4); // sending StopCCN
+ if (error)
+ {
+ uint16_t buf[32];
+ int l = 4;
+ buf[0] = htons(result);
+ buf[1] = htons(error);
+ if (msg)
+ {
+ int m = strlen(msg);
+ if (m + 4 > sizeof(buf))
+ m = sizeof(buf) - 4;
+
+ memcpy(buf+2, msg, m);
+ l += m;
+ }
+
+ controlb(c, 1, (uint8_t *)buf, l, 1);
+ }
+ else
+ control16(c, 1, result, 1);
+
+ control16(c, 9, t, 1); // assigned tunnel (our end)
+ controladd(c, 0, t); // send the message
+ }
+}
+
+// read and process packet on tunnel (UDP)
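+// The L2TP header flags are checked bit by bit below: 0x80 = control
+// message, 0x40 = length field present, 0x08 = Ns/Nr present,
+// 0x02 = offset field present.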
+void processudp(uint8_t *buf, int len, struct sockaddr_in *addr)
+{
+ uint8_t *chapresponse = NULL;
+ uint16_t l = len, t = 0, s = 0, ns = 0, nr = 0;
+ uint8_t *p = buf + 2;
+
+
+ CSTAT(processudp);
+
+ udp_rx += len;
+ udp_rx_pkt++;
+ LOG_HEX(5, "UDP Data", buf, len);
+ STAT(tunnel_rx_packets);
+ INC_STAT(tunnel_rx_bytes, len);
+ if (len < 6)
+ {
+ LOG(1, 0, 0, "Short UDP, %d bytes\n", len);
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if ((buf[1] & 0x0F) != 2)
+ {
+ LOG(1, 0, 0, "Bad L2TP ver %d\n", buf[1] & 0x0F);
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if (*buf & 0x40)
+ { // length
+ l = ntohs(*(uint16_t *) p);
+ p += 2;
+ }
+ t = ntohs(*(uint16_t *) p);
+ p += 2;
+ s = ntohs(*(uint16_t *) p);
+ p += 2;
+ if (s >= MAXSESSION)
+ {
+ LOG(1, s, t, "Received UDP packet with invalid session ID\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if (t >= MAXTUNNEL)
+ {
+ LOG(1, s, t, "Received UDP packet with invalid tunnel ID\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if (t == TUNNEL_ID_PPPOE)
+ {
+ LOG(1, s, t, "Received UDP packet with tunnel ID reserved for pppoe\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if (*buf & 0x08)
+ { // ns/nr
+ ns = ntohs(*(uint16_t *) p);
+ p += 2;
+ nr = ntohs(*(uint16_t *) p);
+ p += 2;
+ }
+ if (*buf & 0x02)
+ { // offset
+ uint16_t o = ntohs(*(uint16_t *) p);
+ p += o + 2;
+ }
+ if ((p - buf) > l)
+ {
+ LOG(1, s, t, "Bad length %d>%d\n", (int) (p - buf), l);
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ l -= (p - buf);
+
+ // used to time out old tunnels
+ if (t && tunnel[t].state == TUNNELOPEN)
+ tunnel[t].lastrec = time_now;
+
+ if (*buf & 0x80)
+ { // control
+ uint16_t message = 0xFFFF; // message type
+ uint8_t fatal = 0;
+ uint8_t mandatory = 0;
+ uint16_t asession = 0; // assigned session
+ uint32_t amagic = 0; // magic number
+ uint8_t aflags = 0; // flags from last LCF
+ uint16_t version = 0x0100; // protocol version (we handle 0.0 as well and send that back just in case)
+ char called[MAXTEL] = ""; // called number
+ char calling[MAXTEL] = ""; // calling number
+
+ if (!config->cluster_iam_master)
+ {
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+
+ // control messages must have bits 0x80|0x40|0x08
+ // (type, length and sequence) set, and bits 0x02|0x01
+ // (offset and priority) clear
+ if ((*buf & 0xCB) != 0xC8)
+ {
+ LOG(1, s, t, "Bad control header %02X\n", *buf);
+ STAT(tunnel_rx_errors);
+ return;
+ }
+
+ // check for duplicate tunnel open message
+ if (!t && ns == 0)
+ {
+ int i;
+
+ //
+ // Is this a duplicate of the first packet? (SCCRQ)
+ //
+ for (i = 1; i <= config->cluster_highest_tunnelid ; ++i)
+ {
+ if (tunnel[i].state != TUNNELOPENING ||
+ tunnel[i].ip != ntohl(*(in_addr_t *) & addr->sin_addr) ||
+ tunnel[i].port != ntohs(addr->sin_port) )
+ continue;
+ t = i;
+ LOG(3, s, t, "Duplicate SCCRQ?\n");
+ break;
+ }
+ }
+
+ LOG(3, s, t, "Control message (%d bytes): (unacked %d) l-ns %u l-nr %u r-ns %u r-nr %u\n",
+ l, tunnel[t].controlc, tunnel[t].ns, tunnel[t].nr, ns, nr);
+
+ // if no tunnel specified, assign one
+ if (!t)
+ {
+ if (!(t = new_tunnel()))
+ {
+ LOG(1, 0, 0, "No more tunnels\n");
+ STAT(tunnel_overflow);
+ return;
+ }
+ tunnelclear(t);
+ tunnel[t].ip = ntohl(*(in_addr_t *) & addr->sin_addr);
+ tunnel[t].port = ntohs(addr->sin_port);
+ tunnel[t].window = 4; // default window
+ STAT(tunnel_created);
+ LOG(1, 0, t, " New tunnel from %s:%u ID %u\n",
+ fmtaddr(htonl(tunnel[t].ip), 0), tunnel[t].port, t);
+ }
+
+ // If the 'ns' just received is not the 'nr' we're
+ // expecting, just send an ack and drop it.
+ //
+ // if 'ns' is less, then we got a retransmitted packet.
+ // if 'ns' is greater, then we missed a packet. Either way
+ // we should ignore it.
+ if (ns != tunnel[t].nr)
+ {
+ // is this the sequence we were expecting?
+ STAT(tunnel_rx_errors);
+ LOG(1, 0, t, " Out of sequence tunnel %u, (%u is not the expected %u)\n",
+ t, ns, tunnel[t].nr);
+
+ if (l) // Is this not a ZLB?
+ controlnull(t);
+ return;
+ }
+
+ // check sequence of this message
+ {
+ int skip = tunnel[t].window; // track how many in-window packets are still in queue
+ // some to clear maybe?
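+ // (the & 0x8000 test is a wrapping 16-bit comparison: free queued
+ // messages whose sequence number is before the peer's 'nr', i.e.
+ // messages the peer has already acknowledged)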
+ while (tunnel[t].controlc > 0 && (((tunnel[t].ns - tunnel[t].controlc) - nr) & 0x8000))
+ {
+ controlt *c = tunnel[t].controls;
+ tunnel[t].controls = c->next;
+ tunnel[t].controlc--;
+ c->next = controlfree;
+ controlfree = c;
+ skip--;
+ tunnel[t].try = 0; // we have progress
+ }
+
+ // receiver advance (do here so quoted correctly in any sends below)
+ if (l) tunnel[t].nr = (ns + 1);
+ if (skip < 0) skip = 0;
+ if (skip < tunnel[t].controlc)
+ {
+ // some control packets can now be sent that were previously stuck out of window
+ int tosend = tunnel[t].window - skip;
+ controlt *c = tunnel[t].controls;
+ while (c && skip)
+ {
+ c = c->next;
+ skip--;
+ }
+ while (c && tosend)
+ {
+ tunnel[t].try = 0; // first send
+ tunnelsend(c->buf, c->length, t);
+ c = c->next;
+ tosend--;
+ }
+ }
+ if (!tunnel[t].controlc)
+ tunnel[t].retry = 0; // caught up
+ }
+ if (l)
+ { // if not a null message
+ int result = 0;
+ int error = 0;
+ char *msg = 0;
+
+ // Default disconnect cause/message on receipt of CDN. Set to
+ // more specific value from attribute 1 (result code) or 46
+ // (disconnect cause) if present below.
+ int disc_cause_set = 0;
+ int disc_cause = TERM_NAS_REQUEST;
+ char const *disc_reason = "Closed (Received CDN).";
+
+ // process AVPs
+ while (l && !(fatal & 0x80)) // 0x80 = mandatory AVP
+ {
+ uint16_t n = (ntohs(*(uint16_t *) p) & 0x3FF);
+ uint8_t *b = p;
+ uint8_t flags = *p;
+ uint16_t mtype;
+
+ if (n > l)
+ {
+ LOG(1, s, t, "Invalid length in AVP\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ p += n; // next
+ l -= n;
+ if (flags & 0x3C) // reserved bits, should be clear
+ {
+ LOG(1, s, t, "Unrecognised AVP flags %02X\n", *b);
+ fatal = flags;
+ result = 2; // general error
+ error = 3; // reserved field non-zero
+ msg = 0;
+ continue; // next
+ }
+ b += 2;
+ if (*(uint16_t *) (b))
+ {
+ LOG(2, s, t, "Unknown AVP vendor %u\n", ntohs(*(uint16_t *) (b)));
+ fatal = flags;
+ result = 2; // general error
+ error = 6; // generic vendor-specific error
+ msg = "unsupported vendor-specific";
+ continue; // next
+ }
+ b += 2;
+ mtype = ntohs(*(uint16_t *) (b));
+ b += 2;
+ n -= 6;
+
+ if (flags & 0x40)
+ {
+ uint16_t orig_len;
+
+ // handle hidden AVPs
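+ // Hidden AVPs (the 0x40 flag) are scrambled using the shared
+ // l2tp_secret and the random vector from AVP 36; once unhidden,
+ // the first two bytes give the original value length.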
+ if (!*config->l2tp_secret)
+ {
+ LOG(1, s, t, "Hidden AVP requested, but no L2TP secret.\n");
+ fatal = flags;
+ result = 2; // general error
+ error = 6; // generic vendor-specific error
+ msg = "secret not specified";
+ continue;
+ }
+ if (!session[s].random_vector_length)
+ {
+ LOG(1, s, t, "Hidden AVP requested, but no random vector.\n");
+ fatal = flags;
+ result = 2; // general error
+ error = 6; // generic
+ msg = "no random vector";
+ continue;
+ }
+ if (n < 8)
+ {
+ LOG(2, s, t, "Short hidden AVP.\n");
+ fatal = flags;
+ result = 2; // general error
+ error = 2; // length is wrong
+ msg = 0;
+ continue;
+ }
+
+ // Unhide the AVP
+ unhide_value(b, n, mtype, session[s].random_vector, session[s].random_vector_length);
+
+ orig_len = ntohs(*(uint16_t *) b);
+ if (orig_len > n + 2)
+ {
+ LOG(1, s, t, "Original length %d too long in hidden AVP of length %d; wrong secret?\n",
+ orig_len, n);
+
+ fatal = flags;
+ result = 2; // general error
+ error = 2; // length is wrong
+ msg = 0;
+ continue;
+ }
+
+ b += 2;
+ n = orig_len;
+ }
+
+ LOG(4, s, t, " AVP %u (%s) len %d%s%s\n", mtype, l2tp_avp_name(mtype), n,
+ flags & 0x40 ? ", hidden" : "", flags & 0x80 ? ", mandatory" : "");
+
+ switch (mtype)
+ {
+ case 0: // message type
+ message = ntohs(*(uint16_t *) b);
+ mandatory = flags & 0x80;
+ LOG(4, s, t, " Message type = %u (%s)\n", message, l2tp_code(message));
+ break;
+ case 1: // result code
+ {
+ uint16_t rescode = ntohs(*(uint16_t *) b);
+ char const *resdesc = "(unknown)";
+ char const *errdesc = NULL;
+ int cause = 0;
+
+ if (message == 4)
+ { /* StopCCN */
+ resdesc = l2tp_stopccn_result_code(rescode);
+ cause = TERM_LOST_SERVICE;
+ }
+ else if (message == 14)
+ { /* CDN */
+ resdesc = l2tp_cdn_result_code(rescode);
+ if (rescode == 1)
+ cause = TERM_LOST_CARRIER;
+ else
+ cause = TERM_ADMIN_RESET;
+ }
+
+ LOG(4, s, t, " Result Code %u: %s\n", rescode, resdesc);
+ if (n >= 4)
+ {
+ uint16_t errcode = ntohs(*(uint16_t *)(b + 2));
+ errdesc = l2tp_error_code(errcode);
+ LOG(4, s, t, " Error Code %u: %s\n", errcode, errdesc);
+ }
+ if (n > 4)
+ LOG(4, s, t, " Error String: %.*s\n", n-4, b+4);
+
+ if (cause && disc_cause_set < mtype) // take cause from attrib 46 in preference
+ {
+ disc_cause_set = mtype;
+ disc_reason = errdesc ? errdesc : resdesc;
+ disc_cause = cause;
+ }
+
+ break;
+ }
+ break;
+ case 2: // protocol version
+ {
+ version = ntohs(*(uint16_t *) (b));
+ LOG(4, s, t, " Protocol version = %u\n", version);
+ if (version && version != 0x0100)
+ { // allow 0.0 and 1.0
+ LOG(1, s, t, " Bad protocol version %04X\n", version);
+ fatal = flags;
+ result = 5; // unsupported protocol version
+ error = 0x0100; // supported version
+ msg = 0;
+ continue; // next
+ }
+ }
+ break;
+ case 3: // framing capabilities
+ break;
+ case 4: // bearer capabilities
+ break;
+ case 5: // tie breaker
+ // We never open tunnels, so we don't care about tie breakers
+ continue;
+ case 6: // firmware revision
+ break;
+ case 7: // host name
+ memset(tunnel[t].hostname, 0, sizeof(tunnel[t].hostname));
+ memcpy(tunnel[t].hostname, b, (n < sizeof(tunnel[t].hostname)) ? n : sizeof(tunnel[t].hostname) - 1);
+ LOG(4, s, t, " Tunnel hostname = \"%s\"\n", tunnel[t].hostname);
+ // TBA - to send to RADIUS
+ break;
+ case 8: // vendor name
+ memset(tunnel[t].vendor, 0, sizeof(tunnel[t].vendor));
+ memcpy(tunnel[t].vendor, b, (n < sizeof(tunnel[t].vendor)) ? n : sizeof(tunnel[t].vendor) - 1);
+ LOG(4, s, t, " Vendor name = \"%s\"\n", tunnel[t].vendor);
+ break;
+ case 9: // assigned tunnel
+ tunnel[t].far = ntohs(*(uint16_t *) (b));
+ LOG(4, s, t, " Remote tunnel id = %u\n", tunnel[t].far);
+ break;
+ case 10: // rx window
+ tunnel[t].window = ntohs(*(uint16_t *) (b));
+ if (!tunnel[t].window)
+ tunnel[t].window = 1; // window of 0 is silly
+ LOG(4, s, t, " rx window = %u\n", tunnel[t].window);
+ break;
+ case 11: // Challenge
+ {
+ LOG(4, s, t, " LAC requested CHAP authentication for tunnel\n");
+ build_chap_response(b, 2, n, &chapresponse);
+ }
+ break;
+ case 13: // Response
+#ifdef LAC
+ if (tunnel[t].isremotelns)
+ {
+ chapresponse = calloc(17, 1);
+ memcpy(chapresponse, b, (n < 17) ? n : 16);
+ LOG(3, s, t, "received challenge response from REMOTE LNS\n");
+ }
+ else
+#endif /* LAC */
+ // Why did they send a response? We never challenge.
+ LOG(2, s, t, " received unexpected challenge response\n");
+ break;
+
+ case 14: // assigned session
+ asession = session[s].far = ntohs(*(uint16_t *) (b));
+ LOG(4, s, t, " assigned session = %u\n", asession);
+ break;
+ case 15: // call serial number
+ LOG(4, s, t, " call serial number = %u\n", ntohl(*(uint32_t *)b));
+ break;
+ case 18: // bearer type
+ LOG(4, s, t, " bearer type = %u\n", ntohl(*(uint32_t *)b));
+ // TBA - for RADIUS
+ break;
+ case 19: // framing type
+ LOG(4, s, t, " framing type = %u\n", ntohl(*(uint32_t *)b));
+ // TBA
+ break;
+ case 21: // called number
+ memset(called, 0, sizeof(called));
+ memcpy(called, b, (n < sizeof(called)) ? n : sizeof(called) - 1);
+ LOG(4, s, t, " Called <%s>\n", called);
+ break;
+ case 22: // calling number
+ memset(calling, 0, sizeof(calling));
+ memcpy(calling, b, (n < sizeof(calling)) ? n : sizeof(calling) - 1);
+ LOG(4, s, t, " Calling <%s>\n", calling);
+ break;
+ case 23: // subtype
+ break;
+ case 24: // tx connect speed
+ if (n == 4)
+ {
+ session[s].tx_connect_speed = ntohl(*(uint32_t *)b);
+ }
+ else
+ {
+ // AS5300s send connect speed as a string
+ char tmp[30];
+ memset(tmp, 0, sizeof(tmp));
+ memcpy(tmp, b, (n < sizeof(tmp)) ? n : sizeof(tmp) - 1);
+ session[s].tx_connect_speed = atol(tmp);
+ }
+ LOG(4, s, t, " TX connect speed <%u>\n", session[s].tx_connect_speed);
+ break;
+ case 38: // rx connect speed
+ if (n == 4)
+ {
+ session[s].rx_connect_speed = ntohl(*(uint32_t *)b);
+ }
+ else
+ {
+ // AS5300s send connect speed as a string
+ char tmp[30];
+ memset(tmp, 0, sizeof(tmp));
+ memcpy(tmp, b, (n < sizeof(tmp)) ? n : sizeof(tmp) - 1);
+ session[s].rx_connect_speed = atol(tmp);
+ }
+ LOG(4, s, t, " RX connect speed <%u>\n", session[s].rx_connect_speed);
+ break;
+ case 25: // Physical Channel ID
+ {
+ uint32_t tmp = ntohl(*(uint32_t *) b);
+ LOG(4, s, t, " Physical Channel ID <%X>\n", tmp);
+ break;
+ }
+ case 29: // Proxy Authentication Type
+ {
+ uint16_t atype = ntohs(*(uint16_t *)b);
+ LOG(4, s, t, " Proxy Auth Type %u (%s)\n", atype, ppp_auth_type(atype));
+ break;
+ }
+ case 30: // Proxy Authentication Name
+ {
+ char authname[64];
+ memset(authname, 0, sizeof(authname));
+ memcpy(authname, b, (n < sizeof(authname)) ? n : sizeof(authname) - 1);
+ LOG(4, s, t, " Proxy Auth Name (%s)\n",
+ authname);
+ break;
+ }
+ case 31: // Proxy Authentication Challenge
+ {
+ LOG(4, s, t, " Proxy Auth Challenge\n");
+ break;
+ }
+ case 32: // Proxy Authentication ID
+ {
+ uint16_t authid = ntohs(*(uint16_t *)(b));
+ LOG(4, s, t, " Proxy Auth ID (%u)\n", authid);
+ break;
+ }
+ case 33: // Proxy Authentication Response
+ LOG(4, s, t, " Proxy Auth Response\n");
+ break;
+ case 27: // last sent lcp
+ { // find magic number
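+ // Walk the LCP options (type, length, value) looking for
+ // Magic-Number (5), PFC (7) and ACFC (8).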
+ uint8_t *p = b, *e = p + n;
+ while (p + 1 < e && p[1] && p + p[1] <= e)
+ {
+ if (*p == 5 && p[1] == 6) // Magic-Number
+ amagic = ntohl(*(uint32_t *) (p + 2));
+ else if (*p == 7) // Protocol-Field-Compression
+ aflags |= SESSION_PFC;
+ else if (*p == 8) // Address-and-Control-Field-Compression
+ aflags |= SESSION_ACFC;
+ p += p[1];
+ }
+ }
+ break;
+ case 28: // last recv lcp confreq
+ break;
+ case 26: // Initial Received LCP CONFREQ
+ break;
+ case 39: // seq required - we control it as an LNS anyway...
+ break;
+ case 36: // Random Vector
+ LOG(4, s, t, " Random Vector received. Enabled AVP Hiding.\n");
+ memset(session[s].random_vector, 0, sizeof(session[s].random_vector));
+ if (n > sizeof(session[s].random_vector))
+ n = sizeof(session[s].random_vector);
+ memcpy(session[s].random_vector, b, n);
+ session[s].random_vector_length = n;
+ break;
+ case 46: // ppp disconnect cause
+ if (n >= 5)
+ {
+ uint16_t code = ntohs(*(uint16_t *) b);
+ uint16_t proto = ntohs(*(uint16_t *) (b + 2));
+ uint8_t dir = *(b + 4);
+
+ LOG(4, s, t, " PPP disconnect cause "
+ "(code=%u, proto=%04X, dir=%u, msg=\"%.*s\")\n",
+ code, proto, dir, n - 5, b + 5);
+
+ disc_cause_set = mtype;
+
+ switch (code)
+ {
+ case 1: // admin disconnect
+ disc_cause = TERM_ADMIN_RESET;
+ disc_reason = "Administrative disconnect";
+ break;
+ case 3: // lcp terminate
+ if (dir != 2) break; // 1=peer (LNS), 2=local (LAC)
+ disc_cause = TERM_USER_REQUEST;
+ disc_reason = "Normal disconnection";
+ break;
+ case 4: // compulsory encryption unavailable
+ if (dir != 1) break; // 1=refused by peer, 2=local
+ disc_cause = TERM_USER_ERROR;
+ disc_reason = "Compulsory encryption refused";
+ break;
+ case 5: // lcp: fsm timeout
+ disc_cause = TERM_PORT_ERROR;
+ disc_reason = "LCP: FSM timeout";
+ break;
+ case 6: // lcp: no recognisable lcp packets received
+ disc_cause = TERM_PORT_ERROR;
+ disc_reason = "LCP: no recognisable LCP packets";
+ break;
+ case 7: // lcp: magic-no error (possibly looped back)
+ disc_cause = TERM_PORT_ERROR;
+ disc_reason = "LCP: magic-no error (possible loop)";
+ break;
+ case 8: // lcp: echo request timeout
+ disc_cause = TERM_PORT_ERROR;
+ disc_reason = "LCP: echo request timeout";
+ break;
+ case 13: // auth: fsm timeout
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = "Authentication: FSM timeout";
+ break;
+ case 15: // auth: unacceptable auth protocol
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = "Unacceptable authentication protocol";
+ break;
+ case 16: // auth: authentication failed
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = "Authentication failed";
+ break;
+ case 17: // ncp: fsm timeout
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = "NCP: FSM timeout";
+ break;
+ case 18: // ncp: no ncps available
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = "NCP: no NCPs available";
+ break;
+ case 19: // ncp: failure to converge on acceptable address
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = (dir == 1)
+ ? "NCP: too many Configure-Naks received from peer"
+ : "NCP: too many Configure-Naks sent to peer";
+ break;
+ case 20: // ncp: user not permitted to use any address
+ disc_cause = TERM_SERVICE_UNAVAILABLE;
+ disc_reason = (dir == 1)
+ ? "NCP: local link address not acceptable to peer"
+ : "NCP: remote link address not acceptable";
+ break;
+ }
+ }
+ break;
+ default:
+ {
+ static char e[] = "unknown AVP 0xXXXX";
+ LOG(2, s, t, " Unknown AVP type %u\n", mtype);
+ fatal = flags;
+ result = 2; // general error
+ error = 8; // unknown mandatory AVP
+ sprintf((msg = e) + 14, "%04x", mtype);
+ continue; // next
+ }
+ }
+ }
+ // process message
+ if (fatal & 0x80)
+ tunnelshutdown(t, "Invalid mandatory AVP", result, error, msg);
+ else
+ switch (message)
+ {
+ case 1: // SCCRQ - Start Control Connection Request
+ tunnel[t].state = TUNNELOPENING;
+ LOG(3, s, t, "Received SCCRQ\n");
+ if (main_quit != QUIT_SHUTDOWN)
+ {
+ LOG(3, s, t, "sending SCCRP\n");
+ controlt *c = controlnew(2); // sending SCCRP
+ control16(c, 2, version, 1); // protocol version
+ control32(c, 3, 3, 1); // framing
+ controls(c, 7, hostname, 1); // host name
+ if (chapresponse) controlb(c, 13, chapresponse, 16, 1); // Challenge response
+ control16(c, 9, t, 1); // assigned tunnel
+ controladd(c, 0, t); // send the reply
+ }
+ else
+ {
+ tunnelshutdown(t, "Shutting down", 6, 0, 0);
+ }
+ break;
+ case 2: // SCCRP
+ tunnel[t].state = TUNNELOPEN;
+ tunnel[t].lastrec = time_now;
+#ifdef LAC
+ LOG(3, s, t, "Received SCCRP\n");
+ if (main_quit != QUIT_SHUTDOWN)
+ {
+ if (tunnel[t].isremotelns && chapresponse)
+ {
+ hasht hash;
+
+ lac_calc_rlns_auth(t, 2, hash); // id = 2 (SCCRP)
+ // check authenticator
+ if (memcmp(hash, chapresponse, 16) == 0)
+ {
+ LOG(3, s, t, "sending SCCCN to REMOTE LNS\n");
+ controlt *c = controlnew(3); // sending SCCCN
+ controls(c, 7, hostname, 1); // host name
+ controls(c, 8, Vendor_name, 1); // Vendor name
+ control16(c, 2, version, 1); // protocol version
+ control32(c, 3, 3, 1); // framing Capabilities
+ control16(c, 9, t, 1); // assigned tunnel
+ controladd(c, 0, t); // send
+ }
+ else
+ {
+ tunnelshutdown(t, "Bad chap response from REMOTE LNS", 4, 0, 0);
+ }
+ }
+ }
+ else
+ {
+ tunnelshutdown(t, "Shutting down", 6, 0, 0);
+ }
+#endif /* LAC */
+ break;
+ case 3: // SCCCN
+ LOG(3, s, t, "Received SCCCN\n");
+ tunnel[t].state = TUNNELOPEN;
+ tunnel[t].lastrec = time_now;
+ controlnull(t); // ack
+ break;
+ case 4: // StopCCN
+ LOG(3, s, t, "Received StopCCN\n");
+ controlnull(t); // ack
+ tunnelshutdown(t, "Stopped", 0, 0, 0); // Shut down cleanly
+ break;
+ case 6: // HELLO
+ LOG(3, s, t, "Received HELLO\n");
+ controlnull(t); // simply ACK
+ break;
+ case 7: // OCRQ
+ // TBA
+ LOG(3, s, t, "Received OCRQ\n");
+ break;
+ case 8: // OCRO
+ // TBA
+ LOG(3, s, t, "Received OCRO\n");
+ break;
+ case 9: // OCCN
+ // TBA
+ LOG(3, s, t, "Received OCCN\n");
+ break;
+ case 10: // ICRQ
+ LOG(3, s, t, "Received ICRQ\n");
+ if (sessionfree && main_quit != QUIT_SHUTDOWN)
+ {
+ controlt *c = controlnew(11); // ICRP
+
+ LOG(3, s, t, "Sending ICRP\n");
+
+ s = sessionfree;
+ sessionfree = session[s].next;
+ memset(&session[s], 0, sizeof(session[s]));
+
+ if (s > config->cluster_highest_sessionid)
+ config->cluster_highest_sessionid = s;
+
+ session[s].opened = time_now;
+ session[s].tunnel = t;
+ session[s].far = asession;
+ session[s].last_packet = session[s].last_data = time_now;
+ LOG(3, s, t, "New session (%u/%u)\n", tunnel[t].far, session[s].far);
+ control16(c, 14, s, 1); // assigned session
+ controladd(c, asession, t); // send the reply
+
+ strncpy(session[s].called, called, sizeof(session[s].called) - 1);
+ strncpy(session[s].calling, calling, sizeof(session[s].calling) - 1);
+
+ session[s].ppp.phase = Establish;
+ session[s].ppp.lcp = Starting;
+
+ STAT(session_created);
+ break;
+ }
+
+ {
+ controlt *c = controlnew(14); // CDN
+ LOG(3, s, t, "Sending CDN\n");
+ if (!sessionfree)
+ {
+ STAT(session_overflow);
+ LOG(1, 0, t, "No free sessions\n");
+ control16(c, 1, 4, 0); // temporary lack of resources
+ }
+ else
+ control16(c, 1, 2, 7); // shutting down, try another
+
+ controladd(c, asession, t); // send the message
+ }
+ return;
+ case 11: // ICRP
+#ifdef LAC
+ LOG(3, s, t, "Received ICRP\n");
+ if (session[s].forwardtosession)
+ {
+ controlt *c = controlnew(12); // ICCN
+
+ session[s].opened = time_now;
+ session[s].tunnel = t;
+ session[s].far = asession;
+ session[s].last_packet = session[s].last_data = time_now;
+
+ control32(c, 19, 1, 1); // Framing Type
+ control32(c, 24, 10000000, 1); // Tx Connect Speed
+ controladd(c, asession, t); // send the message
+ LOG(3, s, t, "Sending ICCN\n");
+ }
+#endif /* LAC */
+ break;
+ case 12: // ICCN
+ LOG(3, s, t, "Received ICCN\n");
+ if (amagic == 0) amagic = time_now;
+ session[s].magic = amagic; // set magic number
+ session[s].flags = aflags; // set flags received
+ session[s].mru = PPPoE_MRU; // default
+ controlnull(t); // ack
+
+ // start LCP
+ sess_local[s].lcp_authtype = config->radius_authprefer;
+ sess_local[s].ppp_mru = MRU;
+
+ // Set multilink options before sending initial LCP packet
+ sess_local[s].mp_mrru = 1614;
+ sess_local[s].mp_epdis = ntohl(config->iftun_address ? config->iftun_address : my_address);
+
+ sendlcp(s, t);
+ change_state(s, lcp, RequestSent);
+ break;
+
+ case 14: // CDN
+ LOG(3, s, t, "Received CDN\n");
+ controlnull(t); // ack
+ sessionshutdown(s, disc_reason, CDN_NONE, disc_cause);
+ break;
+ case 0xFFFF:
+ LOG(1, s, t, "Missing message type\n");
+ break;
+ default:
+ STAT(tunnel_rx_errors);
+ if (mandatory)
+ tunnelshutdown(t, "Unknown message type", 2, 6, "unknown message type");
+ else
+ LOG(1, s, t, "Unknown message type %u\n", message);
+ break;
+ }
+ if (chapresponse) free(chapresponse);
+ cluster_send_tunnel(t);
+ }
+ else
+ {
+ LOG(4, s, t, " Got a ZLB ack\n");
+ }
+ }
+ else
+ { // data
+ uint16_t proto;
+
+ LOG_HEX(5, "Receive Tunnel Data", p, l);
+ if (l > 2 && p[0] == 0xFF && p[1] == 0x03)
+ { // HDLC address header, discard
+ p += 2;
+ l -= 2;
+ }
+ if (l < 2)
+ {
+ LOG(1, s, t, "Short ppp length %d\n", l);
+ STAT(tunnel_rx_errors);
+ return;
+ }
+ if (*p & 1)
+ {
+ proto = *p++;
+ l--;
+ }
+ else
+ {
+ proto = ntohs(*(uint16_t *) p);
+ p += 2;
+ l -= 2;
+ }
+
+#ifdef LAC
+ if (session[s].forwardtosession)
+ {
+ LOG(5, s, t, "Forwarding data session to session %u\n", session[s].forwardtosession);
+ // Forward to LAC/BAS or Remote LNS session
+ lac_session_forward(buf, len, s, proto, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+ else if (config->auth_tunnel_change_addr_src)
+ {
+ if (tunnel[t].ip != ntohl(addr->sin_addr.s_addr) &&
+ tunnel[t].port == ntohs(addr->sin_port))
+ {
+ // The remote BAS is a clustered l2tpns server and the source IP has changed
+ LOG(5, s, t, "The tunnel IP source (%s) has changed by new IP (%s)\n",
+ fmtaddr(htonl(tunnel[t].ip), 0), fmtaddr(addr->sin_addr.s_addr, 0));
+
+ tunnel[t].ip = ntohl(addr->sin_addr.s_addr);
+ }
+ }
+#endif /* LAC */
+
+ if (s && !session[s].opened) // Is something wrong??
+ {
+ if (!config->cluster_iam_master)
+ {
+ // Pass it off to the master to deal with..
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+
+ LOG(1, s, t, "UDP packet contains session which is not opened. Dropping packet.\n");
+ STAT(tunnel_rx_errors);
+ return;
+ }
+
+ if (proto == PPPPAP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processpap(s, t, p, l);
+ }
+ else if (proto == PPPCHAP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processchap(s, t, p, l);
+ }
+ else if (proto == PPPLCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processlcp(s, t, p, l);
+ }
+ else if (proto == PPPIPCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processipcp(s, t, p, l);
+ }
+ else if (proto == PPPIPV6CP && config->ipv6_prefix.s6_addr[0])
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processipv6cp(s, t, p, l);
+ }
+ else if (proto == PPPCCP)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ processccp(s, t, p, l);
+ }
+ else if (proto == PPPIP)
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (session[s].walled_garden && !config->cluster_iam_master)
+ {
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+
+ processipin(s, t, p, l);
+ }
+ else if (proto == PPPMP)
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (!config->cluster_iam_master)
+ {
+ // The fragments reconstruction is managed by the Master.
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+
+ processmpin(s, t, p, l);
+ }
+ else if (proto == PPPIPV6 && config->ipv6_prefix.s6_addr[0])
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "Session %u is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ if (session[s].walled_garden && !config->cluster_iam_master)
+ {
+ master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port);
+ return;
+ }
+
+ processipv6in(s, t, p, l);
+ }
+ else if (session[s].ppp.lcp == Opened)
+ {
+ session[s].last_packet = time_now;
+ if (!config->cluster_iam_master) { master_forward_packet(buf, len, addr->sin_addr.s_addr, addr->sin_port); return; }
+ protoreject(s, t, p, l, proto);
+ }
+ else
+ {
+ LOG(2, s, t, "Unknown PPP protocol 0x%04X received in LCP %s state\n",
+ proto, ppp_state(session[s].ppp.lcp));
+ }
+ }
+}
+
+// read and process packet on tun
+// (i.e. this routine writes to buf[-8]).
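+// The first four bytes of the frame are the tun packet information
+// header; the protocol word at offset 2 selects IPv4 or IPv6 handling.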
+static void processtun(uint8_t * buf, int len)
+{
+ LOG_HEX(5, "Receive TUN Data", buf, len);
+ STAT(tun_rx_packets);
+ INC_STAT(tun_rx_bytes, len);
+
+ CSTAT(processtun);
+
+ eth_rx_pkt++;
+ eth_rx += len;
+ if (len < 22)
+ {
+ LOG(1, 0, 0, "Short tun packet %d bytes\n", len);
+ STAT(tun_rx_errors);
+ return;
+ }
+
+ if (*(uint16_t *) (buf + 2) == htons(PKTIP)) // IPv4
+ processipout(buf, len);
+ else if (*(uint16_t *) (buf + 2) == htons(PKTIPV6) // IPV6
+ && config->ipv6_prefix.s6_addr[0])
+ processipv6out(buf, len);
+
+ // Else discard.
+}
+
+// Handle retries, timeouts. Runs every 1/10th sec, want to ensure
+// that we look at the whole of the tunnel, radius and session tables
+// every second
+static void regular_cleanups(double period)
+{
+ // Next tunnel, radius and session to check for actions on.
+ static tunnelidt t = 0;
+ static int r = 0;
+ static sessionidt s = 0;
+
+ int t_actions = 0;
+ int r_actions = 0;
+ int s_actions = 0;
+
+ int t_slice;
+ int r_slice;
+ int s_slice;
+
+ int i;
+ int a;
+
+ // divide up tables into slices based on the last run
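+ // 'period' is the fraction of a second since the last run, so each
+ // table is scanned in full roughly once per second.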
+ t_slice = config->cluster_highest_tunnelid * period;
+ r_slice = (MAXRADIUS - 1) * period;
+ s_slice = config->cluster_highest_sessionid * period;
+
+ if (t_slice < 1)
+ t_slice = 1;
+ else if (t_slice > config->cluster_highest_tunnelid)
+ t_slice = config->cluster_highest_tunnelid;
+
+ if (r_slice < 1)
+ r_slice = 1;
+ else if (r_slice > (MAXRADIUS - 1))
+ r_slice = MAXRADIUS - 1;
+
+ if (s_slice < 1)
+ s_slice = 1;
+ else if (s_slice > config->cluster_highest_sessionid)
+ s_slice = config->cluster_highest_sessionid;
+
+ LOG(4, 0, 0, "Begin regular cleanup (last %f seconds ago)\n", period);
+
+ for (i = 0; i < t_slice; i++)
+ {
+ t++;
+ if (t > config->cluster_highest_tunnelid)
+ t = 1;
+
+ if (t == TUNNEL_ID_PPPOE)
+ continue;
+
+ // check for expired tunnels
+ if (tunnel[t].die && tunnel[t].die <= TIME)
+ {
+ STAT(tunnel_timeout);
+ tunnelkill(t, "Expired");
+ t_actions++;
+ continue;
+ }
+ // check for message resend
+ if (tunnel[t].retry && tunnel[t].controlc)
+ {
+ // resend pending messages as timeout on reply
+ if (tunnel[t].retry <= TIME)
+ {
+ controlt *c = tunnel[t].controls;
+ uint16_t w = tunnel[t].window;
+ tunnel[t].try++; // another try
+ if (tunnel[t].try > 5)
+ tunnelkill(t, "Timeout on control message"); // game over
+ else
+ while (c && w--)
+ {
+ tunnelsend(c->buf, c->length, t);
+ c = c->next;
+ }
+
+ t_actions++;
+ }
+ }
+ // Send hello
+ if (tunnel[t].state == TUNNELOPEN && !tunnel[t].controlc && (time_now - tunnel[t].lastrec) > 60)
+ {
+ controlt *c = controlnew(6); // sending HELLO
+ controladd(c, 0, t); // send the message
+ LOG(3, 0, t, "Sending HELLO message\n");
+ t_actions++;
+ }
+
+ // Check for tunnel changes requested from the CLI
+ if ((a = cli_tunnel_actions[t].action))
+ {
+ cli_tunnel_actions[t].action = 0;
+ if (a & CLI_TUN_KILL)
+ {
+ LOG(2, 0, t, "Dropping tunnel by CLI\n");
+ tunnelshutdown(t, "Requested by administrator", 1, 0, 0);
+ t_actions++;
+ }
+ }
+ }
+
+ for (i = 0; i < r_slice; i++)
+ {
+ r++;
+ if (r >= MAXRADIUS)
+ r = 1;
+
+ if (!radius[r].state)
+ continue;
+
+ if (radius[r].retry <= TIME)
+ {
+ radiusretry(r);
+ r_actions++;
+ }
+ }
+
+ for (i = 0; i < s_slice; i++)
+ {
+ s++;
+ if (s > config->cluster_highest_sessionid)
+ s = 1;
+
+ if (!session[s].opened) // Session isn't in use
+ continue;
+
+ // check for expired sessions
+ if (session[s].die)
+ {
+ if (session[s].die <= TIME)
+ {
+ sessionkill(s, "Expired");
+ s_actions++;
+ }
+ continue;
+ }
+
+ // PPP timeouts
+ if (sess_local[s].lcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.lcp;
+ switch (session[s].ppp.lcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
+ case AckSent:
+ if (sess_local[s].lcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for LCP ConfigReq... resending\n");
+ sendlcp(s, session[s].tunnel);
+ change_state(s, lcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to LCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipcp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipcp;
+ switch (session[s].ppp.ipcp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
+ case AckSent:
+ if (sess_local[s].ipcp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPCP ConfigReq... resending\n");
+ sendipcp(s, session[s].tunnel);
+ change_state(s, ipcp, next_state);
+ }
+ else
+ {
+ sessionshutdown(s, "No response to IPCP ConfigReq.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ }
+
+ s_actions++;
+ }
+
+ if (session[s].die)
+ continue;
+ }
+
+ if (sess_local[s].ipv6cp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ipv6cp;
+ switch (session[s].ppp.ipv6cp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
+ case AckSent:
+ if (sess_local[s].ipv6cp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq... resending\n");
+ sendipv6cp(s, session[s].tunnel);
+ change_state(s, ipv6cp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for IPV6CP ConfigReq\n");
+ change_state(s, ipv6cp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ if (sess_local[s].ccp.restart <= time_now)
+ {
+ int next_state = session[s].ppp.ccp;
+ switch (session[s].ppp.ccp)
+ {
+ case RequestSent:
+ case AckReceived:
+ next_state = RequestSent;
+
+ case AckSent:
+ if (sess_local[s].ccp.conf_sent < config->ppp_max_configure)
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq... resending\n");
+ sendccp(s, session[s].tunnel);
+ change_state(s, ccp, next_state);
+ }
+ else
+ {
+ LOG(3, s, session[s].tunnel, "No ACK for CCP ConfigReq\n");
+ change_state(s, ccp, Stopped);
+ }
+
+ s_actions++;
+ }
+ }
+
+ // Drop sessions that have not responded within IDLE_ECHO_TIMEOUT seconds
+ if (session[s].last_packet && (time_now - session[s].last_packet >= config->idle_echo_timeout))
+ {
+ sessionshutdown(s, "No response to LCP ECHO requests.", CDN_ADMIN_DISC, TERM_LOST_SERVICE);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // No data in ECHO_TIMEOUT seconds, send LCP ECHO
+ if (session[s].ppp.phase >= Establish && (time_now - session[s].last_packet >= config->echo_timeout) &&
+ (time_now - sess_local[s].last_echo >= ECHO_TIMEOUT))
+ {
+ uint8_t b[MAXETHER];
+
+ uint8_t *q = makeppp(b, sizeof(b), 0, 0, s, session[s].tunnel, PPPLCP, 1, 0, 0);
+ if (!q) continue;
+
+ *q = EchoReq;
+ *(uint8_t *)(q + 1) = (time_now % 255); // ID
+ *(uint16_t *)(q + 2) = htons(8); // Length
+ *(uint32_t *)(q + 4) = session[s].ppp.lcp == Opened ? htonl(session[s].magic) : 0; // Magic Number
+
+ LOG(4, s, session[s].tunnel, "No data in %d seconds, sending LCP ECHO\n",
+ (int)(time_now - session[s].last_packet));
+
+ tunnelsend(b, (q - b) + 8, session[s].tunnel); // send it
+ sess_local[s].last_echo = time_now;
+ s_actions++;
+ }
+
+ // Drop sessions that have reached session_timeout seconds
+ if (session[s].session_timeout)
+ {
+ bundleidt bid = session[s].bundle;
+ if (bid)
+ {
+ if (time_now - bundle[bid].last_check >= 1)
+ {
+ bundle[bid].online_time += (time_now - bundle[bid].last_check) * bundle[bid].num_of_links;
+ bundle[bid].last_check = time_now;
+ if (bundle[bid].online_time >= session[s].session_timeout)
+ {
+ int ses;
+ for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
+ {
+ sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+ }
+ }
+ else if (time_now - session[s].opened >= session[s].session_timeout)
+ {
+ sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+
+ // Drop sessions that have reached idle_timeout seconds
+ if (session[s].last_data && session[s].idle_timeout && (time_now - session[s].last_data >= session[s].idle_timeout))
+ {
+ sessionshutdown(s, "Idle Timeout Reached", CDN_ADMIN_DISC, TERM_IDLE_TIMEOUT);
+ STAT(session_timeout);
+ s_actions++;
+ continue;
+ }
+
+ // Check for actions requested from the CLI
+ if ((a = cli_session_actions[s].action))
+ {
+ int send = 0;
+
+ cli_session_actions[s].action = 0;
+ if (a & CLI_SESS_KILL)
+ {
+ LOG(2, s, session[s].tunnel, "Dropping session by CLI\n");
+ sessionshutdown(s, "Requested by administrator.", CDN_ADMIN_DISC, TERM_ADMIN_RESET);
+ a = 0; // dead, no need to check for other actions
+ s_actions++;
+ }
+
+ if (a & CLI_SESS_NOSNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Unsnooping session by CLI\n");
+ session[s].snoop_ip = 0;
+ session[s].snoop_port = 0;
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_SNOOP)
+ {
+ LOG(2, s, session[s].tunnel, "Snooping session by CLI (to %s:%u)\n",
+ fmtaddr(cli_session_actions[s].snoop_ip, 0),
+ cli_session_actions[s].snoop_port);
+
+ session[s].snoop_ip = cli_session_actions[s].snoop_ip;
+ session[s].snoop_port = cli_session_actions[s].snoop_port;
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOTHROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Un-throttling session by CLI\n");
+ throttle_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_THROTTLE)
+ {
+ LOG(2, s, session[s].tunnel, "Throttling session by CLI (to %dkb/s up and %dkb/s down)\n",
+ cli_session_actions[s].throttle_in,
+ cli_session_actions[s].throttle_out);
+
+ throttle_session(s, cli_session_actions[s].throttle_in, cli_session_actions[s].throttle_out);
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOFILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Un-filtering session by CLI\n");
+ filter_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_FILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Filtering session by CLI (in=%d, out=%d)\n",
+ cli_session_actions[s].filter_in,
+ cli_session_actions[s].filter_out);
+
+ filter_session(s, cli_session_actions[s].filter_in, cli_session_actions[s].filter_out);
+ s_actions++;
+ send++;
+ }
+
+ if (send)
+ cluster_send_session(s);
+ }
+
+ // RADIUS interim accounting
+ if (config->radius_accounting && config->radius_interim > 0
+ && session[s].ip && !session[s].walled_garden
+ && !sess_local[s].radius // RADIUS already in progress
+ && time_now - sess_local[s].last_interim >= config->radius_interim
+ && session[s].flags & SESSION_STARTED)
+ {
+ int rad = radiusnew(s);
+ if (!rad)
+ {
+ LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Interim message\n");
+ STAT(radius_overflow);
+ continue;
+ }
+
+ LOG(3, s, session[s].tunnel, "Sending RADIUS Interim for %s (%u)\n",
+ session[s].user, session[s].unique_id);
+
+ radiussend(rad, RADIUSINTERIM);
+ sess_local[s].last_interim = time_now;
+ s_actions++;
+ }
+ }
+
+ LOG(4, 0, 0, "End regular cleanup: checked %d/%d/%d tunnels/radius/sessions; %d/%d/%d actions\n",
+ t_slice, r_slice, s_slice, t_actions, r_actions, s_actions);
+}
+
+//
+// Are we still in the middle of a tunnel update or outstanding
+// RADIUS requests?
+//
+static int still_busy(void)
+{
+ int i;
+ static clockt last_talked = 0;
+ static clockt start_busy_wait = 0;
+
+#ifdef BGP
+ static time_t stopped_bgp = 0;
+ if (bgp_configured)
+ {
+ if (!stopped_bgp)
+ {
+ LOG(1, 0, 0, "Shutting down in %d seconds, stopping BGP...\n", QUIT_DELAY);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ if (bgp_peers[i].state == Established)
+ bgp_stop(&bgp_peers[i]);
+
+ stopped_bgp = time_now;
+
+ if (!config->cluster_iam_master)
+ {
+ // we don't want to become master
+ cluster_send_ping(0);
+
+ return 1;
+ }
+ }
+
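+ // A non-master waits QUIT_DELAY seconds after stopping BGP before it will shut down.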
+ if (!config->cluster_iam_master && time_now < (stopped_bgp + QUIT_DELAY))
+ return 1;
+ }
+#endif /* BGP */
+
+ if (!config->cluster_iam_master)
+ return 0;
+
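+ // On a full shutdown, drop every tunnel (and its sessions) once, then wait below for the control queues to drain.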
+ if (main_quit == QUIT_SHUTDOWN)
+ {
+ static int dropped = 0;
+ if (!dropped)
+ {
+ int i;
+
+ LOG(1, 0, 0, "Dropping sessions and tunnels\n");
+ for (i = 1; i < MAXTUNNEL; i++)
+ if (tunnel[i].ip || tunnel[i].state)
+ tunnelshutdown(i, "L2TPNS Closing", 6, 0, 0);
+
+ dropped = 1;
+ }
+ }
+
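+ // Record when we started waiting so the RADIUS check below can give up after BUSY_WAIT_TIME.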
+ if (start_busy_wait == 0)
+ start_busy_wait = TIME;
+
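+ // Still busy while any tunnel has un-acked control messages queued.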
+ for (i = config->cluster_highest_tunnelid ; i > 0 ; --i)
+ {
+ if (!tunnel[i].controlc)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Tunnel %u still has un-acked control messages.\n", i);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ // Stop waiting for RADIUS after BUSY_WAIT_TIME tenths of a second.
+ if (abs(TIME - start_busy_wait) > BUSY_WAIT_TIME)
+ {
+ LOG(1, 0, 0, "Giving up waiting for RADIUS to be empty. Shutting down anyway.\n");
+ return 0;
+ }
+
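+ // Still busy while any RADIUS request is mid-transaction (state other than NULL or WAIT).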
+ for (i = 1; i < MAXRADIUS; i++)
+ {
+ if (radius[i].state == RADIUSNULL)
+ continue;
+ if (radius[i].state == RADIUSWAIT)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Radius session %u is still busy (sid %u)\n", i, radius[i].session);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_EPOLL
+# include <sys/epoll.h>
+#else
+# define FAKE_EPOLL_IMPLEMENTATION /* include the functions */
+# include "fake_epoll.h"
+#endif
+
+#ifdef LAC
+// the base set of fds polled: cli, cluster, tun, udp, control, dae, netlink, udplac, pppoedisc, pppoesess
+#define BASE_FDS 10
+#else
+// the base set of fds polled: cli, cluster, tun, udp, control, dae, netlink, pppoedisc, pppoesess
+#define BASE_FDS 9
+#endif
+
+// additional polled fds
+#ifdef BGP
+# define EXTRA_FDS BGP_NUM_PEERS
+#else
+# define EXTRA_FDS 0
+#endif
+
+// main loop - gets packets on tun or udp and processes them
+static void mainloop(void)
+{
+ int i;
+ uint8_t buf[65536];
+ uint8_t *p = buf + 24; // leave room for the header of a forwarded MPPP packet (see C_MPPP_FORWARD)
+ // and for forwarded PPPoE sessions
+ int size_bufp = sizeof(buf) - 24;
+ clockt next_cluster_ping = 0; // send initial ping immediately
+ struct epoll_event events[BASE_FDS + RADIUS_FDS + EXTRA_FDS];
+ int maxevent = sizeof(events)/sizeof(*events);
+
+ if ((epollfd = epoll_create(maxevent)) < 0)
+ {
+ LOG(0, 0, 0, "epoll_create failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+#ifdef LAC
+ LOG(4, 0, 0, "Beginning of main loop. clifd=%d, cluster_sockfd=%d, tunfd=%d, udpfd=%d, controlfd=%d, daefd=%d, nlfd=%d , udplacfd=%d, pppoefd=%d, pppoesessfd=%d\n",
+ clifd, cluster_sockfd, tunfd, udpfd, controlfd, daefd, nlfd, udplacfd, pppoediscfd, pppoesessfd);
+#else
+ LOG(4, 0, 0, "Beginning of main loop. clifd=%d, cluster_sockfd=%d, tunfd=%d, udpfd=%d, controlfd=%d, daefd=%d, nlfd=%d, pppoefd=%d, pppoesessfd=%d\n",
+ clifd, cluster_sockfd, tunfd, udpfd, controlfd, daefd, nlfd, pppoediscfd, pppoesessfd);
+#endif
+
+ /* setup our fds to poll for input */
+ {
+ static struct event_data d[BASE_FDS];
+ struct epoll_event e;
+
+ e.events = EPOLLIN;
+ i = 0;
+
+ if (clifd >= 0)
+ {
+ d[i].type = FD_TYPE_CLI;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, clifd, &e);
+ }
+
+ d[i].type = FD_TYPE_CLUSTER;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, cluster_sockfd, &e);
+
+ d[i].type = FD_TYPE_TUN;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, tunfd, &e);
+
+ d[i].type = FD_TYPE_UDP;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, udpfd, &e);
+
+ d[i].type = FD_TYPE_CONTROL;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, controlfd, &e);
+
+ d[i].type = FD_TYPE_DAE;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, daefd, &e);
+
+ d[i].type = FD_TYPE_NETLINK;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, nlfd, &e);
+
+#ifdef LAC
+ d[i].type = FD_TYPE_UDPLAC;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, udplacfd, &e);
+#endif
+
+ d[i].type = FD_TYPE_PPPOEDISC;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, pppoediscfd, &e);
+
+ d[i].type = FD_TYPE_PPPOESESS;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, pppoesessfd, &e);
+ }
+
+#ifdef BGP
+ signal(SIGPIPE, SIG_IGN);
+ bgp_setup(config->as_number);
+ if (config->bind_address)
+ bgp_add_route(config->bind_address, 0xffffffff);
+
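+ // Bring up a BGP session for each configured neighbour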
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ if (config->neighbour[i].name[0])
+ bgp_start(&bgp_peers[i], config->neighbour[i].name,
+ config->neighbour[i].as, config->neighbour[i].keepalive,
+ config->neighbour[i].hold, config->neighbour[i].update_source,
+ 0); /* 0 = routing disabled */
+ }
+#endif /* BGP */
+
+ while (!main_quit || still_busy())
+ {
+ int more = 0;
+ int n;
+
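+ // main_reload is set asynchronously (typically by a reload signal handler): re-read the
+ // config file and schedule update_config() below.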
+ if (main_reload)
+ {
+ main_reload = 0;
+ read_config_file();
+ config->reload_config++;
+ }
+
+ if (config->reload_config)
+ {
+ config->reload_config = 0;
+ update_config();
+ }
+
+#ifdef BGP
+ bgp_set_poll();
+#endif /* BGP */
+
+ n = epoll_wait(epollfd, events, maxevent, 100); // timeout 100ms (1/10th sec)
+ STAT(select_called);
+
+ TIME = now(NULL);
+ if (n < 0)
+ {
+ if (errno == EINTR ||
+ errno == ECHILD) // EINTR was clobbered by sigchild_handler()
+ continue;
+
+ LOG(0, 0, 0, "Error returned from select(): %s\n", strerror(errno));
+ break; // exit
+ }
+
+ if (n)
+ {
+ struct sockaddr_in addr;
+ struct in_addr local;
+ socklen_t alen;
+ int c, s;
+ int udp_ready = 0;
+#ifdef LAC
+ int udplac_ready = 0;
+ int udplac_pkts = 0;
+#endif
+ int pppoesess_ready = 0;
+ int pppoesess_pkts = 0;
+ int tun_ready = 0;
+ int cluster_ready = 0;
+ int udp_pkts = 0;
+ int tun_pkts = 0;
+ int cluster_pkts = 0;
+#ifdef BGP
+ uint32_t bgp_events[BGP_NUM_PEERS];
+ memset(bgp_events, 0, sizeof(bgp_events));
+#endif /* BGP */
+
+ for (c = n, i = 0; i < c; i++)
+ {
+ struct event_data *d = events[i].data.ptr;
+
+ switch (d->type)
+ {
+ case FD_TYPE_CLI: // CLI connections
+ {
+ int cli;
+
+ alen = sizeof(addr);
+ if ((cli = accept(clifd, (struct sockaddr *)&addr, &alen)) >= 0)
+ {
+ cli_do(cli);
+ close(cli);
+ }
+ else
+ LOG(0, 0, 0, "accept error: %s\n", strerror(errno));
+
+ n--;
+ break;
+ }
+
+ // these are handled below, with multiple interleaved reads
+ case FD_TYPE_CLUSTER: cluster_ready++; break;
+ case FD_TYPE_TUN: tun_ready++; break;
+ case FD_TYPE_UDP: udp_ready++; break;
+#ifdef LAC
+ case FD_TYPE_UDPLAC: udplac_ready++; break;
+#endif
+ case FD_TYPE_PPPOESESS: pppoesess_ready++; break;
+
+ case FD_TYPE_PPPOEDISC: // pppoe discovery
+ s = read(pppoediscfd, p, size_bufp);
+ if (s > 0) process_pppoe_disc(p, s);
+ n--;
+ break;
+
+ case FD_TYPE_CONTROL: // nsctl commands
+ alen = sizeof(addr);
+ s = recvfromto(controlfd, buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen, &local);
+ if (s > 0) processcontrol(buf, s, &addr, alen, &local);
+ n--;
+ break;
+
+ case FD_TYPE_DAE: // DAE requests
+ alen = sizeof(addr);
+ s = recvfromto(daefd, buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen, &local);
+ if (s > 0) processdae(buf, s, &addr, alen, &local);
+ n--;
+ break;
+
+ case FD_TYPE_RADIUS: // RADIUS response
+ alen = sizeof(addr);
+ s = recvfrom(radfds[d->index], buf, sizeof(buf), MSG_WAITALL, (struct sockaddr *) &addr, &alen);
+ if (s >= 0 && config->cluster_iam_master)
+ {
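+ // Only accept RADIUS replies from one of the two configured servers.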
+ if (addr.sin_addr.s_addr == config->radiusserver[0] ||
+ addr.sin_addr.s_addr == config->radiusserver[1])
+ processrad(buf, s, d->index);
+ else
+ LOG(3, 0, 0, "Dropping RADIUS packet from unknown source %s\n",
+ fmtaddr(addr.sin_addr.s_addr, 0));
+ }
+
+ n--;
+ break;
+
+#ifdef BGP
+ case FD_TYPE_BGP:
+ bgp_events[d->index] = events[i].events;
+ n--;
+ break;
+#endif /* BGP */
+
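+ // netlink responses from the kernel (acks/errors for tun interface and route setup)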
+ case FD_TYPE_NETLINK:
+ {
+ struct nlmsghdr *nh = (struct nlmsghdr *)buf;
+ s = netlink_recv(buf, sizeof(buf));
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ {
+ struct nlmsgerr *errmsg = NLMSG_DATA(nh);
+ if (errmsg->error)
+ {
+ if (errmsg->msg.nlmsg_seq < min_initok_nlseqnum)
+ {
+ LOG(0, 0, 0, "Got a fatal netlink error (while %s): %s\n", tun_nl_phase_msg[nh->nlmsg_seq], strerror(-errmsg->error));
+ exit(1);
+ }
+ else
+ LOG(0, 0, 0, "Got a netlink error: %s\n", strerror(-errmsg->error));
+ }
+ // else it's an ACK
+ }
+ else
+ LOG(1, 0, 0, "Got a unknown netlink message: type %d seq %d flags %d\n", nh->nlmsg_type, nh->nlmsg_seq, nh->nlmsg_flags);
+ n--;
+ break;
+ }
+
+ default:
+ LOG(0, 0, 0, "Unexpected fd type returned from epoll_wait: %d\n", d->type);
+ }
+ }
+
+#ifdef BGP
+ bgp_process(bgp_events);
+#endif /* BGP */
+
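+ // Drain the ready fds round-robin, at most multi_read_count packets each, so one busy socket cannot starve the others.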
+ for (c = 0; n && c < config->multi_read_count; c++)
+ {
+ // L2TP
+ if (udp_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(udpfd, p, size_bufp, 0, (void *) &addr, &alen)) > 0)
+ {
+ processudp(p, s, &addr);
+ udp_pkts++;
+ }
+ else
+ {
+ udp_ready = 0;
+ n--;
+ }
+ }
+#ifdef LAC
+ // L2TP REMOTE LNS
+ if (udplac_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(udplacfd, p, size_bufp, 0, (void *) &addr, &alen)) > 0)
+ {
+ if (!config->disable_lac_func)
+ processudp(p, s, &addr);
+
+ udplac_pkts++;
+ }
+ else
+ {
+ udplac_ready = 0;
+ n--;
+ }
+ }
+#endif
+ // incoming IP
+ if (tun_ready)
+ {
+ if ((s = read(tunfd, p, size_bufp)) > 0)
+ {
+ processtun(p, s);
+ tun_pkts++;
+ }
+ else
+ {
+ tun_ready = 0;
+ n--;
+ }
+ }
+
+ // pppoe session
+ if (pppoesess_ready)
+ {
+ if ((s = read(pppoesessfd, p, size_bufp)) > 0)