+ throttle_session(s, cli_session_actions[s].throttle_in, cli_session_actions[s].throttle_out);
+ s_actions++;
+ send++;
+ }
+
+ if (a & CLI_SESS_NOFILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Un-filtering session by CLI\n");
+ filter_session(s, 0, 0);
+ s_actions++;
+ send++;
+ }
+ else if (a & CLI_SESS_FILTER)
+ {
+ LOG(2, s, session[s].tunnel, "Filtering session by CLI (in=%d, out=%d)\n",
+ cli_session_actions[s].filter_in,
+ cli_session_actions[s].filter_out);
+
+ filter_session(s, cli_session_actions[s].filter_in, cli_session_actions[s].filter_out);
+ s_actions++;
+ send++;
+ }
+
+ if (send)
+ cluster_send_session(s);
+ }
+
+ // RADIUS interim accounting
+ if (config->radius_accounting && config->radius_interim > 0
+ && session[s].ip && !session[s].walled_garden
+		&& !sess_local[s].radius			// no RADIUS request already in progress for this session
+ && time_now - sess_local[s].last_interim >= config->radius_interim)
+ {
+ int rad = radiusnew(s);
+ if (!rad)
+ {
+ LOG(1, s, session[s].tunnel, "No free RADIUS sessions for Interim message\n");
+ STAT(radius_overflow);
+ continue;
+ }
+
+ LOG(3, s, session[s].tunnel, "Sending RADIUS Interim for %s (%u)\n",
+ session[s].user, session[s].unique_id);
+
+ radiussend(rad, RADIUSINTERIM);
+ sess_local[s].last_interim = time_now;
+ s_actions++;
+ }
+ }
+
+ LOG(4, 0, 0, "End regular cleanup: checked %d/%d/%d tunnels/radius/sessions; %d/%d/%d actions\n",
+ t_slice, r_slice, s_slice, t_actions, r_actions, s_actions);
+}
+
+//
+// Are we still busy with un-acked tunnel control messages or
+// outstanding RADIUS requests?
+//
+static int still_busy(void)
+{
+ int i;
+ static clockt last_talked = 0;
+ static clockt start_busy_wait = 0;
+
+ if (!config->cluster_iam_master)
+ {
+#ifdef BGP
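+		// A slave that has been told to quit shuts down its BGP sessions
+		// first and waits QUIT_DELAY seconds so peers can reroute, pinging
+		// the cluster with basetime 0 so it is never elected master.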
+ static time_t stopped_bgp = 0;
+ if (bgp_configured)
+ {
+ if (!stopped_bgp)
+ {
+ LOG(1, 0, 0, "Shutting down in %d seconds, stopping BGP...\n", QUIT_DELAY);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ if (bgp_peers[i].state == Established)
+ bgp_stop(&bgp_peers[i]);
+
+ stopped_bgp = time_now;
+
+ // we don't want to become master
+ cluster_send_ping(0);
+
+ return 1;
+ }
+
+ if (time_now < (stopped_bgp + QUIT_DELAY))
+ return 1;
+ }
+#endif /* BGP */
+
+ return 0;
+ }
+
+ if (main_quit == QUIT_SHUTDOWN)
+ {
+ static int dropped = 0;
+ if (!dropped)
+ {
+ int i;
+
+ LOG(1, 0, 0, "Dropping sessions and tunnels\n");
+ for (i = 1; i < MAXTUNNEL; i++)
+ if (tunnel[i].ip || tunnel[i].state)
+ tunnelshutdown(i, "L2TPNS Closing", 6, 0, 0);
+
+ dropped = 1;
+ }
+ }
+
+ if (start_busy_wait == 0)
+ start_busy_wait = TIME;
+
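+	// Don't finish while any tunnel still has un-acked control messages queued.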
+ for (i = config->cluster_highest_tunnelid ; i > 0 ; --i)
+ {
+ if (!tunnel[i].controlc)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Tunnel %d still has un-acked control messages.\n", i);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+	// Stop waiting for RADIUS after BUSY_WAIT_TIME tenths of a second.
+ if (abs(TIME - start_busy_wait) > BUSY_WAIT_TIME)
+ {
+ LOG(1, 0, 0, "Giving up waiting for RADIUS to be empty. Shutting down anyway.\n");
+ return 0;
+ }
+
+ for (i = 1; i < MAXRADIUS; i++)
+ {
+ if (radius[i].state == RADIUSNULL)
+ continue;
+ if (radius[i].state == RADIUSWAIT)
+ continue;
+
+ if (last_talked != TIME)
+ {
+ LOG(2, 0, 0, "Radius session %d is still busy (sid %d)\n", i, radius[i].session);
+ last_talked = TIME;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_EPOLL
+# include <sys/epoll.h>
+#else
+# define FAKE_EPOLL_IMPLEMENTATION /* include the functions */
+# include "fake_epoll.h"
+#endif
+
+// the base set of fds polled: cli, cluster, tun, udp, control, dae
+#define BASE_FDS 6
+
+// additional polled fds
+#ifdef BGP
+# define EXTRA_FDS BGP_NUM_PEERS
+#else
+# define EXTRA_FDS 0
+#endif
+
+// main loop - gets packets on tun or udp and processes them
+static void mainloop(void)
+{
+ int i;
+ uint8_t buf[65536];
+ clockt next_cluster_ping = 0; // send initial ping immediately
+ struct epoll_event events[BASE_FDS + RADIUS_FDS + EXTRA_FDS];
+ int maxevent = sizeof(events)/sizeof(*events);
+
+ if ((epollfd = epoll_create(maxevent)) < 0)
+ {
+ LOG(0, 0, 0, "epoll_create failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ LOG(4, 0, 0, "Beginning of main loop. clifd=%d, cluster_sockfd=%d, tunfd=%d, udpfd=%d, controlfd=%d, daefd=%d\n",
+ clifd, cluster_sockfd, tunfd, udpfd, controlfd, daefd);
+
+ /* setup our fds to poll for input */
+ {
+ static struct event_data d[BASE_FDS];
+ struct epoll_event e;
+
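+		// Register each descriptor with a pointer to a static event_data
+		// entry; epoll_wait() returns that pointer so the dispatch switch
+		// in the main loop can tell which kind of fd became readable.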
+ e.events = EPOLLIN;
+ i = 0;
+
+ d[i].type = FD_TYPE_CLI;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, clifd, &e);
+
+ d[i].type = FD_TYPE_CLUSTER;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, cluster_sockfd, &e);
+
+ d[i].type = FD_TYPE_TUN;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, tunfd, &e);
+
+ d[i].type = FD_TYPE_UDP;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, udpfd, &e);
+
+ d[i].type = FD_TYPE_CONTROL;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, controlfd, &e);
+
+ d[i].type = FD_TYPE_DAE;
+ e.data.ptr = &d[i++];
+ epoll_ctl(epollfd, EPOLL_CTL_ADD, daefd, &e);
+ }
+
+#ifdef BGP
+ signal(SIGPIPE, SIG_IGN);
+ bgp_setup(config->as_number);
+ if (config->bind_address)
+ bgp_add_route(config->bind_address, 0xffffffff);
+
+ for (i = 0; i < BGP_NUM_PEERS; i++)
+ {
+ if (config->neighbour[i].name[0])
+ bgp_start(&bgp_peers[i], config->neighbour[i].name,
+ config->neighbour[i].as, config->neighbour[i].keepalive,
+ config->neighbour[i].hold, 0); /* 0 = routing disabled */
+ }
+#endif /* BGP */
+
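+	// Main service loop: keep going until we are asked to quit and
+	// still_busy() reports that tunnel control messages and RADIUS
+	// requests have drained (or we give up waiting).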
+ while (!main_quit || still_busy())
+ {
+ int more = 0;
+ int n;
+
+ if (config->reload_config)
+ {
+ // Update the config state based on config settings
+ update_config();
+ }
+
+#ifdef BGP
+ bgp_set_poll();
+#endif /* BGP */
+
+ n = epoll_wait(epollfd, events, maxevent, 100); // timeout 100ms (1/10th sec)
+ STAT(select_called);
+
+ TIME = now(NULL);
+ if (n < 0)
+ {
+ if (errno == EINTR ||
+ errno == ECHILD) // EINTR was clobbered by sigchild_handler()
+ continue;
+
+			LOG(0, 0, 0, "Error returned from epoll_wait(): %s\n", strerror(errno));
+ break; // exit
+ }
+
+ if (n)
+ {
+ struct sockaddr_in addr;
+ socklen_t alen;
+ int c, s;
+ int udp_ready = 0;
+ int tun_ready = 0;
+ int cluster_ready = 0;
+ int udp_pkts = 0;
+ int tun_pkts = 0;
+ int cluster_pkts = 0;
+#ifdef BGP
+ uint32_t bgp_events[BGP_NUM_PEERS];
+ memset(bgp_events, 0, sizeof(bgp_events));
+#endif /* BGP */
+
+ for (c = n, i = 0; i < c; i++)
+ {
+ struct event_data *d = events[i].data.ptr;
+ switch (d->type)
+ {
+ case FD_TYPE_CLI: // CLI connections
+ {
+ int cli;
+
+ alen = sizeof(addr);
+ if ((cli = accept(clifd, (struct sockaddr *)&addr, &alen)) >= 0)
+ {
+ cli_do(cli);
+ close(cli);
+ }
+ else
+ LOG(0, 0, 0, "accept error: %s\n", strerror(errno));
+
+ n--;
+ break;
+ }
+
+ // these are handled below, with multiple interleaved reads
+ case FD_TYPE_CLUSTER: cluster_ready++; break;
+ case FD_TYPE_TUN: tun_ready++; break;
+ case FD_TYPE_UDP: udp_ready++; break;
+
+ case FD_TYPE_CONTROL: // nsctl commands
+ alen = sizeof(addr);
+ processcontrol(buf, recvfrom(controlfd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen), &addr, alen);
+ n--;
+ break;
+
+ case FD_TYPE_DAE: // DAE requests
+ alen = sizeof(addr);
+ processdae(buf, recvfrom(daefd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen), &addr, alen);
+ n--;
+ break;
+
+ case FD_TYPE_RADIUS: // RADIUS response
+ s = recv(radfds[d->index], buf, sizeof(buf), 0);
+ if (s >= 0 && config->cluster_iam_master)
+ processrad(buf, s, d->index);
+
+ n--;
+ break;
+
+#ifdef BGP
+ case FD_TYPE_BGP:
+ bgp_events[d->index] = events[i].events;
+ n--;
+ break;
+#endif /* BGP */
+
+ default:
+ LOG(0, 0, 0, "Unexpected fd type returned from epoll_wait: %d\n", d->type);
+ }
+ }
+
+#ifdef BGP
+ bgp_process(bgp_events);
+#endif /* BGP */
+
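+			// Drain the ready sockets round-robin: one packet from each per
+			// pass, at most multi_read_count passes, so a flood on one
+			// descriptor can't starve the others.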
+ for (c = 0; n && c < config->multi_read_count; c++)
+ {
+ // L2TP
+ if (udp_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(udpfd, buf, sizeof(buf), 0, (void *) &addr, &alen)) > 0)
+ {
+ processudp(buf, s, &addr);
+ udp_pkts++;
+ }
+ else
+ {
+ udp_ready = 0;
+ n--;
+ }
+ }
+
+ // incoming IP
+ if (tun_ready)
+ {
+ if ((s = read(tunfd, buf, sizeof(buf))) > 0)
+ {
+ processtun(buf, s);
+ tun_pkts++;
+ }
+ else
+ {
+ tun_ready = 0;
+ n--;
+ }
+ }
+
+ // cluster
+ if (cluster_ready)
+ {
+ alen = sizeof(addr);
+ if ((s = recvfrom(cluster_sockfd, buf, sizeof(buf), MSG_WAITALL, (void *) &addr, &alen)) > 0)
+ {
+ processcluster(buf, s, addr.sin_addr.s_addr);
+ cluster_pkts++;
+ }
+ else
+ {
+ cluster_ready = 0;
+ n--;
+ }
+ }
+ }
+
+ if (udp_pkts > 1 || tun_pkts > 1 || cluster_pkts > 1)
+ STAT(multi_read_used);
+
+ if (c >= config->multi_read_count)
+ {
+ LOG(3, 0, 0, "Reached multi_read_count (%d); processed %d udp, %d tun and %d cluster packets\n",
+ config->multi_read_count, udp_pkts, tun_pkts, cluster_pkts);
+
+ STAT(multi_read_exceeded);
+ more++;
+ }
+ }
+
+ // Runs on every machine (master and slaves).
+ if (next_cluster_ping <= TIME)
+ {
+ // Check to see which of the cluster is still alive..
+
+ cluster_send_ping(basetime); // Only does anything if we're a slave
+ cluster_check_master(); // ditto.
+
+ cluster_heartbeat(); // Only does anything if we're a master.
+ cluster_check_slaves(); // ditto.
+
+ master_update_counts(); // If we're a slave, send our byte counters to our master.
+
+ if (config->cluster_iam_master && !config->cluster_iam_uptodate)
+ next_cluster_ping = TIME + 1; // out-of-date slaves, do fast updates
+ else
+ next_cluster_ping = TIME + config->cluster_hb_interval;
+ }
+
+ if (!config->cluster_iam_master)
+ continue;
+
+ // Run token bucket filtering queue..
+ // Only run it every 1/10th of a second.
+ {
+ static clockt last_run = 0;
+ if (last_run != TIME)
+ {
+ last_run = TIME;
+ tbf_run_timer();
+ }
+ }
+
+ // Handle timeouts, retries etc.
+ {
+ static double last_clean = 0;
+ double this_clean;
+ double diff;
+
+ TIME = now(&this_clean);
+ diff = this_clean - last_clean;
+
+ // Run during idle time (after we've handled
+ // all incoming packets) or every 1/10th sec
+ if (!more || diff > 0.1)
+ {
+ regular_cleanups(diff);
+ last_clean = this_clean;
+ }
+ }
+
+ if (*config->accounting_dir)
+ {
+ static clockt next_acct = 0;
+ static clockt next_shut_acct = 0;
+
+ if (next_acct <= TIME)
+ {
+ // Dump accounting data
+ next_acct = TIME + ACCT_TIME;
+ next_shut_acct = TIME + ACCT_SHUT_TIME;
+ dump_acct_info(1);
+ }
+ else if (next_shut_acct <= TIME)
+ {
+ // Dump accounting data for shutdown sessions
+ next_shut_acct = TIME + ACCT_SHUT_TIME;
+ if (shut_acct_n)
+ dump_acct_info(0);
+ }
+ }
+ }
+
+ // Are we the master and shutting down??
+ if (config->cluster_iam_master)
+ cluster_heartbeat(); // Flush any queued changes..
+
+ // Ok. Notify everyone we're shutting down. If we're
+ // the master, this will force an election.
+ cluster_send_ping(0);
+
+ //
+ // Important!!! We MUST not process any packets past this point!
+ LOG(1, 0, 0, "Shutdown complete\n");
+}
+
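+// Strip the DNS domain from a hostname: if /etc/resolv.conf supplies a
+// domain (or search) entry matching the tail of the name, remove that
+// suffix; otherwise drop everything after the first dot.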
+static void stripdomain(char *host)
+{
+ char *p;
+
+ if ((p = strchr(host, '.')))
+ {
+ char *domain = 0;
+ char _domain[1024];
+
+ // strip off domain
+ FILE *resolv = fopen("/etc/resolv.conf", "r");
+ if (resolv)
+ {
+ char buf[1024];
+ char *b;
+
+ while (fgets(buf, sizeof(buf), resolv))
+ {
+ if (strncmp(buf, "domain", 6) && strncmp(buf, "search", 6))
+ continue;
+
+ if (!isspace(buf[6]))
+ continue;
+
+ b = buf + 7;
+ while (isspace(*b)) b++;
+
+ if (*b)
+ {
+ char *d = b;
+ while (*b && !isspace(*b)) b++;
+ *b = 0;
+ if (buf[0] == 'd') // domain is canonical
+ {
+ domain = d;
+ break;
+ }
+
+ // first search line
+ if (!domain)
+ {
+ // hold, may be subsequent domain line
+					strncpy(_domain, d, sizeof(_domain) - 1);
+					_domain[sizeof(_domain) - 1] = 0;
+					domain = _domain;
+ }
+ }
+ }
+
+ fclose(resolv);
+ }
+
+ if (domain)
+ {
+ int hl = strlen(host);
+ int dl = strlen(domain);
+ if (dl < hl && host[hl - dl - 1] == '.' && !strcmp(host + hl - dl, domain))
+			host[hl - dl - 1] = 0;
+ }
+ else
+ {
+ *p = 0; // everything after first dot
+ }
+ }
+}
+
+// Init data structures
+static void initdata(int optdebug, char *optconfig)
+{
+ int i;
+
+ if (!(config = shared_malloc(sizeof(configt))))
+ {
+ fprintf(stderr, "Error doing malloc for configuration: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ memset(config, 0, sizeof(configt));
+ time(&config->start_time);
+	strncpy(config->config_file, optconfig, sizeof(config->config_file) - 1);
+ config->debug = optdebug;
+ config->num_tbfs = MAXTBFS;
+ config->rl_rate = 28; // 28kbps
+ config->cluster_master_min_adv = 1;
+ config->ppp_restart_time = 3;
+ config->ppp_max_configure = 10;
+ config->ppp_max_failure = 5;
+ strcpy(config->random_device, RANDOMDEVICE);
+
+ log_stream = stderr;
+
+#ifdef RINGBUFFER
+ if (!(ringbuffer = shared_malloc(sizeof(struct Tringbuffer))))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ringbuffer: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(ringbuffer, 0, sizeof(struct Tringbuffer));
+#endif
+
+ if (!(_statistics = shared_malloc(sizeof(struct Tstats))))
+ {
+ LOG(0, 0, 0, "Error doing malloc for _statistics: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(tunnel = shared_malloc(sizeof(tunnelt) * MAXTUNNEL)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for tunnels: %s\n", strerror(errno));
+ exit(1);
+ }
+ if (!(session = shared_malloc(sizeof(sessiont) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for sessions: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(sess_local = shared_malloc(sizeof(sessionlocalt) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for sess_local: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(radius = shared_malloc(sizeof(radiust) * MAXRADIUS)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for radius: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(ip_address_pool = shared_malloc(sizeof(ippoolt) * MAXIPPOOL)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ip_address_pool: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (!(ip_filters = shared_malloc(sizeof(ip_filtert) * MAXFILTER)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for ip_filters: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(ip_filters, 0, sizeof(ip_filtert) * MAXFILTER);
+
+ if (!(cli_session_actions = shared_malloc(sizeof(struct cli_session_actions) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for cli session actions: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(cli_session_actions, 0, sizeof(struct cli_session_actions) * MAXSESSION);
+
+ if (!(cli_tunnel_actions = shared_malloc(sizeof(struct cli_tunnel_actions) * MAXSESSION)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for cli tunnel actions: %s\n", strerror(errno));
+ exit(1);
+ }
+ memset(cli_tunnel_actions, 0, sizeof(struct cli_tunnel_actions) * MAXSESSION);
+
+ memset(tunnel, 0, sizeof(tunnelt) * MAXTUNNEL);
+ memset(session, 0, sizeof(sessiont) * MAXSESSION);
+ memset(radius, 0, sizeof(radiust) * MAXRADIUS);
+ memset(ip_address_pool, 0, sizeof(ippoolt) * MAXIPPOOL);
+
+ // Put all the sessions on the free list marked as undefined.
+ for (i = 1; i < MAXSESSION; i++)
+ {
+ session[i].next = i + 1;
+ session[i].tunnel = T_UNDEF; // mark it as not filled in.
+ }
+ session[MAXSESSION - 1].next = 0;
+ sessionfree = 1;
+
+ // Mark all the tunnels as undefined (waiting to be filled in by a download).
+ for (i = 1; i < MAXTUNNEL; i++)
+ tunnel[i].state = TUNNELUNDEF; // mark it as not filled in.
+
+ if (!*hostname)
+ {
+ // Grab my hostname unless it's been specified
+ gethostname(hostname, sizeof(hostname));
+ stripdomain(hostname);
+ }
+
+ _statistics->start_time = _statistics->last_reset = time(NULL);
+
+#ifdef BGP
+ if (!(bgp_peers = shared_malloc(sizeof(struct bgp_peer) * BGP_NUM_PEERS)))
+ {
+ LOG(0, 0, 0, "Error doing malloc for bgp: %s\n", strerror(errno));
+ exit(1);
+ }
+#endif /* BGP */
+}
+
+static int assign_ip_address(sessionidt s)
+{
+ uint32_t i;
+ int best = -1;
+ time_t best_time = time_now;
+ char *u = session[s].user;
+ char reuse = 0;
+
+
+ CSTAT(assign_ip_address);
+
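+	// Scan the pool for a free address: prefer the one this user held
+	// before (addresses are "sticky" for non-gardened sessions), otherwise
+	// take the least recently used free entry.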
+ for (i = 1; i < ip_pool_size; i++)
+ {
+ if (!ip_address_pool[i].address || ip_address_pool[i].assigned)
+ continue;
+
+ if (!session[s].walled_garden && ip_address_pool[i].user[0] && !strcmp(u, ip_address_pool[i].user))
+ {
+ best = i;
+ reuse = 1;
+ break;
+ }
+
+ if (ip_address_pool[i].last < best_time)
+ {
+ best = i;
+ if (!(best_time = ip_address_pool[i].last))
+ break; // never used, grab this one
+ }
+ }
+
+ if (best < 0)
+ {
+ LOG(0, s, session[s].tunnel, "assign_ip_address(): out of addresses\n");
+ return 0;
+ }
+
+ session[s].ip = ip_address_pool[best].address;
+ session[s].ip_pool_index = best;
+ ip_address_pool[best].assigned = 1;
+ ip_address_pool[best].last = time_now;
+ ip_address_pool[best].session = s;
+ if (session[s].walled_garden)
+ /* Don't track addresses of users in walled garden (note: this
+ means that their address isn't "sticky" even if they get
+ un-gardened). */
+ ip_address_pool[best].user[0] = 0;
+ else
+ strncpy(ip_address_pool[best].user, u, sizeof(ip_address_pool[best].user) - 1);
+
+ STAT(ip_allocated);
+ LOG(4, s, session[s].tunnel, "assign_ip_address(): %s ip address %d from pool\n",
+ reuse ? "Reusing" : "Allocating", best);
+
+ return 1;
+}
+
+static void free_ip_address(sessionidt s)
+{
+ int i = session[s].ip_pool_index;
+
+
+ CSTAT(free_ip_address);
+
+ if (!session[s].ip)
+ return; // what the?
+
+ if (i < 0) // Is this actually part of the ip pool?
+ i = 0;
+
+ STAT(ip_freed);
+ cache_ipmap(session[s].ip, -i); // Change the mapping to point back to the ip pool index.
+ session[s].ip = 0;
+ ip_address_pool[i].assigned = 0;
+ ip_address_pool[i].session = 0;
+ ip_address_pool[i].last = time_now;
+}
+
+//
+// Fsck the address pool against the session table.
+// Normally only called when we become a master.
+//
+// This isn't perfect: we don't keep track of which
+// users used to have an IP address.
+//
+void rebuild_address_pool(void)
+{
+ int i;
+
+ //
+ // Zero the IP pool allocation, and build
+ // a map from IP address to pool index.
+ for (i = 1; i < MAXIPPOOL; ++i)
+ {
+ ip_address_pool[i].assigned = 0;
+ ip_address_pool[i].session = 0;
+ if (!ip_address_pool[i].address)
+ continue;
+
+ cache_ipmap(ip_address_pool[i].address, -i); // Map pool IP to pool index.
+ }
+
+ for (i = 0; i < MAXSESSION; ++i)
+ {
+ int ipid;
+ if (!(session[i].opened && session[i].ip))
+ continue;
+
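+		// Pool addresses are cached in the ipmap as the negated pool index,
+		// so negating the lookup gives a positive index for pool addresses
+		// and a non-positive value for anything else.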
+ ipid = - lookup_ipmap(htonl(session[i].ip));
+
+ if (session[i].ip_pool_index < 0)
+ {
+ // Not allocated out of the pool.
+ if (ipid < 1) // Not found in the pool either? good.
+ continue;
+
+ LOG(0, i, 0, "Session %d has an IP address (%s) that was marked static, but is in the pool (%d)!\n",
+ i, fmtaddr(session[i].ip, 0), ipid);
+
+ // Fall through and process it as part of the pool.
+ }
+
+
+ if (ipid > MAXIPPOOL || ipid < 0)
+ {
+ LOG(0, i, 0, "Session %d has a pool IP that's not found in the pool! (%d)\n", i, ipid);
+ ipid = -1;
+ session[i].ip_pool_index = ipid;
+ continue;
+ }
+
+ ip_address_pool[ipid].assigned = 1;
+ ip_address_pool[ipid].session = i;
+ ip_address_pool[ipid].last = time_now;
+ strncpy(ip_address_pool[ipid].user, session[i].user, sizeof(ip_address_pool[ipid].user) - 1);
+ session[i].ip_pool_index = ipid;
+ cache_ipmap(session[i].ip, i); // Fix the ip map.
+ }
+}
+
+//
+// Fix the address pool to match a changed session.
+// (usually when the master sends us an update).
+static void fix_address_pool(int sid)
+{
+ int ipid;
+
+ ipid = session[sid].ip_pool_index;
+
+ if (ipid > ip_pool_size)
+ return; // Ignore it. rebuild_address_pool will fix it up.
+
+ if (ip_address_pool[ipid].address != session[sid].ip)
+ return; // Just ignore it. rebuild_address_pool will take care of it.
+
+ ip_address_pool[ipid].assigned = 1;
+ ip_address_pool[ipid].session = sid;
+ ip_address_pool[ipid].last = time_now;
+ strncpy(ip_address_pool[ipid].user, session[sid].user, sizeof(ip_address_pool[ipid].user) - 1);
+}
+
+//
+// Add a block of addresses to the IP pool to hand out.
+//
+static void add_to_ip_pool(in_addr_t addr, in_addr_t mask)
+{
+ int i;
+ if (mask == 0)
+ mask = 0xffffffff; // Host route only.
+
+ addr &= mask;
+
+ if (ip_pool_size >= MAXIPPOOL) // Pool is full!
+ return ;
+
+ for (i = addr ;(i & mask) == addr; ++i)
+ {
+ if ((i & 0xff) == 0 || (i&0xff) == 255)
+ continue; // Skip 0 and broadcast addresses.
+
+ ip_address_pool[ip_pool_size].address = i;
+ ip_address_pool[ip_pool_size].assigned = 0;
+ ++ip_pool_size;
+ if (ip_pool_size >= MAXIPPOOL)
+ {
+ LOG(0, 0, 0, "Overflowed IP pool adding %s\n", fmtaddr(htonl(addr), 0));
+ return;
+ }
+ }
+}
+
+// Initialize the IP address pool
+static void initippool()
+{
+ FILE *f;
+ char *p;
+ char buf[4096];
+	memset(ip_address_pool, 0, sizeof(ippoolt) * MAXIPPOOL);
+
+ if (!(f = fopen(IPPOOLFILE, "r")))
+ {
+ LOG(0, 0, 0, "Can't load pool file " IPPOOLFILE ": %s\n", strerror(errno));
+ exit(1);
+ }
+
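+	// Each line of the pool file is a single address, a CIDR range
+	// ("a.b.c.d/n"), or either of those prefixed with an IP and ':' so
+	// the entry only applies when that IP matches our bind address.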
+ while (ip_pool_size < MAXIPPOOL && fgets(buf, 4096, f))
+ {
+ char *pool = buf;
+		buf[4095] = 0;	// Force it to be zero terminated.
+
+ if (*buf == '#' || *buf == '\n')
+ continue; // Skip comments / blank lines
+ if ((p = (char *)strrchr(buf, '\n'))) *p = 0;
+ if ((p = (char *)strchr(buf, ':')))
+ {
+ in_addr_t src;
+ *p = '\0';
+ src = inet_addr(buf);
+ if (src == INADDR_NONE)
+ {
+ LOG(0, 0, 0, "Invalid address pool IP %s\n", buf);
+ exit(1);
+ }
+ // This entry is for a specific IP only
+ if (src != config->bind_address)
+ continue;
+ *p = ':';
+ pool = p+1;
+ }
+ if ((p = (char *)strchr(pool, '/')))
+ {
+ // It's a range
+ int numbits = 0;
+ in_addr_t start = 0, mask = 0;
+
+ LOG(2, 0, 0, "Adding IP address range %s\n", buf);
+ *p++ = 0;
+ if (!*p || !(numbits = atoi(p)))
+ {
+ LOG(0, 0, 0, "Invalid pool range %s\n", buf);
+ continue;
+ }
+ start = ntohl(inet_addr(pool));
+ mask = (in_addr_t) (pow(2, numbits) - 1) << (32 - numbits);
+
+ // Add a static route for this pool
+			LOG(5, 0, 0, "Adding route for address pool %s/%d\n",
+				fmtaddr(htonl(start), 0), numbits);
+
+ routeset(0, start, mask, 0, 1);
+
+ add_to_ip_pool(start, mask);
+ }
+ else
+ {
+ // It's a single ip address
+ add_to_ip_pool(inet_addr(pool), 0);
+ }
+ }
+ fclose(f);
+ LOG(1, 0, 0, "IP address pool is %d addresses\n", ip_pool_size - 1);
+}
+
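+// Forward a copy of an intercepted packet to the snoop destination as a
+// UDP datagram; sent with MSG_DONTWAIT so a slow receiver can't stall the
+// main loop.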
+void snoop_send_packet(uint8_t *packet, uint16_t size, in_addr_t destination, uint16_t port)
+{
+ struct sockaddr_in snoop_addr = {0};
+ if (!destination || !port || snoopfd <= 0 || size <= 0 || !packet)
+ return;
+
+ snoop_addr.sin_family = AF_INET;
+ snoop_addr.sin_addr.s_addr = destination;
+	snoop_addr.sin_port = htons(port);
+
+ LOG(5, 0, 0, "Snooping %d byte packet to %s:%d\n", size,
+ fmtaddr(snoop_addr.sin_addr.s_addr, 0),
+		ntohs(snoop_addr.sin_port));
+
+ if (sendto(snoopfd, packet, size, MSG_DONTWAIT | MSG_NOSIGNAL, (void *) &snoop_addr, sizeof(snoop_addr)) < 0)
+ LOG(0, 0, 0, "Error sending intercept packet: %s\n", strerror(errno));
+
+ STAT(packets_snooped);
+}
+
+static int dump_session(FILE **f, sessiont *s)
+{
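+	// Only dump sessions that are open, have an IP and a username, have
+	// passed traffic since the last dump, and aren't in the walled garden.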
+ if (!s->opened || !s->ip || !(s->cin_delta || s->cout_delta) || !*s->user || s->walled_garden)
+ return 1;
+
+ if (!*f)
+ {
+ char filename[1024];
+ char timestr[64];
+ time_t now = time(NULL);
+
+ strftime(timestr, sizeof(timestr), "%Y%m%d%H%M%S", localtime(&now));
+ snprintf(filename, sizeof(filename), "%s/%s", config->accounting_dir, timestr);
+
+ if (!(*f = fopen(filename, "w")))
+ {
+ LOG(0, 0, 0, "Can't write accounting info to %s: %s\n", filename, strerror(errno));
+ return 0;
+ }
+
+ LOG(3, 0, 0, "Dumping accounting information to %s\n", filename);
+ fprintf(*f, "# dslwatch.pl dump file V1.01\n"
+ "# host: %s\n"
+ "# endpoint: %s\n"
+ "# time: %ld\n"
+ "# uptime: %ld\n"
+ "# format: username ip qos uptxoctets downrxoctets\n",
+ hostname,
+ fmtaddr(config->bind_address ? config->bind_address : my_address, 0),
+ now,
+ now - basetime);
+ }
+
+ LOG(4, 0, 0, "Dumping accounting information for %s\n", s->user);
+ fprintf(*f, "%s %s %d %u %u\n",
+ s->user, // username
+ fmtaddr(htonl(s->ip), 0), // ip
+ (s->throttle_in || s->throttle_out) ? 2 : 1, // qos
+ (uint32_t) s->cin_delta, // uptxoctets
+ (uint32_t) s->cout_delta); // downrxoctets
+
+ s->cin_delta = s->cout_delta = 0;
+
+ return 1;
+}
+
+static void dump_acct_info(int all)
+{
+ int i;
+ FILE *f = NULL;
+
+
+ CSTAT(dump_acct_info);
+
+ if (shut_acct_n)
+ {
+ for (i = 0; i < shut_acct_n; i++)
+ dump_session(&f, &shut_acct[i]);
+
+ shut_acct_n = 0;
+ }
+
+ if (all)
+ for (i = 1; i <= config->cluster_highest_sessionid; i++)
+ dump_session(&f, &session[i]);
+
+ if (f)
+ fclose(f);
+}
+
+// Main program
+int main(int argc, char *argv[])
+{
+ int i;
+ int optdebug = 0;
+ char *optconfig = CONFIGFILE;
+
+ time(&basetime); // start clock
+
+ // scan args
+ while ((i = getopt(argc, argv, "dvc:h:")) >= 0)
+ {
+ switch (i)
+ {
+ case 'd':
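+			// detach: fork into the background, start a new session and
+			// redirect the standard streams to /dev/null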
+ if (fork()) exit(0);
+ setsid();
+ freopen("/dev/null", "r", stdin);
+ freopen("/dev/null", "w", stdout);
+ freopen("/dev/null", "w", stderr);
+ break;
+ case 'v':
+ optdebug++;
+ break;
+ case 'c':
+ optconfig = optarg;
+ break;
+ case 'h':
+ snprintf(hostname, sizeof(hostname), "%s", optarg);
+ break;
+ default:
+ printf("Args are:\n"
+ "\t-d\t\tDetach from terminal\n"
+ "\t-c <file>\tConfig file\n"
+ "\t-h <hostname>\tForce hostname\n"
+ "\t-v\t\tDebug\n");
+
+			return 0;
+ }
+ }
+
+ // Start the timer routine off
+ time(&time_now);
+ strftime(time_now_string, sizeof(time_now_string), "%Y-%m-%d %H:%M:%S", localtime(&time_now));
+ signal(SIGALRM, sigalrm_handler);
+ siginterrupt(SIGALRM, 0);
+
+ initplugins();
+ initdata(optdebug, optconfig);
+
+ init_cli(hostname);
+ read_config_file();
+ init_tbf(config->num_tbfs);
+
+ LOG(0, 0, 0, "L2TPNS version " VERSION "\n");
+ LOG(0, 0, 0, "Copyright (c) 2003, 2004, 2005 Optus Internet Engineering\n");
+ LOG(0, 0, 0, "Copyright (c) 2002 FireBrick (Andrews & Arnold Ltd / Watchfront Ltd) - GPL licenced\n");
+ {
+ struct rlimit rlim;
+ rlim.rlim_cur = RLIM_INFINITY;
+ rlim.rlim_max = RLIM_INFINITY;
+ // Remove the maximum core size
+ if (setrlimit(RLIMIT_CORE, &rlim) < 0)
+ LOG(0, 0, 0, "Can't set ulimit: %s\n", strerror(errno));
+
+ // Make core dumps go to /tmp
+ chdir("/tmp");
+ }
+
+ if (config->scheduler_fifo)
+ {
+ int ret;
+ struct sched_param params = {0};
+ params.sched_priority = 1;
+
+ if (get_nprocs() < 2)
+ {
+ LOG(0, 0, 0, "Not using FIFO scheduler, there is only 1 processor in the system.\n");
+ config->scheduler_fifo = 0;
+ }
+ else
+ {
+			if ((ret = sched_setscheduler(0, SCHED_FIFO, &params)) == 0)
+ {
+ LOG(1, 0, 0, "Using FIFO scheduler. Say goodbye to any other processes running\n");
+ }
+ else
+ {
+ LOG(0, 0, 0, "Error setting scheduler to FIFO: %s\n", strerror(errno));
+ config->scheduler_fifo = 0;
+ }
+ }
+ }
+
+ /* Set up the cluster communications port. */
+ if (cluster_init() < 0)
+ exit(1);
+
+ inittun();
+ LOG(1, 0, 0, "Set up on interface %s\n", config->tundevice);
+
+ initudp();
+ initrad();
+ initippool();
+
+ // seed prng
+ {
+ unsigned seed = time_now ^ getpid();
+ LOG(4, 0, 0, "Seeding the pseudo random generator: %u\n", seed);
+ srand(seed);
+ }
+
+ signal(SIGHUP, sighup_handler);
+ signal(SIGCHLD, sigchild_handler);
+ signal(SIGTERM, shutdown_handler);
+ signal(SIGINT, shutdown_handler);
+ signal(SIGQUIT, shutdown_handler);
+
+ // Prevent us from getting paged out
+ if (config->lock_pages)
+ {
+ if (!mlockall(MCL_CURRENT))
+ LOG(1, 0, 0, "Locking pages into memory\n");
+ else
+ LOG(0, 0, 0, "Can't lock pages: %s\n", strerror(errno));
+ }
+
+ alarm(1);
+
+ // Drop privileges here
+ if (config->target_uid > 0 && geteuid() == 0)
+ setuid(config->target_uid);
+
+ mainloop();
+
+ /* remove plugins (so cleanup code gets run) */
+ plugins_done();
+
+ // Remove the PID file if we wrote it
+ if (config->wrote_pid && *config->pid_file == '/')
+ unlink(config->pid_file);
+
+ /* kill CLI children */
+ signal(SIGTERM, SIG_IGN);
+ kill(0, SIGTERM);
+ return 0;
+}
+
+static void sighup_handler(int sig)
+{
+ if (log_stream)
+ {
+ if (log_stream != stderr)
+ fclose(log_stream);
+
+ log_stream = NULL;