// Copyright (c) 2002 FireBrick (Andrews & Arnold Ltd / Watchfront Ltd) - GPL licenced
// vim: sw=8 ts=8
-char const *cvs_id_l2tpns = "$Id: l2tpns.c,v 1.169 2006-07-01 12:40:17 bodea Exp $";
+char const *cvs_id_l2tpns = "$Id: l2tpns.c,v 1.173 2009-12-08 14:49:28 bodea Exp $";
#include <arpa/inet.h>
#include <assert.h>
static int syslog_log = 0; // are we logging to syslog
static FILE *log_stream = 0; // file handle for direct logging (i.e. direct into file, not via syslog).
uint32_t last_id = 0; // Unique ID for radius accounting
+// Guest change
+char guest_users[10][32]; // Array of guest users
+int guest_accounts_num = 0; // Number of guest users
// calculated from config->l2tp_mtu
uint16_t MRU = 0; // PPP MRU
struct cli_session_actions *cli_session_actions = NULL; // Pending session changes requested by CLI
struct cli_tunnel_actions *cli_tunnel_actions = NULL; // Pending tunnel changes required by CLI
-static void *ip_hash[256]; // Mapping from IP address to session structures.
+union iphash {
+ sessionidt sess;
+ union iphash *idx;
+} ip_hash[256]; // Mapping from IP address to session structures.
+
struct ipv6radix {
- int sess;
+ sessionidt sess;
struct ipv6radix *branch;
} ipv6_hash[256]; // Mapping from IPv6 address to session structures.
CONFIG("radius_bind_min", radius_bind_min, SHORT),
CONFIG("radius_bind_max", radius_bind_max, SHORT),
CONFIG("allow_duplicate_users", allow_duplicate_users, BOOL),
+ CONFIG("kill_timedout_sessions", kill_timedout_sessions, BOOL),
CONFIG("guest_account", guest_user, STRING),
CONFIG("bind_address", bind_address, IPv4),
CONFIG("peer_address", peer_address, IPv4),
struct Tringbuffer *ringbuffer = NULL;
#endif
-static void cache_ipmap(in_addr_t ip, int s);
+static void cache_ipmap(in_addr_t ip, sessionidt s);
static void uncache_ipmap(in_addr_t ip);
-static void cache_ipv6map(struct in6_addr ip, int prefixlen, int s);
+static void cache_ipv6map(struct in6_addr ip, int prefixlen, sessionidt s);
static void free_ip_address(sessionidt s);
static void dump_acct_info(int all);
static void sighup_handler(int sig);
// IP address.
//
-static int lookup_ipmap(in_addr_t ip)
+static sessionidt lookup_ipmap(in_addr_t ip)
{
uint8_t *a = (uint8_t *) &ip;
- uint8_t **d = (uint8_t **) ip_hash;
+ union iphash *h = ip_hash;
- if (!(d = (uint8_t **) d[(size_t) *a++])) return 0;
- if (!(d = (uint8_t **) d[(size_t) *a++])) return 0;
- if (!(d = (uint8_t **) d[(size_t) *a++])) return 0;
+ if (!(h = h[*a++].idx)) return 0;
+ if (!(h = h[*a++].idx)) return 0;
+ if (!(h = h[*a++].idx)) return 0;
- return (int) (intptr_t) d[(size_t) *a];
+ return h[*a].sess;
}
-static int lookup_ipv6map(struct in6_addr ip)
+static sessionidt lookup_ipv6map(struct in6_addr ip)
{
struct ipv6radix *curnode;
int i;
sessionidt sessionbyip(in_addr_t ip)
{
- int s = lookup_ipmap(ip);
+ sessionidt s = lookup_ipmap(ip);
CSTAT(sessionbyip);
if (s > 0 && s < MAXSESSION && session[s].opened)
- return (sessionidt) s;
+ return s;
return 0;
}
sessionidt sessionbyipv6(struct in6_addr ip)
{
- int s;
+ sessionidt s;
CSTAT(sessionbyipv6);
if (!memcmp(&config->ipv6_prefix, &ip, 8) ||
//
// (It's actually cached in network order)
//
-static void cache_ipmap(in_addr_t ip, int s)
+static void cache_ipmap(in_addr_t ip, sessionidt s)
{
in_addr_t nip = htonl(ip); // MUST be in network order. I.e. MSB must in be ((char *) (&ip))[0]
uint8_t *a = (uint8_t *) &nip;
- uint8_t **d = (uint8_t **) ip_hash;
+ union iphash *h = ip_hash;
int i;
for (i = 0; i < 3; i++)
{
- if (!d[(size_t) a[i]])
- {
- if (!(d[(size_t) a[i]] = calloc(256, sizeof(void *))))
- return;
- }
+ if (!(h[a[i]].idx || (h[a[i]].idx = calloc(256, sizeof(union iphash)))))
+ return;
- d = (uint8_t **) d[(size_t) a[i]];
+ h = h[a[i]].idx;
}
- d[(size_t) a[3]] = (uint8_t *) (intptr_t) s;
+ h[a[3]].sess = s;
if (s > 0)
LOG(4, s, session[s].tunnel, "Caching ip address %s\n", fmtaddr(nip, 0));
cache_ipmap(ip, 0); // Assign it to the NULL session.
}
-static void cache_ipv6map(struct in6_addr ip, int prefixlen, int s)
+static void cache_ipv6map(struct in6_addr ip, int prefixlen, sessionidt s)
{
int i;
int bytes;
//
int cmd_show_ipcache(struct cli_def *cli, char *command, char **argv, int argc)
{
- char **d = (char **) ip_hash, **e, **f, **g;
+ union iphash *d = ip_hash, *e, *f, *g;
int i, j, k, l;
int count = 0;
for (i = 0; i < 256; ++i)
{
- if (!d[i])
+ if (!d[i].idx)
continue;
- e = (char **) d[i];
+
+ e = d[i].idx;
for (j = 0; j < 256; ++j)
{
- if (!e[j])
+ if (!e[j].idx)
continue;
- f = (char **) e[j];
+
+ f = e[j].idx;
for (k = 0; k < 256; ++k)
{
- if (!f[k])
+ if (!f[k].idx)
continue;
- g = (char **)f[k];
+
+ g = f[k].idx;
for (l = 0; l < 256; ++l)
{
- if (!g[l])
+ if (!g[l].sess)
continue;
- cli_print(cli, "%7d %d.%d.%d.%d", (int) (intptr_t) g[l], i, j, k, l);
+
+ cli_print(cli, "%7d %d.%d.%d.%d", g[l].sess, i, j, k, l);
++count;
}
}
l -= 4;
}
- // Process this frame
- if (*p & 1)
- {
- proto = *p++;
- l--;
- }
- else
- {
- proto = ntohs(*(uint16_t *) p);
- p += 2;
- l -= 2;
- }
-
- if (proto == PPPIP)
- {
- if (session[s].die)
- {
- LOG(4, s, t, "MPPP: Session %u is closing. Don't process PPP packets\n", s);
- return; // closing session, PPP not processed
- }
+ if (*p & 1)
+ {
+ proto = *p++;
+ l--;
+ }
+ else
+ {
+ proto = ntohs(*(uint16_t *) p);
+ p += 2;
+ l -= 2;
+ }
+ if (proto == PPPIP)
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "MPPP: Session %d is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+ session[s].last_packet = session[s].last_data = time_now;
+ processipin(s, t, p, l);
+ }
+ else if (proto == PPPIPV6 && config->ipv6_prefix.s6_addr[0])
+ {
+ if (session[s].die)
+ {
+ LOG(4, s, t, "MPPP: Session %d is closing. Don't process PPP packets\n", s);
+ return; // closing session, PPP not processed
+ }
+
+ session[s].last_packet = session[s].last_data = time_now;
+ processipv6in(s, t, p, l);
+ }
+ else if (proto == PPPIPCP)
+ {
+ session[s].last_packet = session[s].last_data = time_now;
+ processipcp(s, t, p, l);
+ }
+ else if (proto == PPPCCP)
+ {
+ session[s].last_packet = session[s].last_data = time_now;
+ processccp(s, t, p, l);
+ }
+ else
+ {
+ LOG(2, s, t, "MPPP: Unsupported MP protocol 0x%04X received\n",proto);
+ }
+}
- session[s].last_packet = session[s].last_data = time_now;
- processipin(s, t, p, l);
- }
- else if (proto == PPPIPV6 && config->ipv6_prefix.s6_addr[0])
- {
- if (session[s].die)
- {
- LOG(4, s, t, "MPPP: Session %u is closing. Don't process PPP packets\n", s);
- return; // closing session, PPP not processed
- }
+static void update_session_out_stat(sessionidt s, sessiont *sp, int len)
+{
+ increment_counter(&sp->cout, &sp->cout_wrap, len); // byte count
+ sp->cout_delta += len;
+ sp->pout++;
+ sp->last_data = time_now;
- session[s].last_packet = session[s].last_data = time_now;
- processipv6in(s, t, p, l);
- }
- else
- {
- LOG(2, s, t, "MPPP: Unsupported MP protocol 0x%04X received\n",proto);
- }
+ sess_local[s].cout += len; // To send to master..
+ sess_local[s].pout++;
}
// process outgoing (to tunnel) IP
uint8_t *data = buf; // Keep a copy of the originals.
int size = len;
- uint8_t b1[MAXETHER + 20];
- uint8_t b2[MAXETHER + 20];
+ uint8_t fragbuf[MAXETHER + 20];
CSTAT(processipout);
}
return;
}
+
t = session[s].tunnel;
+ if (len > session[s].mru || (session[s].mrru && len > session[s].mrru))
+ {
+ LOG(3, s, t, "Packet size more than session MRU\n");
+ return;
+ }
+
sp = &session[s];
- sp->last_data = time_now;
// DoS prevention: enforce a maximum number of packets per 0.1s for a session
if (config->max_packets > 0)
}
// Add on L2TP header
- {
- bundleidt bid = 0;
- if (session[s].bundle && bundle[session[s].bundle].num_of_links > 1)
- {
- bid = session[s].bundle;
- s = bundle[bid].members[bundle[bid].current_ses = ++bundle[bid].current_ses % bundle[bid].num_of_links];
- LOG(4, s, t, "MPPP: (1)Session number becomes: %u\n", s);
- if (len > 256)
- {
- // Partition the packet to 2 fragments
- uint32_t frag1len = len / 2;
- uint32_t frag2len = len - frag1len;
- uint8_t *p = makeppp(b1, sizeof(b1), buf, frag1len, s, t, PPPIP, 0, bid, MP_BEGIN);
- uint8_t *q;
-
- if (!p) return;
- tunnelsend(b1, frag1len + (p-b1), t); // send it...
- s = bundle[bid].members[bundle[bid].current_ses = ++bundle[bid].current_ses % bundle[bid].num_of_links];
- LOG(4, s, t, "MPPP: (2)Session number becomes: %u\n", s);
- q = makeppp(b2, sizeof(b2), buf+frag1len, frag2len, s, t, PPPIP, 0, bid, MP_END);
- if (!q) return;
- tunnelsend(b2, frag2len + (q-b2), t); // send it...
- }
- else {
- // Send it as one frame
- uint8_t *p = makeppp(b1, sizeof(b1), buf, len, s, t, PPPIP, 0, bid, MP_BOTH_BITS);
- if (!p) return;
- tunnelsend(b1, len + (p-b1), t); // send it...
- }
- }
- else
- {
- uint8_t *p = makeppp(b1, sizeof(b1), buf, len, s, t, PPPIP, 0, 0, 0);
- if (!p) return;
- tunnelsend(b1, len + (p-b1), t); // send it...
- }
- }
+ {
+ bundleidt bid = 0;
+ if(session[s].bundle != 0 && bundle[session[s].bundle].num_of_links > 1)
+ {
+ bid = session[s].bundle;
+ s = bundle[bid].members[bundle[bid].current_ses = ++bundle[bid].current_ses % bundle[bid].num_of_links];
+ t = session[s].tunnel;
+ sp = &session[s];
+ LOG(4, s, t, "MPPP: (1)Session number becomes: %d\n", s);
+ if(len > MINFRAGLEN)
+ {
+ // Partition the packet into "bundle[b].num_of_links" fragments
+ bundlet *b = &bundle[bid];
+ uint32_t num_of_links = b->num_of_links;
+ uint32_t fraglen = len / num_of_links;
+ fraglen = (fraglen > session[s].mru ? session[s].mru : fraglen);
+ uint32_t last_fraglen = fraglen + len % num_of_links;
+ last_fraglen = (last_fraglen > session[s].mru ? len % num_of_links : last_fraglen);
+ uint32_t remain = len;
+
+ // send the first packet
+ uint8_t *p = makeppp(fragbuf, sizeof(fragbuf), buf, fraglen, s, t, PPPIP, 0, bid, MP_BEGIN);
+ if (!p) return;
+ tunnelsend(fragbuf, fraglen + (p-fragbuf), t); // send it...
+ // statistics
+ update_session_out_stat(s, sp, fraglen);
+ remain -= fraglen;
+ while (remain > last_fraglen)
+ {
+ s = b->members[b->current_ses = ++b->current_ses % num_of_links];
+ t = session[s].tunnel;
+ sp = &session[s];
+ LOG(4, s, t, "MPPP: (2)Session number becomes: %d\n", s);
+ p = makeppp(fragbuf, sizeof(fragbuf), buf+(len - remain), fraglen, s, t, PPPIP, 0, bid, 0);
+ if (!p) return;
+ tunnelsend(fragbuf, fraglen + (p-fragbuf), t); // send it...
+ update_session_out_stat(s, sp, fraglen);
+ remain -= fraglen;
+ }
+ // send the last fragment
+ s = b->members[b->current_ses = ++b->current_ses % num_of_links];
+ t = session[s].tunnel;
+ sp = &session[s];
+ LOG(4, s, t, "MPPP: (2)Session number becomes: %d\n", s);
+ p = makeppp(fragbuf, sizeof(fragbuf), buf+(len - remain), remain, s, t, PPPIP, 0, bid, MP_END);
+ if (!p) return;
+ tunnelsend(fragbuf, remain + (p-fragbuf), t); // send it...
+ update_session_out_stat(s, sp, remain);
+ if (remain != last_fraglen)
+ LOG(3, s, t, "PROCESSIPOUT ERROR REMAIN != LAST_FRAGLEN, %d != %d\n", remain, last_fraglen);
+ }
+ else {
+ // Send it as one frame
+ uint8_t *p = makeppp(fragbuf, sizeof(fragbuf), buf, len, s, t, PPPIP, 0, bid, MP_BOTH_BITS);
+ if (!p) return;
+ tunnelsend(fragbuf, len + (p-fragbuf), t); // send it...
+ LOG(4, s, t, "MPPP: packet sent as one frame\n");
+ update_session_out_stat(s, sp, len);
+ }
+ }
+ else
+ {
+ uint8_t *p = makeppp(fragbuf, sizeof(fragbuf), buf, len, s, t, PPPIP, 0, 0, 0);
+ if (!p) return;
+ tunnelsend(fragbuf, len + (p-fragbuf), t); // send it...
+ update_session_out_stat(s, sp, len);
+ }
+ }
// Snooping this session, send it to intercept box
if (sp->snoop_ip && sp->snoop_port)
snoop_send_packet(buf, len, sp->snoop_ip, sp->snoop_port);
- increment_counter(&sp->cout, &sp->cout_wrap, len); // byte count
- sp->cout_delta += len;
- sp->pout++;
udp_tx += len;
-
- sess_local[s].cout += len; // To send to master..
- sess_local[s].pout++;
}
// process outgoing (to tunnel) IPv6
void sessionshutdown(sessionidt s, char const *reason, int cdn_result, int cdn_error, int term_cause)
{
int walled_garden = session[s].walled_garden;
-
+ bundleidt b = session[s].bundle;
+ // Delete routes only for the last session in the bundle (in case of MPPP)
+ int del_routes = !b || (bundle[b].num_of_links == 1);
CSTAT(sessionshutdown);
(session[s].route[r].ip & session[s].route[r].mask))
routed++;
- routeset(s, session[s].route[r].ip, session[s].route[r].mask, 0, 0);
+ if (del_routes) routeset(s, session[s].route[r].ip, session[s].route[r].mask, 0, 0);
session[s].route[r].ip = 0;
}
if (session[s].ip_pool_index == -1) // static ip
{
- if (!routed) routeset(s, session[s].ip, 0, 0, 0);
+ if (!routed && del_routes) routeset(s, session[s].ip, 0, 0, 0);
session[s].ip = 0;
}
else
free_ip_address(s);
// unroute IPv6, if setup
- if (session[s].ppp.ipv6cp == Opened && session[s].ipv6prefixlen)
+ if (session[s].ppp.ipv6cp == Opened && session[s].ipv6prefixlen && del_routes)
route6set(s, session[s].ipv6route, session[s].ipv6prefixlen, 0);
+
+ if (b)
+ {
+ // This session was part of a bundle
+ bundle[b].num_of_links--;
+ LOG(3, s, 0, "MPPP: Dropping member link: %d from bundle %d\n",s,b);
+ if(bundle[b].num_of_links == 0)
+ {
+ bundleclear(b);
+ LOG(3, s, 0, "MPPP: Kill bundle: %d (No remaing member links)\n",b);
+ }
+ else
+ {
+ // Adjust the members array to accommodate the new change
+ uint8_t mem_num = 0;
+ // Use num_of_links here (not num_of_links-1): it was just decremented by "num_of_links--" above
+ if(bundle[b].members[bundle[b].num_of_links] != s)
+ {
+ uint8_t ml;
+ for(ml = 0; ml<bundle[b].num_of_links; ml++)
+ if(bundle[b].members[ml] == s)
+ {
+ mem_num = ml;
+ break;
+ }
+ bundle[b].members[mem_num] = bundle[b].members[bundle[b].num_of_links];
+ LOG(3, s, 0, "MPPP: Adjusted member links array\n");
+ }
+ }
+ cluster_send_bundle(b);
+ }
}
if (session[s].throttle_in || session[s].throttle_out) // Unthrottle if throttled.
// kill a session now
void sessionkill(sessionidt s, char *reason)
{
- bundleidt b;
-
CSTAT(sessionkill);
if (!session[s].opened) // not alive
if (sess_local[s].radius)
radiusclear(sess_local[s].radius, s); // cant send clean accounting data, session is killed
- LOG(2, s, session[s].tunnel, "Kill session %u (%s): %s\n", s, session[s].user, reason);
- if ((b = session[s].bundle))
- {
- // This session was part of a bundle
- bundle[b].num_of_links--;
- LOG(3, s, 0, "MPPP: Dropping member link: %u from bundle %u\n", s, b);
- if (bundle[b].num_of_links == 0)
- {
- bundleclear(b);
- LOG(3, s, 0, "MPPP: Kill bundle: %u (No remaing member links)\n", b);
- }
- else
- {
- // Adjust the members array to accomodate the new change
- uint8_t mem_num = 0;
- // It should be here num_of_links instead of num_of_links-1 (previous instruction "num_of_links--")
- if (bundle[b].members[bundle[b].num_of_links] != s)
- {
- uint8_t ml;
- for (ml = 0; ml<bundle[b].num_of_links; ml++)
- {
- if (bundle[b].members[ml] == s)
- {
- mem_num = ml;
- break;
- }
- }
- bundle[b].members[mem_num] = bundle[b].members[bundle[b].num_of_links];
- LOG(3, s, 0, "MPPP: Adjusted member links array\n");
- }
- }
- cluster_send_bundle(b);
- }
+ LOG(2, s, session[s].tunnel, "Kill session %d (%s): %s\n", s, session[s].user, reason);
sessionclear(s);
cluster_send_session(s);
}
continue;
}
- // check for timed out sessions
- if (session[s].timeout)
- {
- bundleidt bid = session[s].bundle;
- if (bid)
- {
- clockt curr_time = time_now;
- if (curr_time - bundle[bid].last_check >= 1)
- {
- bundle[bid].online_time += (curr_time-bundle[bid].last_check)*bundle[bid].num_of_links;
- bundle[bid].last_check = curr_time;
- if (bundle[bid].online_time >= session[s].timeout)
- {
- int ses;
- for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
- {
- sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
- s_actions++;
- continue;
- }
- }
- }
- }
- else if (session[s].timeout <= time_now - session[s].opened)
- {
- sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
- s_actions++;
- continue;
- }
- }
-
// PPP timeouts
if (sess_local[s].lcp.restart <= time_now)
{
}
// Drop sessions who have reached session_timeout seconds
- if (session[s].session_timeout && (time_now - session[s].opened >= session[s].session_timeout))
+ if (session[s].session_timeout)
{
- sessionshutdown(s, "Session Timeout Reached", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
- STAT(session_timeout);
- s_actions++;
- continue;
+ bundleidt bid = session[s].bundle;
+ if (bid)
+ {
+ if (time_now - bundle[bid].last_check >= 1)
+ {
+ bundle[bid].online_time += (time_now - bundle[bid].last_check) * bundle[bid].num_of_links;
+ bundle[bid].last_check = time_now;
+ if (bundle[bid].online_time >= session[s].session_timeout)
+ {
+ int ses;
+ for (ses = bundle[bid].num_of_links - 1; ses >= 0; ses--)
+ {
+ sessionshutdown(bundle[bid].members[ses], "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
+ }
+ }
+ }
+ else if (time_now - session[s].opened >= session[s].session_timeout)
+ {
+ sessionshutdown(s, "Session timeout", CDN_ADMIN_DISC, TERM_SESSION_TIMEOUT);
+ s_actions++;
+ continue;
+ }
}
// Drop sessions who have reached idle_timeout seconds
if (config->radius_accounting && config->radius_interim > 0
&& session[s].ip && !session[s].walled_garden
&& !sess_local[s].radius // RADIUS already in progress
- && time_now - sess_local[s].last_interim >= config->radius_interim)
+ && time_now - sess_local[s].last_interim >= config->radius_interim
+ && session[s].flags & SESSION_STARTED)
{
int rad = radiusnew(s);
if (!rad)
config->ppp_restart_time = 3;
config->ppp_max_configure = 10;
config->ppp_max_failure = 5;
+ config->kill_timedout_sessions = 1;
strcpy(config->random_device, RANDOMDEVICE);
log_stream = stderr;
}
}
+ // Guest change
+ guest_accounts_num = 0;
+ char *p2 = config->guest_user;
+ while (p2 && *p2)
+ {
+ char *s = strpbrk(p2, " \t,");
+ if (s)
+ {
+ *s++ = 0;
+ while (*s == ' ' || *s == '\t')
+ s++;
+
+ if (!*s)
+ s = 0;
+ }
+
+ strcpy(guest_users[guest_accounts_num], p2);
+ LOG(1, 0, 0, "Guest account[%d]: %s\n", guest_accounts_num, guest_users[guest_accounts_num]);
+ guest_accounts_num++;
+ p2 = s;
+ }
+ // Rebuild the config->guest_user string from the parsed guest_users array
+ strcpy(config->guest_user, "");
+ int ui = 0;
+ for (ui=0; ui<guest_accounts_num; ui++)
+ {
+ strcat(config->guest_user, guest_users[ui]);
+ if (ui<guest_accounts_num-1)
+ {
+ strcat(config->guest_user, ",");
+ }
+ }
+
+
memcpy(config->old_plugins, config->plugins, sizeof(config->plugins));
if (!config->multi_read_count) config->multi_read_count = 10;
if (!config->cluster_address) config->cluster_address = inet_addr(DEFAULT_MCAST_ADDR);
LOG(3, s, t, "Doing session setup for session\n");
+ // Join a bundle if the MRRU option is accepted
+ if(session[s].mrru > 0 && session[s].bundle == 0)
+ {
+ LOG(3, s, t, "This session can be part of multilink bundle\n");
+ if (join_bundle(s) > 0)
+ cluster_send_bundle(session[s].bundle);
+ else
+ {
+ LOG(0, s, t, "MPPP: Mismaching mssf option with other sessions in bundle\n");
+ sessionshutdown(s, "Mismaching mssf option.", CDN_NONE, TERM_SERVICE_UNAVAILABLE);
+ return 0;
+ }
+ }
+
if (!session[s].ip)
{
assign_ip_address(s);
// Make sure this is right
session[s].tunnel = t;
- // Join a bundle if the MRRU option is accepted
- if (session[s].mrru > 0 && !session[s].bundle)
- {
- LOG(3, s, t, "This session can be part of multilink bundle\n");
- if (join_bundle(s))
- cluster_send_bundle(session[s].bundle);
- }
-
// zap old sessions with same IP and/or username
// Don't kill gardened sessions - doing so leads to a DoS
// from someone who doesn't need to know the password
{
if (i == s) continue;
if (!session[s].opened) continue;
+ // Allow duplicate sessions for multilink ones of the same bundle.
+ if (session[s].bundle && session[i].bundle && session[s].bundle == session[i].bundle)
+ continue;
if (ip == session[i].ip)
{
sessionkill(i, "Duplicate IP address");
continue;
}
- if (config->allow_duplicate_users)
- continue;
-
- if (session[s].walled_garden || session[i].walled_garden)
- continue;
-
- // Allow duplicate sessions for guest account.
- if (*config->guest_user && !strcasecmp(user, config->guest_user))
- continue;
-
- // Allow duplicate sessions for multilink ones of the same bundle.
- if (session[s].bundle && session[i].bundle && session[s].bundle == session[i].bundle)
- continue;
+ if (config->allow_duplicate_users) continue;
+ if (session[s].walled_garden || session[i].walled_garden) continue;
+ // Guest change
+ int found = 0;
+ int gu;
+ for (gu = 0; gu < guest_accounts_num; gu++)
+ {
+ if (!strcasecmp(user, guest_users[gu]))
+ {
+ found = 1;
+ break;
+ }
+ }
+ if (found) continue;
// Drop the new session in case of duplicate sessionss, not the old one.
if (!strcasecmp(user, session[i].user))
}
}
+ // No need to set a route again for the same IP address shared by the bundle's links
+ if (!session[s].bundle || (bundle[session[s].bundle].num_of_links == 1))
{
int routed = 0;