From 2ecb0924d7791372a70ef8f1174e37b329b955c3 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Wed, 21 May 2008 14:53:00 -0700
Subject: tipc: Prevent node object duplication due to simultaneous discovery

This patch ensures that the simultaneous discovery of the same
neighboring node by multiple interfaces does not cause TIPC to add
the node into its internal data structures more than once.

Signed-off-by: Allan Stephens
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 598f4d3a009..34e9a2bb7c1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -52,16 +52,40 @@ static void node_established_contact(struct node *n_ptr);
 
 struct node *tipc_nodes = NULL;	/* sorted list of nodes within cluster */
 
+static DEFINE_SPINLOCK(node_create_lock);
+
 u32 tipc_own_tag = 0;
 
+/**
+ * tipc_node_create - create neighboring node
+ *
+ * Currently, this routine is called by neighbor discovery code, which holds
+ * net_lock for reading only. We must take node_create_lock to ensure a node
+ * isn't created twice if two different bearers discover the node at the same
+ * time. (It would be preferable to switch to holding net_lock in write mode,
+ * but this is a non-trivial change.)
+ */
+
 struct node *tipc_node_create(u32 addr)
 {
 	struct cluster *c_ptr;
 	struct node *n_ptr;
 	struct node **curr_node;
 
+	spin_lock_bh(&node_create_lock);
+
+	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
+		if (addr < n_ptr->addr)
+			break;
+		if (addr == n_ptr->addr) {
+			spin_unlock_bh(&node_create_lock);
+			return n_ptr;
+		}
+	}
+
 	n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
 	if (!n_ptr) {
+		spin_unlock_bh(&node_create_lock);
 		warn("Node creation failed, no memory\n");
 		return NULL;
 	}
@@ -71,6 +95,7 @@ struct node *tipc_node_create(u32 addr)
 		c_ptr = tipc_cltr_create(addr);
 	}
 	if (!c_ptr) {
+		spin_unlock_bh(&node_create_lock);
 		kfree(n_ptr);
 		return NULL;
 	}
@@ -91,6 +116,7 @@ struct node *tipc_node_create(u32 addr)
 		}
 	}
 	(*curr_node) = n_ptr;
+	spin_unlock_bh(&node_create_lock);
 	return n_ptr;
 }
 
--
cgit v1.2.3
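The fix above is a check-then-create pattern: since the discovery code holds net_lock only for reading, two bearers can race into tipc_node_create() for the same address, so the routine re-scans the sorted node list after taking node_create_lock, returns the existing entry if another CPU got there first, and releases the lock on every exit path. The standalone sketch below models that pattern in ordinary userspace C, with a pthread mutex standing in for node_create_lock and a simplified sorted list standing in for tipc_nodes; the helper names (node_create, node_list) are illustrative, not TIPC's.

/*
 * Minimal userspace model of the "search again under the creation lock"
 * pattern used in tipc_node_create() above.  A pthread mutex stands in for
 * node_create_lock and a plain sorted singly linked list stands in for
 * tipc_nodes; the names here are illustrative, not part of TIPC.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned int addr;
	struct node *next;
};

static struct node *node_list;		/* sorted by addr, ascending */
static pthread_mutex_t node_create_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *node_create(unsigned int addr)
{
	struct node *n, **curr;

	pthread_mutex_lock(&node_create_lock);

	/*
	 * Re-check under the lock: another thread may have created this
	 * node between the caller's lockless lookup and this point.
	 */
	for (n = node_list; n; n = n->next) {
		if (addr < n->addr)
			break;
		if (addr == n->addr) {
			pthread_mutex_unlock(&node_create_lock);
			return n;	/* duplicate creation avoided */
		}
	}

	n = calloc(1, sizeof(*n));
	if (!n) {
		pthread_mutex_unlock(&node_create_lock);
		return NULL;
	}
	n->addr = addr;

	/* Insert while keeping the list sorted by address. */
	for (curr = &node_list; *curr; curr = &(*curr)->next)
		if (addr < (*curr)->addr)
			break;
	n->next = *curr;
	*curr = n;

	pthread_mutex_unlock(&node_create_lock);
	return n;
}

int main(void)
{
	struct node *n;

	node_create(3);
	node_create(1);
	node_create(3);		/* second create of 3 returns the existing node */

	for (n = node_list; n; n = n->next)
		printf("node %u\n", n->addr);	/* prints 1 then 3, no duplicate */
	return 0;
}

The essential property is that the duplicate check and the list insertion happen under the same lock, so a concurrent caller either finds the finished node or waits until it exists.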
From 1aad72d6cd518872c5f545320823bf7f4dafb026 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Mon, 14 Jul 2008 22:44:58 -0700
Subject: tipc: Add missing locks when inspecting node list & link list

This patch ensures that TIPC configuration commands that display
info about neighboring nodes and their links take the spinlocks
that protect the node list and link lists from changing while
the lists are being traversed.

Signed-off-by: Allan Stephens
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 34e9a2bb7c1..ee952ad6021 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -600,12 +600,14 @@ u32 tipc_available_nodes(const u32 domain)
 	struct node *n_ptr;
 	u32 cnt = 0;
 
+	read_lock_bh(&tipc_net_lock);
 	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
 		if (!in_scope(domain, n_ptr->addr))
 			continue;
 		if (tipc_node_is_up(n_ptr))
 			cnt++;
 	}
+	read_unlock_bh(&tipc_net_lock);
 	return cnt;
 }
 
@@ -625,19 +627,26 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	if (!tipc_nodes)
+	read_lock_bh(&tipc_net_lock);
+	if (!tipc_nodes) {
+		read_unlock_bh(&tipc_net_lock);
 		return tipc_cfg_reply_none();
+	}
 
 	/* For now, get space for all other nodes
 	   (will need to modify this when slave nodes are supported */
 
 	payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
-	if (payload_size > 32768u)
+	if (payload_size > 32768u) {
+		read_unlock_bh(&tipc_net_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
+	}
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf)
+	if (!buf) {
+		read_unlock_bh(&tipc_net_lock);
 		return NULL;
+	}
 
 	/* Add TLVs for all nodes in scope */
 
@@ -650,6 +659,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 				    &node_info, sizeof(node_info));
 	}
 
+	read_unlock_bh(&tipc_net_lock);
 	return buf;
 }
 
@@ -672,16 +682,22 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (tipc_mode != TIPC_NET_MODE)
 		return tipc_cfg_reply_none();
 
+	read_lock_bh(&tipc_net_lock);
+
 	/* Get space for all unicast links + multicast link */
 
 	payload_size = TLV_SPACE(sizeof(link_info)) *
 		(tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
-	if (payload_size > 32768u)
+	if (payload_size > 32768u) {
+		read_unlock_bh(&tipc_net_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
+	}
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf)
+	if (!buf) {
+		read_unlock_bh(&tipc_net_lock);
 		return NULL;
+	}
 
 	/* Add TLV for broadcast link */
 
@@ -697,6 +713,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 
 		if (!in_scope(domain, n_ptr->addr))
 			continue;
+		tipc_node_lock(n_ptr);
 		for (i = 0; i < MAX_BEARERS; i++) {
 			if (!n_ptr->links[i])
 				continue;
@@ -706,7 +723,9 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
 					    &link_info, sizeof(link_info));
 		}
+		tipc_node_unlock(n_ptr);
 	}
 
+	read_unlock_bh(&tipc_net_lock);
 	return buf;
 }
--
cgit v1.2.3
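The second patch applies one discipline throughout: every routine that walks tipc_nodes takes tipc_net_lock for reading first, drops it on every early-return path, and nests the per-node lock inside it before touching a node's link array. The sketch below models that discipline in userspace C, with a pthread read-write lock standing in for tipc_net_lock and a per-node mutex standing in for the node's spinlock; the function and variable names (count_nodes_up, report_nodes, net_lock) are illustrative, not TIPC's.

/*
 * Userspace model of the reader-side discipline added above: take the
 * list-wide lock for reading, release it on every exit path (including
 * early returns), and nest the per-node lock inside it.  A pthread rwlock
 * stands in for tipc_net_lock and a per-node mutex for the node spinlock;
 * all names here are illustrative, not part of TIPC.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	unsigned int addr;
	bool up;
	pthread_mutex_t lock;	/* protects this node's state */
	struct node *next;
};

static struct node *node_list;	/* additions/removals require the write lock */
static pthread_rwlock_t net_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Count reachable nodes without letting the list change underneath us. */
static unsigned int count_nodes_up(void)
{
	struct node *n;
	unsigned int cnt = 0;

	pthread_rwlock_rdlock(&net_lock);
	for (n = node_list; n; n = n->next) {
		pthread_mutex_lock(&n->lock);	/* nested inside the read lock */
		if (n->up)
			cnt++;
		pthread_mutex_unlock(&n->lock);
	}
	pthread_rwlock_unlock(&net_lock);
	return cnt;
}

/* Every early return drops the read lock, mirroring the patch above. */
static int report_nodes(char *buf, size_t size)
{
	struct node *n;
	size_t used = 0;
	int written;

	pthread_rwlock_rdlock(&net_lock);
	if (!node_list) {
		pthread_rwlock_unlock(&net_lock);	/* nothing to report */
		return 0;
	}
	for (n = node_list; n; n = n->next) {
		written = snprintf(buf + used, size - used, "%u%c ",
				   n->addr, n->up ? '+' : '-');
		if (written < 0 || (size_t)written >= size - used) {
			pthread_rwlock_unlock(&net_lock);	/* buffer exhausted */
			return -1;
		}
		used += (size_t)written;
	}
	pthread_rwlock_unlock(&net_lock);
	return (int)used;
}

int main(void)
{
	struct node b = { .addr = 2, .up = false, .next = NULL };
	struct node a = { .addr = 1, .up = true, .next = &b };
	char buf[64];

	pthread_mutex_init(&a.lock, NULL);
	pthread_mutex_init(&b.lock, NULL);
	node_list = &a;

	printf("nodes up: %u\n", count_nodes_up());	/* nodes up: 1 */
	if (report_nodes(buf, sizeof(buf)) > 0)
		printf("nodes: %s\n", buf);		/* nodes: 1+ 2- */
	return 0;
}

Note the lock ordering: the per-node lock is always acquired inside the list-wide read lock and released before moving on, mirroring how tipc_node_lock()/tipc_node_unlock() nest inside tipc_net_lock in the patch.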