/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

int sysctl_tipc_named_timeout __read_mostly = 2000;
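
/* Retention time (in ms) for name table updates held on the deferred
 * update queue (tn->dist_queue); configurable via sysctl, default 2000 ms.
 */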

struct distr_queue_item {
        struct distr_item i;
        u32 dtype;
        u32 node;
        unsigned long expires;
        struct list_head next;
};
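
/* On the wire, a NAME_DISTRIBUTOR message is a basic header of INT_H_SIZE
 * bytes followed by an array of fixed-size struct distr_item entries, with
 * all item fields in network byte order (see publ_to_item() below and the
 * matching ntohl() conversions in tipc_update_nametbl()).
 */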

/**
 * publ_to_item - add publication info to a publication message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
        i->type = htonl(p->type);
        i->lower = htonl(p->lower);
        i->upper = htonl(p->upper);
        i->port = htonl(p->port);
        i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
{
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
        u32 self = tipc_own_addr(net);
        struct tipc_msg *msg;

        if (buf != NULL) {
                msg = buf_msg(buf);
                tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
                              type, INT_H_SIZE, dest);
                msg_set_size(msg, INT_H_SIZE + size);
        }
        return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 *
 * Returns a single-item PUBLICATION message for the caller to send, or NULL
 * if the publication has node scope or no buffer could be allocated.
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
        struct name_table *nt = tipc_name_table(net);
        struct distr_item *item;
        struct sk_buff *skb;

        if (publ->scope == TIPC_NODE_SCOPE) {
                list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
                return NULL;
        }
        write_lock_bh(&nt->cluster_scope_lock);
        list_add_tail(&publ->binding_node, &nt->cluster_scope);
        write_unlock_bh(&nt->cluster_scope_lock);
        skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
        if (!skb) {
                pr_warn("Publication distribution failure\n");
                return NULL;
        }

        item = (struct distr_item *)msg_data(buf_msg(skb));
        publ_to_item(item, publ);
        return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 *
 * Returns a single-item WITHDRAWAL message for the caller to send, or NULL
 * if the publication has node scope or no buffer could be allocated.
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
        struct name_table *nt = tipc_name_table(net);
        struct sk_buff *buf;
        struct distr_item *item;

        write_lock_bh(&nt->cluster_scope_lock);
        list_del(&publ->binding_node);
        write_unlock_bh(&nt->cluster_scope_lock);
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;

        buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Withdrawal distribution failure\n");
                return NULL;
        }

        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
        return buf;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
                             u32 dnode, struct list_head *pls)
{
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
        u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
                        ITEM_SIZE) * ITEM_SIZE;
        u32 msg_rem = msg_dsz;

        list_for_each_entry(publ, pls, binding_node) {
                /* Prepare next buffer: */
                if (!skb) {
                        skb = named_prepare_buf(net, PUBLICATION, msg_rem,
                                                dnode);
                        if (!skb) {
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
                        msg_set_bc_ack_invalid(buf_msg(skb), true);
                        item = (struct distr_item *)msg_data(buf_msg(skb));
                }

                /* Pack publication into message: */
                publ_to_item(item, publ);
                item++;
                msg_rem -= ITEM_SIZE;

                /* Append full buffer to list: */
                if (!msg_rem) {
                        __skb_queue_tail(list, skb);
                        skb = NULL;
                        msg_rem = msg_dsz;
                }
        }
        /* Trim and append any partially filled buffer: */
        if (skb) {
                msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
                skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
                __skb_queue_tail(list, skb);
        }
}
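
/* Note on the bulk sizing above: each message carries as many whole items
 * as fit below the link MTU. Assuming, for illustration only, an MTU of
 * 1500 with INT_H_SIZE = 40 and ITEM_SIZE = 20 (both defined outside this
 * file), msg_dsz = ((1500 - 40) / 20) * 20 = 1460 bytes, i.e. 73 items per
 * full bulk PUBLICATION message.
 */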

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 */
void tipc_named_node_up(struct net *net, u32 dnode)
{
        struct name_table *nt = tipc_name_table(net);
        struct sk_buff_head head;

        __skb_queue_head_init(&head);

        /* Hold cluster_scope_lock across both packing and sending the bulk
         * messages, so that a concurrent withdrawal cannot overtake the
         * PUBLICATION items it is meant to cancel.
         */
        read_lock_bh(&nt->cluster_scope_lock);
        named_distribute(net, &head, dnode, &nt->cluster_scope);
        tipc_node_xmit(net, &head, dnode, 0);
        read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
        struct tipc_net *tn = tipc_net(net);
        struct publication *p;

        spin_lock_bh(&tn->nametbl_lock);
        p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
                                     publ->node, publ->key);
        if (p)
                tipc_node_unsubscribe(net, &p->binding_node, addr);
        spin_unlock_bh(&tn->nametbl_lock);

        if (p != publ) {
                pr_err("Unable to remove publication from failed node\n"
                       " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
                       publ->type, publ->lower, publ->node, publ->port,
                       publ->key);
        }

        if (p)
                kfree_rcu(p, rcu);
}

/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
 */
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct distr_queue_item *e, *tmp;

        spin_lock_bh(&tn->nametbl_lock);
        list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
                if (e->node != addr)
                        continue;
                list_del(&e->next);
                kfree(e);
        }
        spin_unlock_bh(&tn->nametbl_lock);
}

void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
{
        struct publication *publ, *tmp;

        list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
                tipc_publ_purge(net, publ, addr);
        tipc_dist_queue_purge(net, addr);
}
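
/* Name table updates are distributed asynchronously across the cluster, so
 * a PUBLICATION and the WITHDRAWAL that cancels it, issued by different
 * nodes, may reach a third node in either order. An arriving update can
 * therefore fail against the local table even when the cluster state is
 * only transiently inconsistent.
 */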

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 * subscribers
 *
 * tipc_nametbl_lock must be held.
 * Returns true if the update was successfully processed, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
                                u32 node, u32 dtype)
{
        struct publication *p = NULL;
        u32 lower = ntohl(i->lower);
        u32 upper = ntohl(i->upper);
        u32 type = ntohl(i->type);
        u32 port = ntohl(i->port);
        u32 key = ntohl(i->key);

        if (dtype == PUBLICATION) {
                p = tipc_nametbl_insert_publ(net, type, lower, upper,
                                             TIPC_CLUSTER_SCOPE, node,
                                             port, key);
                if (p) {
                        tipc_node_subscribe(net, &p->binding_node, node);
                        return true;
                }
        } else if (dtype == WITHDRAWAL) {
                p = tipc_nametbl_remove_publ(net, type, lower,
                                             upper, node, key);
                if (p) {
                        tipc_node_unsubscribe(net, &p->binding_node, node);
                        kfree_rcu(p, rcu);
                        return true;
                }
                pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
                                    type, lower, node);
        } else {
                pr_warn("Unrecognized name table message received\n");
        }
        return false;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_msg *msg;
        struct distr_item *item;
        uint count;
        u32 node;
        struct sk_buff *skb;
        int mtype;

        spin_lock_bh(&tn->nametbl_lock);
        for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
                skb_linearize(skb);
                msg = buf_msg(skb);
                mtype = msg_type(msg);
                item = (struct distr_item *)msg_data(msg);
                count = msg_data_sz(msg) / ITEM_SIZE;
                node = msg_orignode(msg);
                while (count--) {
                        tipc_update_nametbl(net, item, node, mtype);
                        item++;
                }
                kfree_skb(skb);
        }
        spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
        struct name_table *nt = tipc_name_table(net);
        struct tipc_net *tn = tipc_net(net);
        struct publication *publ;
        u32 self = tipc_own_addr(net);

        spin_lock_bh(&tn->nametbl_lock);

        list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
                publ->node = self;
        list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
                publ->node = self;

        spin_unlock_bh(&tn->nametbl_lock);
}