/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"


/*
 * Out-of-range value for link session numbers
 */

#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */

#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */

#define START_CHANGEOVER 100000u

/**
 * struct link_name - deconstructed link name
 * @addr_local: network address of node at this end
 * @if_local: name of interface at this end
 * @addr_peer: network address of node at far end
 * @if_peer: name of interface at far end
 */

struct link_name {
	u32 addr_local;
	char if_local[TIPC_MAX_IF_NAME];
	u32 addr_peer;
	char if_peer[TIPC_MAX_IF_NAME];
};

static void link_handle_out_of_seq_msg(struct link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    u32 num_sect, unsigned int total_len,
				    u32 destnode);
static void link_check_defragm_bufs(struct link *l_ptr);
static void link_state_event(struct link *l_ptr, u32 event);
static void link_reset_statistics(struct link *l_ptr);
static void link_print(struct link *l_ptr, const char *str);
static void link_start(struct link *l_ptr);
static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);

/*
 *  Simple link routines
 */

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
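
/*
 * Illustrative example: align(5) == 8 and align(8) == 8, i.e. values are
 * rounded up to the next multiple of 4.  link_bundle_buf() relies on this
 * so that each message appended to a bundle starts on a 4-byte boundary.
 */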

static void link_init_max_pkt(struct link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct link *l_ptr)
{
	if (l_ptr->next_out)
		return msg_seqno(buf_msg(l_ptr->next_out));
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */

int tipc_link_is_up(struct link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_name_validate - validate & (optionally) deconstruct link name
 * @name - ptr to link name string
 * @name_parts - ptr to area for link name components (or NULL if not needed)
 *
 * Returns 1 if link name is valid, otherwise 0.
 */

static int link_name_validate(const char *name, struct link_name *name_parts)
{
	char name_copy[TIPC_MAX_LINK_NAME];
	char *addr_local;
	char *if_local;
	char *addr_peer;
	char *if_peer;
	char dummy;
	u32 z_local, c_local, n_local;
	u32 z_peer, c_peer, n_peer;
	u32 if_local_len;
	u32 if_peer_len;

	/* copy link name & ensure length is OK */

	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of link name are present */

	addr_local = name_copy;
	if_local = strchr(addr_local, ':');
	if (if_local == NULL)
		return 0;
	*(if_local++) = 0;
	addr_peer = strchr(if_local, '-');
	if (addr_peer == NULL)
		return 0;
	*(addr_peer++) = 0;
	if_local_len = addr_peer - if_local;
	if_peer = strchr(addr_peer, ':');
	if (if_peer == NULL)
		return 0;
	*(if_peer++) = 0;
	if_peer_len = strlen(if_peer) + 1;

	/* validate component parts of link name */

	if ((sscanf(addr_local, "%u.%u.%u%c",
		    &z_local, &c_local, &n_local, &dummy) != 3) ||
	    (sscanf(addr_peer, "%u.%u.%u%c",
		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
		return 0;

	/* return link name components, if necessary */

	if (name_parts) {
		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
		strcpy(name_parts->if_local, if_local);
		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
		strcpy(name_parts->if_peer, if_peer);
	}
	return 1;
}
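
/*
 * Illustrative example: a link name of the form parsed above looks like
 * "1.1.1:eth0-1.1.2:eth0", i.e. "<z.c.n>:<local i/f>-<z.c.n>:<peer i/f>",
 * split at the first ':', the '-', and the second ':'.
 */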

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */

static void link_timeout(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */

	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_check_defragm_bufs(l_ptr);

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}
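
/*
 * Note: for a FIRST_FRAGMENT the wrapped (original) message size is
 * sampled above, so the length profile reflects full message sizes rather
 * than individual fragment sizes.
 */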

static void link_set_timer(struct link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */

struct link *tipc_link_create(struct tipc_node *n_ptr,
			      struct tipc_bearer *b_ptr,
			      const struct tipc_media_addr *media_addr)
{
	struct link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish second link on <%s> to %s\n",
		    b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */

void tipc_link_delete(struct link *l_ptr)
{
	if (!l_ptr) {
		err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

static void link_start(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */

static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}
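
/*
 * Illustrative example (hypothetical numbers): a blocked send of
 * sz = 3000 bytes on a link with max_pkt = 1500 records
 * waiting_pkts = 1 + (3000 - 1) / 1500 = 2; this count is later charged
 * against the free send window in tipc_link_wakeup_ports().
 */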

void tipc_link_wakeup_ports(struct link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */

static void link_release_outqueue(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */

void tipc_link_reset_fragments(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->defragm_buf;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	l_ptr->defragm_buf = NULL;
}

/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */

void tipc_link_stop(struct link *l_ptr)
{
	struct sk_buff *buf;
	struct sk_buff *next;

	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}

	buf = l_ptr->first_out;
	while (buf) {
		next = buf->next;
		buf_discard(buf);
		buf = next;
	}

	tipc_link_reset_fragments(l_ptr);

	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */

	link_release_outqueue(l_ptr);
	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}


static void link_activate(struct link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */

static void link_state_event(struct link *l_ptr, unsigned event)
{
	struct link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;	  /* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer\n",
			     l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in WW state\n", event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer "
			     "while probing\n", l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				warn("Resetting link <%s>, peer not responding\n",
				     l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			err("Unknown link event %u in WU state\n", event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RU state\n", event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RR state\n", event);
		}
		break;
	default:
		err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
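
/*
 * Summary of the state machine above: WORKING_WORKING and WORKING_UNKNOWN
 * are the "up" states, RESET_UNKNOWN and RESET_RESET the "down" states.
 * Received traffic or ACTIVATE messages confirm the peer and move the link
 * toward WORKING_WORKING; timeouts without received traffic degrade it to
 * WORKING_UNKNOWN and trigger probes until the abort limit is reached and
 * the link is reset; RESET/ACTIVATE protocol messages negotiate the way
 * back up.
 */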

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */

static int link_bundle_buf(struct link *l_ptr,
			   struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	buf_discard(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}
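
/*
 * Illustrative example (hypothetical sizes): appending a 110-byte message
 * to a bundle currently holding 150 bytes pads the bundle to
 * align(150) == 152, copies the new message at offset 152, and sets the
 * bundle size to 262 while incrementing its message count.
 */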

static void link_add_to_outqueue(struct link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from tipc_link_send()
 */

int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	msg_set_prevnode(msg, tipc_own_addr);	/* If routed message */

	/* Match msg importance against queue limits: */

	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			buf_discard(buf);
			return -ELINKCONG;
		}
		buf_discard(buf);
		if (imp > CONN_MANAGER) {
			warn("Resetting link <%s>, send queue full", l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */

	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent: */

	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
		   !link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
			l_ptr->unacked_window = 0;
		} else {
			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
			l_ptr->stats.bearer_congs++;
			l_ptr->next_out = buf;
		}
		return dsz;
	}
	/* Congestion: can message be bundled ?: */

	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */

		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
			return dsz;
		}

		/* Try creating a new bundle */

		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					 INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
	return dsz;
}
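
/*
 * Note on the send path above: a message is (1) discarded, with its port
 * scheduled for a wakeup or the link reset, when the outbound queue limit
 * for its importance level is exceeded, (2) fragmented when it exceeds
 * max_pkt, (3) transmitted immediately when neither link nor bearer is
 * congested, and otherwise (4) bundled into, or queued behind, the
 * existing outbound queue.
 */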
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
* tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
|
2006-01-03 02:04:38 +08:00
|
|
|
* not been selected yet, and the the owner node is not locked
|
|
|
|
* Called by TIPC internal users, e.g. the name distributor
|
|
|
|
*/
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
|
|
|
struct link *l_ptr;
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *n_ptr;
|
2006-01-03 02:04:38 +08:00
|
|
|
int res = -ELINKCONG;
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
read_lock_bh(&tipc_net_lock);
|
2011-01-01 02:59:18 +08:00
|
|
|
n_ptr = tipc_node_find(dest);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (n_ptr) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_lock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr = n_ptr->active_links[selector & 1];
|
2011-01-01 02:59:35 +08:00
|
|
|
if (l_ptr)
|
2006-01-18 07:38:21 +08:00
|
|
|
res = tipc_link_send_buf(l_ptr, buf);
|
2011-01-01 02:59:35 +08:00
|
|
|
else
|
2006-06-26 14:50:30 +08:00
|
|
|
buf_discard(buf);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
} else {
|
|
|
|
buf_discard(buf);
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2011-06-01 01:38:02 +08:00
|
|
|
/*
|
|
|
|
* tipc_link_send_names - send name table entries to new neighbor
|
|
|
|
*
|
|
|
|
* Send routine for bulk delivery of name table messages when contact
|
|
|
|
* with a new neighbor occurs. No link congestion checking is performed
|
|
|
|
* because name table messages *must* be delivered. The messages must be
|
|
|
|
* small enough not to require fragmentation.
|
|
|
|
* Called without any locks held.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void tipc_link_send_names(struct list_head *message_list, u32 dest)
|
|
|
|
{
|
|
|
|
struct tipc_node *n_ptr;
|
|
|
|
struct link *l_ptr;
|
|
|
|
struct sk_buff *buf;
|
|
|
|
struct sk_buff *temp_buf;
|
|
|
|
|
|
|
|
if (list_empty(message_list))
|
|
|
|
return;
|
|
|
|
|
|
|
|
read_lock_bh(&tipc_net_lock);
|
|
|
|
n_ptr = tipc_node_find(dest);
|
|
|
|
if (n_ptr) {
|
|
|
|
tipc_node_lock(n_ptr);
|
|
|
|
l_ptr = n_ptr->active_links[0];
|
|
|
|
if (l_ptr) {
|
|
|
|
/* convert circular list to linear list */
|
|
|
|
((struct sk_buff *)message_list->prev)->next = NULL;
|
|
|
|
link_add_chain_to_outqueue(l_ptr,
|
|
|
|
(struct sk_buff *)message_list->next, 0);
|
|
|
|
tipc_link_push_queue(l_ptr);
|
|
|
|
INIT_LIST_HEAD(message_list);
|
|
|
|
}
|
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
}
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
|
|
|
|
|
|
|
/* discard the messages if they couldn't be sent */
|
|
|
|
|
|
|
|
list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
|
|
|
|
list_del((struct list_head *)buf);
|
|
|
|
buf_discard(buf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
* link_send_buf_fast: Entry for data messages where the
|
2006-01-03 02:04:38 +08:00
|
|
|
* destination link is known and the header is complete,
|
|
|
|
* inclusive total message length. Very time critical.
|
|
|
|
* Link is locked. Returns user data length.
|
|
|
|
*/
|
|
|
|
|
2006-03-21 14:37:04 +08:00
|
|
|
static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
|
|
|
|
u32 *used_max_pkt)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
|
|
|
struct tipc_msg *msg = buf_msg(buf);
|
|
|
|
int res = msg_data_sz(msg);
|
|
|
|
|
|
|
|
if (likely(!link_congested(l_ptr))) {
|
2010-05-11 22:30:10 +08:00
|
|
|
if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
|
|
|
|
link_add_to_outqueue(l_ptr, buf, msg);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
|
|
|
|
&l_ptr->media_addr))) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->unacked_window = 0;
|
|
|
|
return res;
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
l_ptr->next_out = buf;
|
|
|
|
return res;
|
|
|
|
}
|
2011-01-01 02:59:32 +08:00
|
|
|
} else
|
2010-05-11 22:30:10 +08:00
|
|
|
*used_max_pkt = l_ptr->max_pkt;
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
return tipc_link_send_buf(l_ptr, buf); /* All other cases */
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
* tipc_send_buf_fast: Entry for data messages where the
|
2006-01-03 02:04:38 +08:00
|
|
|
* destination node is known and the header is complete,
|
|
|
|
* inclusive total message length.
|
|
|
|
* Returns user data length.
|
|
|
|
*/
|
|
|
|
int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
|
|
|
|
{
|
|
|
|
struct link *l_ptr;
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *n_ptr;
|
2006-01-03 02:04:38 +08:00
|
|
|
int res;
|
|
|
|
u32 selector = msg_origport(buf_msg(buf)) & 1;
|
|
|
|
u32 dummy;
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
read_lock_bh(&tipc_net_lock);
|
2011-01-01 02:59:18 +08:00
|
|
|
n_ptr = tipc_node_find(destnode);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (likely(n_ptr)) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_lock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr = n_ptr->active_links[selector];
|
|
|
|
if (likely(l_ptr)) {
|
|
|
|
res = link_send_buf_fast(l_ptr, buf, &dummy);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
return res;
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
res = msg_data_sz(buf_msg(buf));
|
|
|
|
tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
* tipc_link_send_sections_fast: Entry for messages where the
|
2006-01-03 02:04:38 +08:00
|
|
|
* destination processor is known and the header is complete,
|
2007-02-09 22:25:21 +08:00
|
|
|
* except for total message length.
|
2006-01-03 02:04:38 +08:00
|
|
|
* Returns user data length or errno.
|
|
|
|
*/
|
2011-01-08 00:43:40 +08:00
|
|
|
int tipc_link_send_sections_fast(struct tipc_port *sender,
|
2006-01-18 07:38:21 +08:00
|
|
|
struct iovec const *msg_sect,
|
2007-02-09 22:25:21 +08:00
|
|
|
const u32 num_sect,
|
tipc: Avoid recomputation of outgoing message length
Rework TIPC's message sending routines to take advantage of the total
amount of data value passed to it by the kernel socket infrastructure.
This change eliminates the need for TIPC to compute the size of outgoing
messages itself, as well as the check for an oversize message in
tipc_msg_build(). In addition, this change warrants an explanation:
- res = send_packet(NULL, sock, &my_msg, 0);
+ res = send_packet(NULL, sock, &my_msg, bytes_to_send);
Previously, the final argument to send_packet() was ignored (since the
amount of data being sent was recalculated by a lower-level routine)
and we could just pass in a dummy value (0). Now that the
recalculation is being eliminated, the argument value being passed to
send_packet() is significant and we have to supply the actual amount
of data we want to send.
Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-04-21 23:42:07 +08:00
|
|
|
unsigned int total_len,
|
2006-01-18 07:38:21 +08:00
|
|
|
u32 destaddr)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
2011-01-08 00:43:40 +08:00
|
|
|
struct tipc_msg *hdr = &sender->phdr;
|
2006-01-03 02:04:38 +08:00
|
|
|
struct link *l_ptr;
|
|
|
|
struct sk_buff *buf;
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *node;
|
2006-01-03 02:04:38 +08:00
|
|
|
int res;
|
|
|
|
u32 selector = msg_origport(hdr) & 1;
|
|
|
|
|
|
|
|
again:
|
|
|
|
/*
|
|
|
|
* Try building message using port's max_pkt hint.
|
|
|
|
* (Must not hold any locks while building message.)
|
|
|
|
*/
|
|
|
|
|
tipc: Avoid recomputation of outgoing message length
Rework TIPC's message sending routines to take advantage of the total
amount of data value passed to it by the kernel socket infrastructure.
This change eliminates the need for TIPC to compute the size of outgoing
messages itself, as well as the check for an oversize message in
tipc_msg_build(). In addition, this change warrants an explanation:
- res = send_packet(NULL, sock, &my_msg, 0);
+ res = send_packet(NULL, sock, &my_msg, bytes_to_send);
Previously, the final argument to send_packet() was ignored (since the
amount of data being sent was recalculated by a lower-level routine)
and we could just pass in a dummy value (0). Now that the
recalculation is being eliminated, the argument value being passed to
send_packet() is significant and we have to supply the actual amount
of data we want to send.
Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-04-21 23:42:07 +08:00
|
|
|
res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
|
|
|
|
sender->max_pkt, !sender->user_port, &buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
read_lock_bh(&tipc_net_lock);
|
2011-01-01 02:59:18 +08:00
|
|
|
node = tipc_node_find(destaddr);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (likely(node)) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_lock(node);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr = node->active_links[selector];
|
|
|
|
if (likely(l_ptr)) {
|
|
|
|
if (likely(buf)) {
|
|
|
|
res = link_send_buf_fast(l_ptr, buf,
|
2011-01-08 00:43:40 +08:00
|
|
|
&sender->max_pkt);
|
2006-01-03 02:04:38 +08:00
|
|
|
exit:
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(node);
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Exit if build request was invalid */
|
|
|
|
|
|
|
|
if (unlikely(res < 0))
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
/* Exit if link (or bearer) is congested */
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
if (link_congested(l_ptr) ||
|
2006-01-03 02:04:38 +08:00
|
|
|
!list_empty(&l_ptr->b_ptr->cong_links)) {
|
|
|
|
res = link_schedule_port(l_ptr,
|
2011-01-08 00:43:40 +08:00
|
|
|
sender->ref, res);
|
2006-01-03 02:04:38 +08:00
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
2006-01-03 02:04:38 +08:00
|
|
|
* Message size exceeds max_pkt hint; update hint,
|
|
|
|
* then re-try fast path or fragment the message
|
|
|
|
*/
|
|
|
|
|
2011-01-08 00:43:40 +08:00
|
|
|
sender->max_pkt = l_ptr->max_pkt;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(node);
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
|
2011-01-08 00:43:40 +08:00
|
|
|
if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
|
2006-01-03 02:04:38 +08:00
|
|
|
goto again;
|
|
|
|
|
|
|
|
return link_send_sections_long(sender, msg_sect,
|
tipc: Avoid recomputation of outgoing message length
Rework TIPC's message sending routines to take advantage of the total
amount of data value passed to it by the kernel socket infrastructure.
This change eliminates the need for TIPC to compute the size of outgoing
messages itself, as well as the check for an oversize message in
tipc_msg_build(). In addition, this change warrants an explanation:
- res = send_packet(NULL, sock, &my_msg, 0);
+ res = send_packet(NULL, sock, &my_msg, bytes_to_send);
Previously, the final argument to send_packet() was ignored (since the
amount of data being sent was recalculated by a lower-level routine)
and we could just pass in a dummy value (0). Now that the
recalculation is being eliminated, the argument value being passed to
send_packet() is significant and we have to supply the actual amount
of data we want to send.
Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-04-21 23:42:07 +08:00
|
|
|
num_sect, total_len,
|
|
|
|
destaddr);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(node);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
/* Couldn't find a link to the destination node */
|
|
|
|
|
|
|
|
if (buf)
|
|
|
|
return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
|
|
|
|
if (res >= 0)
|
2006-01-18 07:38:21 +08:00
|
|
|
return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
|
tipc: Avoid recomputation of outgoing message length
Rework TIPC's message sending routines to take advantage of the total
amount of data value passed to it by the kernel socket infrastructure.
This change eliminates the need for TIPC to compute the size of outgoing
messages itself, as well as the check for an oversize message in
tipc_msg_build(). In addition, this change warrants an explanation:
- res = send_packet(NULL, sock, &my_msg, 0);
+ res = send_packet(NULL, sock, &my_msg, bytes_to_send);
Previously, the final argument to send_packet() was ignored (since the
amount of data being sent was recalculated by a lower-level routine)
and we could just pass in a dummy value (0). Now that the
recalculation is being eliminated, the argument value being passed to
send_packet() is significant and we have to supply the actual amount
of data we want to send.
Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-04-21 23:42:07 +08:00
|
|
|
total_len, TIPC_ERR_NO_NODE);
|
2006-01-03 02:04:38 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
* link_send_sections_long(): Entry for long messages where the
|
2006-01-03 02:04:38 +08:00
|
|
|
* destination node is known and the header is complete,
|
2007-02-09 22:25:21 +08:00
|
|
|
* inclusive total message length.
|
2006-01-03 02:04:38 +08:00
|
|
|
* Link and bearer congestion status have been checked to be ok,
|
|
|
|
* and are ignored if they change.
|
|
|
|
*
|
|
|
|
* Note that fragments do not use the full link MTU so that they won't have
|
|
|
|
* to undergo refragmentation if link changeover causes them to be sent
|
|
|
|
* over another link with an additional tunnel header added as prefix.
|
|
|
|
* (Refragmentation will still occur if the other link has a smaller MTU.)
|
|
|
|
*
|
|
|
|
* Returns user data length or errno.
|
|
|
|
*/
|
2011-01-08 00:43:40 +08:00
|
|
|
static int link_send_sections_long(struct tipc_port *sender,
|
2006-01-03 02:04:38 +08:00
|
|
|
struct iovec const *msg_sect,
|
|
|
|
u32 num_sect,
|
tipc: Avoid recomputation of outgoing message length
Rework TIPC's message sending routines to take advantage of the total
amount of data value passed to it by the kernel socket infrastructure.
This change eliminates the need for TIPC to compute the size of outgoing
messages itself, as well as the check for an oversize message in
tipc_msg_build(). In addition, this change warrants an explanation:
- res = send_packet(NULL, sock, &my_msg, 0);
+ res = send_packet(NULL, sock, &my_msg, bytes_to_send);
Previously, the final argument to send_packet() was ignored (since the
amount of data being sent was recalculated by a lower-level routine)
and we could just pass in a dummy value (0). Now that the
recalculation is being eliminated, the argument value being passed to
send_packet() is significant and we have to supply the actual amount
of data we want to send.
Signed-off-by: Allan Stephens <Allan.Stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-04-21 23:42:07 +08:00
|
|
|
unsigned int total_len,
|
2006-01-03 02:04:38 +08:00
|
|
|
u32 destaddr)
|
|
|
|
{
|
|
|
|
struct link *l_ptr;
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *node;
|
2011-01-08 00:43:40 +08:00
|
|
|
struct tipc_msg *hdr = &sender->phdr;
|
2011-04-21 23:42:07 +08:00
|
|
|
u32 dsz = total_len;
|
2011-01-01 02:59:32 +08:00
|
|
|
u32 max_pkt, fragm_sz, rest;
|
2006-01-03 02:04:38 +08:00
|
|
|
struct tipc_msg fragm_hdr;
|
2011-01-01 02:59:32 +08:00
|
|
|
struct sk_buff *buf, *buf_chain, *prev;
|
|
|
|
u32 fragm_crs, fragm_rest, hsz, sect_rest;
|
2006-01-03 02:04:38 +08:00
|
|
|
const unchar *sect_crs;
|
|
|
|
int curr_sect;
|
|
|
|
u32 fragm_no;
|
|
|
|
|
|
|
|
again:
|
|
|
|
fragm_no = 1;
|
2011-01-08 00:43:40 +08:00
|
|
|
max_pkt = sender->max_pkt - INT_H_SIZE;
|
2006-01-03 02:04:38 +08:00
|
|
|
/* leave room for tunnel header in case of link changeover */
|
2007-02-09 22:25:21 +08:00
|
|
|
fragm_sz = max_pkt - INT_H_SIZE;
|
2006-01-03 02:04:38 +08:00
|
|
|
/* leave room for fragmentation header in each fragment */
|
|
|
|
rest = dsz;
|
|
|
|
fragm_crs = 0;
|
|
|
|
fragm_rest = 0;
|
|
|
|
sect_rest = 0;
|
2006-03-21 14:36:47 +08:00
|
|
|
sect_crs = NULL;
|
2006-01-03 02:04:38 +08:00
|
|
|
curr_sect = -1;
|
|
|
|
|
|
|
|
/* Prepare reusable fragment header: */
|
|
|
|
|
2010-05-11 22:30:12 +08:00
|
|
|
tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
|
2008-06-05 08:37:34 +08:00
|
|
|
INT_H_SIZE, msg_destnode(hdr));
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_set_size(&fragm_hdr, max_pkt);
|
|
|
|
msg_set_fragm_no(&fragm_hdr, 1);
|
|
|
|
|
|
|
|
/* Prepare header of first fragment: */
|
|
|
|
|
2010-10-13 21:20:35 +08:00
|
|
|
buf_chain = buf = tipc_buf_acquire(max_pkt);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
buf->next = NULL;
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
|
2006-01-03 02:04:38 +08:00
|
|
|
hsz = msg_hdr_sz(hdr);
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
/* Chop up message: */
|
|
|
|
|
|
|
|
fragm_crs = INT_H_SIZE + hsz;
|
|
|
|
fragm_rest = fragm_sz - hsz;
|
|
|
|
|
|
|
|
do { /* For all sections */
|
|
|
|
u32 sz;
|
|
|
|
|
|
|
|
if (!sect_rest) {
|
|
|
|
sect_rest = msg_sect[++curr_sect].iov_len;
|
|
|
|
sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sect_rest < fragm_rest)
|
|
|
|
sz = sect_rest;
|
|
|
|
else
|
|
|
|
sz = fragm_rest;
|
|
|
|
|
|
|
|
if (likely(!sender->user_port)) {
|
|
|
|
if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
|
|
|
|
error:
|
|
|
|
for (; buf_chain; buf_chain = buf) {
|
|
|
|
buf = buf_chain->next;
|
|
|
|
buf_discard(buf_chain);
|
|
|
|
}
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
} else
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data_offset(buf, fragm_crs,
|
|
|
|
sect_crs, sz);
|
2006-01-03 02:04:38 +08:00
|
|
|
sect_crs += sz;
|
|
|
|
sect_rest -= sz;
|
|
|
|
fragm_crs += sz;
|
|
|
|
fragm_rest -= sz;
|
|
|
|
rest -= sz;
|
|
|
|
|
|
|
|
if (!fragm_rest && rest) {
|
|
|
|
|
|
|
|
/* Initiate new fragment: */
|
|
|
|
if (rest <= fragm_sz) {
|
|
|
|
fragm_sz = rest;
|
2011-01-01 02:59:32 +08:00
|
|
|
msg_set_type(&fragm_hdr, LAST_FRAGMENT);
|
2006-01-03 02:04:38 +08:00
|
|
|
} else {
|
|
|
|
msg_set_type(&fragm_hdr, FRAGMENT);
|
|
|
|
}
|
|
|
|
msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
|
|
|
|
msg_set_fragm_no(&fragm_hdr, ++fragm_no);
|
|
|
|
prev = buf;
|
2010-10-13 21:20:35 +08:00
|
|
|
buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (!buf)
|
|
|
|
goto error;
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
buf->next = NULL;
|
2006-01-03 02:04:38 +08:00
|
|
|
prev->next = buf;
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
|
2006-01-03 02:04:38 +08:00
|
|
|
fragm_crs = INT_H_SIZE;
|
|
|
|
fragm_rest = fragm_sz;
|
|
|
|
}
|
2011-01-01 02:59:32 +08:00
|
|
|
} while (rest > 0);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
2006-01-03 02:04:38 +08:00
|
|
|
* Now we have a buffer chain. Select a link and check
|
|
|
|
* that packet size is still OK
|
|
|
|
*/
|
2011-01-01 02:59:18 +08:00
|
|
|
node = tipc_node_find(destaddr);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (likely(node)) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_lock(node);
|
2011-01-08 00:43:40 +08:00
|
|
|
l_ptr = node->active_links[sender->ref & 1];
|
2006-01-03 02:04:38 +08:00
|
|
|
if (!l_ptr) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(node);
|
2006-01-03 02:04:38 +08:00
|
|
|
goto reject;
|
|
|
|
}
|
2010-05-11 22:30:10 +08:00
|
|
|
if (l_ptr->max_pkt < max_pkt) {
|
2011-01-08 00:43:40 +08:00
|
|
|
sender->max_pkt = l_ptr->max_pkt;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(node);
|
2006-01-03 02:04:38 +08:00
|
|
|
for (; buf_chain; buf_chain = buf) {
|
|
|
|
buf = buf_chain->next;
|
|
|
|
buf_discard(buf_chain);
|
|
|
|
}
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
reject:
|
|
|
|
for (; buf_chain; buf_chain = buf) {
|
|
|
|
buf = buf_chain->next;
|
|
|
|
buf_discard(buf_chain);
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
|
2011-04-21 23:42:07 +08:00
|
|
|
total_len, TIPC_ERR_NO_NODE);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
2011-04-21 23:50:42 +08:00
|
|
|
/* Append chain of fragments to send queue & send them */
|
2006-01-03 02:04:38 +08:00
|
|
|
|
2011-04-17 23:44:24 +08:00
|
|
|
l_ptr->long_msg_seq_no++;
|
2011-04-21 23:50:42 +08:00
|
|
|
link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
|
|
|
|
l_ptr->stats.sent_fragments += fragm_no;
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.sent_fragmented++;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_push_queue(l_ptr);
|
|
|
|
tipc_node_unlock(node);
|
2006-01-03 02:04:38 +08:00
|
|
|
return dsz;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
2006-01-18 07:38:21 +08:00
|
|
|
* tipc_link_push_packet: Push one unsent packet to the media
|
2006-01-03 02:04:38 +08:00
|
|
|
*/
|
2006-01-18 07:38:21 +08:00
|
|
|
u32 tipc_link_push_packet(struct link *l_ptr)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
|
|
|
struct sk_buff *buf = l_ptr->first_out;
|
|
|
|
u32 r_q_size = l_ptr->retransm_queue_size;
|
|
|
|
u32 r_q_head = l_ptr->retransm_queue_head;
|
|
|
|
|
|
|
|
/* Step to position where retransmission failed, if any, */
|
|
|
|
	/* consider that buffers may have been released in the meantime */
|
|
|
|
|
|
|
|
if (r_q_size && buf) {
|
2007-02-09 22:25:21 +08:00
|
|
|
u32 last = lesser(mod(r_q_head + r_q_size),
|
2006-01-03 02:04:38 +08:00
|
|
|
link_last_sent(l_ptr));
|
|
|
|
u32 first = msg_seqno(buf_msg(buf));
|
|
|
|
|
|
|
|
while (buf && less(first, r_q_head)) {
|
|
|
|
first = mod(first + 1);
|
|
|
|
buf = buf->next;
|
|
|
|
}
|
|
|
|
l_ptr->retransm_queue_head = r_q_head = first;
|
|
|
|
l_ptr->retransm_queue_size = r_q_size = mod(last - first);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Continue retransmission now, if there is anything: */
|
|
|
|
|
2010-03-15 15:58:45 +08:00
|
|
|
if (r_q_size && buf) {
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
|
2007-02-09 22:25:21 +08:00
|
|
|
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->retransm_queue_head = mod(++r_q_head);
|
|
|
|
l_ptr->retransm_queue_size = --r_q_size;
|
|
|
|
l_ptr->stats.retransmitted++;
|
2008-07-15 13:44:01 +08:00
|
|
|
return 0;
|
2006-01-03 02:04:38 +08:00
|
|
|
} else {
|
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
return PUSH_FAILED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send deferred protocol message, if any: */
|
|
|
|
|
|
|
|
buf = l_ptr->proto_msg_queue;
|
|
|
|
if (buf) {
|
|
|
|
msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
|
2011-01-01 02:59:32 +08:00
|
|
|
msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->unacked_window = 0;
|
|
|
|
buf_discard(buf);
|
2006-03-21 14:36:47 +08:00
|
|
|
l_ptr->proto_msg_queue = NULL;
|
2008-07-15 13:44:01 +08:00
|
|
|
return 0;
|
2006-01-03 02:04:38 +08:00
|
|
|
} else {
|
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
return PUSH_FAILED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Send one deferred data message, if send window not full: */
|
|
|
|
|
|
|
|
buf = l_ptr->next_out;
|
|
|
|
if (buf) {
|
|
|
|
struct tipc_msg *msg = buf_msg(buf);
|
|
|
|
u32 next = msg_seqno(msg);
|
|
|
|
u32 first = msg_seqno(buf_msg(l_ptr->first_out));
|
|
|
|
|
|
|
|
if (mod(next - first) < l_ptr->queue_limit[0]) {
|
|
|
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
2007-02-09 22:25:21 +08:00
|
|
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
if (msg_user(msg) == MSG_BUNDLER)
|
|
|
|
msg_set_type(msg, CLOSED_MSG);
|
|
|
|
l_ptr->next_out = buf->next;
|
2008-07-15 13:44:01 +08:00
|
|
|
return 0;
|
2006-01-03 02:04:38 +08:00
|
|
|
} else {
|
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
return PUSH_FAILED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return PUSH_FINISHED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * tipc_link_push_queue(): push out the unsent messages of a link where
|
|
|
|
* congestion has abated. Node is locked
|
|
|
|
*/
|
2006-01-18 07:38:21 +08:00
|
|
|
void tipc_link_push_queue(struct link *l_ptr)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
|
|
|
u32 res;
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
|
2006-01-03 02:04:38 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
do {
|
2006-01-18 07:38:21 +08:00
|
|
|
res = tipc_link_push_packet(l_ptr);
|
2008-07-15 13:44:01 +08:00
|
|
|
} while (!res);
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
if (res == PUSH_FAILED)
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
2006-06-26 14:40:01 +08:00
|
|
|
static void link_reset_all(unsigned long addr)
|
|
|
|
{
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *n_ptr;
|
2006-06-26 14:40:01 +08:00
|
|
|
char addr_string[16];
|
|
|
|
u32 i;
|
|
|
|
|
|
|
|
read_lock_bh(&tipc_net_lock);
|
|
|
|
n_ptr = tipc_node_find((u32)addr);
|
|
|
|
if (!n_ptr) {
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
|
|
|
return; /* node no longer exists */
|
|
|
|
}
|
|
|
|
|
|
|
|
tipc_node_lock(n_ptr);
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
warn("Resetting all links to %s\n",
|
2010-05-11 22:30:12 +08:00
|
|
|
tipc_addr_string_fill(addr_string, n_ptr->addr));
|
2006-06-26 14:40:01 +08:00
|
|
|
|
|
|
|
for (i = 0; i < MAX_BEARERS; i++) {
|
|
|
|
if (n_ptr->links[i]) {
|
2011-01-01 02:59:27 +08:00
|
|
|
link_print(n_ptr->links[i], "Resetting link\n");
|
2006-06-26 14:40:01 +08:00
|
|
|
tipc_link_reset(n_ptr->links[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
read_unlock_bh(&tipc_net_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
|
|
|
|
{
|
|
|
|
struct tipc_msg *msg = buf_msg(buf);
|
|
|
|
|
|
|
|
warn("Retransmission failure on link <%s>\n", l_ptr->name);
|
|
|
|
|
|
|
|
if (l_ptr->addr) {
|
|
|
|
|
|
|
|
/* Handle failure on standard link */
|
|
|
|
|
2011-01-01 02:59:27 +08:00
|
|
|
link_print(l_ptr, "Resetting link\n");
|
2006-06-26 14:40:01 +08:00
|
|
|
tipc_link_reset(l_ptr);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
|
|
|
/* Handle failure on broadcast link */
|
|
|
|
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *n_ptr;
|
2006-06-26 14:40:01 +08:00
|
|
|
char addr_string[16];
|
|
|
|
|
2011-01-01 02:59:27 +08:00
|
|
|
info("Msg seq number: %u, ", msg_seqno(msg));
|
|
|
|
info("Outstanding acks: %lu\n",
|
|
|
|
(unsigned long) TIPC_SKB_CB(buf)->handle);
|
2006-10-04 07:25:34 +08:00
|
|
|
|
2011-01-19 02:53:16 +08:00
|
|
|
n_ptr = tipc_bclink_retransmit_to();
|
2006-06-26 14:40:01 +08:00
|
|
|
tipc_node_lock(n_ptr);
|
|
|
|
|
2010-05-11 22:30:12 +08:00
|
|
|
tipc_addr_string_fill(addr_string, n_ptr->addr);
|
2011-01-01 02:59:27 +08:00
|
|
|
info("Multicast link info for %s\n", addr_string);
|
|
|
|
info("Supported: %d, ", n_ptr->bclink.supported);
|
|
|
|
info("Acked: %u\n", n_ptr->bclink.acked);
|
|
|
|
info("Last in: %u, ", n_ptr->bclink.last_in);
|
|
|
|
info("Gap after: %u, ", n_ptr->bclink.gap_after);
|
|
|
|
info("Gap to: %u\n", n_ptr->bclink.gap_to);
|
|
|
|
info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
|
2006-06-26 14:40:01 +08:00
|
|
|
|
|
|
|
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
|
|
|
|
|
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
|
|
|
|
l_ptr->stale_count = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
|
2006-01-18 07:38:21 +08:00
|
|
|
u32 retransmits)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
|
|
|
struct tipc_msg *msg;
|
|
|
|
|
2006-06-26 14:40:01 +08:00
|
|
|
if (!buf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
msg = buf_msg(buf);
|
2007-02-09 22:25:21 +08:00
|
|
|
|
2006-06-26 14:40:01 +08:00
|
|
|
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
|
2010-03-15 15:58:45 +08:00
|
|
|
if (l_ptr->retransm_queue_size == 0) {
|
2006-06-26 14:40:01 +08:00
|
|
|
l_ptr->retransm_queue_head = msg_seqno(msg);
|
|
|
|
l_ptr->retransm_queue_size = retransmits;
|
|
|
|
} else {
|
2010-03-15 15:58:45 +08:00
|
|
|
err("Unexpected retransmit on link %s (qsize=%d)\n",
|
|
|
|
l_ptr->name, l_ptr->retransm_queue_size);
|
2006-06-26 14:40:01 +08:00
|
|
|
}
|
2010-03-15 15:58:45 +08:00
|
|
|
return;
|
2006-06-26 14:40:01 +08:00
|
|
|
} else {
|
|
|
|
/* Detect repeated retransmit failures on uncongested bearer */
|
|
|
|
|
|
|
|
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
|
|
|
|
if (++l_ptr->stale_count > 100) {
|
|
|
|
link_retransmit_failure(l_ptr, buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
l_ptr->last_retransmitted = msg_seqno(msg);
|
|
|
|
l_ptr->stale_count = 1;
|
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2006-06-26 14:40:01 +08:00
|
|
|
|
2010-03-15 15:58:45 +08:00
|
|
|
while (retransmits && (buf != l_ptr->next_out) && buf) {
|
2006-01-03 02:04:38 +08:00
|
|
|
msg = buf_msg(buf);
|
|
|
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
2007-02-09 22:25:21 +08:00
|
|
|
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
buf = buf->next;
|
|
|
|
retransmits--;
|
|
|
|
l_ptr->stats.retransmitted++;
|
|
|
|
} else {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
|
|
|
|
l_ptr->retransm_queue_size = retransmits;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2006-06-26 14:40:01 +08:00
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/**
|
2006-01-03 02:04:38 +08:00
|
|
|
* link_insert_deferred_queue - insert deferred messages back into receive chain
|
|
|
|
*/
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
|
2006-01-03 02:04:38 +08:00
|
|
|
struct sk_buff *buf)
|
|
|
|
{
|
|
|
|
u32 seq_no;
|
|
|
|
|
|
|
|
if (l_ptr->oldest_deferred_in == NULL)
|
|
|
|
return buf;
|
|
|
|
|
|
|
|
seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
|
|
|
|
if (seq_no == mod(l_ptr->next_in_no)) {
|
|
|
|
l_ptr->newest_deferred_in->next = buf;
|
|
|
|
buf = l_ptr->oldest_deferred_in;
|
|
|
|
l_ptr->oldest_deferred_in = NULL;
|
|
|
|
l_ptr->deferred_inqueue_sz = 0;
|
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2008-04-16 10:04:54 +08:00
|
|
|
/**
|
|
|
|
* link_recv_buf_validate - validate basic format of received message
|
|
|
|
*
|
|
|
|
* This routine ensures a TIPC message has an acceptable header, and at least
|
|
|
|
* as much data as the header indicates it should. The routine also ensures
|
|
|
|
* that the entire message header is stored in the main fragment of the message
|
|
|
|
* buffer, to simplify future access to message header fields.
|
|
|
|
*
|
|
|
|
* Note: Having extra info present in the message header or data areas is OK.
|
|
|
|
* TIPC will ignore the excess, under the assumption that it is optional info
|
|
|
|
* introduced by a later release of the protocol.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int link_recv_buf_validate(struct sk_buff *buf)
|
|
|
|
{
|
|
|
|
static u32 min_data_hdr_size[8] = {
|
2011-06-01 03:03:18 +08:00
|
|
|
SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
|
2008-04-16 10:04:54 +08:00
|
|
|
MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
|
|
|
|
};
|
|
|
|
|
|
|
|
struct tipc_msg *msg;
|
|
|
|
u32 tipc_hdr[2];
|
|
|
|
u32 size;
|
|
|
|
u32 hdr_size;
|
|
|
|
u32 min_hdr_size;
|
|
|
|
|
|
|
|
if (unlikely(buf->len < MIN_H_SIZE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
|
|
|
|
if (msg == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (unlikely(msg_version(msg) != TIPC_VERSION))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
size = msg_size(msg);
|
|
|
|
hdr_size = msg_hdr_sz(msg);
|
|
|
|
min_hdr_size = msg_isdata(msg) ?
|
|
|
|
min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
|
|
|
|
|
|
|
|
if (unlikely((hdr_size < min_hdr_size) ||
|
|
|
|
(size < hdr_size) ||
|
|
|
|
(buf->len < size) ||
|
|
|
|
(size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return pskb_may_pull(buf, hdr_size);
|
|
|
|
}
|
|
|
|
|
2010-08-17 19:00:07 +08:00
|
|
|
/**
|
|
|
|
* tipc_recv_msg - process TIPC messages arriving from off-node
|
|
|
|
* @head: pointer to message buffer chain
|
|
|
|
 * @b_ptr: pointer to bearer message arrived on
|
|
|
|
*
|
|
|
|
* Invoked with no locks held. Bearer pointer must point to a valid bearer
|
|
|
|
* structure (i.e. cannot be NULL), but bearer can be inactive.
|
|
|
|
*/
|
|
|
|
|
2011-01-08 02:00:11 +08:00
|
|
|
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
2006-01-18 07:38:21 +08:00
|
|
|
read_lock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
while (head) {
|
2008-09-03 14:38:32 +08:00
|
|
|
struct tipc_node *n_ptr;
|
2006-01-03 02:04:38 +08:00
|
|
|
struct link *l_ptr;
|
|
|
|
struct sk_buff *crs;
|
|
|
|
struct sk_buff *buf = head;
|
2008-04-16 10:04:54 +08:00
|
|
|
struct tipc_msg *msg;
|
|
|
|
u32 seq_no;
|
|
|
|
u32 ackd;
|
2006-01-03 02:04:38 +08:00
|
|
|
u32 released = 0;
|
|
|
|
int type;
|
|
|
|
|
|
|
|
head = head->next;
|
2008-04-16 10:04:54 +08:00
|
|
|
|
2010-08-17 19:00:07 +08:00
|
|
|
/* Ensure bearer is still enabled */
|
|
|
|
|
|
|
|
if (unlikely(!b_ptr->active))
|
|
|
|
goto cont;
|
|
|
|
|
2008-04-16 10:04:54 +08:00
|
|
|
/* Ensure message is well-formed */
|
|
|
|
|
|
|
|
if (unlikely(!link_recv_buf_validate(buf)))
|
2006-01-03 02:04:38 +08:00
|
|
|
goto cont;
|
|
|
|
|
2008-04-16 10:03:23 +08:00
|
|
|
/* Ensure message data is a single contiguous unit */
|
|
|
|
|
2011-01-01 02:59:35 +08:00
|
|
|
if (unlikely(buf_linearize(buf)))
|
2008-04-16 10:03:23 +08:00
|
|
|
goto cont;
|
|
|
|
|
2008-04-16 10:04:54 +08:00
|
|
|
/* Handle arrival of a non-unicast link message */
|
|
|
|
|
|
|
|
msg = buf_msg(buf);
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
if (unlikely(msg_non_seq(msg))) {
|
2008-06-05 08:32:35 +08:00
|
|
|
if (msg_user(msg) == LINK_CONFIG)
|
|
|
|
tipc_disc_recv_msg(buf, b_ptr);
|
|
|
|
else
|
|
|
|
tipc_bclink_recv_pkt(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
2007-02-09 22:25:21 +08:00
|
|
|
|
2011-04-06 03:15:04 +08:00
|
|
|
/* Discard unicast link messages destined for another node */
|
|
|
|
|
2006-06-26 14:39:31 +08:00
|
|
|
if (unlikely(!msg_short(msg) &&
|
|
|
|
(msg_destnode(msg) != tipc_own_addr)))
|
|
|
|
goto cont;
|
2007-02-09 22:25:21 +08:00
|
|
|
|
2010-08-17 19:00:16 +08:00
|
|
|
/* Locate neighboring node that sent message */
|
2008-04-16 10:04:54 +08:00
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
n_ptr = tipc_node_find(msg_prevnode(msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
if (unlikely(!n_ptr))
|
|
|
|
goto cont;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_lock(n_ptr);
|
2008-04-16 10:04:54 +08:00
|
|
|
|
tipc: Ensure both nodes recognize loss of contact between them
Enhances TIPC to ensure that a node that loses contact with a
neighboring node does not allow contact to be re-established until
it sees that its peer has also recognized the loss of contact.
Previously, nodes that were connected by two or more links could
encounter a situation in which node A would lose contact with node B
on all of its links, purge its name table of names published by B,
and then fail to repopulate those names once contact with B was restored.
This would happen because B was able to re-establish one or more links
so quickly that it never reached a point where it had no links to A --
meaning that B never saw a loss of contact with A, and consequently
didn't re-publish its names to A.
This problem is now prevented by enhancing the cleanup done by TIPC
following a loss of contact with a neighboring node to ensure that
node A ignores all messages sent by B until it receives a LINK_PROTOCOL
message that indicates B has lost contact with A, thereby preventing
the (re)establishment of links between the nodes. The loss of contact
is recognized when a RESET or ACTIVATE message is received that has
a "redundant link exists" field of 0, indicating that B's sending link
endpoint is in a reset state and that B has no other working links.
Additionally, TIPC now suppresses the sending of (most) link protocol
messages to a neighboring node while it is cleaning up after an earlier
loss of contact with that node. This stops the peer node from prematurely
activating its link endpoint, which would prevent TIPC from later
activating its own end. TIPC still allows outgoing RESET messages to
occur during cleanup, to avoid problems if its own node recognizes
the loss of contact first and tries to notify the peer of the situation.
Finally, TIPC now recognizes an impending loss of contact with a peer node
as soon as it receives a RESET message on a working link that is the
peer's only link to the node, and ensures that the link protocol
suppression mentioned above goes into effect right away -- that is,
even before its own link endpoints have failed. This is necessary to
ensure correct operation when there are redundant links between the nodes,
since otherwise TIPC would send an ACTIVATE message upon receiving a RESET
on its first link and only begin suppressing when a RESET on its second
link was received, instead of initiating suppression with the first RESET
message as it needs to.
Note: The reworked cleanup code also eliminates a check that prevented
a link endpoint's discovery object from responding to incoming messages
while stale name table entries are being purged. This check is now
unnecessary and would have slowed down re-establishment of communication
between the nodes in some situations.
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2011-05-27 23:00:51 +08:00
|
|
|
/* Locate unicast link endpoint that should handle message */
|
2010-08-17 19:00:16 +08:00
|
|
|
|
2011-05-27 23:00:51 +08:00
|
|
|
l_ptr = n_ptr->links[b_ptr->identity];
|
|
|
|
if (unlikely(!l_ptr)) {
|
2010-08-17 19:00:16 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
goto cont;
|
|
|
|
}
|
|
|
|
|
2011-05-27 23:00:51 +08:00
|
|
|
/* Verify that communication with node is currently allowed */
|
2010-08-17 19:00:16 +08:00
|
|
|
|
2011-05-27 23:00:51 +08:00
|
|
|
if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
|
|
|
|
msg_user(msg) == LINK_PROTOCOL &&
|
|
|
|
(msg_type(msg) == RESET_MSG ||
|
|
|
|
msg_type(msg) == ACTIVATE_MSG) &&
|
|
|
|
!msg_redundant_link(msg))
|
|
|
|
n_ptr->block_setup &= ~WAIT_PEER_DOWN;
|
|
|
|
|
|
|
|
if (n_ptr->block_setup) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
goto cont;
|
|
|
|
}
|
2008-04-16 10:04:54 +08:00
|
|
|
|
|
|
|
/* Validate message sequence number info */
|
|
|
|
|
|
|
|
seq_no = msg_seqno(msg);
|
|
|
|
ackd = msg_ack(msg);
|
|
|
|
|
|
|
|
/* Release acked messages */
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
|
|
|
|
tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
crs = l_ptr->first_out;
|
2007-02-09 22:25:21 +08:00
|
|
|
while ((crs != l_ptr->next_out) &&
|
2006-01-03 02:04:38 +08:00
|
|
|
less_eq(msg_seqno(buf_msg(crs)), ackd)) {
|
|
|
|
struct sk_buff *next = crs->next;
|
|
|
|
|
|
|
|
buf_discard(crs);
|
|
|
|
crs = next;
|
|
|
|
released++;
|
|
|
|
}
|
|
|
|
if (released) {
|
|
|
|
l_ptr->first_out = crs;
|
|
|
|
l_ptr->out_queue_size -= released;
|
|
|
|
}
|
2008-04-16 10:04:54 +08:00
|
|
|
|
|
|
|
/* Try sending any messages link endpoint has pending */
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
if (unlikely(l_ptr->next_out))
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_push_queue(l_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (unlikely(!list_empty(&l_ptr->waiting_ports)))
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_wakeup_ports(l_ptr, 0);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
|
|
|
|
l_ptr->stats.sent_acks++;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
2008-04-16 10:04:54 +08:00
|
|
|
/* Now (finally!) process the incoming message */
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
protocol_check:
|
|
|
|
if (likely(link_working_working(l_ptr))) {
|
|
|
|
if (likely(seq_no == mod(l_ptr->next_in_no))) {
|
|
|
|
l_ptr->next_in_no++;
|
|
|
|
if (unlikely(l_ptr->oldest_deferred_in))
|
|
|
|
head = link_insert_deferred_queue(l_ptr,
|
|
|
|
head);
|
|
|
|
if (likely(msg_is_dest(msg, tipc_own_addr))) {
|
|
|
|
deliver:
|
|
|
|
if (likely(msg_isdata(msg))) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
tipc_port_recv_msg(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
switch (msg_user(msg)) {
|
|
|
|
case MSG_BUNDLER:
|
|
|
|
l_ptr->stats.recv_bundles++;
|
2007-02-09 22:25:21 +08:00
|
|
|
l_ptr->stats.recv_bundled +=
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_msgcnt(msg);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
tipc_link_recv_bundle(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
case NAME_DISTRIBUTOR:
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
tipc_named_recv(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
case CONN_MANAGER:
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
tipc_port_recv_proto_msg(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
case MSG_FRAGMENTER:
|
|
|
|
l_ptr->stats.recv_fragments++;
|
2007-02-09 22:25:21 +08:00
|
|
|
if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
|
2006-01-18 07:38:21 +08:00
|
|
|
&buf, &msg)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.recv_fragmented++;
|
|
|
|
goto deliver;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case CHANGEOVER_PROTOCOL:
|
|
|
|
type = msg_type(msg);
|
2006-01-18 07:38:21 +08:00
|
|
|
if (link_recv_changeover_msg(&l_ptr, &buf)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
msg = buf_msg(buf);
|
|
|
|
seq_no = msg_seqno(msg);
|
|
|
|
if (type == ORIGINAL_MSG)
|
|
|
|
goto deliver;
|
|
|
|
goto protocol_check;
|
|
|
|
}
|
|
|
|
break;
|
2011-03-12 02:09:28 +08:00
|
|
|
default:
|
|
|
|
buf_discard(buf);
|
|
|
|
buf = NULL;
|
|
|
|
break;
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
|
|
|
tipc_net_route_msg(buf);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
link_handle_out_of_seq_msg(l_ptr, buf);
|
|
|
|
head = link_insert_deferred_queue(l_ptr, head);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (msg_user(msg) == LINK_PROTOCOL) {
|
|
|
|
link_recv_proto_msg(l_ptr, buf);
|
|
|
|
head = link_insert_deferred_queue(l_ptr, head);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
|
|
|
|
|
|
|
|
if (link_working_working(l_ptr)) {
|
|
|
|
/* Re-insert in front of queue */
|
|
|
|
buf->next = head;
|
|
|
|
head = buf;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
continue;
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_node_unlock(n_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
cont:
|
|
|
|
buf_discard(buf);
|
|
|
|
}
|
2006-01-18 07:38:21 +08:00
|
|
|
read_unlock_bh(&tipc_net_lock);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
|
|
|
 * tipc_link_defer_pkt(): Sort a received out-of-sequence packet
|
2006-01-03 02:04:38 +08:00
|
|
|
* into the deferred reception queue.
|
|
|
|
 * Returns the increase of the queue length, i.e. 0 or 1
|
|
|
|
*/
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
u32 tipc_link_defer_pkt(struct sk_buff **head,
|
|
|
|
struct sk_buff **tail,
|
|
|
|
struct sk_buff *buf)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
2006-03-21 14:36:47 +08:00
|
|
|
struct sk_buff *prev = NULL;
|
2006-01-03 02:04:38 +08:00
|
|
|
struct sk_buff *crs = *head;
|
|
|
|
u32 seq_no = msg_seqno(buf_msg(buf));
|
|
|
|
|
|
|
|
buf->next = NULL;
|
|
|
|
|
|
|
|
/* Empty queue ? */
|
|
|
|
if (*head == NULL) {
|
|
|
|
*head = *tail = buf;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Last ? */
|
|
|
|
if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
|
|
|
|
(*tail)->next = buf;
|
|
|
|
*tail = buf;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Scan through queue and sort it in */
|
|
|
|
do {
|
|
|
|
struct tipc_msg *msg = buf_msg(crs);
|
|
|
|
|
|
|
|
if (less(seq_no, msg_seqno(msg))) {
|
|
|
|
buf->next = crs;
|
|
|
|
if (prev)
|
|
|
|
prev->next = buf;
|
|
|
|
else
|
2007-02-09 22:25:21 +08:00
|
|
|
*head = buf;
|
2006-01-03 02:04:38 +08:00
|
|
|
return 1;
|
|
|
|
}
|
2011-01-01 02:59:35 +08:00
|
|
|
if (seq_no == msg_seqno(msg))
|
2006-01-03 02:04:38 +08:00
|
|
|
break;
|
|
|
|
prev = crs;
|
|
|
|
crs = crs->next;
|
2011-01-01 02:59:32 +08:00
|
|
|
} while (crs);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
/* Message is a duplicate of an existing message */
|
|
|
|
|
|
|
|
buf_discard(buf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/**
|
2006-01-03 02:04:38 +08:00
|
|
|
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
|
|
|
|
*/
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
static void link_handle_out_of_seq_msg(struct link *l_ptr,
|
2006-01-03 02:04:38 +08:00
|
|
|
struct sk_buff *buf)
|
|
|
|
{
|
|
|
|
u32 seq_no = msg_seqno(buf_msg(buf));
|
|
|
|
|
|
|
|
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
|
|
|
|
link_recv_proto_msg(l_ptr, buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Record OOS packet arrival (force mismatch on next timeout) */
|
|
|
|
|
|
|
|
l_ptr->checkpoint--;
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
/*
|
2006-01-03 02:04:38 +08:00
|
|
|
* Discard packet if a duplicate; otherwise add it to deferred queue
|
|
|
|
* and notify peer of gap as per protocol specification
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (less(seq_no, mod(l_ptr->next_in_no))) {
|
|
|
|
l_ptr->stats.duplicates++;
|
|
|
|
buf_discard(buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
|
|
|
|
&l_ptr->newest_deferred_in, buf)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->deferred_inqueue_sz++;
|
|
|
|
l_ptr->stats.deferred_recv++;
|
|
|
|
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
|
2006-01-03 02:04:38 +08:00
|
|
|
} else
|
|
|
|
l_ptr->stats.duplicates++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send protocol message to the other endpoint.
|
|
|
|
*/
|
2006-01-18 07:38:21 +08:00
|
|
|
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
|
|
|
|
u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
|
2006-01-03 02:04:38 +08:00
|
|
|
{
|
2006-03-21 14:36:47 +08:00
|
|
|
struct sk_buff *buf = NULL;
|
2006-01-03 02:04:38 +08:00
|
|
|
struct tipc_msg *msg = l_ptr->pmsg;
|
2007-02-09 22:25:21 +08:00
|
|
|
u32 msg_size = sizeof(l_ptr->proto_msg);
|
2011-03-01 04:30:20 +08:00
|
|
|
int r_flag;
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
if (link_blocked(l_ptr))
|
|
|
|
return;
|
2011-05-27 23:00:51 +08:00
|
|
|
|
|
|
|
/* Abort non-RESET send if communication with node is prohibited */
|
|
|
|
|
|
|
|
if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
|
|
|
|
return;
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_set_type(msg, msg_typ);
|
|
|
|
msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
|
2007-02-09 22:25:21 +08:00
|
|
|
msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
|
2006-01-18 07:38:21 +08:00
|
|
|
msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
if (msg_typ == STATE_MSG) {
|
|
|
|
u32 next_sent = mod(l_ptr->next_out_no);
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
if (!tipc_link_is_up(l_ptr))
|
2006-01-03 02:04:38 +08:00
|
|
|
return;
|
|
|
|
if (l_ptr->next_out)
|
|
|
|
next_sent = msg_seqno(buf_msg(l_ptr->next_out));
|
|
|
|
msg_set_next_sent(msg, next_sent);
|
|
|
|
if (l_ptr->oldest_deferred_in) {
|
|
|
|
u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
|
|
|
|
gap = mod(rec - mod(l_ptr->next_in_no));
|
|
|
|
}
|
|
|
|
msg_set_seq_gap(msg, gap);
|
|
|
|
if (gap)
|
|
|
|
l_ptr->stats.sent_nacks++;
|
|
|
|
msg_set_link_tolerance(msg, tolerance);
|
|
|
|
msg_set_linkprio(msg, priority);
|
|
|
|
msg_set_max_pkt(msg, ack_mtu);
|
|
|
|
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
|
|
|
|
msg_set_probe(msg, probe_msg != 0);
|
2007-02-09 22:25:21 +08:00
|
|
|
if (probe_msg) {
|
2006-01-03 02:04:38 +08:00
|
|
|
u32 mtu = l_ptr->max_pkt;
|
|
|
|
|
2007-02-09 22:25:21 +08:00
|
|
|
if ((mtu < l_ptr->max_pkt_target) &&
|
2006-01-03 02:04:38 +08:00
|
|
|
link_working_working(l_ptr) &&
|
|
|
|
l_ptr->fsm_msg_cnt) {
|
|
|
|
msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
|
2007-02-09 22:25:21 +08:00
|
|
|
if (l_ptr->max_pkt_probes == 10) {
|
|
|
|
l_ptr->max_pkt_target = (msg_size - 4);
|
|
|
|
l_ptr->max_pkt_probes = 0;
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
|
2007-02-09 22:25:21 +08:00
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->max_pkt_probes++;
|
2007-02-09 22:25:21 +08:00
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
l_ptr->stats.sent_probes++;
|
2007-02-09 22:25:21 +08:00
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.sent_states++;
|
|
|
|
} else { /* RESET_MSG or ACTIVATE_MSG */
|
|
|
|
msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
|
|
|
|
msg_set_seq_gap(msg, 0);
|
|
|
|
msg_set_next_sent(msg, 1);
|
2011-01-19 04:15:34 +08:00
|
|
|
msg_set_probe(msg, 0);
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_set_link_tolerance(msg, l_ptr->tolerance);
|
|
|
|
msg_set_linkprio(msg, l_ptr->priority);
|
|
|
|
msg_set_max_pkt(msg, l_ptr->max_pkt_target);
|
|
|
|
}
|
|
|
|
|
2011-03-01 04:30:20 +08:00
|
|
|
r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
|
|
|
|
msg_set_redundant_link(msg, r_flag);
|
2006-01-03 02:04:38 +08:00
|
|
|
msg_set_linkprio(msg, l_ptr->priority);
|
|
|
|
|
|
|
|
	/* Ensure sequence number will not fit: */
|
|
|
|
|
|
|
|
msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
|
|
|
|
|
|
|
|
/* Congestion? */
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
if (!l_ptr->proto_msg_queue) {
|
|
|
|
l_ptr->proto_msg_queue =
|
2010-10-13 21:20:35 +08:00
|
|
|
tipc_buf_acquire(sizeof(l_ptr->proto_msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
buf = l_ptr->proto_msg_queue;
|
|
|
|
if (!buf)
|
|
|
|
return;
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Message can be sent */
|
|
|
|
|
2010-10-13 21:20:35 +08:00
|
|
|
buf = tipc_buf_acquire(msg_size);
|
2006-01-03 02:04:38 +08:00
|
|
|
if (!buf)
|
|
|
|
return;
|
|
|
|
|
2007-03-31 22:55:19 +08:00
|
|
|
skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
|
2007-02-09 22:25:21 +08:00
|
|
|
msg_set_size(buf_msg(buf), msg_size);
|
2006-01-03 02:04:38 +08:00
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->unacked_window = 0;
|
|
|
|
buf_discard(buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* New congestion */
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->proto_msg_queue = buf;
|
|
|
|
l_ptr->stats.bearer_congs++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * Receive protocol message:
|
2007-02-09 22:25:21 +08:00
|
|
|
* Note that network plane id propagates through the network, and may
|
|
|
|
 * change at any time. The node with the lowest address rules.
|
2006-01-03 02:04:38 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
|
|
|
|
{
|
|
|
|
u32 rec_gap = 0;
|
|
|
|
u32 max_pkt_info;
|
2007-02-09 22:25:21 +08:00
|
|
|
u32 max_pkt_ack;
|
2006-01-03 02:04:38 +08:00
|
|
|
u32 msg_tol;
|
|
|
|
struct tipc_msg *msg = buf_msg(buf);
|
|
|
|
|
|
|
|
if (link_blocked(l_ptr))
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
/* record unnumbered packet arrival (force mismatch on next timeout) */
|
|
|
|
|
|
|
|
l_ptr->checkpoint--;
|
|
|
|
|
|
|
|
if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
|
|
|
|
if (tipc_own_addr > msg_prevnode(msg))
|
|
|
|
l_ptr->b_ptr->net_plane = msg_net_plane(msg);
|
|
|
|
|
|
|
|
l_ptr->owner->permit_changeover = msg_redundant_link(msg);
|
|
|
|
|
|
|
|
switch (msg_type(msg)) {
|
2007-02-09 22:25:21 +08:00
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
case RESET_MSG:
|
2008-06-05 08:29:39 +08:00
|
|
|
if (!link_working_unknown(l_ptr) &&
|
|
|
|
(l_ptr->peer_session != INVALID_SESSION)) {
|
2011-04-07 21:54:43 +08:00
|
|
|
if (less_eq(msg_session(msg), l_ptr->peer_session))
|
|
|
|
break; /* duplicate or old reset: ignore */
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
2011-05-27 23:00:51 +08:00
|
|
|
|
|
|
|
if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
|
|
|
|
link_working_unknown(l_ptr))) {
|
|
|
|
/*
|
|
|
|
* peer has lost contact -- don't allow peer's links
|
|
|
|
* to reactivate before we recognize loss & clean up
|
|
|
|
*/
|
|
|
|
l_ptr->owner->block_setup = WAIT_NODE_DOWN;
|
|
|
|
}
|
|
|
|
|
2006-01-03 02:04:38 +08:00
|
|
|
/* fall thru' */
|
|
|
|
case ACTIVATE_MSG:
|
|
|
|
		/* Update link settings according to other endpoint's values */
|
|
|
|
|
|
|
|
strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
|
|
|
|
|
2011-01-01 02:59:33 +08:00
|
|
|
msg_tol = msg_link_tolerance(msg);
|
|
|
|
if (msg_tol > l_ptr->tolerance)
|
2006-01-03 02:04:38 +08:00
|
|
|
link_set_supervision_props(l_ptr, msg_tol);
|
|
|
|
|
|
|
|
if (msg_linkprio(msg) > l_ptr->priority)
|
|
|
|
l_ptr->priority = msg_linkprio(msg);
|
|
|
|
|
|
|
|
max_pkt_info = msg_max_pkt(msg);
|
2007-02-09 22:25:21 +08:00
|
|
|
if (max_pkt_info) {
|
2006-01-03 02:04:38 +08:00
|
|
|
if (max_pkt_info < l_ptr->max_pkt_target)
|
|
|
|
l_ptr->max_pkt_target = max_pkt_info;
|
|
|
|
if (l_ptr->max_pkt > l_ptr->max_pkt_target)
|
|
|
|
l_ptr->max_pkt = l_ptr->max_pkt_target;
|
|
|
|
} else {
|
2007-02-09 22:25:21 +08:00
|
|
|
l_ptr->max_pkt = l_ptr->max_pkt_target;
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
l_ptr->owner->bclink.supported = (max_pkt_info != 0);
|
|
|
|
|
|
|
|
link_state_event(l_ptr, msg_type(msg));
|
|
|
|
|
|
|
|
l_ptr->peer_session = msg_session(msg);
|
|
|
|
l_ptr->peer_bearer_id = msg_bearer_id(msg);
|
|
|
|
|
|
|
|
/* Synchronize broadcast sequence numbers */
|
2011-02-28 23:36:21 +08:00
|
|
|
if (!tipc_node_redundant_links(l_ptr->owner))
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
|
|
|
|
break;
|
|
|
|
case STATE_MSG:
|
|
|
|
|
2011-01-01 02:59:33 +08:00
|
|
|
msg_tol = msg_link_tolerance(msg);
|
|
|
|
if (msg_tol)
|
2006-01-03 02:04:38 +08:00
|
|
|
link_set_supervision_props(l_ptr, msg_tol);
|
2007-02-09 22:25:21 +08:00
|
|
|
|
|
|
|
if (msg_linkprio(msg) &&
|
2006-01-03 02:04:38 +08:00
|
|
|
(msg_linkprio(msg) != l_ptr->priority)) {
|
2006-06-26 14:52:17 +08:00
|
|
|
warn("Resetting link <%s>, priority change %u->%u\n",
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->name, l_ptr->priority, msg_linkprio(msg));
|
|
|
|
l_ptr->priority = msg_linkprio(msg);
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_reset(l_ptr); /* Enforce change to take effect */
|
2006-01-03 02:04:38 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
|
|
|
|
l_ptr->stats.recv_states++;
|
|
|
|
if (link_reset_unknown(l_ptr))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
|
2007-02-09 22:25:21 +08:00
|
|
|
rec_gap = mod(msg_next_sent(msg) -
|
2006-01-03 02:04:38 +08:00
|
|
|
mod(l_ptr->next_in_no));
|
|
|
|
}
|
|
|
|
|
|
|
|
max_pkt_ack = msg_max_pkt(msg);
|
2007-02-09 22:25:21 +08:00
|
|
|
if (max_pkt_ack > l_ptr->max_pkt) {
|
|
|
|
l_ptr->max_pkt = max_pkt_ack;
|
|
|
|
l_ptr->max_pkt_probes = 0;
|
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
max_pkt_ack = 0;
|
2007-02-09 22:25:21 +08:00
|
|
|
if (msg_probe(msg)) {
|
2006-01-03 02:04:38 +08:00
|
|
|
l_ptr->stats.recv_probes++;
|
2011-01-01 02:59:35 +08:00
|
|
|
if (msg_size(msg) > sizeof(l_ptr->proto_msg))
|
2007-02-09 22:25:21 +08:00
|
|
|
max_pkt_ack = msg_size(msg);
|
|
|
|
}
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
/* Protocol message before retransmits, reduce loss risk */
|
|
|
|
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
|
|
|
|
if (rec_gap || (msg_probe(msg))) {
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_send_proto_msg(l_ptr, STATE_MSG,
|
|
|
|
0, rec_gap, 0, 0, max_pkt_ack);
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
if (msg_seq_gap(msg)) {
|
|
|
|
l_ptr->stats.recv_nacks++;
|
2006-01-18 07:38:21 +08:00
|
|
|
tipc_link_retransmit(l_ptr, l_ptr->first_out,
|
|
|
|
msg_seq_gap(msg));
|
2006-01-03 02:04:38 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
exit:
|
|
|
|
buf_discard(buf);
|
|
|
|
}
/*
 * tipc_link_tunnel(): Send one message via a link belonging to
 * another bearer. Owner node is locked.
 */
static void tipc_link_tunnel(struct link *l_ptr,
			     struct tipc_msg *tunnel_hdr,
			     struct tipc_msg *msg,
			     u32 selector)
{
	struct link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		warn("Link changeover error, "
		     "tunnel link no longer available\n");
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		warn("Link changeover error, "
		     "unable to send tunnel msg\n");
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}

/*
 * changeover(): Send whole message queue via the remaining link
 * Owner node is locked.
 */
void tipc_link_changeover(struct link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	if (!l_ptr->owner->permit_changeover) {
		warn("Link changeover error, "
		     "peer did not permit changeover\n");
		return;
	}

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			warn("Link changeover error, "
			     "unable to send changeover msg\n");
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}

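/*
 * tipc_link_send_duplicate(): Tunnel a copy of every message in the failed
 * link's send queue through the 'tunnel' link as DUPLICATE_MSG packets, so
 * the peer can drop any copies it has already received after changeover.
 */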
void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			warn("Link changeover error, "
			     "unable to send duplicate msg\n");
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/*
 * link_recv_changeover_msg(): Receive tunneled packet sent
 * via other link. Node is locked. Return extracted buffer.
 */
static int link_recv_changeover_msg(struct link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);

	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		err("Unexpected changeover message on link <%s>\n",
		    (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			warn("Link changeover error, duplicate msg dropped\n");
			goto exit;
		}
		buf_discard(tunnel_buf);
		return 1;
	}

	/* First original message ?: */

	if (tipc_link_is_up(dest_link)) {
		info("Resetting link <%s>, changeover initiated by peer\n",
		     dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */

	if (dest_link->exp_msg_count == 0) {
		warn("Link switchover error, "
		     "got too many tunnelled messages\n");
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			buf_discard(tunnel_buf);
			return 1;
		} else {
			warn("Link changeover error, original msg dropped\n");
		}
	}
exit:
	*buf = NULL;
	buf_discard(tunnel_buf);
	return 0;
}

/*
 * Bundler functionality:
 */

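/*
 * tipc_link_recv_bundle(): Extract each message embedded in a received bundle
 * and hand it to tipc_net_route_msg(); the bundle buffer itself is discarded.
 */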
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	buf_discard(buf);
}

/*
 *  Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive total message length.
 * Returns user data length.
 */
static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */

	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */

	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			buf_discard(buf);
			while (buf_chain) {
				buf = buf_chain;
				buf_chain = buf_chain->next;
				buf_discard(buf);
			}
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	buf_discard(buf);

	/* Append chain of fragments to send queue & send them */

	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}

/*
 * A pending message being re-assembled must store certain values
 * to handle subsequent fragments correctly. The following functions
 * help storing these values in unused, available fields in the
 * pending message. This makes dynamic memory allocation unnecessary.
 */

static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
{
	msg_set_seqno(buf_msg(buf), seqno);
}

static u32 get_fragm_size(struct sk_buff *buf)
{
	return msg_ack(buf_msg(buf));
}

static void set_fragm_size(struct sk_buff *buf, u32 sz)
{
	msg_set_ack(buf_msg(buf), sz);
}

static u32 get_expected_frags(struct sk_buff *buf)
{
	return msg_bcast_ack(buf_msg(buf));
}

static void set_expected_frags(struct sk_buff *buf, u32 exp)
{
	msg_set_bcast_ack(buf_msg(buf), exp);
}

static u32 get_timer_cnt(struct sk_buff *buf)
{
	return msg_reroute_cnt(buf_msg(buf));
}

static void incr_timer_cnt(struct sk_buff *buf)
{
	msg_incr_reroute_cnt(buf_msg(buf));
}

/*
 * tipc_link_recv_fragment(): Called with node lock on. Returns
 * the reassembled buffer if message is complete.
 */
int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
			    struct tipc_msg **m)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *fbuf = *fb;
	struct tipc_msg *fragm = buf_msg(fbuf);
	struct sk_buff *pbuf = *pending;
	u32 long_msg_seq_no = msg_long_msgno(fragm);

	*fb = NULL;

	/* Is there an incomplete message waiting for this fragment? */

	while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
		prev = pbuf;
		pbuf = pbuf->next;
	}

	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
		u32 msg_sz = msg_size(imsg);
		u32 fragm_sz = msg_data_sz(fragm);
		u32 exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
		u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
		if (msg_type(imsg) == TIPC_MCAST_MSG)
			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
		if (msg_size(imsg) > max) {
			buf_discard(fbuf);
			return 0;
		}
		pbuf = tipc_buf_acquire(msg_size(imsg));
		if (pbuf != NULL) {
			pbuf->next = *pending;
			*pending = pbuf;
			skb_copy_to_linear_data(pbuf, imsg,
						msg_data_sz(fragm));
			/* Prepare buffer for subsequent fragments. */

			set_long_msg_seqno(pbuf, long_msg_seq_no);
			set_fragm_size(pbuf, fragm_sz);
			set_expected_frags(pbuf, exp_fragm_cnt - 1);
		} else {
			warn("Link unable to reassemble fragmented message\n");
		}
		buf_discard(fbuf);
		return 0;
	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
		u32 dsz = msg_data_sz(fragm);
		u32 fsz = get_fragm_size(pbuf);
		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
		u32 exp_frags = get_expected_frags(pbuf) - 1;
		skb_copy_to_linear_data_offset(pbuf, crs,
					       msg_data(fragm), dsz);
		buf_discard(fbuf);

		/* Is message complete? */

		if (exp_frags == 0) {
			if (prev)
				prev->next = pbuf->next;
			else
				*pending = pbuf->next;
			msg_reset_reroute_cnt(buf_msg(pbuf));
			*fb = pbuf;
			*m = buf_msg(pbuf);
			return 1;
		}
		set_expected_frags(pbuf, exp_frags);
		return 0;
	}
	buf_discard(fbuf);
	return 0;
}

/**
 * link_check_defragm_bufs - flush stale incoming message fragments
 * @l_ptr: pointer to link
 */
static void link_check_defragm_bufs(struct link *l_ptr)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *next = NULL;
	struct sk_buff *buf = l_ptr->defragm_buf;

	if (!buf)
		return;
	if (!link_working_working(l_ptr))
		return;
	while (buf) {
		u32 cnt = get_timer_cnt(buf);

		next = buf->next;
		if (cnt < 4) {
			incr_timer_cnt(buf);
			prev = buf;
		} else {
			if (prev)
				prev->next = buf->next;
			else
				l_ptr->defragm_buf = buf->next;
			buf_discard(buf);
		}
		buf = next;
	}
}

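/*
 * link_set_supervision_props(): Derive the link's continuity (probe) interval
 * and abort limit from the configured tolerance; out-of-range tolerance
 * values are ignored.
 */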
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}

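/*
 * tipc_link_set_queue_limits(): Scale the link's per-importance send queue
 * limits from the configured window size; limits for transit traffic and
 * protocol messages are fixed values.
 */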
void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}

/**
 * link_find_link - locate link by name
 * @name - ptr to link name string
 * @node - ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or 0 if invalid link name).
 */
static struct link *link_find_link(const char *name, struct tipc_node **node)
{
	struct link_name link_name_parts;
	struct tipc_bearer *b_ptr;
	struct link *l_ptr;

	if (!link_name_validate(name, &link_name_parts))
		return NULL;

	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
	if (!b_ptr)
		return NULL;

	*node = tipc_node_find(link_name_parts.addr_peer);
	if (!*node)
		return NULL;

	l_ptr = (*node)->links[b_ptr->identity];
	if (!l_ptr || strcmp(l_ptr->name, name))
		return NULL;

	return l_ptr;
}

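/*
 * tipc_link_cmd_config(): Handle a TIPC_CMD_SET_LINK_* configuration request.
 * Validates the requested tolerance, priority, or window value, applies it to
 * the named link (the broadcast link only accepts window changes), and builds
 * the reply buffer.
 */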
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	struct link *l_ptr;
	struct tipc_node *node;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(args->name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	tipc_node_lock(node);
	res = -EINVAL;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		if ((new_value >= TIPC_MIN_LINK_TOL) &&
		    (new_value <= TIPC_MAX_LINK_TOL)) {
			link_set_supervision_props(l_ptr, new_value);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, 0, new_value, 0, 0);
			res = 0;
		}
		break;
	case TIPC_CMD_SET_LINK_PRI:
		if ((new_value >= TIPC_MIN_LINK_PRI) &&
		    (new_value <= TIPC_MAX_LINK_PRI)) {
			l_ptr->priority = new_value;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, 0, 0, new_value, 0);
			res = 0;
		}
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		if ((new_value >= TIPC_MIN_LINK_WIN) &&
		    (new_value <= TIPC_MAX_LINK_WIN)) {
			tipc_link_set_queue_limits(l_ptr, new_value);
			res = 0;
		}
		break;
	}
	tipc_node_unlock(node);

	read_unlock_bh(&tipc_net_lock);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

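/*
 * tipc_link_cmd_reset_stats(): Handle a "reset link statistics" configuration
 * request for the named link (or for the broadcast link).
 */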
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct link *l_ptr;
	struct tipc_node *node;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(link_name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	tipc_node_lock(node);
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}

/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct print_buf pb;
	struct link *l_ptr;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	tipc_printbuf_init(&pb, buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);

	if (tipc_link_is_active(l_ptr))
		status = "ACTIVE";
	else if (tipc_link_is_up(l_ptr))
		status = "STANDBY";
	else
		status = "DEFUNCT";
	tipc_printf(&pb, "Link <%s>\n"
			 " %s MTU:%u Priority:%u Tolerance:%u ms"
			 " Window:%u packets\n",
		    l_ptr->name, status, l_ptr->max_pkt,
		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
	tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_in_no - l_ptr->stats.recv_info,
		    l_ptr->stats.recv_fragments,
		    l_ptr->stats.recv_fragmented,
		    l_ptr->stats.recv_bundles,
		    l_ptr->stats.recv_bundled);
	tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_out_no - l_ptr->stats.sent_info,
		    l_ptr->stats.sent_fragments,
		    l_ptr->stats.sent_fragmented,
		    l_ptr->stats.sent_bundles,
		    l_ptr->stats.sent_bundled);
	profile_total = l_ptr->stats.msg_length_counts;
	if (!profile_total)
		profile_total = 1;
	tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
			 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
		    l_ptr->stats.msg_length_counts,
		    l_ptr->stats.msg_lengths_total / profile_total,
		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
	tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
		    l_ptr->stats.recv_states,
		    l_ptr->stats.recv_probes,
		    l_ptr->stats.recv_nacks,
		    l_ptr->stats.deferred_recv,
		    l_ptr->stats.duplicates);
	tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
		    l_ptr->stats.sent_states,
		    l_ptr->stats.sent_probes,
		    l_ptr->stats.sent_nacks,
		    l_ptr->stats.sent_acks,
		    l_ptr->stats.retransmitted);
	tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
		    l_ptr->stats.bearer_congs,
		    l_ptr->stats.link_congs,
		    l_ptr->stats.max_queue_sz,
		    l_ptr->stats.queue_sz_counts
		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
		    : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_printbuf_validate(&pb);
}

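/*
 * tipc_link_cmd_show_stats(): Handle a "show link statistics" configuration
 * request; the reply TLV is sized by MAX_LINK_STATS_INFO.
 */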
#define MAX_LINK_STATS_INFO 2000

struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;

	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
	if (!str_len) {
		buf_discard(buf);
		return tipc_cfg_reply_error_string("link not found");
	}

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

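/*
 * link_print(): Dump the link's state (and, with CONFIG_TIPC_DEBUG, its send
 * and deferred receive queues) to the log, prefixed by the caller's string.
 */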
static void link_print(struct link *l_ptr, const char *str)
{
	char print_area[256];
	struct print_buf pb;
	struct print_buf *buf = &pb;

	tipc_printbuf_init(buf, print_area, sizeof(print_area));

	tipc_printf(buf, str);
	tipc_printf(buf, "Link %x<%s>:",
		    l_ptr->addr, l_ptr->b_ptr->name);

#ifdef CONFIG_TIPC_DEBUG
	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
		goto print_state;

	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
	tipc_printf(buf, "SQUE");
	if (l_ptr->first_out) {
		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
		if (l_ptr->next_out)
			tipc_printf(buf, "%u..",
				    msg_seqno(buf_msg(l_ptr->next_out)));
		tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
			 msg_seqno(buf_msg(l_ptr->first_out)))
		     != (l_ptr->out_queue_size - 1)) ||
		    (l_ptr->last_out->next != NULL)) {
			tipc_printf(buf, "\nSend queue inconsistency\n");
			tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
			tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
			tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
		}
	} else
		tipc_printf(buf, "[]");
	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
	if (l_ptr->oldest_deferred_in) {
		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
			tipc_printf(buf, ":RQSIZ(%u)",
				    l_ptr->deferred_inqueue_sz);
		}
	}
print_state:
#endif

	if (link_working_unknown(l_ptr))
		tipc_printf(buf, ":WU");
	else if (link_reset_reset(l_ptr))
		tipc_printf(buf, ":RR");
	else if (link_reset_unknown(l_ptr))
		tipc_printf(buf, ":RU");
	else if (link_working_working(l_ptr))
		tipc_printf(buf, ":WW");
	tipc_printf(buf, "\n");

	tipc_printbuf_validate(buf);
	info("%s", print_area);
}