caif: Use RCU and lists in cfcnfg.c for managing caif link layers

RCU-protected lists are used for handling the link layers instead of an array.
When generating a CAIF phy-id, the ifindex is used as the base; the legal range is 1-6.
Introduced cfcnfg_set_phy_state() for managing the CAIF link layer state.
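
For reference, the new phy-id selection in cfcnfg_add_phy_layer() can be
sketched roughly as below (paraphrased from the hunk further down; cnfg, dev
and cfcnfg_get_phyinfo_rcu() are the context and helper added by this patch,
so this fragment only illustrates the scheme and is not a drop-in snippet):

    /* Candidate ids are derived from the interface index modulo 8;
     * id 0 is skipped and the first id not already present in the
     * RCU list of link layers is used. */
    u8 phyid;
    int i;

    for (i = 0; i < 7; i++) {
        phyid = (dev->ifindex + i) & 0x7;
        if (phyid == 0)
            continue;
        if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
            goto got_phyid;
    }
    pr_warn("Too many CAIF Link Layers (max 6)\n");

The new cfcnfg_set_phy_state() (added near the end of the patch) then plugs
the framing layer into, or removes it from, the MUX when the link layer goes
up or down.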

Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: sjur.brandeland@stericsson.com
Date: 2011-05-13 02:44:01 +00:00
Committed by: David S. Miller
parent bd30ce4bc0
commit f362144084
1 changed file with 215 additions and 162 deletions


@@ -10,6 +10,7 @@
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
@@ -18,11 +19,7 @@
#include <net/caif/cffrml.h>
#include <net/caif/cfserl.h>
#include <net/caif/cfsrvl.h>
#include <linux/module.h>
#include <asm/atomic.h>
#define MAX_PHY_LAYERS 7
#include <net/caif/caif_dev.h>
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
@@ -30,6 +27,9 @@
* to manage physical interfaces
*/
struct cfcnfg_phyinfo {
struct list_head node;
bool up;
/* Pointer to the layer below the MUX (framing layer) */
struct cflayer *frm_layer;
/* Pointer to the lowest actual physical layer */
@@ -39,9 +39,6 @@ struct cfcnfg_phyinfo {
/* Preference of the physical in interface */
enum cfcnfg_phy_preference pref;
/* Reference count, number of channels using the device */
int phy_ref_count;
/* Information about the physical device */
struct dev_info dev_info;
@@ -59,8 +56,8 @@ struct cfcnfg {
struct cflayer layer;
struct cflayer *ctrl;
struct cflayer *mux;
u8 last_phyid;
struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
struct list_head phys;
struct mutex lock;
};
static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
@@ -76,6 +73,9 @@ struct cfcnfg *cfcnfg_create(void)
{
struct cfcnfg *this;
struct cfctrl_rsp *resp;
might_sleep();
/* Initiate this layer */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
if (!this) {
@@ -99,15 +99,19 @@ struct cfcnfg *cfcnfg_create(void)
resp->radioset_rsp = cfctrl_resp_func;
resp->linksetup_rsp = cfcnfg_linkup_rsp;
resp->reject_rsp = cfcnfg_reject_rsp;
this->last_phyid = 1;
INIT_LIST_HEAD(&this->phys);
cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
layer_set_dn(this->ctrl, this->mux);
layer_set_up(this->ctrl, this);
mutex_init(&this->lock);
return this;
out_of_mem:
pr_warn("Out of memory\n");
synchronize_rcu();
kfree(this->mux);
kfree(this->ctrl);
kfree(this);
@@ -117,7 +121,10 @@ EXPORT_SYMBOL(cfcnfg_create);
void cfcnfg_remove(struct cfcnfg *cfg)
{
might_sleep();
if (cfg) {
synchronize_rcu();
kfree(cfg->mux);
kfree(cfg->ctrl);
kfree(cfg);
@@ -128,6 +135,17 @@ static void cfctrl_resp_func(void)
{
}
static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
u8 phyid)
{
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->id == phyid)
return phy;
return NULL;
}
static void cfctrl_enum_resp(void)
{
}
@@ -135,106 +153,65 @@ static void cfctrl_enum_resp(void)
struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
enum cfcnfg_phy_preference phy_pref)
{
u16 i;
/* Try to match with specified preference */
for (i = 1; i < MAX_PHY_LAYERS; i++) {
if (cnfg->phy_layers[i].id == i &&
cnfg->phy_layers[i].pref == phy_pref &&
cnfg->phy_layers[i].frm_layer != NULL) {
caif_assert(cnfg->phy_layers != NULL);
caif_assert(cnfg->phy_layers[i].id == i);
return &cnfg->phy_layers[i].dev_info;
}
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node) {
if (phy->up && phy->pref == phy_pref &&
phy->frm_layer != NULL)
return &phy->dev_info;
}
/* Otherwise just return something */
for (i = 1; i < MAX_PHY_LAYERS; i++) {
if (cnfg->phy_layers[i].id == i) {
caif_assert(cnfg->phy_layers != NULL);
caif_assert(cnfg->phy_layers[i].id == i);
return &cnfg->phy_layers[i].dev_info;
}
}
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->up)
return &phy->dev_info;
return NULL;
}
static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
u8 phyid)
{
int i;
/* Try to match with specified preference */
for (i = 0; i < MAX_PHY_LAYERS; i++)
if (cnfg->phy_layers[i].frm_layer != NULL &&
cnfg->phy_layers[i].id == phyid)
return &cnfg->phy_layers[i];
return NULL;
}
int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
{
int i;
for (i = 0; i < MAX_PHY_LAYERS; i++)
if (cnfg->phy_layers[i].frm_layer != NULL &&
cnfg->phy_layers[i].ifindex == ifi)
return i;
struct cfcnfg_phyinfo *phy;
list_for_each_entry_rcu(phy, &cnfg->phys, node)
if (phy->ifindex == ifi && phy->up)
return phy->id;
return -ENODEV;
}
int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
int cfcnfg_disconn_adapt_layer(struct cfcnfg *cfg, struct cflayer *adap_layer)
{
u8 channel_id = 0;
int ret = 0;
struct cflayer *servl = NULL;
struct cfcnfg_phyinfo *phyinfo = NULL;
u8 phyid = 0;
caif_assert(adap_layer != NULL);
channel_id = adap_layer->id;
if (adap_layer->dn == NULL || channel_id == 0) {
pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
ret = -ENOTCONN;
goto end;
}
servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
if (servl == NULL) {
pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
channel_id);
pr_err("PROTOCOL ERROR - "
"Error removing service_layer Channel_Id(%d)",
channel_id);
ret = -EINVAL;
goto end;
}
layer_set_up(servl, NULL);
ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
if (ret)
goto end;
caif_assert(channel_id == servl->id);
if (adap_layer->dn != NULL) {
phyid = cfsrvl_getphyid(adap_layer->dn);
phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
if (phyinfo == NULL) {
pr_warn("No interface to send disconnect to\n");
ret = -ENODEV;
goto end;
}
if (phyinfo->id != phyid ||
phyinfo->phy_layer->id != phyid ||
phyinfo->frm_layer->id != phyid) {
pr_err("Inconsistency in phy registration\n");
ret = -EINVAL;
goto end;
}
}
if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
phyinfo->phy_layer != NULL &&
phyinfo->phy_layer->modemcmd != NULL) {
phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
_CAIF_MODEMCMD_PHYIF_USELESS);
}
ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
end:
cfsrvl_put(servl);
cfctrl_cancel_req(cnfg->ctrl, adap_layer);
cfctrl_cancel_req(cfg->ctrl, adap_layer);
/* Do RCU sync before initiating cleanup */
synchronize_rcu();
if (adap_layer->ctrlcmd != NULL)
adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
return ret;
@@ -269,39 +246,56 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
int *proto_tail)
{
struct cflayer *frml;
struct cfcnfg_phyinfo *phy;
int err;
rcu_read_lock();
phy = cfcnfg_get_phyinfo_rcu(cnfg, param->phyid);
if (!phy) {
err = -ENODEV;
goto unlock;
}
err = -EINVAL;
if (adap_layer == NULL) {
pr_err("adap_layer is zero\n");
return -EINVAL;
goto unlock;
}
if (adap_layer->receive == NULL) {
pr_err("adap_layer->receive is NULL\n");
return -EINVAL;
goto unlock;
}
if (adap_layer->ctrlcmd == NULL) {
pr_err("adap_layer->ctrlcmd == NULL\n");
return -EINVAL;
goto unlock;
}
frml = cnfg->phy_layers[param->phyid].frm_layer;
err = -ENODEV;
frml = phy->frm_layer;
if (frml == NULL) {
pr_err("Specified PHY type does not exist!\n");
return -ENODEV;
goto unlock;
}
caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
caif_assert(param->phyid == phy->id);
caif_assert(phy->frm_layer->id ==
param->phyid);
caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
caif_assert(phy->phy_layer->id ==
param->phyid);
*ifindex = cnfg->phy_layers[param->phyid].ifindex;
*proto_head =
protohead[param->linktype]+
(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
*ifindex = phy->ifindex;
*proto_tail = 2;
*proto_head =
protohead[param->linktype] + (phy->use_stx ? 1 : 0);
rcu_read_unlock();
/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
cfctrl_enum_req(cnfg->ctrl, param->phyid);
return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
unlock:
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
@@ -315,32 +309,37 @@ static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
u8 phyid, struct cflayer *adapt_layer)
u8 phyid, struct cflayer *adapt_layer)
{
struct cfcnfg *cnfg = container_obj(layer);
struct cflayer *servicel = NULL;
struct cfcnfg_phyinfo *phyinfo;
struct net_device *netdev;
rcu_read_lock();
if (adapt_layer == NULL) {
pr_debug("link setup response but no client exist, send linkdown back\n");
pr_debug("link setup response but no client exist,"
"send linkdown back\n");
cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
return;
goto unlock;
}
caif_assert(cnfg != NULL);
caif_assert(phyid != 0);
phyinfo = &cnfg->phy_layers[phyid];
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
if (phyinfo == NULL) {
pr_err("ERROR: Link Layer Device dissapeared"
"while connecting\n");
goto unlock;
}
caif_assert(phyinfo != NULL);
caif_assert(phyinfo->id == phyid);
caif_assert(phyinfo->phy_layer != NULL);
caif_assert(phyinfo->phy_layer->id == phyid);
phyinfo->phy_ref_count++;
if (phyinfo->phy_ref_count == 1 &&
phyinfo->phy_layer->modemcmd != NULL) {
phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
_CAIF_MODEMCMD_PHYIF_USEFULL);
}
adapt_layer->id = channel_id;
switch (serv) {
@@ -348,7 +347,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfvei_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_DATAGRAM:
servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
servicel = cfdgml_create(channel_id,
&phyinfo->dev_info);
break;
case CFCTRL_SRV_RFM:
netdev = phyinfo->dev_info.dev;
@@ -365,94 +365,93 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
break;
default:
pr_err("Protocol error. Link setup response - unknown channel type\n");
return;
pr_err("Protocol error. Link setup response "
"- unknown channel type\n");
goto unlock;
}
if (!servicel) {
pr_warn("Out of memory\n");
return;
goto unlock;
}
layer_set_dn(servicel, cnfg->mux);
cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
layer_set_up(servicel, adapt_layer);
layer_set_dn(adapt_layer, servicel);
cfsrvl_get(servicel);
rcu_read_unlock();
servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
return;
unlock:
rcu_read_unlock();
}
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
struct net_device *dev, struct cflayer *phy_layer,
u16 *phyid, enum cfcnfg_phy_preference pref,
u16 *phy_id, enum cfcnfg_phy_preference pref,
bool fcs, bool stx)
{
struct cflayer *frml;
struct cflayer *phy_driver = NULL;
struct cfcnfg_phyinfo *phyinfo;
int i;
u8 phyid;
mutex_lock(&cnfg->lock);
if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
*phyid = cnfg->last_phyid;
/* range: * 1..(MAX_PHY_LAYERS-1) */
cnfg->last_phyid =
(cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
} else {
*phyid = 0;
for (i = 1; i < MAX_PHY_LAYERS; i++) {
if (cnfg->phy_layers[i].frm_layer == NULL) {
*phyid = i;
break;
}
}
}
if (*phyid == 0) {
pr_err("No Available PHY ID\n");
return;
/* CAIF protocol allow maximum 6 link-layers */
for (i = 0; i < 7; i++) {
phyid = (dev->ifindex + i) & 0x7;
if (phyid == 0)
continue;
if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
goto out;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
switch (phy_type) {
case CFPHYTYPE_FRAG:
phy_driver =
cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
if (!phy_driver) {
pr_warn("Out of memory\n");
return;
goto out;
}
break;
case CFPHYTYPE_CAIF:
phy_driver = NULL;
break;
default:
pr_err("%d\n", phy_type);
return;
break;
goto out;
}
phy_layer->id = *phyid;
cnfg->phy_layers[*phyid].pref = pref;
cnfg->phy_layers[*phyid].id = *phyid;
cnfg->phy_layers[*phyid].dev_info.id = *phyid;
cnfg->phy_layers[*phyid].dev_info.dev = dev;
cnfg->phy_layers[*phyid].phy_layer = phy_layer;
cnfg->phy_layers[*phyid].phy_ref_count = 0;
cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
cnfg->phy_layers[*phyid].use_stx = stx;
cnfg->phy_layers[*phyid].use_fcs = fcs;
phy_layer->id = phyid;
phyinfo->pref = pref;
phyinfo->id = phyid;
phyinfo->dev_info.id = phyid;
phyinfo->dev_info.dev = dev;
phyinfo->phy_layer = phy_layer;
phyinfo->ifindex = dev->ifindex;
phyinfo->use_stx = stx;
phyinfo->use_fcs = fcs;
phy_layer->type = phy_type;
frml = cffrml_create(*phyid, fcs);
frml = cffrml_create(phyid, fcs);
if (!frml) {
pr_warn("Out of memory\n");
return;
kfree(phyinfo);
goto out;
}
cnfg->phy_layers[*phyid].frm_layer = frml;
cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
if (phy_driver != NULL) {
phy_driver->id = *phyid;
phy_driver->id = phyid;
layer_set_dn(frml, phy_driver);
layer_set_up(phy_driver, frml);
layer_set_dn(phy_driver, phy_layer);
@@ -461,33 +460,87 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
layer_set_dn(frml, phy_layer);
layer_set_up(phy_layer, frml);
}
list_add_rcu(&phyinfo->node, &cnfg->phys);
out:
mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
bool up)
{
struct cfcnfg_phyinfo *phyinfo;
rcu_read_lock();
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
if (phyinfo == NULL) {
rcu_read_unlock();
return -ENODEV;
}
if (phyinfo->up == up) {
rcu_read_unlock();
return 0;
}
phyinfo->up = up;
if (up) {
cffrml_hold(phyinfo->frm_layer);
cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
phy_layer->id);
} else {
cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
cffrml_put(phyinfo->frm_layer);
}
rcu_read_unlock();
return 0;
}
EXPORT_SYMBOL(cfcnfg_set_phy_state);
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
struct cflayer *frml, *frml_dn;
u16 phyid;
phyid = phy_layer->id;
caif_assert(phyid == cnfg->phy_layers[phyid].id);
caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
caif_assert(phy_layer->id == phyid);
caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
struct cfcnfg_phyinfo *phyinfo;
memset(&cnfg->phy_layers[phy_layer->id], 0,
sizeof(struct cfcnfg_phyinfo));
frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
might_sleep();
mutex_lock(&cnfg->lock);
phyid = phy_layer->id;
phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
if (phyinfo == NULL)
return 0;
caif_assert(phyid == phyinfo->id);
caif_assert(phy_layer == phyinfo->phy_layer);
caif_assert(phy_layer->id == phyid);
caif_assert(phyinfo->frm_layer->id == phyid);
list_del_rcu(&phyinfo->node);
synchronize_rcu();
frml = phyinfo->frm_layer;
frml_dn = frml->dn;
cffrml_set_uplayer(frml, NULL);
cffrml_set_dnlayer(frml, NULL);
kfree(frml);
if (phy_layer != frml_dn) {
layer_set_up(frml_dn, NULL);
layer_set_dn(frml_dn, NULL);
kfree(frml_dn);
}
layer_set_up(phy_layer, NULL);
if (phyinfo->phy_layer != frml_dn)
kfree(frml_dn);
kfree(frml);
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);