author     Jesse Gross <jesse@nicira.com>  2011-11-21 17:15:20 -0800
committer  Jesse Gross <jesse@nicira.com>  2011-11-22 11:13:35 -0800
commit     850b6b3b9f8c38b42e315c2c07d232a33b82da3e (patch)
tree       d871e18ffa1fcb45bb85610fe3e4d15a8ce52e2d /datapath
parent     28da1f8f725fc2a797174df18a7b3e31ef49ede0 (diff)
datapath: Scope global symbols with ovs_ prefix.
OVS has quite a few global symbols that should be scoped with a prefix to
prevent collisions with other modules in the kernel.

Suggested-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
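To illustrate the convention (a sketch for context, not part of the commit itself), each rename leaves the function's signature and behavior untouched and only prefixes its externally visible name, taking ovs_dp_name() and the exported ioctl hook from the hunks below as examples:

    /* Before: a generic name with external linkage, visible kernel-wide
     * once the module is loaded and therefore prone to clashes. */
    const char *dp_name(const struct datapath *dp);

    /* After: the same function, scoped under the ovs_ prefix. */
    const char *ovs_dp_name(const struct datapath *dp);

    /* Exported symbols follow the same pattern (see datapath.c below). */
    int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
    EXPORT_SYMBOL(ovs_dp_ioctl_hook);

Callers are updated mechanically throughout the datapath, so only the symbol namespace changes.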
Diffstat (limited to 'datapath')
-rw-r--r--  datapath/actions.c             |   8
-rw-r--r--  datapath/brcompat.c            |   4
-rw-r--r--  datapath/datapath.c            | 208
-rw-r--r--  datapath/datapath.h            |  18
-rw-r--r--  datapath/dp_notify.c           |  20
-rw-r--r--  datapath/dp_sysfs.h            |  10
-rw-r--r--  datapath/dp_sysfs_dp.c         |  32
-rw-r--r--  datapath/dp_sysfs_if.c         |   8
-rw-r--r--  datapath/flow.c                |  70
-rw-r--r--  datapath/flow.h                |  58
-rw-r--r--  datapath/tunnel.c              |  90
-rw-r--r--  datapath/tunnel.h              |  47
-rw-r--r--  datapath/vport-capwap.c        |  32
-rw-r--r--  datapath/vport-generic.c       |   6
-rw-r--r--  datapath/vport-generic.h       |   6
-rw-r--r--  datapath/vport-gre.c           |  36
-rw-r--r--  datapath/vport-internal_dev.c  |  46
-rw-r--r--  datapath/vport-internal_dev.h  |   4
-rw-r--r--  datapath/vport-netdev.c        |  58
-rw-r--r--  datapath/vport-netdev.h        |  22
-rw-r--r--  datapath/vport-patch.c         |  24
-rw-r--r--  datapath/vport.c               |  82
-rw-r--r--  datapath/vport.h               |  46
23 files changed, 470 insertions, 465 deletions
diff --git a/datapath/actions.c b/datapath/actions.c
index 70fe153a..824791d1 100644
--- a/datapath/actions.c
+++ b/datapath/actions.c
@@ -254,7 +254,7 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
return -ENODEV;
}
- vport_send(vport, skb);
+ ovs_vport_send(vport, skb);
return 0;
}
@@ -283,7 +283,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
}
}
- return dp_upcall(dp, skb, &upcall);
+ return ovs_dp_upcall(dp, skb, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
@@ -426,13 +426,13 @@ static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
if (net_ratelimit())
pr_warn("%s: flow looped %d times, dropping\n",
- dp_name(dp), MAX_LOOPS);
+ ovs_dp_name(dp), MAX_LOOPS);
actions->actions_len = 0;
return -ELOOP;
}
/* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
struct loop_counter *loop;
diff --git a/datapath/brcompat.c b/datapath/brcompat.c
index a315fc5a..10a75ece 100644
--- a/datapath/brcompat.c
+++ b/datapath/brcompat.c
@@ -517,7 +517,7 @@ static int __init brc_init(void)
brioctl_set(brc_ioctl_deviceless_stub);
/* Set the openvswitch_mod device ioctl handler */
- dp_ioctl_hook = brc_dev_ioctl;
+ ovs_dp_ioctl_hook = brc_dev_ioctl;
/* Randomize the initial sequence number. This is not a security
* feature; it only helps avoid crossed wires between userspace and
@@ -548,7 +548,7 @@ error:
static void brc_cleanup(void)
{
/* Unregister ioctl hooks */
- dp_ioctl_hook = NULL;
+ ovs_dp_ioctl_hook = NULL;
brioctl_set(NULL);
genl_unregister_family(&brc_genl_family);
diff --git a/datapath/datapath.c b/datapath/datapath.c
index a69461d9..4d95e04d 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -63,8 +63,8 @@
#error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
#endif
-int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-EXPORT_SYMBOL(dp_ioctl_hook);
+int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
+EXPORT_SYMBOL(ovs_dp_ioctl_hook);
/**
* DOC: Locking:
@@ -103,7 +103,7 @@ static struct datapath *get_dp(int dp_ifindex)
rcu_read_lock();
dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
if (dev) {
- struct vport *vport = internal_dev_get_vport(dev);
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
if (vport)
dp = vport->dp;
}
@@ -113,7 +113,7 @@ static struct datapath *get_dp(int dp_ifindex)
}
/* Must be called with rcu_read_lock or RTNL lock. */
-const char *dp_name(const struct datapath *dp)
+const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
return vport->ops->get_name(vport);
@@ -236,7 +236,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- flow_tbl_destroy((__force struct flow_table *)dp->table);
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
@@ -246,7 +246,7 @@ static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
- vport = vport_add(parms);
+ vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
@@ -260,12 +260,12 @@ static struct vport *new_vport(const struct vport_parms *parms)
}
/* Called with RTNL lock. */
-void dp_detach_port(struct vport *p)
+void ovs_dp_detach_port(struct vport *p)
{
ASSERT_RTNL();
if (p->port_no != OVSP_LOCAL)
- dp_sysfs_del_if(p);
+ ovs_dp_sysfs_del_if(p);
dp_ifinfo_notify(RTM_DELLINK, p);
/* First drop references to device. */
@@ -273,11 +273,11 @@ void dp_detach_port(struct vport *p)
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- vport_del(p);
+ ovs_vport_del(p);
}
/* Must be called with rcu_read_lock. */
-void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct sw_flow *flow;
@@ -292,15 +292,15 @@ void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
int key_len;
/* Extract flow from 'skb' into 'key'. */
- error = flow_extract(skb, p->port_no, &key, &key_len);
+ error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
if (unlikely(error)) {
kfree_skb(skb);
return;
}
/* Look up flow. */
- flow = flow_tbl_lookup(rcu_dereference(dp->table),
- &key, key_len);
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
+ &key, key_len);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -308,7 +308,7 @@ void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
upcall.key = &key;
upcall.userdata = NULL;
upcall.pid = p->upcall_pid;
- dp_upcall(dp, skb, &upcall);
+ ovs_dp_upcall(dp, skb, &upcall);
consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
@@ -318,8 +318,8 @@ void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
stats_counter = &stats->n_hit;
- flow_used(OVS_CB(skb)->flow, skb);
- execute_actions(dp, skb);
+ ovs_flow_used(OVS_CB(skb)->flow, skb);
+ ovs_execute_actions(dp, skb);
out:
/* Update datapath statistics. */
@@ -336,8 +336,8 @@ static struct genl_family dp_packet_genl_family = {
.maxattr = OVS_PACKET_ATTR_MAX
};
-int dp_upcall(struct datapath *dp, struct sk_buff *skb,
- const struct dp_upcall_info *upcall_info)
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
int dp_ifindex;
@@ -464,7 +464,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- flow_to_nlattrs(upcall_info->key, user_skb);
+ ovs_flow_to_nlattrs(upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
@@ -494,13 +494,13 @@ static int flush_flows(int dp_ifindex)
return -ENODEV;
old_table = genl_dereference(dp->table);
- new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
+ new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
rcu_assign_pointer(dp->table, new_table);
- flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_deferred_destroy(old_table);
return 0;
}
@@ -743,19 +743,19 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
packet->protocol = htons(ETH_P_802_2);
/* Build an sw_flow for sending this packet. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
- err = flow_extract(packet, -1, &flow->key, &key_len);
+ err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
if (err)
goto err_flow_put;
- err = flow_metadata_from_nlattrs(&flow->key.phy.priority,
- &flow->key.phy.in_port,
- &flow->key.phy.tun_id,
- a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+ &flow->key.phy.in_port,
+ &flow->key.phy.tun_id,
+ a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_put;
@@ -763,9 +763,9 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_flow_put;
- flow->hash = flow_hash(&flow->key, key_len);
+ flow->hash = ovs_flow_hash(&flow->key, key_len);
- acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
if (IS_ERR(acts))
goto err_flow_put;
@@ -781,17 +781,17 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
local_bh_disable();
- err = execute_actions(dp, packet);
+ err = ovs_execute_actions(dp, packet);
local_bh_enable();
rcu_read_unlock();
- flow_put(flow);
+ ovs_flow_put(flow);
return err;
err_unlock:
rcu_read_unlock();
err_flow_put:
- flow_put(flow);
+ ovs_flow_put(flow);
err_kfree_skb:
kfree_skb(packet);
err:
@@ -817,7 +817,7 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
int i;
struct flow_table *table = genl_dereference(dp->table);
- stats->n_flows = flow_tbl_count(table);
+ stats->n_flows = ovs_flow_tbl_count(table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
@@ -852,7 +852,7 @@ static struct genl_family dp_flow_genl_family = {
.maxattr = OVS_FLOW_ATTR_MAX
};
-static struct genl_multicast_group dp_flow_multicast_group = {
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP
};
@@ -882,7 +882,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
goto nla_put_failure;
- err = flow_to_nlattrs(&flow->key, skb);
+ err = ovs_flow_to_nlattrs(&flow->key, skb);
if (err)
goto error;
nla_nest_end(skb, nla);
@@ -895,7 +895,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
spin_unlock_bh(&flow->lock);
if (used)
- NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
+ NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
if (stats.n_packets)
NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
@@ -984,7 +984,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY])
goto error;
- error = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (error)
goto error;
@@ -1004,7 +1004,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto error;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow) {
struct sw_flow_actions *acts;
@@ -1014,19 +1014,19 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto error;
/* Expand table, if necessary, to make room. */
- if (flow_tbl_need_to_expand(table)) {
+ if (ovs_flow_tbl_need_to_expand(table)) {
struct flow_table *new_table;
- new_table = flow_tbl_expand(table);
+ new_table = ovs_flow_tbl_expand(table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- flow_tbl_deferred_destroy(table);
+ ovs_flow_tbl_deferred_destroy(table);
table = genl_dereference(dp->table);
}
}
/* Allocate flow. */
- flow = flow_alloc();
+ flow = ovs_flow_alloc();
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
goto error;
@@ -1035,15 +1035,15 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
clear_stats(flow);
/* Obtain actions. */
- acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
error = PTR_ERR(acts);
if (IS_ERR(acts))
goto error_free_flow;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- flow->hash = flow_hash(&key, key_len);
- flow_tbl_insert(table, flow);
+ flow->hash = ovs_flow_hash(&key, key_len);
+ ovs_flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
info->snd_seq,
@@ -1074,13 +1074,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
old_acts->actions_len))) {
struct sw_flow_actions *new_acts;
- new_acts = flow_actions_alloc(acts_attrs);
+ new_acts = ovs_flow_actions_alloc(acts_attrs);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
rcu_assign_pointer(flow->sf_acts, new_acts);
- flow_deferred_free_acts(old_acts);
+ ovs_flow_deferred_free_acts(old_acts);
}
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
@@ -1096,14 +1096,16 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (!IS_ERR(reply))
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
else
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_flow_multicast_group.id, PTR_ERR(reply));
+ ovs_dp_flow_multicast_group.id,
+ PTR_ERR(reply));
return 0;
error_free_flow:
- flow_put(flow);
+ ovs_flow_put(flow);
error:
return error;
}
@@ -1122,7 +1124,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
if (!a[OVS_FLOW_ATTR_KEY])
return -EINVAL;
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
@@ -1131,7 +1133,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
@@ -1157,7 +1159,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (!a[OVS_FLOW_ATTR_KEY])
return flush_flows(ovs_header->dp_ifindex);
- err = flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
if (err)
return err;
@@ -1166,7 +1168,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
table = genl_dereference(dp->table);
- flow = flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
if (!flow)
return -ENOENT;
@@ -1174,16 +1176,16 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (!reply)
return -ENOMEM;
- flow_tbl_remove(table, flow);
+ ovs_flow_tbl_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
- flow_deferred_free(flow);
+ ovs_flow_deferred_free(flow);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
return 0;
}
@@ -1205,7 +1207,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
bucket = cb->args[0];
obj = cb->args[1];
- flow = flow_tbl_next(table, &bucket, &obj);
+ flow = ovs_flow_tbl_next(table, &bucket, &obj);
if (!flow)
break;
@@ -1260,7 +1262,7 @@ static struct genl_family dp_datapath_genl_family = {
.maxattr = OVS_DP_ATTR_MAX
};
-static struct genl_multicast_group dp_datapath_multicast_group = {
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
.name = OVS_DATAPATH_MCGROUP
};
@@ -1279,7 +1281,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
ovs_header->dp_ifindex = get_dpifindex(dp);
rcu_read_lock();
- err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
+ err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
rcu_read_unlock();
if (err)
goto nla_put_failure;
@@ -1330,7 +1332,7 @@ static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
struct vport *vport;
rcu_read_lock();
- vport = vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
rcu_read_unlock();
}
@@ -1372,7 +1374,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
+ rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
@@ -1406,20 +1408,21 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_destroy_local_port;
list_add_tail(&dp->list_node, &dps);
- dp_sysfs_add_dp(dp);
+ ovs_dp_sysfs_add_dp(dp);
rtnl_unlock();
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
err_destroy_local_port:
- dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- flow_tbl_destroy(genl_dereference(dp->table));
+ ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
kfree(dp);
err_put_module:
@@ -1455,11 +1458,11 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
if (vport->port_no != OVSP_LOCAL)
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
- dp_sysfs_del_dp(dp);
+ ovs_dp_sysfs_del_dp(dp);
list_del(&dp->list_node);
- dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
/* rtnl_unlock() will wait until all the references to devices that
* are pending unregistration have been dropped. We do it here to
@@ -1472,7 +1475,8 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
module_put(THIS_MODULE);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
return 0;
@@ -1501,12 +1505,14 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_datapath_multicast_group.id, err);
+ ovs_dp_datapath_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+
return 0;
}
@@ -1600,7 +1606,7 @@ static struct genl_family dp_vport_genl_family = {
.maxattr = OVS_VPORT_ATTR_MAX
};
-struct genl_multicast_group dp_vport_multicast_group = {
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP
};
@@ -1624,14 +1630,14 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- vport_get_stats(vport, &vport_stats);
+ ovs_vport_get_stats(vport, &vport_stats);
NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
&vport_stats);
NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
vport->ops->get_addr(vport));
- err = vport_get_options(vport, skb);
+ err = ovs_vport_get_options(vport, skb);
if (err == -EMSGSIZE)
goto error;
@@ -1676,7 +1682,7 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
struct vport *vport;
if (a[OVS_VPORT_ATTR_NAME]) {
- vport = vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+ vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
return vport;
@@ -1705,10 +1711,10 @@ static int change_vport(struct vport *vport,
int err = 0;
if (a[OVS_VPORT_ATTR_STATS])
- vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+ ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
if (a[OVS_VPORT_ATTR_ADDRESS])
- err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
+ err = ovs_vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
return err;
}
@@ -1774,7 +1780,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(vport))
goto exit_unlock;
- dp_sysfs_add_if(vport);
+ ovs_dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (!err) {
@@ -1785,11 +1791,11 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
err = PTR_ERR(reply);
}
if (err) {
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
@@ -1821,7 +1827,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
- err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+ err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
err = change_vport(vport, a);
if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
@@ -1832,12 +1838,12 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_vport_multicast_group.id, err);
+ ovs_dp_vport_multicast_group.id, err);
return 0;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
@@ -1873,10 +1879,10 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(reply))
goto exit_unlock;
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
exit_unlock:
rtnl_unlock();
@@ -1984,13 +1990,13 @@ struct genl_family_and_ops {
static const struct genl_family_and_ops dp_genl_families[] = {
{ &dp_datapath_genl_family,
dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
- &dp_datapath_multicast_group },
+ &ovs_dp_datapath_multicast_group },
{ &dp_vport_genl_family,
dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
- &dp_vport_multicast_group },
+ &ovs_dp_vport_multicast_group },
{ &dp_flow_genl_family,
dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
- &dp_flow_multicast_group },
+ &ovs_dp_flow_multicast_group },
{ &dp_packet_genl_family,
dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
NULL },
@@ -2044,19 +2050,19 @@ static int __init dp_init(void)
pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
VERSION BUILDNR);
- err = tnl_init();
+ err = ovs_tnl_init();
if (err)
goto error;
- err = flow_init();
+ err = ovs_flow_init();
if (err)
goto error_tnl_exit;
- err = vport_init();
+ err = ovs_vport_init();
if (err)
goto error_flow_exit;
- err = register_netdevice_notifier(&dp_device_notifier);
+ err = register_netdevice_notifier(&ovs_dp_device_notifier);
if (err)
goto error_vport_exit;
@@ -2067,13 +2073,13 @@ static int __init dp_init(void)
return 0;
error_unreg_notifier:
- unregister_netdevice_notifier(&dp_device_notifier);
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_vport_exit:
- vport_exit();
+ ovs_vport_exit();
error_flow_exit:
- flow_exit();
+ ovs_flow_exit();
error_tnl_exit:
- tnl_exit();
+ ovs_tnl_exit();
error:
return err;
}
@@ -2082,10 +2088,10 @@ static void dp_cleanup(void)
{
rcu_barrier();
dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
- unregister_netdevice_notifier(&dp_device_notifier);
- vport_exit();
- flow_exit();
- tnl_exit();
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
+ ovs_vport_exit();
+ ovs_flow_exit();
+ ovs_tnl_exit();
}
module_init(dp_init);
diff --git a/datapath/datapath.h b/datapath/datapath.h
index 85842215..27151b9c 100644
--- a/datapath/datapath.h
+++ b/datapath/datapath.h
@@ -131,18 +131,18 @@ struct dp_upcall_info {
u32 pid;
};
-extern struct notifier_block dp_device_notifier;
-extern struct genl_multicast_group dp_vport_multicast_group;
-extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+extern int (*ovs_dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
-void dp_process_received_packet(struct vport *, struct sk_buff *);
-void dp_detach_port(struct vport *);
-int dp_upcall(struct datapath *, struct sk_buff *,
- const struct dp_upcall_info *);
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+ const struct dp_upcall_info *);
-const char *dp_name(const struct datapath *dp);
+const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
-int execute_actions(struct datapath *dp, struct sk_buff *skb);
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
#endif /* datapath.h */
diff --git a/datapath/dp_notify.c b/datapath/dp_notify.c
index 5d41d27e..d040d46e 100644
--- a/datapath/dp_notify.c
+++ b/datapath/dp_notify.c
@@ -29,38 +29,38 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
struct net_device *dev = ptr;
struct vport *vport;
- if (is_internal_dev(dev))
- vport = internal_dev_get_vport(dev);
+ if (ovs_is_internal_dev(dev))
+ vport = ovs_internal_dev_get_vport(dev);
else
- vport = netdev_get_vport(dev);
+ vport = ovs_netdev_get_vport(dev);
if (!vport)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
- if (!is_internal_dev(dev)) {
+ if (!ovs_is_internal_dev(dev)) {
struct sk_buff *notify;
notify = ovs_vport_cmd_build_info(vport, 0, 0,
OVS_VPORT_CMD_DEL);
- dp_detach_port(vport);
+ ovs_dp_detach_port(vport);
if (IS_ERR(notify)) {
netlink_set_err(INIT_NET_GENL_SOCK, 0,
- dp_vport_multicast_group.id,
+ ovs_dp_vport_multicast_group.id,
PTR_ERR(notify));
break;
}
- genlmsg_multicast(notify, 0, dp_vport_multicast_group.id,
+ genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
GFP_KERNEL);
}
break;
case NETDEV_CHANGENAME:
if (vport->port_no != OVSP_LOCAL) {
- dp_sysfs_del_if(vport);
- dp_sysfs_add_if(vport);
+ ovs_dp_sysfs_del_if(vport);
+ ovs_dp_sysfs_add_if(vport);
}
break;
}
@@ -68,6 +68,6 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
return NOTIFY_DONE;
}
-struct notifier_block dp_device_notifier = {
+struct notifier_block ovs_dp_device_notifier = {
.notifier_call = dp_device_event
};
diff --git a/datapath/dp_sysfs.h b/datapath/dp_sysfs.h
index 8a1013b9..c9812855 100644
--- a/datapath/dp_sysfs.h
+++ b/datapath/dp_sysfs.h
@@ -23,15 +23,15 @@ struct datapath;
struct vport;
/* dp_sysfs_dp.c */
-int dp_sysfs_add_dp(struct datapath *dp);
-int dp_sysfs_del_dp(struct datapath *dp);
+int ovs_dp_sysfs_add_dp(struct datapath *dp);
+int ovs_dp_sysfs_del_dp(struct datapath *dp);
/* dp_sysfs_if.c */
-int dp_sysfs_add_if(struct vport *p);
-int dp_sysfs_del_if(struct vport *p);
+int ovs_dp_sysfs_add_if(struct vport *p);
+int ovs_dp_sysfs_del_if(struct vport *p);
#ifdef CONFIG_SYSFS
-extern struct sysfs_ops brport_sysfs_ops;
+extern struct sysfs_ops ovs_brport_sysfs_ops;
#endif
#endif /* dp_sysfs.h */
diff --git a/datapath/dp_sysfs_dp.c b/datapath/dp_sysfs_dp.c
index a83f5f7f..1574a93f 100644
--- a/datapath/dp_sysfs_dp.c
+++ b/datapath/dp_sysfs_dp.c
@@ -55,7 +55,7 @@
static struct datapath *sysfs_get_dp(struct net_device *netdev)
{
- struct vport *vport = internal_dev_get_vport(netdev);
+ struct vport *vport = ovs_internal_dev_get_vport(netdev);
return vport ? vport->dp : NULL;
}
/*
@@ -88,7 +88,7 @@ static ssize_t store_bridge_parm(DEVICE_PARAMS,
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
pr_warning("%s: xxx writing dp parms not supported yet!\n",
- dp_name(dp));
+ ovs_dp_name(dp));
else
result = -ENODEV;
@@ -106,7 +106,7 @@ static ssize_t show_forward_delay(DEVICE_PARAMS, char *buf)
static void set_forward_delay(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_forward_delay()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_forward_delay()\n", ovs_dp_name(dp));
}
static ssize_t store_forward_delay(DEVICE_PARAMS,
@@ -124,7 +124,7 @@ static ssize_t show_hello_time(DEVICE_PARAMS, char *buf)
static void set_hello_time(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_hello_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_hello_time()\n", ovs_dp_name(dp));
}
static ssize_t store_hello_time(DEVICE_PARAMS,
@@ -143,7 +143,7 @@ static ssize_t show_max_age(DEVICE_PARAMS, char *buf)
static void set_max_age(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_max_age()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_max_age()\n", ovs_dp_name(dp));
}
static ssize_t store_max_age(DEVICE_PARAMS,
@@ -160,7 +160,7 @@ static ssize_t show_ageing_time(DEVICE_PARAMS, char *buf)
static void set_ageing_time(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_ageing_time()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_ageing_time()\n", ovs_dp_name(dp));
}
static ssize_t store_ageing_time(DEVICE_PARAMS,
@@ -188,7 +188,7 @@ static ssize_t store_stp_state(DEVICE_PARAMS,
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
- pr_info("%s: xxx attempt to set_stp_state()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_stp_state()\n", ovs_dp_name(dp));
else
result = -ENODEV;
@@ -206,7 +206,7 @@ static ssize_t show_priority(DEVICE_PARAMS, char *buf)
static void set_priority(struct datapath *dp, unsigned long val)
{
- pr_info("%s: xxx attempt to set_priority()\n", dp_name(dp));
+ pr_info("%s: xxx attempt to set_priority()\n", ovs_dp_name(dp));
}
static ssize_t store_priority(DEVICE_PARAMS,
@@ -229,7 +229,7 @@ static ssize_t show_bridge_id(DEVICE_PARAMS, char *buf)
rcu_read_lock();
- vport = internal_dev_get_vport(to_net_dev(d));
+ vport = ovs_internal_dev_get_vport(to_net_dev(d));
if (vport) {
const unsigned char *addr;
@@ -312,7 +312,7 @@ static ssize_t store_group_addr(DEVICE_PARAMS,
dp = sysfs_get_dp(to_net_dev(d));
if (dp)
pr_info("%s: xxx attempt to store_group_addr()\n",
- dp_name(dp));
+ ovs_dp_name(dp));
else
result = -ENODEV;
@@ -360,7 +360,7 @@ static struct attribute_group bridge_group = {
* to hold links. The ifobj exists in the same data structure
* as its parent the bridge so reference counting works.
*/
-int dp_sysfs_add_dp(struct datapath *dp)
+int ovs_dp_sysfs_add_dp(struct datapath *dp)
{
struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
struct kobject *kobj = vport->ops->get_kobj(vport);
@@ -370,7 +370,7 @@ int dp_sysfs_add_dp(struct datapath *dp)
err = sysfs_create_group(kobj, &bridge_group);
if (err) {
pr_info("%s: can't create group %s/%s\n",
- __func__, dp_name(dp), bridge_group.name);
+ __func__, ovs_dp_name(dp), bridge_group.name);
goto out1;
}
@@ -378,7 +378,7 @@ int dp_sysfs_add_dp(struct datapath *dp)
err = kobject_add(&dp->ifobj, kobj, SYSFS_BRIDGE_PORT_SUBDIR);
if (err) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
- __func__, dp_name(dp), kobject_name(&dp->ifobj));
+ __func__, ovs_dp_name(dp), kobject_name(&dp->ifobj));
goto out2;
}
kobject_uevent(&dp->ifobj, KOBJ_ADD);
@@ -390,7 +390,7 @@ int dp_sysfs_add_dp(struct datapath *dp)
return err;
}
-int dp_sysfs_del_dp(struct datapath *dp)
+int ovs_dp_sysfs_del_dp(struct datapath *dp)
{
struct vport *vport = rtnl_dereference(dp->ports[OVSP_LOCAL]);
struct kobject *kobj = vport->ops->get_kobj(vport);
@@ -401,8 +401,8 @@ int dp_sysfs_del_dp(struct datapath *dp)
return 0;
}
#else /* !CONFIG_SYSFS */
-int dp_sysfs_add_dp(struct datapath *dp) { return 0; }
-int dp_sysfs_del_dp(struct datapath *dp) { return 0; }
+int ovs_dp_sysfs_add_dp(struct datapath *dp) { return 0; }
+int ovs_dp_sysfs_del_dp(struct datapath *dp) { return 0; }
int dp_sysfs_add_if(struct vport *p) { return 0; }
int dp_sysfs_del_if(struct vport *p) { return 0; }
#endif /* !CONFIG_SYSFS */
diff --git a/datapath/dp_sysfs_if.c b/datapath/dp_sysfs_if.c
index d35821b6..5b695cfc 100644
--- a/datapath/dp_sysfs_if.c
+++ b/datapath/dp_sysfs_if.c
@@ -191,12 +191,12 @@ static ssize_t brport_store(struct kobject *kobj,
return -EPERM;
pr_warning("%s: xxx writing port parms not supported yet!\n",
- dp_name(p->dp));
+ ovs_dp_name(p->dp));
return ret;
}
-struct sysfs_ops brport_sysfs_ops = {
+struct sysfs_ops ovs_brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
@@ -206,7 +206,7 @@ struct sysfs_ops brport_sysfs_ops = {
* Creates a brport subdirectory with bridge attributes.
* Puts symlink in bridge's brport subdirectory
*/
-int dp_sysfs_add_if(struct vport *p)
+int ovs_dp_sysfs_add_if(struct vport *p)
{
struct datapath *dp = p->dp;
struct vport *local_port = rtnl_dereference(dp->ports[OVSP_LOCAL]);
@@ -254,7 +254,7 @@ err:
return err;
}
-int dp_sysfs_del_if(struct vport *p)
+int ovs_dp_sysfs_del_if(struct vport *p)
{
if (p->linkname[0]) {
sysfs_remove_link(&p->dp->ifobj, p->linkname);
diff --git a/datapath/flow.c b/datapath/flow.c
index 644a377f..78f737ab 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -111,7 +111,7 @@ static bool icmphdr_ok(struct sk_buff *skb)
sizeof(struct icmphdr));
}
-u64 flow_used_time(unsigned long flow_jiffies)
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec cur_ts;
u64 cur_ms, idle_ms;
@@ -234,7 +234,7 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
-void flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
@@ -252,7 +252,7 @@ void flow_used(struct sw_flow *flow, struct sk_buff *skb)
spin_unlock(&flow->lock);
}
-struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
int actions_len = nla_len(actions);
struct sw_flow_actions *sfa;
@@ -272,7 +272,7 @@ struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
return sfa;
}
-struct sw_flow *flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
@@ -322,7 +322,7 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
-struct flow_table *flow_tbl_alloc(int new_size)
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
@@ -344,10 +344,10 @@ struct flow_table *flow_tbl_alloc(int new_size)
static void flow_free(struct sw_flow *flow)
{
flow->dead = true;
- flow_put(flow);
+ ovs_flow_put(flow);
}
-void flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
@@ -373,10 +373,10 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
- flow_tbl_destroy(table);
+ ovs_flow_tbl_destroy(table);
}
-void flow_tbl_deferred_destroy(struct flow_table *table)
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
if (!table)
return;
@@ -384,7 +384,7 @@ void flow_tbl_deferred_destroy(struct flow_table *table)
call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
-struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
@@ -409,13 +409,13 @@ struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
return NULL;
}
-struct flow_table *flow_tbl_expand(struct flow_table *table)
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
struct flow_table *new_table;
int n_buckets = table->n_buckets * 2;
int i;
- new_table = flow_tbl_alloc(n_buckets);
+ new_table = ovs_flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
@@ -428,35 +428,35 @@ struct flow_table *flow_tbl_expand(struct flow_table *table)
hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
hlist_del_init_rcu(&flow->hash_node);
- flow_tbl_insert(new_table, flow);
+ ovs_flow_tbl_insert(new_table, flow);
}
}
return new_table;
}
-/* RCU callback used by flow_deferred_free. */
+/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
flow->dead = true;
- flow_put(flow);
+ ovs_flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_deferred_free(struct sw_flow *flow)
{
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
-void flow_hold(struct sw_flow *flow)
+void ovs_flow_hold(struct sw_flow *flow)
{
atomic_inc(&flow->refcnt);
}
-void flow_put(struct sw_flow *flow)
+void ovs_flow_put(struct sw_flow *flow)
{
if (unlikely(!flow))
return;
@@ -467,7 +467,7 @@ void flow_put(struct sw_flow *flow)
}
}
-/* RCU callback used by flow_deferred_free_acts. */
+/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
struct sw_flow_actions *sf_acts = container_of(rcu,
@@ -477,7 +477,7 @@ static void rcu_free_acts_callback(struct rcu_head *rcu)
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
@@ -621,7 +621,7 @@ out:
}
/**
- * flow_extract - extracts a flow key from an Ethernet frame.
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
* @in_port: port number on which @skb was received.
@@ -644,7 +644,7 @@ out:
* of a correct length, otherwise the same as skb->network_header.
* For other key->dl_type values it is left untouched.
*/
-int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
int *key_lenp)
{
int error = 0;
@@ -811,12 +811,12 @@ out:
return error;
}
-u32 flow_hash(const struct sw_flow_key *key, int key_len)
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
-struct sw_flow *flow_tbl_lookup(struct flow_table *table,
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
@@ -824,7 +824,7 @@ struct sw_flow *flow_tbl_lookup(struct flow_table *table,
struct hlist_head *head;
u32 hash;
- hash = flow_hash(key, key_len);
+ hash = ovs_flow_hash(key, key_len);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, n, head, hash_node) {
@@ -837,7 +837,7 @@ struct sw_flow *flow_tbl_lookup(struct flow_table *table,
return NULL;
}
-void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
@@ -846,7 +846,7 @@ void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
table->count++;
}
-void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
if (!hlist_unhashed(&flow->hash_node)) {
hlist_del_init_rcu(&flow->hash_node);
@@ -1012,13 +1012,13 @@ static int parse_flow_nlattrs(const struct nlattr *attr,
}
/**
- * flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
* @swkey: receives the extracted flow key.
* @key_lenp: number of bytes used in @swkey.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*/
-int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *attr)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
@@ -1178,7 +1178,7 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
}
/**
- * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @in_port: receives the extracted input port.
* @tun_id: receives the extracted tunnel ID.
* @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
@@ -1189,8 +1189,8 @@ int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
- const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
+ const struct nlattr *attr)
{
const struct nlattr *nla;
int rem;
@@ -1228,7 +1228,7 @@ int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
return 0;
}
-int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
@@ -1390,7 +1390,7 @@ nla_put_failure:
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
-int flow_init(void)
+int ovs_flow_init(void)
{
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
@@ -1403,7 +1403,7 @@ int flow_init(void)
}
/* Uninitializes the flow module. */
-void flow_exit(void)
+void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}
diff --git a/datapath/flow.h b/datapath/flow.h
index 931d5d7e..36e738d6 100644
--- a/datapath/flow.h
+++ b/datapath/flow.h
@@ -126,22 +126,22 @@ struct arp_eth_header {
unsigned char ar_tip[4]; /* target IP address */
} __packed;
-int flow_init(void);
-void flow_exit(void);
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
-struct sw_flow *flow_alloc(void);
-void flow_deferred_free(struct sw_flow *);
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
-struct sw_flow_actions *flow_actions_alloc(const struct nlattr *);
-void flow_deferred_free_acts(struct sw_flow_actions *);
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-void flow_hold(struct sw_flow *);
-void flow_put(struct sw_flow *);
+void ovs_flow_hold(struct sw_flow *);
+void ovs_flow_put(struct sw_flow *);
-int flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
- int *key_lenp);
-void flow_used(struct sw_flow *, struct sk_buff *);
-u64 flow_used_time(unsigned long flow_jiffies);
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+ int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
/* Upper bound on the length of a nlattr-formatted flow key. The longest
* nlattr-formatted flow key would be:
@@ -162,11 +162,11 @@ u64 flow_used_time(unsigned long flow_jiffies);
*/
#define FLOW_BUFSIZE 144
-int flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
-int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *);
-int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
- const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
+ const struct nlattr *);
#define TBL_MIN_BUCKETS 1024
@@ -176,27 +176,27 @@ struct flow_table {
struct rcu_head rcu;
};
-static inline int flow_tbl_count(struct flow_table *table)
+static inline int ovs_flow_tbl_count(struct flow_table *table)
{
return table->count;
}
-static inline int flow_tbl_need_to_expand(struct flow_table *table)
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
{
return (table->count > table->n_buckets);
}
-struct sw_flow *flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int len);
-void flow_tbl_destroy(struct flow_table *table);
-void flow_tbl_deferred_destroy(struct flow_table *table);
-struct flow_table *flow_tbl_alloc(int new_size);
-struct flow_table *flow_tbl_expand(struct flow_table *table);
-void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
-void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-u32 flow_hash(const struct sw_flow_key *key, int key_len);
-
-struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
#endif /* flow.h */
diff --git a/datapath/tunnel.c b/datapath/tunnel.c
index 8bd0ee9c..41907b95 100644
--- a/datapath/tunnel.c
+++ b/datapath/tunnel.c
@@ -143,7 +143,7 @@ static void free_cache(struct tnl_cache *cache)
if (!cache)
return;
- flow_put(cache->flow);
+ ovs_flow_put(cache->flow);
ip_rt_put(cache->rt);
kfree(cache);
}
@@ -299,9 +299,9 @@ static struct vport *port_table_lookup(struct port_lookup_key *key,
return NULL;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
- int tunnel_type,
- const struct tnl_mutable_config **mutable)
+struct vport *ovs_tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
+ int tunnel_type,
+ const struct tnl_mutable_config **mutable)
{
struct port_lookup_key lookup;
struct vport *vport;
@@ -399,7 +399,7 @@ static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
}
/**
- * tnl_rcv - ingress point for generic tunnel code
+ * ovs_tnl_rcv - ingress point for generic tunnel code
*
* @vport: port this packet was received on
* @skb: received packet
@@ -413,7 +413,7 @@ static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
* - skb->csum does not include the inner Ethernet header.
* - The layer pointers are undefined.
*/
-void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
+void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
{
struct ethhdr *eh;
@@ -438,7 +438,7 @@ void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
return;
}
- vport_receive(vport, skb);
+ ovs_vport_receive(vport, skb);
}
static bool check_ipv4_address(__be32 addr)
@@ -607,9 +607,9 @@ static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
}
#endif /* IPv6 */
-bool tnl_frag_needed(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
+bool ovs_tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
@@ -709,7 +709,7 @@ bool tnl_frag_needed(struct vport *vport,
return false;
}
- vport_receive(vport, nskb);
+ ovs_vport_receive(vport, nskb);
return true;
}
@@ -756,8 +756,8 @@ static bool check_mtu(struct sk_buff *skb,
mtu = max(mtu, IP_MIN_MTU);
if (packet_length > mtu &&
- tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu,
+ OVS_CB(skb)->tun_id))
return false;
}
}
@@ -773,8 +773,8 @@ static bool check_mtu(struct sk_buff *skb,
mtu = max(mtu, IPV6_MIN_MTU);
if (packet_length > mtu &&
- tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu,
+ OVS_CB(skb)->tun_id))
return false;
}
}
@@ -830,7 +830,7 @@ static bool check_cache_valid(const struct tnl_cache *cache,
hh->hh_lock.sequence == cache->hh_seq &&
#endif
mutable->seq == cache->mutable_seq &&
- (!is_internal_dev(rt_dst(cache->rt).dev) ||
+ (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
(cache->flow && !cache->flow->dead));
}
@@ -945,7 +945,7 @@ static struct tnl_cache *build_cache(struct vport *vport,
cache->expiration = jiffies + tnl_vport->cache_exp_interval;
#endif
- if (is_internal_dev(rt_dst(rt).dev)) {
+ if (ovs_is_internal_dev(rt_dst(rt).dev)) {
struct sw_flow_key flow_key;
struct vport *dst_vport;
struct sk_buff *skb;
@@ -953,7 +953,7 @@ static struct tnl_cache *build_cache(struct vport *vport,
int flow_key_len;
struct sw_flow *flow;
- dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
if (!dst_vport)
goto done;
@@ -964,18 +964,18 @@ static struct tnl_cache *build_cache(struct vport *vport,
__skb_put(skb, cache->len);
memcpy(skb->data, get_cached_header(cache), cache->len);
- err = flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len);
+ err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
+ &flow_key_len);
consume_skb(skb);
if (err)
goto done;
- flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len);
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
+ &flow_key, flow_key_len);
if (flow) {
cache->flow = flow;
- flow_hold(flow);
+ ovs_flow_hold(flow);
}
}
@@ -1151,11 +1151,11 @@ free_frags:
* dropped so just free the rest. This may help improve the congestion
* that caused the first packet to be dropped.
*/
- tnl_free_linked_skbs(skb);
+ ovs_tnl_free_linked_skbs(skb);
return sent_len;
}
-int tnl_send(struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
@@ -1302,7 +1302,7 @@ int tnl_send(struct vport *vport, struct sk_buff *skb)
int orig_len = skb->len - cache->len;
struct vport *cache_vport;
- cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ cache_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - skb_network_offset(skb));
@@ -1315,7 +1315,7 @@ int tnl_send(struct vport *vport, struct sk_buff *skb)
}
OVS_CB(skb)->flow = cache->flow;
- vport_receive(cache_vport, skb);
+ ovs_vport_receive(cache_vport, skb);
sent_len += orig_len;
} else {
int xmit_err;
@@ -1334,14 +1334,14 @@ next:
}
if (unlikely(sent_len == 0))
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
goto out;
error_free:
- tnl_free_linked_skbs(skb);
+ ovs_tnl_free_linked_skbs(skb);
error:
- vport_record_error(vport, err);
+ ovs_vport_record_error(vport, err);
out:
dst_release(unattached_dst);
return sent_len;
@@ -1439,9 +1439,9 @@ static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
return 0;
}
-struct vport *tnl_create(const struct vport_parms *parms,
- const struct vport_ops *vport_ops,
- const struct tnl_ops *tnl_ops)
+struct vport *ovs_tnl_create(const struct vport_parms *parms,
+ const struct vport_ops *vport_ops,
+ const struct tnl_ops *tnl_ops)
{
struct vport *vport;
struct tnl_vport *tnl_vport;
@@ -1449,7 +1449,7 @@ struct vport *tnl_create(const struct vport_parms *parms,
int initial_frag_id;
int err;
- vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
@@ -1491,12 +1491,12 @@ error_free_mutable:
free_mutable_rtnl(mutable);
kfree(mutable);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
-int tnl_set_options(struct vport *vport, struct nlattr *options)
+int ovs_tnl_set_options(struct vport *vport, struct nlattr *options)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *old_mutable;
@@ -1533,7 +1533,7 @@ error:
return err;
}
-int tnl_get_options(const struct vport *vport, struct sk_buff *skb)
+int ovs_tnl_get_options(const struct vport *vport, struct sk_buff *skb)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
const struct tnl_mutable_config *mutable = rcu_dereference_rtnl(tnl_vport->mutable);
@@ -1565,10 +1565,10 @@ static void free_port_rcu(struct rcu_head *rcu)
free_cache((struct tnl_cache __force *)tnl_vport->cache);
kfree((struct tnl_mutable __force *)tnl_vport->mutable);
- vport_free(tnl_vport_to_vport(tnl_vport));
+ ovs_vport_free(tnl_vport_to_vport(tnl_vport));
}
-void tnl_destroy(struct vport *vport)
+void ovs_tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
@@ -1579,7 +1579,7 @@ void tnl_destroy(struct vport *vport)
call_rcu(&tnl_vport->rcu, free_port_rcu);
}
-int tnl_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *old_mutable, *mutable;
@@ -1597,19 +1597,19 @@ int tnl_set_addr(struct vport *vport, const unsigned char *addr)
return 0;
}
-const char *tnl_get_name(const struct vport *vport)
+const char *ovs_tnl_get_name(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
return tnl_vport->name;
}
-const unsigned char *tnl_get_addr(const struct vport *vport)
+const unsigned char *ovs_tnl_get_addr(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
}
-void tnl_free_linked_skbs(struct sk_buff *skb)
+void ovs_tnl_free_linked_skbs(struct sk_buff *skb)
{
while (skb) {
struct sk_buff *next = skb->next;
@@ -1618,7 +1618,7 @@ void tnl_free_linked_skbs(struct sk_buff *skb)
}
}
-int tnl_init(void)
+int ovs_tnl_init(void)
{
int i;
@@ -1633,7 +1633,7 @@ int tnl_init(void)
return 0;
}
-void tnl_exit(void)
+void ovs_tnl_exit(void)
{
int i;
diff --git a/datapath/tunnel.h b/datapath/tunnel.h
index e98b789c..6865ae61 100644
--- a/datapath/tunnel.h
+++ b/datapath/tunnel.h
@@ -242,33 +242,32 @@ struct tnl_vport {
#endif
};
-struct vport *tnl_create(const struct vport_parms *, const struct vport_ops *,
- const struct tnl_ops *);
-void tnl_destroy(struct vport *);
-
-int tnl_set_options(struct vport *, struct nlattr *);
-int tnl_get_options(const struct vport *, struct sk_buff *);
-
-int tnl_set_addr(struct vport *vport, const unsigned char *addr);
-const char *tnl_get_name(const struct vport *vport);
-const unsigned char *tnl_get_addr(const struct vport *vport);
-int tnl_send(struct vport *vport, struct sk_buff *skb);
-void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos);
-
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
- int tunnel_type,
- const struct tnl_mutable_config **mutable);
-bool tnl_frag_needed(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
-void tnl_free_linked_skbs(struct sk_buff *skb);
-
-int tnl_init(void);
-void tnl_exit(void);
+struct vport *ovs_tnl_create(const struct vport_parms *, const struct vport_ops *,
+ const struct tnl_ops *);
+void ovs_tnl_destroy(struct vport *);
+
+int ovs_tnl_set_options(struct vport *, struct nlattr *);
+int ovs_tnl_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_tnl_set_addr(struct vport *vport, const unsigned char *addr);
+const char *ovs_tnl_get_name(const struct vport *vport);
+const unsigned char *ovs_tnl_get_addr(const struct vport *vport);
+int ovs_tnl_send(struct vport *vport, struct sk_buff *skb);
+void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos);
+
+struct vport *ovs_tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
+ int tunnel_type,
+ const struct tnl_mutable_config **mutable);
+bool ovs_tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
+void ovs_tnl_free_linked_skbs(struct sk_buff *skb);
+
+int ovs_tnl_init(void);
+void ovs_tnl_exit(void);
static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
}
-
#endif /* tunnel.h */
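The block above is the whole tunnel-helper surface that protocol modules consume; the GRE and CAPWAP hunks further down in this patch are the real users. As a reading aid, here is a condensed, hedged sketch of that consumer pattern (the example_* and ovs_example_vport_ops names are invented for illustration, and the protocol-specific tnl_ops fields are elided):

#include "tunnel.h"
#include "vport-generic.h"

/* Protocol-specific knobs (tunnel type, IP protocol, header handling, ...)
 * would be filled in here; elided in this sketch. */
static const struct tnl_ops example_tnl_ops;

extern const struct vport_ops ovs_example_vport_ops;   /* hypothetical */

static struct vport *example_create(const struct vport_parms *parms)
{
        /* ovs_tnl_create() allocates the vport and takes over the common
         * tunnel bookkeeping; only creation is protocol specific. */
        return ovs_tnl_create(parms, &ovs_example_vport_ops, &example_tnl_ops);
}

const struct vport_ops ovs_example_vport_ops = {
        /* .type would name a real OVS_VPORT_TYPE_* value. */
        .flags          = VPORT_F_TUN_ID,
        .create         = example_create,
        .destroy        = ovs_tnl_destroy,
        .get_name       = ovs_tnl_get_name,
        .get_addr       = ovs_tnl_get_addr,
        .get_dev_flags  = ovs_vport_gen_get_dev_flags,
        .is_running     = ovs_vport_gen_is_running,
        .get_operstate  = ovs_vport_gen_get_operstate,
        .send           = ovs_tnl_send,
        /* remaining callbacks omitted; see the GRE table below. */
};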
diff --git a/datapath/vport-capwap.c b/datapath/vport-capwap.c
index 191156b8..6c1b0da9 100644
--- a/datapath/vport-capwap.c
+++ b/datapath/vport-capwap.c
@@ -333,8 +333,8 @@ static int capwap_rcv(struct sock *sk, struct sk_buff *skb)
goto out;
iph = ip_hdr(skb);
- vport = tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_CAPWAP,
- &mutable);
+ vport = ovs_tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_CAPWAP,
+ &mutable);
if (unlikely(!vport)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
goto error;
@@ -345,7 +345,7 @@ static int capwap_rcv(struct sock *sk, struct sk_buff *skb)
else
OVS_CB(skb)->tun_id = 0;
- tnl_rcv(vport, skb, iph->tos);
+ ovs_tnl_rcv(vport, skb, iph->tos);
goto out;
error:
@@ -364,7 +364,7 @@ static const struct tnl_ops capwap_tnl_ops = {
static struct vport *capwap_create(const struct vport_parms *parms)
{
- return tnl_create(parms, &capwap_vport_ops, &capwap_tnl_ops);
+ return ovs_tnl_create(parms, &ovs_capwap_vport_ops, &capwap_tnl_ops);
}
/* Random value. Irrelevant as long as it's not 0 since we set the handler. */
@@ -510,7 +510,7 @@ static struct sk_buff *fragment(struct sk_buff *skb, const struct vport *vport,
return result;
error:
- tnl_free_linked_skbs(result);
+ ovs_tnl_free_linked_skbs(result);
kfree_skb(skb);
return NULL;
}
@@ -785,22 +785,22 @@ static void capwap_frag_expire(unsigned long ifq)
inet_frag_put(&fq->ifq, &frag_state);
}
-const struct vport_ops capwap_vport_ops = {
+const struct vport_ops ovs_capwap_vport_ops = {
.type = OVS_VPORT_TYPE_CAPWAP,
.flags = VPORT_F_TUN_ID,
.init = capwap_init,
.exit = capwap_exit,
.create = capwap_create,
- .destroy = tnl_destroy,
- .set_addr = tnl_set_addr,
- .get_name = tnl_get_name,
- .get_addr = tnl_get_addr,
- .get_options = tnl_get_options,
- .set_options = tnl_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
- .send = tnl_send,
+ .destroy = ovs_tnl_destroy,
+ .set_addr = ovs_tnl_set_addr,
+ .get_name = ovs_tnl_get_name,
+ .get_addr = ovs_tnl_get_addr,
+ .get_options = ovs_tnl_get_options,
+ .set_options = ovs_tnl_set_options,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
+ .send = ovs_tnl_send,
};
#else
#warning CAPWAP tunneling will not be available on kernels before 2.6.26
diff --git a/datapath/vport-generic.c b/datapath/vport-generic.c
index 8fec185c..b10f28af 100644
--- a/datapath/vport-generic.c
+++ b/datapath/vport-generic.c
@@ -20,17 +20,17 @@
#include "vport-generic.h"
-unsigned vport_gen_get_dev_flags(const struct vport *vport)
+unsigned ovs_vport_gen_get_dev_flags(const struct vport *vport)
{
return IFF_UP | IFF_RUNNING | IFF_LOWER_UP;
}
-int vport_gen_is_running(const struct vport *vport)
+int ovs_vport_gen_is_running(const struct vport *vport)
{
return 1;
}
-unsigned char vport_gen_get_operstate(const struct vport *vport)
+unsigned char ovs_vport_gen_get_operstate(const struct vport *vport)
{
return IF_OPER_UP;
}
diff --git a/datapath/vport-generic.h b/datapath/vport-generic.h
index b7070e41..ff55b854 100644
--- a/datapath/vport-generic.h
+++ b/datapath/vport-generic.h
@@ -21,8 +21,8 @@
#include "vport.h"
-unsigned vport_gen_get_dev_flags(const struct vport *);
-int vport_gen_is_running(const struct vport *);
-unsigned char vport_gen_get_operstate(const struct vport *);
+unsigned ovs_vport_gen_get_dev_flags(const struct vport *);
+int ovs_vport_gen_is_running(const struct vport *);
+unsigned char ovs_vport_gen_get_operstate(const struct vport *);
#endif /* vport-generic.h */
diff --git a/datapath/vport-gre.c b/datapath/vport-gre.c
index 6e965cd8..4411cac9 100644
--- a/datapath/vport-gre.c
+++ b/datapath/vport-gre.c
@@ -205,8 +205,8 @@ static void gre_err(struct sk_buff *skb, u32 info)
if (tunnel_hdr_len < 0)
return;
- vport = tnl_find_port(iph->saddr, iph->daddr, key, TNL_T_PROTO_GRE,
- &mutable);
+ vport = ovs_tnl_find_port(iph->saddr, iph->daddr, key, TNL_T_PROTO_GRE,
+ &mutable);
if (!vport)
return;
@@ -283,7 +283,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
#endif
__skb_pull(skb, tunnel_hdr_len);
- tnl_frag_needed(vport, mutable, skb, mtu, key);
+ ovs_tnl_frag_needed(vport, mutable, skb, mtu, key);
__skb_push(skb, tunnel_hdr_len);
out:
@@ -342,8 +342,8 @@ static int gre_rcv(struct sk_buff *skb)
goto error;
iph = ip_hdr(skb);
- vport = tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_GRE,
- &mutable);
+ vport = ovs_tnl_find_port(iph->daddr, iph->saddr, key, TNL_T_PROTO_GRE,
+ &mutable);
if (unlikely(!vport)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
goto error;
@@ -357,7 +357,7 @@ static int gre_rcv(struct sk_buff *skb)
__skb_pull(skb, hdr_len);
skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);
- tnl_rcv(vport, skb, iph->tos);
+ ovs_tnl_rcv(vport, skb, iph->tos);
return 0;
error:
@@ -375,7 +375,7 @@ static const struct tnl_ops gre_tnl_ops = {
static struct vport *gre_create(const struct vport_parms *parms)
{
- return tnl_create(parms, &gre_vport_ops, &gre_tnl_ops);
+ return ovs_tnl_create(parms, &ovs_gre_vport_ops, &gre_tnl_ops);
}
static const struct net_protocol gre_protocol_handlers = {
@@ -399,20 +399,20 @@ static void gre_exit(void)
inet_del_protocol(&gre_protocol_handlers, IPPROTO_GRE);
}
-const struct vport_ops gre_vport_ops = {
+const struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.flags = VPORT_F_TUN_ID,
.init = gre_init,
.exit = gre_exit,
.create = gre_create,
- .destroy = tnl_destroy,
- .set_addr = tnl_set_addr,
- .get_name = tnl_get_name,
- .get_addr = tnl_get_addr,
- .get_options = tnl_get_options,
- .set_options = tnl_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
- .send = tnl_send,
+ .destroy = ovs_tnl_destroy,
+ .set_addr = ovs_tnl_set_addr,
+ .get_name = ovs_tnl_get_name,
+ .get_addr = ovs_tnl_get_addr,
+ .get_options = ovs_tnl_get_options,
+ .set_options = ovs_tnl_set_options,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
+ .send = ovs_tnl_send,
};
diff --git a/datapath/vport-internal_dev.c b/datapath/vport-internal_dev.c
index 6448ab60..c56f3b29 100644
--- a/datapath/vport-internal_dev.c
+++ b/datapath/vport-internal_dev.c
@@ -62,10 +62,10 @@ static struct net_device_stats *internal_dev_sys_stats(struct net_device *netdev
struct net_device_stats *stats = &netdev->stats;
#endif
#endif
- struct vport *vport = internal_dev_get_vport(netdev);
+ struct vport *vport = ovs_internal_dev_get_vport(netdev);
struct ovs_vport_stats vport_stats;
- vport_get_stats(vport, &vport_stats);
+ ovs_vport_get_stats(vport, &vport_stats);
/* The tx and rx stats need to be swapped because the
* switch and host OS have opposite perspectives. */
@@ -103,7 +103,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
OVS_CB(skb)->flow = NULL;
rcu_read_lock();
- vport_receive(internal_dev_priv(netdev)->vport, skb);
+ ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
rcu_read_unlock();
return 0;
}
@@ -151,17 +151,17 @@ static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
static int internal_dev_do_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
- if (dp_ioctl_hook)
- return dp_ioctl_hook(dev, ifr, cmd);
+ if (ovs_dp_ioctl_hook)
+ return ovs_dp_ioctl_hook(dev, ifr, cmd);
return -EOPNOTSUPP;
}
static void internal_dev_destructor(struct net_device *dev)
{
- struct vport *vport = internal_dev_get_vport(dev);
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
- vport_free(vport);
+ ovs_vport_free(vport);
free_netdev(dev);
}
@@ -223,8 +223,8 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
struct internal_dev *internal_dev;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport),
- &internal_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
@@ -254,7 +254,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
error_free_netdev:
free_netdev(netdev_vport->dev);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
@@ -295,24 +295,24 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
return len;
}
-const struct vport_ops internal_vport_ops = {
+const struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.flags = VPORT_F_REQUIRED | VPORT_F_FLOW,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
- .set_addr = netdev_set_addr,
- .get_name = netdev_get_name,
- .get_addr = netdev_get_addr,
- .get_kobj = netdev_get_kobj,
- .get_dev_flags = netdev_get_dev_flags,
- .is_running = netdev_is_running,
- .get_operstate = netdev_get_operstate,
- .get_ifindex = netdev_get_ifindex,
- .get_mtu = netdev_get_mtu,
+ .set_addr = ovs_netdev_set_addr,
+ .get_name = ovs_netdev_get_name,
+ .get_addr = ovs_netdev_get_addr,
+ .get_kobj = ovs_netdev_get_kobj,
+ .get_dev_flags = ovs_netdev_get_dev_flags,
+ .is_running = ovs_netdev_is_running,
+ .get_operstate = ovs_netdev_get_operstate,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .get_mtu = ovs_netdev_get_mtu,
.send = internal_dev_recv,
};
-int is_internal_dev(const struct net_device *netdev)
+int ovs_is_internal_dev(const struct net_device *netdev)
{
#ifdef HAVE_NET_DEVICE_OPS
return netdev->netdev_ops == &internal_dev_netdev_ops;
@@ -321,9 +321,9 @@ int is_internal_dev(const struct net_device *netdev)
#endif
}
-struct vport *internal_dev_get_vport(struct net_device *netdev)
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
- if (!is_internal_dev(netdev))
+ if (!ovs_is_internal_dev(netdev))
return NULL;
return internal_dev_priv(netdev)->vport;
diff --git a/datapath/vport-internal_dev.h b/datapath/vport-internal_dev.h
index 91002cbd..3454447c 100644
--- a/datapath/vport-internal_dev.h
+++ b/datapath/vport-internal_dev.h
@@ -22,7 +22,7 @@
#include "datapath.h"
#include "vport.h"
-int is_internal_dev(const struct net_device *);
-struct vport *internal_dev_get_vport(struct net_device *);
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
#endif /* vport-internal_dev.h */
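These two helpers are how the rest of the datapath (vport-netdev.c above is one caller) decides whether an arbitrary net_device is actually one of OVS's own internal ports. A minimal, hedged sketch of that caller pattern; describe_dev() is a hypothetical name used only for illustration:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include "datapath.h"
#include "vport-internal_dev.h"

/* Hypothetical caller: report which datapath a device belongs to, doing
 * nothing for foreign devices.  ovs_internal_dev_get_vport() returns NULL
 * unless ovs_is_internal_dev() is true, and the lookup must run under
 * rcu_read_lock() or RTNL. */
static void describe_dev(struct net_device *netdev)
{
        struct vport *vport;

        rcu_read_lock();
        vport = ovs_internal_dev_get_vport(netdev);
        if (vport)
                pr_info("%s is an internal port of datapath %s\n",
                        netdev->name, ovs_dp_name(vport->dp));
        rcu_read_unlock();
}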
diff --git a/datapath/vport-netdev.c b/datapath/vport-netdev.c
index 46283f3e..5e7eaa4f 100644
--- a/datapath/vport-netdev.c
+++ b/datapath/vport-netdev.c
@@ -57,7 +57,7 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
- vport = netdev_get_vport(skb->dev);
+ vport = ovs_netdev_get_vport(skb->dev);
netdev_port_receive(vport, skb);
@@ -130,8 +130,8 @@ static struct vport *netdev_create(const struct vport_parms *parms)
struct netdev_vport *netdev_vport;
int err;
- vport = vport_alloc(sizeof(struct netdev_vport),
- &netdev_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_netdev_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
@@ -147,7 +147,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
if (netdev_vport->dev->flags & IFF_LOOPBACK ||
netdev_vport->dev->type != ARPHRD_ETHER ||
- is_internal_dev(netdev_vport->dev)) {
+ ovs_is_internal_dev(netdev_vport->dev)) {
err = -EINVAL;
goto error_put;
}
@@ -168,7 +168,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
error_put:
dev_put(netdev_vport->dev);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
@@ -184,10 +184,10 @@ static void netdev_destroy(struct vport *vport)
synchronize_rcu();
dev_put(netdev_vport->dev);
- vport_free(vport);
+ ovs_vport_free(vport);
}
-int netdev_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_netdev_set_addr(struct vport *vport, const unsigned char *addr)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
struct sockaddr sa;
@@ -198,49 +198,49 @@ int netdev_set_addr(struct vport *vport, const unsigned char *addr)
return dev_set_mac_address(netdev_vport->dev, &sa);
}
-const char *netdev_get_name(const struct vport *vport)
+const char *ovs_netdev_get_name(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->name;
}
-const unsigned char *netdev_get_addr(const struct vport *vport)
+const unsigned char *ovs_netdev_get_addr(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->dev_addr;
}
-struct kobject *netdev_get_kobj(const struct vport *vport)
+struct kobject *ovs_netdev_get_kobj(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return &netdev_vport->dev->NETDEV_DEV_MEMBER.kobj;
}
-unsigned netdev_get_dev_flags(const struct vport *vport)
+unsigned ovs_netdev_get_dev_flags(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return dev_get_flags(netdev_vport->dev);
}
-int netdev_is_running(const struct vport *vport)
+int ovs_netdev_is_running(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netif_running(netdev_vport->dev);
}
-unsigned char netdev_get_operstate(const struct vport *vport)
+unsigned char ovs_netdev_get_operstate(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->operstate;
}
-int netdev_get_ifindex(const struct vport *vport)
+int ovs_netdev_get_ifindex(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->ifindex;
}
-int netdev_get_mtu(const struct vport *vport)
+int ovs_netdev_get_mtu(const struct vport *vport)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
return netdev_vport->dev->mtu;
@@ -270,7 +270,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
}
vlan_copy_skb_tci(skb);
- vport_receive(vport, skb);
+ ovs_vport_receive(vport, skb);
}
static unsigned packet_length(const struct sk_buff *skb)
@@ -305,7 +305,7 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
if (net_ratelimit())
pr_warn("%s: dropped over-mtu packet: %d > %d\n",
- dp_name(vport->dp), packet_length(skb), mtu);
+ ovs_dp_name(vport->dp), packet_length(skb), mtu);
goto error;
}
@@ -378,12 +378,12 @@ tag:
error:
kfree_skb(skb);
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
return 0;
}
/* Returns null if this device is not attached to a datapath. */
-struct vport *netdev_get_vport(struct net_device *dev)
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
@@ -399,22 +399,22 @@ struct vport *netdev_get_vport(struct net_device *dev)
#endif
}
-const struct vport_ops netdev_vport_ops = {
+const struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.flags = VPORT_F_REQUIRED,
.init = netdev_init,
.exit = netdev_exit,
.create = netdev_create,
.destroy = netdev_destroy,
- .set_addr = netdev_set_addr,
- .get_name = netdev_get_name,
- .get_addr = netdev_get_addr,
- .get_kobj = netdev_get_kobj,
- .get_dev_flags = netdev_get_dev_flags,
- .is_running = netdev_is_running,
- .get_operstate = netdev_get_operstate,
- .get_ifindex = netdev_get_ifindex,
- .get_mtu = netdev_get_mtu,
+ .set_addr = ovs_netdev_set_addr,
+ .get_name = ovs_netdev_get_name,
+ .get_addr = ovs_netdev_get_addr,
+ .get_kobj = ovs_netdev_get_kobj,
+ .get_dev_flags = ovs_netdev_get_dev_flags,
+ .is_running = ovs_netdev_is_running,
+ .get_operstate = ovs_netdev_get_operstate,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .get_mtu = ovs_netdev_get_mtu,
.send = netdev_send,
};
diff --git a/datapath/vport-netdev.h b/datapath/vport-netdev.h
index f9453c25..721810bd 100644
--- a/datapath/vport-netdev.h
+++ b/datapath/vport-netdev.h
@@ -23,7 +23,7 @@
#include "vport.h"
-struct vport *netdev_get_vport(struct net_device *dev);
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
struct netdev_vport {
struct net_device *dev;
@@ -35,15 +35,15 @@ netdev_vport_priv(const struct vport *vport)
return vport_priv(vport);
}
-int netdev_set_addr(struct vport *, const unsigned char *addr);
-const char *netdev_get_name(const struct vport *);
-const unsigned char *netdev_get_addr(const struct vport *);
-const char *netdev_get_config(const struct vport *);
-struct kobject *netdev_get_kobj(const struct vport *);
-unsigned netdev_get_dev_flags(const struct vport *);
-int netdev_is_running(const struct vport *);
-unsigned char netdev_get_operstate(const struct vport *);
-int netdev_get_ifindex(const struct vport *);
-int netdev_get_mtu(const struct vport *);
+int ovs_netdev_set_addr(struct vport *, const unsigned char *addr);
+const char *ovs_netdev_get_name(const struct vport *);
+const unsigned char *ovs_netdev_get_addr(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+struct kobject *ovs_netdev_get_kobj(const struct vport *);
+unsigned ovs_netdev_get_dev_flags(const struct vport *);
+int ovs_netdev_is_running(const struct vport *);
+unsigned char ovs_netdev_get_operstate(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+int ovs_netdev_get_mtu(const struct vport *);
#endif /* vport_netdev.h */
diff --git a/datapath/vport-patch.c b/datapath/vport-patch.c
index 55b8cec8..53b24b0f 100644
--- a/datapath/vport-patch.c
+++ b/datapath/vport-patch.c
@@ -137,8 +137,8 @@ static struct vport *patch_create(const struct vport_parms *parms)
struct patch_config *patchconf;
int err;
- vport = vport_alloc(sizeof(struct patch_vport),
- &patch_vport_ops, parms);
+ vport = ovs_vport_alloc(sizeof(struct patch_vport),
+ &ovs_patch_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
@@ -164,7 +164,7 @@ static struct vport *patch_create(const struct vport_parms *parms)
peer_name = patchconf->peer_name;
hlist_add_head(&patch_vport->hash_node, hash_bucket(peer_name));
- rcu_assign_pointer(patch_vport->peer, vport_locate(peer_name));
+ rcu_assign_pointer(patch_vport->peer, ovs_vport_locate(peer_name));
update_peers(patch_vport->name, vport);
return vport;
@@ -172,7 +172,7 @@ static struct vport *patch_create(const struct vport_parms *parms)
error_free_patchconf:
kfree(patchconf);
error_free_vport:
- vport_free(vport);
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
@@ -183,7 +183,7 @@ static void free_port_rcu(struct rcu_head *rcu)
struct patch_vport, rcu);
kfree((struct patch_config __force *)patch_vport->patchconf);
- vport_free(vport_from_priv(patch_vport));
+ ovs_vport_free(vport_from_priv(patch_vport));
}
static void patch_destroy(struct vport *vport)
@@ -216,7 +216,7 @@ static int patch_set_options(struct vport *vport, struct nlattr *options)
hlist_del(&patch_vport->hash_node);
- rcu_assign_pointer(patch_vport->peer, vport_locate(patchconf->peer_name));
+ rcu_assign_pointer(patch_vport->peer, ovs_vport_locate(patchconf->peer_name));
hlist_add_head(&patch_vport->hash_node, hash_bucket(patchconf->peer_name));
return 0;
@@ -287,16 +287,16 @@ static int patch_send(struct vport *vport, struct sk_buff *skb)
if (!peer) {
kfree_skb(skb);
- vport_record_error(vport, VPORT_E_TX_DROPPED);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
return 0;
}
- vport_receive(peer, skb);
+ ovs_vport_receive(peer, skb);
return skb_len;
}
-const struct vport_ops patch_vport_ops = {
+const struct vport_ops ovs_patch_vport_ops = {
.type = OVS_VPORT_TYPE_PATCH,
.init = patch_init,
.exit = patch_exit,
@@ -307,8 +307,8 @@ const struct vport_ops patch_vport_ops = {
.get_addr = patch_get_addr,
.get_options = patch_get_options,
.set_options = patch_set_options,
- .get_dev_flags = vport_gen_get_dev_flags,
- .is_running = vport_gen_is_running,
- .get_operstate = vport_gen_get_operstate,
+ .get_dev_flags = ovs_vport_gen_get_dev_flags,
+ .is_running = ovs_vport_gen_is_running,
+ .get_operstate = ovs_vport_gen_get_operstate,
.send = patch_send,
};
diff --git a/datapath/vport.c b/datapath/vport.c
index 04ae50e5..9881fb86 100644
--- a/datapath/vport.c
+++ b/datapath/vport.c
@@ -35,12 +35,12 @@
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the bottom of vport.h. */
static const struct vport_ops *base_vport_ops_list[] = {
- &netdev_vport_ops,
- &internal_vport_ops,
- &patch_vport_ops,
- &gre_vport_ops,
+ &ovs_netdev_vport_ops,
+ &ovs_internal_vport_ops,
+ &ovs_patch_vport_ops,
+ &ovs_gre_vport_ops,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- &capwap_vport_ops,
+ &ovs_capwap_vport_ops,
#endif
};
@@ -52,12 +52,12 @@ static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
/**
- * vport_init - initialize vport subsystem
+ * ovs_vport_init - initialize vport subsystem
*
* Called at module load time to initialize the vport subsystem and any
* compiled in vport types.
*/
-int vport_init(void)
+int ovs_vport_init(void)
{
int err;
int i;
@@ -87,7 +87,7 @@ int vport_init(void)
if (!err)
vport_ops_list[n_vport_types++] = new_ops;
else if (new_ops->flags & VPORT_F_REQUIRED) {
- vport_exit();
+ ovs_vport_exit();
goto error;
}
}
@@ -101,12 +101,12 @@ error:
}
/**
- * vport_exit - shutdown vport subsystem
+ * ovs_vport_exit - shutdown vport subsystem
*
* Called at module exit time to shutdown the vport subsystem and any
* initialized vport types.
*/
-void vport_exit(void)
+void ovs_vport_exit(void)
{
int i;
@@ -126,13 +126,13 @@ static struct hlist_head *hash_bucket(const char *name)
}
/**
- * vport_locate - find a port that has already been created
+ * ovs_vport_locate - find a port that has already been created
*
* @name: name of port to find
*
* Must be called with RTNL or RCU read lock.
*/
-struct vport *vport_locate(const char *name)
+struct vport *ovs_vport_locate(const char *name)
{
struct hlist_head *bucket = hash_bucket(name);
struct vport *vport;
@@ -153,13 +153,13 @@ static void release_vport(struct kobject *kobj)
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
- .sysfs_ops = &brport_sysfs_ops,
+ .sysfs_ops = &ovs_brport_sysfs_ops,
#endif
.release = release_vport
};
/**
- * vport_alloc - allocate and initialize new vport
+ * ovs_vport_alloc - allocate and initialize new vport
*
* @priv_size: Size of private data area to allocate.
* @ops: vport device ops
@@ -167,10 +167,10 @@ static struct kobj_type brport_ktype = {
* Allocate and initialize a new vport defined by @ops. The vport will contain
* a private data area of size @priv_size that can be accessed using
* vport_priv(). vports that are no longer needed should be released with
- * vport_free().
+ * ovs_vport_free().
*/
-struct vport *vport_alloc(int priv_size, const struct vport_ops *ops,
- const struct vport_parms *parms)
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+ const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
@@ -205,16 +205,16 @@ struct vport *vport_alloc(int priv_size, const struct vport_ops *ops,
}
/**
- * vport_free - uninitialize and free vport
+ * ovs_vport_free - uninitialize and free vport
*
* @vport: vport to free
*
- * Frees a vport allocated with vport_alloc() when it is no longer needed.
+ * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
*
* The caller must ensure that an RCU grace period has passed since the last
* time @vport was in a datapath.
*/
-void vport_free(struct vport *vport)
+void ovs_vport_free(struct vport *vport)
{
free_percpu(vport->percpu_stats);
@@ -222,14 +222,14 @@ void vport_free(struct vport *vport)
}
/**
- * vport_add - add vport device (for kernel callers)
+ * ovs_vport_add - add vport device (for kernel callers)
*
* @parms: Information about new vport.
*
* Creates a new vport with the specified configuration (which is dependent on
* device type). RTNL lock must be held.
*/
-struct vport *vport_add(const struct vport_parms *parms)
+struct vport *ovs_vport_add(const struct vport_parms *parms)
{
struct vport *vport;
int err = 0;
@@ -258,7 +258,7 @@ out:
}
/**
- * vport_set_options - modify existing vport device (for kernel callers)
+ * ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
* @port: New configuration.
@@ -266,7 +266,7 @@ out:
* Modifies an existing device with the specified configuration (which is
* dependent on device type). RTNL lock must be held.
*/
-int vport_set_options(struct vport *vport, struct nlattr *options)
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
ASSERT_RTNL();
@@ -276,14 +276,14 @@ int vport_set_options(struct vport *vport, struct nlattr *options)
}
/**
- * vport_del - delete existing vport device
+ * ovs_vport_del - delete existing vport device
*
* @vport: vport to delete.
*
* Detaches @vport from its datapath and destroys it. It is possible to fail
* for reasons such as lack of memory. RTNL lock must be held.
*/
-void vport_del(struct vport *vport)
+void ovs_vport_del(struct vport *vport)
{
ASSERT_RTNL();
@@ -293,7 +293,7 @@ void vport_del(struct vport *vport)
}
/**
- * vport_set_addr - set device Ethernet address (for kernel callers)
+ * ovs_vport_set_addr - set device Ethernet address (for kernel callers)
*
* @vport: vport on which to set Ethernet address.
* @addr: New address.
@@ -302,7 +302,7 @@ void vport_del(struct vport *vport)
* setting the Ethernet address, in which case the result will always be
* -EOPNOTSUPP. RTNL lock must be held.
*/
-int vport_set_addr(struct vport *vport, const unsigned char *addr)
+int ovs_vport_set_addr(struct vport *vport, const unsigned char *addr)
{
ASSERT_RTNL();
@@ -316,7 +316,7 @@ int vport_set_addr(struct vport *vport, const unsigned char *addr)
}
/**
- * vport_set_stats - sets offset device stats
+ * ovs_vport_set_stats - sets offset device stats
*
* @vport: vport on which to set stats
* @stats: stats to set
@@ -328,7 +328,7 @@ int vport_set_addr(struct vport *vport, const unsigned char *addr)
*
* Must be called with RTNL lock.
*/
-void vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
+void ovs_vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
ASSERT_RTNL();
@@ -338,7 +338,7 @@ void vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
}
/**
- * vport_get_stats - retrieve device stats
+ * ovs_vport_get_stats - retrieve device stats
*
* @vport: vport from which to retrieve the stats
* @stats: location to store stats
@@ -347,7 +347,7 @@ void vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
*
* Must be called with RTNL lock or rcu_read_lock.
*/
-void vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
int i;
@@ -393,7 +393,7 @@ void vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
}
/**
- * vport_get_options - retrieve device options
+ * ovs_vport_get_options - retrieve device options
*
* @vport: vport from which to retrieve the options.
* @skb: sk_buff where options should be appended.
@@ -408,7 +408,7 @@ void vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
*
* Must be called with RTNL lock or rcu_read_lock.
*/
-int vport_get_options(const struct vport *vport, struct sk_buff *skb)
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct nlattr *nla;
@@ -429,7 +429,7 @@ int vport_get_options(const struct vport *vport, struct sk_buff *skb)
}
/**
- * vport_receive - pass up received packet to the datapath for processing
+ * ovs_vport_receive - pass up received packet to the datapath for processing
*
* @vport: vport that received the packet
* @skb: skb that was received
@@ -438,7 +438,7 @@ int vport_get_options(const struct vport *vport, struct sk_buff *skb)
* skb->data should point to the Ethernet header. The caller must have already
* called compute_ip_summed() to initialize the checksumming fields.
*/
-void vport_receive(struct vport *vport, struct sk_buff *skb)
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
{
struct vport_percpu_stats *stats;
@@ -455,11 +455,11 @@ void vport_receive(struct vport *vport, struct sk_buff *skb)
if (!(vport->ops->flags & VPORT_F_TUN_ID))
OVS_CB(skb)->tun_id = 0;
- dp_process_received_packet(vport, skb);
+ ovs_dp_process_received_packet(vport, skb);
}
/**
- * vport_send - send a packet on a device
+ * ovs_vport_send - send a packet on a device
*
* @vport: vport on which to send the packet
* @skb: skb to send
@@ -467,7 +467,7 @@ void vport_receive(struct vport *vport, struct sk_buff *skb)
* Sends the given packet and returns the length of data sent. Either RTNL
* lock or rcu_read_lock must be held.
*/
-int vport_send(struct vport *vport, struct sk_buff *skb)
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
int sent = vport->ops->send(vport, skb);
@@ -485,7 +485,7 @@ int vport_send(struct vport *vport, struct sk_buff *skb)
}
/**
- * vport_record_error - indicate device error to generic stats layer
+ * ovs_vport_record_error - indicate device error to generic stats layer
*
* @vport: vport that encountered the error
* @err_type: one of enum vport_err_type types to indicate the error type
@@ -493,7 +493,7 @@ int vport_send(struct vport *vport, struct sk_buff *skb)
* If using the vport generic stats layer indicate that an error of the given
* type has occurred.
*/
-void vport_record_error(struct vport *vport, enum vport_err_type err_type)
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
spin_lock(&vport->stats_lock);
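Taken together, the ovs_vport_alloc()/ovs_vport_free() comments above define the ownership contract every port type follows; the patch and netdev ports changed earlier in these hunks are real instances. A hedged sketch of that lifecycle for an entirely hypothetical "foo" port type, invented here purely for illustration:

#include <linux/err.h>
#include <linux/rcupdate.h>

#include "vport.h"

/* Hypothetical private state carried in the vport's private area. */
struct foo_vport {
        u32 example_state;
};

static struct vport *foo_create(const struct vport_parms *parms);
static void foo_destroy(struct vport *vport);

const struct vport_ops ovs_foo_vport_ops = {
        /* .type, .get_name, .send, ... omitted in this sketch. */
        .create  = foo_create,
        .destroy = foo_destroy,
};

static struct vport *foo_create(const struct vport_parms *parms)
{
        struct vport *vport;
        struct foo_vport *foo;

        vport = ovs_vport_alloc(sizeof(struct foo_vport),
                                &ovs_foo_vport_ops, parms);
        if (IS_ERR(vport))
                return vport;           /* propagate the ERR_PTR() */

        foo = vport_priv(vport);        /* private area sized above */
        foo->example_state = parms->port_no;

        return vport;
}

static void foo_destroy(struct vport *vport)
{
        /* Per the comment on ovs_vport_free(), an RCU grace period must
         * pass first; real ports use call_rcu() (vport-patch.c) or
         * synchronize_rcu() (vport-netdev.c). */
        synchronize_rcu();
        ovs_vport_free(vport);
}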
diff --git a/datapath/vport.h b/datapath/vport.h
index 8f03f2e9..44cf6033 100644
--- a/datapath/vport.h
+++ b/datapath/vport.h
@@ -32,22 +32,22 @@ struct vport_parms;
/* The following definitions are for users of the vport subsystem: */
-int vport_init(void);
-void vport_exit(void);
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
-struct vport *vport_add(const struct vport_parms *);
-void vport_del(struct vport *);
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
-struct vport *vport_locate(const char *name);
+struct vport *ovs_vport_locate(const char *name);
-int vport_set_addr(struct vport *, const unsigned char *);
-void vport_set_stats(struct vport *, struct ovs_vport_stats *);
-void vport_get_stats(struct vport *, struct ovs_vport_stats *);
+int ovs_vport_set_addr(struct vport *, const unsigned char *);
+void ovs_vport_set_stats(struct vport *, struct ovs_vport_stats *);
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
-int vport_set_options(struct vport *, struct nlattr *options);
-int vport_get_options(const struct vport *, struct sk_buff *);
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
-int vport_send(struct vport *, struct sk_buff *);
+int ovs_vport_send(struct vport *, struct sk_buff *);
/* The following definitions are for implementers of vport devices: */
@@ -124,7 +124,7 @@ struct vport_parms {
enum ovs_vport_type type;
struct nlattr *options;
- /* For vport_alloc(). */
+ /* For ovs_vport_alloc(). */
struct datapath *dp;
u16 port_no;
u32 upcall_pid;
@@ -141,7 +141,7 @@ struct vport_parms {
* not set and initialization fails then no vports of this type can be created.
* @exit: Called at module unload.
* @create: Create a new vport configured as specified. On success returns
- * a new vport allocated with vport_alloc(), otherwise an ERR_PTR() value.
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
* @destroy: Destroys a vport. Must call vport_free() on the vport but not
* before an RCU grace period has elapsed.
* @set_options: Modify the configuration of an existing vport. May be %NULL
@@ -205,9 +205,9 @@ enum vport_err_type {
VPORT_E_TX_ERROR,
};
-struct vport *vport_alloc(int priv_size, const struct vport_ops *,
- const struct vport_parms *);
-void vport_free(struct vport *);
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+ const struct vport_parms *);
+void ovs_vport_free(struct vport *);
#define VPORT_ALIGN 8
@@ -240,15 +240,15 @@ static inline struct vport *vport_from_priv(const void *priv)
return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
}
-void vport_receive(struct vport *, struct sk_buff *);
-void vport_record_error(struct vport *, enum vport_err_type err_type);
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
-extern const struct vport_ops netdev_vport_ops;
-extern const struct vport_ops internal_vport_ops;
-extern const struct vport_ops patch_vport_ops;
-extern const struct vport_ops gre_vport_ops;
-extern const struct vport_ops capwap_vport_ops;
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+extern const struct vport_ops ovs_patch_vport_ops;
+extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_capwap_vport_ops;
#endif /* vport.h */
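Finally, the paired reminders in vport.c ("add yours to the list at the bottom of vport.h") and vport.h ("add yours to the list at the top of vport.c") spell out how a statically compiled port type gets registered. Continuing the hypothetical ovs_foo_vport_ops sketched earlier, the wiring would look roughly like this (illustrative only, not part of the patch):

/* vport.h: declare the ops table next to the extern lines renamed above. */
extern const struct vport_ops ovs_foo_vport_ops;       /* hypothetical */

/* vport.c: append it to base_vport_ops_list[], which ovs_vport_init()
 * walks at module load; an init failure aborts the load only for
 * VPORT_F_REQUIRED types. */
static const struct vport_ops *base_vport_ops_list[] = {
        &ovs_netdev_vport_ops,
        &ovs_internal_vport_ops,
        &ovs_patch_vport_ops,
        &ovs_gre_vport_ops,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
        &ovs_capwap_vport_ops,
#endif
        &ovs_foo_vport_ops,             /* hypothetical addition */
};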