Diffstat (limited to 'drivers/staging/batman-adv/send.c')
-rw-r--r--  drivers/staging/batman-adv/send.c | 172
1 file changed, 97 insertions(+), 75 deletions(-)
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index eb617508cca4..2a9fac8c240e 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -21,16 +21,14 @@
#include "main.h"
#include "send.h"
-#include "log.h"
#include "routing.h"
#include "translation-table.h"
+#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"
-#include "compat.h"
-
/* apply hop penalty for a normal link */
static uint8_t hop_penalty(const uint8_t tq)
{
@@ -59,51 +57,69 @@ static unsigned long forward_send_time(void)
return send_time;
}
-/* sends a raw packet. */
-void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- struct batman_if *batman_if, uint8_t *dst_addr)
+/* send out an already prepared packet to the given address via the
+ * specified batman interface */
+int send_skb_packet(struct sk_buff *skb,
+ struct batman_if *batman_if,
+ uint8_t *dst_addr)
{
struct ethhdr *ethhdr;
- struct sk_buff *skb;
- int retval;
- char *data;
if (batman_if->if_active != IF_ACTIVE)
- return;
+ goto send_skb_err;
+
+ if (unlikely(!batman_if->net_dev))
+ goto send_skb_err;
if (!(batman_if->net_dev->flags & IFF_UP)) {
- debug_log(LOG_TYPE_WARN,
- "Interface %s is not up - can't send packet via that interface (IF_TO_BE_DEACTIVATED was here) !\n",
- batman_if->dev);
- return;
+ printk(KERN_WARNING
+ "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
+ batman_if->dev);
+ goto send_skb_err;
}
- skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
- if (!skb)
- return;
- data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+ /* push to the ethernet header. */
+ if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
+ goto send_skb_err;
- memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+ skb_reset_mac_header(skb);
- ethhdr = (struct ethhdr *) data;
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
- skb_reset_mac_header(skb);
skb_set_network_header(skb, ETH_HLEN);
skb->priority = TC_PRIO_CONTROL;
skb->protocol = __constant_htons(ETH_P_BATMAN);
+
skb->dev = batman_if->net_dev;
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error. */
- retval = dev_queue_xmit(skb);
- if (retval < 0)
- debug_log(LOG_TYPE_CRIT,
- "Can't write to raw socket (IF_TO_BE_DEACTIVATED was here): %i\n",
- retval);
+
+ return dev_queue_xmit(skb);
+send_skb_err:
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
+/* sends a raw packet. */
+void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
+ struct batman_if *batman_if, uint8_t *dst_addr)
+{
+ struct sk_buff *skb;
+ char *data;
+
+ skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
+ if (!skb)
+ return;
+ data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+ memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+ /* pull back to the batman "network header" */
+ skb_pull(skb, sizeof(struct ethhdr));
+ send_skb_packet(skb, batman_if, dst_addr);
}
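
[Editor's note, not part of this patch: send_skb_packet() hands the result of dev_queue_xmit() straight back to the caller and frees the skb itself on its error paths, so the skb is consumed in every case. A minimal caller sketch, with the hypothetical name bat_try_send():

/* Illustrative sketch only: interpreting send_skb_packet()'s
 * return value. */
static int bat_try_send(struct sk_buff *skb, struct batman_if *batman_if,
			uint8_t *dst_addr)
{
	int ret = send_skb_packet(skb, batman_if, dst_addr);

	/* the skb is consumed on every path; never touch it again here */
	if (ret < 0)
		return ret;	/* hard error from dev_queue_xmit() */

	/* NET_XMIT_DROP (> 0) is a congestion drop, not an error */
	return 0;
}
]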
/* Send a packet to a given interface */
@@ -114,7 +130,6 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
uint8_t packet_num;
int16_t buff_pos;
struct batman_packet *batman_packet;
- char orig_str[ETH_STR_LEN];
if (batman_if->if_active != IF_ACTIVE)
return;
@@ -136,19 +151,18 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
else
batman_packet->flags &= ~DIRECTLINK;
- addr_to_string(orig_str, batman_packet->orig);
fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
"Sending own" :
"Forwarding"));
- debug_log(LOG_TYPE_BATMAN,
- "%s %spacket (originator %s, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
- fwd_str,
- (packet_num > 0 ? "aggregated " : ""),
- orig_str, ntohs(batman_packet->seqno),
- batman_packet->tq, batman_packet->ttl,
- (batman_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_if->dev, batman_if->addr_str);
+ bat_dbg(DBG_BATMAN,
+ "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
+ fwd_str,
+ (packet_num > 0 ? "aggregated " : ""),
+ batman_packet->orig, ntohs(batman_packet->seqno),
+ batman_packet->tq, batman_packet->ttl,
+ (batman_packet->flags & DIRECTLINK ?
+ "on" : "off"),
+ batman_if->dev, batman_if->addr_str);
buff_pos += sizeof(struct batman_packet) +
(batman_packet->num_hna * ETH_ALEN);
@@ -168,32 +182,28 @@ static void send_packet(struct forw_packet *forw_packet)
struct batman_if *batman_if;
struct batman_packet *batman_packet =
(struct batman_packet *)(forw_packet->packet_buff);
- char orig_str[ETH_STR_LEN];
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
- debug_log(LOG_TYPE_CRIT,
- "Error - can't forward packet: incoming iface not specified\n");
+ printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
return;
}
if (forw_packet->if_incoming->if_active != IF_ACTIVE)
return;
- addr_to_string(orig_str, batman_packet->orig);
-
/* multihomed peer assumed */
/* non-primary OGMs are only broadcasted on their interface */
if ((directlink && (batman_packet->ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
/* FIXME: what about aggregated packets ? */
- debug_log(LOG_TYPE_BATMAN,
- "%s packet (originator %s, seqno %d, TTL %d) on interface %s [%s]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- orig_str, ntohs(batman_packet->seqno),
- batman_packet->ttl, forw_packet->if_incoming->dev,
- forw_packet->if_incoming->addr_str);
+ bat_dbg(DBG_BATMAN,
+ "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
+ (forw_packet->own ? "Sending own" : "Forwarding"),
+ batman_packet->orig, ntohs(batman_packet->seqno),
+ batman_packet->ttl, forw_packet->if_incoming->dev,
+ forw_packet->if_incoming->addr_str);
send_raw_packet(forw_packet->packet_buff,
forw_packet->packet_len,
@@ -238,6 +248,7 @@ void schedule_own_packet(struct batman_if *batman_if)
{
unsigned long send_time;
struct batman_packet *batman_packet;
+ int vis_server = atomic_read(&vis_mode);
/**
* the interface gets activated here to avoid race conditions between
@@ -262,7 +273,7 @@ void schedule_own_packet(struct batman_if *batman_if)
/* change sequence number to network order */
batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));
- if (is_vis_server())
+ if (vis_server == VIS_TYPE_SERVER_SYNC)
batman_packet->flags = VIS_SERVER;
else
batman_packet->flags = 0;
@@ -286,7 +297,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
unsigned long send_time;
if (batman_packet->ttl <= 1) {
- debug_log(LOG_TYPE_BATMAN, "ttl exceeded \n");
+ bat_dbg(DBG_BATMAN, "ttl exceeded \n");
return;
}
@@ -314,9 +325,9 @@ void schedule_forward_packet(struct orig_node *orig_node,
/* apply hop penalty */
batman_packet->tq = hop_penalty(batman_packet->tq);
- debug_log(LOG_TYPE_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
- in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
- batman_packet->ttl);
+ bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
+ in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
+ batman_packet->ttl);
batman_packet->seqno = htons(batman_packet->seqno);
@@ -333,6 +344,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
static void forw_packet_free(struct forw_packet *forw_packet)
{
+ if (forw_packet->skb)
+ kfree_skb(forw_packet->skb);
kfree(forw_packet->packet_buff);
kfree(forw_packet);
}
@@ -340,12 +353,13 @@ static void forw_packet_free(struct forw_packet *forw_packet)
static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
unsigned long send_time)
{
+ unsigned long flags;
INIT_HLIST_NODE(&forw_packet->list);
/* add new packet to packet list */
- spin_lock(&forw_bcast_list_lock);
+ spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_add_head(&forw_packet->list, &forw_bcast_list);
- spin_unlock(&forw_bcast_list_lock);
+ spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
@@ -354,7 +368,7 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
send_time);
}
-void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
+void add_bcast_packet_to_list(struct sk_buff *skb)
{
struct forw_packet *forw_packet;
@@ -362,14 +376,16 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
if (!forw_packet)
return;
- forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
- if (!forw_packet->packet_buff) {
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
kfree(forw_packet);
return;
}
- forw_packet->packet_len = packet_len;
- memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);
+ skb_reset_mac_header(skb);
+
+ forw_packet->skb = skb;
+ forw_packet->packet_buff = NULL;
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
@@ -384,16 +400,20 @@ void send_outstanding_bcast_packet(struct work_struct *work)
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
+ unsigned long flags;
+ struct sk_buff *skb1;
- spin_lock(&forw_bcast_list_lock);
+ spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock(&forw_bcast_list_lock);
+ spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- send_raw_packet(forw_packet->packet_buff,
- forw_packet->packet_len,
+ /* send a copy of the saved skb */
+ skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
+ if (skb1)
+ send_skb_packet(skb1,
batman_if, broadcastAddr);
}
rcu_read_unlock();
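
[Editor's note, not part of this patch: the loop uses skb_copy() rather than skb_clone() because dev_queue_xmit() consumes whatever skb it is handed and send_skb_packet() rewrites the ethernet header in place, so each interface needs a private copy while the saved forw_packet->skb stays intact for the next rebroadcast round. A clone would share the data area and the per-interface header writes would trample each other. The pattern, condensed from the loop above:

	/* skb_copy(), not skb_clone(): the data buffer must be private
	 * because send_skb_packet() writes the ethernet header into it */
	skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
	if (skb1)	/* on allocation failure, just skip this interface */
		send_skb_packet(skb1, batman_if, broadcastAddr);
]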
@@ -415,10 +435,11 @@ void send_outstanding_bat_packet(struct work_struct *work)
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
+ unsigned long flags;
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
send_packet(forw_packet);
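
[Editor's note, not part of this patch: the spin_lock() to spin_lock_irqsave() conversions throughout this patch follow the standard kernel rule for a lock taken from contexts whose interrupt state may differ (here, the delayed-work handlers and their callers): save the current irq flags and restore exactly those. A minimal sketch of the pattern with the names from this file:

	/* save the irq state on entry and restore it on exit, instead
	 * of assuming interrupts were enabled beforehand */
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);	/* the protected operation */
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
]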
@@ -438,38 +459,39 @@ void purge_outstanding_packets(void)
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
+ unsigned long flags;
- debug_log(LOG_TYPE_BATMAN, "purge_outstanding_packets()\n");
+ bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
/* free bcast list */
- spin_lock(&forw_bcast_list_lock);
+ spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bcast_list, list) {
- spin_unlock(&forw_bcast_list_lock);
+ spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/**
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock(&forw_bcast_list_lock);
+ spin_lock_irqsave(&forw_bcast_list_lock, flags);
}
- spin_unlock(&forw_bcast_list_lock);
+ spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* free batman packet list */
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bat_list, list) {
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
}
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}
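
[Editor's note, not part of this patch: the repeated unlock/relock around cancel_delayed_work_sync() above is deliberate. The work handlers take the same list lock to unlink themselves, so sleeping in cancel_delayed_work_sync() while still holding the lock could deadlock against the very handler being cancelled. The rule, condensed from purge_outstanding_packets():

	/* never wait for a work handler while holding a lock that the
	 * handler itself takes */
	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
	cancel_delayed_work_sync(&forw_packet->delayed_work);	/* may sleep */
	spin_lock_irqsave(&forw_bcast_list_lock, flags);
]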