int own_packet)
{
struct forw_packet *forw_packet_aggr;
+ unsigned long flags;
forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet_aggr)
packet_buff,
forw_packet_aggr->packet_len);
+ forw_packet_aggr->skb = NULL;
forw_packet_aggr->own = own_packet;
forw_packet_aggr->if_incoming = if_incoming;
forw_packet_aggr->num_packets = 0;
forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
struct batman_packet *batman_packet =
(struct batman_packet *)packet_buff;
bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
+ unsigned long flags;
/* find position for the packet in the forward queue */
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
if ((atomic_read(&aggregation_enabled)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
* suitable aggregation packet found */
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
new_aggregated_packet(packet_buff, packet_len,
send_time, direct_link,
if_incoming, own_packet);
aggregate(forw_packet_aggr,
packet_buff, packet_len,
direct_link);
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}
}
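The hunks above show the locking change this patch applies throughout: every plain spin_lock()/spin_unlock() pair becomes its IRQ-saving variant with a caller-local flags word, so the locks can also be taken safely from contexts where interrupts must stay disabled. A minimal sketch of the idiom, with a hypothetical lock and list (names are illustrative, not from this driver):

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static LIST_HEAD(example_list);		/* hypothetical list */

static void example_add(struct list_head *entry)
{
	unsigned long flags;

	/* save the current IRQ state, disable local IRQs, take the lock */
	spin_lock_irqsave(&example_lock, flags);
	list_add(entry, &example_list);
	/* release the lock, then restore the saved IRQ state */
	spin_unlock_irqrestore(&example_lock, flags);
}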
(struct device_client *)file->private_data;
struct device_packet *device_packet;
struct list_head *list_pos, *list_pos_tmp;
+ unsigned long flags;
- spin_lock(&device_client->lock);
+ spin_lock_irqsave(&device_client->lock, flags);
/* for all packets in the queue ... */
list_for_each_safe(list_pos, list_pos_tmp, &device_client->queue_list) {
}
device_client_hash[device_client->index] = NULL;
- spin_unlock(&device_client->lock);
+ spin_unlock_irqrestore(&device_client->lock, flags);
kfree(device_client);
dec_module_count();
(struct device_client *)file->private_data;
struct device_packet *device_packet;
int error;
+ unsigned long flags;
if ((file->f_flags & O_NONBLOCK) && (device_client->queue_len == 0))
return -EAGAIN;
if (error)
return error;
- spin_lock(&device_client->lock);
+ spin_lock_irqsave(&device_client->lock, flags);
device_packet = list_first_entry(&device_client->queue_list,
struct device_packet, list);
list_del(&device_packet->list);
device_client->queue_len--;
- spin_unlock(&device_client->lock);
+ spin_unlock_irqrestore(&device_client->lock, flags);
error = __copy_to_user(buf, &device_packet->icmp_packet,
sizeof(struct icmp_packet));
struct icmp_packet icmp_packet;
struct orig_node *orig_node;
struct batman_if *batman_if;
+ unsigned long flags;
if (len < sizeof(struct icmp_packet)) {
bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: invalid packet size\n");
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto dst_unreach;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
if (!orig_node)
sizeof(struct icmp_packet),
batman_if, orig_node->router->addr);
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
goto out;
unlock:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
dst_unreach:
icmp_packet.msg_type = DESTINATION_UNREACHABLE;
bat_device_add_packet(device_client, &icmp_packet);
struct icmp_packet *icmp_packet)
{
struct device_packet *device_packet;
+ unsigned long flags;
device_packet = kmalloc(sizeof(struct device_packet), GFP_KERNEL);
memcpy(&device_packet->icmp_packet, icmp_packet,
sizeof(struct icmp_packet));
- spin_lock(&device_client->lock);
+ spin_lock_irqsave(&device_client->lock, flags);
/* while waiting for the lock the device_client could have been
* deleted */
if (!device_client_hash[icmp_packet->uid]) {
- spin_unlock(&device_client->lock);
+ spin_unlock_irqrestore(&device_client->lock, flags);
kfree(device_packet);
return;
}
device_client->queue_len--;
}
- spin_unlock(&device_client->lock);
+ spin_unlock_irqrestore(&device_client->lock, flags);
wake_up(&device_client->queue_wait);
}
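The re-check of device_client_hash[] above is the usual guard against a teardown that raced with us while we waited for the lock: state checked before acquiring the lock is validated again once the lock is held. The general shape of the pattern, sketched with hypothetical names:

	spin_lock_irqsave(&table_lock, flags);
	if (!object_table[id]) {
		/* the object was torn down while we waited for the
		 * lock: release it and discard the prepared work */
		spin_unlock_irqrestore(&table_lock, flags);
		kfree(prepared_item);
		return;
	}
	/* the object is guaranteed to stay alive until we unlock */
	spin_unlock_irqrestore(&table_lock, flags);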
if (batman_if->if_active != IF_ACTIVE)
return;
- if (batman_if->raw_sock)
- sock_release(batman_if->raw_sock);
-
/**
* batman_if->net_dev has been acquired by dev_get_by_name() in
* proc_interfaces_write() and has to be unreferenced.
if (batman_if->net_dev)
dev_put(batman_if->net_dev);
- batman_if->raw_sock = NULL;
- batman_if->net_dev = NULL;
-
batman_if->if_active = IF_INACTIVE;
active_ifs--;
/* (re)activate given interface. */
static void hardif_activate_interface(struct batman_if *batman_if)
{
- struct sockaddr_ll bind_addr;
- int retval;
-
if (batman_if->if_active != IF_INACTIVE)
return;
if (!batman_if->net_dev)
goto dev_err;
- retval = sock_create_kern(PF_PACKET, SOCK_RAW,
- __constant_htons(ETH_P_BATMAN),
- &batman_if->raw_sock);
-
- if (retval < 0) {
- printk(KERN_ERR "batman-adv:Can't create raw socket: %i\n",
- retval);
- goto sock_err;
- }
-
- bind_addr.sll_family = AF_PACKET;
- bind_addr.sll_ifindex = batman_if->net_dev->ifindex;
- bind_addr.sll_protocol = 0; /* is set by the kernel */
-
- retval = kernel_bind(batman_if->raw_sock,
- (struct sockaddr *)&bind_addr, sizeof(bind_addr));
-
- if (retval < 0) {
- printk(KERN_ERR "batman-adv:Can't create bind raw socket: %i\n",
- retval);
- goto bind_err;
- }
-
check_known_mac_addr(batman_if->net_dev->dev_addr);
- batman_if->raw_sock->sk->sk_user_data =
- batman_if->raw_sock->sk->sk_data_ready;
- batman_if->raw_sock->sk->sk_data_ready = batman_data_ready;
-
addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
return;
-bind_err:
- sock_release(batman_if->raw_sock);
-sock_err:
- dev_put(batman_if->net_dev);
dev_err:
- batman_if->raw_sock = NULL;
batman_if->net_dev = NULL;
}
struct batman_if *batman_if;
struct batman_packet *batman_packet;
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
batman_if = kmalloc(sizeof(struct batman_if), GFP_KERNEL);
return -1;
}
- batman_if->raw_sock = NULL;
batman_if->net_dev = NULL;
if ((if_num == 0) && (num_hna > 0))
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
if (resize_orig(orig_node, if_num) == -1) {
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
goto out;
}
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
if (!hardif_is_interface_up(batman_if->dev))
printk(KERN_ERR "batman-adv:Not using interface %s (retrying later): interface not active\n", batman_if->dev);
return NOTIFY_DONE;
}
+/* find the batman interface for a given net device; takes and drops
+ * the RCU read lock internally */
+static struct batman_if *find_batman_if(struct net_device *dev)
+{
+ struct batman_if *batman_if;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if (batman_if->net_dev == dev) {
+ rcu_read_unlock();
+ return batman_if;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+/* receive a packet with the batman ethertype coming on a hard
+ * interface */
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+{
+ struct batman_packet *batman_packet;
+ struct batman_if *batman_if;
+ struct net_device_stats *stats;
+ int ret;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ if (skb == NULL)
+ goto err_free;
+
+ /* packet should hold at least type and version */
+ if (unlikely(skb_headlen(skb) < 2))
+ goto err_free;
+
+ /* expect a valid ethernet header here. */
+ if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
+ !skb_mac_header(skb)))
+ goto err_free;
+
+ batman_if = find_batman_if(skb->dev);
+ if (!batman_if)
+ goto err_free;
+
+ stats = &skb->dev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ batman_packet = (struct batman_packet *)skb->data;
+
+ if (batman_packet->version != COMPAT_VERSION) {
+ bat_dbg(DBG_BATMAN,
+ "Drop packet: incompatible batman version (%i)\n",
+ batman_packet->version);
+ goto err_free;
+ }
+
+ /* every receive handler returns whether it consumed (kept or
+ * reused) the supplied skb; if not, we have to free it here. */
+
+ switch (batman_packet->packet_type) {
+ /* batman originator packet */
+ case BAT_PACKET:
+ ret = recv_bat_packet(skb, batman_if);
+ break;
+
+ /* batman icmp packet */
+ case BAT_ICMP:
+ ret = recv_icmp_packet(skb);
+ break;
+
+ /* unicast packet */
+ case BAT_UNICAST:
+ ret = recv_unicast_packet(skb);
+ break;
+
+ /* broadcast packet */
+ case BAT_BCAST:
+ ret = recv_bcast_packet(skb);
+ break;
+
+ /* vis packet */
+ case BAT_VIS:
+ ret = recv_vis_packet(skb);
+ break;
+
+ default:
+ ret = NET_RX_DROP;
+ }
+ if (ret == NET_RX_DROP)
+ kfree_skb(skb);
+
+ /* return NET_RX_SUCCESS in any case: if the packet was dropped,
+ * it was most probably dropped for routing-logic reasons
+ * rather than because of an error. */
+
+ return NET_RX_SUCCESS;
+
+err_free:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
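Under the convention noted in the dispatch above, every recv_*() handler shares one contract. A minimal handler obeying it could look like this (a sketch; struct example_packet is hypothetical):

static int recv_example_packet(struct sk_buff *skb)
{
	/* too little linear data for our header: the caller still owns
	 * the skb and frees it when it sees NET_RX_DROP */
	if (skb_headlen(skb) < sizeof(struct example_packet))
		return NET_RX_DROP;

	/* ... consume the skb here: queue it, forward it or free it ... */
	return NET_RX_SUCCESS;
}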
+
struct notifier_block hard_if_notifier = {
.notifier_call = hard_if_event,
};
char hardif_get_active_if_num(void);
void hardif_check_interfaces_status(void);
void hardif_check_interfaces_status_wq(struct work_struct *work);
+int batman_skb_recv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
int hardif_min_mtu(void);
void update_min_mtu(void);
struct net_device *soft_device;
-static struct task_struct *kthread_task;
-
unsigned char broadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
atomic_t module_state;
+static struct packet_type batman_adv_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_BATMAN),
+ .func = batman_skb_recv,
+};
+
struct workqueue_struct *bat_event_workqueue;
#ifdef CONFIG_BATMAN_ADV_DEBUG
}
register_netdevice_notifier(&hard_if_notifier);
+ dev_add_pack(&batman_adv_packet_type);
printk(KERN_INFO "batman-adv:B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded \n",
SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION);
soft_device = NULL;
}
+ dev_remove_pack(&batman_adv_packet_type);
+
unregister_netdevice_notifier(&hard_if_notifier);
cleanup_procfs();
if (vis_init() < 1)
goto err;
- /* (re)start kernel thread for packet processing */
- if (!kthread_task) {
- kthread_task = kthread_run(packet_recv_thread, NULL, "batman-adv");
-
- if (IS_ERR(kthread_task)) {
- printk(KERN_ERR "batman-adv:Unable to start packet receive thread\n");
- kthread_task = NULL;
- }
- }
-
update_min_mtu();
atomic_set(&module_state, MODULE_ACTIVE);
goto end;
vis_quit();
- /* deactivate kernel thread for packet processing (if running) */
- if (kthread_task) {
- atomic_set(&exit_cond, 1);
- wake_up_interruptible(&thread_wait);
- kthread_stop(kthread_task);
-
- kthread_task = NULL;
- }
+ /* TODO: unregister the batman packet type */
originator_free();
int originator_init(void)
{
+ unsigned long flags;
if (orig_hash)
return 1;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_hash = hash_new(128, compare_orig, choose_orig);
if (!orig_hash)
goto err;
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
start_purge_timer();
return 1;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
}
void originator_free(void)
{
+ unsigned long flags;
+
if (!orig_hash)
return;
cancel_delayed_work_sync(&purge_orig_wq);
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
hash_delete(orig_hash, free_orig_node);
orig_hash = NULL;
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
}
struct neigh_node *
{
HASHIT(hashit);
struct orig_node *orig_node;
+ unsigned long flags;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
/* for all origins... */
while (hash_iterate(orig_hash, &hashit)) {
}
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
start_purge_timer();
}
struct neigh_node *neigh_node;
int batman_count = 0;
char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
+ unsigned long flags;
rcu_read_lock();
if (list_empty(&if_list)) {
((struct batman_if *)if_list.next)->addr_str);
rcu_read_unlock();
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
if (batman_count == 0)
seq_printf(seq, "No batman nodes in range ... \n");
HLIST_HEAD(vis_if_list);
int i;
char tmp_addr_str[ETH_STR_LEN];
+ unsigned long flags;
rcu_read_lock();
if (list_empty(&if_list) || (!is_vis_server())) {
rcu_read_unlock();
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
while (hash_iterate(vis_hash, &hashit)) {
info = hashit.bucket->data;
entries = (struct vis_info_entry *)
proc_vis_read_prim_sec(seq, &vis_if_list);
seq_printf(seq, "\n");
}
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
end:
return 0;
DECLARE_WAIT_QUEUE_HEAD(thread_wait);
-static atomic_t data_ready_cond;
atomic_t exit_cond;
+
void slide_own_bcast_window(struct batman_if *batman_if)
{
HASHIT(hashit);
struct orig_node *orig_node;
TYPE_OF_WORD *word;
+ unsigned long flags;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
bit_packet_count(word);
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
}
static void update_HNA(struct orig_node *orig_node,
}
void receive_bat_packet(struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- unsigned char *hna_buff,
- int hna_buff_len,
- struct batman_if *if_incoming)
+ struct batman_packet *batman_packet,
+ unsigned char *hna_buff, int hna_buff_len,
+ struct batman_if *if_incoming)
{
struct batman_if *batman_if;
struct orig_node *orig_neigh_node, *orig_node;
0, hna_buff_len, if_incoming);
}
-
-static int receive_raw_packet(struct socket *raw_sock,
- unsigned char *packet_buff, int packet_buff_len)
+int recv_bat_packet(struct sk_buff *skb,
+ struct batman_if *batman_if)
{
- struct kvec iov;
- struct msghdr msg;
+ struct ethhdr *ethhdr;
+ unsigned long flags;
- iov.iov_base = packet_buff;
- iov.iov_len = packet_buff_len;
+ /* drop packet if it does not have the necessary minimum size */
+ if (skb_headlen(skb) < sizeof(struct batman_packet))
+ return NET_RX_DROP;
- msg.msg_flags = MSG_DONTWAIT; /* non-blocking */
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
- return kernel_recvmsg(raw_sock, &msg, &iov, 1, packet_buff_len,
- MSG_DONTWAIT);
-}
-
-static void recv_bat_packet(struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result,
- struct batman_if *batman_if)
-{
/* packet with broadcast indication but unicast recipient */
if (!is_bcast(ethhdr->h_dest))
- return;
+ return NET_RX_DROP;
/* packet with broadcast sender address */
if (is_bcast(ethhdr->h_source))
- return;
-
- /* drop packet if it has not at least one batman packet as payload */
- if (result < sizeof(struct ethhdr) + sizeof(struct batman_packet))
- return;
-
- spin_lock(&orig_hash_lock);
+ return NET_RX_DROP;
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+ /* TODO: we use skb_headlen() instead of the full length because
+ * only the linear part of the skb is accessible here */
+ /* TODO: is another skb_copy needed here? the data will be
+ * written to, but nobody else should use this data
+ * afterwards */
receive_aggr_bat_packet(ethhdr,
- packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr),
+ skb->data,
+ skb_headlen(skb),
batman_if);
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
}
-static void recv_my_icmp_packet(struct ethhdr *ethhdr,
- struct icmp_packet *icmp_packet,
- unsigned char *packet_buff,
- int result)
+static int recv_my_icmp_packet(struct sk_buff *skb)
{
struct orig_node *orig_node;
+ struct icmp_packet *icmp_packet;
+ struct ethhdr *ethhdr;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ icmp_packet = (struct icmp_packet *) skb->data;
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
bat_device_receive_packet(icmp_packet);
- return;
+ return NET_RX_DROP;
}
/* answer echo request (ping) */
/* get routing information */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)hash_find(orig_hash,
icmp_packet->orig));
+ ret = NET_RX_DROP;
if ((orig_node != NULL) &&
(orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
+
+ /* don't hold the lock while sending the packet; copy the
+ * required data first, then release the lock */
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet *) skb->data;
+ kfree_skb(skb_old);
+ }
+
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
- send_raw_packet(packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr),
- orig_node->batman_if,
- orig_node->router->addr);
- }
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
- spin_unlock(&orig_hash_lock);
- return;
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
}
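recv_my_icmp_packet() above establishes the scheme the remaining handlers repeat: everything needed for the transmit is copied out of the orig_node while orig_hash_lock is held, so the lock is never held across the send. Distilled into a sketch (dst stands for the looked-up destination address):

	struct orig_node *orig_node;
	struct batman_if *out_if;
	uint8_t next_hop[ETH_ALEN];
	unsigned long flags;

	spin_lock_irqsave(&orig_hash_lock, flags);
	orig_node = hash_find(orig_hash, dst);
	if (!orig_node || !orig_node->batman_if || !orig_node->router) {
		spin_unlock_irqrestore(&orig_hash_lock, flags);
		return NET_RX_DROP;
	}
	out_if = orig_node->batman_if;	/* snapshot the routing data */
	memcpy(next_hop, orig_node->router->addr, ETH_ALEN);
	spin_unlock_irqrestore(&orig_hash_lock, flags);

	/* orig_node must not be touched past this point */
	send_skb_packet(skb, out_if, next_hop);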
-static void recv_icmp_ttl_exceeded(struct icmp_packet *icmp_packet,
- struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result,
- struct batman_if *batman_if)
+static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
{
unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN];
struct orig_node *orig_node;
+ struct icmp_packet *icmp_packet;
+ struct ethhdr *ethhdr;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ icmp_packet = (struct icmp_packet *) skb->data;
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
addr_to_string(src_str, icmp_packet->orig);
addr_to_string(dst_str, icmp_packet->dst);
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST)
- return;
+ return NET_RX_DROP;
/* get routing information */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, icmp_packet->orig));
+ ret = NET_RX_DROP;
if ((orig_node != NULL) &&
(orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
+
+ /* don't hold the lock while sending the packet; copy the
+ * required data first, then release the lock */
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet *) skb->data;
+ kfree_skb(skb_old);
+ }
+
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
- send_raw_packet(packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr),
- orig_node->batman_if,
- orig_node->router->addr);
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
- }
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
- spin_unlock(&orig_hash_lock);
+ return ret;
}
-
-static void recv_icmp_packet(struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result,
- struct batman_if *batman_if)
+int recv_icmp_packet(struct sk_buff *skb)
{
struct icmp_packet *icmp_packet;
+ struct ethhdr *ethhdr;
struct orig_node *orig_node;
+ struct sk_buff *skb_old;
+ struct batman_if *batman_if;
+ int hdr_size = sizeof(struct icmp_packet);
+ int ret;
+ unsigned long flags;
+ uint8_t dstaddr[ETH_ALEN];
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_bcast(ethhdr->h_dest))
- return;
+ return NET_RX_DROP;
/* packet with broadcast sender address */
if (is_bcast(ethhdr->h_source))
- return;
+ return NET_RX_DROP;
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
- return;
+ return NET_RX_DROP;
- /* drop packet if it has not necessary minimum size */
- if (result < sizeof(struct ethhdr) + sizeof(struct icmp_packet))
- return;
-
- icmp_packet = (struct icmp_packet *)
- (packet_buff + sizeof(struct ethhdr));
+ icmp_packet = (struct icmp_packet *) skb->data;
/* packet for me */
if (is_my_mac(icmp_packet->dst))
- recv_my_icmp_packet(ethhdr, icmp_packet, packet_buff, result);
+ return recv_my_icmp_packet(skb);
/* TTL exceeded */
- if (icmp_packet->ttl < 2) {
- recv_icmp_ttl_exceeded(icmp_packet, ethhdr, packet_buff, result,
- batman_if);
- return;
+ if (icmp_packet->ttl < 2)
+ return recv_icmp_ttl_exceeded(skb);
- }
+ ret = NET_RX_DROP;
/* get routing information */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, icmp_packet->dst));
(orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
+ /* don't hold the lock while sending the packet; copy the
+ * required data first, then release the lock */
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ icmp_packet = (struct icmp_packet *) skb->data;
+ kfree_skb(skb_old);
+ }
+
/* decrement ttl */
icmp_packet->ttl--;
/* route it */
- send_raw_packet(packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr),
- orig_node->batman_if,
- orig_node->router->addr);
- }
- spin_unlock(&orig_hash_lock);
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
+
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
}
-static void recv_unicast_packet(struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result,
- struct batman_if *batman_if)
+int recv_unicast_packet(struct sk_buff *skb)
{
struct unicast_packet *unicast_packet;
unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN];
struct orig_node *orig_node;
- int hdr_size = sizeof(struct ethhdr) + sizeof(struct unicast_packet);
+ struct ethhdr *ethhdr;
+ struct batman_if *batman_if;
+ struct sk_buff *skb_old;
+ uint8_t dstaddr[ETH_ALEN];
+ int hdr_size = sizeof(struct unicast_packet);
+ int ret;
+ unsigned long flags;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_bcast(ethhdr->h_dest))
- return;
+ return NET_RX_DROP;
/* packet with broadcast sender address */
if (is_bcast(ethhdr->h_source))
- return;
+ return NET_RX_DROP;
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
- return;
-
- /* drop packet if it has not necessary minimum size */
- if (result < hdr_size)
- return;
+ return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *)
- (packet_buff + sizeof(struct ethhdr));
+ unicast_packet = (struct unicast_packet *) skb->data;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
- interface_rx(soft_device, packet_buff + hdr_size,
- result - hdr_size);
- return;
-
+ interface_rx(skb, hdr_size);
+ return NET_RX_SUCCESS;
}
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
- addr_to_string(src_str, ((struct ethhdr *)
- (unicast_packet + 1))->h_source);
- addr_to_string(dst_str, unicast_packet->dest);
+ addr_to_string(src_str, ethhdr->h_source);
+ addr_to_string(dst_str, ethhdr->h_dest);
printk(KERN_WARNING "batman-adv:Warning - can't send packet from %s to %s: ttl exceeded\n", src_str, dst_str);
- return;
+ return NET_RX_DROP;
}
+ ret = NET_RX_DROP;
/* get routing information */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, unicast_packet->dest));
if ((orig_node != NULL) &&
(orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
+
+ /* don't hold the lock while sending the packet; copy the
+ * required data first, then release the lock */
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ unicast_packet = (struct unicast_packet *) skb->data;
+ kfree_skb(skb_old);
+ }
/* decrement ttl */
unicast_packet->ttl--;
/* route it */
- send_raw_packet(packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr),
- orig_node->batman_if,
- orig_node->router->addr);
- }
- spin_unlock(&orig_hash_lock);
+ send_skb_packet(skb, batman_if, dstaddr);
+ ret = NET_RX_SUCCESS;
+
+ } else
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ return ret;
}
-static void recv_bcast_packet(struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result,
- struct batman_if *batman_if)
+int recv_bcast_packet(struct sk_buff *skb)
{
struct orig_node *orig_node;
struct bcast_packet *bcast_packet;
- int hdr_size = sizeof(struct ethhdr) + sizeof(struct bcast_packet);
+ struct ethhdr *ethhdr;
+ int hdr_size = sizeof(struct bcast_packet);
+ unsigned long flags;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_bcast(ethhdr->h_dest))
- return;
+ return NET_RX_DROP;
/* packet with broadcast sender address */
if (is_bcast(ethhdr->h_source))
- return;
-
- /* drop packet if it has not necessary minimum size */
- if (result < hdr_size)
- return;
+ return NET_RX_DROP;
/* ignore broadcasts sent by myself */
if (is_my_mac(ethhdr->h_source))
- return;
+ return NET_RX_DROP;
- bcast_packet = (struct bcast_packet *)
- (packet_buff + sizeof(struct ethhdr));
+ bcast_packet = (struct bcast_packet *) skb->data;
/* ignore broadcasts originated by myself */
if (is_my_mac(bcast_packet->orig))
- return;
+ return NET_RX_DROP;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, bcast_packet->orig));
if (orig_node == NULL) {
- spin_unlock(&orig_hash_lock);
- return;
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
}
/* check flood history */
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
ntohs(bcast_packet->seqno))) {
- spin_unlock(&orig_hash_lock);
- return;
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
}
/* mark broadcast in flood history */
orig_node->last_bcast_seqno, 1))
orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno);
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ /* rebroadcast packet */
+ add_bcast_packet_to_list(skb);
/* broadcast for me */
- interface_rx(soft_device, packet_buff + hdr_size, result - hdr_size);
+ interface_rx(skb, hdr_size);
- /* rebroadcast packet */
- add_bcast_packet_to_list(packet_buff + sizeof(struct ethhdr),
- result - sizeof(struct ethhdr));
+ return NET_RX_SUCCESS;
}
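get_bit_status() and the marking step above implement a sliding sequence-number window over a bitfield; the real helpers live in the driver's bit-array code and also handle protection limits. Conceptually, assuming a 64-bit window, the dedup check behaves like this sketch:

#include <linux/types.h>

static bool bcast_seen_before(u64 *window, u16 *last_seqno, u16 seqno)
{
	s16 diff = seqno - *last_seqno;	/* wrap-safe distance */

	if (diff <= 0 && diff > -64)	/* inside the current window */
		return *window & (1ULL << -diff);

	if (diff > 0) {			/* newer seqno: slide forward */
		*window = (diff < 64) ? *window << diff : 0;
		*window |= 1;		/* mark the new seqno as seen */
		*last_seqno = seqno;
	}
	/* diff <= -64: far outside the window, treated as unseen here */
	return false;
}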
-static void recv_vis_packet(struct ethhdr *ethhdr,
- unsigned char *packet_buff,
- int result)
+int recv_vis_packet(struct sk_buff *skb)
{
struct vis_packet *vis_packet;
- int hdr_size = sizeof(struct ethhdr) + sizeof(struct vis_packet);
- int vis_info_len;
+ struct ethhdr *ethhdr;
+ int hdr_size = sizeof(struct vis_packet);
+ int ret;
- /* drop if too short. */
- if (result < hdr_size)
- return;
+ if (skb_headlen(skb) < hdr_size)
+ return NET_RX_DROP;
+
+ vis_packet = (struct vis_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
- return;
-
- vis_packet = (struct vis_packet *)(packet_buff + sizeof(struct ethhdr));
- vis_info_len = result - hdr_size;
+ return NET_RX_DROP;
/* ignore own packets */
if (is_my_mac(vis_packet->vis_orig))
- return;
+ return NET_RX_DROP;
if (is_my_mac(vis_packet->sender_orig))
- return;
+ return NET_RX_DROP;
switch (vis_packet->vis_type) {
case VIS_TYPE_SERVER_SYNC:
- receive_server_sync_packet(vis_packet, vis_info_len);
+ /* TODO: handle fragmented skbs properly */
+ receive_server_sync_packet(vis_packet, skb_headlen(skb));
+ ret = NET_RX_SUCCESS;
break;
case VIS_TYPE_CLIENT_UPDATE:
- receive_client_update_packet(vis_packet, vis_info_len);
+ /* TODO: handle fragmented skbs properly */
+ receive_client_update_packet(vis_packet, skb_headlen(skb));
+ ret = NET_RX_SUCCESS;
break;
default: /* ignore unknown packet */
+ ret = NET_RX_DROP;
break;
}
-}
-
-static int recv_one_packet(struct batman_if *batman_if,
- unsigned char *packet_buff)
-{
- int result;
- struct ethhdr *ethhdr;
- struct batman_packet *batman_packet;
-
- result = receive_raw_packet(batman_if->raw_sock, packet_buff,
- PACKBUFF_SIZE);
- if (result <= 0)
- return result;
-
- if (result < sizeof(struct ethhdr) + 2)
- return 0;
-
- ethhdr = (struct ethhdr *)packet_buff;
- batman_packet = (struct batman_packet *)
- (packet_buff + sizeof(struct ethhdr));
-
- if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN,
- "Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
- return 0;
- }
-
- switch (batman_packet->packet_type) {
- /* batman originator packet */
- case BAT_PACKET:
- recv_bat_packet(ethhdr, packet_buff, result, batman_if);
- break;
-
- /* batman icmp packet */
- case BAT_ICMP:
- recv_icmp_packet(ethhdr, packet_buff, result, batman_if);
- break;
-
- /* unicast packet */
- case BAT_UNICAST:
- recv_unicast_packet(ethhdr, packet_buff, result, batman_if);
- break;
-
- /* broadcast packet */
- case BAT_BCAST:
- recv_bcast_packet(ethhdr,
- packet_buff, result, batman_if);
- break;
-
- /* vis packet */
- case BAT_VIS:
- recv_vis_packet(ethhdr, packet_buff, result);
- break;
- }
- return 0;
-}
-
-
-static int discard_one_packet(struct batman_if *batman_if,
- unsigned char *packet_buff)
-{
- int result = -EAGAIN;
-
- if (batman_if->raw_sock) {
- result = receive_raw_packet(batman_if->raw_sock,
- packet_buff,
- PACKBUFF_SIZE);
- }
- return result;
-}
-
-
-static bool is_interface_active(struct batman_if *batman_if)
-{
- if (batman_if->if_active != IF_ACTIVE)
- return false;
-
- return true;
-}
-
-static void service_interface(struct batman_if *batman_if,
- unsigned char *packet_buff)
-
-{
- int result;
-
- do {
- if (is_interface_active(batman_if))
- result = recv_one_packet(batman_if, packet_buff);
- else
- result = discard_one_packet(batman_if, packet_buff);
- } while (result >= 0);
-
- /* we perform none blocking reads, so EAGAIN indicates there
- are no more packets to read. Anything else is a real
- error.*/
-
- if ((result < 0) && (result != -EAGAIN))
- printk(KERN_ERR "batman-adv:Could not receive packet from interface %s: %i\n", batman_if->dev, result);
-}
-
-static void service_interfaces(unsigned char *packet_buffer)
-{
- struct batman_if *batman_if;
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- rcu_read_unlock();
- service_interface(batman_if, packet_buffer);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
-
-int packet_recv_thread(void *data)
-{
- unsigned char *packet_buff;
-
- atomic_set(&data_ready_cond, 0);
- atomic_set(&exit_cond, 0);
- packet_buff = kmalloc(PACKBUFF_SIZE, GFP_KERNEL);
- if (!packet_buff) {
- printk(KERN_ERR"batman-adv:Could allocate memory for the packet buffer. :(\n");
- return -1;
- }
-
- while ((!kthread_should_stop()) && (!atomic_read(&exit_cond))) {
-
- wait_event_interruptible(thread_wait,
- (atomic_read(&data_ready_cond) ||
- atomic_read(&exit_cond)));
-
- atomic_set(&data_ready_cond, 0);
-
- if (kthread_should_stop() || atomic_read(&exit_cond))
- break;
-
- service_interfaces(packet_buff);
- }
- kfree(packet_buff);
-
- /* do not exit until kthread_stop() is actually called,
- * otherwise it will wait for us forever. */
- while (!kthread_should_stop())
- schedule();
-
- return 0;
-}
-
-void batman_data_ready(struct sock *sk, int len)
-{
- void (*data_ready)(struct sock *, int) = sk->sk_user_data;
-
- data_ready(sk, len);
-
- atomic_set(&data_ready_cond, 1);
- wake_up_interruptible(&thread_wait);
+ return ret;
}
extern atomic_t exit_cond;
void slide_own_bcast_window(struct batman_if *batman_if);
-void batman_data_ready(struct sock *sk, int len);
-int packet_recv_thread(void *data);
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
unsigned char *hna_buff, int hna_buff_len,
void update_routes(struct orig_node *orig_node,
struct neigh_node *neigh_node,
unsigned char *hna_buff, int hna_buff_len);
+int recv_icmp_packet(struct sk_buff *skb);
+int recv_unicast_packet(struct sk_buff *skb);
+int recv_bcast_packet(struct sk_buff *skb);
+int recv_vis_packet(struct sk_buff *skb);
+int recv_bat_packet(struct sk_buff *skb,
+ struct batman_if *batman_if);
#include "send.h"
#include "routing.h"
#include "translation-table.h"
+#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
return send_time;
}
-/* sends a raw packet. */
-void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- struct batman_if *batman_if, uint8_t *dst_addr)
+/* send out an already prepared packet to the given address via the
+ * specified batman interface */
+int send_skb_packet(struct sk_buff *skb,
+ struct batman_if *batman_if,
+ uint8_t *dst_addr)
{
struct ethhdr *ethhdr;
- struct sk_buff *skb;
- int retval;
- char *data;
if (batman_if->if_active != IF_ACTIVE)
- return;
+ goto send_skb_err;
+
+ if (unlikely(!batman_if->net_dev))
+ goto send_skb_err;
if (!(batman_if->net_dev->flags & IFF_UP)) {
printk(KERN_WARNING
"batman-adv:Interface %s is not up - can't send packet via that interface!\n",
batman_if->dev);
- return;
+ goto send_skb_err;
}
- skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
- if (!skb)
- return;
- data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+ /* push room for the ethernet header */
+ if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
+ goto send_skb_err;
- memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+ skb_reset_mac_header(skb);
- ethhdr = (struct ethhdr *) data;
+ ethhdr = (struct ethhdr *) skb_mac_header(skb);
memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
- skb_reset_mac_header(skb);
skb_set_network_header(skb, ETH_HLEN);
skb->priority = TC_PRIO_CONTROL;
skb->protocol = __constant_htons(ETH_P_BATMAN);
+
skb->dev = batman_if->net_dev;
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error. */
- retval = dev_queue_xmit(skb);
- if (retval < 0)
- printk(KERN_WARNING
- "batman-adv:Can't write to raw socket: %i\n",
- retval);
+
+ return dev_queue_xmit(skb);
+send_skb_err:
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
+/* sends a raw packet. */
+void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
+ struct batman_if *batman_if, uint8_t *dst_addr)
+{
+ struct sk_buff *skb;
+ char *data;
+
+ skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
+ if (!skb)
+ return;
+ data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+ memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+ /* pull back to the batman "network header" */
+ skb_pull(skb, sizeof(struct ethhdr));
+ send_skb_packet(skb, batman_if, dst_addr);
}
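The extra sizeof(struct ethhdr) allocated in send_raw_packet() is intentional: the payload is copied in behind that space and the header region is pulled off again, leaving headroom that send_skb_packet() can push the ethernet header back into without reallocating. The same idea written with an explicit skb_reserve(), as a sketch:

	struct sk_buff *skb;

	skb = dev_alloc_skb(pack_buff_len + ETH_HLEN);
	if (!skb)
		return;
	skb_reserve(skb, ETH_HLEN);	/* headroom for the later push */
	memcpy(skb_put(skb, pack_buff_len), pack_buff, pack_buff_len);
	send_skb_packet(skb, batman_if, dst_addr);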
/* Send a packet to a given interface */
static void forw_packet_free(struct forw_packet *forw_packet)
{
+ if (forw_packet->skb)
+ kfree_skb(forw_packet->skb);
kfree(forw_packet->packet_buff);
kfree(forw_packet);
}
send_time);
}
-void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
+void add_bcast_packet_to_list(struct sk_buff *skb)
{
struct forw_packet *forw_packet;
if (!forw_packet)
return;
- forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
- if (!forw_packet->packet_buff) {
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
kfree(forw_packet);
return;
}
- forw_packet->packet_len = packet_len;
- memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);
+ skb_reset_mac_header(skb);
+
+ forw_packet->skb = skb;
+ forw_packet->packet_buff = NULL;
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
+ struct sk_buff *skb1;
spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- send_raw_packet(forw_packet->packet_buff,
- forw_packet->packet_len,
+ /* send a copy of the saved skb */
+ skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
+ if (skb1)
+ send_skb_packet(skb1,
batman_if, broadcastAddr);
}
rcu_read_unlock();
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
+ unsigned long flags;
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
send_packet(forw_packet);
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* free batman packet list */
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bat_list, list) {
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock(&forw_bat_list_lock);
+ spin_lock_irqsave(&forw_bat_list_lock, flags);
}
- spin_unlock(&forw_bat_list_lock);
+ spin_unlock_irqrestore(&forw_bat_list_lock, flags);
}
#include "types.h"
void send_own_packet_work(struct work_struct *work);
+int send_skb_packet(struct sk_buff *skb,
+ struct batman_if *batman_if,
+ uint8_t *dst_addr);
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
struct batman_if *batman_if, uint8_t *dst_addr);
void schedule_own_packet(struct batman_if *batman_if);
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
-void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len);
+void add_bcast_packet_to_list(struct sk_buff *skb);
void send_outstanding_bcast_packet(struct work_struct *work);
void send_outstanding_bat_packet(struct work_struct *work);
void purge_outstanding_packets(void);
* broadcast storms */
static int32_t skb_packets;
static int32_t skb_bad_packets;
-static int32_t lock_dropped;
unsigned char mainIfAddr[ETH_ALEN];
static unsigned char mainIfAddr_default[ETH_ALEN];
return (memcmp(mainIfAddr, mainIfAddr_default, ETH_ALEN) != 0 ? 1 : 0);
}
-static int my_skb_push(struct sk_buff *skb, unsigned int len)
+int my_skb_push(struct sk_buff *skb, unsigned int len)
{
int result = 0;
skb_packets++;
- if (skb->data - len < skb->head) {
+ if (skb_headroom(skb) < len) {
skb_bad_packets++;
result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
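skb_headroom() is defined as skb->data - skb->head, so the new test expresses exactly what the old pointer comparison computed, without forming an out-of-range pointer and with the intent spelled out. The surrounding expand-then-push idiom, as a standalone sketch with a hypothetical header length:

static int push_header(struct sk_buff *skb, unsigned int hdr_len)
{
	/* grow the skb head if the existing headroom is too small */
	if (skb_headroom(skb) < hdr_len &&
	    pskb_expand_head(skb, hdr_len, 0, GFP_ATOMIC) < 0)
		return -ENOMEM;

	skb_push(skb, hdr_len);	/* skb->data now points at the header */
	return 0;
}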
struct orig_node *orig_node;
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct bat_priv *priv = netdev_priv(dev);
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];
int data_len = skb->len;
+ unsigned long flags;
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto dropped;
goto dropped;
bcast_packet = (struct bcast_packet *)skb->data;
-
bcast_packet->version = COMPAT_VERSION;
/* batman packet type: broadcast */
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
memcpy(bcast_packet->orig, mainIfAddr, ETH_ALEN);
+
/* set broadcast sequence number */
bcast_packet->seqno = htons(bcast_seqno);
bcast_seqno++;
/* broadcast packet */
- add_bcast_packet_to_list(skb->data, skb->len);
+ add_bcast_packet_to_list(skb);
+ /* a copy is stored in the bcast list, so the original skb
+ * can be freed */
+ kfree_skb(skb);
/* unicast packet */
} else {
-
- /* simply spin_lock()ing can deadlock when the lock is already
- * hold. */
- /* TODO: defer the work in a working queue instead of
- * dropping */
- if (!spin_trylock(&orig_hash_lock)) {
- lock_dropped++;
- printk(KERN_WARNING "batman-adv:%d packets dropped because lock was hold\n", lock_dropped);
- goto dropped;
- }
-
+ spin_lock_irqsave(&orig_hash_lock, flags);
/* get routing information */
orig_node = ((struct orig_node *)hash_find(orig_hash,
ethhdr->h_dest));
if (orig_node->batman_if->if_active != IF_ACTIVE)
goto unlock;
- send_raw_packet(skb->data, skb->len,
- orig_node->batman_if,
- orig_node->router->addr);
+ /* don't hold the lock while sending the packet; copy the
+ * required data first, then release the lock */
+
+ batman_if = orig_node->batman_if;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ send_skb_packet(skb, batman_if, dstaddr);
} else {
goto unlock;
}
-
- spin_unlock(&orig_hash_lock);
}
priv->stats.tx_packets++;
goto end;
unlock:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
dropped:
priv->stats.tx_dropped++;
end:
- kfree_skb(skb);
return 0;
}
-void interface_rx(struct net_device *dev, void *packet, int packet_len)
+void interface_rx(struct sk_buff *skb, int hdr_size)
{
- struct sk_buff *skb;
+ struct net_device *dev = soft_device;
struct bat_priv *priv = netdev_priv(dev);
- skb = dev_alloc_skb(packet_len);
-
- if (!skb) {
- priv->stats.rx_dropped++;
- goto out;
+ /* check if enough space is available for pulling, and pull */
+ if (!pskb_may_pull(skb, hdr_size)) {
+ kfree_skb(skb);
+ return;
}
+ skb_pull_rcsum(skb, hdr_size);
+/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
- memcpy(skb_put(skb, packet_len), packet, packet_len);
-
- /* Write metadata, and then pass to the receive level */
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* should not be necessary anymore as we use skb_pull_rcsum()
+ * TODO: please verify this and remove this TODO
+ * -- Dec 21st 2009, Simon Wunderlich */
+
+/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
+
+ /* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST,
+ * PACKET_OTHERHOST or PACKET_HOST */
priv->stats.rx_packets++;
- priv->stats.rx_bytes += packet_len;
+ priv->stats.rx_bytes += skb->len;
dev->last_rx = jiffies;
netif_rx(skb);
-
-out:
- return;
}
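skb_pull_rcsum() differs from a plain skb_pull() only for devices that deliver CHECKSUM_COMPLETE: it folds the pulled bytes out of skb->csum so the stored checksum still matches the remaining data, which is why forcing CHECKSUM_UNNECESSARY above should no longer be needed. The contrast, in sketch form (one call or the other, not both):

	/* plain pull: skb->csum (CHECKSUM_COMPLETE) would still cover
	 * the header bytes that were just removed */
	skb_pull(skb, hdr_size);

	/* checksum-aware pull: also subtracts the pulled bytes from
	 * skb->csum, keeping CHECKSUM_COMPLETE state consistent */
	skb_pull_rcsum(skb, hdr_size);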
/* ethtool */
int interface_set_mac_addr(struct net_device *dev, void *addr);
int interface_change_mtu(struct net_device *dev, int new_mtu);
int interface_tx(struct sk_buff *skb, struct net_device *dev);
-void interface_rx(struct net_device *dev, void *packet, int packet_len);
+void interface_rx(struct sk_buff *skb, int hdr_size);
+int my_skb_push(struct sk_buff *skb, unsigned int len);
extern unsigned char mainIfAddr[];
char if_active;
char addr_str[ETH_STR_LEN];
struct net_device *net_dev;
- struct socket *raw_sock;
atomic_t seqno;
unsigned char *packet_buff;
int packet_len;
struct hlist_node list;
unsigned long send_time;
uint8_t own;
+ struct sk_buff *skb;
unsigned char *packet_buff;
uint16_t packet_len;
uint32_t direct_link_flags;
/* set the mode of the visualization to client or server */
void vis_set_mode(int mode)
{
- spin_lock(&vis_hash_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&vis_hash_lock, flags);
if (my_vis_info != NULL)
my_vis_info->packet.vis_type = mode;
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
}
/* is_vis_server(), locked outside */
int is_vis_server(void)
{
int ret = 0;
+ unsigned long flags;
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
ret = is_vis_server_locked();
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
return ret;
}
{
struct vis_info *info;
int is_new;
+ unsigned long flags;
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
info = add_packet(vis_packet, vis_info_len, &is_new);
if (info == NULL)
goto end;
list_add_tail(&info->send_list, &send_list);
}
end:
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
}
/* handle an incoming client update packet and schedule forward if needed. */
{
struct vis_info *info;
int is_new;
+ unsigned long flags;
/* clients shall not broadcast. */
if (is_bcast(vis_packet->target_orig))
return;
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
info = add_packet(vis_packet, vis_info_len, &is_new);
if (info == NULL)
goto end;
list_add_tail(&info->send_list, &send_list);
}
end:
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
info->first_seen = jiffies;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
info->packet.ttl = TTL;
info->packet.seqno++;
if (!is_vis_server_locked()) {
best_tq = find_best_vis_server(info);
if (best_tq < 0) {
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -1;
}
}
info->packet.entries++;
if (vis_packet_full(info)) {
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
}
}
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_lock_irqsave(&hna_local_hash_lock, flags);
while (hash_iterate(hna_local_hash, &hashit_local)) {
{
HASHIT(hashit);
struct orig_node *orig_node;
+ unsigned long flags;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
/* send to all routers in range. */
while (hash_iterate(orig_hash, &hashit)) {
}
}
memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
}
static void unicast_vis_packet(struct vis_info *info, int packet_length)
{
struct orig_node *orig_node;
+ unsigned long flags;
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, info->packet.target_orig));
orig_node->batman_if,
orig_node->router->addr);
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
}
/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packets(struct work_struct *work)
{
struct vis_info *info, *temp;
+ unsigned long flags;
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
purge_vis_packets();
if (generate_vis_packet() == 0)
list_del_init(&info->send_list);
send_vis_packet(info);
}
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
start_vis_timer();
}
static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
* initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(void)
{
+ unsigned long flags;
if (vis_hash)
return 1;
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
if (!vis_hash) {
goto err;
}
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
start_vis_timer();
return 1;
err:
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
vis_quit();
return 0;
}
/* shutdown vis-server */
void vis_quit(void)
{
+ unsigned long flags;
if (!vis_hash)
return;
cancel_delayed_work_sync(&vis_timer_wq);
- spin_lock(&vis_hash_lock);
+ spin_lock_irqsave(&vis_hash_lock, flags);
/* properly remove, kill timers ... */
hash_delete(vis_hash, free_info);
vis_hash = NULL;
my_vis_info = NULL;
- spin_unlock(&vis_hash_lock);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
}
/* schedule packets for (re)transmission */