struct sk_buff *completion_queue;
/* Elements below can be accessed between CPUs for RPS */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
struct call_single_data csd ____cacheline_aligned_in_smp;
#endif
struct sk_buff_head input_pkt_queue;
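
The csd member is what lets one CPU kick another CPU's backlog processing. A condensed sketch of that path, following the shape of the RPS patch (the per-CPU setup lives in net_dev_init() in the patch, and __smp_call_function_single() was the IPI interface of that era; the call sites are compressed here):

/* Runs on the target CPU via an IPI: schedule its backlog NAPI
 * instance so net_rx_action() will drain input_pkt_queue there. */
static void trigger_softirq(void *data)
{
	struct softnet_data *queue = data;

	__napi_schedule(&queue->backlog);
}

/* One-time per-CPU setup: */
queue->csd.func = trigger_softirq;
queue->csd.info = queue;

/* After queueing an skb for a remote CPU, fire the IPI: */
__smp_call_function_single(cpu, &queue->csd, 0);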
/*
 * get_rps_cpu is called from netif_rx and netif_receive_skb and returns
 * the target CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock() must be held by the caller.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
{
	u8 ip_proto;
	u32 addr1, addr2, ports, ihl;
	int cpu = -1;
- rcu_read_lock();
-
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->num_rx_queues)) {
			/* Bogus queue index from the driver: give up */
			goto done;
		}
	}

	/* ... flow hash and rps_map lookup elided; sketched below ... */
done:
- rcu_read_unlock();
return cpu;
}
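
The elided middle of get_rps_cpu() hashes the flow addresses and ports declared above and uses the result to index the receive queue's RPS map; the rcu_read_lock() requirement exists because that map is RCU-protected. A condensed sketch along the lines of the patch (rxqueue, map, and hashrnd are set up in the elided code; error paths are trimmed):

	/* Make the hash symmetric so both directions of a connection
	 * land on the same CPU. */
	if (addr2 < addr1)
		swap(addr1, addr2);

	skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
	if (!skb->rxhash)
		skb->rxhash = 1;	/* zero is reserved for "no hash" */

	/* Scale the 32-bit hash into [0, map->len) and take that CPU. */
	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu))
			cpu = tcpu;
	}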
int netif_rx(struct sk_buff *skb)
{
- int cpu;
+ int ret;
	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp(skb);
#ifdef CONFIG_RPS
- cpu = get_rps_cpu(skb->dev, skb);
- if (cpu < 0)
- cpu = smp_processor_id();
+ {
+ int cpu;
+
+ rcu_read_lock();
+ cpu = get_rps_cpu(skb->dev, skb);
+ if (cpu < 0)
+ cpu = smp_processor_id();
+ ret = enqueue_to_backlog(skb, cpu);
+ rcu_read_unlock();
+ }
#else
- cpu = smp_processor_id();
+ ret = enqueue_to_backlog(skb, get_cpu());
+ put_cpu();
#endif
-
- return enqueue_to_backlog(skb, cpu);
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
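
enqueue_to_backlog() itself is not part of this excerpt. A condensed sketch of what it does, assuming the shape of the patch: append the skb to the target CPU's input_pkt_queue under the queue lock, scheduling the backlog NAPI (locally, or via the csd IPI sketched earlier) when the queue was previously empty; overflow accounting is trimmed:

static int enqueue_to_backlog(struct sk_buff *skb, unsigned int cpu)
{
	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
	unsigned long flags;

	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		/* If the queue was empty, the backlog NAPI is not yet
		 * scheduled; kick it here (remote CPUs get the IPI). */
		__skb_queue_tail(&queue->input_pkt_queue, skb);
		spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
		return NET_RX_SUCCESS;
	}
	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);

	/* Backlog full: drop the packet. */
	kfree_skb(skb);
	return NET_RX_DROP;
}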