#endif
struct rt6_info;
+struct fib6_info;
struct fib6_config {
u32 fc_table;
int nh_weight;
};
+struct fib6_info {
+ struct fib6_table *rt6i_table;
+ struct fib6_info __rcu *rt6_next;
+ struct fib6_node __rcu *rt6i_node;
+
+ /* Multipath routes:
+ * siblings is a list of fib6_info that have the same metric/weight,
+ * destination, but not the same gateway. nsiblings is just a cache
+ * to speed up lookup.
+ */
+ struct list_head rt6i_siblings;
+ unsigned int rt6i_nsiblings;
+
+ atomic_t rt6i_ref;
+ struct inet6_dev *rt6i_idev;
+ unsigned long expires;
+ struct dst_metrics *fib6_metrics;
+#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
+
+ struct rt6key rt6i_dst;
+ u32 rt6i_flags;
+ struct rt6key rt6i_src;
+ struct rt6key rt6i_prefsrc;
+
+ struct rt6_info * __percpu *rt6i_pcpu;
+ struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+
+ u32 rt6i_metric;
+ u8 rt6i_protocol;
+ u8 fib6_type;
+ u8 exception_bucket_flushed:1,
+ should_flush:1,
+ dst_nocount:1,
+ dst_nopolicy:1,
+ dst_host:1,
+ unused:3;
+
+ struct fib6_nh fib6_nh;
+};
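+
+/* Example (illustrative sketch, not taken from this patch): once
+ * multipath routes are linked through rt6i_siblings, the ECMP set can
+ * be walked under rcu_read_lock(). The nh_weight field is assumed to
+ * live in struct fib6_nh, mirroring nh_weight in fib6_config above:
+ *
+ *	struct fib6_info *sibling;
+ *	int total_weight = f6i->fib6_nh.nh_weight;
+ *
+ *	list_for_each_entry(sibling, &f6i->rt6i_siblings, rt6i_siblings)
+ *		total_weight += sibling->fib6_nh.nh_weight;
+ */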
+
struct rt6_info {
struct dst_entry dst;
struct rt6_info __rcu *rt6_next;
void rt6_free_pcpu(struct rt6_info *non_pcpu_rt);
+struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
+void fib6_info_destroy(struct fib6_info *f6i);
+
+static inline void fib6_info_hold(struct fib6_info *f6i)
+{
+ atomic_inc(&f6i->rt6i_ref);
+}
+
+static inline void fib6_info_release(struct fib6_info *f6i)
+{
+ if (f6i && atomic_dec_and_test(&f6i->rt6i_ref))
+ fib6_info_destroy(f6i);
+}
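+
+/* Example usage (illustrative sketch): a caller that needs the entry
+ * beyond the current RCU read-side section takes its own reference and
+ * drops it when done; the final fib6_info_release() frees the entry
+ * via fib6_info_destroy():
+ *
+ *	rcu_read_lock();
+ *	f6i = ...lookup...;
+ *	fib6_info_hold(f6i);
+ *	rcu_read_unlock();
+ *
+ *	...use f6i...
+ *
+ *	fib6_info_release(f6i);
+ */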
+
static inline void rt6_hold(struct rt6_info *rt)
{
atomic_inc(&rt->rt6i_ref);
addr[fn_bit >> 5];
}
+struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
+{
+ struct fib6_info *f6i;
+
+ f6i = kzalloc(sizeof(*f6i), gfp_flags);
+ if (!f6i)
+ return NULL;
+
+ f6i->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
+ if (!f6i->rt6i_pcpu) {
+ kfree(f6i);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&f6i->rt6i_siblings);
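+ /* start with the shared, read-only default metrics; a route with
+  * its own metrics can replace this pointer later
+  */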
+ f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
+
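+ /* kzalloc left rt6i_ref at zero; take the initial reference on
+  * behalf of the caller
+  */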
+ atomic_inc(&f6i->rt6i_ref);
+
+ return f6i;
+}
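+
+/* Example caller (illustrative sketch):
+ *
+ *	struct fib6_info *f6i;
+ *
+ *	f6i = fib6_info_alloc(gfp_flags);
+ *	if (!f6i)
+ *		return ERR_PTR(-ENOMEM);
+ *	...
+ *	fib6_info_release(f6i);	// drops the initial reference
+ */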
+
+void fib6_info_destroy(struct fib6_info *f6i)
+{
+ struct rt6_exception_bucket *bucket;
+
+ WARN_ON(f6i->rt6i_node);
+
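+ /* the entry is already unlinked from the tree (see the WARN_ON
+  * above), so no readers remain; the constant-true condition tells
+  * rcu_dereference_protected() the caller guarantees exclusive access
+  */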
+ bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket, 1);
+ if (bucket) {
+ f6i->rt6i_exception_bucket = NULL;
+ kfree(bucket);
+ }
+
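+ /* release any cached per-cpu dst entries created from this route */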
+ if (f6i->rt6i_pcpu) {
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info **ppcpu_rt;
+ struct rt6_info *pcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(f6i->rt6i_pcpu, cpu);
+ pcpu_rt = *ppcpu_rt;
+ if (pcpu_rt) {
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
+ *ppcpu_rt = NULL;
+ }
+ }
+
+ /* also free the per-cpu array allocated in fib6_info_alloc() */
+ free_percpu(f6i->rt6i_pcpu);
+ }
+
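+ /* drop the references held on the inet6 device and the nexthop
+  * device since the route was created
+  */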
+ if (f6i->rt6i_idev)
+ in6_dev_put(f6i->rt6i_idev);
+ if (f6i->fib6_nh.nh_dev)
+ dev_put(f6i->fib6_nh.nh_dev);
+
+ kfree(f6i);
+}
+EXPORT_SYMBOL_GPL(fib6_info_destroy);
+
static struct fib6_node *node_alloc(struct net *net)
{
struct fib6_node *fn;