#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_PRECOW_METRICS 0x02
#define FLOWI_FLAG_CAN_SLEEP 0x04
+#define FLOWI_FLAG_RT_NOCACHE 0x08
__u32 flowic_secid;
};

extern struct dst_entry* inet_csk_route_req(struct sock *sk,
struct flowi4 *fl4,
- const struct request_sock *req);
+ const struct request_sock *req,
+ bool nocache);
extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
struct sock *newsk,
const struct request_sock *req);

struct dst_entry *dst;
struct flowi4 fl4;
- dst = inet_csk_route_req(sk, &fl4, req);
+ dst = inet_csk_route_req(sk, &fl4, req, false);
if (dst == NULL)
goto out;

struct dst_entry *inet_csk_route_req(struct sock *sk,
struct flowi4 *fl4,
- const struct request_sock *req)
+ const struct request_sock *req,
+ bool nocache)
{
struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options_rcu *opt = inet_rsk(req)->opt;
struct net *net = sock_net(sk);
+ int flags = inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS;
+ if (nocache)
+ flags |= FLOWI_FLAG_RT_NOCACHE;
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol,
- inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
+ flags,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
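The nocache decision only needs to travel as a flow flag: flowi4_init_output() copies the flags argument into fl4->flowi4_flags, which is what the routing code inspects further down in this patch. For reference, an abridged sketch of that helper (the real inline lives in include/net/flow.h; assignments not relevant here are omitted):

/* Abridged sketch of flowi4_init_output(); fields not relevant to this patch omitted */
static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
				      __u32 mark, __u8 tos, __u8 scope,
				      __u8 proto, __u8 flags,
				      __be32 daddr, __be32 saddr,
				      __be16 dport, __be16 sport)
{
	fl4->flowi4_oif   = oif;
	fl4->flowi4_mark  = mark;
	fl4->flowi4_tos   = tos;
	fl4->flowi4_scope = scope;
	fl4->flowi4_proto = proto;
	fl4->flowi4_flags = flags;	/* FLOWI_FLAG_RT_NOCACHE is carried here */
	fl4->daddr        = daddr;
	fl4->saddr        = saddr;
	fl4->fl4_dport    = dport;
	fl4->fl4_sport    = sport;
}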
candp = NULL;
now = jiffies;
- if (!rt_caching(dev_net(rt->dst.dev))) {
+ if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) {
/*
* If we're not caching, just tell the caller we
were successful and don't touch the route. The

rt_set_nexthop(rth, fl4, res, fi, type, 0);
+ if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE)
+ rth->dst.flags |= DST_NOCACHE;
+
return rth;
}
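Read together, the two route.c hunks above wire the flag through the IPv4 output path: the hunk that builds the new rtable turns FLOWI_FLAG_RT_NOCACHE into DST_NOCACHE on the route's dst, and the cache-insertion hunk then treats such a route exactly like the caching-disabled case, handing it back to the caller without inserting it. A condensed, illustrative view of a nocache lookup (not verbatim kernel code; the surrounding function names are inferred from the context lines):

/* Illustrative only: how a nocache request flows through the pieces above */
struct flowi4 fl4;
struct dst_entry *dst;

dst = inet_csk_route_req(sk, &fl4, req, true);	/* nocache == true */
/* -> flowi4_init_output() stores FLOWI_FLAG_RT_NOCACHE in fl4.flowi4_flags       */
/* -> the route constructor (__mkroute_output()) sets DST_NOCACHE on rth->dst     */
/* -> the cache-insert path (rt_intern_hash()) sees DST_NOCACHE and returns the   */
/*    route to the caller without adding it to the routing cache                  */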
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct request_values *rvp,
- u16 queue_mapping)
+ u16 queue_mapping,
+ bool nocache)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
struct sk_buff * skb;
/* First, grab a route. */
- if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
return -1;
skb = tcp_make_synack(sk, dst, req, rvp);

struct request_values *rvp)
{
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
- return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+ return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*

*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
- (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
+ (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
fl4.daddr == saddr &&
(peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
inet_peer_refcheck(peer);

if (tcp_v4_send_synack(sk, dst, req,
(struct request_values *)&tmp_ext,
- skb_get_queue_mapping(skb)) ||
+ skb_get_queue_mapping(skb),
+ want_cookie) ||
want_cookie)
goto drop_and_free;
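Finally, in the last hunk want_cookie doubles as the nocache argument: when the SYN is answered with a cookie, the code above takes the drop_and_free path right after sending the SYNACK, so the request sock (and any route cached on its behalf) is never used again. A hypothetical condensation of that decision, equivalent to passing want_cookie straight through as the hunk does:

/* Hypothetical condensation, not the literal tcp_v4_conn_request() code */
if (want_cookie)
	/* SYN-cookie reply: req is freed right after the SYNACK goes out,
	 * so ask for a route that is not inserted into the route cache. */
	dst = inet_csk_route_req(sk, &fl4, req, true);
else
	/* Normal handshake: keep the pre-patch (cacheable) behaviour. */
	dst = inet_csk_route_req(sk, &fl4, req, false);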