// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/ipv4.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <[email protected]>
 */

#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/inet_dscp.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/netns/generic.h>
#include <net/rstreason.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

struct dccp_v4_pernet {
	struct sock *v4_ctl_sk;
};

static unsigned int dccp_v4_pernet_id __read_mostly;

/*
 * The per-net v4_ctl_sk socket is used for responding to
 * Out-of-the-blue (OOTB) packets. A control sock is created
 * for this purpose at initialization time.
 */

int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt != NULL && inet_opt->opt.srr) {
		if (daddr == 0)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      sk->sk_bound_dev_if, IPPROTO_DCCP, orig_sport,
			      orig_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (inet_opt == NULL || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (inet->inet_saddr == 0) {
		err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
		if (err) {
			ip_rt_put(rt);
			return err;
		}
	} else {
		sk_rcv_saddr_set(sk, inet->inet_saddr);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	/*
	 * Socket identity is still unknown (sport may be zero).
	 * However we set state to DCCP_REQUESTING and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet_hash_connect(&dccp_death_row, sk);
	if (err != 0)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->dst);

	dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
						    inet->inet_daddr,
						    inet->inet_sport,
						    inet->inet_dport);
	atomic_set(&inet->inet_id, get_random_u16());

	err = dccp_connect(sk);
	rt = NULL;
	if (err != 0)
		goto failure;
out:
	return err;
failure:
	/*
	 * This unhashes the socket and releases the local port, if necessary.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	inet_bhash2_reset_saddr(sk);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	goto out;
}
EXPORT_SYMBOL_GPL(dccp_v4_connect);

/*
 * This routine does path MTU discovery as defined in RFC 1191.
 */
static inline void dccp_do_pmtu_discovery(struct sock *sk,
					  const struct iphdr *iph,
					  u32 mtu)
{
	struct dst_entry *dst;
	const struct inet_sock *inet = inet_sk(sk);
	const struct dccp_sock *dp = dccp_sk(sk);

	/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == DCCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		dccp_sync_mss(sk, mtu);

		/*
		 * From RFC 4340, sec. 14.1:
		 *
		 *	DCCP-Sync packets are the best choice for upward
		 *	probing, since DCCP-Sync probes do not risk application
		 *	data loss.
		 */
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
	} /* else let the usual retransmit timer handle it */
}

static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

void dccp_req_err(struct sock *sk, u64 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/*
	 * ICMPs are not backlogged, hence we cannot get an established
	 * socket here.
	 */
	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else {
		/*
		 * Still in RESPOND, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(dccp_req_err);

/*
 * This routine is called by the ICMP module when it gets some sort of error
 * condition. If err < 0 then the socket should be closed and the error
 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
 * After adjustment, header points to the first 8 bytes of the DCCP header.
 * We need to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When someone else
 * accesses the socket the ICMP is just dropped and for some paths there is no
 * check at all. A more general error queue to queue errors for later handling
 * is probably better.
 */
static int dccp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const u8 offset = iph->ihl << 2;
	const struct dccp_hdr *dh;
	struct dccp_sock *dp;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u64 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
		return -EINVAL;
	dh = (struct dccp_hdr *)(skb->data + offset);
	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
		return -EINVAL;
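	/* pskb_may_pull() may have moved skb->data, so reload the header
	 * pointers before dereferencing them.
	 */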
	iph = (struct iphdr *)skb->data;
	dh = (struct dccp_hdr *)(skb->data + offset);

	sk = __inet_lookup_established(net, &dccp_hashinfo,
				       iph->daddr, dh->dccph_dport,
				       iph->saddr, ntohs(dh->dccph_sport),
				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		dccp_req_err(sk, seq);
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			dccp_do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:
		if (!sock_owned_by_user(sk)) {
			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk_error_report(sk);

			dccp_done(sk);
		} else {
			WRITE_ONCE(sk->sk_err_soft, err);
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows us to treat only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (FRAG_FAILED too, but it is
	 * obsoleted by PMTU discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable and
	 * broken firewalls sit in every dark corner sending arbitrary errors
	 * on behalf of their masters, even these two messages have lost their
	 * original meaning (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	if (!sock_owned_by_user(sk) && inet_test_bit(RECVERR, sk)) {
		sk->sk_err = err;
		sk_error_report(sk);
	} else { /* Only an error on timeout */
		WRITE_ONCE(sk->sk_err_soft, err);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
					  __be32 src, __be32 dst)
{
	return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}

void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v4_csum_finish(skb,
						 inet->inet_saddr,
						 inet->inet_daddr);
}
EXPORT_SYMBOL_GPL(dccp_v4_send_check);

static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
					   ip_hdr(skb)->saddr,
					   dccp_hdr(skb)->dccph_dport,
					   dccp_hdr(skb)->dccph_sport);
}

/*
 * The three way handshake has completed - we got a valid ACK or DATAACK -
 * now create the new socket.
 *
 * This is the equivalent of TCP's tcp_v4_syn_recv_sock
 */
struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
				       struct sk_buff *skb,
				       struct request_sock *req,
				       struct dst_entry *dst,
				       struct request_sock *req_unhash,
				       bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto exit_nonewsk;

	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr = ireq->ir_loc_addr;
	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	atomic_set(&newinet->inet_id, get_random_u16());

	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
		goto put_and_exit;

	sk_setup_caps(newsk, dst);

	dccp_sync_mss(newsk, dst_mtu(dst));

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
	if (*own_req)
		ireq->ireq_opt = NULL;
	else
		newinet->inet_opt = NULL;
	return newsk;

exit_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	dccp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);

static struct dst_entry *dccp_v4_route_skb(struct net *net, struct sock *sk,
					   struct sk_buff *skb)
{
	struct rtable *rt;
	const struct iphdr *iph = ip_hdr(skb);
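	/* Build the reply flow in the reverse direction of the received
	 * packet: addresses and ports are swapped so the generated packet is
	 * routed back toward the sender of @skb.
	 */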
	struct flowi4 fl4 = {
		.flowi4_oif = inet_iif(skb),
		.daddr = iph->saddr,
		.saddr = iph->daddr,
		.flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk))),
		.flowi4_scope = ip_sock_rt_scope(sk),
		.flowi4_proto = sk->sk_protocol,
		.fl4_sport = dccp_hdr(skb)->dccph_dport,
		.fl4_dport = dccp_hdr(skb)->dccph_sport,
	};

	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}

	return &rt->dst;
}

static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
{
	int err = -1;
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct flowi4 fl4;

	dst = inet_csk_route_req(sk, &fl4, req);
	if (dst == NULL)
		goto out;

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
							 ireq->ir_rmt_addr);
		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt),
					    READ_ONCE(inet_sk(sk)->tos));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

out:
	dst_release(dst);
	return err;
}

static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb,
				   enum sk_rst_reason reason)
{
	int err;
	const struct iphdr *rxiph;
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct dccp_v4_pernet *pn;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
		return;

	pn = net_generic(net, dccp_v4_pernet_id);
	ctl_sk = pn->v4_ctl_sk;
	dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
	if (dst == NULL)
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		goto out;

	rxiph = ip_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
							    rxiph->daddr);
	skb_dst_set(skb, dst_clone(dst));

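	/* The control socket is shared by the whole netns, so serialise on it
	 * (with BHs disabled) while the reset is transmitted.
	 */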
	local_bh_disable();
	bh_lock_sock(ctl_sk);
	err = ip_build_and_send_pkt(skb, ctl_sk,
				    rxiph->daddr, rxiph->saddr, NULL,
				    inet_sk(ctl_sk)->tos);
	bh_unlock_sock(ctl_sk);

	if (net_xmit_eval(err) == 0) {
		__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
		__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
	}
	local_bh_enable();
out:
	dst_release(dst);
}

static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

void dccp_syn_ack_timeout(const struct request_sock *req)
{
}
EXPORT_SYMBOL(dccp_syn_ack_timeout);

static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct dccp_request_sock),
	.rtx_syn_ack	= dccp_v4_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v4_reqsk_destructor,
	.send_reset	= dccp_v4_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	/* Never answer DCCP_PKT_REQUESTs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * TW buckets are converted to open requests without
	 * limitations: they conserve resources and the peer is
	 * evidently a real one.
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk))
		goto drop;

	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	ireq = inet_rsk(req);
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->ir_mark = inet_request_mark(sk, skb);
	ireq->ireq_family = AF_INET;
	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	/*
	 * Step 3: Process LISTEN state
	 *
	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr = dcb->dccpd_seq;
	dreq->dreq_gsr = dreq->dreq_isr;
	dreq->dreq_iss = dccp_v4_init_sequence(skb);
	dreq->dreq_gss = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v4_send_response(sk, req))
		goto drop_and_free;

	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
		reqsk_free(req);
	else
		reqsk_put(req);

	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}
EXPORT_SYMBOL_GPL(dccp_v4_conn_request);

int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_hdr *dh = dccp_hdr(skb);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dh, skb->len))
			goto reset;
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *	If P.type == Request or P contains a valid Init Cookie option,
	 *	     (* Must scan the packet's options to check for Init
	 *		Cookies.  Only Init Cookies are processed here,
	 *		however; other options are processed in Step 8.  This
	 *		scan need only be performed if the endpoint uses Init
	 *		Cookies *)
	 *	     (* Generate a new socket and switch to that socket *)
	 *	     Set S := new socket for this port pair
	 *	     S.state = RESPOND
	 *	     Choose S.ISS (initial seqno) or set from Init Cookies
	 *	     Initialize S.GAR := S.ISS
	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	     Continue with S.state == RESPOND
	 *	     (* A Response packet will be generated in Step 11 *)
	 *  Otherwise,
	 *	     Generate Reset(No Connection) unless P.type == Reset
	 *	     Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */

	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
		goto reset;
	return 0;

reset:
	dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);

/**
 * dccp_invalid_packet  -  check for malformed packets
 * @skb: Packet to validate
 *
 * Implements RFC 4340, 8.5:  Step 1: Check header basics
 * Packets that fail these checks are ignored and do not receive Resets.
 */
int dccp_invalid_packet(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	unsigned int cscov;
	u8 dccph_doff;

	if (skb->pkt_type != PACKET_HOST)
		return 1;

	/* If the packet is shorter than 12 bytes, drop packet and return */
	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
		DCCP_WARN("pskb_may_pull failed\n");
		return 1;
	}

	dh = dccp_hdr(skb);

	/* If P.type is not understood, drop packet and return */
	if (dh->dccph_type >= DCCP_PKT_INVALID) {
		DCCP_WARN("invalid packet type\n");
		return 1;
	}

	/*
	 * If P.Data Offset is too small for packet type, drop packet and return
	 */
	dccph_doff = dh->dccph_doff;
	if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
		DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
		return 1;
	}
	/*
	 * If P.Data Offset is too large for packet, drop packet and return
	 */
	if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
		DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
		return 1;
	}
	dh = dccp_hdr(skb);
	/*
	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
	 * has short sequence numbers), drop packet and return
	 */
	if ((dh->dccph_type < DCCP_PKT_DATA ||
	     dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
		DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
			  dccp_packet_name(dh->dccph_type));
		return 1;
	}

	/*
	 * If P.CsCov is too large for the packet size, drop packet and return.
	 * This must come _before_ checksumming (not as RFC 4340 suggests).
	 */
	cscov = dccp_csum_coverage(skb);
	if (cscov > skb->len) {
		DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
			  dh->dccph_cscov, skb->len);
		return 1;
	}

	/* If header checksum is incorrect, drop packet and return.
	 * (This step is completed in the AF-dependent functions.) */
	skb->csum = skb_checksum(skb, 0, cscov, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(dccp_invalid_packet);

/* this is called when real data arrives */
static int dccp_v4_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	const struct iphdr *iph;
	bool refcounted;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	iph = ip_hdr(skb);
	/* Step 1: If header checksum is incorrect, drop packet and return */
	if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	dccp_pr_debug("%8.8s src=%pI4@%-5d dst=%pI4@%-5d seq=%llu",
		      dccp_packet_name(dh->dccph_type),
		      &iph->saddr, ntohs(dh->dccph_sport),
		      &iph->daddr, ntohs(dh->dccph_dport),
		      (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);

	if (dccp_packet_without_ack(skb)) {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
		dccp_pr_debug_cat("\n");
	} else {
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
		dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
	}

lookup:
	sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
			       dh->dccph_sport, dh->dccph_dport, 0, &refcounted);
	if (!sk) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

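		/* A request socket only pins its listener.  If the listener
		 * has meanwhile left LISTEN, drop the request and redo the
		 * lookup from scratch.
		 */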
		sk = req->rsk_listener;
		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = dccp_check_req(sk, skb, req);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
		} else if (dccp_child_process(sk, nsk, skb)) {
			dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: "Such packets SHOULD be reported using Data Dropped
		 *         options (Section 11.7) with Drop Code 0, Protocol
		 *         Constraints." */
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset_ct(skb);

	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);

no_dccp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v4_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	if (refcounted)
		sock_put(sk);
	goto discard_it;
}

static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= dccp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v4_conn_request,
	.syn_recv_sock	= dccp_v4_request_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in),
};

static int dccp_v4_init_sock(struct sock *sk)
{
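	/* dccp_init_sock() is told whether a control socket has already been
	 * initialised; the flag below is set exactly once, by the first
	 * socket that gets here.
	 */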
	static __u8 dccp_v4_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v4_ctl_sock_initialized))
			dccp_v4_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
	}

	return err;
}

static struct timewait_sock_ops dccp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
};

static struct proto dccp_v4_prot = {
	.name		= "DCCP",
	.owner		= THIS_MODULE,
	.close		= dccp_close,
	.connect	= dccp_v4_connect,
	.disconnect	= dccp_disconnect,
	.ioctl		= dccp_ioctl,
	.init		= dccp_v4_init_sock,
	.setsockopt	= dccp_setsockopt,
	.getsockopt	= dccp_getsockopt,
	.sendmsg	= dccp_sendmsg,
	.recvmsg	= dccp_recvmsg,
	.backlog_rcv	= dccp_v4_do_rcv,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.accept		= inet_csk_accept,
	.get_port	= inet_csk_get_port,
	.shutdown	= dccp_shutdown,
	.destroy	= dccp_destroy_sock,
	.orphan_count	= &dccp_orphan_count,
	.max_header	= MAX_DCCP_HEADER,
	.obj_size	= sizeof(struct dccp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.rsk_prot	= &dccp_request_sock_ops,
	.twsk_prot	= &dccp_timewait_sock_ops,
	.h.hashinfo	= &dccp_hashinfo,
};

static const struct net_protocol dccp_v4_protocol = {
	.handler	= dccp_v4_rcv,
	.err_handler	= dccp_v4_err,
	.no_policy	= 1,
	.icmp_strict_tag_validation = 1,
};

static const struct proto_ops inet_dccp_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.release	= inet_release,
	.bind		= inet_bind,
	.connect	= inet_stream_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= inet_accept,
	.getname	= inet_getname,
	/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
	.poll		= dccp_poll,
	.ioctl		= inet_ioctl,
	.gettstamp	= sock_gettstamp,
	/* FIXME: work on inet_listen to rename it to sock_common_listen */
	.listen		= inet_dccp_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
};

static struct inet_protosw dccp_v4_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v4_prot,
	.ops		= &inet_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

static int __net_init dccp_v4_init_net(struct net *net)
{
	struct dccp_v4_pernet *pn = net_generic(net, dccp_v4_pernet_id);

	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&pn->v4_ctl_sk, PF_INET,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v4_exit_net(struct net *net)
{
	struct dccp_v4_pernet *pn = net_generic(net, dccp_v4_pernet_id);

	inet_ctl_sock_destroy(pn->v4_ctl_sk);
}

static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&dccp_hashinfo);
}

static struct pernet_operations dccp_v4_ops = {
	.init		= dccp_v4_init_net,
	.exit		= dccp_v4_exit_net,
	.exit_batch	= dccp_v4_exit_batch,
	.id		= &dccp_v4_pernet_id,
	.size		= sizeof(struct dccp_v4_pernet),
};

static int __init dccp_v4_init(void)
{
	int err = proto_register(&dccp_v4_prot, 1);

	if (err)
		goto out;

	inet_register_protosw(&dccp_v4_protosw);

	err = register_pernet_subsys(&dccp_v4_ops);
	if (err)
		goto out_destroy_ctl_sock;

	err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
	if (err)
		goto out_proto_unregister;

out:
	return err;
out_proto_unregister:
	unregister_pernet_subsys(&dccp_v4_ops);
out_destroy_ctl_sock:
	inet_unregister_protosw(&dccp_v4_protosw);
	proto_unregister(&dccp_v4_prot);
	goto out;
}

static void __exit dccp_v4_exit(void)
{
	inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
	unregister_pernet_subsys(&dccp_v4_ops);
	inet_unregister_protosw(&dccp_v4_protosw);
	proto_unregister(&dccp_v4_prot);
}

module_init(dccp_v4_init);
module_exit(dccp_v4_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <[email protected]>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");