/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/net.h>
16 #include <linux/skbuff.h>
17 #include <linux/slab.h>
18 #include <linux/udp.h>
20 #include <linux/hashtable.h>
22 #include <net/af_rxrpc.h>
23 #include "ar-internal.h"
25 static void rxrpc_local_processor(struct work_struct *);
26 static void rxrpc_local_rcu(struct rcu_head *);
29 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
30 * same or greater than.
32 * We explicitly don't compare the RxRPC service ID as we want to reject
33 * conflicting uses by differing services. Further, we don't want to share
34 * addresses with different options (IPv6), so we don't compare those bits
37 static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
38 const struct sockaddr_rxrpc *srx)
42 diff = ((local->srx.transport_type - srx->transport_type) ?:
43 (local->srx.transport_len - srx->transport_len) ?:
44 (local->srx.transport.family - srx->transport.family));
48 switch (srx->transport.family) {
50 /* If the choice of UDP port is left up to the transport, then
51 * the endpoint record doesn't match.
53 return ((u16 __force)local->srx.transport.sin.sin_port -
54 (u16 __force)srx->transport.sin.sin_port) ?:
55 memcmp(&local->srx.transport.sin.sin_addr,
56 &srx->transport.sin.sin_addr,
57 sizeof(struct in_addr));
58 #ifdef CONFIG_AF_RXRPC_IPV6
60 /* If the choice of UDP6 port is left up to the transport, then
61 * the endpoint record doesn't match.
63 return ((u16 __force)local->srx.transport.sin6.sin6_port -
64 (u16 __force)srx->transport.sin6.sin6_port) ?:
65 memcmp(&local->srx.transport.sin6.sin6_addr,
66 &srx->transport.sin6.sin6_addr,
67 sizeof(struct in6_addr));
75 * Allocate a new local endpoint.
77 static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
78 const struct sockaddr_rxrpc *srx)
80 struct rxrpc_local *local;
82 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
84 atomic_set(&local->usage, 1);
86 INIT_LIST_HEAD(&local->link);
87 INIT_WORK(&local->processor, rxrpc_local_processor);
88 init_rwsem(&local->defrag_sem);
89 skb_queue_head_init(&local->reject_queue);
90 skb_queue_head_init(&local->event_queue);
91 local->client_conns = RB_ROOT;
92 spin_lock_init(&local->client_conns_lock);
93 spin_lock_init(&local->lock);
94 rwlock_init(&local->services_lock);
95 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
96 memcpy(&local->srx, srx, sizeof(*srx));
97 local->srx.srx_service = 0;
98 trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
101 _leave(" = %p", local);
106 * create the local socket
107 * - must be called with rxrpc_local_mutex locked
109 static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
115 local, local->srx.transport_type, local->srx.transport.family);
117 /* create a socket to represent the local endpoint */
118 ret = sock_create_kern(net, local->srx.transport.family,
119 local->srx.transport_type, 0, &local->socket);
121 _leave(" = %d [socket]", ret);
125 /* if a local address was supplied then bind it */
126 if (local->srx.transport_len > sizeof(sa_family_t)) {
128 ret = kernel_bind(local->socket,
129 (struct sockaddr *)&local->srx.transport,
130 local->srx.transport_len);
132 _debug("bind failed %d", ret);
137 /* we want to receive ICMP errors */
139 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
140 (char *) &opt, sizeof(opt));
142 _debug("setsockopt failed");
146 /* we want to set the don't fragment bit */
147 opt = IP_PMTUDISC_DO;
148 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
149 (char *) &opt, sizeof(opt));
151 _debug("setsockopt failed");
155 /* set the socket up */
156 sock = local->socket->sk;
157 sock->sk_user_data = local;
158 sock->sk_data_ready = rxrpc_data_ready;
159 sock->sk_error_report = rxrpc_error_report;
164 kernel_sock_shutdown(local->socket, SHUT_RDWR);
165 local->socket->sk->sk_user_data = NULL;
166 sock_release(local->socket);
167 local->socket = NULL;
169 _leave(" = %d", ret);
174 * Look up or create a new local endpoint using the specified local address.
176 struct rxrpc_local *rxrpc_lookup_local(struct net *net,
177 const struct sockaddr_rxrpc *srx)
179 struct rxrpc_local *local;
180 struct rxrpc_net *rxnet = rxrpc_net(net);
181 struct list_head *cursor;
186 _enter("{%d,%d,%pISp}",
187 srx->transport_type, srx->transport.family, &srx->transport);
189 mutex_lock(&rxnet->local_mutex);
191 for (cursor = rxnet->local_endpoints.next;
192 cursor != &rxnet->local_endpoints;
193 cursor = cursor->next) {
194 local = list_entry(cursor, struct rxrpc_local, link);
196 diff = rxrpc_local_cmp_key(local, srx);
202 /* Services aren't allowed to share transport sockets, so
203 * reject that here. It is possible that the object is dying -
204 * but it may also still have the local transport address that
207 if (srx->srx_service) {
212 /* Found a match. We replace a dying object. Attempting to
213 * bind the transport socket may still fail if we're attempting
214 * to use a local address that the dying object is still using.
216 if (!rxrpc_get_local_maybe(local)) {
217 cursor = cursor->next;
218 list_del_init(&local->link);
226 local = rxrpc_alloc_local(rxnet, srx);
230 ret = rxrpc_open_socket(local, net);
234 list_add_tail(&local->link, cursor);
238 mutex_unlock(&rxnet->local_mutex);
240 _net("LOCAL %s %d {%pISp}",
241 age, local->debug_id, &local->srx.transport);
243 _leave(" = %p", local);
249 mutex_unlock(&rxnet->local_mutex);
251 _leave(" = %d", ret);
255 mutex_unlock(&rxnet->local_mutex);
256 _leave(" = -EADDRINUSE");
257 return ERR_PTR(-EADDRINUSE);
261 * Get a ref on a local endpoint.
263 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
265 const void *here = __builtin_return_address(0);
268 n = atomic_inc_return(&local->usage);
269 trace_rxrpc_local(local, rxrpc_local_got, n, here);
274 * Get a ref on a local endpoint unless its usage has already reached 0.
276 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
278 const void *here = __builtin_return_address(0);
281 int n = __atomic_add_unless(&local->usage, 1, 0);
283 trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
291 * Queue a local endpoint.
293 void rxrpc_queue_local(struct rxrpc_local *local)
295 const void *here = __builtin_return_address(0);
297 if (rxrpc_queue_work(&local->processor))
298 trace_rxrpc_local(local, rxrpc_local_queued,
299 atomic_read(&local->usage), here);
303 * A local endpoint reached its end of life.
305 static void __rxrpc_put_local(struct rxrpc_local *local)
307 _enter("%d", local->debug_id);
308 rxrpc_queue_work(&local->processor);
312 * Drop a ref on a local endpoint.
314 void rxrpc_put_local(struct rxrpc_local *local)
316 const void *here = __builtin_return_address(0);
320 n = atomic_dec_return(&local->usage);
321 trace_rxrpc_local(local, rxrpc_local_put, n, here);
324 __rxrpc_put_local(local);
329 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
332 * Closing the socket cannot be done from bottom half context or RCU callback
333 * context because it might sleep.
335 static void rxrpc_local_destroyer(struct rxrpc_local *local)
337 struct socket *socket = local->socket;
338 struct rxrpc_net *rxnet = local->rxnet;
340 _enter("%d", local->debug_id);
342 /* We can get a race between an incoming call packet queueing the
343 * processor again and the work processor starting the destruction
344 * process which will shut down the UDP socket.
347 _leave(" [already dead]");
352 mutex_lock(&rxnet->local_mutex);
353 list_del_init(&local->link);
354 mutex_unlock(&rxnet->local_mutex);
356 ASSERT(RB_EMPTY_ROOT(&local->client_conns));
357 ASSERT(!local->service);
360 local->socket = NULL;
361 kernel_sock_shutdown(socket, SHUT_RDWR);
362 socket->sk->sk_user_data = NULL;
363 sock_release(socket);
366 /* At this point, there should be no more packets coming in to the
369 rxrpc_purge_queue(&local->reject_queue);
370 rxrpc_purge_queue(&local->event_queue);
372 _debug("rcu local %d", local->debug_id);
373 call_rcu(&local->rcu, rxrpc_local_rcu);
377 * Process events on an endpoint
379 static void rxrpc_local_processor(struct work_struct *work)
381 struct rxrpc_local *local =
382 container_of(work, struct rxrpc_local, processor);
385 trace_rxrpc_local(local, rxrpc_local_processing,
386 atomic_read(&local->usage), NULL);
390 if (atomic_read(&local->usage) == 0)
391 return rxrpc_local_destroyer(local);
393 if (!skb_queue_empty(&local->reject_queue)) {
394 rxrpc_reject_packets(local);
398 if (!skb_queue_empty(&local->event_queue)) {
399 rxrpc_process_local_events(local);
406 * Destroy a local endpoint after the RCU grace period expires.
408 static void rxrpc_local_rcu(struct rcu_head *rcu)
410 struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);
412 _enter("%d", local->debug_id);
414 ASSERT(!work_pending(&local->processor));
416 _net("DESTROY LOCAL %d", local->debug_id);
422 * Verify the local endpoint list is empty by this point.
424 void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
426 struct rxrpc_local *local;
430 flush_workqueue(rxrpc_workqueue);
432 if (!list_empty(&rxnet->local_endpoints)) {
433 mutex_lock(&rxnet->local_mutex);
434 list_for_each_entry(local, &rxnet->local_endpoints, link) {
435 pr_err("AF_RXRPC: Leaked local %p {%d}\n",
436 local, atomic_read(&local->usage));
438 mutex_unlock(&rxnet->local_mutex);