uclinux-h8/linux.git: drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT      100 /* ms */
20
21 /**
22  * struct tb_cm - Simple Thunderbolt connection manager
23  * @tunnel_list: List of active tunnels
24  * @dp_resources: List of available DP resources for DP tunneling
25  * @hotplug_active: tb_handle_hotplug will stop progressing plug
26  *                  events and exit if this is not set (it needs to
27  *                  acquire the lock one more time). Used to drain wq
28  *                  after cfg has been paused.
29  * @remove_work: Work used to remove any unplugged routers after
30  *               runtime resume
31  */
32 struct tb_cm {
33         struct list_head tunnel_list;
34         struct list_head dp_resources;
35         bool hotplug_active;
36         struct delayed_work remove_work;
37 };
38
39 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
40 {
41         return ((void *)tcm - sizeof(struct tb));
42 }
43
44 struct tb_hotplug_event {
45         struct work_struct work;
46         struct tb *tb;
47         u64 route;
48         u8 port;
49         bool unplug;
50 };
51
52 static void tb_handle_hotplug(struct work_struct *work);
53
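/*
 * tb_queue_hotplug() - allocate a hotplug event and queue it to be
 * handled by tb_handle_hotplug() on the domain workqueue. The event is
 * silently dropped if the allocation fails.
 */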
54 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
55 {
56         struct tb_hotplug_event *ev;
57
58         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
59         if (!ev)
60                 return;
61
62         ev->tb = tb;
63         ev->route = route;
64         ev->port = port;
65         ev->unplug = unplug;
66         INIT_WORK(&ev->work, tb_handle_hotplug);
67         queue_work(tb->wq, &ev->work);
68 }
69
70 /* enumeration & hot plug handling */
71
72 static void tb_add_dp_resources(struct tb_switch *sw)
73 {
74         struct tb_cm *tcm = tb_priv(sw->tb);
75         struct tb_port *port;
76
77         tb_switch_for_each_port(sw, port) {
78                 if (!tb_port_is_dpin(port))
79                         continue;
80
81                 if (!tb_switch_query_dp_resource(sw, port))
82                         continue;
83
84                 list_add_tail(&port->list, &tcm->dp_resources);
85                 tb_port_dbg(port, "DP IN resource available\n");
86         }
87 }
88
89 static void tb_remove_dp_resources(struct tb_switch *sw)
90 {
91         struct tb_cm *tcm = tb_priv(sw->tb);
92         struct tb_port *port, *tmp;
93
94         /* Clear children resources first */
95         tb_switch_for_each_port(sw, port) {
96                 if (tb_port_has_remote(port))
97                         tb_remove_dp_resources(port->remote->sw);
98         }
99
100         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
101                 if (port->sw == sw) {
102                         tb_port_dbg(port, "DP IN resource unavailable\n");
103                         list_del_init(&port->list);
104                 }
105         }
106 }
107
108 static void tb_switch_discover_tunnels(struct tb_switch *sw,
109                                        struct list_head *list,
110                                        bool alloc_hopids)
111 {
112         struct tb *tb = sw->tb;
113         struct tb_port *port;
114
115         tb_switch_for_each_port(sw, port) {
116                 struct tb_tunnel *tunnel = NULL;
117
118                 switch (port->config.type) {
119                 case TB_TYPE_DP_HDMI_IN:
120                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
121                         break;
122
123                 case TB_TYPE_PCIE_DOWN:
124                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
125                         break;
126
127                 case TB_TYPE_USB3_DOWN:
128                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
129                         break;
130
131                 default:
132                         break;
133                 }
134
135                 if (tunnel)
136                         list_add_tail(&tunnel->list, list);
137         }
138
139         tb_switch_for_each_port(sw, port) {
140                 if (tb_port_has_remote(port)) {
141                         tb_switch_discover_tunnels(port->remote->sw, list,
142                                                    alloc_hopids);
143                 }
144         }
145 }
146
147 static void tb_discover_tunnels(struct tb *tb)
148 {
149         struct tb_cm *tcm = tb_priv(tb);
150         struct tb_tunnel *tunnel;
151
152         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
153
154         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
155                 if (tb_tunnel_is_pci(tunnel)) {
156                         struct tb_switch *parent = tunnel->dst_port->sw;
157
158                         while (parent != tunnel->src_port->sw) {
159                                 parent->boot = true;
160                                 parent = tb_switch_parent(parent);
161                         }
162                 } else if (tb_tunnel_is_dp(tunnel)) {
163                         /* Keep the domain from powering down */
164                         pm_runtime_get_sync(&tunnel->src_port->sw->dev);
165                         pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
166                 }
167         }
168 }
169
170 static int tb_port_configure_xdomain(struct tb_port *port)
171 {
172         /*
173          * XDomain paths currently only support single lane so we must
174          * disable the other lane according to USB4 spec.
175          */
176         tb_port_disable(port->dual_link_port);
177
178         if (tb_switch_is_usb4(port->sw))
179                 return usb4_port_configure_xdomain(port);
180         return tb_lc_configure_xdomain(port);
181 }
182
183 static void tb_port_unconfigure_xdomain(struct tb_port *port)
184 {
185         if (tb_switch_is_usb4(port->sw))
186                 usb4_port_unconfigure_xdomain(port);
187         else
188                 tb_lc_unconfigure_xdomain(port);
189
190         tb_port_enable(port->dual_link_port);
191 }
192
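/*
 * tb_scan_xdomain() - check whether the port leads to another Thunderbolt
 * domain (host-to-host link) and if so allocate and register an XDomain
 * connection for it, unless one already exists for the same route.
 */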
193 static void tb_scan_xdomain(struct tb_port *port)
194 {
195         struct tb_switch *sw = port->sw;
196         struct tb *tb = sw->tb;
197         struct tb_xdomain *xd;
198         u64 route;
199
200         if (!tb_is_xdomain_enabled())
201                 return;
202
203         route = tb_downstream_route(port);
204         xd = tb_xdomain_find_by_route(tb, route);
205         if (xd) {
206                 tb_xdomain_put(xd);
207                 return;
208         }
209
210         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
211                               NULL);
212         if (xd) {
213                 tb_port_at(route, sw)->xdomain = xd;
214                 tb_port_configure_xdomain(port);
215                 tb_xdomain_add(xd);
216         }
217 }
218
219 static int tb_enable_tmu(struct tb_switch *sw)
220 {
221         int ret;
222
223         /* If it is already enabled in correct mode, don't touch it */
224         if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
225                 return 0;
226
227         ret = tb_switch_tmu_disable(sw);
228         if (ret)
229                 return ret;
230
231         ret = tb_switch_tmu_post_time(sw);
232         if (ret)
233                 return ret;
234
235         return tb_switch_tmu_enable(sw);
236 }
237
238 /**
239  * tb_find_unused_port() - return the first inactive port on @sw
240  * @sw: Switch to find the port on
241  * @type: Port type to look for
242  */
243 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
244                                            enum tb_port_type type)
245 {
246         struct tb_port *port;
247
248         tb_switch_for_each_port(sw, port) {
249                 if (tb_is_upstream_port(port))
250                         continue;
251                 if (port->config.type != type)
252                         continue;
253                 if (!port->cap_adap)
254                         continue;
255                 if (tb_port_is_enabled(port))
256                         continue;
257                 return port;
258         }
259         return NULL;
260 }
261
262 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
263                                          const struct tb_port *port)
264 {
265         struct tb_port *down;
266
267         down = usb4_switch_map_usb3_down(sw, port);
268         if (down && !tb_usb3_port_is_enabled(down))
269                 return down;
270         return NULL;
271 }
272
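/*
 * tb_find_tunnel() - return an active tunnel of @type whose source or
 * destination adapter matches the given (non-NULL) port, or NULL if no
 * such tunnel exists.
 */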
273 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
274                                         struct tb_port *src_port,
275                                         struct tb_port *dst_port)
276 {
277         struct tb_cm *tcm = tb_priv(tb);
278         struct tb_tunnel *tunnel;
279
280         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
281                 if (tunnel->type == type &&
282                     ((src_port && src_port == tunnel->src_port) ||
283                      (dst_port && dst_port == tunnel->dst_port))) {
284                         return tunnel;
285                 }
286         }
287
288         return NULL;
289 }
290
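/*
 * tb_find_first_usb3_tunnel() - return the USB3 tunnel that starts from
 * the host router and feeds the branch containing @src_port/@dst_port
 * (the first hop of the USB3 chain), or NULL if there is none.
 */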
291 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
292                                                    struct tb_port *src_port,
293                                                    struct tb_port *dst_port)
294 {
295         struct tb_port *port, *usb3_down;
296         struct tb_switch *sw;
297
298         /* Pick the router that is deepest in the topology */
299         if (dst_port->sw->config.depth > src_port->sw->config.depth)
300                 sw = dst_port->sw;
301         else
302                 sw = src_port->sw;
303
304         /* Can't be the host router */
305         if (sw == tb->root_switch)
306                 return NULL;
307
308         /* Find the downstream USB4 port that leads to this router */
309         port = tb_port_at(tb_route(sw), tb->root_switch);
310         /* Find the corresponding host router USB3 downstream port */
311         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
312         if (!usb3_down)
313                 return NULL;
314
315         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
316 }
317
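/*
 * tb_available_bandwidth() - walk the links between @src_port and
 * @dst_port and report the minimum bandwidth (Mb/s) still available in
 * each direction, after subtracting a 10% guard band per link and the
 * bandwidth already consumed by existing DP tunnels crossing each link
 * and by the first hop USB3 tunnel of the branch.
 */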
318 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
319         struct tb_port *dst_port, int *available_up, int *available_down)
320 {
321         int usb3_consumed_up, usb3_consumed_down, ret;
322         struct tb_cm *tcm = tb_priv(tb);
323         struct tb_tunnel *tunnel;
324         struct tb_port *port;
325
326         tb_port_dbg(dst_port, "calculating available bandwidth\n");
327
328         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
329         if (tunnel) {
330                 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
331                                                    &usb3_consumed_down);
332                 if (ret)
333                         return ret;
334         } else {
335                 usb3_consumed_up = 0;
336                 usb3_consumed_down = 0;
337         }
338
339         *available_up = *available_down = 40000;
340
341         /* Find the minimum available bandwidth over all links */
342         tb_for_each_port_on_path(src_port, dst_port, port) {
343                 int link_speed, link_width, up_bw, down_bw;
344
345                 if (!tb_port_is_null(port))
346                         continue;
347
348                 if (tb_is_upstream_port(port)) {
349                         link_speed = port->sw->link_speed;
350                 } else {
351                         link_speed = tb_port_get_link_speed(port);
352                         if (link_speed < 0)
353                                 return link_speed;
354                 }
355
356                 link_width = port->bonded ? 2 : 1;
357
358                 up_bw = link_speed * link_width * 1000; /* Mb/s */
359                 /* Leave 10% guard band */
360                 up_bw -= up_bw / 10;
361                 down_bw = up_bw;
362
363                 tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
364
365                 /*
366                  * Find all DP tunnels that cross the port and reduce
367                  * their consumed bandwidth from the available.
368                  */
369                 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
370                         int dp_consumed_up, dp_consumed_down;
371
372                         if (!tb_tunnel_is_dp(tunnel))
373                                 continue;
374
375                         if (!tb_tunnel_port_on_path(tunnel, port))
376                                 continue;
377
378                         ret = tb_tunnel_consumed_bandwidth(tunnel,
379                                                            &dp_consumed_up,
380                                                            &dp_consumed_down);
381                         if (ret)
382                                 return ret;
383
384                         up_bw -= dp_consumed_up;
385                         down_bw -= dp_consumed_down;
386                 }
387
388                 /*
389                  * If USB3 is tunneled from the host router down to the
390                  * branch leading to port we need to take USB3 consumed
391          * bandwidth into account regardless of whether it actually
392                  * crosses the port.
393                  */
394                 up_bw -= usb3_consumed_up;
395                 down_bw -= usb3_consumed_down;
396
397                 if (up_bw < *available_up)
398                         *available_up = up_bw;
399                 if (down_bw < *available_down)
400                         *available_down = down_bw;
401         }
402
403         if (*available_up < 0)
404                 *available_up = 0;
405         if (*available_down < 0)
406                 *available_down = 0;
407
408         return 0;
409 }
410
411 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
412                                             struct tb_port *src_port,
413                                             struct tb_port *dst_port)
414 {
415         struct tb_tunnel *tunnel;
416
417         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
418         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
419 }
420
421 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
422                                       struct tb_port *dst_port)
423 {
424         int ret, available_up, available_down;
425         struct tb_tunnel *tunnel;
426
427         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
428         if (!tunnel)
429                 return;
430
431         tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
432
433         /*
434          * Calculate available bandwidth for the first hop USB3 tunnel.
435          * That determines the whole USB3 bandwidth for this branch.
436          */
437         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
438                                      &available_up, &available_down);
439         if (ret) {
440                 tb_warn(tb, "failed to calculate available bandwidth\n");
441                 return;
442         }
443
444         tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
445                available_up, available_down);
446
447         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
448 }
449
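/*
 * tb_tunnel_usb3() - set up a USB3 tunnel between @sw and the USB3
 * downstream adapter right above it. When the parent is not the host
 * router, unused USB3 bandwidth on the branch is released before the
 * tunnel is allocated and reclaimed again afterwards.
 */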
450 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
451 {
452         struct tb_switch *parent = tb_switch_parent(sw);
453         int ret, available_up, available_down;
454         struct tb_port *up, *down, *port;
455         struct tb_cm *tcm = tb_priv(tb);
456         struct tb_tunnel *tunnel;
457
458         if (!tb_acpi_may_tunnel_usb3()) {
459                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
460                 return 0;
461         }
462
463         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
464         if (!up)
465                 return 0;
466
467         if (!sw->link_usb4)
468                 return 0;
469
470         /*
471          * Look up available down port. Since we are chaining it should
472          * be found right above this switch.
473          */
474         port = tb_port_at(tb_route(sw), parent);
475         down = tb_find_usb3_down(parent, port);
476         if (!down)
477                 return 0;
478
479         if (tb_route(parent)) {
480                 struct tb_port *parent_up;
481                 /*
482                  * Check first that the parent switch has its upstream USB3
483                  * port enabled. Otherwise the chain is not complete and
484                  * there is no point setting up a new tunnel.
485                  */
486                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
487                 if (!parent_up || !tb_port_is_enabled(parent_up))
488                         return 0;
489
490                 /* Make all unused bandwidth available for the new tunnel */
491                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
492                 if (ret)
493                         return ret;
494         }
495
496         ret = tb_available_bandwidth(tb, down, up, &available_up,
497                                      &available_down);
498         if (ret)
499                 goto err_reclaim;
500
501         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
502                     available_up, available_down);
503
504         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
505                                       available_down);
506         if (!tunnel) {
507                 ret = -ENOMEM;
508                 goto err_reclaim;
509         }
510
511         if (tb_tunnel_activate(tunnel)) {
512                 tb_port_info(up,
513                              "USB3 tunnel activation failed, aborting\n");
514                 ret = -EIO;
515                 goto err_free;
516         }
517
518         list_add_tail(&tunnel->list, &tcm->tunnel_list);
519         if (tb_route(parent))
520                 tb_reclaim_usb3_bandwidth(tb, down, up);
521
522         return 0;
523
524 err_free:
525         tb_tunnel_free(tunnel);
526 err_reclaim:
527         if (tb_route(parent))
528                 tb_reclaim_usb3_bandwidth(tb, down, up);
529
530         return ret;
531 }
532
533 static int tb_create_usb3_tunnels(struct tb_switch *sw)
534 {
535         struct tb_port *port;
536         int ret;
537
538         if (!tb_acpi_may_tunnel_usb3())
539                 return 0;
540
541         if (tb_route(sw)) {
542                 ret = tb_tunnel_usb3(sw->tb, sw);
543                 if (ret)
544                         return ret;
545         }
546
547         tb_switch_for_each_port(sw, port) {
548                 if (!tb_port_has_remote(port))
549                         continue;
550                 ret = tb_create_usb3_tunnels(port->remote->sw);
551                 if (ret)
552                         return ret;
553         }
554
555         return 0;
556 }
557
558 static void tb_scan_port(struct tb_port *port);
559
560 /*
561  * tb_scan_switch() - scan for and initialize downstream switches
562  */
563 static void tb_scan_switch(struct tb_switch *sw)
564 {
565         struct tb_port *port;
566
567         pm_runtime_get_sync(&sw->dev);
568
569         tb_switch_for_each_port(sw, port)
570                 tb_scan_port(port);
571
572         pm_runtime_mark_last_busy(&sw->dev);
573         pm_runtime_put_autosuspend(&sw->dev);
574 }
575
576 /*
577  * tb_scan_port() - check for and initialize switches below port
578  */
579 static void tb_scan_port(struct tb_port *port)
580 {
581         struct tb_cm *tcm = tb_priv(port->sw->tb);
582         struct tb_port *upstream_port;
583         struct tb_switch *sw;
584
585         if (tb_is_upstream_port(port))
586                 return;
587
588         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
589             !tb_dp_port_is_enabled(port)) {
590                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
591                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
592                                  false);
593                 return;
594         }
595
596         if (port->config.type != TB_TYPE_PORT)
597                 return;
598         if (port->dual_link_port && port->link_nr)
599                 return; /*
600                          * Downstream switch is reachable through two ports.
601                          * Only scan on the primary port (link_nr == 0).
602                          */
603         if (tb_wait_for_port(port, false) <= 0)
604                 return;
605         if (port->remote) {
606                 tb_port_dbg(port, "port already has a remote\n");
607                 return;
608         }
609
610         tb_retimer_scan(port, true);
611
612         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
613                              tb_downstream_route(port));
614         if (IS_ERR(sw)) {
615                 /*
616                  * If there is an error accessing the connected switch
617                  * it may be connected to another domain. Also we allow
618                  * the other domain to be connected to a max depth switch.
619                  */
620                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
621                         tb_scan_xdomain(port);
622                 return;
623         }
624
625         if (tb_switch_configure(sw)) {
626                 tb_switch_put(sw);
627                 return;
628         }
629
630         /*
631          * If there was previously another domain connected remove it
632          * first.
633          */
634         if (port->xdomain) {
635                 tb_xdomain_remove(port->xdomain);
636                 tb_port_unconfigure_xdomain(port);
637                 port->xdomain = NULL;
638         }
639
640         /*
641          * Do not send uevents until we have discovered all existing
642          * tunnels and know which switches were authorized already by
643          * the boot firmware.
644          */
645         if (!tcm->hotplug_active)
646                 dev_set_uevent_suppress(&sw->dev, true);
647
648         /*
649          * At the moment we can support runtime PM only on Thunderbolt 2
650          * and beyond (devices with LC).
651          */
652         sw->rpm = sw->generation > 1;
653
654         if (tb_switch_add(sw)) {
655                 tb_switch_put(sw);
656                 return;
657         }
658
659         /* Link the switches using both links if available */
660         upstream_port = tb_upstream_port(sw);
661         port->remote = upstream_port;
662         upstream_port->remote = port;
663         if (port->dual_link_port && upstream_port->dual_link_port) {
664                 port->dual_link_port->remote = upstream_port->dual_link_port;
665                 upstream_port->dual_link_port->remote = port->dual_link_port;
666         }
667
668         /* Enable lane bonding if supported */
669         tb_switch_lane_bonding_enable(sw);
670         /* Set the link configured */
671         tb_switch_configure_link(sw);
672         if (tb_switch_enable_clx(sw, TB_CL0S))
673                 tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
674
675         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
676                                 tb_switch_is_clx_enabled(sw));
677
678         if (tb_enable_tmu(sw))
679                 tb_sw_warn(sw, "failed to enable TMU\n");
680
681         /* Scan upstream retimers */
682         tb_retimer_scan(upstream_port, true);
683
684         /*
685          * Create USB 3.x tunnels only when the switch is plugged into the
686          * domain. This is because we also scan the domain during discovery
687          * and want to discover existing USB 3.x tunnels before we create
688          * any new ones.
689          */
690         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
691                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
692
693         tb_add_dp_resources(sw);
694         tb_scan_switch(sw);
695 }
696
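/*
 * tb_deactivate_and_free_tunnel() - deactivate @tunnel and release what
 * it holds. For DP tunnels the DP IN resource and the runtime PM
 * references are dropped; for DP and USB3 tunnels the freed bandwidth is
 * handed back to USB3. The tunnel structure itself is freed last.
 */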
697 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
698 {
699         struct tb_port *src_port, *dst_port;
700         struct tb *tb;
701
702         if (!tunnel)
703                 return;
704
705         tb_tunnel_deactivate(tunnel);
706         list_del(&tunnel->list);
707
708         tb = tunnel->tb;
709         src_port = tunnel->src_port;
710         dst_port = tunnel->dst_port;
711
712         switch (tunnel->type) {
713         case TB_TUNNEL_DP:
714                 /*
715                  * In case of DP tunnel make sure the DP IN resource is
716                  * deallocated properly.
717                  */
718                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
719                 /* Now we can allow the domain to runtime suspend again */
720                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
721                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
722                 pm_runtime_mark_last_busy(&src_port->sw->dev);
723                 pm_runtime_put_autosuspend(&src_port->sw->dev);
724                 fallthrough;
725
726         case TB_TUNNEL_USB3:
727                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
728                 break;
729
730         default:
731                 /*
732                  * PCIe and DMA tunnels do not consume guaranteed
733                  * bandwidth.
734                  */
735                 break;
736         }
737
738         tb_tunnel_free(tunnel);
739 }
740
741 /*
742  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
743  */
744 static void tb_free_invalid_tunnels(struct tb *tb)
745 {
746         struct tb_cm *tcm = tb_priv(tb);
747         struct tb_tunnel *tunnel;
748         struct tb_tunnel *n;
749
750         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
751                 if (tb_tunnel_is_invalid(tunnel))
752                         tb_deactivate_and_free_tunnel(tunnel);
753         }
754 }
755
756 /*
757  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
758  */
759 static void tb_free_unplugged_children(struct tb_switch *sw)
760 {
761         struct tb_port *port;
762
763         tb_switch_for_each_port(sw, port) {
764                 if (!tb_port_has_remote(port))
765                         continue;
766
767                 if (port->remote->sw->is_unplugged) {
768                         tb_retimer_remove_all(port);
769                         tb_remove_dp_resources(port->remote->sw);
770                         tb_switch_unconfigure_link(port->remote->sw);
771                         tb_switch_lane_bonding_disable(port->remote->sw);
772                         tb_switch_remove(port->remote->sw);
773                         port->remote = NULL;
774                         if (port->dual_link_port)
775                                 port->dual_link_port->remote = NULL;
776                 } else {
777                         tb_free_unplugged_children(port->remote->sw);
778                 }
779         }
780 }
781
782 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
783                                          const struct tb_port *port)
784 {
785         struct tb_port *down = NULL;
786
787         /*
788          * To keep plugging devices consistently in the same PCIe
789          * hierarchy, do mapping here for switch downstream PCIe ports.
790          */
791         if (tb_switch_is_usb4(sw)) {
792                 down = usb4_switch_map_pcie_down(sw, port);
793         } else if (!tb_route(sw)) {
794                 int phy_port = tb_phy_port_from_link(port->port);
795                 int index;
796
797                 /*
798                  * Hard-coded Thunderbolt port to PCIe down port mapping
799                  * per controller.
800                  */
801                 if (tb_switch_is_cactus_ridge(sw) ||
802                     tb_switch_is_alpine_ridge(sw))
803                         index = !phy_port ? 6 : 7;
804                 else if (tb_switch_is_falcon_ridge(sw))
805                         index = !phy_port ? 6 : 8;
806                 else if (tb_switch_is_titan_ridge(sw))
807                         index = !phy_port ? 8 : 9;
808                 else
809                         goto out;
810
811                 /* Validate the hard-coding */
812                 if (WARN_ON(index > sw->config.max_port_number))
813                         goto out;
814
815                 down = &sw->ports[index];
816         }
817
818         if (down) {
819                 if (WARN_ON(!tb_port_is_pcie_down(down)))
820                         goto out;
821                 if (tb_pci_port_is_enabled(down))
822                         goto out;
823
824                 return down;
825         }
826
827 out:
828         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
829 }
830
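/*
 * tb_find_dp_out() - pick an unused DP OUT adapter for @in, only
 * considering adapters on the host router or below the same host router
 * downstream port as @in so the tunnel stays within a single branch.
 */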
831 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
832 {
833         struct tb_port *host_port, *port;
834         struct tb_cm *tcm = tb_priv(tb);
835
836         host_port = tb_route(in->sw) ?
837                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
838
839         list_for_each_entry(port, &tcm->dp_resources, list) {
840                 if (!tb_port_is_dpout(port))
841                         continue;
842
843                 if (tb_port_is_enabled(port)) {
844                         tb_port_dbg(port, "in use\n");
845                         continue;
846                 }
847
848                 tb_port_dbg(port, "DP OUT available\n");
849
850                 /*
851                  * Keep the DP tunnel under the topology starting from
852                  * the same host router downstream port.
853                  */
854                 if (host_port && tb_route(port->sw)) {
855                         struct tb_port *p;
856
857                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
858                         if (p != host_port)
859                                 continue;
860                 }
861
862                 return port;
863         }
864
865         return NULL;
866 }
867
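/*
 * tb_tunnel_dp() - find an inactive DP IN + DP OUT pair from the
 * available DP resources and establish a DP tunnel between them. Both
 * ends are runtime resumed and unused USB3 bandwidth is released so the
 * DP stream can use it.
 */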
868 static void tb_tunnel_dp(struct tb *tb)
869 {
870         int available_up, available_down, ret;
871         struct tb_cm *tcm = tb_priv(tb);
872         struct tb_port *port, *in, *out;
873         struct tb_tunnel *tunnel;
874
875         if (!tb_acpi_may_tunnel_dp()) {
876                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
877                 return;
878         }
879
880         /*
881          * Find pair of inactive DP IN and DP OUT adapters and then
882          * establish a DP tunnel between them.
883          */
884         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
885
886         in = NULL;
887         out = NULL;
888         list_for_each_entry(port, &tcm->dp_resources, list) {
889                 if (!tb_port_is_dpin(port))
890                         continue;
891
892                 if (tb_port_is_enabled(port)) {
893                         tb_port_dbg(port, "in use\n");
894                         continue;
895                 }
896
897                 tb_port_dbg(port, "DP IN available\n");
898
899                 out = tb_find_dp_out(tb, port);
900                 if (out) {
901                         in = port;
902                         break;
903                 }
904         }
905
906         if (!in) {
907                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
908                 return;
909         }
910         if (!out) {
911                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
912                 return;
913         }
914
915         /*
916          * DP stream needs the domain to be active so runtime resume
917          * both ends of the tunnel.
918          *
919          * This should bring the routers in the middle active as well
920          * and keeps the domain from runtime suspending while the DP
921          * tunnel is active.
922          */
923         pm_runtime_get_sync(&in->sw->dev);
924         pm_runtime_get_sync(&out->sw->dev);
925
926         if (tb_switch_alloc_dp_resource(in->sw, in)) {
927                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
928                 goto err_rpm_put;
929         }
930
931         /* Make all unused USB3 bandwidth available for the new DP tunnel */
932         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
933         if (ret) {
934                 tb_warn(tb, "failed to release unused bandwidth\n");
935                 goto err_dealloc_dp;
936         }
937
938         ret = tb_available_bandwidth(tb, in, out, &available_up,
939                                      &available_down);
940         if (ret)
941                 goto err_reclaim;
942
943         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
944                available_up, available_down);
945
946         tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
947         if (!tunnel) {
948                 tb_port_dbg(out, "could not allocate DP tunnel\n");
949                 goto err_reclaim;
950         }
951
952         if (tb_tunnel_activate(tunnel)) {
953                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
954                 goto err_free;
955         }
956
957         list_add_tail(&tunnel->list, &tcm->tunnel_list);
958         tb_reclaim_usb3_bandwidth(tb, in, out);
959         return;
960
961 err_free:
962         tb_tunnel_free(tunnel);
963 err_reclaim:
964         tb_reclaim_usb3_bandwidth(tb, in, out);
965 err_dealloc_dp:
966         tb_switch_dealloc_dp_resource(in->sw, in);
967 err_rpm_put:
968         pm_runtime_mark_last_busy(&out->sw->dev);
969         pm_runtime_put_autosuspend(&out->sw->dev);
970         pm_runtime_mark_last_busy(&in->sw->dev);
971         pm_runtime_put_autosuspend(&in->sw->dev);
972 }
973
974 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
975 {
976         struct tb_port *in, *out;
977         struct tb_tunnel *tunnel;
978
979         if (tb_port_is_dpin(port)) {
980                 tb_port_dbg(port, "DP IN resource unavailable\n");
981                 in = port;
982                 out = NULL;
983         } else {
984                 tb_port_dbg(port, "DP OUT resource unavailable\n");
985                 in = NULL;
986                 out = port;
987         }
988
989         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
990         tb_deactivate_and_free_tunnel(tunnel);
991         list_del_init(&port->list);
992
993         /*
994          * See if there is another DP OUT port that can be used to
995          * create another tunnel.
996          */
997         tb_tunnel_dp(tb);
998 }
999
1000 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1001 {
1002         struct tb_cm *tcm = tb_priv(tb);
1003         struct tb_port *p;
1004
1005         if (tb_port_is_enabled(port))
1006                 return;
1007
1008         list_for_each_entry(p, &tcm->dp_resources, list) {
1009                 if (p == port)
1010                         return;
1011         }
1012
1013         tb_port_dbg(port, "DP %s resource available\n",
1014                     tb_port_is_dpin(port) ? "IN" : "OUT");
1015         list_add_tail(&port->list, &tcm->dp_resources);
1016
1017         /* Look for suitable DP IN <-> DP OUT pairs now */
1018         tb_tunnel_dp(tb);
1019 }
1020
1021 static void tb_disconnect_and_release_dp(struct tb *tb)
1022 {
1023         struct tb_cm *tcm = tb_priv(tb);
1024         struct tb_tunnel *tunnel, *n;
1025
1026         /*
1027          * Tear down all DP tunnels and release their resources. They
1028          * will be re-established after resume based on plug events.
1029          */
1030         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1031                 if (tb_tunnel_is_dp(tunnel))
1032                         tb_deactivate_and_free_tunnel(tunnel);
1033         }
1034
1035         while (!list_empty(&tcm->dp_resources)) {
1036                 struct tb_port *port;
1037
1038                 port = list_first_entry(&tcm->dp_resources,
1039                                         struct tb_port, list);
1040                 list_del_init(&port->list);
1041         }
1042 }
1043
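/*
 * tb_disconnect_pci() - tear down the PCIe tunnel whose upstream adapter
 * is on @sw.
 */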
1044 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1045 {
1046         struct tb_tunnel *tunnel;
1047         struct tb_port *up;
1048
1049         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1050         if (WARN_ON(!up))
1051                 return -ENODEV;
1052
1053         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1054         if (WARN_ON(!tunnel))
1055                 return -ENODEV;
1056
1057         tb_tunnel_deactivate(tunnel);
1058         list_del(&tunnel->list);
1059         tb_tunnel_free(tunnel);
1060         return 0;
1061 }
1062
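/*
 * tb_tunnel_pci() - create a PCIe tunnel between the PCIe upstream
 * adapter on @sw and the matching PCIe downstream adapter on its parent
 * router, if both are available.
 */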
1063 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1064 {
1065         struct tb_port *up, *down, *port;
1066         struct tb_cm *tcm = tb_priv(tb);
1067         struct tb_switch *parent_sw;
1068         struct tb_tunnel *tunnel;
1069
1070         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1071         if (!up)
1072                 return 0;
1073
1074         /*
1075          * Look up available down port. Since we are chaining it should
1076          * be found right above this switch.
1077          */
1078         parent_sw = tb_to_switch(sw->dev.parent);
1079         port = tb_port_at(tb_route(sw), parent_sw);
1080         down = tb_find_pcie_down(parent_sw, port);
1081         if (!down)
1082                 return 0;
1083
1084         tunnel = tb_tunnel_alloc_pci(tb, up, down);
1085         if (!tunnel)
1086                 return -ENOMEM;
1087
1088         if (tb_tunnel_activate(tunnel)) {
1089                 tb_port_info(up,
1090                              "PCIe tunnel activation failed, aborting\n");
1091                 tb_tunnel_free(tunnel);
1092                 return -EIO;
1093         }
1094
1095         /*
1096          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1097          * here.
1098          */
1099         if (tb_switch_pcie_l1_enable(sw))
1100                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1101
1102         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1103         return 0;
1104 }
1105
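/*
 * tb_approve_xdomain_paths() - establish a DMA tunnel between the host
 * NHI adapter and the port where the XDomain @xd is connected, using the
 * given transmit/receive paths and rings.
 */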
1106 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1107                                     int transmit_path, int transmit_ring,
1108                                     int receive_path, int receive_ring)
1109 {
1110         struct tb_cm *tcm = tb_priv(tb);
1111         struct tb_port *nhi_port, *dst_port;
1112         struct tb_tunnel *tunnel;
1113         struct tb_switch *sw;
1114
1115         sw = tb_to_switch(xd->dev.parent);
1116         dst_port = tb_port_at(xd->route, sw);
1117         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1118
1119         mutex_lock(&tb->lock);
1120         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1121                                      transmit_ring, receive_path, receive_ring);
1122         if (!tunnel) {
1123                 mutex_unlock(&tb->lock);
1124                 return -ENOMEM;
1125         }
1126
1127         if (tb_tunnel_activate(tunnel)) {
1128                 tb_port_info(nhi_port,
1129                              "DMA tunnel activation failed, aborting\n");
1130                 tb_tunnel_free(tunnel);
1131                 mutex_unlock(&tb->lock);
1132                 return -EIO;
1133         }
1134
1135         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1136         mutex_unlock(&tb->lock);
1137         return 0;
1138 }
1139
1140 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1141                                           int transmit_path, int transmit_ring,
1142                                           int receive_path, int receive_ring)
1143 {
1144         struct tb_cm *tcm = tb_priv(tb);
1145         struct tb_port *nhi_port, *dst_port;
1146         struct tb_tunnel *tunnel, *n;
1147         struct tb_switch *sw;
1148
1149         sw = tb_to_switch(xd->dev.parent);
1150         dst_port = tb_port_at(xd->route, sw);
1151         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1152
1153         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1154                 if (!tb_tunnel_is_dma(tunnel))
1155                         continue;
1156                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1157                         continue;
1158
1159                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1160                                         receive_path, receive_ring))
1161                         tb_deactivate_and_free_tunnel(tunnel);
1162         }
1163 }
1164
1165 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1166                                        int transmit_path, int transmit_ring,
1167                                        int receive_path, int receive_ring)
1168 {
1169         if (!xd->is_unplugged) {
1170                 mutex_lock(&tb->lock);
1171                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1172                                               transmit_ring, receive_path,
1173                                               receive_ring);
1174                 mutex_unlock(&tb->lock);
1175         }
1176         return 0;
1177 }
1178
1179 /* hotplug handling */
1180
1181 /*
1182  * tb_handle_hotplug() - handle hotplug event
1183  *
1184  * Executes on tb->wq.
1185  */
1186 static void tb_handle_hotplug(struct work_struct *work)
1187 {
1188         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1189         struct tb *tb = ev->tb;
1190         struct tb_cm *tcm = tb_priv(tb);
1191         struct tb_switch *sw;
1192         struct tb_port *port;
1193
1194         /* Bring the domain back from sleep if it was suspended */
1195         pm_runtime_get_sync(&tb->dev);
1196
1197         mutex_lock(&tb->lock);
1198         if (!tcm->hotplug_active)
1199                 goto out; /* during init, suspend or shutdown */
1200
1201         sw = tb_switch_find_by_route(tb, ev->route);
1202         if (!sw) {
1203                 tb_warn(tb,
1204                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1205                         ev->route, ev->port, ev->unplug);
1206                 goto out;
1207         }
1208         if (ev->port > sw->config.max_port_number) {
1209                 tb_warn(tb,
1210                         "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1211                         ev->route, ev->port, ev->unplug);
1212                 goto put_sw;
1213         }
1214         port = &sw->ports[ev->port];
1215         if (tb_is_upstream_port(port)) {
1216                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1217                        ev->route, ev->port, ev->unplug);
1218                 goto put_sw;
1219         }
1220
1221         pm_runtime_get_sync(&sw->dev);
1222
1223         if (ev->unplug) {
1224                 tb_retimer_remove_all(port);
1225
1226                 if (tb_port_has_remote(port)) {
1227                         tb_port_dbg(port, "switch unplugged\n");
1228                         tb_sw_set_unplugged(port->remote->sw);
1229                         tb_free_invalid_tunnels(tb);
1230                         tb_remove_dp_resources(port->remote->sw);
1231                         tb_switch_tmu_disable(port->remote->sw);
1232                         tb_switch_unconfigure_link(port->remote->sw);
1233                         tb_switch_lane_bonding_disable(port->remote->sw);
1234                         tb_switch_remove(port->remote->sw);
1235                         port->remote = NULL;
1236                         if (port->dual_link_port)
1237                                 port->dual_link_port->remote = NULL;
1238                         /* Maybe we can create another DP tunnel */
1239                         tb_tunnel_dp(tb);
1240                 } else if (port->xdomain) {
1241                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1242
1243                         tb_port_dbg(port, "xdomain unplugged\n");
1244                         /*
1245                          * Service drivers are unbound during
1246                          * tb_xdomain_remove() so setting XDomain as
1247                          * unplugged here prevents deadlock if they call
1248                          * tb_xdomain_disable_paths(). We will tear down
1249                          * all the tunnels below.
1250                          */
1251                         xd->is_unplugged = true;
1252                         tb_xdomain_remove(xd);
1253                         port->xdomain = NULL;
1254                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1255                         tb_xdomain_put(xd);
1256                         tb_port_unconfigure_xdomain(port);
1257                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1258                         tb_dp_resource_unavailable(tb, port);
1259                 } else {
1260                         tb_port_dbg(port,
1261                                    "got unplug event for disconnected port, ignoring\n");
1262                 }
1263         } else if (port->remote) {
1264                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1265         } else {
1266                 if (tb_port_is_null(port)) {
1267                         tb_port_dbg(port, "hotplug: scanning\n");
1268                         tb_scan_port(port);
1269                         if (!port->remote)
1270                                 tb_port_dbg(port, "hotplug: no switch found\n");
1271                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1272                         tb_dp_resource_available(tb, port);
1273                 }
1274         }
1275
1276         pm_runtime_mark_last_busy(&sw->dev);
1277         pm_runtime_put_autosuspend(&sw->dev);
1278
1279 put_sw:
1280         tb_switch_put(sw);
1281 out:
1282         mutex_unlock(&tb->lock);
1283
1284         pm_runtime_mark_last_busy(&tb->dev);
1285         pm_runtime_put_autosuspend(&tb->dev);
1286
1287         kfree(ev);
1288 }
1289
1290 /*
1291  * tb_handle_event() - callback function for the control channel
1292  *
1293  * Delegates to tb_handle_hotplug.
1294  */
1295 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1296                             const void *buf, size_t size)
1297 {
1298         const struct cfg_event_pkg *pkg = buf;
1299         u64 route;
1300
1301         if (type != TB_CFG_PKG_EVENT) {
1302                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1303                 return;
1304         }
1305
1306         route = tb_cfg_get_route(&pkg->header);
1307
1308         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1309                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1310                         pkg->port);
1311         }
1312
1313         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1314 }
1315
1316 static void tb_stop(struct tb *tb)
1317 {
1318         struct tb_cm *tcm = tb_priv(tb);
1319         struct tb_tunnel *tunnel;
1320         struct tb_tunnel *n;
1321
1322         cancel_delayed_work(&tcm->remove_work);
1323         /* tunnels are only present after everything has been initialized */
1324         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1325                 /*
1326                  * DMA tunnels require the driver to be functional so we
1327                  * tear them down. Other protocol tunnels can be left
1328                  * intact.
1329                  */
1330                 if (tb_tunnel_is_dma(tunnel))
1331                         tb_tunnel_deactivate(tunnel);
1332                 tb_tunnel_free(tunnel);
1333         }
1334         tb_switch_remove(tb->root_switch);
1335         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1336 }
1337
1338 static int tb_scan_finalize_switch(struct device *dev, void *data)
1339 {
1340         if (tb_is_switch(dev)) {
1341                 struct tb_switch *sw = tb_to_switch(dev);
1342
1343                 /*
1344                  * If we found that the switch was already setup by the
1345                  * boot firmware, mark it as authorized now before we
1346                  * send uevent to userspace.
1347                  */
1348                 if (sw->boot)
1349                         sw->authorized = 1;
1350
1351                 dev_set_uevent_suppress(dev, false);
1352                 kobject_uevent(&dev->kobj, KOBJ_ADD);
1353                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1354         }
1355
1356         return 0;
1357 }
1358
1359 static int tb_start(struct tb *tb)
1360 {
1361         struct tb_cm *tcm = tb_priv(tb);
1362         int ret;
1363
1364         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1365         if (IS_ERR(tb->root_switch))
1366                 return PTR_ERR(tb->root_switch);
1367
1368         /*
1369          * ICM firmware upgrade needs running firmware and in native
1370          * mode that is not available so disable firmware upgrade of the
1371          * root switch.
1372          */
1373         tb->root_switch->no_nvm_upgrade = true;
1374         /* All USB4 routers support runtime PM */
1375         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1376
1377         ret = tb_switch_configure(tb->root_switch);
1378         if (ret) {
1379                 tb_switch_put(tb->root_switch);
1380                 return ret;
1381         }
1382
1383         /* Announce the switch to the world */
1384         ret = tb_switch_add(tb->root_switch);
1385         if (ret) {
1386                 tb_switch_put(tb->root_switch);
1387                 return ret;
1388         }
1389
1390         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
1391         /* Enable TMU if it is off */
1392         tb_switch_tmu_enable(tb->root_switch);
1393         /* Full scan to discover devices added before the driver was loaded. */
1394         tb_scan_switch(tb->root_switch);
1395         /* Find out tunnels created by the boot firmware */
1396         tb_discover_tunnels(tb);
1397         /*
1398          * If the boot firmware did not create USB 3.x tunnels create them
1399          * now for the whole topology.
1400          */
1401         tb_create_usb3_tunnels(tb->root_switch);
1402         /* Add DP IN resources for the root switch */
1403         tb_add_dp_resources(tb->root_switch);
1404         /* Make the discovered switches available to the userspace */
1405         device_for_each_child(&tb->root_switch->dev, NULL,
1406                               tb_scan_finalize_switch);
1407
1408         /* Allow tb_handle_hotplug to progress events */
1409         tcm->hotplug_active = true;
1410         return 0;
1411 }
1412
1413 static int tb_suspend_noirq(struct tb *tb)
1414 {
1415         struct tb_cm *tcm = tb_priv(tb);
1416
1417         tb_dbg(tb, "suspending...\n");
1418         tb_disconnect_and_release_dp(tb);
1419         tb_switch_suspend(tb->root_switch, false);
1420         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1421         tb_dbg(tb, "suspend finished\n");
1422
1423         return 0;
1424 }
1425
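/*
 * tb_restore_children() - re-enable CLx and TMU on @sw after resume and
 * then restore lane bonding, link configuration and XDomain
 * configuration for everything below it, recursively.
 */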
1426 static void tb_restore_children(struct tb_switch *sw)
1427 {
1428         struct tb_port *port;
1429
1430         /* No need to restore if the router is already unplugged */
1431         if (sw->is_unplugged)
1432                 return;
1433
1434         if (tb_switch_enable_clx(sw, TB_CL0S))
1435                 tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
1436
1437         /*
1438          * tb_switch_tmu_configure() was already called when the switch was
1439          * added before entering system sleep or runtime suspend,
1440          * so no need to call it again before enabling TMU.
1441          */
1442         if (tb_enable_tmu(sw))
1443                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
1444
1445         tb_switch_for_each_port(sw, port) {
1446                 if (!tb_port_has_remote(port) && !port->xdomain)
1447                         continue;
1448
1449                 if (port->remote) {
1450                         tb_switch_lane_bonding_enable(port->remote->sw);
1451                         tb_switch_configure_link(port->remote->sw);
1452
1453                         tb_restore_children(port->remote->sw);
1454                 } else if (port->xdomain) {
1455                         tb_port_configure_xdomain(port);
1456                 }
1457         }
1458 }
1459
1460 static int tb_resume_noirq(struct tb *tb)
1461 {
1462         struct tb_cm *tcm = tb_priv(tb);
1463         struct tb_tunnel *tunnel, *n;
1464         unsigned int usb3_delay = 0;
1465         LIST_HEAD(tunnels);
1466
1467         tb_dbg(tb, "resuming...\n");
1468
1469         /* remove any pci devices the firmware might have setup */
1470         tb_switch_reset(tb->root_switch);
1471
1472         tb_switch_resume(tb->root_switch);
1473         tb_free_invalid_tunnels(tb);
1474         tb_free_unplugged_children(tb->root_switch);
1475         tb_restore_children(tb->root_switch);
1476
1477         /*
1478          * If we get here from suspend to disk the boot firmware or the
1479          * restore kernel might have created tunnels of its own. Since
1480          * we cannot be sure they are usable for us we find and tear
1481          * them down.
1482          */
1483         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
1484         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
1485                 if (tb_tunnel_is_usb3(tunnel))
1486                         usb3_delay = 500;
1487                 tb_tunnel_deactivate(tunnel);
1488                 tb_tunnel_free(tunnel);
1489         }
1490
1491         /* Re-create our tunnels now */
1492         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1493                 /* USB3 requires delay before it can be re-activated */
1494                 if (tb_tunnel_is_usb3(tunnel)) {
1495                         msleep(usb3_delay);
1496                         /* Only need to do it once */
1497                         usb3_delay = 0;
1498                 }
1499                 tb_tunnel_restart(tunnel);
1500         }
1501         if (!list_empty(&tcm->tunnel_list)) {
1502                 /*
1503                  * the pcie links need some time to get going.
1504                  * 100ms works for me...
1505                  */
1506                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1507                 msleep(100);
1508         }
1509          /* Allow tb_handle_hotplug to progress events */
1510         tcm->hotplug_active = true;
1511         tb_dbg(tb, "resume finished\n");
1512
1513         return 0;
1514 }
1515
1516 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1517 {
1518         struct tb_port *port;
1519         int ret = 0;
1520
1521         tb_switch_for_each_port(sw, port) {
1522                 if (tb_is_upstream_port(port))
1523                         continue;
1524                 if (port->xdomain && port->xdomain->is_unplugged) {
1525                         tb_retimer_remove_all(port);
1526                         tb_xdomain_remove(port->xdomain);
1527                         tb_port_unconfigure_xdomain(port);
1528                         port->xdomain = NULL;
1529                         ret++;
1530                 } else if (port->remote) {
1531                         ret += tb_free_unplugged_xdomains(port->remote->sw);
1532                 }
1533         }
1534
1535         return ret;
1536 }
1537
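/*
 * Hibernation freeze: just stop processing hotplug events while the
 * image is written; the hardware state is left untouched.
 */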
1538 static int tb_freeze_noirq(struct tb *tb)
1539 {
1540         struct tb_cm *tcm = tb_priv(tb);
1541
1542         tcm->hotplug_active = false;
1543         return 0;
1544 }
1545
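/* Hibernation thaw: resume hotplug event processing again. */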
1546 static int tb_thaw_noirq(struct tb *tb)
1547 {
1548         struct tb_cm *tcm = tb_priv(tb);
1549
1550         tcm->hotplug_active = true;
1551         return 0;
1552 }
1553
1554 static void tb_complete(struct tb *tb)
1555 {
1556         /*
1557          * Release any unplugged XDomains. If another domain has been
1558          * swapped in place of an unplugged XDomain, we need to run
1559          * another rescan to pick it up.
1560          */
1561         mutex_lock(&tb->lock);
1562         if (tb_free_unplugged_xdomains(tb->root_switch))
1563                 tb_scan_switch(tb->root_switch);
1564         mutex_unlock(&tb->lock);
1565 }
1566
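/*
 * Runtime PM: put the whole topology below the root switch to sleep
 * and stop hotplug handling until the domain is runtime resumed again.
 */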
1567 static int tb_runtime_suspend(struct tb *tb)
1568 {
1569         struct tb_cm *tcm = tb_priv(tb);
1570
1571         mutex_lock(&tb->lock);
1572         tb_switch_suspend(tb->root_switch, true);
1573         tcm->hotplug_active = false;
1574         mutex_unlock(&tb->lock);
1575
1576         return 0;
1577 }
1578
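/*
 * Delayed work scheduled from tb_runtime_resume() to clean up routers
 * and XDomains that went away while the domain was runtime suspended.
 */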
1579 static void tb_remove_work(struct work_struct *work)
1580 {
1581         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1582         struct tb *tb = tcm_to_tb(tcm);
1583
1584         mutex_lock(&tb->lock);
1585         if (tb->root_switch) {
1586                 tb_free_unplugged_children(tb->root_switch);
1587                 tb_free_unplugged_xdomains(tb->root_switch);
1588         }
1589         mutex_unlock(&tb->lock);
1590 }
1591
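/*
 * Runtime PM resume: bring the topology back, restart the existing
 * tunnels and re-enable hotplug handling. Removal of anything that was
 * unplugged while suspended is deferred to tb_remove_work().
 */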
1592 static int tb_runtime_resume(struct tb *tb)
1593 {
1594         struct tb_cm *tcm = tb_priv(tb);
1595         struct tb_tunnel *tunnel, *n;
1596
1597         mutex_lock(&tb->lock);
1598         tb_switch_resume(tb->root_switch);
1599         tb_free_invalid_tunnels(tb);
1600         tb_restore_children(tb->root_switch);
1601         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1602                 tb_tunnel_restart(tunnel);
1603         tcm->hotplug_active = true;
1604         mutex_unlock(&tb->lock);
1605
1606         /*
1607          * Schedule cleanup of any unplugged devices. Run this in a
1608          * separate thread to avoid possible deadlock if the device
1609          * removal runtime resumes the unplugged device.
1610          */
1611         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1612         return 0;
1613 }
1614
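/*
 * Callbacks invoked by the Thunderbolt domain core when the software
 * connection manager is in use.
 */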
1615 static const struct tb_cm_ops tb_cm_ops = {
1616         .start = tb_start,
1617         .stop = tb_stop,
1618         .suspend_noirq = tb_suspend_noirq,
1619         .resume_noirq = tb_resume_noirq,
1620         .freeze_noirq = tb_freeze_noirq,
1621         .thaw_noirq = tb_thaw_noirq,
1622         .complete = tb_complete,
1623         .runtime_suspend = tb_runtime_suspend,
1624         .runtime_resume = tb_runtime_resume,
1625         .handle_event = tb_handle_event,
1626         .disapprove_switch = tb_disconnect_pci,
1627         .approve_switch = tb_tunnel_pci,
1628         .approve_xdomain_paths = tb_approve_xdomain_paths,
1629         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1630 };
1631
1632 /*
1633  * During suspend the Thunderbolt controller is reset and all PCIe
1634  * tunnels are lost. The NHI driver will try to reestablish all tunnels
1635  * during resume. This adds device links between the tunneled PCIe
1636  * downstream ports and the NHI so that the device core will make sure
1637  * the NHI is resumed before the rest.
1638  */
1639 static void tb_apple_add_links(struct tb_nhi *nhi)
1640 {
1641         struct pci_dev *upstream, *pdev;
1642
1643         if (!x86_apple_machine)
1644                 return;
1645
1646         switch (nhi->pdev->device) {
1647         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1648         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1649         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1650         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1651                 break;
1652         default:
1653                 return;
1654         }
1655
1656         upstream = pci_upstream_bridge(nhi->pdev);
1657         while (upstream) {
1658                 if (!pci_is_pcie(upstream))
1659                         return;
1660                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1661                         break;
1662                 upstream = pci_upstream_bridge(upstream);
1663         }
1664
1665         if (!upstream)
1666                 return;
1667
1668         /*
1669          * For each hotplug downstream port, add a device link back to
1670          * the NHI so that PCIe tunnels can be re-established after
1671          * sleep.
1672          */
1673         for_each_pci_bridge(pdev, upstream->subordinate) {
1674                 const struct device_link *link;
1675
1676                 if (!pci_is_pcie(pdev))
1677                         continue;
1678                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1679                     !pdev->is_hotplug_bridge)
1680                         continue;
1681
1682                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1683                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
1684                                        DL_FLAG_PM_RUNTIME);
1685                 if (link) {
1686                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1687                                 dev_name(&pdev->dev));
1688                 } else {
1689                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1690                                  dev_name(&pdev->dev));
1691                 }
1692         }
1693 }
1694
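/*
 * Entry point of the software connection manager: allocate the domain,
 * pick the security level based on whether ACPI allows PCIe tunneling
 * and hook up the connection manager callbacks above.
 */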
1695 struct tb *tb_probe(struct tb_nhi *nhi)
1696 {
1697         struct tb_cm *tcm;
1698         struct tb *tb;
1699
1700         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
1701         if (!tb)
1702                 return NULL;
1703
1704         if (tb_acpi_may_tunnel_pcie())
1705                 tb->security_level = TB_SECURITY_USER;
1706         else
1707                 tb->security_level = TB_SECURITY_NOPCIE;
1708
1709         tb->cm_ops = &tb_cm_ops;
1710
1711         tcm = tb_priv(tb);
1712         INIT_LIST_HEAD(&tcm->tunnel_list);
1713         INIT_LIST_HEAD(&tcm->dp_resources);
1714         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1715
1716         tb_dbg(tb, "using software connection manager\n");
1717
1718         tb_apple_add_links(nhi);
1719         tb_acpi_add_links(nhi);
1720
1721         return tb;
1722 }