// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID                    8

#define TB_PCI_PATH_DOWN                0
#define TB_PCI_PATH_UP                  1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID                   8

#define TB_USB3_PATH_DOWN               0
#define TB_USB3_PATH_UP                 1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID              8
#define TB_DP_AUX_RX_HOPID              8
#define TB_DP_VIDEO_HOPID               9

#define TB_DP_VIDEO_PATH_OUT            0
#define TB_DP_AUX_PATH_OUT              1
#define TB_DP_AUX_PATH_IN               2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS             6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS                  14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS              1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

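/*
 * Example (editor's illustration, route values hypothetical): a PCI
 * tunnel from route 0x0 port 3 to route 0x301 port 9 is logged by the
 * macros above with the prefix "0:3 <-> 301:9 (PCI): ".
 */
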
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
        return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL, stores the maximum number of
 *                  simultaneous DP streams possible through this lane
 *                  adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
                                         size_t *max_dp_streams)
{
        const struct tb_switch *sw = port->sw;
        int credits, usb3, pcie, spare;
        size_t ndp;

        usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
        pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

        if (tb_acpi_is_xdomain_allowed()) {
                spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
                /* Add some credits for potential second DMA tunnel */
                spare += TB_MIN_DMA_CREDITS;
        } else {
                spare = 0;
        }

        credits = tb_usable_credits(port);
        if (tb_acpi_may_tunnel_dp()) {
                /*
                 * Maximum number of DP streams possible through the
                 * lane adapter.
                 */
                ndp = (credits - (usb3 + pcie + spare)) /
                      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        } else {
                ndp = 0;
        }
        credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        credits -= usb3;

        if (max_dp_streams)
                *max_dp_streams = ndp;

        return credits > 0 ? credits : 0;
}
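
/*
 * Worked example (editor's illustration, all numbers hypothetical):
 * with 105 usable credits, usb3 = 14, pcie = 32 and spare = 15,
 * 105 - (14 + 32 + 15) = 44 credits are left for DP, so with 22
 * credits per stream (AUX + main) ndp = 2. The function then returns
 * 105 - 2 * 22 - 14 = 47 credits for PCIe and DMA to share.
 */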

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available;

                available = tb_available_credits(port, NULL);
                credits = min(sw->max_pcie_credits, available);

                if (credits < TB_MIN_PCIE_CREDITS)
                        return -ENOSPC;

                credits = max(TB_MIN_PCIE_CREDITS, credits);
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
        return 0;
}
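
/*
 * For illustration (numbers hypothetical): on a USB4 router with
 * max_pcie_credits = 32 but only 20 credits still available on the
 * lane adapter, the hop gets min(32, 20) = 20 credits; anything below
 * TB_MIN_PCIE_CREDITS (6) fails with -ENOSPC.
 */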

static int tb_pci_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_pci_init_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
                                         bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up", alloc_hopid);
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
                goto err_free;

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
                goto err_deactivate;

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}
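
/*
 * Illustrative usage sketch (editor's addition, not part of the
 * driver): a connection manager would typically pair the allocation
 * with tb_tunnel_activate() and free the tunnel on failure:
 *
 *      tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *      if (!tunnel)
 *              return -ENOMEM;
 *      if (tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_free(tunnel);
 *              return -EIO;
 *      }
 */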

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
        /* Titan Ridge DP adapters need the same treatment as USB4 */
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
        int timeout = 10;
        u32 val;
        int ret;

        /* Both ends need to support this */
        if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
                return 0;

        ret = tb_port_read(out, &val, TB_CFG_PORT,
                           out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

        ret = tb_port_write(out, &val, TB_CFG_PORT,
                            out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        do {
                ret = tb_port_read(out, &val, TB_CFG_PORT,
                                   out->cap_adap + DP_STATUS_CTRL, 1);
                if (ret)
                        return ret;
                if (!(val & DP_STATUS_CTRL_CMHS))
                        return 0;
                usleep_range(10, 100);
        } while (timeout--);

        return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
        u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

        switch (rate) {
        case DP_COMMON_CAP_RATE_RBR:
                return 1620;
        case DP_COMMON_CAP_RATE_HBR:
                return 2700;
        case DP_COMMON_CAP_RATE_HBR2:
                return 5400;
        case DP_COMMON_CAP_RATE_HBR3:
                return 8100;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
        val &= ~DP_COMMON_CAP_RATE_MASK;
        switch (rate) {
        default:
                WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
                fallthrough;
        case 1620:
                val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 2700:
                val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 5400:
                val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 8100:
                val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        }
        return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
        u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

        switch (lanes) {
        case DP_COMMON_CAP_1_LANE:
                return 1;
        case DP_COMMON_CAP_2_LANES:
                return 2;
        case DP_COMMON_CAP_4_LANES:
                return 4;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
        val &= ~DP_COMMON_CAP_LANES_MASK;
        switch (lanes) {
        default:
                WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
                     lanes);
                fallthrough;
        case 1:
                val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        }
        return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
        /* Tunneling removes the DP 8b/10b encoding */
        return rate * lanes * 8 / 10;
}
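
/*
 * For example, HBR2 (5400 Mb/s) over four lanes carries
 * 5400 * 4 * 8 / 10 = 17280 Mb/s of usable bandwidth once the
 * 8b/10b encoding overhead is removed.
 */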

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
                                  u32 out_rate, u32 out_lanes, u32 *new_rate,
                                  u32 *new_lanes)
{
        static const u32 dp_bw[][2] = {
                /* Mb/s, lanes */
                { 8100, 4 }, /* 25920 Mb/s */
                { 5400, 4 }, /* 17280 Mb/s */
                { 8100, 2 }, /* 12960 Mb/s */
                { 2700, 4 }, /* 8640 Mb/s */
                { 5400, 2 }, /* 8640 Mb/s */
                { 8100, 1 }, /* 6480 Mb/s */
                { 1620, 4 }, /* 5184 Mb/s */
                { 5400, 1 }, /* 4320 Mb/s */
                { 2700, 2 }, /* 4320 Mb/s */
                { 1620, 2 }, /* 2592 Mb/s */
                { 2700, 1 }, /* 2160 Mb/s */
                { 1620, 1 }, /* 1296 Mb/s */
        };
        unsigned int i;

        /*
         * Find a combination that can fit into max_bw and does not
         * exceed the maximum rate and lanes supported by the DP OUT and
         * DP IN adapters.
         */
        for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
                if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
                        continue;

                if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
                        continue;

                if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
                        *new_rate = dp_bw[i][0];
                        *new_lanes = dp_bw[i][1];
                        return 0;
                }
        }

        return -ENOSR;
}
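
/*
 * Worked example (editor's illustration): with max_bw = 8000 Mb/s, a
 * DP IN supporting HBR2 x4 and a DP OUT supporting HBR3 x2, the first
 * table entry passing all three checks is { 5400, 1 } (4320 Mb/s);
 * { 5400, 2 } fits both adapters but needs 8640 Mb/s > max_bw.
 */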

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        int ret, max_bw;

        /*
         * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
         * newer generation hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /*
         * Perform connection manager handshake between IN and OUT ports
         * before capabilities exchange can take place.
         */
        ret = tb_dp_cm_handshake(in, out);
        if (ret)
                return ret;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        in_rate = tb_dp_cap_get_rate(in_dp_cap);
        in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
        tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

        /*
         * If the tunnel bandwidth is limited (max_bw is set) then see
         * if we need to reduce bandwidth to fit there.
         */
        out_rate = tb_dp_cap_get_rate(out_dp_cap);
        out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
        bw = tb_dp_bandwidth(out_rate, out_lanes);
        tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    out_rate, out_lanes, bw);

        if (in->sw->config.depth < out->sw->config.depth)
                max_bw = tunnel->max_down;
        else
                max_bw = tunnel->max_up;

        if (max_bw && bw > max_bw) {
                u32 new_rate, new_lanes, new_bw;

                ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
                                             out_rate, out_lanes, &new_rate,
                                             &new_lanes);
                if (ret) {
                        tb_port_info(out, "not enough bandwidth for DP tunnel\n");
                        return ret;
                }

                new_bw = tb_dp_bandwidth(new_rate, new_lanes);
                tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
                            new_rate, new_lanes, new_bw);

                /*
                 * Set new rate and number of lanes before writing it to
                 * the IN port remote caps.
                 */
                out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
                out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
        }

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                    int *consumed_down)
{
        struct tb_port *in = tunnel->src_port;
        const struct tb_switch *sw = in->sw;
        u32 val, rate = 0, lanes = 0;
        int ret;

        if (tb_dp_is_usb4(sw)) {
                int timeout = 20;

                /*
                 * Wait for DPRX done. Normally it should be already set
                 * for active tunnel.
                 */
                do {
                        ret = tb_port_read(in, &val, TB_CFG_PORT,
                                           in->cap_adap + DP_COMMON_CAP, 1);
                        if (ret)
                                return ret;

                        if (val & DP_COMMON_CAP_DPRX_DONE) {
                                rate = tb_dp_cap_get_rate(val);
                                lanes = tb_dp_cap_get_lanes(val);
                                break;
                        }
                        msleep(250);
                } while (timeout--);

                if (!timeout)
                        return -ETIMEDOUT;
        } else if (sw->generation >= 2) {
                /*
                 * Read from the copied remote cap so that we take into
                 * account if capabilities were reduced during exchange.
                 */
                ret = tb_port_read(in, &val, TB_CFG_PORT,
                                   in->cap_adap + DP_REMOTE_CAP, 1);
                if (ret)
                        return ret;

                rate = tb_dp_cap_get_rate(val);
                lanes = tb_dp_cap_get_lanes(val);
        } else {
                /* No bandwidth management for legacy devices */
                *consumed_up = 0;
                *consumed_down = 0;
                return 0;
        }

        if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
                *consumed_up = 0;
                *consumed_down = tb_dp_bandwidth(rate, lanes);
        } else {
                *consumed_up = tb_dp_bandwidth(rate, lanes);
                *consumed_down = 0;
        }

        return 0;
}
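
/*
 * For instance (hypothetical link): a tunnel that trained at HBR2 x2
 * consumes 5400 * 2 * 8 / 10 = 8640 Mb/s, accounted against the
 * downstream direction when the DP IN adapter sits closer to the host.
 */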

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port))
                hop->initial_credits = sw->min_dp_aux_credits;
        else
                hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        tb_path_for_each_hop(path, hop)
                tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int nfc_credits;
                size_t max_dp_streams;

                tb_available_credits(port, &max_dp_streams);
                /*
                 * Read the number of currently allocated NFC credits
                 * from the lane adapter. Since we only use them for DP
                 * tunneling we can use that to figure out how many DP
                 * tunnels already go through the lane adapter.
                 */
                nfc_credits = port->config.nfc_credits &
                                ADP_CS_4_NFC_BUFFERS_MASK;
                if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
                        return -ENOSPC;

                hop->nfc_credits = sw->min_dp_main_credits;
        } else {
                hop->nfc_credits = min(port->total_credits - 2, 12U);
        }

        return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dp_init_video_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered DP tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
                                        bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video", alloc_hopid);
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
                goto err_free;

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
                                alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *          if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *            (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out, int max_up,
                                     int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;
        tunnel->dst_port = out;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
        const struct tb_switch *sw = port->sw;
        int credits;

        credits = tb_available_credits(port, NULL);
        if (tb_acpi_may_tunnel_pcie())
                credits -= sw->max_pcie_credits;
        credits -= port->dma_credits;

        return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available = tb_dma_available_credits(port);

                /*
                 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
                 * DMA path cannot be established.
                 */
                if (available < TB_MIN_DMA_CREDITS)
                        return -ENOSPC;

                while (credits > available)
                        credits--;

                tb_port_dbg(port, "reserving %u credits for DMA path\n",
                            credits);

                port->dma_credits += credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 14 : 6;
                else
                        credits = min(port->total_credits, credits);
        }

        hop->initial_credits = credits;
        return 0;
}
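
/*
 * Worked example (editor's illustration, numbers hypothetical): if 14
 * credits are requested but only 10 are available on the in_port, the
 * while loop in tb_dma_reserve_credits() trims the request to 10 and
 * reserves those; with fewer than TB_MIN_DMA_CREDITS (1) available the
 * path setup fails with -ENOSPC instead.
 */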

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;
        unsigned int i, tmp;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        /*
         * First lane adapter is the one connected to the remote host.
         * We don't tunnel other traffic over this link so can use all
         * the credits (except the ones reserved for control traffic).
         */
        hop = &path->hops[0];
        tmp = min(tb_usable_credits(hop->in_port), credits);
        hop->initial_credits = tmp;
        hop->in_port->dma_credits += tmp;

        for (i = 1; i < path->path_length; i++) {
                int ret;

                ret = tb_dma_reserve_credits(&path->hops[i], credits);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_ALL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dma_reserve_credits(hop, credits);
                if (ret)
                        return ret;
        }

        return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                port->dma_credits -= hop->initial_credits;

                tb_port_dbg(port, "released %u DMA path credits\n",
                            hop->initial_credits);
        }
}

static void tb_dma_deinit_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        tb_path_for_each_hop(path, hop)
                tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;
                tb_dma_deinit_path(tunnel->paths[i]);
        }
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Set to %-1 if RX path is not needed.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_path,
                                      int transmit_ring, int receive_path,
                                      int receive_ring)
{
        struct tb_tunnel *tunnel;
        size_t npaths = 0, i = 0;
        struct tb_path *path;
        int credits;

        if (receive_ring > 0)
                npaths++;
        if (transmit_ring > 0)
                npaths++;

        if (WARN_ON(!npaths))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->src_port = nhi;
        tunnel->dst_port = dst;
        tunnel->deinit = tb_dma_deinit;

        credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

        if (receive_ring > 0) {
                path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
                                     "DMA RX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_rx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
                        goto err_free;
                }
        }

        if (transmit_ring > 0) {
                path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
                                     "DMA TX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_tx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
                        goto err_free;
                }
        }

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}
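
/*
 * Illustrative usage sketch (editor's addition, parameter values
 * hypothetical): a bidirectional DMA tunnel towards another domain
 * could be set up as
 *
 *      tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
 *      if (!tunnel)
 *              return -ENOMEM;
 *
 * Passing %-1 as a ring number leaves that direction out entirely.
 */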

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
                         int transmit_ring, int receive_path, int receive_ring)
{
        const struct tb_path *tx_path = NULL, *rx_path = NULL;
        int i;

        if (!receive_ring || !transmit_ring)
                return false;

        for (i = 0; i < tunnel->npaths; i++) {
                const struct tb_path *path = tunnel->paths[i];

                if (!path)
                        continue;

                if (tb_port_is_nhi(path->hops[0].in_port))
                        tx_path = path;
                else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
                        rx_path = path;
        }

        if (transmit_ring > 0 || transmit_path > 0) {
                if (!tx_path)
                        return false;
                if (transmit_ring > 0 &&
                    (tx_path->hops[0].in_hop_index != transmit_ring))
                        return false;
                if (transmit_path > 0 &&
                    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
                        return false;
        }

        if (receive_ring > 0 || receive_path > 0) {
                if (!rx_path)
                        return false;
                if (receive_path > 0 &&
                    (rx_path->hops[0].in_hop_index != receive_path))
                        return false;
                if (receive_ring > 0 &&
                    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
                        return false;
        }

        return true;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
                int *consumed_up, int *consumed_down)
{
        int pcie_enabled = tb_acpi_may_tunnel_pcie();

        /*
         * PCIe tunneling, if enabled, affects the USB3 bandwidth so
         * take that into account here.
         */
        *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
        *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
        return 0;
}
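
/*
 * For example (hypothetical allocation): with 900/3600 Mb/s allocated
 * and PCIe tunneling enabled, the reported consumption becomes
 * 900 * 4 / 3 = 1200 Mb/s upstream and 3600 * 4 / 3 = 4800 Mb/s
 * downstream.
 */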

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
                return;
        } else if (!ret) {
                /* Use the maximum link rate if the link valid flag is not set */
                ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
                }
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If nothing changed, there is no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                credits = sw->max_usb3_credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 3;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop)
                tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
                                          bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down", alloc_hopid);
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *          if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *            (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
1461  */
1462 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1463                                        struct tb_port *down, int max_up,
1464                                        int max_down)
1465 {
1466         struct tb_tunnel *tunnel;
1467         struct tb_path *path;
1468         int max_rate = 0;
1469
1470         /*
1471          * Check that we have enough bandwidth available for the new
1472          * USB3 tunnel.
1473          */
1474         if (max_up > 0 || max_down > 0) {
1475                 max_rate = tb_usb3_max_link_rate(down, up);
1476                 if (max_rate < 0)
1477                         return NULL;
1478
1479                 /* Only 90% can be allocated for USB3 isochronous transfers */
1480                 max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}
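
/*
 * A minimal lifecycle sketch (illustrative only; @tb, @up and @down are
 * assumed to come from USB3 adapter discovery, and the error codes are
 * placeholders, not this driver's actual policy):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 */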

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}
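
/*
 * Illustrative resume-path sketch (the surrounding loop and the cleanup
 * policy are assumptions, not necessarily what the connection manager
 * does):
 *
 *	if (tb_tunnel_restart(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		continue;
 *	}
 */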

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or a negative error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}
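
/*
 * Sketch: locating the tunnel that runs through a given port. The list
 * head name and the iteration context are assumptions for illustration:
 *
 *	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
 *		if (tb_tunnel_port_on_path(tunnel, port))
 *			return tunnel;
 *	}
 */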

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *               Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *                 Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. Returns %0 on success and negative
 * errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}
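
/*
 * Example query (a sketch; the caller context is assumed):
 *
 *	int up_bw, down_bw;
 *
 *	if (!tb_tunnel_consumed_bandwidth(tunnel, &up_bw, &down_bw))
 *		pr_debug("tunnel consumes %d/%d Mb/s\n", up_bw, down_bw);
 */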

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
 * the moment), this function makes it release all unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both by the amount the
 * tunnel reclaimed). If nothing was reclaimed the values are kept
 * as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}
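
/*
 * A hedged sketch of how the bandwidth helpers above compose when
 * rebalancing (the recomputation step in the middle is an assumption
 * about the caller, not code from this file):
 *
 *	tb_tunnel_release_unused_bandwidth(tunnel);
 *	... recompute available_up/available_down along the path ...
 *	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
 *					      &available_down);
 */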