// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID of 8 for both directions */
#define TB_PCI_HOPID                    8

#define TB_PCI_PATH_DOWN                0
#define TB_PCI_PATH_UP                  1

/* USB3 adapters always use HopID of 8 for both directions */
#define TB_USB3_HOPID                   8

#define TB_USB3_PATH_DOWN               0
#define TB_USB3_PATH_UP                 1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID              8
#define TB_DP_AUX_RX_HOPID              8
#define TB_DP_VIDEO_HOPID               9

#define TB_DP_VIDEO_PATH_OUT            0
#define TB_DP_AUX_PATH_OUT              1
#define TB_DP_AUX_PATH_IN               2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS             6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS                  14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS              1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
        do {                                                            \
                struct tb_tunnel *__tunnel = (tunnel);                  \
                level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
                      tb_route(__tunnel->src_port->sw),                 \
                      __tunnel->src_port->port,                         \
                      tb_route(__tunnel->dst_port->sw),                 \
                      __tunnel->dst_port->port,                         \
                      tb_tunnel_names[__tunnel->type],                  \
                      ## arg);                                          \
        } while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
        __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
        return port->total_credits - port->ctl_credits;
}

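/*
 * Worked example (numbers are illustrative only): a lane adapter
 * reporting total_credits = 60 with ctl_credits = 2 reserved for
 * control traffic leaves tb_usable_credits() = 58 buffers for
 * tunneled traffic.
 */
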
/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *                  streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
                                         size_t *max_dp_streams)
{
        const struct tb_switch *sw = port->sw;
        int credits, usb3, pcie, spare;
        size_t ndp;

        usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
        pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

        if (tb_acpi_is_xdomain_allowed()) {
                spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
                /* Add some credits for potential second DMA tunnel */
                spare += TB_MIN_DMA_CREDITS;
        } else {
                spare = 0;
        }

        credits = tb_usable_credits(port);
        if (tb_acpi_may_tunnel_dp()) {
                /*
                 * Maximum number of DP streams possible through the
                 * lane adapter.
                 */
                ndp = (credits - (usb3 + pcie + spare)) /
                      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        } else {
                ndp = 0;
        }

        credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
        credits -= usb3;

        if (max_dp_streams)
                *max_dp_streams = ndp;

        return credits > 0 ? credits : 0;
}

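/*
 * Worked example with made-up numbers: if tb_usable_credits() = 58,
 * usb3 = 20, pcie = 6 and spare = 15, and one DP stream needs
 * min_dp_aux_credits + min_dp_main_credits = 19 credits, then
 * ndp = (58 - 41) / 19 = 0 and the function returns
 * 58 - 0 * 19 - 20 = 38 credits for PCIe and DMA tunneling.
 */
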
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
                                         enum tb_tunnel_type type)
{
        struct tb_tunnel *tunnel;

        tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
        if (!tunnel)
                return NULL;

        tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
        if (!tunnel->paths) {
                tb_tunnel_free(tunnel);
                return NULL;
        }

        INIT_LIST_HEAD(&tunnel->list);
        tunnel->tb = tb;
        tunnel->npaths = npaths;
        tunnel->type = type;

        return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_pcie_up(tunnel->dst_port))
                return tb_pci_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available;

                available = tb_available_credits(port, NULL);
                credits = min(sw->max_pcie_credits, available);

                if (credits < TB_MIN_PCIE_CREDITS)
                        return -ENOSPC;

                credits = max(TB_MIN_PCIE_CREDITS, credits);
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
        return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 1;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_pci_init_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
                                         bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_pci_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
                                &tunnel->dst_port, "PCIe Up", alloc_hopid);
        if (!path) {
                /* Just disable the downstream port */
                tb_pci_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
                goto err_free;

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
                                "PCIe Down", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
                goto err_deactivate;

        /* Validate that the tunnel is complete */
        if (!tb_port_is_pcie_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a PCIe adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
                                      struct tb_port *down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_pci_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;

        path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
                             "PCIe Down");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_DOWN] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
                             "PCIe Up");
        if (!path)
                goto err_free;
        tunnel->paths[TB_PCI_PATH_UP] = path;
        if (tb_pci_init_path(path))
                goto err_free;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

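/*
 * Illustrative sketch, not part of the driver: a connection manager
 * would typically create and bring up a PCIe tunnel roughly like this,
 * assuming the @up/@down adapter pair has already been chosen:
 *
 *      tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *      if (!tunnel)
 *              return -ENOMEM;
 *      if (tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_free(tunnel);
 *              return -EIO;
 *      }
 */
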
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
        /* Titan Ridge DP adapters need the same treatment as USB4 */
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
        int timeout = 10;
        u32 val;
        int ret;

        /* Both ends need to support this */
        if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
                return 0;

        ret = tb_port_read(out, &val, TB_CFG_PORT,
                           out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

        ret = tb_port_write(out, &val, TB_CFG_PORT,
                            out->cap_adap + DP_STATUS_CTRL, 1);
        if (ret)
                return ret;

        do {
                ret = tb_port_read(out, &val, TB_CFG_PORT,
                                   out->cap_adap + DP_STATUS_CTRL, 1);
                if (ret)
                        return ret;

                if (!(val & DP_STATUS_CTRL_CMHS))
                        return 0;

                usleep_range(10, 100);
        } while (timeout--);

        return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
        u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

        switch (rate) {
        case DP_COMMON_CAP_RATE_RBR:
                return 1620;
        case DP_COMMON_CAP_RATE_HBR:
                return 2700;
        case DP_COMMON_CAP_RATE_HBR2:
                return 5400;
        case DP_COMMON_CAP_RATE_HBR3:
                return 8100;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
        val &= ~DP_COMMON_CAP_RATE_MASK;
        switch (rate) {
        default:
                WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
                fallthrough;
        case 1620:
                val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 2700:
                val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 5400:
                val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        case 8100:
                val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
                break;
        }
        return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
        u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

        switch (lanes) {
        case DP_COMMON_CAP_1_LANE:
                return 1;
        case DP_COMMON_CAP_2_LANES:
                return 2;
        case DP_COMMON_CAP_4_LANES:
                return 4;
        default:
                return 0;
        }
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
        val &= ~DP_COMMON_CAP_LANES_MASK;
        switch (lanes) {
        default:
                WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
                     lanes);
                fallthrough;
        case 1:
                val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
                break;
        }
        return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
        /* Tunneling removes the DP 8b/10b encoding */
        return rate * lanes * 8 / 10;
}

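/*
 * For example, HBR2 x4 (5400 Mb/s per lane, four lanes):
 * tb_dp_bandwidth(5400, 4) = 5400 * 4 * 8 / 10 = 17280 Mb/s.
 */
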
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
                                  u32 out_rate, u32 out_lanes, u32 *new_rate,
                                  u32 *new_lanes)
{
        static const u32 dp_bw[][2] = {
                /* Mb/s, lanes */
                { 8100, 4 }, /* 25920 Mb/s */
                { 5400, 4 }, /* 17280 Mb/s */
                { 8100, 2 }, /* 12960 Mb/s */
                { 2700, 4 }, /* 8640 Mb/s */
                { 5400, 2 }, /* 8640 Mb/s */
                { 8100, 1 }, /* 6480 Mb/s */
                { 1620, 4 }, /* 5184 Mb/s */
                { 5400, 1 }, /* 4320 Mb/s */
                { 2700, 2 }, /* 4320 Mb/s */
                { 1620, 2 }, /* 2592 Mb/s */
                { 2700, 1 }, /* 2160 Mb/s */
                { 1620, 1 }, /* 1296 Mb/s */
        };
        unsigned int i;

        /*
         * Find a combination that can fit into max_bw and does not
         * exceed the maximum rate and lanes supported by the DP OUT and
         * DP IN adapters.
         */
        for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
                if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
                        continue;

                if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
                        continue;

                if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
                        *new_rate = dp_bw[i][0];
                        *new_lanes = dp_bw[i][1];
                        return 0;
                }
        }

        return -ENOSR;
}

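/*
 * For example, if both adapters support HBR3 x4 but max_bw is
 * 12000 Mb/s, the walk down dp_bw[] skips the 25920, 17280 and
 * 12960 Mb/s entries and settles on 2700 Mb/s x4 = 8640 Mb/s, the
 * highest combination that still fits.
 */
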
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
        u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
        struct tb_port *out = tunnel->dst_port;
        struct tb_port *in = tunnel->src_port;
        int ret, max_bw;

        /*
         * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
         * newer generation hardware.
         */
        if (in->sw->generation < 2 || out->sw->generation < 2)
                return 0;

        /*
         * Perform connection manager handshake between IN and OUT ports
         * before capabilities exchange can take place.
         */
        ret = tb_dp_cm_handshake(in, out);
        if (ret)
                return ret;

        /* Read both DP_LOCAL_CAP registers */
        ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
                           in->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
                           out->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return ret;

        /* Write IN local caps to OUT remote caps */
        ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
                            out->cap_adap + DP_REMOTE_CAP, 1);
        if (ret)
                return ret;

        in_rate = tb_dp_cap_get_rate(in_dp_cap);
        in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
        tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

        /*
         * If the tunnel bandwidth is limited (max_bw is set) then see
         * if we need to reduce bandwidth to fit there.
         */
        out_rate = tb_dp_cap_get_rate(out_dp_cap);
        out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
        bw = tb_dp_bandwidth(out_rate, out_lanes);
        tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                    out_rate, out_lanes, bw);

        if (in->sw->config.depth < out->sw->config.depth)
                max_bw = tunnel->max_down;
        else
                max_bw = tunnel->max_up;

        if (max_bw && bw > max_bw) {
                u32 new_rate, new_lanes, new_bw;

                ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
                                             out_rate, out_lanes, &new_rate,
                                             &new_lanes);
                if (ret) {
                        tb_port_info(out, "not enough bandwidth for DP tunnel\n");
                        return ret;
                }

                new_bw = tb_dp_bandwidth(new_rate, new_lanes);
                tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
                            new_rate, new_lanes, new_bw);

                /*
                 * Set new rate and number of lanes before writing it to
                 * the IN port remote caps.
                 */
                out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
                out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
        }

        return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
                             in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
        int ret;

        if (active) {
                struct tb_path **paths;
                int last;

                paths = tunnel->paths;
                last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

                tb_dp_port_set_hops(tunnel->src_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

                tb_dp_port_set_hops(tunnel->dst_port,
                        paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
                        paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
                        paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
        } else {
                tb_dp_port_hpd_clear(tunnel->src_port);
                tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
                if (tb_port_is_dpout(tunnel->dst_port))
                        tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
        }

        ret = tb_dp_port_enable(tunnel->src_port, active);
        if (ret)
                return ret;

        if (tb_port_is_dpout(tunnel->dst_port))
                return tb_dp_port_enable(tunnel->dst_port, active);

        return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                    int *consumed_down)
{
        struct tb_port *in = tunnel->src_port;
        const struct tb_switch *sw = in->sw;
        u32 val, rate = 0, lanes = 0;
        int ret;

        if (tb_dp_is_usb4(sw)) {
                int timeout = 20;

                /*
                 * Wait for DPRX done. Normally it should be already set
                 * for active tunnel.
                 */
                do {
                        ret = tb_port_read(in, &val, TB_CFG_PORT,
                                           in->cap_adap + DP_COMMON_CAP, 1);
                        if (ret)
                                return ret;

                        if (val & DP_COMMON_CAP_DPRX_DONE) {
                                rate = tb_dp_cap_get_rate(val);
                                lanes = tb_dp_cap_get_lanes(val);
                                break;
                        }
                        msleep(250);
                } while (timeout--);

                if (!timeout)
                        return -ETIMEDOUT;
        } else if (sw->generation >= 2) {
                /*
                 * Read from the copied remote cap so that we take into
                 * account if capabilities were reduced during exchange.
                 */
                ret = tb_port_read(in, &val, TB_CFG_PORT,
                                   in->cap_adap + DP_REMOTE_CAP, 1);
                if (ret)
                        return ret;

                rate = tb_dp_cap_get_rate(val);
                lanes = tb_dp_cap_get_lanes(val);
        } else {
                /* No bandwidth management for legacy devices */
                *consumed_up = 0;
                *consumed_down = 0;
                return 0;
        }

        if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
                *consumed_up = 0;
                *consumed_down = tb_dp_bandwidth(rate, lanes);
        } else {
                *consumed_up = tb_dp_bandwidth(rate, lanes);
                *consumed_down = 0;
        }

        return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port))
                hop->initial_credits = sw->min_dp_aux_credits;
        else
                hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 2;
        path->weight = 1;

        tb_path_for_each_hop(path, hop)
                tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int nfc_credits;
                size_t max_dp_streams;

                tb_available_credits(port, &max_dp_streams);
                /*
                 * Read the number of currently allocated NFC credits
                 * from the lane adapter. Since we only use them for DP
                 * tunneling we can use that to figure out how many DP
                 * tunnels already go through the lane adapter.
                 */
                nfc_credits = port->config.nfc_credits &
                                ADP_CS_4_NFC_BUFFERS_MASK;
                if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
                        return -ENOSPC;

                hop->nfc_credits = sw->min_dp_main_credits;
        } else {
                hop->nfc_credits = min(port->total_credits - 2, 12U);
        }

        return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_NONE;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 1;
        path->weight = 1;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dp_init_video_credits(hop);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
                                        bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_port *port;
        struct tb_path *path;

        if (!tb_dp_port_is_enabled(in))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;

        path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
                                &tunnel->dst_port, "Video", alloc_hopid);
        if (!path) {
                /* Just disable the DP IN port */
                tb_dp_port_enable(in, false);
                goto err_free;
        }
        tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
        if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
                goto err_free;

        path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
                                alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

        path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
                                &port, "AUX RX", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_DP_AUX_PATH_IN] = path;
        tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_dpout(tunnel->dst_port)) {
                tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_dp_port_is_enabled(tunnel->dst_port))
                goto err_deactivate;

        if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
                goto err_deactivate;

        if (port != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *          if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *            (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
                                     struct tb_port *out, int max_up,
                                     int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path **paths;
        struct tb_path *path;

        if (WARN_ON(!in->cap_adap || !out->cap_adap))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
        if (!tunnel)
                return NULL;

        tunnel->init = tb_dp_xchg_caps;
        tunnel->activate = tb_dp_activate;
        tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
        tunnel->src_port = in;
        tunnel->dst_port = out;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        paths = tunnel->paths;

        path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
                             1, "Video");
        if (!path)
                goto err_free;
        tb_dp_init_video_path(path);
        paths[TB_DP_VIDEO_PATH_OUT] = path;

        path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
                             TB_DP_AUX_TX_HOPID, 1, "AUX TX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_OUT] = path;

        path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
                             TB_DP_AUX_RX_HOPID, 1, "AUX RX");
        if (!path)
                goto err_free;
        tb_dp_init_aux_path(path);
        paths[TB_DP_AUX_PATH_IN] = path;

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

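/*
 * Illustrative sketch, not part of the driver: pairing a DP IN adapter
 * with a DP OUT adapter. Here "available_up"/"available_down" stand
 * for whatever bandwidth budget the caller has computed:
 *
 *      tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up,
 *                                  available_down);
 *      if (!tunnel)
 *              return -ENOMEM;
 *      if (tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_free(tunnel);
 *              return -EIO;
 *      }
 */
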
static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
        const struct tb_switch *sw = port->sw;
        int credits;

        credits = tb_available_credits(port, NULL);
        if (tb_acpi_may_tunnel_pcie())
                credits -= sw->max_pcie_credits;
        credits -= port->dma_credits;

        return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                unsigned int available = tb_dma_available_credits(port);

                /*
                 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
                 * DMA path cannot be established.
                 */
                if (available < TB_MIN_DMA_CREDITS)
                        return -ENOSPC;

                while (credits > available)
                        credits--;

                tb_port_dbg(port, "reserving %u credits for DMA path\n",
                            credits);

                port->dma_credits += credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 14 : 6;
                else
                        credits = min(port->total_credits, credits);
        }

        hop->initial_credits = credits;
        return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;
        unsigned int i, tmp;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        /*
         * First lane adapter is the one connected to the remote host.
         * We don't tunnel other traffic over this link so can use all
         * the credits (except the ones reserved for control traffic).
         */
        hop = &path->hops[0];
        tmp = min(tb_usable_credits(hop->in_port), credits);
        hop->initial_credits = tmp;
        hop->in_port->dma_credits += tmp;

        for (i = 1; i < path->path_length; i++) {
                int ret;

                ret = tb_dma_reserve_credits(&path->hops[i], credits);
                if (ret)
                        return ret;
        }

        return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_ALL;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 5;
        path->weight = 1;
        path->clear_fc = true;

        tb_path_for_each_hop(path, hop) {
                int ret;

                ret = tb_dma_reserve_credits(hop, credits);
                if (ret)
                        return ret;
        }

        return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;

        if (tb_port_use_credit_allocation(port)) {
                port->dma_credits -= hop->initial_credits;

                tb_port_dbg(port, "released %u DMA path credits\n",
                            hop->initial_credits);
        }
}

static void tb_dma_deinit_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        tb_path_for_each_hop(path, hop)
                tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;
                tb_dma_deinit_path(tunnel->paths[i]);
        }
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
                                      struct tb_port *dst, int transmit_path,
                                      int transmit_ring, int receive_path,
                                      int receive_ring)
{
        struct tb_tunnel *tunnel;
        size_t npaths = 0, i = 0;
        struct tb_path *path;
        int credits;

        if (receive_ring > 0)
                npaths++;
        if (transmit_ring > 0)
                npaths++;

        if (WARN_ON(!npaths))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
        if (!tunnel)
                return NULL;

        tunnel->src_port = nhi;
        tunnel->dst_port = dst;
        tunnel->deinit = tb_dma_deinit;

        credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

        if (receive_ring > 0) {
                path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
                                     "DMA RX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_rx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
                        goto err_free;
                }
        }

        if (transmit_ring > 0) {
                path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
                                     "DMA TX");
                if (!path)
                        goto err_free;
                tunnel->paths[i++] = path;
                if (tb_dma_init_tx_path(path, credits)) {
                        tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
                        goto err_free;
                }
        }

        return tunnel;

err_free:
        tb_tunnel_free(tunnel);
        return NULL;
}

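/*
 * Illustrative sketch, not part of the driver: an XDomain service
 * wanting only a transmit direction could pass %-1 for the RX side,
 * e.g. with hypothetical HopID/ring values:
 *
 *      tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, -1, -1);
 *
 * which allocates a single "DMA TX" path from NHI ring 1 towards
 * HopID 8 on @dst.
 */
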
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *                 other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *                other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
                         int transmit_ring, int receive_path, int receive_ring)
{
        const struct tb_path *tx_path = NULL, *rx_path = NULL;
        int i;

        if (!receive_ring || !transmit_ring)
                return false;

        for (i = 0; i < tunnel->npaths; i++) {
                const struct tb_path *path = tunnel->paths[i];

                if (!path)
                        continue;

                if (tb_port_is_nhi(path->hops[0].in_port))
                        tx_path = path;
                else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
                        rx_path = path;
        }

        if (transmit_ring > 0 || transmit_path > 0) {
                if (!tx_path)
                        return false;
                if (transmit_ring > 0 &&
                    (tx_path->hops[0].in_hop_index != transmit_ring))
                        return false;
                if (transmit_path > 0 &&
                    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
                        return false;
        }

        if (receive_ring > 0 || receive_path > 0) {
                if (!rx_path)
                        return false;
                if (receive_path > 0 &&
                    (rx_path->hops[0].in_hop_index != receive_path))
                        return false;
                if (receive_ring > 0 &&
                    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
                        return false;
        }

        return true;
}

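/*
 * For example, tb_tunnel_match_dma(tunnel, -1, -1, -1, 2) matches any
 * tunnel whose RX path ends on NHI HopID (ring) 2, regardless of the
 * TX side, while passing %0 for either ring never matches.
 */
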
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
        int ret, up_max_rate, down_max_rate;

        ret = usb4_usb3_port_max_link_rate(up);
        if (ret < 0)
                return ret;
        up_max_rate = ret;

        ret = usb4_usb3_port_max_link_rate(down);
        if (ret < 0)
                return ret;
        down_max_rate = ret;

        return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
        tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);

        return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
                                                 &tunnel->allocated_up,
                                                 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
        int res;

        res = tb_usb3_port_enable(tunnel->src_port, activate);
        if (res)
                return res;

        if (tb_port_is_usb3_up(tunnel->dst_port))
                return tb_usb3_port_enable(tunnel->dst_port, activate);

        return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
                                      int *consumed_up, int *consumed_down)
{
        int pcie_enabled = tb_acpi_may_tunnel_pcie();

        /*
         * PCIe tunneling, if enabled, affects the USB3 bandwidth so
         * take it into account here.
         */
        *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
        *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
        return 0;
}

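/*
 * For example, with 900 Mb/s allocated in both directions the tunnel
 * reports 900 * 4 / 3 = 1200 Mb/s consumed when PCIe tunneling is
 * enabled, and exactly 900 Mb/s when it is not.
 */
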
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        int ret;

        ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
                                               &tunnel->allocated_up,
                                               &tunnel->allocated_down);
        if (ret)
                return ret;

        tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
        return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                                int *available_up,
                                                int *available_down)
{
        int ret, max_rate, allocate_up, allocate_down;

        ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
        if (ret < 0) {
                tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
                return;
        } else if (!ret) {
                /* Use maximum link rate if the link valid is not set */
                ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
                }
        }

        /*
         * 90% of the max rate can be allocated for isochronous
         * transfers.
         */
        max_rate = ret * 90 / 100;

        /* No need to reclaim if already at maximum */
        if (tunnel->allocated_up >= max_rate &&
            tunnel->allocated_down >= max_rate)
                return;

        /* Don't go lower than what is already allocated */
        allocate_up = min(max_rate, *available_up);
        if (allocate_up < tunnel->allocated_up)
                allocate_up = tunnel->allocated_up;

        allocate_down = min(max_rate, *available_down);
        if (allocate_down < tunnel->allocated_down)
                allocate_down = tunnel->allocated_down;

        /* If no changes no need to do more */
        if (allocate_up == tunnel->allocated_up &&
            allocate_down == tunnel->allocated_down)
                return;

        ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
                                                &allocate_down);
        if (ret) {
                tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
                return;
        }

        tunnel->allocated_up = allocate_up;
        *available_up -= tunnel->allocated_up;

        tunnel->allocated_down = allocate_down;
        *available_down -= tunnel->allocated_down;

        tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
                      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
        struct tb_port *port = hop->in_port;
        struct tb_switch *sw = port->sw;
        unsigned int credits;

        if (tb_port_use_credit_allocation(port)) {
                credits = sw->max_usb3_credits;
        } else {
                if (tb_port_is_null(port))
                        credits = port->bonded ? 32 : 16;
                else
                        credits = 7;
        }

        hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
        struct tb_path_hop *hop;

        path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
        path->egress_shared_buffer = TB_PATH_NONE;
        path->ingress_fc_enable = TB_PATH_ALL;
        path->ingress_shared_buffer = TB_PATH_NONE;
        path->priority = 3;
        path->weight = 3;
        path->drop_packages = 0;

        tb_path_for_each_hop(path, hop)
                tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
                                          bool alloc_hopid)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        if (!tb_usb3_port_is_enabled(down))
                return NULL;

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;

        /*
         * Discover both paths even if they are not complete. We will
         * clean them up by calling tb_tunnel_deactivate() below in that
         * case.
         */
        path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
                                &tunnel->dst_port, "USB3 Down", alloc_hopid);
        if (!path) {
                /* Just disable the downstream port */
                tb_usb3_port_enable(down, false);
                goto err_free;
        }
        tunnel->paths[TB_USB3_PATH_DOWN] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

        path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
                                "USB3 Up", alloc_hopid);
        if (!path)
                goto err_deactivate;
        tunnel->paths[TB_USB3_PATH_UP] = path;
        tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

        /* Validate that the tunnel is complete */
        if (!tb_port_is_usb3_up(tunnel->dst_port)) {
                tb_port_warn(tunnel->dst_port,
                             "path does not end on a USB3 adapter, cleaning up\n");
                goto err_deactivate;
        }

        if (down != tunnel->src_port) {
                tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
                tb_tunnel_warn(tunnel,
                               "tunnel is not fully activated, cleaning up\n");
                goto err_deactivate;
        }

        if (!tb_route(down->sw)) {
                int ret;

                /*
                 * Read the initial bandwidth allocation for the first
                 * hop tunnel.
                 */
                ret = usb4_usb3_port_allocated_bandwidth(down,
                        &tunnel->allocated_up, &tunnel->allocated_down);
                if (ret)
                        goto err_deactivate;

                tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
                              tunnel->allocated_up, tunnel->allocated_down);

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        tb_tunnel_dbg(tunnel, "discovered\n");
        return tunnel;

err_deactivate:
        tb_tunnel_deactivate(tunnel);
err_free:
        tb_tunnel_free(tunnel);

        return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *          if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *            (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
                                       struct tb_port *down, int max_up,
                                       int max_down)
{
        struct tb_tunnel *tunnel;
        struct tb_path *path;
        int max_rate = 0;

        /*
         * Check that we have enough bandwidth available for the new
         * USB3 tunnel.
         */
        if (max_up > 0 || max_down > 0) {
                max_rate = tb_usb3_max_link_rate(down, up);
                if (max_rate < 0)
                        return NULL;

                /* Only 90% can be allocated for USB3 isochronous transfers */
                max_rate = max_rate * 90 / 100;
                tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
                            max_rate);

                if (max_rate > max_up || max_rate > max_down) {
                        tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
                        return NULL;
                }
        }

        tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
        if (!tunnel)
                return NULL;

        tunnel->activate = tb_usb3_activate;
        tunnel->src_port = down;
        tunnel->dst_port = up;
        tunnel->max_up = max_up;
        tunnel->max_down = max_down;

        path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
                             "USB3 Down");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_DOWN] = path;

        path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
                             "USB3 Up");
        if (!path) {
                tb_tunnel_free(tunnel);
                return NULL;
        }
        tb_usb3_init_path(path);
        tunnel->paths[TB_USB3_PATH_UP] = path;

        if (!tb_route(down->sw)) {
                tunnel->allocated_up = max_rate;
                tunnel->allocated_down = max_rate;

                tunnel->init = tb_usb3_init;
                tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
                tunnel->release_unused_bandwidth =
                        tb_usb3_release_unused_bandwidth;
                tunnel->reclaim_available_bandwidth =
                        tb_usb3_reclaim_available_bandwidth;
        }

        return tunnel;
}

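/*
 * Illustrative sketch, not part of the driver: tunneling USB3 from the
 * host to the first connected device with no bandwidth limit
 * enforcement could look roughly like this:
 *
 *      tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
 *      if (!tunnel)
 *              return -ENOMEM;
 *      if (tb_tunnel_activate(tunnel)) {
 *              tb_tunnel_free(tunnel);
 *              return -EIO;
 *      }
 */
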
/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
        int i;

        if (!tunnel)
                return;

        if (tunnel->deinit)
                tunnel->deinit(tunnel);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i])
                        tb_path_free(tunnel->paths[i]);
        }

        kfree(tunnel->paths);
        kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                WARN_ON(!tunnel->paths[i]->activated);
                if (tb_path_is_invalid(tunnel->paths[i]))
                        return true;
        }

        return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
        int res, i;

        tb_tunnel_dbg(tunnel, "activating\n");

        /*
         * Make sure all paths are properly disabled before enabling
         * them again.
         */
        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_path_deactivate(tunnel->paths[i]);
                        tunnel->paths[i]->activated = false;
                }
        }

        if (tunnel->init) {
                res = tunnel->init(tunnel);
                if (res)
                        return res;
        }

        for (i = 0; i < tunnel->npaths; i++) {
                res = tb_path_activate(tunnel->paths[i]);
                if (res)
                        goto err;
        }

        if (tunnel->activate) {
                res = tunnel->activate(tunnel, true);
                if (res)
                        goto err;
        }

        return 0;

err:
        tb_tunnel_warn(tunnel, "activation failed\n");
        tb_tunnel_deactivate(tunnel);
        return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i]->activated) {
                        tb_tunnel_WARN(tunnel,
                                       "trying to activate an already activated tunnel\n");
                        return -EINVAL;
                }
        }

        return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
        int i;

        tb_tunnel_dbg(tunnel, "deactivating\n");

        if (tunnel->activate)
                tunnel->activate(tunnel, false);

        for (i = 0; i < tunnel->npaths; i++) {
                if (tunnel->paths[i] && tunnel->paths[i]->activated)
                        tb_path_deactivate(tunnel->paths[i]);
        }
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
                            const struct tb_port *port)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        continue;

                if (tb_path_port_on_path(tunnel->paths[i], port))
                        return true;
        }

        return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
        int i;

        for (i = 0; i < tunnel->npaths; i++) {
                if (!tunnel->paths[i])
                        return false;
                if (!tunnel->paths[i]->activated)
                        return false;
        }

        return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *               Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *                 Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                                 int *consumed_down)
{
        int up_bw = 0, down_bw = 0;

        if (!tb_tunnel_is_active(tunnel))
                goto out;

        if (tunnel->consumed_bandwidth) {
                int ret;

                ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
                if (ret)
                        return ret;

                tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
                              down_bw);
        }

out:
        if (consumed_up)
                *consumed_up = up_bw;
        if (consumed_down)
                *consumed_down = down_bw;

        return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
        if (!tb_tunnel_is_active(tunnel))
                return 0;

        if (tunnel->release_unused_bandwidth) {
                int ret;

                ret = tunnel->release_unused_bandwidth(tunnel);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                                           int *available_up,
                                           int *available_down)
{
        if (!tb_tunnel_is_active(tunnel))
                return;

        if (tunnel->reclaim_available_bandwidth)
                tunnel->reclaim_available_bandwidth(tunnel, available_up,
                                                    available_down);
}