/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */
12 #ifndef NET_SMC_SMC_STATS_H_
13 #define NET_SMC_SMC_STATS_H_
14 #include <linux/init.h>
15 #include <linux/mutex.h>
16 #include <linux/percpu.h>
17 #include <linux/ctype.h>
18 #include <linux/smc.h>
22 #define SMC_MAX_FBACK_RSN_CNT 30
24 extern struct smc_stats __percpu *smc_stats; /* per cpu counters for SMC */
25 extern struct smc_stats_reason fback_rsn;
26 extern struct mutex smc_stat_fback_rsn;
41 struct smc_stats_fback {
46 struct smc_stats_reason {
47 struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
48 struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
53 struct smc_stats_rmbcnt {
54 u64 buf_size_small_peer_cnt;
55 u64 buf_size_small_cnt;
56 u64 buf_full_peer_cnt;
63 struct smc_stats_memsize {
67 struct smc_stats_tech {
68 struct smc_stats_memsize tx_rmbsize;
69 struct smc_stats_memsize rx_rmbsize;
70 struct smc_stats_memsize tx_pd;
71 struct smc_stats_memsize rx_pd;
72 struct smc_stats_rmbcnt rmb_tx;
73 struct smc_stats_rmbcnt rmb_rx;
90 struct smc_stats_tech smc[2];
91 u64 clnt_hshake_err_cnt;
92 u64 srv_hshake_err_cnt;
/* Account one tx/rx payload of _len bytes with result _rc for technology
 * _tech: always bump the per-call counter; when the operation succeeded
 * (_rc > 0), also bump the matching size-histogram bucket and the byte total.
 * Bucketing: fls64(len >> 13) maps 8K..1M+ onto the SMC_BUF_* slots; an
 * exact power of two is counted in the lower bucket, everything >= the last
 * slot is clamped to SMC_BUF_MAX - 1.
 * NOTE(review): the do/while wrapper and the "_rc <= 0" early break were
 * dropped by the extraction and reconstructed from upstream.
 */
#define SMC_STAT_PAYLOAD_SUB(_tech, key, _len, _rc) \
do { \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos = fls64((l) >> 13); \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*smc_stats).smc[t].key ## _cnt); \
	if (r <= 0) \
		break; \
	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*smc_stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*smc_stats).smc[t].key ## _bytes, r); \
} \
while (0)
/* Record a transmitted payload on socket _smc, dispatching on technology:
 * a NULL conn.lnk means SMC-D, otherwise SMC-R.
 */
#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)
/* Record a received payload on socket _smc, dispatching on technology:
 * a NULL conn.lnk means SMC-D, otherwise SMC-R.
 */
#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)
/* Bump the tx/rx RMB-size histogram bucket for a buffer of _len bytes.
 * Same bucketing scheme as SMC_STAT_PAYLOAD_SUB: fls(len >> 13), exact
 * powers of two counted in the lower bucket, clamped at SMC_BUF_MAX - 1.
 */
#define SMC_STAT_RMB_SIZE_SUB(_tech, k, _len) \
do { \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos = fls((_l) >> 13); \
	int m = SMC_BUF_MAX - 1; \
	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*smc_stats).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)
/* Bump one rmb_tx/rmb_rx event counter ("type ## _cnt") for technology t. */
#define SMC_STAT_RMB_SUB(type, t, key) \
	this_cpu_inc((*smc_stats).smc[t].rmb ## _ ## key.type ## _cnt)
/* Record the size of an RMB, dispatched by technology (_is_smcd) and
 * direction (_is_rx). Exactly one of the four branches fires.
 */
#define SMC_STAT_RMB_SIZE(_is_smcd, _is_rx, _len) \
do { \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, rx, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, tx, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, rx, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, tx, l); \
} \
while (0)
/* Bump the RMB event counter "type" (reuse/alloc/dgrade/buf_full/...),
 * dispatched by technology (_is_smcd) and direction (_is_rx).
 */
#define SMC_STAT_RMB(type, _is_smcd, _is_rx) \
do { \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, tx); \
} \
while (0)
/* Convenience wrappers around SMC_STAT_RMB() for the individual events. */
#define SMC_STAT_BUF_REUSE(is_smcd, is_rx) \
	SMC_STAT_RMB(reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(is_smcd, is_rx) \
	SMC_STAT_RMB(alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(is_smcd, is_rx) \
	SMC_STAT_RMB(dgrade, is_smcd, is_rx)

/* tx-side buffer conditions (is_rx == false) */
#define SMC_STAT_RMB_TX_PEER_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small, is_smcd, false)

/* rx-side buffer conditions (is_rx == true) */
#define SMC_STAT_RMB_RX_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full, is_smcd, true)
/* Bump one per-technology counter "type", chosen by is_smcd.
 * NOTE(review): the do/while wrapper and if/else lines were dropped by the
 * extraction and reconstructed — the two this_cpu_inc branches are visible.
 */
#define SMC_STAT_INC(is_smcd, type) \
do { \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)
/* Count a successful client-side SMC handshake, split by protocol version
 * (taken from the received CLC accept message header) and technology.
 */
#define SMC_STAT_CLNT_SUCC_INC(_aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)
/* Count a successful server-side SMC handshake, split by protocol version
 * (SMC_V2 bit in the negotiated smcd_version mask) and technology.
 */
#define SMC_STAT_SERV_SUCC_INC(_ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_v2 = (i->smcd_version & SMC_V2); \
	bool is_smcd = (i->is_smcd); \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)
250 int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
251 int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
252 int smc_stats_init(void) __init;
253 void smc_stats_exit(void);
255 #endif /* NET_SMC_SMC_STATS_H_ */