author     Mark Bloch <mbloch@nvidia.com>  2021-03-01 09:04:20 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>  2021-03-26 09:31:21 -0300
commit     1fb7f8973f51ca1a7ffe61a2c779ed15f57f3d82 (patch)
tree       9fee23821e374c8fb844521d88f87ea377c529f6 /include/rdma
parent     847d19a451465304f54d69b5be97baecc86c3617 (diff)
download   linux-1fb7f8973f51ca1a7ffe61a2c779ed15f57f3d82.tar.gz
RDMA: Support more than 255 rdma ports
Current code uses many different types when dealing with a port of an RDMA device: u8, unsigned int and u32. Switch to u32 to clean up the logic.

This allows us to make (at least) the core view consistent and use the same type. Unfortunately not all places can be converted. Many uverbs functions expect port to be u8, so keep those places unchanged in order not to break UAPIs. HW/Spec defined values must also not be changed.

With the switch to u32 we can now support devices with more than 255 ports. U32_MAX is reserved to make the control logic a bit easier to deal with. As a device with U32_MAX ports probably isn't going to happen any time soon, this seems like a non-issue.

When a device with more than 255 ports is created, uverbs will report the RDMA device as having 255 ports, as this is the maximum currently supported.

The verbs interface is not changed yet because the IBTA spec limits the port size to u8 in too many places, and applications that rely on verbs won't be able to cope with this change. At this stage, we are extending only the interfaces that use the vendor channel.

Once the limitation is lifted, mlx5 in switchdev mode will be able to have thousands of SFs created by the device. As the only instance of an RDMA device that reports more than 255 ports will be a representor device, and it exposes itself as a RAW Ethernet only device, CM/MAD/IPoIB and other ULPs aren't affected by this change, and their sysfs interfaces that are exposed to userspace can remain unchanged.

While here, clean up some alignment issues and remove unneeded sanity checks (mainly in rdmavt).

Link: https://lore.kernel.org/r/20210301070420.439400-1-leon@kernel.org
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
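For illustration only (not part of this commit): a minimal sketch of how a driver-side consumer would walk all ports after this change, using the u32-based rdma_for_each_port() iterator and ib_query_port() from ib_verbs.h. The helper name my_dump_ports is hypothetical and shown only to demonstrate that the iterator variable must now be declared as u32.

    #include <rdma/ib_verbs.h>

    /* Hypothetical helper, only to illustrate the u32 port type. */
    static void my_dump_ports(struct ib_device *device)
    {
    	u32 port;	/* must be u32: rdma_for_each_port() now type-checks this */

    	rdma_for_each_port(device, port) {
    		struct ib_port_attr attr;

    		if (ib_query_port(device, port, &attr))
    			continue;
    		pr_info("%s: port %u, state %d\n",
    			dev_name(&device->dev), port, attr.state);
    	}
    }

Declaring the iterator as u8 or unsigned int would trip the BUILD_BUG_ON_ZERO(!__same_type(u32, iter)) check added to rdma_for_each_port() below.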
Diffstat (limited to 'include/rdma')
-rw-r--r--   include/rdma/ib_cache.h       18
-rw-r--r--   include/rdma/ib_mad.h          2
-rw-r--r--   include/rdma/ib_sa.h          15
-rw-r--r--   include/rdma/ib_verbs.h      177
-rw-r--r--   include/rdma/rdma_cm.h         2
-rw-r--r--   include/rdma/rdma_counter.h   16
-rw-r--r--   include/rdma/rdma_vt.h         8
-rw-r--r--   include/rdma/rw.h             18
8 files changed, 135 insertions, 121 deletions
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index bae29f50adffc..226ae3702d8a8 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -10,7 +10,7 @@
#include <rdma/ib_verbs.h>
-int rdma_query_gid(struct ib_device *device, u8 port_num, int index,
+int rdma_query_gid(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr);
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
@@ -20,10 +20,10 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev,
const union ib_gid *gid,
enum ib_gid_type gid_type,
- u8 port,
+ u32 port,
struct net_device *ndev);
const struct ib_gid_attr *rdma_find_gid_by_filter(
- struct ib_device *device, const union ib_gid *gid, u8 port_num,
+ struct ib_device *device, const union ib_gid *gid, u32 port_num,
bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
void *),
void *context);
@@ -43,7 +43,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
* the local software cache.
*/
int ib_get_cached_pkey(struct ib_device *device_handle,
- u8 port_num,
+ u32 port_num,
int index,
u16 *pkey);
@@ -59,7 +59,7 @@ int ib_get_cached_pkey(struct ib_device *device_handle,
* the local software cache.
*/
int ib_find_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -75,7 +75,7 @@ int ib_find_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_find_exact_cached_pkey(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u16 pkey,
u16 *index);
@@ -89,7 +89,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_lmc(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
u8 *lmc);
/**
@@ -102,12 +102,12 @@ int ib_get_cached_lmc(struct ib_device *device,
* the local software cache.
*/
int ib_get_cached_port_state(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *port_active);
bool rdma_is_zero_gid(const union ib_gid *gid);
const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device,
- u8 port_num, int index);
+ u32 port_num, int index);
void rdma_put_gid_attr(const struct ib_gid_attr *attr);
void rdma_hold_gid_attr(const struct ib_gid_attr *attr);
ssize_t rdma_query_gid_table(struct ib_device *device,
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 8dfb1ddf345a1..f1d34f06a68b8 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -668,7 +668,7 @@ struct ib_mad_reg_req {
* @registration_flags: Registration flags to set for this agent
*/
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
- u8 port_num,
+ u32 port_num,
enum ib_qp_type qp_type,
struct ib_mad_reg_req *mad_reg_req,
u8 rmpp_version,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 4c52c2fd22a17..ba3c808a3789e 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -423,7 +423,7 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
- u8 port_num, struct sa_path_rec *rec,
+ u32 port_num, struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
void (*callback)(int status, struct sa_path_rec *resp,
@@ -431,7 +431,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
void *context, struct ib_sa_query **query);
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num, u8 method,
+ struct ib_device *device, u32 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
@@ -477,7 +477,8 @@ struct ib_sa_multicast {
* group, and the user must rejoin the group to continue using it.
*/
struct ib_sa_multicast *ib_sa_join_multicast(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device,
+ u32 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
int (*callback)(int status,
@@ -506,20 +507,20 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast);
* @mgid: MGID of multicast group.
* @rec: Location to copy SA multicast member record.
*/
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
union ib_gid *mgid, struct ib_sa_mcmember_rec *rec);
/**
* ib_init_ah_from_mcmember - Initialize address handle attributes based on
* an SA multicast member record.
*/
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr);
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *sgid_attr);
@@ -538,7 +539,7 @@ void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec);
/* Support GuidInfoRecord */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
+ struct ib_device *device, u32 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
unsigned long timeout_ms, gfp_t gfp_mask,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 21c19b1fcee7a..bed4cfe50554f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -152,7 +152,7 @@ struct ib_gid_attr {
union ib_gid gid;
enum ib_gid_type gid_type;
u16 index;
- u8 port_num;
+ u32 port_num;
};
enum {
@@ -736,7 +736,7 @@ struct ib_event {
struct ib_qp *qp;
struct ib_srq *srq;
struct ib_wq *wq;
- u8 port_num;
+ u32 port_num;
} element;
enum ib_event_type event;
};
@@ -919,7 +919,7 @@ struct rdma_ah_attr {
struct ib_global_route grh;
u8 sl;
u8 static_rate;
- u8 port_num;
+ u32 port_num;
u8 ah_flags;
enum rdma_ah_attr_type type;
union {
@@ -1006,7 +1006,7 @@ struct ib_wc {
u16 pkey_index;
u8 sl;
u8 dlid_path_bits;
- u8 port_num; /* valid only for DR SMPs on switches */
+ u32 port_num; /* valid only for DR SMPs on switches */
u8 smac[ETH_ALEN];
u16 vlan_id;
u8 network_hdr_type;
@@ -1161,7 +1161,7 @@ struct ib_qp_init_attr {
/*
* Only needed for special QP types, or when using the RW API.
*/
- u8 port_num;
+ u32 port_num;
struct ib_rwq_ind_table *rwq_ind_tbl;
u32 source_qpn;
};
@@ -1280,11 +1280,11 @@ struct ib_qp_attr {
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
u8 min_rnr_timer;
- u8 port_num;
+ u32 port_num;
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
- u8 alt_port_num;
+ u32 alt_port_num;
u8 alt_timeout;
u32 rate_limit;
struct net_device *xmit_slave;
@@ -1401,7 +1401,7 @@ struct ib_ud_wr {
u32 remote_qpn;
u32 remote_qkey;
u16 pkey_index; /* valid for GSI only */
- u8 port_num; /* valid for DR SMPs on switch only */
+ u32 port_num; /* valid for DR SMPs on switch only */
};
static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
@@ -1708,7 +1708,7 @@ struct ib_qp_security;
struct ib_port_pkey {
enum port_pkey_state state;
u16 pkey_index;
- u8 port_num;
+ u32 port_num;
struct list_head qp_list;
struct list_head to_error_list;
struct ib_qp_security *sec;
@@ -1769,7 +1769,7 @@ struct ib_qp {
enum ib_qp_type qp_type;
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
- u8 port;
+ u32 port;
bool integrity_en;
/*
@@ -2065,7 +2065,7 @@ struct ib_flow_attr {
u16 priority;
u32 flags;
u8 num_of_specs;
- u8 port;
+ u32 port;
union ib_flow_spec flows[];
};
@@ -2194,7 +2194,7 @@ enum rdma_netdev_t {
struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
- u8 port_num;
+ u32 port_num;
int mtu;
/*
@@ -2223,7 +2223,7 @@ struct rdma_netdev_alloc_params {
unsigned int rxqs;
void *param;
- int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
struct net_device *netdev, void *param);
};
@@ -2305,7 +2305,7 @@ struct ib_device_ops {
const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int (*process_mad)(struct ib_device *device, int process_mad_flags,
- u8 port_num, const struct ib_wc *in_wc,
+ u32 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad,
size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -2317,9 +2317,9 @@ struct ib_device_ops {
void (*get_dev_fw_str)(struct ib_device *device, char *str);
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
- int (*query_port)(struct ib_device *device, u8 port_num,
+ int (*query_port)(struct ib_device *device, u32 port_num,
struct ib_port_attr *port_attr);
- int (*modify_port)(struct ib_device *device, u8 port_num,
+ int (*modify_port)(struct ib_device *device, u32 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
/**
@@ -2328,10 +2328,10 @@ struct ib_device_ops {
* structure to avoid cache line misses when accessing struct ib_device
* in fast paths.
*/
- int (*get_port_immutable)(struct ib_device *device, u8 port_num,
+ int (*get_port_immutable)(struct ib_device *device, u32 port_num,
struct ib_port_immutable *immutable);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
@@ -2340,7 +2340,8 @@ struct ib_device_ops {
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
- struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
+ struct net_device *(*get_netdev)(struct ib_device *device,
+ u32 port_num);
/**
* rdma netdev operation
*
@@ -2348,11 +2349,11 @@ struct ib_device_ops {
* must return -EOPNOTSUPP if it doesn't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
- struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
+ struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
const char *name, unsigned char name_assign_type,
void (*setup)(struct net_device *));
- int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
/**
@@ -2360,7 +2361,7 @@ struct ib_device_ops {
* link layer is either IB or iWarp. It is no-op if @port_num port
* is RoCE link layer.
*/
- int (*query_gid)(struct ib_device *device, u8 port_num, int index,
+ int (*query_gid)(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
/**
* When calling add_gid, the HW vendor's driver should add the gid
@@ -2385,7 +2386,7 @@ struct ib_device_ops {
* This function is only called when roce_gid_table is used.
*/
int (*del_gid)(const struct ib_gid_attr *attr, void **context);
- int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
+ int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
u16 *pkey);
int (*alloc_ucontext)(struct ib_ucontext *context,
struct ib_udata *udata);
@@ -2474,16 +2475,16 @@ struct ib_device_ops {
struct ib_flow_action *action,
const struct ib_flow_action_attrs_esp *attr,
struct uverbs_attr_bundle *attrs);
- int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+ int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
int state);
- int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *ivf);
- int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
- int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
- int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+ int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
@@ -2521,7 +2522,7 @@ struct ib_device_ops {
* struct tells the core to set a default lifespan.
*/
struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
@@ -2535,12 +2536,12 @@ struct ib_device_ops {
* one given in index at their option
*/
int (*get_hw_stats)(struct ib_device *device,
- struct rdma_hw_stats *stats, u8 port, int index);
+ struct rdma_hw_stats *stats, u32 port, int index);
/*
* This function is called once for each port when a ib device is
* registered.
*/
- int (*init_port)(struct ib_device *device, u8 port_num,
+ int (*init_port)(struct ib_device *device, u32 port_num,
struct kobject *port_sysfs);
/**
* Allows rdma drivers to add their own restrack attributes.
@@ -2684,7 +2685,7 @@ struct ib_device {
/* CQ adaptive moderation (RDMA DIM) */
u16 use_cq_dim:1;
u8 node_type;
- u8 phys_port_cnt;
+ u32 phys_port_cnt;
struct ib_device_attr attrs;
struct attribute_group *hw_stats_ag;
struct rdma_hw_stats *hw_stats;
@@ -2750,7 +2751,7 @@ struct ib_client {
* netdev. */
struct net_device *(*get_net_dev_by_params)(
struct ib_device *dev,
- u8 port,
+ u32 port,
u16 pkey,
const union ib_gid *gid,
const struct sockaddr *addr,
@@ -2931,10 +2932,10 @@ void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
- u8 port_num, struct ib_port_attr *port_attr);
+ u32 port_num, struct ib_port_attr *port_attr);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
- u8 port_num);
+ u32 port_num);
/**
* rdma_cap_ib_switch - Check if the device is IB switch
@@ -2958,7 +2959,7 @@ static inline bool rdma_cap_ib_switch(const struct ib_device *device)
*
* Return start port number
*/
-static inline u8 rdma_start_port(const struct ib_device *device)
+static inline u32 rdma_start_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : 1;
}
@@ -2969,9 +2970,10 @@ static inline u8 rdma_start_port(const struct ib_device *device)
* @iter - The unsigned int to store the port number
*/
#define rdma_for_each_port(device, iter) \
- for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
- unsigned int, iter))); \
- iter <= rdma_end_port(device); (iter)++)
+ for (iter = rdma_start_port(device + \
+ BUILD_BUG_ON_ZERO(!__same_type(u32, \
+ iter))); \
+ iter <= rdma_end_port(device); iter++)
/**
* rdma_end_port - Return the last valid port number for the device
@@ -2981,7 +2983,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
*
* Return last port number
*/
-static inline u8 rdma_end_port(const struct ib_device *device)
+static inline u32 rdma_end_port(const struct ib_device *device)
{
return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
@@ -2994,55 +2996,63 @@ static inline int rdma_is_port_valid(const struct ib_device *device,
}
static inline bool rdma_is_grh_required(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_PORT_IB_GRH_REQUIRED;
}
-static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_ib(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IB;
}
-static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}
-static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}
-static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_ROCE;
}
-static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_iwarp(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_IWARP;
}
-static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
+static inline bool rdma_ib_or_roce(const struct ib_device *device,
+ u32 port_num)
{
return rdma_protocol_ib(device, port_num) ||
rdma_protocol_roce(device, port_num);
}
-static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_RAW_PACKET;
}
-static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_usnic(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_PROT_USNIC;
@@ -3060,7 +3070,7 @@ static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_n
*
* Return: true if the port supports sending/receiving of MAD packets.
*/
-static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_MAD;
@@ -3085,7 +3095,7 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
*
* Return: true if the port supports OPA MAD packet formats.
*/
-static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_MAD;
@@ -3111,7 +3121,7 @@ static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
*
* Return: true if the port provides an SMI.
*/
-static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SMI;
@@ -3132,7 +3142,7 @@ static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an IB CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_CM;
@@ -3150,7 +3160,7 @@ static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
* Return: true if the port supports an iWARP CM (this does not guarantee that
* a CM is actually running however).
*/
-static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IW_CM;
@@ -3171,7 +3181,7 @@ static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
* Administration interface. This does not imply that the SA service is
* running locally.
*/
-static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_IB_SA;
@@ -3194,7 +3204,8 @@ static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
* overhead of registering/unregistering with the SM and tracking of the
* total number of queue pairs attached to the multicast group.
*/
-static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
+ u32 port_num)
{
return rdma_cap_ib_sa(device, port_num);
}
@@ -3212,7 +3223,7 @@ static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num
* Return: true if the port uses a GID address to identify devices on the
* network.
*/
-static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_AF_IB;
@@ -3234,7 +3245,7 @@ static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
* addition of a Global Route Header built from our Ethernet Address
* Handle into our header list for connectionless packets.
*/
-static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
{
return device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_ETH_AH;
@@ -3249,7 +3260,7 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
* Return: true if we are running on an OPA device which supports
* the extended OPA addressing.
*/
-static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
+static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
{
return (device->port_data[port_num].immutable.core_cap_flags &
RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
@@ -3267,7 +3278,8 @@ static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
* Return the max MAD size required by the Port. Will return 0 if the port
* does not support MADs
*/
-static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
+static inline size_t rdma_max_mad_size(const struct ib_device *device,
+ u32 port_num)
{
return device->port_data[port_num].immutable.max_mad_size;
}
@@ -3286,7 +3298,7 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
* its GIDs.
*/
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
return rdma_protocol_roce(device, port_num) &&
device->ops.add_gid && device->ops.del_gid;
@@ -3327,7 +3339,7 @@ static inline bool rdma_core_cap_opa_port(struct ib_device *device,
* Return the MTU size supported by the port as an integer value. Will return
* -1 if enum value of mtu is not supported.
*/
-static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
+static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
int mtu)
{
if (rdma_core_cap_opa_port(device, port))
@@ -3344,7 +3356,7 @@ static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
*
* Return the MTU size supported by the port as an integer value.
*/
-static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
+static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
struct ib_port_attr *attr)
{
if (rdma_core_cap_opa_port(device, port))
@@ -3353,34 +3365,34 @@ static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
return ib_mtu_enum_to_int(attr->max_mtu);
}
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
int state);
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
struct ifla_vf_info *info);
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
struct ifla_vf_stats *stats);
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid);
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
int type);
int ib_query_pkey(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey);
+ u32 port_num, u16 index, u16 *pkey);
int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify);
int ib_modify_port(struct ib_device *device,
- u8 port_num, int port_modify_mask,
+ u32 port_num, int port_modify_mask,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- u8 *port_num, u16 *index);
+ u32 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
- u8 port_num, u16 pkey, u16 *index);
+ u32 port_num, u16 pkey, u16 *index);
enum ib_pd_flags {
/*
@@ -3495,7 +3507,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* attributes which are initialized using ib_init_ah_attr_from_wc().
*
*/
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr);
@@ -3512,7 +3524,7 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
* in all UD QP post sends.
*/
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
- const struct ib_grh *grh, u8 port_num);
+ const struct ib_grh *grh, u32 port_num);
/**
* rdma_modify_ah - Modifies the address vector associated with an address
@@ -4257,12 +4269,12 @@ struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
enum rdma_driver_id driver_id);
-struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
+struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
+struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
@@ -4296,7 +4308,8 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width);
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
+ u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
@@ -4364,12 +4377,12 @@ static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
return false;
}
-static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
+static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
{
attr->port_num = port_num;
}
-static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
+static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
return attr->port_num;
}
@@ -4467,7 +4480,7 @@ void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
* @port_num: Port number
*/
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
- u8 port_num)
+ u32 port_num)
{
if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
@@ -4539,12 +4552,12 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 32a67af18415d..b2eed6d305435 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -107,7 +107,7 @@ struct rdma_cm_id {
struct rdma_route route;
enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
- u8 port_num;
+ u32 port_num;
};
struct rdma_cm_id *
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index e75cf9742e040..0295b22cd1cd8 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -40,26 +40,26 @@ struct rdma_counter {
struct rdma_counter_mode mode;
struct mutex lock;
struct rdma_hw_stats *stats;
- u8 port;
+ u32 port;
};
void rdma_counter_init(struct ib_device *dev);
void rdma_counter_release(struct ib_device *dev);
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
struct netlink_ext_ack *extack);
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port);
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
int rdma_counter_query_stats(struct rdma_counter *counter);
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index);
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
u32 qp_num, u32 *counter_id);
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
enum rdma_nl_counter_mask *mask);
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 0af89cedfbf5d..bf97d5d0dbf6f 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -309,16 +309,16 @@ struct rvt_driver_provided {
/*
* Query driver for the state of the port.
*/
- int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
+ int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num,
struct ib_port_attr *props);
/*
* Tell driver to shutdown a port
*/
- int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);
+ int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num);
/* Tell driver to send a trap for changed port capabilities */
- void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);
+ void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num);
/*
* The following functions can be safely ignored completely. Any use of
@@ -338,7 +338,7 @@ struct rvt_driver_provided {
/* Let the driver pick the next queue pair number*/
int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num);
+ enum ib_qp_type type, u32 port_num);
/* Determine if its safe or allowed to modify the qp */
int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
index 6ad9dc836c107..d606cac482338 100644
--- a/include/rdma/rw.h
+++ b/include/rdma/rw.h
@@ -42,29 +42,29 @@ struct rdma_rw_ctx {
};
};
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir);
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
- struct scatterlist *sg, u32 sg_cnt,
- enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir);
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
enum dma_data_direction dir);
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ u32 port_num, struct scatterlist *sg, u32 sg_cnt,
struct scatterlist *prot_sg, u32 prot_sg_cnt,
enum dma_data_direction dir);
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
- u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
unsigned int maxpages);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);