From 99ee4df6aaf542b84f68d66a02de7b68b72a87a8 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 3 Feb 2015 18:32:53 +0200 Subject: IB/mlx4: Alias GUID adding persistency support If the SM rejects an alias GUID request the PF driver keeps trying to acquire the specified GUID indefinitely, utilizing an exponential backoff scheme. Retrying is managed per GUID entry. Each entry that wasn't applied holds its next retry information. Retry requests to the SM consist of records of 8 consecutive GUIDS. Each record that contains GUIDs requiring retries holds its next time-to-run based on the retry information of all its GUID entries. The record having the lowest retry time will run first when that retry time arrives. Since the method (SET or DELETE) as sent to the SM applies to all the GUIDs in the record, we must handle SET requests and DELETE requests in separate SM messages (one for SETs and the other for DELETEs). To avoid race conditions where a GUID entry request (set or delete) was modified after the SM request was sent, we save the method and the requested indices as part of the callback's context -- thus, only the requested indexes are evaluated when the response is received. When an GUID entry is approved we turn off its retry-required bit, this prevents redundant SM retries from occurring on that record. The port down event should be sent only when previously it was up. Likewise, the port up event should be sent only if previously the port was down. Synchronization was added around the flows that change entries and record state to prevent race conditions. Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/alias_GUID.c | 314 ++++++++++++++++++++++++-------- drivers/infiniband/hw/mlx4/mlx4_ib.h | 4 +- drivers/infiniband/hw/mlx4/sysfs.c | 11 +- 3 files changed, 245 insertions(+), 84 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index a31e031afd874..a968388b8176a 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context { int query_id; struct list_head list; int block_num; + ib_sa_comp_mask guid_indexes; + u8 method; }; struct mlx4_next_alias_guid_work { u8 port; u8 block_num; + u8 method; struct mlx4_sriov_alias_guid_info_rec_det rec_det; }; +static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, + int *resched_delay_sec); void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, u8 port_num, u8 *p_data) @@ -138,10 +143,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, enum slave_port_state prev_state; __be64 tmp_cur_ag, form_cache_ag; enum slave_port_gen_event gen_event; + struct mlx4_sriov_alias_guid_info_rec_det *rec; + unsigned long flags; + __be64 required_value; if (!mlx4_is_master(dev->dev)) return; + rec = &dev->sriov.alias_guid.ports_guid[port_num - 1]. + all_rec_per_port[block_num]; guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. ports_guid[port_num - 1]. 
all_rec_per_port[block_num].guid_indexes); @@ -166,8 +176,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, */ if (tmp_cur_ag != form_cache_ag) continue; - mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num); + spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); + required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]; + + if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) + required_value = 0; + + if (tmp_cur_ag == required_value) { + rec->guid_indexes = rec->guid_indexes & + ~mlx4_ib_get_aguid_comp_mask_from_ix(i); + } else { + /* may notify port down if value is 0 */ + if (tmp_cur_ag != MLX4_NOT_SET_GUID) { + spin_unlock_irqrestore(&dev->sriov. + alias_guid.ag_work_lock, flags); + continue; + } + } + spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, + flags); + mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num); /*2 cases: Valid GUID, and Invalid Guid*/ if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/ @@ -188,10 +217,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, set_and_calc_slave_port_state(dev->dev, slave_id, port_num, MLX4_PORT_STATE_IB_EVENT_GID_INVALID, &gen_event); - pr_debug("sending PORT DOWN event to slave: %d, port: %d\n", - slave_id, port_num); - mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num, - MLX4_PORT_CHANGE_SUBTYPE_DOWN); + if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) { + pr_debug("sending PORT DOWN event to slave: %d, port: %d\n", + slave_id, port_num); + mlx4_gen_port_state_change_eqe(dev->dev, + slave_id, + port_num, + MLX4_PORT_CHANGE_SUBTYPE_DOWN); + } } } } @@ -206,6 +239,9 @@ static void aliasguid_query_handler(int status, int i; struct mlx4_sriov_alias_guid_info_rec_det *rec; unsigned long flags, flags1; + ib_sa_comp_mask declined_guid_indexes = 0; + ib_sa_comp_mask applied_guid_indexes = 0; + unsigned int resched_delay_sec = 0; if (!context) return; @@ -216,9 +252,9 @@ static void aliasguid_query_handler(int status, all_rec_per_port[cb_ctx->block_num]; if (status) { - rec->status = MLX4_GUID_INFO_STATUS_IDLE; pr_debug("(port: %d) failed: status = %d\n", cb_ctx->port, status); + rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC; goto out; } @@ -235,57 +271,97 @@ static void aliasguid_query_handler(int status, rec = &dev->sriov.alias_guid.ports_guid[port_index]. all_rec_per_port[guid_rec->block_num]; - rec->status = MLX4_GUID_INFO_STATUS_SET; - rec->method = MLX4_GUID_INFO_RECORD_SET; - + spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) { - __be64 tmp_cur_ag; - tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE]; + __be64 sm_response, required_val; + + if (!(cb_ctx->guid_indexes & + mlx4_ib_get_aguid_comp_mask_from_ix(i))) + continue; + sm_response = *(__be64 *)&guid_rec->guid_info_list + [i * GUID_REC_SIZE]; + required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]; + if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) { + if (required_val == + cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) + goto next_entry; + + /* A new value was set till we got the response */ + pr_debug("need to set new value %llx, record num %d, block_num:%d\n", + be64_to_cpu(required_val), + i, guid_rec->block_num); + goto entry_declined; + } + /* check if the SM didn't assign one of the records. - * if it didn't, if it was not sysadmin request: - * ask the SM to give a new GUID, (instead of the driver request). + * if it didn't, re-ask for. 
*/ - if (tmp_cur_ag == MLX4_NOT_SET_GUID) { - mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in " - "block_num: %d was declined by SM, " - "ownership by %d (0 = driver, 1=sysAdmin," - " 2=None)\n", __func__, i, - guid_rec->block_num, rec->ownership); - if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) { - /* if it is driver assign, asks for new GUID from SM*/ - *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] = - MLX4_NOT_SET_GUID; - - /* Mark the record as not assigned, and let it - * be sent again in the next work sched.*/ - rec->status = MLX4_GUID_INFO_STATUS_IDLE; - rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); - } + if (sm_response == MLX4_NOT_SET_GUID) { + if (rec->guids_retry_schedule[i] == 0) + mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in " + "block_num: %d was declined by SM, " + "ownership by %d (0 = driver, 1=sysAdmin," + " 2=None)\n", __func__, i, + guid_rec->block_num, + rec->ownership); + goto entry_declined; } else { /* properly assigned record. */ /* We save the GUID we just got from the SM in the * admin_guid in order to be persistent, and in the * request from the sm the process will ask for the same GUID */ if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN && - tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) { - /* the sysadmin assignment failed.*/ - mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set" - " admin guid after SysAdmin " - "configuration. " - "Record num %d in block_num:%d " - "was declined by SM, " - "new val(0x%llx) was kept\n", - __func__, i, - guid_rec->block_num, - be64_to_cpu(*(__be64 *) & - rec->all_recs[i * GUID_REC_SIZE])); + sm_response != required_val) { + /* Warn only on first retry */ + if (rec->guids_retry_schedule[i] == 0) + mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set" + " admin guid after SysAdmin " + "configuration. " + "Record num %d in block_num:%d " + "was declined by SM, " + "new val(0x%llx) was kept, SM returned (0x%llx)\n", + __func__, i, + guid_rec->block_num, + be64_to_cpu(required_val), + be64_to_cpu(sm_response)); + goto entry_declined; } else { - memcpy(&rec->all_recs[i * GUID_REC_SIZE], - &guid_rec->guid_info_list[i * GUID_REC_SIZE], - GUID_REC_SIZE); + *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] = + sm_response; + goto next_entry; } } +entry_declined: + declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i); + rec->guids_retry_schedule[i] = + (rec->guids_retry_schedule[i] == 0) ? 1 : + min((unsigned int)60, + rec->guids_retry_schedule[i] * 2); + /* using the minimum value among all entries in that record */ + resched_delay_sec = (resched_delay_sec == 0) ? 
+ rec->guids_retry_schedule[i] : + min(resched_delay_sec, + rec->guids_retry_schedule[i]); + continue; + +next_entry: + rec->guids_retry_schedule[i] = 0; + } + + applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes; + if (declined_guid_indexes || + rec->guid_indexes & ~(applied_guid_indexes)) { + pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n", + guid_rec->block_num, + be64_to_cpu((__force __be64)rec->guid_indexes), + be64_to_cpu((__force __be64)applied_guid_indexes), + be64_to_cpu((__force __be64)declined_guid_indexes)); + rec->time_to_run = ktime_get_real_ns() + + resched_delay_sec * NSEC_PER_SEC; + } else { + rec->status = MLX4_GUID_INFO_STATUS_SET; } + spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); /* The func is call here to close the cases when the sm doesn't send smp, so in the sa response the driver @@ -297,10 +373,13 @@ static void aliasguid_query_handler(int status, out: spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); - if (!dev->sriov.is_going_down) + if (!dev->sriov.is_going_down) { + get_low_record_time_index(dev, port_index, &resched_delay_sec); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, &dev->sriov.alias_guid.ports_guid[port_index]. - alias_guid_work, 0); + alias_guid_work, + msecs_to_jiffies(resched_delay_sec * 1000)); + } if (cb_ctx->sa_query) { list_del(&cb_ctx->list); kfree(cb_ctx); @@ -317,9 +396,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) ib_sa_comp_mask comp_mask = 0; dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status - = MLX4_GUID_INFO_STATUS_IDLE; - dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method - = MLX4_GUID_INFO_RECORD_SET; + = MLX4_GUID_INFO_STATUS_SET; /* calculate the comp_mask for that record.*/ for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { @@ -340,12 +417,16 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i); } dev->sriov.alias_guid.ports_guid[port - 1]. - all_rec_per_port[index].guid_indexes = comp_mask; + all_rec_per_port[index].guid_indexes |= comp_mask; + if (dev->sriov.alias_guid.ports_guid[port - 1]. + all_rec_per_port[index].guid_indexes) + dev->sriov.alias_guid.ports_guid[port - 1]. 
+ all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE; + } static int set_guid_rec(struct ib_device *ibdev, - u8 port, int index, - struct mlx4_sriov_alias_guid_info_rec_det *rec_det) + struct mlx4_next_alias_guid_work *rec) { int err; struct mlx4_ib_dev *dev = to_mdev(ibdev); @@ -354,6 +435,9 @@ static int set_guid_rec(struct ib_device *ibdev, struct ib_port_attr attr; struct mlx4_alias_guid_work_context *callback_context; unsigned long resched_delay, flags, flags1; + u8 port = rec->port + 1; + int index = rec->block_num; + struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det; struct list_head *head = &dev->sriov.alias_guid.ports_guid[port - 1].cb_list; @@ -380,6 +464,8 @@ static int set_guid_rec(struct ib_device *ibdev, callback_context->port = port; callback_context->dev = dev; callback_context->block_num = index; + callback_context->guid_indexes = rec_det->guid_indexes; + callback_context->method = rec->method; memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec)); @@ -399,7 +485,7 @@ static int set_guid_rec(struct ib_device *ibdev, callback_context->query_id = ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client, ibdev, port, &guid_info_rec, - comp_mask, rec_det->method, 1000, + comp_mask, rec->method, 1000, GFP_KERNEL, aliasguid_query_handler, callback_context, &callback_context->sa_query); @@ -462,31 +548,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); } +static void set_required_record(struct mlx4_ib_dev *dev, u8 port, + struct mlx4_next_alias_guid_work *next_rec, + int record_index) +{ + int i; + int lowset_time_entry = -1; + int lowest_time = 0; + ib_sa_comp_mask delete_guid_indexes = 0; + ib_sa_comp_mask set_guid_indexes = 0; + struct mlx4_sriov_alias_guid_info_rec_det *rec = + &dev->sriov.alias_guid.ports_guid[port]. + all_rec_per_port[record_index]; + + for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) { + if (!(rec->guid_indexes & + mlx4_ib_get_aguid_comp_mask_from_ix(i))) + continue; + + if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] == + cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) + delete_guid_indexes |= + mlx4_ib_get_aguid_comp_mask_from_ix(i); + else + set_guid_indexes |= + mlx4_ib_get_aguid_comp_mask_from_ix(i); + + if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <= + lowest_time) { + lowset_time_entry = i; + lowest_time = rec->guids_retry_schedule[i]; + } + } + + memcpy(&next_rec->rec_det, rec, sizeof(*rec)); + next_rec->port = port; + next_rec->block_num = record_index; + + if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] == + cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) { + next_rec->rec_det.guid_indexes = delete_guid_indexes; + next_rec->method = MLX4_GUID_INFO_RECORD_DELETE; + } else { + next_rec->rec_det.guid_indexes = set_guid_indexes; + next_rec->method = MLX4_GUID_INFO_RECORD_SET; + } +} + +/* return index of record that should be updated based on lowest + * rescheduled time + */ +static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, + int *resched_delay_sec) +{ + int record_index = -1; + u64 low_record_time = 0; + struct mlx4_sriov_alias_guid_info_rec_det rec; + int j; + + for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { + rec = dev->sriov.alias_guid.ports_guid[port]. 
+ all_rec_per_port[j]; + if (rec.status == MLX4_GUID_INFO_STATUS_IDLE && + rec.guid_indexes) { + if (record_index == -1 || + rec.time_to_run < low_record_time) { + record_index = j; + low_record_time = rec.time_to_run; + } + } + } + if (resched_delay_sec) { + u64 curr_time = ktime_get_real_ns(); + + *resched_delay_sec = (low_record_time < curr_time) ? 0 : + div_u64((low_record_time - curr_time), NSEC_PER_SEC); + } + + return record_index; +} + /* The function returns the next record that was * not configured (or failed to be configured) */ static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port, struct mlx4_next_alias_guid_work *rec) { - int j; unsigned long flags; + int record_index; + int ret = 0; - for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { - spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); - if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status == - MLX4_GUID_INFO_STATUS_IDLE) { - memcpy(&rec->rec_det, - &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j], - sizeof (struct mlx4_sriov_alias_guid_info_rec_det)); - rec->port = port; - rec->block_num = j; - dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status = - MLX4_GUID_INFO_STATUS_PENDING; - spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); - return 0; - } - spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); + spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); + record_index = get_low_record_time_index(dev, port, NULL); + + if (record_index < 0) { + ret = -ENOENT; + goto out; } - return -ENOENT; + + set_required_record(dev, port, rec, record_index); +out: + spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); + return ret; } static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port, @@ -497,8 +659,6 @@ static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port, rec_det->guid_indexes; memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs, rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE); - dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status = - rec_det->status; } static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port) @@ -545,9 +705,7 @@ static void alias_guid_work(struct work_struct *work) goto out; } - set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num, - &rec->rec_det); - + set_guid_rec(&dev->ib_dev, rec); out: kfree(rec); } @@ -562,6 +720,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port) spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); if (!dev->sriov.is_going_down) { + /* If there is pending one should cancell then run, otherwise + * won't run till previous one is ended as same work + * struct is used. + */ + cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port]. 
+ alias_guid_work); queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0); } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f829fd935b790..1cfc2bb7acdff 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -342,7 +342,6 @@ struct mlx4_ib_ah { enum mlx4_guid_alias_rec_status { MLX4_GUID_INFO_STATUS_IDLE, MLX4_GUID_INFO_STATUS_SET, - MLX4_GUID_INFO_STATUS_PENDING, }; enum mlx4_guid_alias_rec_ownership { @@ -360,8 +359,9 @@ struct mlx4_sriov_alias_guid_info_rec_det { u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC]; ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/ enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/ - u8 method; /*set or delete*/ enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/ + unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC]; + u64 time_to_run; }; struct mlx4_sriov_alias_guid_port_rec_det { diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index d10c2b8a5dadb..7423d7e4a829c 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -80,6 +80,7 @@ static ssize_t store_admin_alias_guid(struct device *dev, struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; u64 sysadmin_ag_val; + unsigned long flags; record_num = mlx4_ib_iov_dentry->entry_num / 8; guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8; @@ -87,6 +88,7 @@ static ssize_t store_admin_alias_guid(struct device *dev, pr_err("GUID 0 block 0 is RO\n"); return count; } + spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags); sscanf(buf, "%llx", &sysadmin_ag_val); *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1]. all_rec_per_port[record_num]. @@ -96,14 +98,8 @@ static ssize_t store_admin_alias_guid(struct device *dev, /* Change the state to be pending for update */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status = MLX4_GUID_INFO_STATUS_IDLE ; - - mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method - = MLX4_GUID_INFO_RECORD_SET; - switch (sysadmin_ag_val) { case MLX4_GUID_FOR_DELETE_VAL: - mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method - = MLX4_GUID_INFO_RECORD_DELETE; mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership = MLX4_GUID_SYSADMIN_ASSIGN; break; @@ -121,8 +117,9 @@ static ssize_t store_admin_alias_guid(struct device *dev, /* set the record index */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes - = mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec); + |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec); + spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags); mlx4_ib_init_alias_guid_work(mdev, port->num - 1); return count; -- cgit 1.2.3-korg From 773af94e4e3984d4055c332602de5d0d2ee3d840 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 3 Mar 2015 10:54:48 +0200 Subject: net/mlx4_core: Manage alias GUID per VF Manages alias GUIDs per VF per port in the core layer. This is a pre-step for managing alias GUIDs in a mode that the admin GUID is returned via ib_query_gid() regardless of whether the SM has approved it or not. 
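The two accessors this patch adds are plain reads and writes of a per-VF, per-port table; the driver keeps the value in priv->mfunc.master.vf_admin[entry].vport[port].guid. A minimal userspace sketch of that layout, with hypothetical array names and sizes used only for illustration:

#include <stdint.h>
#include <stdio.h>

#define MAX_VF   8	/* hypothetical sizes, for illustration only */
#define MAX_PORT 2

/* one administratively requested (admin) alias GUID per VF per port */
static uint64_t admin_guid[MAX_VF + 1][MAX_PORT + 1];

static void set_admin_guid(uint64_t guid, int vf, int port)
{
	admin_guid[vf][port] = guid;
}

static uint64_t get_admin_guid(int vf, int port)
{
	return admin_guid[vf][port];
}

int main(void)
{
	set_admin_guid(0x0002c90300aabbccULL, 1, 1);
	printf("vf 1 port 1 admin guid: 0x%016llx\n",
	       (unsigned long long)get_admin_guid(1, 1));
	return 0;
}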
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx4/main.c | 16 ++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + include/linux/mlx4/device.h | 3 +++ 3 files changed, 20 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index acceb75e8c440..9162a6dd7823f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2260,6 +2260,22 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) } EXPORT_SYMBOL_GPL(mlx4_counter_free); +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} +EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); + +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->mfunc.master.vf_admin[entry].vport[port].guid; +} +EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); + static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f30eeb730a866..502d3dd2c8885 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -499,6 +499,7 @@ struct mlx4_vport_state { bool spoofchk; u32 link_state; u8 qos_vport; + __be64 guid; }; struct mlx4_vf_admin_state { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index f9ce34bec45b1..39a91b0c5d5c1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1345,6 +1345,9 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, + int port); +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port); int mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); -- cgit 1.2.3-korg From fb517a4f03041c5eaed394bd57ee518b44301f1a Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 3 Mar 2015 11:23:32 +0200 Subject: net/mlx4_core: Set initial admin GUIDs for VFs To have out of the box experience, the PF generates random GUIDs who serve as the initial admin values. 
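The patch below obtains the random value with get_random_bytes() and then forces two bits in the leading octet of the EUI-64, which corresponds to making it a unicast (I/G bit clear), locally administered (U/L bit set) identifier. A standalone sketch of the same bit manipulation, with made-up function names and a non-cryptographic random source used only for illustration:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* mirrors the masking in mlx4_set_random_admin_guid(): the GUID is kept in
 * big-endian (network) byte order, so bits 56/57 of the host-order value
 * land in the first octet of the EUI-64 */
static uint64_t random_admin_guid(void)
{
	uint64_t raw = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
	uint64_t be  = htobe64(raw);

	be &= ~htobe64(1ULL << 56);	/* clear I/G bit: unicast */
	be |=  htobe64(1ULL << 57);	/* set U/L bit: locally administered */
	return be;			/* stored as __be64 in the driver */
}

int main(void)
{
	srand((unsigned)time(NULL));
	uint64_t guid = random_admin_guid();

	printf("admin guid: 0x%016llx\n",
	       (unsigned long long)be64toh(guid));
	return 0;
}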
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 1 + drivers/net/ethernet/mellanox/mlx4/main.c | 15 +++++++++++++++ include/linux/mlx4/device.h | 1 + 3 files changed, 17 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index f0fbb4ade85db..50ce67d4abfc0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2350,6 +2350,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT; vf_oper->vport[port].vlan_idx = NO_INDX; vf_oper->vport[port].mac_idx = NO_INDX; + mlx4_set_random_admin_guid(dev, i, port); } spin_lock_init(&s_state->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 9162a6dd7823f..ced5ecab5aa75 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2276,6 +2276,21 @@ __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) } EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 guid; + + /* hw GUID */ + if (entry == 0) + return; + + get_random_bytes((char *)&guid, sizeof(guid)); + guid &= ~(cpu_to_be64(1ULL << 56)); + guid |= cpu_to_be64(1ULL << 57); + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} + static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 39a91b0c5d5c1..83e80ab945004 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1348,6 +1348,7 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port); __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port); +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port); int mlx4_flow_attach(struct mlx4_dev *dev, struct mlx4_net_trans_rule *rule, u64 *reg_id); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); -- cgit 1.2.3-korg From 2350f24774627d73fe2e3843172b69db91714cd2 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Sun, 1 Mar 2015 18:17:22 +0200 Subject: IB/mlx4: Manage admin alias GUID upon admin request Set the admin alias GUID per the administrator's request via the sysfs mechanism into the core layer. The "get" request returns the current value. However, if the administrator requests the SM to assign a new value by requesting 0, the SM assigned GUID is returned. 
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/alias_GUID.c | 6 ++++++ drivers/infiniband/hw/mlx4/sysfs.c | 18 +++++++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index a968388b8176a..e4edd73f79b97 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -328,6 +328,12 @@ static void aliasguid_query_handler(int status, } else { *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] = sm_response; + if (required_val == 0) + mlx4_set_admin_guid(dev->dev, + sm_response, + (guid_rec->block_num + * NUM_ALIAS_GUID_IN_REC) + i, + cb_ctx->port); goto next_entry; } } diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 7423d7e4a829c..bb1c34a4817e0 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -46,21 +46,17 @@ static ssize_t show_admin_alias_guid(struct device *dev, struct device_attribute *attr, char *buf) { - int record_num;/*0-15*/ - int guid_index_in_rec; /*0 - 7*/ struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry = container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry); struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx; struct mlx4_ib_dev *mdev = port->dev; + __be64 sysadmin_ag_val; - record_num = mlx4_ib_iov_dentry->entry_num / 8 ; - guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ; + sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev, + mlx4_ib_iov_dentry->entry_num, + port->num); - return sprintf(buf, "%llx\n", - be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid. - ports_guid[port->num - 1]. - all_rec_per_port[record_num]. - all_recs[8 * guid_index_in_rec])); + return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val)); } /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID. @@ -98,6 +94,10 @@ static ssize_t store_admin_alias_guid(struct device *dev, /* Change the state to be pending for update */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status = MLX4_GUID_INFO_STATUS_IDLE ; + mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val), + mlx4_ib_iov_dentry->entry_num, + port->num); + switch (sysadmin_ag_val) { case MLX4_GUID_FOR_DELETE_VAL: mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership -- cgit 1.2.3-korg From f54796012837687532d0a87a0504de22da7c2503 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 3 Mar 2015 16:12:14 +0200 Subject: IB/mlx4: Change init flow to request alias GUIDs for active VFs Change the init flow to ask GUIDs only for active VFs. This is done for both SM & HOST modes so that there is no need any more to maintain the ownership record type. In case SM mode is used, the initial value will be 0, ask the SM to assign, for the HOST mode the initial value will be the HOST generated GUID. This will enable out of the box experience for both probed and attached VFs. 
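Condensed, the init policy described above amounts to the loop sketched here with hypothetical helper names (the real code is mlx4_ib_guid_port_init() in the diff that follows): inactive VFs are skipped entirely, and the initial admin value is 0 in SM-assigned mode or the host-generated GUID otherwise.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_VFS 4	/* hypothetical */

static bool     vf_active[NUM_VFS + 1] = { false, true, false, true, false };
static uint64_t host_guid[NUM_VFS + 1] = { 0, 0x020002c903000001ULL,
					   0, 0x020002c903000003ULL, 0 };

/* a requested value of 0 asks the SM to assign a GUID;
 * a non-zero value asks the SM to approve the host-generated one */
static void init_port_guid_requests(bool sm_assign_mode)
{
	for (int vf = 1; vf <= NUM_VFS; vf++) {
		if (!vf_active[vf])
			continue;	/* no request for inactive VFs */

		uint64_t initial = sm_assign_mode ? 0 : host_guid[vf];

		printf("vf %d: request alias GUID 0x%016llx\n",
		       vf, (unsigned long long)initial);
	}
}

int main(void)
{
	init_port_guid_requests(false);		/* HOST-assigned mode */
	return 0;
}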
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/alias_GUID.c | 104 +++++++++++++++----------------- drivers/infiniband/hw/mlx4/mlx4_ib.h | 8 +-- drivers/infiniband/hw/mlx4/sysfs.c | 17 ------ 3 files changed, 50 insertions(+), 79 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index e4edd73f79b97..5b4080740321a 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -298,19 +298,17 @@ static void aliasguid_query_handler(int status, */ if (sm_response == MLX4_NOT_SET_GUID) { if (rec->guids_retry_schedule[i] == 0) - mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in " - "block_num: %d was declined by SM, " - "ownership by %d (0 = driver, 1=sysAdmin," - " 2=None)\n", __func__, i, - guid_rec->block_num, - rec->ownership); + mlx4_ib_warn(&dev->ib_dev, + "%s:Record num %d in block_num: %d was declined by SM\n", + __func__, i, + guid_rec->block_num); goto entry_declined; } else { /* properly assigned record. */ /* We save the GUID we just got from the SM in the * admin_guid in order to be persistent, and in the * request from the sm the process will ask for the same GUID */ - if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN && + if (required_val && sm_response != required_val) { /* Warn only on first retry */ if (rec->guids_retry_schedule[i] == 0) @@ -416,9 +414,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) need to assign GUIDs, then don't put it up for assignment. */ if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val || - (!index && !i) || - MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid. - ports_guid[port - 1].all_rec_per_port[index].ownership) + (!index && !i)) continue; comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i); } @@ -526,6 +522,30 @@ out: return err; } +static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port) +{ + int j, k, entry; + __be64 guid; + + /*Check if the SM doesn't need to assign the GUIDs*/ + for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { + for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) { + entry = j * NUM_ALIAS_GUID_IN_REC + k; + /* no request for the 0 entry (hw guid) */ + if (!entry || entry > dev->dev->persist->num_vfs || + !mlx4_is_slave_active(dev->dev, entry)) + continue; + guid = mlx4_get_admin_guid(dev->dev, entry, port); + *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. 
+ all_rec_per_port[j].all_recs + [GUID_REC_SIZE * k] = guid; + pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n", + entry, + be64_to_cpu(guid), + port); + } + } +} void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) { int i; @@ -535,6 +555,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) spin_lock_irqsave(&dev->sriov.going_down_lock, flags); spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); + + if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags & + GUID_STATE_NEED_PORT_INIT) { + mlx4_ib_guid_port_init(dev, port); + dev->sriov.alias_guid.ports_guid[port - 1].state_flags &= + (~GUID_STATE_NEED_PORT_INIT); + } for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++) invalidate_guid_record(dev, port, i); @@ -657,33 +684,6 @@ out: return ret; } -static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port, - int rec_index, - struct mlx4_sriov_alias_guid_info_rec_det *rec_det) -{ - dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes = - rec_det->guid_indexes; - memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs, - rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE); -} - -static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port) -{ - int j; - struct mlx4_sriov_alias_guid_info_rec_det rec_det ; - - for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) { - memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE); - rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) | - IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 | - IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 | - IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 | - IB_SA_GUIDINFO_REC_GID7; - rec_det.status = MLX4_GUID_INFO_STATUS_IDLE; - set_administratively_guid_record(dev, port, j, &rec_det); - } -} - static void alias_guid_work(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); @@ -779,7 +779,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { char alias_wq_name[15]; int ret = 0; - int i, j, k; + int i, j; union ib_gid gid; if (!mlx4_is_master(dev->dev)) @@ -803,33 +803,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) for (i = 0 ; i < dev->num_ports; i++) { memset(&dev->sriov.alias_guid.ports_guid[i], 0, sizeof (struct mlx4_sriov_alias_guid_port_rec_det)); - /*Check if the SM doesn't need to assign the GUIDs*/ + dev->sriov.alias_guid.ports_guid[i].state_flags |= + GUID_STATE_NEED_PORT_INIT; for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { - if (mlx4_ib_sm_guid_assign) { - dev->sriov.alias_guid.ports_guid[i]. - all_rec_per_port[j]. - ownership = MLX4_GUID_DRIVER_ASSIGN; - continue; - } - dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j]. - ownership = MLX4_GUID_NONE_ASSIGN; - /*mark each val as it was deleted, - till the sysAdmin will give it valid val*/ - for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) { - *(__be64 *)&dev->sriov.alias_guid.ports_guid[i]. - all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] = - cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL); - } + /* mark each val as it was deleted */ + memset(dev->sriov.alias_guid.ports_guid[i]. + all_rec_per_port[j].all_recs, 0xFF, + sizeof(dev->sriov.alias_guid.ports_guid[i]. 
+ all_rec_per_port[j].all_recs)); } INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); /*prepare the records, set them to be allocated by sm*/ + if (mlx4_ib_sm_guid_assign) + for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++) + mlx4_set_admin_guid(dev->dev, 0, j, i + 1); for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) invalidate_guid_record(dev, i + 1, j); dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; dev->sriov.alias_guid.ports_guid[i].port = i; - if (mlx4_ib_sm_guid_assign) - set_all_slaves_guids(dev, i); snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i); dev->sriov.alias_guid.ports_guid[i].wq = diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 1cfc2bb7acdff..532a8772b63fd 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -344,11 +344,7 @@ enum mlx4_guid_alias_rec_status { MLX4_GUID_INFO_STATUS_SET, }; -enum mlx4_guid_alias_rec_ownership { - MLX4_GUID_DRIVER_ASSIGN, - MLX4_GUID_SYSADMIN_ASSIGN, - MLX4_GUID_NONE_ASSIGN, /*init state of each record*/ -}; +#define GUID_STATE_NEED_PORT_INIT 0x01 enum mlx4_guid_alias_rec_method { MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET, @@ -359,7 +355,6 @@ struct mlx4_sriov_alias_guid_info_rec_det { u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC]; ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/ enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/ - enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/ unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC]; u64 time_to_run; }; @@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det { struct workqueue_struct *wq; struct delayed_work alias_guid_work; u8 port; + u32 state_flags; struct mlx4_sriov_alias_guid *parent; struct list_head cb_list; }; diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index bb1c34a4817e0..6797108ce8735 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -98,23 +98,6 @@ static ssize_t store_admin_alias_guid(struct device *dev, mlx4_ib_iov_dentry->entry_num, port->num); - switch (sysadmin_ag_val) { - case MLX4_GUID_FOR_DELETE_VAL: - mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership - = MLX4_GUID_SYSADMIN_ASSIGN; - break; - /* The sysadmin requests the SM to re-assign */ - case MLX4_NOT_SET_GUID: - mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership - = MLX4_GUID_DRIVER_ASSIGN; - break; - /* The sysadmin requests a specific value.*/ - default: - mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership - = MLX4_GUID_SYSADMIN_ASSIGN; - break; - } - /* set the record index */ mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec); -- cgit 1.2.3-korg From ee59fa0d7e9af130bfc1b75524e04c101670bd5e Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Tue, 3 Mar 2015 17:28:49 +0200 Subject: IB/mlx4: Request alias GUID on demand Request GIDs from the SM on demand, i.e., when a VF actually needs them, and release them when the GIDs are no longer in use. In cloud environments, this is useful for GID migrations, in which a GID is assigned to a VF on the destination HCA, while the VF on the source HCA is shutdown (but the GID was not administratively released). 
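The event handler added in the diff below (mlx4_ib_slave_alias_guid_event) reduces to one decision per slave transition. A condensed restatement of that decision, using hypothetical names and a sentinel mirroring the driver's all-ones delete marker:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUID_FOR_DELETE_VAL 0xFFFFFFFFFFFFFFFFULL	/* "release this GUID" */

/* returns true when a new SM request is needed and fills *required_guid */
static bool guid_for_slave_event(bool slave_init, uint64_t current_guid,
				 uint64_t admin_guid, uint64_t *required_guid)
{
	if (!slave_init) {
		/* VF went down: release its GUID unless it was never in use */
		if (current_guid == GUID_FOR_DELETE_VAL || current_guid == 0)
			return false;
		*required_guid = GUID_FOR_DELETE_VAL;
		return true;
	}
	/* VF came up: request its admin GUID (0 means "SM, please assign") */
	if (admin_guid == GUID_FOR_DELETE_VAL)
		return false;
	*required_guid = admin_guid;
	return true;
}

int main(void)
{
	uint64_t req;

	if (guid_for_slave_event(false, 0x020002c903000001ULL, 0, &req))
		printf("shutdown -> request 0x%016llx\n", (unsigned long long)req);
	if (guid_for_slave_event(true, 0, 0x020002c903000001ULL, &req))
		printf("init     -> request 0x%016llx\n", (unsigned long long)req);
	return 0;
}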
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/alias_GUID.c | 51 +++++++++++++++++++++++++++++++++ drivers/infiniband/hw/mlx4/main.c | 22 ++++++++++++++ drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 ++ 3 files changed, 75 insertions(+) diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 5b4080740321a..0f00204d2ecea 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -123,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index) return IB_SA_COMP_MASK(4 + index); } +void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave, + int port, int slave_init) +{ + __be64 curr_guid, required_guid; + int record_num = slave / 8; + int index = slave % 8; + int port_index = port - 1; + unsigned long flags; + int do_work = 0; + + spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); + if (dev->sriov.alias_guid.ports_guid[port_index].state_flags & + GUID_STATE_NEED_PORT_INIT) + goto unlock; + if (!slave_init) { + curr_guid = *(__be64 *)&dev->sriov. + alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num]. + all_recs[GUID_REC_SIZE * index]; + if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) || + !curr_guid) + goto unlock; + required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL); + } else { + required_guid = mlx4_get_admin_guid(dev->dev, slave, port); + if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) + goto unlock; + } + *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num]. + all_recs[GUID_REC_SIZE * index] = required_guid; + dev->sriov.alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num].guid_indexes + |= mlx4_ib_get_aguid_comp_mask_from_ix(index); + dev->sriov.alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num].status + = MLX4_GUID_INFO_STATUS_IDLE; + /* set to run immediately */ + dev->sriov.alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num].time_to_run = 0; + dev->sriov.alias_guid.ports_guid[port_index]. + all_rec_per_port[record_num]. + guids_retry_schedule[index] = 0; + do_work = 1; +unlock: + spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); + + if (do_work) + mlx4_ib_init_alias_guid_work(dev, port_index); +} + /* * Whenever new GUID is set/unset (guid table change) create event and * notify the relevant slave (master also should be notified). 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 976bea794b5f7..e92dc9aa075e6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2791,9 +2791,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_SLAVE_INIT: /* here, p is the slave id */ do_slave_init(ibdev, p, 1); + if (mlx4_is_master(dev)) { + int i; + + for (i = 1; i <= ibdev->num_ports; i++) { + if (rdma_port_get_link_layer(&ibdev->ib_dev, i) + == IB_LINK_LAYER_INFINIBAND) + mlx4_ib_slave_alias_guid_event(ibdev, + p, i, + 1); + } + } return; case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + if (mlx4_is_master(dev)) { + int i; + + for (i = 1; i <= ibdev->num_ports; i++) { + if (rdma_port_get_link_layer(&ibdev->ib_dev, i) + == IB_LINK_LAYER_INFINIBAND) + mlx4_ib_slave_alias_guid_event(ibdev, + p, i, + 0); + } + } /* here, p is the slave id */ do_slave_init(ibdev, p, 0); return; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 532a8772b63fd..fce3934372a16 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -798,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, struct attribute *attr); ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index); +void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave, + int port, int slave_init); int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ; -- cgit 1.2.3-korg From a0667a83aff20c7659960d7e7cd6a5d187c7df20 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 18 Mar 2015 14:56:04 +0200 Subject: net/mlx4_core: Raise slave shutdown event upon FLR There might be cases that PF doesn't get a "reset" command upon slave down (e.g. virsh destroy). In these cases, however, an FLR event is issued. Therefore, when the PF receives an FLR event for a slave, it should also generate a shutdown event on the PF for that slave, to let the PF upper layers (mlx4_ib, eth) perform any required cleanup/actions associated with slave shutdown. Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx4/eq.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 190fd624bdfeb..2619c9fbf42df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -702,6 +702,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; } spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, + flr_slave); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_flr_event_work); break; -- cgit 1.2.3-korg From e9a7ff3a71154ec03b6dc617d97d7c710b7bb98c Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Mon, 2 Feb 2015 15:07:23 +0200 Subject: net/mlx4_core: Return the admin alias GUID upon host view request Return the admin alias GUID value upon a GET request via HOST. We do this so that the GUID value requested by the admin is returned even if the SM has not yet approved this GUID (e.g. the SM is down). Note that this does not create a problem, since the virtual port will remain down until the SM does ACK the requested GUID value. 
Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 41 +++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 50ce67d4abfc0..4f7dc044601e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -939,21 +939,34 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, return err; } if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { - /* compute slave's gid block */ - smp->attr_mod = cpu_to_be32(slave / 8); - /* execute cmd */ - err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, - vhcr->in_modifier, opcode_modifier, - vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); - if (!err) { - /* if needed, move slave gid to index 0 */ - if (slave % 8) - memcpy(outsmp->data, - outsmp->data + (slave % 8) * 8, 8); - /* delete all other gids */ - memset(outsmp->data + 8, 0, 56); + __be64 guid = mlx4_get_admin_guid(dev, slave, + port); + + /* set the PF admin guid to the FW/HW burned + * GUID, if it wasn't yet set + */ + if (slave == 0 && guid == 0) { + smp->attr_mod = 0; + err = mlx4_cmd_box(dev, + inbox->dma, + outbox->dma, + vhcr->in_modifier, + opcode_modifier, + vhcr->op, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + mlx4_set_admin_guid(dev, + *(__be64 *)outsmp-> + data, slave, port); + } else { + memcpy(outsmp->data, &guid, 8); } - return err; + + /* clean all other gids */ + memset(outsmp->data + 8, 0, 56); + return 0; } if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, -- cgit 1.2.3-korg From 56c1d2335b355205d6fabb7663d383e73ea0de47 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 12 Feb 2015 09:49:43 +0200 Subject: IB/mlx4: Change alias guids default to be host assigned Change the default mode to be HOST assigned instead of SM assigned. This is the expected operational mode, because it doesn't depend on SM availability. As PF generates random GUIDs as the initial admin values, this gives out of the box experience. Signed-off-by: Yishai Hadas Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e92dc9aa075e6..57070c529dfb5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); -int mlx4_ib_sm_guid_assign = 1; +int mlx4_ib_sm_guid_assign = 0; module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444); -MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)"); +MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)"); static const char mlx4_ib_version[] = DRV_NAME ": Mellanox ConnectX InfiniBand driver v" -- cgit 1.2.3-korg From e135106fac9525352feb8e49077c8f46c3eaf288 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:26:59 -0500 Subject: IB/ipoib: factor out ah flushing Create a an ipoib_flush_ah and ipoib_stop_ah routines to use at appropriate times to flush out all remaining ah entries before we shut the device down. 
Because neighbors and mcast entries can each have a reference on any given ah, we must make sure to free all of those first before our ah will actually have a 0 refcount and be able to be reaped. This factoring is needed in preparation for having per-device work queues. The original per-device workqueue code resulted in the following error message: : ib_dealloc_pd failed That error was tracked down to this issue. With the changes to which workqueues were flushed when, there were no flushes of the per device workqueue after the last ah's were freed, resulting in an attempt to dealloc the pd with outstanding resources still allocated. This code puts the explicit flushes in the needed places to avoid that problem. Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 46 ++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 72626c3481749..cb02466a0eb95 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -659,6 +659,24 @@ void ipoib_reap_ah(struct work_struct *work) round_jiffies_relative(HZ)); } +static void ipoib_flush_ah(struct net_device *dev, int flush) +{ + struct ipoib_dev_priv *priv = netdev_priv(dev); + + cancel_delayed_work(&priv->ah_reap_task); + if (flush) + flush_workqueue(ipoib_workqueue); + ipoib_reap_ah(&priv->ah_reap_task.work); +} + +static void ipoib_stop_ah(struct net_device *dev, int flush) +{ + struct ipoib_dev_priv *priv = netdev_priv(dev); + + set_bit(IPOIB_STOP_REAPER, &priv->flags); + ipoib_flush_ah(dev, flush); +} + static void ipoib_ib_tx_timer_func(unsigned long ctx) { drain_tx_cq((struct net_device *)ctx); @@ -877,24 +895,7 @@ timeout: if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); - /* Wait for all AHs to be reaped */ - set_bit(IPOIB_STOP_REAPER, &priv->flags); - cancel_delayed_work(&priv->ah_reap_task); - if (flush) - flush_workqueue(ipoib_workqueue); - - begin = jiffies; - - while (!list_empty(&priv->dead_ahs)) { - __ipoib_reap_ah(dev); - - if (time_after(jiffies, begin + HZ)) { - ipoib_warn(priv, "timing out; will leak address handles\n"); - break; - } - - msleep(1); - } + ipoib_flush_ah(dev, flush); ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); @@ -1037,6 +1038,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); + ipoib_flush_ah(dev, 0); } if (level >= IPOIB_FLUSH_NORMAL) @@ -1100,6 +1102,14 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); + /* + * All of our ah references aren't free until after + * ipoib_mcast_dev_flush(), ipoib_flush_paths, and + * the neighbor garbage collection is stopped and reaped. + * That should all be done now, so make a final ah flush. + */ + ipoib_stop_ah(dev, 1); + ipoib_transport_dev_cleanup(dev); } -- cgit 1.2.3-korg From be7aa663fc1d9156798f5af3c60e6df45e1fe5de Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:00 -0500 Subject: IB/ipoib: change init sequence ordering In preparation for using per device work queues, we need to move the start of the neighbor thread task to after ipoib_ib_dev_init and move the destruction of the neighbor task to before ipoib_ib_dev_cleanup. Otherwise we will end up freeing our workqueue with work possibly still on it. 
Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 657b89b1d291c..98c738d827d1d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1269,15 +1269,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); - if (ipoib_neigh_hash_init(priv) < 0) - goto out; /* Allocate RX/TX "rings" to hold queued skbs */ priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, GFP_KERNEL); if (!priv->rx_ring) { printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", ca->name, ipoib_recvq_size); - goto out_neigh_hash_cleanup; + goto out; } priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); @@ -1292,16 +1290,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) if (ipoib_ib_dev_init(dev, ca, port)) goto out_tx_ring_cleanup; + /* + * Must be after ipoib_ib_dev_init so we can allocate a per + * device wq there and use it here + */ + if (ipoib_neigh_hash_init(priv) < 0) + goto out_dev_uninit; + return 0; +out_dev_uninit: + ipoib_ib_dev_cleanup(dev); + out_tx_ring_cleanup: vfree(priv->tx_ring); out_rx_ring_cleanup: kfree(priv->rx_ring); -out_neigh_hash_cleanup: - ipoib_neigh_hash_uninit(dev); out: return -ENOMEM; } @@ -1324,6 +1330,12 @@ void ipoib_dev_cleanup(struct net_device *dev) } unregister_netdevice_many(&head); + /* + * Must be before ipoib_ib_dev_cleanup or we delete an in use + * work queue + */ + ipoib_neigh_hash_uninit(dev); + ipoib_ib_dev_cleanup(dev); kfree(priv->rx_ring); @@ -1331,8 +1343,6 @@ void ipoib_dev_cleanup(struct net_device *dev) priv->rx_ring = NULL; priv->tx_ring = NULL; - - ipoib_neigh_hash_uninit(dev); } static const struct header_ops ipoib_header_ops = { -- cgit 1.2.3-korg From c84ca6d2b1a1bfbdab2dd2bc243840e9589a5aaf Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:01 -0500 Subject: IB/ipoib: Consolidate rtnl_lock tasks in workqueue The ipoib_mcast_flush_dev routine is called with the rtnl_lock held and needs to keep it held. It also needs to call flush_workqueue() to flush out any outstanding work. In the past, we've had to try and make sure that we didn't flush out any outstanding join completions because they also wanted to grab rtnl_lock() and that would deadlock. It turns out that the only thing in the join completion handler that needs this lock can be safely moved to our carrier_on_task, thereby reducing the potential for the join completion code and the flush code to deadlock against each other. 
Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ffb83b5f7e805..eee66d13e5b9a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -190,12 +190,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; - - if (!ipoib_cm_admin_enabled(dev)) { - rtnl_lock(); - dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); - rtnl_unlock(); - } } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { @@ -371,6 +365,8 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) } rtnl_lock(); + if (!ipoib_cm_admin_enabled(priv->dev)) + dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); netif_carrier_on(priv->dev); rtnl_unlock(); } -- cgit 1.2.3-korg From 894021a752912ef4c2b63c8d2c466c00bc3cd2e6 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:02 -0500 Subject: IB/ipoib: Make the carrier_on_task race aware We blindly assume that we can just take the rtnl lock and that will prevent races with downing this interface. Unfortunately, that's not the case. In ipoib_mcast_stop_thread() we will call flush_workqueue() in an attempt to clear out all remaining instances of ipoib_join_task. But, since this task is put on the same workqueue as the join task, the flush_workqueue waits on this thread too. But this thread is deadlocked on the rtnl lock. The better thing here is to use trylock and loop on that until we either get the lock or we see that FLAG_OPER_UP has been cleared, in which case we don't need to do anything anyway and we just return. While investigating which flag should be used, FLAG_ADMIN_UP or FLAG_OPER_UP, it was determined that FLAG_OPER_UP was the more appropriate flag to use. However, there was a mix of these two flags in use in the existing code. So while we check for that flag here as part of this race fix, also cleanup the two places that had used the less appropriate flag for their tests. Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index eee66d13e5b9a..c63a598d0b41d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -353,18 +353,27 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) carrier_on_task); struct ib_port_attr attr; - /* - * Take rtnl_lock to avoid racing with ipoib_stop() and - * turning the carrier back on while a device is being - * removed. - */ if (ib_query_port(priv->ca, priv->port, &attr) || attr.state != IB_PORT_ACTIVE) { ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); return; } - rtnl_lock(); + /* + * Take rtnl_lock to avoid racing with ipoib_stop() and + * turning the carrier back on while a device is being + * removed. However, ipoib_stop() will attempt to flush + * the workqueue while holding the rtnl lock, so loop + * on trylock until either we get the lock or we see + * FLAG_OPER_UP go away as that signals that we are bailing + * and can safely ignore the carrier on work. 
+ */ + while (!rtnl_trylock()) { + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + return; + else + msleep(20); + } if (!ipoib_cm_admin_enabled(priv->dev)) dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); netif_carrier_on(priv->dev); @@ -535,7 +544,7 @@ void ipoib_mcast_join_task(struct work_struct *work) if (!priv->broadcast) { struct ipoib_mcast *broadcast; - if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return; broadcast = ipoib_mcast_alloc(dev, 1); @@ -882,7 +891,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_mcast_free(mcast); } - if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) ipoib_mcast_start_thread(dev); } -- cgit 1.2.3-korg From 0b39578bcde4298a392fb2df16235c316d932127 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:03 -0500 Subject: IB/ipoib: Use dedicated workqueues per interface During my recent work on the rtnl lock deadlock in the IPoIB driver, I saw that even once I fixed the apparent races for a single device, as soon as that device had any children, new races popped up. It turns out that this is because no matter how well we protect against races on a single device, the fact that all devices use the same workqueue, and flush_workqueue() flushes *everything* from that workqueue means that we would also have to prevent all races between different devices (for instance, ipoib_mcast_restart_task on interface ib0 can race with ipoib_mcast_flush_dev on interface ib0.8002, resulting in a deadlock on the rtnl_lock). There are several possible solutions to this problem: Make carrier_on_task and mcast_restart_task try to take the rtnl for some set period of time and if they fail, then bail. This runs the real risk of dropping work on the floor, which can end up being its own separate kind of deadlock. Set some global flag in the driver that says some device is in the middle of going down, letting all tasks know to bail. Again, this can drop work on the floor. Or the method this patch attempts to use, which is when we bring an interface up, create a workqueue specifically for that interface, so that when we take it back down, we are flushing only those tasks associated with our interface. In addition, keep the global workqueue, but now limit it to only flush tasks. In this way, the flush tasks can always flush the device specific work queues without having deadlock issues. 
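The resulting allocation and teardown pattern looks roughly like the
sketch below (per-device workqueue name and placement are as introduced
by this patch; error handling is abbreviated):

	/* interface bring-up: one single-threaded workqueue per device,
	 * because the IPoIB tasks assume they never race against themselves */
	priv->wq = create_singlethread_workqueue("ipoib_wq");
	if (!priv->wq)
		return -ENOMEM;

	/* per-device work only ever goes onto priv->wq ... */
	queue_work(priv->wq, &priv->restart_task);

	/* ... so tearing down one interface flushes only that interface,
	 * while the global ipoib_workqueue is reserved for flush tasks */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
	priv->wq = NULL;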
Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 1 + drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18 +++++++-------- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 6 ++--- drivers/infiniband/ulp/ipoib/ipoib_main.c | 28 +++++++++++++---------- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 20 ++++++++--------- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 31 +++++++++++++++++++++++--- 6 files changed, 66 insertions(+), 38 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index d7562beb54236..e940cd9f84719 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -317,6 +317,7 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; + struct workqueue_struct *wq; struct delayed_work mcast_task; struct work_struct carrier_on_task; struct work_struct flush_light; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 933efcea0d03f..56959adb6c7da 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even } spin_lock_irq(&priv->lock); - queue_delayed_work(ipoib_workqueue, + queue_delayed_work(priv->wq, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); /* Add this entry to passive ids list head, but do not re-add it * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ @@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); ipoib_cm_start_rx_drain(priv); - queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); + queue_work(priv->wq, &priv->cm.rx_reap_task); spin_unlock_irqrestore(&priv->lock, flags); } else ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", @@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) spin_lock_irqsave(&priv->lock, flags); list_move(&p->list, &priv->cm.rx_reap_list); spin_unlock_irqrestore(&priv->lock, flags); - queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); + queue_work(priv->wq, &priv->cm.rx_reap_task); } return; } @@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(ipoib_workqueue, &priv->cm.reap_task); + queue_work(priv->wq, &priv->cm.reap_task); } clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); @@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); - queue_work(ipoib_workqueue, &priv->cm.reap_task); + queue_work(priv->wq, &priv->cm.reap_task); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); - queue_work(ipoib_workqueue, &priv->cm.start_task); + queue_work(priv->wq, &priv->cm.start_task); return tx; } @@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { spin_lock_irqsave(&priv->lock, flags); list_move(&tx->list, &priv->cm.reap_list); - queue_work(ipoib_workqueue, &priv->cm.reap_task); + 
queue_work(priv->wq, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; @@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, skb_queue_tail(&priv->cm.skb_queue, skb); if (e) - queue_work(ipoib_workqueue, &priv->cm.skb_task); + queue_work(priv->wq, &priv->cm.skb_task); } static void ipoib_cm_rx_reap(struct work_struct *work) @@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work) } if (!list_empty(&priv->cm.passive_ids)) - queue_delayed_work(ipoib_workqueue, + queue_delayed_work(priv->wq, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); spin_unlock_irq(&priv->lock); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index cb02466a0eb95..2a56b7a11a92c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work) __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) - queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, + queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } @@ -665,7 +665,7 @@ static void ipoib_flush_ah(struct net_device *dev, int flush) cancel_delayed_work(&priv->ah_reap_task); if (flush) - flush_workqueue(ipoib_workqueue); + flush_workqueue(priv->wq); ipoib_reap_ah(&priv->ah_reap_task.work); } @@ -714,7 +714,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush) } clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, + queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 98c738d827d1d..4be80bec93ca1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) return; } - queue_work(ipoib_workqueue, &priv->restart_task); + queue_work(priv->wq, &priv->restart_task); } static int ipoib_get_iflink(const struct net_device *dev) @@ -961,7 +961,7 @@ static void ipoib_reap_neigh(struct work_struct *work) __ipoib_reap_neigh(priv); if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) - queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, + queue_delayed_work(priv->wq, &priv->neigh_reap_task, arp_tbl.gc_interval); } @@ -1140,7 +1140,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) /* start garbage collection */ clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); - queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, + queue_delayed_work(priv->wq, &priv->neigh_reap_task, arp_tbl.gc_interval); return 0; @@ -1651,10 +1651,11 @@ sysfs_failed: register_failed: ib_unregister_event_handler(&priv->event_handler); + flush_workqueue(ipoib_workqueue); /* Stop GC if started before flush */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(ipoib_workqueue); + flush_workqueue(priv->wq); event_failed: ipoib_dev_cleanup(priv->dev); @@ -1717,6 +1718,7 @@ static void ipoib_remove_one(struct ib_device *device) list_for_each_entry_safe(priv, tmp, dev_list, list) { ib_unregister_event_handler(&priv->event_handler); + flush_workqueue(ipoib_workqueue); rtnl_lock(); dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); @@ -1725,7 +1727,7 @@ static void 
ipoib_remove_one(struct ib_device *device) /* Stop GC */ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); cancel_delayed_work(&priv->neigh_reap_task); - flush_workqueue(ipoib_workqueue); + flush_workqueue(priv->wq); unregister_netdev(priv->dev); free_netdev(priv->dev); @@ -1760,14 +1762,16 @@ static int __init ipoib_init_module(void) return ret; /* - * We create our own workqueue mainly because we want to be - * able to flush it when devices are being removed. We can't - * use schedule_work()/flush_scheduled_work() because both - * unregister_netdev() and linkwatch_event take the rtnl lock, - * so flush_scheduled_work() can deadlock during device - * removal. + * We create a global workqueue here that is used for all flush + * operations. However, if you attempt to flush a workqueue + * from a task on that same workqueue, it deadlocks the system. + * We want to be able to flush the tasks associated with a + * specific net device, so we also create a workqueue for each + * netdevice. We queue up the tasks for that device only on + * its private workqueue, and we only queue up flush events + * on our global flush workqueue. This avoids the deadlocks. */ - ipoib_workqueue = create_singlethread_workqueue("ipoib"); + ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); if (!ipoib_workqueue) { ret = -ENOMEM; goto err_fs; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index c63a598d0b41d..9d3c1ed576ea7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -403,16 +403,15 @@ static int ipoib_mcast_join_complete(int status, mcast->backoff = 1; mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, - &priv->mcast_task, 0); + queue_delayed_work(priv->wq, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); /* - * Defer carrier on work to ipoib_workqueue to avoid a + * Defer carrier on work to priv->wq to avoid a * deadlock on rtnl_lock here. 
*/ if (mcast == priv->broadcast) - queue_work(ipoib_workqueue, &priv->carrier_on_task); + queue_work(priv->wq, &priv->carrier_on_task); status = 0; goto out; @@ -438,7 +437,7 @@ static int ipoib_mcast_join_complete(int status, mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, &priv->mcast_task, + queue_delayed_work(priv->wq, &priv->mcast_task, mcast->backoff * HZ); spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); @@ -511,8 +510,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, - &priv->mcast_task, + queue_delayed_work(priv->wq, &priv->mcast_task, mcast->backoff * HZ); mutex_unlock(&mcast_mutex); } @@ -552,8 +550,8 @@ void ipoib_mcast_join_task(struct work_struct *work) ipoib_warn(priv, "failed to allocate broadcast group\n"); mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, - &priv->mcast_task, HZ); + queue_delayed_work(priv->wq, &priv->mcast_task, + HZ); mutex_unlock(&mcast_mutex); return; } @@ -609,7 +607,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); + queue_delayed_work(priv->wq, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); return 0; @@ -627,7 +625,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush) mutex_unlock(&mcast_mutex); if (flush) - flush_workqueue(ipoib_workqueue); + flush_workqueue(priv->wq); return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index c56d5d44c53b3..34628403fd83b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -157,6 +157,16 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) goto out_free_pd; } + /* + * the various IPoIB tasks assume they will never race against + * themselves, so always use a single thread workqueue + */ + priv->wq = create_singlethread_workqueue("ipoib_wq"); + if (!priv->wq) { + printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); + goto out_free_mr; + } + size = ipoib_recvq_size + 1; ret = ipoib_cm_dev_init(dev); if (!ret) { @@ -165,12 +175,13 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */ else size += ipoib_recvq_size * ipoib_max_conn_qp; - } + } else + goto out_free_wq; priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); if (IS_ERR(priv->recv_cq)) { printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name); - goto out_free_mr; + goto out_cm_dev_cleanup; } priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, @@ -236,12 +247,19 @@ out_free_send_cq: out_free_recv_cq: ib_destroy_cq(priv->recv_cq); +out_cm_dev_cleanup: + ipoib_cm_dev_cleanup(dev); + +out_free_wq: + destroy_workqueue(priv->wq); + priv->wq = NULL; + out_free_mr: ib_dereg_mr(priv->mr); - ipoib_cm_dev_cleanup(dev); out_free_pd: ib_dealloc_pd(priv->pd); + return -ENODEV; } @@ -265,11 +283,18 @@ void ipoib_transport_dev_cleanup(struct net_device *dev) ipoib_cm_dev_cleanup(dev); + if (priv->wq) { + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + priv->wq = NULL; + } + if (ib_dereg_mr(priv->mr)) ipoib_warn(priv, 
"ib_dereg_mr failed\n"); if (ib_dealloc_pd(priv->pd)) ipoib_warn(priv, "ib_dealloc_pd failed\n"); + } void ipoib_event(struct ib_event_handler *handler, -- cgit 1.2.3-korg From efc82eeeae4ece716091d8540079b7f276ca1ad5 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:04 -0500 Subject: IB/ipoib: No longer use flush as a parameter Various places in the IPoIB code had a deadlock related to flushing the ipoib workqueue. Now that we have per device workqueues and a specific flush workqueue, there is no longer a deadlock issue with flushing the device specific workqueues and we can do so unilaterally. Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 8 +++--- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 35 +++++++++++++------------- drivers/infiniband/ulp/ipoib/ipoib_main.c | 8 +++--- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 18 ++++++++++--- 4 files changed, 39 insertions(+), 30 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e940cd9f84719..9ef432ae72e88 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -478,10 +478,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); void ipoib_pkey_event(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); -int ipoib_ib_dev_open(struct net_device *dev, int flush); +int ipoib_ib_dev_open(struct net_device *dev); int ipoib_ib_dev_up(struct net_device *dev); -int ipoib_ib_dev_down(struct net_device *dev, int flush); -int ipoib_ib_dev_stop(struct net_device *dev, int flush); +int ipoib_ib_dev_down(struct net_device *dev); +int ipoib_ib_dev_stop(struct net_device *dev); void ipoib_pkey_dev_check_presence(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); @@ -493,7 +493,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); -int ipoib_mcast_stop_thread(struct net_device *dev, int flush); +int ipoib_mcast_stop_thread(struct net_device *dev); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2a56b7a11a92c..e144d07d53ccd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -659,22 +659,21 @@ void ipoib_reap_ah(struct work_struct *work) round_jiffies_relative(HZ)); } -static void ipoib_flush_ah(struct net_device *dev, int flush) +static void ipoib_flush_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); cancel_delayed_work(&priv->ah_reap_task); - if (flush) - flush_workqueue(priv->wq); + flush_workqueue(priv->wq); ipoib_reap_ah(&priv->ah_reap_task.work); } -static void ipoib_stop_ah(struct net_device *dev, int flush) +static void ipoib_stop_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); set_bit(IPOIB_STOP_REAPER, &priv->flags); - ipoib_flush_ah(dev, flush); + ipoib_flush_ah(dev); } static void ipoib_ib_tx_timer_func(unsigned long ctx) @@ -682,7 +681,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) drain_tx_cq((struct net_device *)ctx); } -int ipoib_ib_dev_open(struct net_device *dev, int flush) +int ipoib_ib_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; @@ -724,7 +723,7 @@ int 
ipoib_ib_dev_open(struct net_device *dev, int flush) dev_stop: if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); - ipoib_ib_dev_stop(dev, flush); + ipoib_ib_dev_stop(dev); return -1; } @@ -756,7 +755,7 @@ int ipoib_ib_dev_up(struct net_device *dev) return ipoib_mcast_start_thread(dev); } -int ipoib_ib_dev_down(struct net_device *dev, int flush) +int ipoib_ib_dev_down(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -765,7 +764,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); - ipoib_mcast_stop_thread(dev, flush); + ipoib_mcast_stop_thread(dev); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); @@ -825,7 +824,7 @@ void ipoib_drain_cq(struct net_device *dev) local_bh_enable(); } -int ipoib_ib_dev_stop(struct net_device *dev, int flush) +int ipoib_ib_dev_stop(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; @@ -895,7 +894,7 @@ timeout: if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); - ipoib_flush_ah(dev, flush); + ipoib_flush_ah(dev); ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); @@ -919,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) (unsigned long) dev); if (dev->flags & IFF_UP) { - if (ipoib_ib_dev_open(dev, 1)) { + if (ipoib_ib_dev_open(dev)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } @@ -1038,16 +1037,16 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); - ipoib_flush_ah(dev, 0); + ipoib_flush_ah(dev); } if (level >= IPOIB_FLUSH_NORMAL) - ipoib_ib_dev_down(dev, 0); + ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) - ipoib_ib_dev_stop(dev, 0); - if (ipoib_ib_dev_open(dev, 0) != 0) + ipoib_ib_dev_stop(dev); + if (ipoib_ib_dev_open(dev) != 0) return; if (netif_queue_stopped(dev)) netif_start_queue(dev); @@ -1099,7 +1098,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) */ ipoib_flush_paths(dev); - ipoib_mcast_stop_thread(dev, 1); + ipoib_mcast_stop_thread(dev); ipoib_mcast_dev_flush(dev); /* @@ -1108,7 +1107,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. 
*/ - ipoib_stop_ah(dev, 1); + ipoib_stop_ah(dev); ipoib_transport_dev_cleanup(dev); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4be80bec93ca1..176b3dfb49c3d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev) set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); - if (ipoib_ib_dev_open(dev, 1)) { + if (ipoib_ib_dev_open(dev)) { if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) return 0; goto err_disable; @@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev) return 0; err_stop: - ipoib_ib_dev_stop(dev, 1); + ipoib_ib_dev_stop(dev); err_disable: clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); @@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev) netif_stop_queue(dev); - ipoib_ib_dev_down(dev, 1); - ipoib_ib_dev_stop(dev, 0); + ipoib_ib_dev_down(dev); + ipoib_ib_dev_stop(dev); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 9d3c1ed576ea7..bb1b69904f966 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -613,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) return 0; } -int ipoib_mcast_stop_thread(struct net_device *dev, int flush) +int ipoib_mcast_stop_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -624,8 +624,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush) cancel_delayed_work(&priv->mcast_task); mutex_unlock(&mcast_mutex); - if (flush) - flush_workqueue(priv->wq); + flush_workqueue(priv->wq); return 0; } @@ -797,7 +796,18 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_dbg_mcast(priv, "restarting multicast task\n"); - ipoib_mcast_stop_thread(dev, 0); + /* + * We're running on the priv->wq right now, so we can't call + * mcast_stop_thread as it wants to flush the wq and that + * will deadlock. We don't actually *need* to stop the + * thread here anyway, so just clear the run flag, cancel + * any delayed work, do our work, remove the old entries, + * then restart the thread. + */ + mutex_lock(&mcast_mutex); + clear_bit(IPOIB_MCAST_RUN, &priv->flags); + cancel_delayed_work(&priv->mcast_task); + mutex_unlock(&mcast_mutex); local_irq_save(flags); netif_addr_lock(dev); -- cgit 1.2.3-korg From 69911416d87d6673c48d23a9fbc060e85f41fc73 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:05 -0500 Subject: IB/ipoib: fix MCAST_FLAG_BUSY usage Commit a9c8ba5884 ("IPoIB: Fix usage of uninitialized multicast objects") added a new flag MCAST_JOIN_STARTED, but was not very strict in how it was used. We didn't always initialize the completion struct before we set the flag, and we didn't always call complete on the completion struct from all paths that complete it. And when we did complete it, sometimes we continued to touch the mcast entry after the completion, opening us up to possible use after free issues. This made it less than totally effective, and certainly made its use confusing. And in the flush function we would use the presence of this flag to signal that we should wait on the completion struct, but we never cleared this flag, ever. In order to make things clearer and aid in resolving the rtnl deadlock bug I've been chasing, I cleaned this up a bit. 
1) Remove the MCAST_JOIN_STARTED flag entirely
2) Change MCAST_FLAG_BUSY so it now only means a join is in-flight
3) Test mcast->mc directly to see if we have completed
   ib_sa_join_multicast (using IS_ERR_OR_NULL)
4) Make sure that before setting MCAST_FLAG_BUSY we always initialize
   the mcast->done completion struct
5) Make sure that before calling complete(&mcast->done), we always
   clear the MCAST_FLAG_BUSY bit
6) Take the mcast_mutex before we call ib_sa_multicast_join and also
   take the mutex in our join callback.  This forces
   ib_sa_multicast_join to return and set mcast->mc before we process
   the callback.  This way, our callback can safely clear mcast->mc if
   there is an error on the join and we will do the right thing as a
   result in mcast_dev_flush.
7) Because we need the mutex to synchronize mcast->mc, we can no
   longer call mcast_sendonly_join directly from mcast_send and
   instead must add sendonly join processing to the mcast_join_task
8) Make MCAST_RUN mean that we have a working mcast subsystem, not
   that we have a running task.  We know when we need to reschedule
   our join task thread and don't need a flag to tell us.
9) Add a helper for rescheduling the join task thread

A number of different races are resolved with these changes.  These
races existed with the old MCAST_FLAG_BUSY usage, the
MCAST_JOIN_STARTED flag was an attempt to address them, and while it
helped, a determined effort could still trip things up.

One race looks something like this:

Thread 1                              Thread 2
ib_sa_join_multicast (as part of running restart mcast task)
  alloc member
  call callback
                                      ifconfig ib0 down
                                      wait_for_completion
    callback call completes
                                      wait_for_completion in
                                      mcast_dev_flush completes
                                        mcast->mc is PTR_ERR_OR_NULL
                                        so we skip ib_sa_leave_multicast
    return from callback
  return from ib_sa_join_multicast
set mcast->mc = return from ib_sa_multicast

We now have a permanently unbalanced join/leave issue that trips up
the refcounting in core/multicast.c

Another like this:

Thread 1             Thread 2                Thread 3
ib_sa_multicast_join
                                             ifconfig ib0 down
                                             priv->broadcast = NULL
                     join_complete
                                             wait_for_completion
                     mcast->mc is not yet set, so don't clear
return from ib_sa_join_multicast and set mcast->mc
                     complete
                     return -EAGAIN (making mcast->mc invalid)
                                             call ib_sa_multicast_leave
                                             on invalid mcast->mc,
                                             hang forever

By holding the mutex around ib_sa_multicast_join and taking the mutex
early in the callback, we force mcast->mc to be valid at the time we
run the callback.  This allows us to clear mcast->mc if there is an
error and the join is going to fail.  We do this before we complete
the mcast.  In this way, mcast_dev_flush always sees consistent state
in regards to mcast->mc membership at the time that the
wait_for_completion() returns.
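To make the ordering concrete, the pairing this relies on is roughly
the fragments below (a sketch only, not compilable on its own; the
names are the ones used in the driver, "status" is the join callback's
status argument, and the full locking and error handling are in the
actual diff):

	/* joiner side: completion is initialized before BUSY becomes visible */
	init_completion(&mcast->done);
	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mutex_lock(&mcast_mutex);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec, comp_mask,
					 GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	mutex_unlock(&mcast_mutex);

	/* callback and every error path: settle mcast->mc, clear BUSY,
	 * and only then signal the completion */
	mutex_lock(&mcast_mutex);
	if (status)
		mcast->mc = NULL;
	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	complete(&mcast->done);
	mutex_unlock(&mcast_mutex);

	/* flush side: wait only for joins that are actually in flight,
	 * then leave only if the join really produced a valid mc */
	if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		wait_for_completion(&mcast->done);
	if (!IS_ERR_OR_NULL(mcast->mc))
		ib_sa_free_multicast(mcast->mc);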
Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 11 +- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 355 ++++++++++++++++--------- 2 files changed, 238 insertions(+), 128 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 9ef432ae72e88..c79dcd5ee8ad0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -98,9 +98,15 @@ enum { IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ IPOIB_MCAST_FLAG_SENDONLY = 1, - IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ + /* + * For IPOIB_MCAST_FLAG_BUSY + * When set, in flight join and mcast->mc is unreliable + * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or + * haven't started yet + * When clear and mcast->mc is valid pointer, join was successful + */ + IPOIB_MCAST_FLAG_BUSY = 2, IPOIB_MCAST_FLAG_ATTACHED = 3, - IPOIB_MCAST_JOIN_STARTED = 4, MAX_SEND_CQE = 16, IPOIB_CM_COPYBREAK = 256, @@ -148,6 +154,7 @@ struct ipoib_mcast { unsigned long created; unsigned long backoff; + unsigned long delay_until; unsigned long flags; unsigned char logcount; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index bb1b69904f966..277e7ac7c4dbf 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -66,6 +66,48 @@ struct ipoib_mcast_iter { unsigned int send_only; }; +/* + * This should be called with the mcast_mutex held + */ +static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, + struct ipoib_mcast *mcast, + bool delay) +{ + if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) + return; + + /* + * We will be scheduling *something*, so cancel whatever is + * currently scheduled first + */ + cancel_delayed_work(&priv->mcast_task); + if (mcast && delay) { + /* + * We had a failure and want to schedule a retry later + */ + mcast->backoff *= 2; + if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) + mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; + mcast->delay_until = jiffies + (mcast->backoff * HZ); + /* + * Mark this mcast for its delay, but restart the + * task immediately. The join task will make sure to + * clear out all entries without delays, and then + * schedule itself to run again when the earliest + * delay expires + */ + queue_delayed_work(priv->wq, &priv->mcast_task, 0); + } else if (delay) { + /* + * Special case of retrying after a failure to + * allocate the broadcast multicast group, wait + * 1 second and try again + */ + queue_delayed_work(priv->wq, &priv->mcast_task, HZ); + } else + queue_delayed_work(priv->wq, &priv->mcast_task, 0); +} + static void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; @@ -103,6 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, mcast->dev = dev; mcast->created = jiffies; + mcast->delay_until = jiffies; mcast->backoff = 1; INIT_LIST_HEAD(&mcast->list); @@ -270,17 +313,31 @@ ipoib_mcast_sendonly_join_complete(int status, { struct ipoib_mcast *mcast = multicast->context; struct net_device *dev = mcast->dev; + struct ipoib_dev_priv *priv = netdev_priv(dev); + + /* + * We have to take the mutex to force mcast_sendonly_join to + * return from ib_sa_multicast_join and set mcast->mc to a + * valid value. 
Otherwise we were racing with ourselves in + * that we might fail here, but get a valid return from + * ib_sa_multicast_join after we had cleared mcast->mc here, + * resulting in mis-matched joins and leaves and a deadlock + */ + mutex_lock(&mcast_mutex); /* We trap for port events ourselves. */ - if (status == -ENETRESET) - return 0; + if (status == -ENETRESET) { + status = 0; + goto out; + } if (!status) status = ipoib_mcast_join_finish(mcast, &multicast->rec); if (status) { if (mcast->logcount++ < 20) - ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", + ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " + "join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); /* Flush out any queued packets */ @@ -290,11 +347,18 @@ ipoib_mcast_sendonly_join_complete(int status, dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_unlock_bh(dev); - - /* Clear the busy flag so we try again */ - status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, - &mcast->flags); + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + } else { + mcast->backoff = 1; + mcast->delay_until = jiffies; + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); } +out: + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + if (status) + mcast->mc = NULL; + complete(&mcast->done); + mutex_unlock(&mcast_mutex); return status; } @@ -312,19 +376,18 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) int ret = 0; if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { - ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); + ipoib_dbg_mcast(priv, "device shutting down, no sendonly " + "multicast joins\n"); + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + complete(&mcast->done); return -ENODEV; } - if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { - ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); - return -EBUSY; - } - rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; rec.pkey = cpu_to_be16(priv->pkey); + mutex_lock(&mcast_mutex); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | @@ -337,12 +400,14 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) if (IS_ERR(mcast->mc)) { ret = PTR_ERR(mcast->mc); clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", - ret); + ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " + "failed (ret = %d)\n", ret); + complete(&mcast->done); } else { - ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", - mcast->mcmember.mgid.raw); + ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " + "sendonly join\n", mcast->mcmember.mgid.raw); } + mutex_unlock(&mcast_mutex); return ret; } @@ -390,6 +455,16 @@ static int ipoib_mcast_join_complete(int status, ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", mcast->mcmember.mgid.raw, status); + /* + * We have to take the mutex to force mcast_join to + * return from ib_sa_multicast_join and set mcast->mc to a + * valid value. Otherwise we were racing with ourselves in + * that we might fail here, but get a valid return from + * ib_sa_multicast_join after we had cleared mcast->mc here, + * resulting in mis-matched joins and leaves and a deadlock + */ + mutex_lock(&mcast_mutex); + /* We trap for port events ourselves. 
*/ if (status == -ENETRESET) { status = 0; @@ -401,10 +476,8 @@ static int ipoib_mcast_join_complete(int status, if (!status) { mcast->backoff = 1; - mutex_lock(&mcast_mutex); - if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); - mutex_unlock(&mcast_mutex); + mcast->delay_until = jiffies; + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); /* * Defer carrier on work to priv->wq to avoid a @@ -412,37 +485,26 @@ static int ipoib_mcast_join_complete(int status, */ if (mcast == priv->broadcast) queue_work(priv->wq, &priv->carrier_on_task); - - status = 0; - goto out; - } - - if (mcast->logcount++ < 20) { - if (status == -ETIMEDOUT || status == -EAGAIN) { - ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - } else { - ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); + } else { + if (mcast->logcount++ < 20) { + if (status == -ETIMEDOUT || status == -EAGAIN) { + ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } else { + ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", + mcast->mcmember.mgid.raw, status); + } } - } - - mcast->backoff *= 2; - if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) - mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; - /* Clear the busy flag so we try again */ - status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - - mutex_lock(&mcast_mutex); - spin_lock_irq(&priv->lock); - if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, - mcast->backoff * HZ); - spin_unlock_irq(&priv->lock); - mutex_unlock(&mcast_mutex); + /* Requeue this join task with a backoff delay */ + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + } out: + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + if (status) + mcast->mc = NULL; complete(&mcast->done); + mutex_unlock(&mcast_mutex); return status; } @@ -491,29 +553,18 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.hop_limit = priv->broadcast->mcmember.hop_limit; } - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); - + mutex_lock(&mcast_mutex); mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); if (IS_ERR(mcast->mc)) { clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - complete(&mcast->done); ret = PTR_ERR(mcast->mc); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); - - mcast->backoff *= 2; - if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) - mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; - - mutex_lock(&mcast_mutex); - if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, - mcast->backoff * HZ); - mutex_unlock(&mcast_mutex); + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + complete(&mcast->done); } + mutex_unlock(&mcast_mutex); } void ipoib_mcast_join_task(struct work_struct *work) @@ -522,6 +573,9 @@ void ipoib_mcast_join_task(struct work_struct *work) container_of(work, struct ipoib_dev_priv, mcast_task.work); struct net_device *dev = priv->dev; struct ib_port_attr port_attr; + unsigned long delay_until = 0; + struct ipoib_mcast *mcast = NULL; + int create = 1; if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; @@ -539,64 +593,102 @@ void ipoib_mcast_join_task(struct work_struct *work) else 
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); + /* + * We have to hold the mutex to keep from racing with the join + * completion threads on setting flags on mcasts, and we have + * to hold the priv->lock because dev_flush will remove entries + * out from underneath us, so at a minimum we need the lock + * through the time that we do the for_each loop of the mcast + * list or else dev_flush can make us oops. + */ + mutex_lock(&mcast_mutex); + spin_lock_irq(&priv->lock); + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + goto out; + if (!priv->broadcast) { struct ipoib_mcast *broadcast; - if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) - return; - - broadcast = ipoib_mcast_alloc(dev, 1); + broadcast = ipoib_mcast_alloc(dev, 0); if (!broadcast) { ipoib_warn(priv, "failed to allocate broadcast group\n"); - mutex_lock(&mcast_mutex); - if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, - HZ); - mutex_unlock(&mcast_mutex); - return; + /* + * Restart us after a 1 second delay to retry + * creating our broadcast group and attaching to + * it. Until this succeeds, this ipoib dev is + * completely stalled (multicast wise). + */ + __ipoib_mcast_schedule_join_thread(priv, NULL, 1); + goto out; } - spin_lock_irq(&priv->lock); memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid)); priv->broadcast = broadcast; __ipoib_mcast_add(dev, priv->broadcast); - spin_unlock_irq(&priv->lock); } if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { - if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) - ipoib_mcast_join(dev, priv->broadcast, 0); - return; + if (IS_ERR_OR_NULL(priv->broadcast->mc) && + !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) { + mcast = priv->broadcast; + create = 0; + if (mcast->backoff > 1 && + time_before(jiffies, mcast->delay_until)) { + delay_until = mcast->delay_until; + mcast = NULL; + } + } + goto out; } - while (1) { - struct ipoib_mcast *mcast = NULL; - - spin_lock_irq(&priv->lock); - list_for_each_entry(mcast, &priv->multicast_list, list) { - if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) - && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) - && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + /* + * We'll never get here until the broadcast group is both allocated + * and attached + */ + list_for_each_entry(mcast, &priv->multicast_list, list) { + if (IS_ERR_OR_NULL(mcast->mc) && + !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && + !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + if (mcast->backoff == 1 || + time_after_eq(jiffies, mcast->delay_until)) /* Found the next unjoined group */ break; - } + else if (!delay_until || + time_before(mcast->delay_until, delay_until)) + delay_until = mcast->delay_until; } - spin_unlock_irq(&priv->lock); - - if (&mcast->list == &priv->multicast_list) { - /* All done */ - break; - } - - ipoib_mcast_join(dev, mcast, 1); - return; } - ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); + if (&mcast->list == &priv->multicast_list) { + /* + * All done, unless we have delayed work from + * backoff retransmissions, but we will get + * restarted when the time is right, so we are + * done for now + */ + mcast = NULL; + ipoib_dbg_mcast(priv, "successfully joined all " + "multicast groups\n"); + } - clear_bit(IPOIB_MCAST_RUN, &priv->flags); +out: + if (mcast) { + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + } + 
spin_unlock_irq(&priv->lock); + mutex_unlock(&mcast_mutex); + if (mcast) { + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + ipoib_mcast_sendonly_join(mcast); + else + ipoib_mcast_join(dev, mcast, create); + } + if (delay_until) + queue_delayed_work(priv->wq, &priv->mcast_task, + delay_until - jiffies); } int ipoib_mcast_start_thread(struct net_device *dev) @@ -606,8 +698,8 @@ int ipoib_mcast_start_thread(struct net_device *dev) ipoib_dbg_mcast(priv, "starting multicast thread\n"); mutex_lock(&mcast_mutex); - if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_delayed_work(priv->wq, &priv->mcast_task, 0); + set_bit(IPOIB_MCAST_RUN, &priv->flags); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); mutex_unlock(&mcast_mutex); return 0; @@ -635,7 +727,12 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) int ret = 0; if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); + + if (!IS_ERR_OR_NULL(mcast->mc)) ib_sa_free_multicast(mcast->mc); + else + ipoib_dbg(priv, "ipoib_mcast_leave with mcast->mc invalid\n"); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { ipoib_dbg_mcast(priv, "leaving MGID %pI6\n", @@ -646,7 +743,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) be16_to_cpu(mcast->mcmember.mlid)); if (ret) ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); - } + } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + ipoib_dbg(priv, "leaving with no mcmember but not a " + "SENDONLY join\n"); return 0; } @@ -687,6 +786,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); __ipoib_mcast_add(dev, mcast); list_add_tail(&mcast->list, &priv->multicast_list); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); } if (!mcast->ah) { @@ -696,13 +796,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } - - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) - ipoib_dbg_mcast(priv, "no address vector, " - "but multicast join already started\n"); - else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - ipoib_mcast_sendonly_join(mcast); - /* * If lookup completes between here and out:, don't * want to send packet twice. @@ -761,9 +854,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* seperate between the wait to the leave*/ + /* + * make sure the in-flight joins have finished before we attempt + * to leave + */ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) - if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) wait_for_completion(&mcast->done); list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { @@ -794,20 +890,14 @@ void ipoib_mcast_restart_task(struct work_struct *work) unsigned long flags; struct ib_sa_mcmember_rec rec; - ipoib_dbg_mcast(priv, "restarting multicast task\n"); + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) + /* + * shortcut...on shutdown flush is called next, just + * let it do all the work + */ + return; - /* - * We're running on the priv->wq right now, so we can't call - * mcast_stop_thread as it wants to flush the wq and that - * will deadlock. 
We don't actually *need* to stop the - * thread here anyway, so just clear the run flag, cancel - * any delayed work, do our work, remove the old entries, - * then restart the thread. - */ - mutex_lock(&mcast_mutex); - clear_bit(IPOIB_MCAST_RUN, &priv->flags); - cancel_delayed_work(&priv->mcast_task); - mutex_unlock(&mcast_mutex); + ipoib_dbg_mcast(priv, "restarting multicast task\n"); local_irq_save(flags); netif_addr_lock(dev); @@ -893,14 +983,27 @@ void ipoib_mcast_restart_task(struct work_struct *work) netif_addr_unlock(dev); local_irq_restore(flags); - /* We have to cancel outside of the spinlock */ + /* + * make sure the in-flight joins have finished before we attempt + * to leave + */ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) + wait_for_completion(&mcast->done); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { ipoib_mcast_leave(mcast->dev, mcast); ipoib_mcast_free(mcast); } - if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) - ipoib_mcast_start_thread(dev); + /* + * Double check that we are still up + */ + if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { + spin_lock_irqsave(&priv->lock, flags); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + spin_unlock_irqrestore(&priv->lock, flags); + } } #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG -- cgit 1.2.3-korg From d2fe937ce6ce23daf5fb214e45432dbb631581b7 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:06 -0500 Subject: IB/ipoib: deserialize multicast joins Allow the ipoib layer to attempt to join all outstanding multicast groups at once. The ib_sa layer will serialize multiple attempts to join the same group, but will process attempts to join different groups in parallel. Take advantage of that. In order to make this happen, change the mcast_join_thread to loop through all needed joins, sending a join request for each one that we still need to join. There are a few special cases we handle though: 1) Don't attempt to join anything but the broadcast group until the join of the broadcast group has succeeded. 2) No longer restart the join task at the end of completion handling. If we completed successfully, we are done. The join task now needs kicked either by mcast_send or mcast_restart_task or mcast_start_thread, but should not need started anytime else except when scheduling a backoff attempt to rejoin. 3) No longer use separate join/completion routines for regular and sendonly joins, pass them all through the same routine and just do the right thing based on the SENDONLY join flag. 4) Only try to join a SENDONLY join twice, then drop the packets and quit trying. We leave the mcast group in the list so that if we get a new packet, all that we have to do is queue up the packet and restart the join task and it will automatically try to join twice and then either send or flush the queue again. 
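The core of the reworked join task is roughly the loop below (a
simplified sketch; the real code also drops and re-takes priv->lock
and mcast_mutex around the join call, and skips sendonly groups whose
packet queue is empty):

	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (!IS_ERR_OR_NULL(mcast->mc) ||
		    test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			continue;	/* already joined or join in flight */
		if (mcast->backoff > 1 &&
		    time_before(jiffies, mcast->delay_until)) {
			/* still backing off; remember the earliest expiry */
			if (!delay_until ||
			    time_before(mcast->delay_until, delay_until))
				delay_until = mcast->delay_until;
			continue;
		}
		init_completion(&mcast->done);
		set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		/* sendonly groups join with create == 0, full members with 1 */
		ipoib_mcast_join(dev, mcast,
				 !test_bit(IPOIB_MCAST_FLAG_SENDONLY,
					   &mcast->flags));
	}
	if (delay_until)
		queue_delayed_work(priv->wq, &priv->mcast_task,
				   delay_until - jiffies);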
Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 250 ++++++++----------------- 1 file changed, 82 insertions(+), 168 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 277e7ac7c4dbf..c670d9c2cda74 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -307,111 +307,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, return 0; } -static int -ipoib_mcast_sendonly_join_complete(int status, - struct ib_sa_multicast *multicast) -{ - struct ipoib_mcast *mcast = multicast->context; - struct net_device *dev = mcast->dev; - struct ipoib_dev_priv *priv = netdev_priv(dev); - - /* - * We have to take the mutex to force mcast_sendonly_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - - /* We trap for port events ourselves. */ - if (status == -ENETRESET) { - status = 0; - goto out; - } - - if (!status) - status = ipoib_mcast_join_finish(mcast, &multicast->rec); - - if (status) { - if (mcast->logcount++ < 20) - ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " - "join failed for %pI6, status %d\n", - mcast->mcmember.mgid.raw, status); - - /* Flush out any queued packets */ - netif_tx_lock_bh(dev); - while (!skb_queue_empty(&mcast->pkt_queue)) { - ++dev->stats.tx_dropped; - dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); - } - netif_tx_unlock_bh(dev); - __ipoib_mcast_schedule_join_thread(priv, mcast, 1); - } else { - mcast->backoff = 1; - mcast->delay_until = jiffies; - __ipoib_mcast_schedule_join_thread(priv, NULL, 0); - } -out: - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - if (status) - mcast->mc = NULL; - complete(&mcast->done); - mutex_unlock(&mcast_mutex); - return status; -} - -static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) -{ - struct net_device *dev = mcast->dev; - struct ipoib_dev_priv *priv = netdev_priv(dev); - struct ib_sa_mcmember_rec rec = { -#if 0 /* Some SMs don't support send-only yet */ - .join_state = 4 -#else - .join_state = 1 -#endif - }; - int ret = 0; - - if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { - ipoib_dbg_mcast(priv, "device shutting down, no sendonly " - "multicast joins\n"); - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - complete(&mcast->done); - return -ENODEV; - } - - rec.mgid = mcast->mcmember.mgid; - rec.port_gid = priv->local_gid; - rec.pkey = cpu_to_be16(priv->pkey); - - mutex_lock(&mcast_mutex); - mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, - priv->port, &rec, - IB_SA_MCMEMBER_REC_MGID | - IB_SA_MCMEMBER_REC_PORT_GID | - IB_SA_MCMEMBER_REC_PKEY | - IB_SA_MCMEMBER_REC_JOIN_STATE, - GFP_ATOMIC, - ipoib_mcast_sendonly_join_complete, - mcast); - if (IS_ERR(mcast->mc)) { - ret = PTR_ERR(mcast->mc); - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " - "failed (ret = %d)\n", ret); - complete(&mcast->done); - } else { - ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " - "sendonly join\n", mcast->mcmember.mgid.raw); - } - mutex_unlock(&mcast_mutex); - - return ret; -} - void ipoib_mcast_carrier_on_task(struct work_struct *work) { struct ipoib_dev_priv *priv = 
container_of(work, struct ipoib_dev_priv, @@ -452,7 +347,9 @@ static int ipoib_mcast_join_complete(int status, struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); - ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", + ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? + "sendonly " : "", mcast->mcmember.mgid.raw, status); /* @@ -477,27 +374,52 @@ static int ipoib_mcast_join_complete(int status, if (!status) { mcast->backoff = 1; mcast->delay_until = jiffies; - __ipoib_mcast_schedule_join_thread(priv, NULL, 0); /* * Defer carrier on work to priv->wq to avoid a - * deadlock on rtnl_lock here. + * deadlock on rtnl_lock here. Requeue our multicast + * work too, which will end up happening right after + * our carrier on task work and will allow us to + * send out all of the non-broadcast joins */ - if (mcast == priv->broadcast) + if (mcast == priv->broadcast) { queue_work(priv->wq, &priv->carrier_on_task); + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + } } else { if (mcast->logcount++ < 20) { if (status == -ETIMEDOUT || status == -EAGAIN) { - ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", + ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", mcast->mcmember.mgid.raw, status); } else { - ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", + ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n", + test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", mcast->mcmember.mgid.raw, status); } } - /* Requeue this join task with a backoff delay */ - __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && + mcast->backoff >= 2) { + /* + * We only retry sendonly joins once before we drop + * the packet and quit trying to deal with the + * group. However, we leave the group in the + * mcast list as an unjoined group. If we want to + * try joining again, we simply queue up a packet + * and restart the join thread. The empty queue + * is why the join thread ignores this group. 
+ */ + mcast->backoff = 1; + netif_tx_lock_bh(dev); + while (!skb_queue_empty(&mcast->pkt_queue)) { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); + } + netif_tx_unlock_bh(dev); + } else + /* Requeue this join task with a backoff delay */ + __ipoib_mcast_schedule_join_thread(priv, mcast, 1); } out: clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); @@ -650,45 +572,45 @@ void ipoib_mcast_join_task(struct work_struct *work) list_for_each_entry(mcast, &priv->multicast_list, list) { if (IS_ERR_OR_NULL(mcast->mc) && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && - !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { + (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) || + !skb_queue_empty(&mcast->pkt_queue))) { if (mcast->backoff == 1 || - time_after_eq(jiffies, mcast->delay_until)) + time_after_eq(jiffies, mcast->delay_until)) { /* Found the next unjoined group */ - break; - else if (!delay_until || + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) + create = 0; + else + create = 1; + spin_unlock_irq(&priv->lock); + mutex_unlock(&mcast_mutex); + ipoib_mcast_join(dev, mcast, create); + mutex_lock(&mcast_mutex); + spin_lock_irq(&priv->lock); + } else if (!delay_until || time_before(mcast->delay_until, delay_until)) delay_until = mcast->delay_until; } } - if (&mcast->list == &priv->multicast_list) { - /* - * All done, unless we have delayed work from - * backoff retransmissions, but we will get - * restarted when the time is right, so we are - * done for now - */ - mcast = NULL; - ipoib_dbg_mcast(priv, "successfully joined all " - "multicast groups\n"); - } + mcast = NULL; + ipoib_dbg_mcast(priv, "successfully started all multicast joins\n"); out: + if (delay_until) { + cancel_delayed_work(&priv->mcast_task); + queue_delayed_work(priv->wq, &priv->mcast_task, + delay_until - jiffies); + } if (mcast) { init_completion(&mcast->done); set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); } spin_unlock_irq(&priv->lock); mutex_unlock(&mcast_mutex); - if (mcast) { - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - ipoib_mcast_sendonly_join(mcast); - else - ipoib_mcast_join(dev, mcast, create); - } - if (delay_until) - queue_delayed_work(priv->wq, &priv->mcast_task, - delay_until - jiffies); + if (mcast) + ipoib_mcast_join(dev, mcast, create); } int ipoib_mcast_start_thread(struct net_device *dev) @@ -731,8 +653,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) if (!IS_ERR_OR_NULL(mcast->mc)) ib_sa_free_multicast(mcast->mc); - else - ipoib_dbg(priv, "ipoib_mcast_leave with mcast->mc invalid\n"); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { ipoib_dbg_mcast(priv, "leaving MGID %pI6\n", @@ -768,43 +688,37 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) } mcast = __ipoib_mcast_find(dev, mgid); - if (!mcast) { - /* Let's create a new send only group now */ - ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n", - mgid); - - mcast = ipoib_mcast_alloc(dev, 0); + if (!mcast || !mcast->ah) { if (!mcast) { - ipoib_warn(priv, "unable to allocate memory for " - "multicast structure\n"); - ++dev->stats.tx_dropped; - dev_kfree_skb_any(skb); - goto out; - } - - set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); - memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); - __ipoib_mcast_add(dev, mcast); - list_add_tail(&mcast->list, &priv->multicast_list); - 
__ipoib_mcast_schedule_join_thread(priv, NULL, 0); - } + /* Let's create a new send only group now */ + ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n", + mgid); + + mcast = ipoib_mcast_alloc(dev, 0); + if (!mcast) { + ipoib_warn(priv, "unable to allocate memory " + "for multicast structure\n"); + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + goto unlock; + } - if (!mcast->ah) { + set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); + memcpy(mcast->mcmember.mgid.raw, mgid, + sizeof (union ib_gid)); + __ipoib_mcast_add(dev, mcast); + list_add_tail(&mcast->list, &priv->multicast_list); + } if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) skb_queue_tail(&mcast->pkt_queue, skb); else { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } - /* - * If lookup completes between here and out:, don't - * want to send packet twice. - */ - mcast = NULL; - } - -out: - if (mcast && mcast->ah) { + if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { + __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + } + } else { struct ipoib_neigh *neigh; spin_unlock_irqrestore(&priv->lock, flags); -- cgit 1.2.3-korg From 1c0453d64a341909bbf89cb68c9edaa6cff93850 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Sat, 21 Feb 2015 19:27:07 -0500 Subject: IB/ipoib: drop mcast_mutex usage We needed the mcast_mutex when we had to prevent the join completion callback from having the value it stored in mcast->mc overwritten by a delayed return from ib_sa_join_multicast. By storing the return of ib_sa_join_multicast in an intermediate variable, we prevent a delayed return from ib_sa_join_multicast overwriting the valid contents of mcast->mc, and we no longer need a mutex to force the join callback to run after the return of ib_sa_join_multicast. This allows us to do away with the mutex entirely and protect our critical sections with a just a spinlock instead. This is highly desirable as there were some places where we couldn't use a mutex because the code was not allowed to sleep, and so we were currently using a mix of mutex and spinlock to protect what we needed to protect. Now we only have a spin lock and the locking complexity is greatly reduced. Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 70 ++++++++++++-------------- 1 file changed, 32 insertions(+), 38 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index c670d9c2cda74..3203ebe9b1003 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -55,8 +55,6 @@ MODULE_PARM_DESC(mcast_debug_level, "Enable multicast debug tracing if > 0"); #endif -static DEFINE_MUTEX(mcast_mutex); - struct ipoib_mcast_iter { struct net_device *dev; union ib_gid mgid; @@ -67,7 +65,7 @@ struct ipoib_mcast_iter { }; /* - * This should be called with the mcast_mutex held + * This should be called with the priv->lock held */ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, struct ipoib_mcast *mcast, @@ -352,16 +350,6 @@ static int ipoib_mcast_join_complete(int status, "sendonly " : "", mcast->mcmember.mgid.raw, status); - /* - * We have to take the mutex to force mcast_join to - * return from ib_sa_multicast_join and set mcast->mc to a - * valid value. 
Otherwise we were racing with ourselves in - * that we might fail here, but get a valid return from - * ib_sa_multicast_join after we had cleared mcast->mc here, - * resulting in mis-matched joins and leaves and a deadlock - */ - mutex_lock(&mcast_mutex); - /* We trap for port events ourselves. */ if (status == -ENETRESET) { status = 0; @@ -383,8 +371,10 @@ static int ipoib_mcast_join_complete(int status, * send out all of the non-broadcast joins */ if (mcast == priv->broadcast) { + spin_lock_irq(&priv->lock); queue_work(priv->wq, &priv->carrier_on_task); __ipoib_mcast_schedule_join_thread(priv, NULL, 0); + goto out_locked; } } else { if (mcast->logcount++ < 20) { @@ -417,16 +407,28 @@ static int ipoib_mcast_join_complete(int status, dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } netif_tx_unlock_bh(dev); - } else + } else { + spin_lock_irq(&priv->lock); /* Requeue this join task with a backoff delay */ __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + goto out_locked; + } } out: - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + spin_lock_irq(&priv->lock); +out_locked: + /* + * Make sure to set mcast->mc before we clear the busy flag to avoid + * racing with code that checks for BUSY before checking mcast->mc + */ if (status) mcast->mc = NULL; + else + mcast->mc = multicast; + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + spin_unlock_irq(&priv->lock); complete(&mcast->done); - mutex_unlock(&mcast_mutex); + return status; } @@ -434,6 +436,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, int create) { struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ib_sa_multicast *multicast; struct ib_sa_mcmember_rec rec = { .join_state = 1 }; @@ -475,18 +478,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.hop_limit = priv->broadcast->mcmember.hop_limit; } - mutex_lock(&mcast_mutex); - mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, + multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, &rec, comp_mask, GFP_KERNEL, ipoib_mcast_join_complete, mcast); - if (IS_ERR(mcast->mc)) { - clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); - ret = PTR_ERR(mcast->mc); + if (IS_ERR(multicast)) { + ret = PTR_ERR(multicast); ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); + spin_lock_irq(&priv->lock); + /* Requeue this join task with a backoff delay */ __ipoib_mcast_schedule_join_thread(priv, mcast, 1); + clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + spin_unlock_irq(&priv->lock); complete(&mcast->done); } - mutex_unlock(&mcast_mutex); } void ipoib_mcast_join_task(struct work_struct *work) @@ -515,15 +519,6 @@ void ipoib_mcast_join_task(struct work_struct *work) else memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); - /* - * We have to hold the mutex to keep from racing with the join - * completion threads on setting flags on mcasts, and we have - * to hold the priv->lock because dev_flush will remove entries - * out from underneath us, so at a minimum we need the lock - * through the time that we do the for_each loop of the mcast - * list or else dev_flush can make us oops. 
- */ - mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) goto out; @@ -584,9 +579,7 @@ void ipoib_mcast_join_task(struct work_struct *work) else create = 1; spin_unlock_irq(&priv->lock); - mutex_unlock(&mcast_mutex); ipoib_mcast_join(dev, mcast, create); - mutex_lock(&mcast_mutex); spin_lock_irq(&priv->lock); } else if (!delay_until || time_before(mcast->delay_until, delay_until)) @@ -608,7 +601,6 @@ out: set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); } spin_unlock_irq(&priv->lock); - mutex_unlock(&mcast_mutex); if (mcast) ipoib_mcast_join(dev, mcast, create); } @@ -616,13 +608,14 @@ out: int ipoib_mcast_start_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); + unsigned long flags; ipoib_dbg_mcast(priv, "starting multicast thread\n"); - mutex_lock(&mcast_mutex); + spin_lock_irqsave(&priv->lock, flags); set_bit(IPOIB_MCAST_RUN, &priv->flags); __ipoib_mcast_schedule_join_thread(priv, NULL, 0); - mutex_unlock(&mcast_mutex); + spin_unlock_irqrestore(&priv->lock, flags); return 0; } @@ -630,13 +623,14 @@ int ipoib_mcast_start_thread(struct net_device *dev) int ipoib_mcast_stop_thread(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); + unsigned long flags; ipoib_dbg_mcast(priv, "stopping multicast thread\n"); - mutex_lock(&mcast_mutex); + spin_lock_irqsave(&priv->lock, flags); clear_bit(IPOIB_MCAST_RUN, &priv->flags); cancel_delayed_work(&priv->mcast_task); - mutex_unlock(&mcast_mutex); + spin_unlock_irqrestore(&priv->lock, flags); flush_workqueue(priv->wq); -- cgit 1.2.3-korg From a44878d100630a34a44f54960115b81e449858db Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:00 +0300 Subject: IB/ipoib: Use one linear skb in RX flow The current code in the RX flow uses two sg entries for each incoming packet, the first one was for the IB headers and the second for the rest of the data, that causes two dma map/unmap and two allocations, and few more actions that were done at the data path. Use only one linear skb on each incoming packet, for the data (IB headers and payload), that reduces the packet processing in the data-path (only one skb, no frags, the first frag was not used anyway, less memory allocations) and the dma handling (only one dma map/unmap over each incoming packet instead of two map/unmap per each incoming packet). After commit 73d3fe6d1c6d ("gro: fix aggregation for skb using frag_list") from Eric Dumazet, we will get full aggregation for large packets. When running bandwidth tests before and after the (over the card's numa node), using "netperf -H 1.1.1.3 -T -t TCP_STREAM", the results before are ~12Gbs before and after ~16Gbs on my setup (Mellanox's ConnectX3). 
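For reference, the sizing arithmetic behind the old two-fragment scheme, as a rough sketch (the 4K IB MTU and the 4 KiB PAGE_SIZE below are assumed example values):

    /* assumed example: 4K IB MTU on a system with 4 KiB pages */
    #define IB_GRH_BYTES          40
    #define IPOIB_UD_BUF_SIZE(m)  ((m) + IB_GRH_BYTES)

    /*
     * IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 = 4136 > PAGE_SIZE, which is
     * why ipoib_ud_need_sg() used to force the head-buffer plus page
     * split.  After this patch the full 4136 bytes are one linear skb
     * covered by a single ib_dma_map_single()/ib_dma_unmap_single() pair.
     */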
Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 5 --- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 67 ++++-------------------------- drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 13 ++---- 3 files changed, 13 insertions(+), 72 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index c79dcd5ee8ad0..769044c25ca54 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -434,11 +434,6 @@ struct ipoib_neigh { #define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN) #define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES) -static inline int ipoib_ud_need_sg(unsigned int ib_mtu) -{ - return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE; -} - void ipoib_neigh_dtor(struct ipoib_neigh *neigh); static inline void ipoib_neigh_put(struct ipoib_neigh *neigh) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index e144d07d53ccd..29b376dadd2b2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -94,39 +94,9 @@ void ipoib_free_ah(struct kref *kref) static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, u64 mapping[IPOIB_UD_RX_SG]) { - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { - ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, - DMA_FROM_DEVICE); - ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, - DMA_FROM_DEVICE); - } else - ib_dma_unmap_single(priv->ca, mapping[0], - IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), - DMA_FROM_DEVICE); -} - -static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, - struct sk_buff *skb, - unsigned int length) -{ - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - unsigned int size; - /* - * There is only two buffers needed for max_payload = 4K, - * first buf size is IPOIB_UD_HEAD_SIZE - */ - skb->tail += IPOIB_UD_HEAD_SIZE; - skb->len += length; - - size = length - IPOIB_UD_HEAD_SIZE; - - skb_frag_size_set(frag, size); - skb->data_len += size; - skb->truesize += PAGE_SIZE; - } else - skb_put(skb, length); - + ib_dma_unmap_single(priv->ca, mapping[0], + IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), + DMA_FROM_DEVICE); } static int ipoib_ib_post_receive(struct net_device *dev, int id) @@ -156,18 +126,11 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; int buf_size; - int tailroom; u64 *mapping; - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { - buf_size = IPOIB_UD_HEAD_SIZE; - tailroom = 128; /* reserve some tailroom for IP/TCP headers */ - } else { - buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); - tailroom = 0; - } + buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); - skb = dev_alloc_skb(buf_size + tailroom + 4); + skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN); if (unlikely(!skb)) return NULL; @@ -184,23 +147,8 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) goto error; - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { - struct page *page = alloc_page(GFP_ATOMIC); - if (!page) - goto partial_error; - skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); - mapping[1] = - ib_dma_map_page(priv->ca, page, - 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) - goto partial_error; - } - priv->rx_ring[id].skb = skb; return skb; - -partial_error: - 
ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); error: dev_kfree_skb_any(skb); return NULL; @@ -278,7 +226,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) wc->byte_len, wc->slid); ipoib_ud_dma_unmap_rx(priv, mapping); - ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); + + skb_put(skb, wc->byte_len); /* First byte of dgid signals multicast when 0xff */ dgid = &((struct ib_grh *)skb->data)->dgid; @@ -296,6 +245,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); + skb->truesize = SKB_TRUESIZE(skb->len); + ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 34628403fd83b..e5cc43074196d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -227,15 +227,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) priv->tx_wr.send_flags = IB_SEND_SIGNALED; priv->rx_sge[0].lkey = priv->mr->lkey; - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { - priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE; - priv->rx_sge[1].length = PAGE_SIZE; - priv->rx_sge[1].lkey = priv->mr->lkey; - priv->rx_wr.num_sge = IPOIB_UD_RX_SG; - } else { - priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); - priv->rx_wr.num_sge = 1; - } + + priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); + priv->rx_wr.num_sge = 1; + priv->rx_wr.next = NULL; priv->rx_wr.sg_list = priv->rx_sge; -- cgit 1.2.3-korg From 3fd0605caa74c3dc7fe69ed11ada452fe7169575 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:01 +0300 Subject: IB/ipoib: Update broadcast record values after each successful join request Update the cached broadcast record in the priv object after every new join of this broadcast domain group. These values are needed for the port configuration (MTU size) and to all the new multicast (non-broadcast) join requests initial parameters. For example, SM starts with 2K MTU for all the fabric, and after that it restarts (or handover to new SM) with new port configuration of 4K MTU. Without using the new values, the driver will keep its old configuration of 2K and will not apply the new configuration of 4K. 
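To put numbers on the example above (a sketch; the 4 byte IPoIB encapsulation header is assumed):

    /*
     * SM hands out a 2K broadcast MTU, then comes back with 4K:
     *
     *   before handover: IPOIB_UD_MTU(2048) = 2048 - 4 = 2044
     *   after  handover: IPOIB_UD_MTU(4096) = 4096 - 4 = 4092
     *
     * Without refreshing priv->broadcast->mcmember on every successful
     * join, mcast_mtu (and admin_mtu, when the two were equal) stays at
     * 2044 instead of moving up to 4092.
     */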
Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3203ebe9b1003..c83c9586c59cc 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -226,7 +226,23 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); return -EAGAIN; } - priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); + /*update priv member according to the new mcast*/ + priv->broadcast->mcmember.qkey = mcmember->qkey; + priv->broadcast->mcmember.mtu = mcmember->mtu; + priv->broadcast->mcmember.traffic_class = mcmember->traffic_class; + priv->broadcast->mcmember.rate = mcmember->rate; + priv->broadcast->mcmember.sl = mcmember->sl; + priv->broadcast->mcmember.flow_label = mcmember->flow_label; + priv->broadcast->mcmember.hop_limit = mcmember->hop_limit; + /* assume if the admin and the mcast are the same both can be changed */ + if (priv->mcast_mtu == priv->admin_mtu) + priv->admin_mtu = + priv->mcast_mtu = + IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); + else + priv->mcast_mtu = + IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); + priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; -- cgit 1.2.3-korg From 2c0107309550b332f83f8be366521557f4b3fd5a Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:02 +0300 Subject: IB/ipoib: Handle QP in SQE state As the result of a completion error the QP can moved to SQE state by the hardware. Since it's not the Error state, there are no flushes and hence the driver doesn't know about that. The fix creates a task that after completion with error which is not a flush tracks the QP state and if it is in SQE state moves it back to RTS. Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 5 +++ drivers/infiniband/ulp/ipoib/ipoib_ib.c | 59 ++++++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 769044c25ca54..2703d9a3e9b46 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -299,6 +299,11 @@ struct ipoib_neigh_table { struct completion deleted; }; +struct ipoib_qp_state_validate { + struct work_struct work; + struct ipoib_dev_priv *priv; +}; + /* * Device private locking: network stack tx_lock protects members used * in TX fast path, lock protects everything else. lock nests inside diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 29b376dadd2b2..63b92cbb29ad0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -327,6 +327,51 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, } } +/* + * As the result of a completion error the QP Can be transferred to SQE states. + * The function checks if the (send)QP is in SQE state and + * moves it back to RTS state, that in order to have it functional again. 
+ */ +static void ipoib_qp_state_validate_work(struct work_struct *work) +{ + struct ipoib_qp_state_validate *qp_work = + container_of(work, struct ipoib_qp_state_validate, work); + + struct ipoib_dev_priv *priv = qp_work->priv; + struct ib_qp_attr qp_attr; + struct ib_qp_init_attr query_init_attr; + int ret; + + ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); + if (ret) { + ipoib_warn(priv, "%s: Failed to query QP ret: %d\n", + __func__, ret); + goto free_res; + } + pr_info("%s: QP: 0x%x is in state: %d\n", + __func__, priv->qp->qp_num, qp_attr.qp_state); + + /* currently support only in SQE->RTS transition*/ + if (qp_attr.qp_state == IB_QPS_SQE) { + qp_attr.qp_state = IB_QPS_RTS; + + ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); + if (ret) { + pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n", + ret, priv->qp->qp_num); + goto free_res; + } + pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n", + __func__, priv->qp->qp_num); + } else { + pr_warn("QP (%d) will stay in state: %d\n", + priv->qp->qp_num, qp_attr.qp_state); + } + +free_res: + kfree(qp_work); +} + static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -358,10 +403,22 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) netif_wake_queue(dev); if (wc->status != IB_WC_SUCCESS && - wc->status != IB_WC_WR_FLUSH_ERR) + wc->status != IB_WC_WR_FLUSH_ERR) { + struct ipoib_qp_state_validate *qp_work; ipoib_warn(priv, "failed send event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); + qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC); + if (!qp_work) { + ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n", + __func__, priv->qp->qp_num); + return; + } + + INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work); + qp_work->priv = priv; + queue_work(priv->wq, &qp_work->work); + } } static int poll_tx(struct ipoib_dev_priv *priv) -- cgit 1.2.3-korg From 1e85b806f9b988ce2e465fb0f86c8cca228d83a9 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:03 +0300 Subject: IB/ipoib: Save only IPOIB_MAX_PATH_REC_QUEUE skb's Whenever there is no path->ah to the destination, keep only defined number of skb's. Otherwise there are cases that the driver can keep infinite list of skb's. For example, when one device want to send unicast arp to the destination, and from some reason the SM doesn't respond, the driver currently keeps all the skb's. If that unicast arp traffic stopped, all these skb's are kept by the path object till the interface is down. 
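In short, the unconditional enqueue becomes a capped one, along these lines (sketch of the pattern applied at both call sites):

    /* before: every skb waiting on an unanswered path query was kept */
    __skb_queue_tail(&path->queue, skb);

    /* after: cap the per-path backlog, drop and account the excess */
    if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
        __skb_queue_tail(&path->queue, skb);
    } else {
        ++dev->stats.tx_dropped;
        dev_kfree_skb_any(skb);    /* safe to call from any context */
    }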
Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib_main.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 176b3dfb49c3d..7cad4dd87469a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -640,8 +640,10 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, if (!path->query && path_rec_start(dev, path)) goto err_path; - - __skb_queue_tail(&neigh->queue, skb); + if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) + __skb_queue_tail(&neigh->queue, skb); + else + goto err_drop; } spin_unlock_irqrestore(&priv->lock, flags); @@ -676,7 +678,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, new_path = 1; } if (path) { - __skb_queue_tail(&path->queue, skb); + if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { + __skb_queue_tail(&path->queue, skb); + } else { + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + } if (!path->query && path_rec_start(dev, path)) { spin_unlock_irqrestore(&priv->lock, flags); -- cgit 1.2.3-korg From 0e5544d9bff432bfcac49ecf237738cc7cf3d032 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:04 +0300 Subject: IB/ipoib: Remove IPOIB_MCAST_RUN bit After Doug Ledford's changes there is no need in that bit, it's semantic becomes subset of the IPOIB_FLAG_OPER_UP bit. Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/ipoib/ipoib.h | 1 - drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 2703d9a3e9b46..bd94b0a6e9e53 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -87,7 +87,6 @@ enum { IPOIB_FLAG_ADMIN_UP = 2, IPOIB_PKEY_ASSIGNED = 3, IPOIB_FLAG_SUBINTERFACE = 5, - IPOIB_MCAST_RUN = 6, IPOIB_STOP_REAPER = 7, IPOIB_FLAG_ADMIN_CM = 9, IPOIB_FLAG_UMCAST = 10, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index c83c9586c59cc..0d23e0568deb6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -71,7 +71,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, struct ipoib_mcast *mcast, bool delay) { - if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return; /* @@ -519,7 +519,7 @@ void ipoib_mcast_join_task(struct work_struct *work) struct ipoib_mcast *mcast = NULL; int create = 1; - if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) + if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return; if (ib_query_port(priv->ca, priv->port, &port_attr) || @@ -629,7 +629,6 @@ int ipoib_mcast_start_thread(struct net_device *dev) ipoib_dbg_mcast(priv, "starting multicast thread\n"); spin_lock_irqsave(&priv->lock, flags); - set_bit(IPOIB_MCAST_RUN, &priv->flags); __ipoib_mcast_schedule_join_thread(priv, NULL, 0); spin_unlock_irqrestore(&priv->lock, flags); @@ -644,7 +643,6 @@ int ipoib_mcast_stop_thread(struct net_device *dev) ipoib_dbg_mcast(priv, "stopping multicast thread\n"); spin_lock_irqsave(&priv->lock, flags); - clear_bit(IPOIB_MCAST_RUN, &priv->flags); cancel_delayed_work(&priv->mcast_task); spin_unlock_irqrestore(&priv->lock, 
flags); -- cgit 1.2.3-korg From ca9b590caa17bcbbea119594992666e96cde9c2f Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 2 Apr 2015 13:39:05 +0300 Subject: IB/mlx4: Fix WQE LSO segment calculation The current code decreases from the mss size (which is the gso_size from the kernel skb) the size of the packet headers. It shouldn't do that because the mss that comes from the stack (e.g IPoIB) includes only the tcp payload without the headers. The result is indication to the HW that each packet that the HW sends is smaller than what it could be, and too many packets will be sent for big messages. An easy way to demonstrate one more aspect of the problem is by configuring the ipoib mtu to be less than 2*hlen (2*56) and then run app sending big TCP messages. This will tell the HW to send packets with giant (negative value which under unsigned arithmetics becomes a huge positive one) length and the QP moves to SQE state. Fixes: b832be1e4007 ('IB/mlx4: Add IPoIB LSO support') Reported-by: Matthew Finlay Signed-off-by: Erez Shitrit Signed-off-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/qp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ed2bd6701f9b1..fbde33a5228ce 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); - *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | - wr->wr.ud.hlen); + *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); *lso_seg_len = halign; return 0; } -- cgit 1.2.3-korg From a233c4b54c882817ae9dd73384d5dd75d3e57498 Mon Sep 17 00:00:00 2001 From: Sébastien Dugué Date: Thu, 9 Apr 2015 11:13:41 +0200 Subject: ib_uverbs: Fix pages leak when using XRC SRQs Hello, When an application using XRCs abruptly terminates, the mmaped pages of the CQ buffers are leaked. This comes from the fact that when resources are released in ib_uverbs_cleanup_ucontext(), we fail to release the CQs because their refcount is not 0. When creating an XRC SRQ, we increment the associated CQ refcount. This refcount is only decremented when the SRQ is released. Therefore we need to release the SRQs prior to the CQs to make sure that all references to the CQs are gone before trying to release these. 
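A minimal sketch of the reference-count dependency that dictates this ordering (the structures and helpers below are illustrative, not the actual uverbs types):

    struct cq  { int usecnt; };
    struct srq { struct cq *cq; };

    static void create_xrc_srq(struct srq *srq, struct cq *cq)
    {
        srq->cq = cq;
        cq->usecnt++;          /* reference held for the SRQ's lifetime */
    }

    static void destroy_srq(struct srq *srq)
    {
        srq->cq->usecnt--;     /* the only place the reference is dropped */
    }

    static int destroy_cq(struct cq *cq)
    {
        /* still referenced: the CQ and its mmapped buffers are not freed */
        return cq->usecnt ? -EBUSY : 0;
    }

Walking srq_list before cq_list in ib_uverbs_cleanup_ucontext() guarantees the CQ reference counts have dropped to zero before the CQs themselves are destroyed.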
Signed-off-by: Sebastien Dugue Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs_main.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 259dcc7779f5e..88cce9bb72fea 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -246,6 +246,17 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, kfree(uqp); } + list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { + struct ib_srq *srq = uobj->object; + struct ib_uevent_object *uevent = + container_of(uobj, struct ib_uevent_object, uobject); + + idr_remove_uobj(&ib_uverbs_srq_idr, uobj); + ib_destroy_srq(srq); + ib_uverbs_release_uevent(file, uevent); + kfree(uevent); + } + list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) { struct ib_cq *cq = uobj->object; struct ib_uverbs_event_file *ev_file = cq->cq_context; @@ -258,17 +269,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, kfree(ucq); } - list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { - struct ib_srq *srq = uobj->object; - struct ib_uevent_object *uevent = - container_of(uobj, struct ib_uevent_object, uobject); - - idr_remove_uobj(&ib_uverbs_srq_idr, uobj); - ib_destroy_srq(srq); - ib_uverbs_release_uevent(file, uevent); - kfree(uevent); - } - list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { struct ib_mr *mr = uobj->object; -- cgit 1.2.3-korg From b962dc0a480559c4c85911686bbb32a515b3d5c8 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Sun, 8 Mar 2015 16:36:14 -0700 Subject: rdma: replace deprecated ifconfig in doc The ifconfig command has been deprecated for many years. To encourage new users not to continue using it and learning iproute2; the ifconfig should not be used in examples. 
Signed-off-by: Stephen Hemminger Signed-off-by: Doug Ledford --- Documentation/filesystems/nfs/nfs-rdma.txt | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt index 724043858b083..95c13aa575ff3 100644 --- a/Documentation/filesystems/nfs/nfs-rdma.txt +++ b/Documentation/filesystems/nfs/nfs-rdma.txt @@ -187,8 +187,10 @@ Check RDMA and NFS Setup To further test the InfiniBand software stack, use IPoIB (this assumes you have two IB hosts named host1 and host2): - host1$ ifconfig ib0 a.b.c.x - host2$ ifconfig ib0 a.b.c.y + host1$ ip link set dev ib0 up + host1$ ip address add dev ib0 a.b.c.x + host2$ ip link set dev ib0 up + host2$ ip address add dev ib0 a.b.c.y host1$ ping a.b.c.y host2$ ping a.b.c.x @@ -229,7 +231,8 @@ NFS/RDMA Setup $ modprobe ib_mthca $ modprobe ib_ipoib - $ ifconfig ib0 a.b.c.d + $ ip li set dev ib0 up + $ ip addr add dev ib0 a.b.c.d NOTE: use unique addresses for the client and server -- cgit 1.2.3-korg From d2928a8c0db061939ea34ff3f53f36dcccdf5aa2 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Sun, 15 Mar 2015 00:09:41 +0530 Subject: MAINTAINERS: Adding list of maintainers for ocrdma Updating the MAINTAINERS file with ocrdma maintainers and their email ids Signed-off-by: Selvin Xavier Signed-off-by: Doug Ledford --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index c72a7baec55cc..1b9e38d023181 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8791,6 +8791,15 @@ W: http://www.emulex.com S: Supported F: drivers/net/ethernet/emulex/benet/ +EMULEX ONECONNECT ROCE DRIVER +M: Selvin Xavier +M: Devesh Sharma +M: Mitesh Ahuja +L: linux-rdma@vger.kernel.org +W: http://www.emulex.com +S: Supported +F: drivers/infiniband/hw/ocrdma/ + SFC NETWORK DRIVER M: Solarflare linux maintainers M: Shradha Shah -- cgit 1.2.3-korg From cc47d369b51286129aad1735096b1884b8068ad1 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Mon, 16 Mar 2015 18:49:59 +0100 Subject: infiniband/mlx4: check for mapping error Since ib_dma_map_single can fail use ib_dma_mapping_error to check for errors. 
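The pattern being added is the usual map-then-check idiom, roughly as below (a generic sketch, not the exact mlx4 code):

    void *addr = kmalloc(buf_size, GFP_KERNEL);
    u64 map;

    if (!addr)
        return -ENOMEM;
    map = ib_dma_map_single(ibdev, addr, buf_size, DMA_FROM_DEVICE);
    if (ib_dma_mapping_error(ibdev, map)) {
        /* never post a failed mapping to the HW; unwind and bail out */
        kfree(addr);
        return -ENOMEM;
    }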
Signed-off-by: Sebastian Ott Acked-by: Jack Morgenstein Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/mad.c | 9 +++++++++ drivers/infiniband/hw/mlx4/qp.c | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 59040265e3614..9cd2b002d7ae5 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, tun_qp->ring[i].addr, rx_buf_size, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { + kfree(tun_qp->ring[i].addr); + goto err; + } } for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) { @@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, tun_qp->tx_ring[i].buf.addr, tx_buf_size, DMA_TO_DEVICE); + if (ib_dma_mapping_error(ctx->ib_dev, + tun_qp->tx_ring[i].buf.map)) { + kfree(tun_qp->tx_ring[i].buf.addr); + goto tx_err; + } tun_qp->tx_ring[i].ah = NULL; } spin_lock_init(&tun_qp->tx_lock); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ed2bd6701f9b1..0766550d6ed93 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, sizeof (struct mlx4_ib_proxy_sqp_hdr), DMA_FROM_DEVICE); + if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { + kfree(qp->sqp_proxy_rcv[i].addr); + goto err; + } } return 0; -- cgit 1.2.3-korg From 56b5390caf73694b2f50a042542f0b43c29d5ca8 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 9 Jul 2014 15:58:22 +0200 Subject: IB/srp: Use P_Key cache for P_Key lookups This change slightly reduces the time needed to log in. Signed-off-by: Bart Van Assche Reviewed-by: Sagi Grimberg Reviewed-by: David Dillow Cc: Sebastian Parschauer Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/srp/ib_srp.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 0747c0595a9d4..918814cd0f806 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -265,10 +266,10 @@ static int srp_init_qp(struct srp_target_port *target, if (!attr) return -ENOMEM; - ret = ib_find_pkey(target->srp_host->srp_dev->dev, - target->srp_host->port, - be16_to_cpu(target->pkey), - &attr->pkey_index); + ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, + target->srp_host->port, + be16_to_cpu(target->pkey), + &attr->pkey_index); if (ret) goto out; -- cgit 1.2.3-korg From 9f5d32af09b9a36ca1476c9932aac675051670c8 Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Mon, 20 Oct 2014 18:25:15 -0400 Subject: ib_srpt: convert printk's to pr_* functions The driver already defined the pr_format, it just hadn't been converted to use pr_info, pr_warn, and pr_err instead of the equivalent printks. Convert so that messages from the driver are now properly tagged with their driver name and can be more easily debugged. In addition, a number of these printk's were not newline terminated, so fix that at the same time. 
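For context, the conversion leans on the pr_fmt prefix the driver already defines; the usual shape is (sketch, assuming the common KBUILD_MODNAME-based definition):

    /* defined before the first #include so printk.h picks it up */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    /* before */
    printk(KERN_ERR "received unrecognized IB event %d\n", event->event);

    /* after: same severity, automatically prefixed, e.g. "ib_srpt: ..." */
    pr_err("received unrecognized IB event %d\n", event->event);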
Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/srpt/ib_srpt.c | 188 ++++++++++++++++------------------ 1 file changed, 91 insertions(+), 97 deletions(-) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 6e0a477681e90..4b9b866e6b0d6 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -207,7 +207,7 @@ static void srpt_event_handler(struct ib_event_handler *handler, } break; default: - printk(KERN_ERR "received unrecognized IB event %d\n", + pr_err("received unrecognized IB event %d\n", event->event); break; } @@ -218,7 +218,7 @@ static void srpt_event_handler(struct ib_event_handler *handler, */ static void srpt_srq_event(struct ib_event *event, void *ctx) { - printk(KERN_INFO "SRQ event %d\n", event->event); + pr_info("SRQ event %d\n", event->event); } /** @@ -242,8 +242,7 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) ch->sess_name, srpt_get_ch_state(ch)); break; default: - printk(KERN_ERR "received unrecognized IB QP event %d\n", - event->event); + pr_err("received unrecognized IB QP event %d\n", event->event); break; } } @@ -602,7 +601,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev) sport = &sdev->port[i - 1]; WARN_ON(sport->port != i); if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0) - printk(KERN_ERR "disabling MAD processing failed.\n"); + pr_err("disabling MAD processing failed.\n"); if (sport->mad_agent) { ib_unregister_mad_agent(sport->mad_agent); sport->mad_agent = NULL; @@ -810,7 +809,7 @@ static int srpt_post_send(struct srpt_rdma_ch *ch, ret = -ENOMEM; if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) { - printk(KERN_WARNING "IB send queue full (needed 1)\n"); + pr_warn("IB send queue full (needed 1)\n"); goto out; } @@ -912,7 +911,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx, if (ioctx->n_rbuf > (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) { - printk(KERN_ERR "received unsupported SRP_CMD request" + pr_err("received unsupported SRP_CMD request" " type (%u out + %u in != %u / %zu)\n", srp_cmd->data_out_desc_cnt, srp_cmd->data_in_desc_cnt, @@ -1432,7 +1431,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, srpt_unmap_sg_to_ib_sge(ch, ioctx); transport_generic_free_cmd(&ioctx->cmd, 0); } else { - printk(KERN_ERR "IB completion has been received too late for" + pr_err("IB completion has been received too late for" " wr_id = %u.\n", ioctx->ioctx.index); } } @@ -1457,7 +1456,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, SRPT_STATE_DATA_IN)) target_execute_cmd(&ioctx->cmd); else - printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__, + pr_err("%s[%d]: wrong state = %d\n", __func__, __LINE__, srpt_get_cmd_state(ioctx)); } else if (opcode == SRPT_RDMA_ABORT) { ioctx->rdma_aborted = true; @@ -1481,7 +1480,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, switch (opcode) { case SRPT_RDMA_READ_LAST: if (ioctx->n_rdma <= 0) { - printk(KERN_ERR "Received invalid RDMA read" + pr_err("Received invalid RDMA read" " error completion with idx %d\n", ioctx->ioctx.index); break; @@ -1490,14 +1489,13 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, if (state == SRPT_STATE_NEED_DATA) srpt_abort_cmd(ioctx); else - printk(KERN_ERR "%s[%d]: wrong state = %d\n", + pr_err("%s[%d]: wrong state = %d\n", __func__, __LINE__, state); break; case SRPT_RDMA_WRITE_LAST: break; default: - printk(KERN_ERR "%s[%d]: opcode = 
%u\n", __func__, - __LINE__, opcode); + pr_err("%s[%d]: opcode = %u\n", __func__, __LINE__, opcode); break; } } @@ -1549,8 +1547,8 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); if (sense_data_len > max_sense_len) { - printk(KERN_WARNING "truncated sense data from %d to %d" - " bytes\n", sense_data_len, max_sense_len); + pr_warn("truncated sense data from %d to %d" + " bytes\n", sense_data_len, max_sense_len); sense_data_len = max_sense_len; } @@ -1628,8 +1626,8 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) int addressing_method; if (unlikely(len < 2)) { - printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or " - "more", len); + pr_err("Illegal LUN length %d, expected 2 bytes or more\n", + len); goto out; } @@ -1663,7 +1661,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN: default: - printk(KERN_ERR "Unimplemented LUN addressing method %u", + pr_err("Unimplemented LUN addressing method %u\n", addressing_method); break; } @@ -1672,8 +1670,7 @@ out: return res; out_err: - printk(KERN_ERR "Support for multi-level LUNs has not yet been" - " implemented"); + pr_err("Support for multi-level LUNs has not yet been implemented\n"); goto out; } @@ -1723,7 +1720,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch, } if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) { - printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n", + pr_err("0x%llx: parsing SRP descriptor table failed.\n", srp_cmd->tag); ret = TCM_INVALID_CDB_FIELD; goto send_sense; @@ -1912,7 +1909,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch, srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx); break; case SRP_I_LOGOUT: - printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n"); + pr_err("Not yet implemented: SRP_I_LOGOUT\n"); break; case SRP_CRED_RSP: pr_debug("received SRP_CRED_RSP\n"); @@ -1921,10 +1918,10 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch, pr_debug("received SRP_AER_RSP\n"); break; case SRP_RSP: - printk(KERN_ERR "Received SRP_RSP\n"); + pr_err("Received SRP_RSP\n"); break; default: - printk(KERN_ERR "received IU with unknown opcode 0x%x\n", + pr_err("received IU with unknown opcode 0x%x\n", srp_cmd->opcode); break; } @@ -1948,12 +1945,12 @@ static void srpt_process_rcv_completion(struct ib_cq *cq, req_lim = atomic_dec_return(&ch->req_lim); if (unlikely(req_lim < 0)) - printk(KERN_ERR "req_lim = %d < 0\n", req_lim); + pr_err("req_lim = %d < 0\n", req_lim); ioctx = sdev->ioctx_ring[index]; srpt_handle_new_iu(ch, ioctx, NULL); } else { - printk(KERN_INFO "receiving failed for idx %u with status %d\n", - index, wc->status); + pr_info("receiving failed for idx %u with status %d\n", + index, wc->status); } } @@ -1993,12 +1990,12 @@ static void srpt_process_send_completion(struct ib_cq *cq, } } else { if (opcode == SRPT_SEND) { - printk(KERN_INFO "sending response for idx %u failed" - " with status %d\n", index, wc->status); + pr_info("sending response for idx %u failed" + " with status %d\n", index, wc->status); srpt_handle_send_err_comp(ch, wc->wr_id); } else if (opcode != SRPT_RDMA_MID) { - printk(KERN_INFO "RDMA t %d for idx %u failed with" - " status %d", opcode, index, wc->status); + pr_info("RDMA t %d for idx %u failed with" + " status %d\n", opcode, index, wc->status); srpt_handle_rdma_err_comp(ch, send_ioctx, opcode); } } @@ -2062,15 +2059,15 @@ static int srpt_compl_thread(void 
*arg) ch = arg; BUG_ON(!ch); - printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n", - ch->sess_name, ch->thread->comm, current->pid); + pr_info("Session %s: kernel thread %s (PID %d) started\n", + ch->sess_name, ch->thread->comm, current->pid); while (!kthread_should_stop()) { wait_event_interruptible(ch->wait_queue, (srpt_process_completion(ch->cq, ch), kthread_should_stop())); } - printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n", - ch->sess_name, ch->thread->comm, current->pid); + pr_info("Session %s: kernel thread %s (PID %d) stopped\n", + ch->sess_name, ch->thread->comm, current->pid); return 0; } @@ -2097,7 +2094,7 @@ retry: ch->rq_size + srp_sq_size, 0); if (IS_ERR(ch->cq)) { ret = PTR_ERR(ch->cq); - printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n", + pr_err("failed to create CQ cqe= %d ret= %d\n", ch->rq_size + srp_sq_size, ret); goto out; } @@ -2123,7 +2120,7 @@ retry: goto retry; } } - printk(KERN_ERR "failed to create_qp ret= %d\n", ret); + pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } @@ -2143,7 +2140,7 @@ retry: ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl"); if (IS_ERR(ch->thread)) { - printk(KERN_ERR "failed to create kernel thread %ld\n", + pr_err("failed to create kernel thread %ld\n", PTR_ERR(ch->thread)); ch->thread = NULL; goto err_destroy_qp; @@ -2204,7 +2201,7 @@ static void __srpt_close_ch(struct srpt_rdma_ch *ch) /* fall through */ case CH_LIVE: if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) - printk(KERN_ERR "sending CM DREQ failed.\n"); + pr_err("sending CM DREQ failed.\n"); break; case CH_DISCONNECTING: break; @@ -2291,7 +2288,7 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id) ret = srpt_ch_qp_err(ch); if (ret < 0) - printk(KERN_ERR "Setting queue pair in error state" + pr_err("Setting queue pair in error state" " failed: %d\n", ret); } } @@ -2435,17 +2432,17 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, it_iu_len = be32_to_cpu(req->req_it_iu_len); - printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," - " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" - " (guid=0x%llx:0x%llx)\n", - be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), - be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), - be64_to_cpu(*(__be64 *)&req->target_port_id[0]), - be64_to_cpu(*(__be64 *)&req->target_port_id[8]), - it_iu_len, - param->port, - be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), - be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); + pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," + " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" + " (guid=0x%llx:0x%llx)\n", + be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), + be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), + be64_to_cpu(*(__be64 *)&req->target_port_id[0]), + be64_to_cpu(*(__be64 *)&req->target_port_id[8]), + it_iu_len, + param->port, + be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), + be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); rsp = kzalloc(sizeof *rsp, GFP_KERNEL); rej = kzalloc(sizeof *rej, GFP_KERNEL); @@ -2460,7 +2457,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); ret = -EINVAL; - printk(KERN_ERR "rejected SRP_LOGIN_REQ because its" + pr_err("rejected SRP_LOGIN_REQ because its" " length (%d bytes) is out of range (%d .. 
%d)\n", it_iu_len, 64, srp_max_req_size); goto reject; @@ -2470,7 +2467,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); ret = -EINVAL; - printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port" + pr_err("rejected SRP_LOGIN_REQ because the target port" " has not yet been enabled\n"); goto reject; } @@ -2516,7 +2513,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); ret = -ENOMEM; - printk(KERN_ERR "rejected SRP_LOGIN_REQ because it" + pr_err("rejected SRP_LOGIN_REQ because it" " has an invalid target port identifier.\n"); goto reject; } @@ -2525,7 +2522,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (!ch) { rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); - printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n"); + pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); ret = -ENOMEM; goto reject; } @@ -2562,7 +2559,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (ret) { rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); - printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating" + pr_err("rejected SRP_LOGIN_REQ because creating" " a new RDMA channel failed.\n"); goto free_ring; } @@ -2571,7 +2568,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (ret) { rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); - printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling" + pr_err("rejected SRP_LOGIN_REQ because enabling" " RTR failed (error code = %d)\n", ret); goto destroy_ib; } @@ -2586,8 +2583,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, nacl = srpt_lookup_acl(sport, ch->i_port_id); if (!nacl) { - printk(KERN_INFO "Rejected login because no ACL has been" - " configured yet for initiator %s.\n", ch->sess_name); + pr_info("Rejected login because no ACL has been" + " configured yet for initiator %s.\n", ch->sess_name); rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); goto destroy_ib; @@ -2631,7 +2628,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = ib_send_cm_rep(cm_id, rep_param); if (ret) { - printk(KERN_ERR "sending SRP_LOGIN_REQ response failed" + pr_err("sending SRP_LOGIN_REQ response failed" " (error code = %d)\n", ret); goto release_channel; } @@ -2679,7 +2676,7 @@ out: static void srpt_cm_rej_recv(struct ib_cm_id *cm_id) { - printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id); + pr_info("Received IB REJ for cm_id %p.\n", cm_id); srpt_drain_channel(cm_id); } @@ -2714,13 +2711,13 @@ static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id) static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id) { - printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id); + pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id); srpt_drain_channel(cm_id); } static void srpt_cm_rep_error(struct ib_cm_id *cm_id) { - printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id); + pr_info("Received IB REP error for cm_id %p.\n", cm_id); srpt_drain_channel(cm_id); } @@ -2755,9 +2752,9 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id) if (send_drep) { if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0) - printk(KERN_ERR "Sending IB DREP failed.\n"); - printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n", - ch->sess_name); + pr_err("Sending IB DREP failed.\n"); + pr_info("Received DREQ and sent DREP for session %s.\n", + 
ch->sess_name); } } @@ -2766,8 +2763,7 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id) */ static void srpt_cm_drep_recv(struct ib_cm_id *cm_id) { - printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n", - cm_id); + pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id); srpt_drain_channel(cm_id); } @@ -2811,14 +2807,13 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) srpt_cm_rep_error(cm_id); break; case IB_CM_DREQ_ERROR: - printk(KERN_INFO "Received IB DREQ ERROR event.\n"); + pr_info("Received IB DREQ ERROR event.\n"); break; case IB_CM_MRA_RECEIVED: - printk(KERN_INFO "Received IB MRA event\n"); + pr_info("Received IB MRA event\n"); break; default: - printk(KERN_ERR "received unrecognized IB CM event %d\n", - event->event); + pr_err("received unrecognized IB CM event %d\n", event->event); break; } @@ -2848,8 +2843,8 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, ret = -ENOMEM; sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); if (sq_wr_avail < 0) { - printk(KERN_WARNING "IB send queue full (needed %d)\n", - n_rdma); + pr_warn("IB send queue full (needed %d)\n", + n_rdma); goto out; } } @@ -2889,7 +2884,7 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, } if (ret) - printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d", + pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", __func__, __LINE__, ret, i, n_rdma); if (ret && i > 0) { wr.num_sge = 0; @@ -2897,12 +2892,12 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, wr.send_flags = IB_SEND_SIGNALED; while (ch->state == CH_LIVE && ib_post_send(ch->qp, &wr, &bad_wr) != 0) { - printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]", + pr_info("Trying to abort failed RDMA transfer [%d]\n", ioctx->ioctx.index); msleep(1000); } while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { - printk(KERN_INFO "Waiting until RDMA abort finished [%d]", + pr_info("Waiting until RDMA abort finished [%d]\n", ioctx->ioctx.index); msleep(1000); } @@ -2923,17 +2918,17 @@ static int srpt_xfer_data(struct srpt_rdma_ch *ch, ret = srpt_map_sg_to_ib_sge(ch, ioctx); if (ret) { - printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret); + pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret); goto out; } ret = srpt_perform_rdmas(ch, ioctx); if (ret) { if (ret == -EAGAIN || ret == -ENOMEM) - printk(KERN_INFO "%s[%d] queue full -- ret=%d\n", - __func__, __LINE__, ret); + pr_info("%s[%d] queue full -- ret=%d\n", + __func__, __LINE__, ret); else - printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n", + pr_err("%s[%d] fatal error -- ret=%d\n", __func__, __LINE__, ret); goto out_unmap; } @@ -3058,7 +3053,7 @@ static void srpt_queue_response(struct se_cmd *cmd) !ioctx->queue_status_only) { ret = srpt_xfer_data(ch, ioctx); if (ret) { - printk(KERN_ERR "xfer_data failed for tag %llu\n", + pr_err("xfer_data failed for tag %llu\n", ioctx->tag); return; } @@ -3075,7 +3070,7 @@ static void srpt_queue_response(struct se_cmd *cmd) } ret = srpt_post_send(ch, ioctx, resp_len); if (ret) { - printk(KERN_ERR "sending cmd response failed for tag %llu\n", + pr_err("sending cmd response failed for tag %llu\n", ioctx->tag); srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); @@ -3154,7 +3149,7 @@ static int srpt_release_sdev(struct srpt_device *sdev) res = wait_event_interruptible(sdev->ch_releaseQ, srpt_ch_list_empty(sdev)); if (res) - printk(KERN_ERR "%s: interrupted.\n", __func__); + pr_err("%s: interrupted.\n", __func__); return 0; } @@ 
-3293,7 +3288,7 @@ static void srpt_add_one(struct ib_device *device) spin_lock_init(&sport->port_acl_lock); if (srpt_refresh_port(sport)) { - printk(KERN_ERR "MAD registration failed for %s-%d.\n", + pr_err("MAD registration failed for %s-%d.\n", srpt_sdev_name(sdev), i); goto err_ring; } @@ -3330,7 +3325,7 @@ free_dev: kfree(sdev); err: sdev = NULL; - printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name); + pr_info("%s(%s) failed.\n", __func__, device->name); goto out; } @@ -3344,8 +3339,7 @@ static void srpt_remove_one(struct ib_device *device) sdev = ib_get_client_data(device, &srpt_client); if (!sdev) { - printk(KERN_INFO "%s(%s): nothing to do.\n", __func__, - device->name); + pr_info("%s(%s): nothing to do.\n", __func__, device->name); return; } @@ -3464,7 +3458,7 @@ static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); if (!nacl) { - printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n"); + pr_err("Unable to allocate struct srpt_node_acl\n"); return NULL; } @@ -3615,7 +3609,7 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, u8 i_port_id[16]; if (srpt_parse_i_port_id(i_port_id, name) < 0) { - printk(KERN_ERR "invalid initiator port ID %s\n", name); + pr_err("invalid initiator port ID %s\n", name); ret = -EINVAL; goto err; } @@ -3816,12 +3810,12 @@ static ssize_t srpt_tpg_store_enable( ret = kstrtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); + pr_err("Unable to extract srpt_tpg_store_enable\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp); + pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp); return -EINVAL; } if (tmp == 1) @@ -3980,7 +3974,7 @@ static int __init srpt_init_module(void) ret = -EINVAL; if (srp_max_req_size < MIN_MAX_REQ_SIZE) { - printk(KERN_ERR "invalid value %d for kernel module parameter" + pr_err("invalid value %d for kernel module parameter" " srp_max_req_size -- must be at least %d.\n", srp_max_req_size, MIN_MAX_REQ_SIZE); goto out; @@ -3988,7 +3982,7 @@ static int __init srpt_init_module(void) if (srpt_srq_size < MIN_SRPT_SRQ_SIZE || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { - printk(KERN_ERR "invalid value %d for kernel module parameter" + pr_err("invalid value %d for kernel module parameter" " srpt_srq_size -- must be in the range [%d..%d].\n", srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); goto out; @@ -3996,7 +3990,7 @@ static int __init srpt_init_module(void) srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); if (IS_ERR(srpt_target)) { - printk(KERN_ERR "couldn't register\n"); + pr_err("couldn't register\n"); ret = PTR_ERR(srpt_target); goto out; } @@ -4018,13 +4012,13 @@ static int __init srpt_init_module(void) ret = target_fabric_configfs_register(srpt_target); if (ret < 0) { - printk(KERN_ERR "couldn't register\n"); + pr_err("couldn't register\n"); goto out_free_target; } ret = ib_register_client(&srpt_client); if (ret) { - printk(KERN_ERR "couldn't register IB client\n"); + pr_err("couldn't register IB client\n"); goto out_unregister_target; } -- cgit 1.2.3-korg From c4de4663e0fa858e3a84f9b32b2e1dd2de23fab2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:11 +0300 Subject: IB/iser: Fix unload during ep_poll wrong dereference In case the user unloaded ib_iser while ep_connect is in progress, we need to destroy the endpoint although ep_disconnect 
wasn't invoked (we detect this by the iser conn state != DOWN). However, if we got an REJECTED/UNREACHABLE CM event we move the connection state to DOWN which will prevent us from destroying the endpoint in the module unload stage. Fix this by setting the connection state to TERMINATING in iser_conn_error so we can still destroy the endpoint at unload stage. Reported-by: Ariel Nahum Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 4065abe28829f..070c5af28a751 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -721,7 +721,7 @@ static void iser_connect_error(struct rdma_cm_id *cma_id) struct iser_conn *iser_conn; iser_conn = (struct iser_conn *)cma_id->context; - iser_conn->state = ISER_CONN_DOWN; + iser_conn->state = ISER_CONN_TERMINATING; } /** -- cgit 1.2.3-korg From 30bf1d58ae8e87fef20248c7166777deab0d7f9c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:12 +0300 Subject: IB/iser: Handle fastreg/local_inv completion errors Fast registration and local invalidate work requests can also fail. We should call error completion handler for them. Reported-by: Roi Dayan Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_verbs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 070c5af28a751..7ee4926925d31 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1210,6 +1210,9 @@ iser_handle_comp_error(struct ib_conn *ib_conn, iscsi_conn_failure(iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); + if (wc->wr_id == ISER_FASTREG_LI_WRID) + return; + if (is_iser_tx_desc(iser_conn, wr_id)) { struct iser_tx_desc *desc = wr_id; @@ -1254,13 +1257,11 @@ static void iser_handle_wc(struct ib_wc *wc) else iser_dbg("flush error: wr id %llx\n", wc->wr_id); - if (wc->wr_id != ISER_FASTREG_LI_WRID && - wc->wr_id != ISER_BEACON_WRID) - iser_handle_comp_error(ib_conn, wc); - - /* complete in case all flush errors were consumed */ if (wc->wr_id == ISER_BEACON_WRID) + /* all flush errors were consumed */ complete(&ib_conn->flush_comp); + else + iser_handle_comp_error(ib_conn, wc); } } -- cgit 1.2.3-korg From a065fe6aa25ba6ba93c02dc13486131bb3c64d5f Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:13 +0300 Subject: IB/iser: Fix wrong calculation of protection buffer length This length miss-calculation may cause a silent data corruption in the DIX case and cause the device to reference unmapped area. 
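A worked example shows the operator-precedence slip (assumed values: a 1 MiB data transfer, 512 byte sectors, 8 bytes of protection information per sector):

    /*
     * Intended:   (1048576 >> ilog2(512)) * 8 = (1048576 >> 9) * 8
     *                                         = 2048 sectors * 8 = 16384 bytes
     * As written:  1048576 >> (ilog2(512) * 8) = 1048576 >> 72,
     *              a shift wider than the operand, because '*' binds
     *              tighter than '>>', so prot_buf->data_len ends up
     *              nonsensical.
     */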
Fixes: d77e65350f2d ('libiscsi, iser: Adjust data_length to include protection information') Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_initiator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 20e859a6f1a63..76eb57b31a599 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn, if (scsi_prot_sg_count(sc)) { prot_buf->buf = scsi_prot_sglist(sc); prot_buf->size = scsi_prot_sg_count(sc); - prot_buf->data_len = data_buf->data_len >> - ilog2(sc->device->sector_size) * 8; + prot_buf->data_len = (data_buf->data_len >> + ilog2(sc->device->sector_size)) * 8; } if (hdr->flags & ISCSI_FLAG_CMD_READ) { -- cgit 1.2.3-korg From ecc3993a2ad2a4ce8f1a58a08e9177f21015492d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:14 +0300 Subject: IB/iser: Remove redundant cmd_data_len calculation This code was added before we had protection data length calculation (in iser_send_command), so we needed to calc the sg data length from the sg itself. This is not needed anymore. This patch does not change any functionality. Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 341040bf09849..32ccd5cea6759 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -53,12 +53,9 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct scatterlist *sgl = (struct scatterlist *)data->buf; struct scatterlist *sg; char *mem = NULL; - unsigned long cmd_data_len = 0; + unsigned long cmd_data_len = data->data_len; int dma_nents, i; - for_each_sg(sgl, sg, data->size, i) - cmd_data_len += ib_sg_dma_len(dev, sg); - if (cmd_data_len > ISER_KMALLOC_THRESHOLD) mem = (void *)__get_free_pages(GFP_ATOMIC, ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); -- cgit 1.2.3-korg From e3784bd1d9f1039f28dff2c0c0d17daabb3d6761 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:15 +0300 Subject: IB/iser: Remove a redundant struct iser_data_buf No need to keep two iser_data_buf structures just in case we use mem copy. We can avoid that just by adding a pointer to the original sg. So keep only two iser_data_buf per command (data and protection) and pass the relevant data_buf to bounce buffer routine. This patch does not change any functionality. 
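In practice the bounce-buffer path now swaps the SG pointer in place and remembers the caller's list in orig_sg; the swap reduces to the assignments taken from the iser_start_rdma_unaligned_sg hunk further below:

        sg_init_one(&data->sg_single, mem, cmd_data_len);
        data->orig_sg  = data->sg;         /* remember the caller's SG list  */
        data->sg       = &data->sg_single; /* describe the bounce buffer now */
        data->copy_buf = mem;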
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 12 +++--- drivers/infiniband/ulp/iser/iser_initiator.c | 16 +++----- drivers/infiniband/ulp/iser/iser_memory.c | 58 +++++++++++----------------- 3 files changed, 34 insertions(+), 52 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index b47aea1094b2d..5c7036c90766f 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -218,20 +218,23 @@ enum iser_data_dir { /** * struct iser_data_buf - iSER data buffer * - * @buf: pointer to the sg list + * @sg: pointer to the sg list * @size: num entries of this sg * @data_len: total beffer byte len * @dma_nents: returned by dma_map_sg * @copy_buf: allocated copy buf for SGs unaligned * for rdma which are copied + * @orig_sg: pointer to the original sg list (in case + * we used a copy) * @sg_single: SG-ified clone of a non SG SC or * unaligned SG */ struct iser_data_buf { - void *buf; + struct scatterlist *sg; unsigned int size; unsigned long data_len; unsigned int dma_nents; + struct scatterlist *orig_sg; char *copy_buf; struct scatterlist sg_single; }; @@ -536,9 +539,7 @@ struct iser_conn { * @dir: iser data direction * @rdma_regd: task rdma registration desc * @data: iser data buffer desc - * @data_copy: iser data copy buffer desc (bounce buffer) * @prot: iser protection buffer desc - * @prot_copy: iser protection copy buffer desc (bounce buffer) */ struct iscsi_iser_task { struct iser_tx_desc desc; @@ -549,9 +550,7 @@ struct iscsi_iser_task { int dir[ISER_DIRS_NUM]; struct iser_regd_buf rdma_regd[ISER_DIRS_NUM]; struct iser_data_buf data[ISER_DIRS_NUM]; - struct iser_data_buf data_copy[ISER_DIRS_NUM]; struct iser_data_buf prot[ISER_DIRS_NUM]; - struct iser_data_buf prot_copy[ISER_DIRS_NUM]; }; struct iser_page_vec { @@ -621,7 +620,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn); void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, - struct iser_data_buf *mem_copy, enum iser_data_dir cmd_dir); int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 76eb57b31a599..0e414dbaa5233 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -401,13 +401,13 @@ int iser_send_command(struct iscsi_conn *conn, } if (scsi_sg_count(sc)) { /* using a scatter list */ - data_buf->buf = scsi_sglist(sc); + data_buf->sg = scsi_sglist(sc); data_buf->size = scsi_sg_count(sc); } data_buf->data_len = scsi_bufflen(sc); if (scsi_prot_sg_count(sc)) { - prot_buf->buf = scsi_prot_sglist(sc); + prot_buf->sg = scsi_prot_sglist(sc); prot_buf->size = scsi_prot_sg_count(sc); prot_buf->data_len = (data_buf->data_len >> ilog2(sc->device->sector_size)) * 8; @@ -674,35 +674,31 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) /* if we were reading, copy back to unaligned sglist, * anyway dma_unmap and free the copy */ - if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { + if (iser_task->data[ISER_DIR_IN].copy_buf) { is_rdma_data_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->data[ISER_DIR_IN], - &iser_task->data_copy[ISER_DIR_IN], ISER_DIR_IN); } - if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { + if (iser_task->data[ISER_DIR_OUT].copy_buf) { is_rdma_data_aligned = 0; 
iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->data[ISER_DIR_OUT], - &iser_task->data_copy[ISER_DIR_OUT], ISER_DIR_OUT); } - if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) { + if (iser_task->prot[ISER_DIR_IN].copy_buf) { is_rdma_prot_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->prot[ISER_DIR_IN], - &iser_task->prot_copy[ISER_DIR_IN], ISER_DIR_IN); } - if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) { + if (iser_task->prot[ISER_DIR_OUT].copy_buf) { is_rdma_prot_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->prot[ISER_DIR_OUT], - &iser_task->prot_copy[ISER_DIR_OUT], ISER_DIR_OUT); } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 32ccd5cea6759..beeabd0e05c18 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -46,11 +46,10 @@ */ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, - struct iser_data_buf *data_copy, enum iser_data_dir cmd_dir) { struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; - struct scatterlist *sgl = (struct scatterlist *)data->buf; + struct scatterlist *sgl = data->sg; struct scatterlist *sg; char *mem = NULL; unsigned long cmd_data_len = data->data_len; @@ -72,7 +71,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, /* copy the unaligned sg the buffer which is used for RDMA */ char *p, *from; - sgl = (struct scatterlist *)data->buf; + sgl = data->sg; p = mem; for_each_sg(sgl, sg, data->size, i) { from = kmap_atomic(sg_page(sg)); @@ -84,18 +83,16 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, } } - sg_init_one(&data_copy->sg_single, mem, cmd_data_len); - data_copy->buf = &data_copy->sg_single; - data_copy->size = 1; - data_copy->copy_buf = mem; - - dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1, + sg_init_one(&data->sg_single, mem, cmd_data_len); + data->orig_sg = data->sg; + data->sg = &data->sg_single; + data->copy_buf = mem; + dma_nents = ib_dma_map_sg(dev, data->sg, 1, (cmd_dir == ISER_DIR_OUT) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); BUG_ON(dma_nents == 0); - data_copy->dma_nents = dma_nents; - data_copy->data_len = cmd_data_len; + data->dma_nents = dma_nents; return 0; } @@ -106,7 +103,6 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, - struct iser_data_buf *data_copy, enum iser_data_dir cmd_dir) { struct ib_device *dev; @@ -114,7 +110,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, dev = iser_task->iser_conn->ib_conn.device->ib_device; - ib_dma_unmap_sg(dev, &data_copy->sg_single, 1, + ib_dma_unmap_sg(dev, data->sg, 1, (cmd_dir == ISER_DIR_OUT) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -126,9 +122,9 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, int i; /* copy back read RDMA to unaligned sg */ - mem = data_copy->copy_buf; + mem = data->copy_buf; - sgl = (struct scatterlist *)data->buf; + sgl = data->sg; sg_size = data->size; p = mem; @@ -145,12 +141,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, cmd_data_len = data->data_len; if (cmd_data_len > ISER_KMALLOC_THRESHOLD) - free_pages((unsigned long)data_copy->copy_buf, + free_pages((unsigned long)data->copy_buf, ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); else - kfree(data_copy->copy_buf); + kfree(data->copy_buf); - data_copy->copy_buf = NULL; + data->copy_buf = NULL; } #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) @@ -172,7 +168,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, struct ib_device *ibdev, u64 *pages, int *offset, int *data_size) { - struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf; + struct scatterlist *sg, *sgl = data->sg; u64 start_addr, end_addr, page, chunk_start = 0; unsigned long total_sz = 0; unsigned int dma_len; @@ -224,14 +220,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, static int iser_data_buf_aligned_len(struct iser_data_buf *data, struct ib_device *ibdev) { - struct scatterlist *sgl, *sg, *next_sg = NULL; + struct scatterlist *sg, *sgl, *next_sg = NULL; u64 start_addr, end_addr; int i, ret_len, start_check = 0; if (data->dma_nents == 1) return 1; - sgl = (struct scatterlist *)data->buf; + sgl = data->sg; start_addr = ib_sg_dma_address(ibdev, sgl); for_each_sg(sgl, sg, data->dma_nents, i) { @@ -263,11 +259,10 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data, static void iser_data_buf_dump(struct iser_data_buf *data, struct ib_device *ibdev) { - struct scatterlist *sgl = (struct scatterlist *)data->buf; struct scatterlist *sg; int i; - for_each_sg(sgl, sg, data->dma_nents, i) + for_each_sg(data->sg, sg, data->dma_nents, i) iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p " "off:0x%x sz:0x%x dma_len:0x%x\n", i, (unsigned long)ib_sg_dma_address(ibdev, sg), @@ -320,7 +315,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, iser_task->dir[iser_dir] = 1; dev = iser_task->iser_conn->ib_conn.device->ib_device; - data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); + data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir); if (data->dma_nents == 0) { iser_err("dma_map_sg failed!!!\n"); return -EINVAL; @@ -335,13 +330,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, struct ib_device *dev; dev = iser_task->iser_conn->ib_conn.device->ib_device; - ib_dma_unmap_sg(dev, data->buf, data->size, dir); + ib_dma_unmap_sg(dev, data->sg, data->size, dir); } static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, struct ib_device *ibdev, struct iser_data_buf *mem, - struct iser_data_buf *mem_copy, enum iser_data_dir cmd_dir, int aligned_len) { @@ -361,7 +355,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, /* allocate copy buf, if we are writing, copy the */ /* unaligned scatterlist, dma map the copy */ - if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0) + if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0) return -ENOMEM; return 0; @@ -391,18 +385,16 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { 
err = fall_to_bounce_buf(iser_task, ibdev, mem, - &iser_task->data_copy[cmd_dir], cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); return err; } - mem = &iser_task->data_copy[cmd_dir]; } /* if there a single dma entry, FMR is not needed */ if (mem->dma_nents == 1) { - sg = (struct scatterlist *)mem->buf; + sg = mem->sg; regd_buf->reg.lkey = device->mr->lkey; regd_buf->reg.rkey = device->mr->rkey; @@ -592,7 +584,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, /* if there a single dma entry, dma mr suffices */ if (mem->dma_nents == 1) { - struct scatterlist *sg = (struct scatterlist *)mem->buf; + struct scatterlist *sg = mem->sg; sge->lkey = device->mr->lkey; sge->addr = ib_sg_dma_address(ibdev, &sg[0]); @@ -678,13 +670,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { err = fall_to_bounce_buf(iser_task, ibdev, mem, - &iser_task->data_copy[cmd_dir], cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); return err; } - mem = &iser_task->data_copy[cmd_dir]; } if (mem->dma_nents != 1 || @@ -711,13 +701,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { err = fall_to_bounce_buf(iser_task, ibdev, mem, - &iser_task->prot_copy[cmd_dir], cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); return err; } - mem = &iser_task->prot_copy[cmd_dir]; } err = iser_fast_reg_mr(iser_task, regd_buf, mem, -- cgit 1.2.3-korg From 56408325900d380f6544460c5892bfeb0616cab3 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:16 +0300 Subject: IB/iser: Don't pass ib_device to fall_to_bounce_buff routine No need to pass that, we can take it from the task. In a later stage, this function will be invoked according to a device capability. This patch does not change any functionality. 
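The device is already reachable from the task itself, so the parameter carries no extra information; the pointer chain used below is simply:

        struct iser_device *device = iser_task->iser_conn->ib_conn.device;

        /* device->ib_device is the same struct ib_device the callers used to pass in */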
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index beeabd0e05c18..9c60ff1d82a2e 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -334,19 +334,19 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, } static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, - struct ib_device *ibdev, struct iser_data_buf *mem, enum iser_data_dir cmd_dir, int aligned_len) { - struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; + struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; + struct iser_device *device = iser_task->iser_conn->ib_conn.device; iscsi_conn->fmr_unalign_cnt++; iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", aligned_len, mem->size); if (iser_debug_level > 0) - iser_data_buf_dump(mem, ibdev); + iser_data_buf_dump(mem, device->ib_device); /* unmap the command data before accessing it */ iser_dma_unmap_task_data(iser_task, mem, @@ -384,7 +384,7 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, ibdev, mem, + err = fall_to_bounce_buf(iser_task, mem, cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); @@ -669,7 +669,7 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, ibdev, mem, + err = fall_to_bounce_buf(iser_task, mem, cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); @@ -700,7 +700,7 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, mem = &iser_task->prot[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { - err = fall_to_bounce_buf(iser_task, ibdev, mem, + err = fall_to_bounce_buf(iser_task, mem, cmd_dir, aligned_len); if (err) { iser_err("failed to allocate bounce buffer\n"); -- cgit 1.2.3-korg From d03e61d0366c61f596ada66ab11c217e7f887d15 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:17 +0300 Subject: IB/iser: Move memory reg/dereg routines to iser_memory.c As memory registration/de-registration methods, lets move them to their natural location. While we're at it, make iser_reg_page_vec routine static. This patch does not change any functionality. 
Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 4 -- drivers/infiniband/ulp/iser/iser_memory.c | 88 +++++++++++++++++++++++++++++++ drivers/infiniband/ulp/iser/iser_verbs.c | 87 ------------------------------ 3 files changed, 88 insertions(+), 91 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 5c7036c90766f..d5e5288fe3dd4 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -632,10 +632,6 @@ int iser_connect(struct iser_conn *iser_conn, struct sockaddr *dst_addr, int non_blocking); -int iser_reg_page_vec(struct ib_conn *ib_conn, - struct iser_page_vec *page_vec, - struct iser_mem_reg *mem_reg); - void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir); void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9c60ff1d82a2e..4e0cbbb671ccf 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -361,6 +361,94 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, return 0; } +/** + * iser_reg_page_vec - Register physical memory + * + * returns: 0 on success, errno code on failure + */ +static +int iser_reg_page_vec(struct ib_conn *ib_conn, + struct iser_page_vec *page_vec, + struct iser_mem_reg *mem_reg) +{ + struct ib_pool_fmr *mem; + u64 io_addr; + u64 *page_list; + int status; + + page_list = page_vec->pages; + io_addr = page_list[0]; + + mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, + page_list, + page_vec->length, + io_addr); + + if (IS_ERR(mem)) { + status = (int)PTR_ERR(mem); + iser_err("ib_fmr_pool_map_phys failed: %d\n", status); + return status; + } + + mem_reg->lkey = mem->fmr->lkey; + mem_reg->rkey = mem->fmr->rkey; + mem_reg->len = page_vec->length * SIZE_4K; + mem_reg->va = io_addr; + mem_reg->mem_h = (void *)mem; + + mem_reg->va += page_vec->offset; + mem_reg->len = page_vec->data_size; + + iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " + "entry[0]: (0x%08lx,%ld)] -> " + "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n", + page_vec, page_vec->length, + (unsigned long)page_vec->pages[0], + (unsigned long)page_vec->data_size, + (unsigned int)mem_reg->lkey, mem_reg->mem_h, + (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); + return 0; +} + +/** + * Unregister (previosuly registered using FMR) memory. + * If memory is non-FMR does nothing. 
+ */ +void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; + int ret; + + if (!reg->mem_h) + return; + + iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h); + + ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); + if (ret) + iser_err("ib_fmr_pool_unmap failed %d\n", ret); + + reg->mem_h = NULL; +} + +void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, + enum iser_data_dir cmd_dir) +{ + struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; + struct iser_conn *iser_conn = iser_task->iser_conn; + struct ib_conn *ib_conn = &iser_conn->ib_conn; + struct fast_reg_descriptor *desc = reg->mem_h; + + if (!desc) + return; + + reg->mem_h = NULL; + spin_lock_bh(&ib_conn->lock); + list_add_tail(&desc->list, &ib_conn->fastreg.pool); + spin_unlock_bh(&ib_conn->lock); +} + /** * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA, * using FMR (if possible) obtaining rkey and va diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 7ee4926925d31..986b5f4823eac 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -992,93 +992,6 @@ connect_failure: return err; } -/** - * iser_reg_page_vec - Register physical memory - * - * returns: 0 on success, errno code on failure - */ -int iser_reg_page_vec(struct ib_conn *ib_conn, - struct iser_page_vec *page_vec, - struct iser_mem_reg *mem_reg) -{ - struct ib_pool_fmr *mem; - u64 io_addr; - u64 *page_list; - int status; - - page_list = page_vec->pages; - io_addr = page_list[0]; - - mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, - page_list, - page_vec->length, - io_addr); - - if (IS_ERR(mem)) { - status = (int)PTR_ERR(mem); - iser_err("ib_fmr_pool_map_phys failed: %d\n", status); - return status; - } - - mem_reg->lkey = mem->fmr->lkey; - mem_reg->rkey = mem->fmr->rkey; - mem_reg->len = page_vec->length * SIZE_4K; - mem_reg->va = io_addr; - mem_reg->mem_h = (void *)mem; - - mem_reg->va += page_vec->offset; - mem_reg->len = page_vec->data_size; - - iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " - "entry[0]: (0x%08lx,%ld)] -> " - "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n", - page_vec, page_vec->length, - (unsigned long)page_vec->pages[0], - (unsigned long)page_vec->data_size, - (unsigned int)mem_reg->lkey, mem_reg->mem_h, - (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); - return 0; -} - -/** - * Unregister (previosuly registered using FMR) memory. - * If memory is non-FMR does nothing. 
- */ -void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir) -{ - struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; - int ret; - - if (!reg->mem_h) - return; - - iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h); - - ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h); - if (ret) - iser_err("ib_fmr_pool_unmap failed %d\n", ret); - - reg->mem_h = NULL; -} - -void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, - enum iser_data_dir cmd_dir) -{ - struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; - struct iser_conn *iser_conn = iser_task->iser_conn; - struct ib_conn *ib_conn = &iser_conn->ib_conn; - struct fast_reg_descriptor *desc = reg->mem_h; - - if (!desc) - return; - - reg->mem_h = NULL; - spin_lock_bh(&ib_conn->lock); - list_add_tail(&desc->list, &ib_conn->fastreg.pool); - spin_unlock_bh(&ib_conn->lock); -} - int iser_post_recvl(struct iser_conn *iser_conn) { struct ib_recv_wr rx_wr, *rx_wr_failed; -- cgit 1.2.3-korg From 6847fdeb0bcd2eade5b8183d3d2857c4fd7b70d5 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:18 +0300 Subject: IB/iser: Remove redundant assignments in iser_reg_page_vec Buffer length was assigned twice, and no reason to set va to io_addr and then add the offset, just set va to io_addr + offset. This patch does not change any functionality. Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 4e0cbbb671ccf..cb308650b94a2 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -392,12 +392,9 @@ int iser_reg_page_vec(struct ib_conn *ib_conn, mem_reg->lkey = mem->fmr->lkey; mem_reg->rkey = mem->fmr->rkey; - mem_reg->len = page_vec->length * SIZE_4K; - mem_reg->va = io_addr; - mem_reg->mem_h = (void *)mem; - - mem_reg->va += page_vec->offset; mem_reg->len = page_vec->data_size; + mem_reg->va = io_addr + page_vec->offset; + mem_reg->mem_h = (void *)mem; iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " "entry[0]: (0x%08lx,%ld)] -> " -- cgit 1.2.3-korg From b130ededff03fcee9411be379fd24b5e840c5e9e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:19 +0300 Subject: IB/iser: Get rid of struct iser_rdma_regd This struct members other than struct iser_mem_reg are unused, so remove it altogether. This patch does not change any functionality. 
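The visible effect is one less level of indirection when a task's registration info is touched; using the field names from the hunks below, a typical access changes roughly as follows (rkey here is just a local placeholder):

        u32 rkey;

        /* before: struct iser_mem_reg wrapped inside struct iser_regd_buf */
        rkey = iser_task->rdma_regd[cmd_dir].reg.rkey;

        /* after: struct iser_mem_reg embedded in the task directly */
        rkey = iser_task->rdma_reg[cmd_dir].rkey;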
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 21 +---------- drivers/infiniband/ulp/iser/iser_initiator.c | 44 +++++++++++----------- drivers/infiniband/ulp/iser/iser_memory.c | 56 ++++++++++++++-------------- drivers/infiniband/ulp/iser/iser_verbs.c | 2 +- 4 files changed, 53 insertions(+), 70 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index d5e5288fe3dd4..8133678b62182 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -261,23 +261,6 @@ struct iser_mem_reg { void *mem_h; }; -/** - * struct iser_regd_buf - iSER buffer registration desc - * - * @reg: memory registration info - * @virt_addr: virtual address of buffer - * @device: reference to iser device - * @direction: dma direction (for dma_unmap) - * @data_size: data buffer size in bytes - */ -struct iser_regd_buf { - struct iser_mem_reg reg; - void *virt_addr; - struct iser_device *device; - enum dma_data_direction direction; - unsigned int data_size; -}; - enum iser_desc_type { ISCSI_TX_CONTROL , ISCSI_TX_SCSI_COMMAND, @@ -537,7 +520,7 @@ struct iser_conn { * @sc: link to scsi command * @command_sent: indicate if command was sent * @dir: iser data direction - * @rdma_regd: task rdma registration desc + * @rdma_reg: task rdma registration desc * @data: iser data buffer desc * @prot: iser protection buffer desc */ @@ -548,7 +531,7 @@ struct iscsi_iser_task { struct scsi_cmnd *sc; int command_sent; int dir[ISER_DIRS_NUM]; - struct iser_regd_buf rdma_regd[ISER_DIRS_NUM]; + struct iser_mem_reg rdma_reg[ISER_DIRS_NUM]; struct iser_data_buf data[ISER_DIRS_NUM]; struct iser_data_buf prot[ISER_DIRS_NUM]; }; diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 0e414dbaa5233..420a613fe9f59 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -50,7 +50,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; struct iser_device *device = iser_task->iser_conn->ib_conn.device; - struct iser_regd_buf *regd_buf; + struct iser_mem_reg *mem_reg; int err; struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN]; @@ -78,15 +78,15 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) iser_err("Failed to set up Data-IN RDMA\n"); return err; } - regd_buf = &iser_task->rdma_regd[ISER_DIR_IN]; + mem_reg = &iser_task->rdma_reg[ISER_DIR_IN]; hdr->flags |= ISER_RSV; - hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey); - hdr->read_va = cpu_to_be64(regd_buf->reg.va); + hdr->read_stag = cpu_to_be32(mem_reg->rkey); + hdr->read_va = cpu_to_be64(mem_reg->va); iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", - task->itt, regd_buf->reg.rkey, - (unsigned long long)regd_buf->reg.va); + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->va); return 0; } @@ -104,7 +104,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, { struct iscsi_iser_task *iser_task = task->dd_data; struct iser_device *device = iser_task->iser_conn->ib_conn.device; - struct iser_regd_buf *regd_buf; + struct iser_mem_reg *mem_reg; int err; struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; @@ -134,25 +134,25 @@ iser_prepare_write_cmd(struct iscsi_task *task, return err; } - regd_buf = 
&iser_task->rdma_regd[ISER_DIR_OUT]; + mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; if (unsol_sz < edtl) { hdr->flags |= ISER_WSV; - hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey); - hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz); + hdr->write_stag = cpu_to_be32(mem_reg->rkey); + hdr->write_va = cpu_to_be64(mem_reg->va + unsol_sz); iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " "VA:%#llX + unsol:%d\n", - task->itt, regd_buf->reg.rkey, - (unsigned long long)regd_buf->reg.va, unsol_sz); + task->itt, mem_reg->rkey, + (unsigned long long)mem_reg->va, unsol_sz); } if (imm_sz > 0) { iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", task->itt, imm_sz); - tx_dsg->addr = regd_buf->reg.va; + tx_dsg->addr = mem_reg->va; tx_dsg->length = imm_sz; - tx_dsg->lkey = regd_buf->reg.lkey; + tx_dsg->lkey = mem_reg->lkey; iser_task->desc.num_sge = 2; } @@ -450,7 +450,7 @@ int iser_send_data_out(struct iscsi_conn *conn, struct iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; struct iser_tx_desc *tx_desc = NULL; - struct iser_regd_buf *regd_buf; + struct iser_mem_reg *mem_reg; unsigned long buf_offset; unsigned long data_seg_len; uint32_t itt; @@ -477,11 +477,11 @@ int iser_send_data_out(struct iscsi_conn *conn, /* build the tx desc */ iser_initialize_task_headers(task, tx_desc); - regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT]; + mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; tx_dsg = &tx_desc->tx_sg[1]; - tx_dsg->addr = regd_buf->reg.va + buf_offset; + tx_dsg->addr = mem_reg->va + buf_offset; tx_dsg->length = data_seg_len; - tx_dsg->lkey = regd_buf->reg.lkey; + tx_dsg->lkey = mem_reg->lkey; tx_desc->num_sge = 2; if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { @@ -658,10 +658,10 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) iser_task->prot[ISER_DIR_IN].data_len = 0; iser_task->prot[ISER_DIR_OUT].data_len = 0; - memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, - sizeof(struct iser_regd_buf)); - memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, - sizeof(struct iser_regd_buf)); + memset(&iser_task->rdma_reg[ISER_DIR_IN], 0, + sizeof(struct iser_mem_reg)); + memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0, + sizeof(struct iser_mem_reg)); } void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index cb308650b94a2..0b8656fcbc0d8 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -414,7 +414,7 @@ int iser_reg_page_vec(struct ib_conn *ib_conn, void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { - struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; int ret; if (!reg->mem_h) @@ -432,7 +432,7 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { - struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; struct iser_conn *iser_conn = iser_task->iser_conn; struct ib_conn *ib_conn = &iser_conn->ib_conn; struct fast_reg_descriptor *desc = reg->mem_h; @@ -459,13 +459,13 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; struct iser_data_buf *mem = &iser_task->data[cmd_dir]; - struct iser_regd_buf 
*regd_buf; + struct iser_mem_reg *mem_reg; int aligned_len; int err; int i; struct scatterlist *sg; - regd_buf = &iser_task->rdma_regd[cmd_dir]; + mem_reg = &iser_task->rdma_reg[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { @@ -481,21 +481,21 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, if (mem->dma_nents == 1) { sg = mem->sg; - regd_buf->reg.lkey = device->mr->lkey; - regd_buf->reg.rkey = device->mr->rkey; - regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); - regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); + mem_reg->lkey = device->mr->lkey; + mem_reg->rkey = device->mr->rkey; + mem_reg->len = ib_sg_dma_len(ibdev, &sg[0]); + mem_reg->va = ib_sg_dma_address(ibdev, &sg[0]); iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " "va: 0x%08lX sz: %ld]\n", - (unsigned int)regd_buf->reg.lkey, - (unsigned int)regd_buf->reg.rkey, - (unsigned long)regd_buf->reg.va, - (unsigned long)regd_buf->reg.len); + (unsigned int)mem_reg->lkey, + (unsigned int)mem_reg->rkey, + (unsigned long)mem_reg->va, + (unsigned long)mem_reg->len); } else { /* use FMR for multiple dma entries */ iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev); err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec, - ®d_buf->reg); + mem_reg); if (err && err != -EAGAIN) { iser_data_buf_dump(mem, ibdev); iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", @@ -652,12 +652,12 @@ err: } static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, - struct iser_regd_buf *regd_buf, + struct iser_mem_reg *mem_reg, struct iser_data_buf *mem, enum iser_reg_indicator ind, struct ib_sge *sge) { - struct fast_reg_descriptor *desc = regd_buf->reg.mem_h; + struct fast_reg_descriptor *desc = mem_reg->mem_h; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; @@ -746,7 +746,7 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; struct iser_data_buf *mem = &iser_task->data[cmd_dir]; - struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; + struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir]; struct fast_reg_descriptor *desc = NULL; struct ib_sge data_sge; int err, aligned_len; @@ -769,10 +769,10 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, struct fast_reg_descriptor, list); list_del(&desc->list); spin_unlock_irqrestore(&ib_conn->lock, flags); - regd_buf->reg.mem_h = desc; + mem_reg->mem_h = desc; } - err = iser_fast_reg_mr(iser_task, regd_buf, mem, + err = iser_fast_reg_mr(iser_task, mem_reg, mem, ISER_DATA_KEY_VALID, &data_sge); if (err) goto err_reg; @@ -793,7 +793,7 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, } } - err = iser_fast_reg_mr(iser_task, regd_buf, mem, + err = iser_fast_reg_mr(iser_task, mem_reg, mem, ISER_PROT_KEY_VALID, &prot_sge); if (err) goto err_reg; @@ -807,19 +807,19 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, } desc->reg_indicators |= ISER_FASTREG_PROTECTED; - regd_buf->reg.lkey = sig_sge.lkey; - regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey; - regd_buf->reg.va = sig_sge.addr; - regd_buf->reg.len = sig_sge.length; + mem_reg->lkey = sig_sge.lkey; + mem_reg->rkey = desc->pi_ctx->sig_mr->rkey; + mem_reg->va = sig_sge.addr; + mem_reg->len = sig_sge.length; } else { if (desc) - regd_buf->reg.rkey = desc->data_mr->rkey; + mem_reg->rkey = desc->data_mr->rkey; 
else - regd_buf->reg.rkey = device->mr->rkey; + mem_reg->rkey = device->mr->rkey; - regd_buf->reg.lkey = data_sge.lkey; - regd_buf->reg.va = data_sge.addr; - regd_buf->reg.len = data_sge.length; + mem_reg->lkey = data_sge.lkey; + mem_reg->va = data_sge.addr; + mem_reg->len = data_sge.length; } return 0; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 986b5f4823eac..20eec09dc86aa 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -1220,7 +1220,7 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context) u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir, sector_t *sector) { - struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; + struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; struct fast_reg_descriptor *desc = reg->mem_h; unsigned long sector_size = iser_task->sc->device->sector_size; struct ib_mr_status mr_status; -- cgit 1.2.3-korg From f0e35c27a5962cbd17fc58737c7f5f7b398bccb1 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:20 +0300 Subject: IB/iser: Merge build page-vec into register page-vec No need for these two separate. Keep it in a single routine like in the fastreg case. This will also make iser_reg_page_vec closer to iser_fast_reg_mr arguments. This is a preparation step for registration flow refactor. This patch does not change any functionality. Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 91 +++++++++++-------------------- 1 file changed, 33 insertions(+), 58 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 0b8656fcbc0d8..6e6b75319bf99 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -280,31 +280,6 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]); } -static void iser_page_vec_build(struct iser_data_buf *data, - struct iser_page_vec *page_vec, - struct ib_device *ibdev) -{ - int page_vec_len = 0; - - page_vec->length = 0; - page_vec->offset = 0; - - iser_dbg("Translating sg sz: %d\n", data->dma_nents); - page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages, - &page_vec->offset, - &page_vec->data_size); - iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len); - - page_vec->length = page_vec_len; - - if (page_vec_len * SIZE_4K < page_vec->data_size) { - iser_err("page_vec too short to hold this SG\n"); - iser_data_buf_dump(data, ibdev); - iser_dump_page_vec(page_vec); - BUG(); - } -} - int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, enum iser_data_dir iser_dir, @@ -367,43 +342,44 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, * returns: 0 on success, errno code on failure */ static -int iser_reg_page_vec(struct ib_conn *ib_conn, +int iser_reg_page_vec(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, struct iser_page_vec *page_vec, - struct iser_mem_reg *mem_reg) + struct iser_mem_reg *mem_reg) { - struct ib_pool_fmr *mem; - u64 io_addr; - u64 *page_list; - int status; - - page_list = page_vec->pages; - io_addr = page_list[0]; + struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; + struct ib_pool_fmr *fmr; + int ret, plen; + + plen = 
iser_sg_to_page_vec(mem, device->ib_device, + page_vec->pages, + &page_vec->offset, + &page_vec->data_size); + page_vec->length = plen; + if (plen * SIZE_4K < page_vec->data_size) { + iser_err("page vec too short to hold this SG\n"); + iser_data_buf_dump(mem, device->ib_device); + iser_dump_page_vec(page_vec); + return -EINVAL; + } - mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, - page_list, + fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool, + page_vec->pages, page_vec->length, - io_addr); - - if (IS_ERR(mem)) { - status = (int)PTR_ERR(mem); - iser_err("ib_fmr_pool_map_phys failed: %d\n", status); - return status; + page_vec->pages[0]); + if (IS_ERR(fmr)) { + ret = PTR_ERR(fmr); + iser_err("ib_fmr_pool_map_phys failed: %d\n", ret); + return ret; } - mem_reg->lkey = mem->fmr->lkey; - mem_reg->rkey = mem->fmr->rkey; - mem_reg->len = page_vec->data_size; - mem_reg->va = io_addr + page_vec->offset; - mem_reg->mem_h = (void *)mem; - - iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, " - "entry[0]: (0x%08lx,%ld)] -> " - "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n", - page_vec, page_vec->length, - (unsigned long)page_vec->pages[0], - (unsigned long)page_vec->data_size, - (unsigned int)mem_reg->lkey, mem_reg->mem_h, - (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); + mem_reg->lkey = fmr->fmr->lkey; + mem_reg->rkey = fmr->fmr->rkey; + mem_reg->va = page_vec->pages[0] + page_vec->offset; + mem_reg->len = page_vec->data_size; + mem_reg->mem_h = fmr; + return 0; } @@ -493,8 +469,7 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, (unsigned long)mem_reg->va, (unsigned long)mem_reg->len); } else { /* use FMR for multiple dma entries */ - iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev); - err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec, + err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec, mem_reg); if (err && err != -EAGAIN) { iser_data_buf_dump(mem, ibdev); -- cgit 1.2.3-korg From bd8b944eeeb06f5dac705d2357af3800395174eb Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:21 +0300 Subject: IB/iser: Move fastreg descriptor pool get/put to helper functions Instead of open-coding connection fastreg pool get/put, we introduce iser_reg_desc[get|put] helpers. We aren't setting these static as this will be a per-device routine later on. Also, cleanup iser_unreg_rdma_mem_fastreg a bit. This patch does not change any functionality. 
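With the helpers in place, the registration path shrinks to a get/put pair around the actual work request (names as introduced in the hunks below):

        /* take a fastreg descriptor from the connection pool */
        desc = iser_reg_desc_get(ib_conn);
        mem_reg->mem_h = desc;

        /* ... post the fast registration work request ... */

        /* and return it on unregister or on the error path */
        iser_reg_desc_put(ib_conn, desc);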
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 5 ++++ drivers/infiniband/ulp/iser/iser_memory.c | 50 ++++++++++++++++++++----------- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 8133678b62182..185d2ec0e3d8c 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -644,4 +644,9 @@ int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max); void iser_free_fastreg_pool(struct ib_conn *ib_conn); u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir, sector_t *sector); +struct fast_reg_descriptor * +iser_reg_desc_get(struct ib_conn *ib_conn); +void +iser_reg_desc_put(struct ib_conn *ib_conn, + struct fast_reg_descriptor *desc); #endif diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 6e6b75319bf99..17a5d70dcc8a3 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -41,6 +41,32 @@ #define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ +struct fast_reg_descriptor * +iser_reg_desc_get(struct ib_conn *ib_conn) +{ + struct fast_reg_descriptor *desc; + unsigned long flags; + + spin_lock_irqsave(&ib_conn->lock, flags); + desc = list_first_entry(&ib_conn->fastreg.pool, + struct fast_reg_descriptor, list); + list_del(&desc->list); + spin_unlock_irqrestore(&ib_conn->lock, flags); + + return desc; +} + +void +iser_reg_desc_put(struct ib_conn *ib_conn, + struct fast_reg_descriptor *desc) +{ + unsigned long flags; + + spin_lock_irqsave(&ib_conn->lock, flags); + list_add_tail(&desc->list, &ib_conn->fastreg.pool); + spin_unlock_irqrestore(&ib_conn->lock, flags); +} + /** * iser_start_rdma_unaligned_sg */ @@ -409,17 +435,13 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; - struct iser_conn *iser_conn = iser_task->iser_conn; - struct ib_conn *ib_conn = &iser_conn->ib_conn; - struct fast_reg_descriptor *desc = reg->mem_h; - if (!desc) + if (!reg->mem_h) return; + iser_reg_desc_put(&iser_task->iser_conn->ib_conn, + reg->mem_h); reg->mem_h = NULL; - spin_lock_bh(&ib_conn->lock); - list_add_tail(&desc->list, &ib_conn->fastreg.pool); - spin_unlock_bh(&ib_conn->lock); } /** @@ -725,7 +747,6 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, struct fast_reg_descriptor *desc = NULL; struct ib_sge data_sge; int err, aligned_len; - unsigned long flags; aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { @@ -739,11 +760,7 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, if (mem->dma_nents != 1 || scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { - spin_lock_irqsave(&ib_conn->lock, flags); - desc = list_first_entry(&ib_conn->fastreg.pool, - struct fast_reg_descriptor, list); - list_del(&desc->list); - spin_unlock_irqrestore(&ib_conn->lock, flags); + desc = iser_reg_desc_get(ib_conn); mem_reg->mem_h = desc; } @@ -799,11 +816,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, return 0; err_reg: - if (desc) { - spin_lock_irqsave(&ib_conn->lock, flags); - list_add_tail(&desc->list, &ib_conn->fastreg.pool); - spin_unlock_irqrestore(&ib_conn->lock, flags); - } + if (desc) + iser_reg_desc_put(ib_conn, desc); return err; } -- cgit 
1.2.3-korg From 4dec2a27e385ac753d754af69f344625f5c50576 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:22 +0300 Subject: IB/iser: Move PI context alloc/free to routines Make iser_[create|destroy]_fastreg_desc shorter, more readable and easily extendable. This patch does not change any functionality. Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_verbs.c | 118 +++++++++++++++++-------------- 1 file changed, 63 insertions(+), 55 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 20eec09dc86aa..cc2dd35ffbc08 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -273,6 +273,65 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn) ib_conn->fmr.page_vec = NULL; } +static int +iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd, + struct fast_reg_descriptor *desc) +{ + struct iser_pi_context *pi_ctx = NULL; + struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2, + .flags = IB_MR_SIGNATURE_EN}; + int ret = 0; + + desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); + if (!desc->pi_ctx) + return -ENOMEM; + + pi_ctx = desc->pi_ctx; + + pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, + ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(pi_ctx->prot_frpl)) { + ret = PTR_ERR(pi_ctx->prot_frpl); + goto prot_frpl_failure; + } + + pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, + ISCSI_ISER_SG_TABLESIZE + 1); + if (IS_ERR(pi_ctx->prot_mr)) { + ret = PTR_ERR(pi_ctx->prot_mr); + goto prot_mr_failure; + } + desc->reg_indicators |= ISER_PROT_KEY_VALID; + + pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); + if (IS_ERR(pi_ctx->sig_mr)) { + ret = PTR_ERR(pi_ctx->sig_mr); + goto sig_mr_failure; + } + desc->reg_indicators |= ISER_SIG_KEY_VALID; + desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; + + return 0; + +sig_mr_failure: + ib_dereg_mr(desc->pi_ctx->prot_mr); +prot_mr_failure: + ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); +prot_frpl_failure: + kfree(desc->pi_ctx); + + return ret; +} + +static void +iser_free_pi_ctx(struct iser_pi_context *pi_ctx) +{ + ib_free_fast_reg_page_list(pi_ctx->prot_frpl); + ib_dereg_mr(pi_ctx->prot_mr); + ib_destroy_mr(pi_ctx->sig_mr); + kfree(pi_ctx); +} + static int iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, bool pi_enable, struct fast_reg_descriptor *desc) @@ -297,59 +356,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, desc->reg_indicators |= ISER_DATA_KEY_VALID; if (pi_enable) { - struct ib_mr_init_attr mr_init_attr = {0}; - struct iser_pi_context *pi_ctx = NULL; - - desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); - if (!desc->pi_ctx) { - iser_err("Failed to allocate pi context\n"); - ret = -ENOMEM; + ret = iser_alloc_pi_ctx(ib_device, pd, desc); + if (ret) goto pi_ctx_alloc_failure; - } - pi_ctx = desc->pi_ctx; - - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_frpl)) { - ret = PTR_ERR(pi_ctx->prot_frpl); - iser_err("Failed to allocate prot frpl ret=%d\n", - ret); - goto prot_frpl_failure; - } - - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, - ISCSI_ISER_SG_TABLESIZE + 1); - if (IS_ERR(pi_ctx->prot_mr)) { - ret = PTR_ERR(pi_ctx->prot_mr); - iser_err("Failed to allocate prot frmr ret=%d\n", - ret); - goto prot_mr_failure; - } - desc->reg_indicators |= ISER_PROT_KEY_VALID; - - mr_init_attr.max_reg_descriptors 
= 2; - mr_init_attr.flags |= IB_MR_SIGNATURE_EN; - pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); - if (IS_ERR(pi_ctx->sig_mr)) { - ret = PTR_ERR(pi_ctx->sig_mr); - iser_err("Failed to allocate signature enabled mr err=%d\n", - ret); - goto sig_mr_failure; - } - desc->reg_indicators |= ISER_SIG_KEY_VALID; } - desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; - - iser_dbg("Create fr_desc %p page_list %p\n", - desc, desc->data_frpl->page_list); return 0; -sig_mr_failure: - ib_dereg_mr(desc->pi_ctx->prot_mr); -prot_mr_failure: - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); -prot_frpl_failure: - kfree(desc->pi_ctx); pi_ctx_alloc_failure: ib_dereg_mr(desc->data_mr); fast_reg_mr_failure: @@ -416,12 +428,8 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn) list_del(&desc->list); ib_free_fast_reg_page_list(desc->data_frpl); ib_dereg_mr(desc->data_mr); - if (desc->pi_ctx) { - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); - ib_dereg_mr(desc->pi_ctx->prot_mr); - ib_destroy_mr(desc->pi_ctx->sig_mr); - kfree(desc->pi_ctx); - } + if (desc->pi_ctx) + iser_free_pi_ctx(desc->pi_ctx); kfree(desc); ++i; } -- cgit 1.2.3-korg From 8b95aa2c1bf8c936e5b0e9096b180a3e4f5327ff Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:23 +0300 Subject: IB/iser: Make fastreg pool cache friendly Memory regions are resources that are saved in the device caches. Increase the probability for a cache hit by adding the MRU descriptor to pool head. Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 17a5d70dcc8a3..45f512043ef52 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -63,7 +63,7 @@ iser_reg_desc_put(struct ib_conn *ib_conn, unsigned long flags; spin_lock_irqsave(&ib_conn->lock, flags); - list_add_tail(&desc->list, &ib_conn->fastreg.pool); + list_add(&desc->list, &ib_conn->fastreg.pool); spin_unlock_irqrestore(&ib_conn->lock, flags); } -- cgit 1.2.3-korg From 90a6684c30918786446fd062c7422a4098926891 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:24 +0300 Subject: IB/iser: Modify struct iser_mem_reg members No need to keep lkey, va, len variables, we can keep them as struct ib_sge. This will help when we change the memory registration logic. This patch does not change any functionality. 
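Conceptually, the registration result is now kept in the exact shape the send path consumes (the right-hand values below are placeholders for whatever the registration produced):

        /* before: three loose fields that were later copied into an ib_sge */
        mem_reg->lkey = lkey;
        mem_reg->va   = addr;
        mem_reg->len  = length;

        /* after: a ready-to-use struct ib_sge plus the remote key */
        mem_reg->sge.lkey   = lkey;
        mem_reg->sge.addr   = addr;
        mem_reg->sge.length = length;
        mem_reg->rkey       = rkey;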
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 14 +++++-------- drivers/infiniband/ulp/iser/iser_initiator.c | 18 ++++++++--------- drivers/infiniband/ulp/iser/iser_memory.c | 30 ++++++++++++++-------------- 3 files changed, 29 insertions(+), 33 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 185d2ec0e3d8c..5fd09636fbee9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -247,18 +247,14 @@ struct iscsi_endpoint; /** * struct iser_mem_reg - iSER memory registration info * - * @lkey: MR local key - * @rkey: MR remote key - * @va: MR start address (buffer va) - * @len: MR length + * @sge: memory region sg element + * @rkey: memory region remote key * @mem_h: pointer to registration context (FMR/Fastreg) */ struct iser_mem_reg { - u32 lkey; - u32 rkey; - u64 va; - u64 len; - void *mem_h; + struct ib_sge sge; + u32 rkey; + void *mem_h; }; enum iser_desc_type { diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 420a613fe9f59..b2e3b77340b53 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -82,11 +82,11 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) hdr->flags |= ISER_RSV; hdr->read_stag = cpu_to_be32(mem_reg->rkey); - hdr->read_va = cpu_to_be64(mem_reg->va); + hdr->read_va = cpu_to_be64(mem_reg->sge.addr); iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n", task->itt, mem_reg->rkey, - (unsigned long long)mem_reg->va); + (unsigned long long)mem_reg->sge.addr); return 0; } @@ -139,20 +139,20 @@ iser_prepare_write_cmd(struct iscsi_task *task, if (unsol_sz < edtl) { hdr->flags |= ISER_WSV; hdr->write_stag = cpu_to_be32(mem_reg->rkey); - hdr->write_va = cpu_to_be64(mem_reg->va + unsol_sz); + hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " "VA:%#llX + unsol:%d\n", task->itt, mem_reg->rkey, - (unsigned long long)mem_reg->va, unsol_sz); + (unsigned long long)mem_reg->sge.addr, unsol_sz); } if (imm_sz > 0) { iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", task->itt, imm_sz); - tx_dsg->addr = mem_reg->va; + tx_dsg->addr = mem_reg->sge.addr; tx_dsg->length = imm_sz; - tx_dsg->lkey = mem_reg->lkey; + tx_dsg->lkey = mem_reg->sge.lkey; iser_task->desc.num_sge = 2; } @@ -479,9 +479,9 @@ int iser_send_data_out(struct iscsi_conn *conn, mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; tx_dsg = &tx_desc->tx_sg[1]; - tx_dsg->addr = mem_reg->va + buf_offset; - tx_dsg->length = data_seg_len; - tx_dsg->lkey = mem_reg->lkey; + tx_dsg->addr = mem_reg->sge.addr + buf_offset; + tx_dsg->length = data_seg_len; + tx_dsg->lkey = mem_reg->sge.lkey; tx_desc->num_sge = 2; if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 45f512043ef52..40d22d5c3fac6 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -400,10 +400,10 @@ int iser_reg_page_vec(struct iscsi_iser_task *iser_task, return ret; } - mem_reg->lkey = fmr->fmr->lkey; + mem_reg->sge.lkey = fmr->fmr->lkey; mem_reg->rkey = fmr->fmr->rkey; - mem_reg->va = page_vec->pages[0] + page_vec->offset; - mem_reg->len = page_vec->data_size; + mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset; + 
mem_reg->sge.length = page_vec->data_size; mem_reg->mem_h = fmr; return 0; @@ -479,17 +479,17 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, if (mem->dma_nents == 1) { sg = mem->sg; - mem_reg->lkey = device->mr->lkey; + mem_reg->sge.lkey = device->mr->lkey; mem_reg->rkey = device->mr->rkey; - mem_reg->len = ib_sg_dma_len(ibdev, &sg[0]); - mem_reg->va = ib_sg_dma_address(ibdev, &sg[0]); + mem_reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]); + mem_reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]); iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " "va: 0x%08lX sz: %ld]\n", - (unsigned int)mem_reg->lkey, + (unsigned int)mem_reg->sge.lkey, (unsigned int)mem_reg->rkey, - (unsigned long)mem_reg->va, - (unsigned long)mem_reg->len); + (unsigned long)mem_reg->sge.addr, + (unsigned long)mem_reg->sge.length); } else { /* use FMR for multiple dma entries */ err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec, mem_reg); @@ -799,19 +799,19 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, } desc->reg_indicators |= ISER_FASTREG_PROTECTED; - mem_reg->lkey = sig_sge.lkey; + mem_reg->sge.lkey = sig_sge.lkey; mem_reg->rkey = desc->pi_ctx->sig_mr->rkey; - mem_reg->va = sig_sge.addr; - mem_reg->len = sig_sge.length; + mem_reg->sge.addr = sig_sge.addr; + mem_reg->sge.length = sig_sge.length; } else { if (desc) mem_reg->rkey = desc->data_mr->rkey; else mem_reg->rkey = device->mr->rkey; - mem_reg->lkey = data_sge.lkey; - mem_reg->va = data_sge.addr; - mem_reg->len = data_sge.length; + mem_reg->sge.lkey = data_sge.lkey; + mem_reg->sge.addr = data_sge.addr; + mem_reg->sge.length = data_sge.length; } return 0; -- cgit 1.2.3-korg From 6ef8bb837dd7e60f078084d4842a43fd163fb4a2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:25 +0300 Subject: IB/iser: Pass struct iser_mem_reg to iser_fast_reg_mr and iser_reg_sig_mr Instead of passing ib_sge as output variable, we pass the mem_reg pointer to have the routines fill the rkey as well. This reduces code duplication and extra assignments. This is a preparation step to unify some registration logics together. Also, pass iser_fast_reg_mr the fastreg descriptor directly. This patch does not change any functionality. 
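At the call sites (see the hunks below) the separate output ib_sge and the hand-rolled rkey fix-up collapse into a single argument:

        /* before: fill an ib_sge, then patch mem_reg->rkey in the caller */
        err = iser_fast_reg_mr(iser_task, mem_reg, mem,
                               ISER_DATA_KEY_VALID, &data_sge);

        /* after: the routine fills mem_reg->sge and mem_reg->rkey itself */
        err = iser_fast_reg_mr(iser_task, mem, desc,
                               ISER_DATA_KEY_VALID, mem_reg);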
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 75 +++++++++++++------------------ 1 file changed, 32 insertions(+), 43 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 40d22d5c3fac6..0575052d9f8ff 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -590,8 +590,10 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, - struct fast_reg_descriptor *desc, struct ib_sge *data_sge, - struct ib_sge *prot_sge, struct ib_sge *sig_sge) + struct fast_reg_descriptor *desc, + struct iser_mem_reg *data_reg, + struct iser_mem_reg *prot_reg, + struct iser_mem_reg *sig_reg) { struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_pi_context *pi_ctx = desc->pi_ctx; @@ -615,12 +617,12 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, memset(&sig_wr, 0, sizeof(sig_wr)); sig_wr.opcode = IB_WR_REG_SIG_MR; sig_wr.wr_id = ISER_FASTREG_LI_WRID; - sig_wr.sg_list = data_sge; + sig_wr.sg_list = &data_reg->sge; sig_wr.num_sge = 1; sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; if (scsi_prot_sg_count(iser_task->sc)) - sig_wr.wr.sig_handover.prot = prot_sge; + sig_wr.wr.sig_handover.prot = &prot_reg->sge; sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; @@ -637,24 +639,24 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, } desc->reg_indicators &= ~ISER_SIG_KEY_VALID; - sig_sge->lkey = pi_ctx->sig_mr->lkey; - sig_sge->addr = 0; - sig_sge->length = scsi_transfer_length(iser_task->sc); + sig_reg->sge.lkey = pi_ctx->sig_mr->lkey; + sig_reg->rkey = pi_ctx->sig_mr->rkey; + sig_reg->sge.addr = 0; + sig_reg->sge.length = scsi_transfer_length(iser_task->sc); - iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n", - sig_sge->addr, sig_sge->length, - sig_sge->lkey); + iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n", + sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, + sig_reg->sge.length); err: return ret; } static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, - struct iser_mem_reg *mem_reg, struct iser_data_buf *mem, + struct fast_reg_descriptor *desc, enum iser_reg_indicator ind, - struct ib_sge *sge) + struct iser_mem_reg *reg) { - struct fast_reg_descriptor *desc = mem_reg->mem_h; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; struct ib_device *ibdev = device->ib_device; @@ -668,12 +670,13 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, if (mem->dma_nents == 1) { struct scatterlist *sg = mem->sg; - sge->lkey = device->mr->lkey; - sge->addr = ib_sg_dma_address(ibdev, &sg[0]); - sge->length = ib_sg_dma_len(ibdev, &sg[0]); + reg->sge.lkey = device->mr->lkey; + reg->rkey = device->mr->rkey; + reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]); + reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]); iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n", - sge->lkey, sge->addr, sge->length); + reg->sge.lkey, reg->sge.addr, reg->sge.length); return 0; } @@ -723,9 +726,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, } desc->reg_indicators &= ~ind; - sge->lkey = mr->lkey; - sge->addr = frpl->page_list[0] + offset; - sge->length = size; + reg->sge.lkey = mr->lkey; + reg->rkey = mr->rkey; + 
reg->sge.addr = frpl->page_list[0] + offset; + reg->sge.length = size; return ret; } @@ -745,7 +749,6 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir]; struct fast_reg_descriptor *desc = NULL; - struct ib_sge data_sge; int err, aligned_len; aligned_len = iser_data_buf_aligned_len(mem, ibdev); @@ -764,15 +767,15 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, mem_reg->mem_h = desc; } - err = iser_fast_reg_mr(iser_task, mem_reg, mem, - ISER_DATA_KEY_VALID, &data_sge); + err = iser_fast_reg_mr(iser_task, mem, desc, + ISER_DATA_KEY_VALID, mem_reg); if (err) goto err_reg; if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { - struct ib_sge prot_sge, sig_sge; + struct iser_mem_reg prot_reg; - memset(&prot_sge, 0, sizeof(prot_sge)); + memset(&prot_reg, 0, sizeof(prot_reg)); if (scsi_prot_sg_count(iser_task->sc)) { mem = &iser_task->prot[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem, ibdev); @@ -785,33 +788,19 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, } } - err = iser_fast_reg_mr(iser_task, mem_reg, mem, - ISER_PROT_KEY_VALID, &prot_sge); + err = iser_fast_reg_mr(iser_task, mem, desc, + ISER_PROT_KEY_VALID, &prot_reg); if (err) goto err_reg; } - err = iser_reg_sig_mr(iser_task, desc, &data_sge, - &prot_sge, &sig_sge); + err = iser_reg_sig_mr(iser_task, desc, mem_reg, + &prot_reg, mem_reg); if (err) { iser_err("Failed to register signature mr\n"); return err; } desc->reg_indicators |= ISER_FASTREG_PROTECTED; - - mem_reg->sge.lkey = sig_sge.lkey; - mem_reg->rkey = desc->pi_ctx->sig_mr->rkey; - mem_reg->sge.addr = sig_sge.addr; - mem_reg->sge.length = sig_sge.length; - } else { - if (desc) - mem_reg->rkey = desc->data_mr->rkey; - else - mem_reg->rkey = device->mr->rkey; - - mem_reg->sge.lkey = data_sge.lkey; - mem_reg->sge.addr = data_sge.addr; - mem_reg->sge.length = data_sge.length; } return 0; -- cgit 1.2.3-korg From ad1e5672429eadd5a7f9613bf2e59546a2eeef13 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:26 +0300 Subject: IB/iser: Remove code duplication for a single DMA entry In singleton scatterlists, DMA memory registration code is taken both for Fastreg and FMR code paths. Move it to a function. This patch does not change any functionality. 
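The de-duplication above can be pictured with a short standalone sketch (simplified stand-in types, not the in-kernel iser structures): when the mapped buffer collapses to a single DMA entry, both the FMR and the Fastreg flavors short-circuit to one shared helper that publishes the device-wide DMA MR keys, which is the role iser_reg_dma() takes in the patch.

#include <stdint.h>
#include <stdio.h>

struct sge     { uint64_t addr; uint32_t length; uint32_t lkey; };
struct mem_reg { struct sge sge; uint32_t rkey; };
struct dma_ent { uint64_t addr; uint32_t len; };
struct dev_mr  { uint32_t lkey; uint32_t rkey; };	/* device-wide DMA MR keys */

/* Shared single-entry path (what iser_reg_dma() provides in the patch). */
static int reg_dma(const struct dev_mr *dev, const struct dma_ent *e,
		   struct mem_reg *reg)
{
	reg->sge.lkey = dev->lkey;
	reg->rkey = dev->rkey;
	reg->sge.addr = e->addr;
	reg->sge.length = e->len;
	return 0;
}

static int reg_fmr(const struct dev_mr *dev, const struct dma_ent *ents,
		   int nents, struct mem_reg *reg)
{
	if (nents == 1)			/* single DMA entry: no FMR needed */
		return reg_dma(dev, &ents[0], reg);
	return -1;			/* multi-entry FMR path elided */
}

static int reg_fastreg(const struct dev_mr *dev, const struct dma_ent *ents,
		       int nents, struct mem_reg *reg)
{
	if (nents == 1)			/* same short-circuit, same helper */
		return reg_dma(dev, &ents[0], reg);
	return -1;			/* fast-registration path elided */
}

int main(void)
{
	struct dev_mr dev = { 0x11, 0x22 };
	struct dma_ent ent = { 0x1000, 4096 };
	struct mem_reg reg = { 0 };

	reg_fmr(&dev, &ent, 1, &reg);
	reg_fastreg(&dev, &ent, 1, &reg);
	printf("lkey=0x%x rkey=0x%x addr=0x%llx len=%u\n",
	       reg.sge.lkey, reg.rkey,
	       (unsigned long long)reg.sge.addr, (unsigned)reg.sge.length);
	return 0;
}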
Signed-off-by: Sagi Grimberg Signed-off-by: Adir Lev Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iser_memory.c | 48 ++++++++++++++----------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 0575052d9f8ff..abc979e62ae1e 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -334,6 +334,24 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ib_dma_unmap_sg(dev, data->sg, data->size, dir); } +static int +iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, + struct iser_mem_reg *reg) +{ + struct scatterlist *sg = mem->sg; + + reg->sge.lkey = device->mr->lkey; + reg->rkey = device->mr->rkey; + reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]); + reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]); + + iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx," + " length=0x%x\n", reg->sge.lkey, reg->rkey, + reg->sge.addr, reg->sge.length); + + return 0; +} + static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, struct iser_data_buf *mem, enum iser_data_dir cmd_dir, @@ -461,7 +479,6 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, int aligned_len; int err; int i; - struct scatterlist *sg; mem_reg = &iser_task->rdma_reg[cmd_dir]; @@ -477,19 +494,7 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, /* if there a single dma entry, FMR is not needed */ if (mem->dma_nents == 1) { - sg = mem->sg; - - mem_reg->sge.lkey = device->mr->lkey; - mem_reg->rkey = device->mr->rkey; - mem_reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]); - mem_reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]); - - iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " - "va: 0x%08lX sz: %ld]\n", - (unsigned int)mem_reg->sge.lkey, - (unsigned int)mem_reg->rkey, - (unsigned long)mem_reg->sge.addr, - (unsigned long)mem_reg->sge.length); + return iser_reg_dma(device, mem, mem_reg); } else { /* use FMR for multiple dma entries */ err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec, mem_reg); @@ -659,7 +664,6 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, { struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; - struct ib_device *ibdev = device->ib_device; struct ib_mr *mr; struct ib_fast_reg_page_list *frpl; struct ib_send_wr fastreg_wr, inv_wr; @@ -667,18 +671,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, int ret, offset, size, plen; /* if there a single dma entry, dma mr suffices */ - if (mem->dma_nents == 1) { - struct scatterlist *sg = mem->sg; - - reg->sge.lkey = device->mr->lkey; - reg->rkey = device->mr->rkey; - reg->sge.addr = ib_sg_dma_address(ibdev, &sg[0]); - reg->sge.length = ib_sg_dma_len(ibdev, &sg[0]); - - iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n", - reg->sge.lkey, reg->sge.addr, reg->sge.length); - return 0; - } + if (mem->dma_nents == 1) + return iser_reg_dma(device, mem, reg); if (ind == ISER_DATA_KEY_VALID) { mr = desc->data_mr; -- cgit 1.2.3-korg From 4fcd1470a05253236576a2d166ddd6d5b5c936fc Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:27 +0300 Subject: IB/iser: Bump version to 1.6 Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h 
b/drivers/infiniband/ulp/iser/iscsi_iser.h index 5fd09636fbee9..a39645a54d82d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -69,7 +69,7 @@ #define DRV_NAME "iser" #define PFX DRV_NAME ": " -#define DRV_VER "1.5" +#define DRV_VER "1.6" #define iser_dbg(fmt, arg...) \ do { \ -- cgit 1.2.3-korg From ba943fb237ea48b01e3229f10cdb2a4274978a2d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 14 Apr 2015 18:08:28 +0300 Subject: IB/iser: Rewrite bounce buffer code path In some rare cases, IO operations may be not aligned to page boundaries. This prevents iser from performing fast memory registration. In order to overcome that iser uses a bounce buffer to carry the transaction. We basically allocate a buffer in the size of the transaction and perform a copy. The buffer allocation using kmalloc is too restrictive since it requires higher order (atomic) allocations for large transactions (which may result in memory exhaustion fairly fast for some workloads). We rewrite the bounce buffer code path to allocate scattered pages and perform a copy between the transaction sg and the bounce sg. Reported-by: Alex Lyakas Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/ulp/iser/iscsi_iser.h | 8 +- drivers/infiniband/ulp/iser/iser_initiator.c | 8 +- drivers/infiniband/ulp/iser/iser_memory.c | 209 +++++++++++++++++---------- 3 files changed, 137 insertions(+), 88 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index a39645a54d82d..262ba1f8ee507 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -222,12 +222,9 @@ enum iser_data_dir { * @size: num entries of this sg * @data_len: total beffer byte len * @dma_nents: returned by dma_map_sg - * @copy_buf: allocated copy buf for SGs unaligned - * for rdma which are copied * @orig_sg: pointer to the original sg list (in case * we used a copy) - * @sg_single: SG-ified clone of a non SG SC or - * unaligned SG + * @orig_size: num entris of orig sg list */ struct iser_data_buf { struct scatterlist *sg; @@ -235,8 +232,7 @@ struct iser_data_buf { unsigned long data_len; unsigned int dma_nents; struct scatterlist *orig_sg; - char *copy_buf; - struct scatterlist sg_single; + unsigned int orig_size; }; /* fwd declarations */ diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index b2e3b77340b53..3e2118e8ed879 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -674,28 +674,28 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) /* if we were reading, copy back to unaligned sglist, * anyway dma_unmap and free the copy */ - if (iser_task->data[ISER_DIR_IN].copy_buf) { + if (iser_task->data[ISER_DIR_IN].orig_sg) { is_rdma_data_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->data[ISER_DIR_IN], ISER_DIR_IN); } - if (iser_task->data[ISER_DIR_OUT].copy_buf) { + if (iser_task->data[ISER_DIR_OUT].orig_sg) { is_rdma_data_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->data[ISER_DIR_OUT], ISER_DIR_OUT); } - if (iser_task->prot[ISER_DIR_IN].copy_buf) { + if (iser_task->prot[ISER_DIR_IN].orig_sg) { is_rdma_prot_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->prot[ISER_DIR_IN], ISER_DIR_IN); } - if (iser_task->prot[ISER_DIR_OUT].copy_buf) { + if (iser_task->prot[ISER_DIR_OUT].orig_sg) { 
is_rdma_prot_aligned = 0; iser_finalize_rdma_unaligned_sg(iser_task, &iser_task->prot[ISER_DIR_OUT], diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index abc979e62ae1e..f0cdc961eb11b 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -39,7 +39,112 @@ #include "iscsi_iser.h" -#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ +static void +iser_free_bounce_sg(struct iser_data_buf *data) +{ + struct scatterlist *sg; + int count; + + for_each_sg(data->sg, sg, data->size, count) + __free_page(sg_page(sg)); + + kfree(data->sg); + + data->sg = data->orig_sg; + data->size = data->orig_size; + data->orig_sg = NULL; + data->orig_size = 0; +} + +static int +iser_alloc_bounce_sg(struct iser_data_buf *data) +{ + struct scatterlist *sg; + struct page *page; + unsigned long length = data->data_len; + int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE); + + sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC); + if (!sg) + goto err; + + sg_init_table(sg, nents); + while (length) { + u32 page_len = min_t(u32, length, PAGE_SIZE); + + page = alloc_page(GFP_ATOMIC); + if (!page) + goto err; + + sg_set_page(&sg[i], page, page_len, 0); + length -= page_len; + i++; + } + + data->orig_sg = data->sg; + data->orig_size = data->size; + data->sg = sg; + data->size = nents; + + return 0; + +err: + for (; i > 0; i--) + __free_page(sg_page(&sg[i - 1])); + kfree(sg); + + return -ENOMEM; +} + +static void +iser_copy_bounce(struct iser_data_buf *data, bool to_buffer) +{ + struct scatterlist *osg, *bsg = data->sg; + void *oaddr, *baddr; + unsigned int left = data->data_len; + unsigned int bsg_off = 0; + int i; + + for_each_sg(data->orig_sg, osg, data->orig_size, i) { + unsigned int copy_len, osg_off = 0; + + oaddr = kmap_atomic(sg_page(osg)) + osg->offset; + copy_len = min(left, osg->length); + while (copy_len) { + unsigned int len = min(copy_len, bsg->length - bsg_off); + + baddr = kmap_atomic(sg_page(bsg)) + bsg->offset; + if (to_buffer) + memcpy(baddr + bsg_off, oaddr + osg_off, len); + else + memcpy(oaddr + osg_off, baddr + bsg_off, len); + + kunmap_atomic(baddr - bsg->offset); + osg_off += len; + bsg_off += len; + copy_len -= len; + + if (bsg_off >= bsg->length) { + bsg = sg_next(bsg); + bsg_off = 0; + } + } + kunmap_atomic(oaddr - osg->offset); + left -= osg_off; + } +} + +static inline void +iser_copy_from_bounce(struct iser_data_buf *data) +{ + iser_copy_bounce(data, false); +} + +static inline void +iser_copy_to_bounce(struct iser_data_buf *data) +{ + iser_copy_bounce(data, true); +} struct fast_reg_descriptor * iser_reg_desc_get(struct ib_conn *ib_conn) @@ -75,52 +180,32 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, enum iser_data_dir cmd_dir) { struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; - struct scatterlist *sgl = data->sg; - struct scatterlist *sg; - char *mem = NULL; - unsigned long cmd_data_len = data->data_len; - int dma_nents, i; - - if (cmd_data_len > ISER_KMALLOC_THRESHOLD) - mem = (void *)__get_free_pages(GFP_ATOMIC, - ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); - else - mem = kmalloc(cmd_data_len, GFP_ATOMIC); + int rc; - if (mem == NULL) { - iser_err("Failed to allocate mem size %d %d for copying sglist\n", - data->size, (int)cmd_data_len); - return -ENOMEM; + rc = iser_alloc_bounce_sg(data); + if (rc) { + iser_err("Failed to allocate bounce for data len %lu\n", + data->data_len); + return rc; } - if (cmd_dir == 
ISER_DIR_OUT) { - /* copy the unaligned sg the buffer which is used for RDMA */ - char *p, *from; - - sgl = data->sg; - p = mem; - for_each_sg(sgl, sg, data->size, i) { - from = kmap_atomic(sg_page(sg)); - memcpy(p, - from + sg->offset, - sg->length); - kunmap_atomic(from); - p += sg->length; - } - } - - sg_init_one(&data->sg_single, mem, cmd_data_len); - data->orig_sg = data->sg; - data->sg = &data->sg_single; - data->copy_buf = mem; - dma_nents = ib_dma_map_sg(dev, data->sg, 1, - (cmd_dir == ISER_DIR_OUT) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - BUG_ON(dma_nents == 0); + if (cmd_dir == ISER_DIR_OUT) + iser_copy_to_bounce(data); - data->dma_nents = dma_nents; + data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, + (cmd_dir == ISER_DIR_OUT) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!data->dma_nents) { + iser_err("Got dma_nents %d, something went wrong...\n", + data->dma_nents); + rc = -ENOMEM; + goto err; + } return 0; +err: + iser_free_bounce_sg(data); + return rc; } /** @@ -131,48 +216,16 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, enum iser_data_dir cmd_dir) { - struct ib_device *dev; - unsigned long cmd_data_len; - - dev = iser_task->iser_conn->ib_conn.device->ib_device; + struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; - ib_dma_unmap_sg(dev, data->sg, 1, + ib_dma_unmap_sg(dev, data->sg, data->size, (cmd_dir == ISER_DIR_OUT) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (cmd_dir == ISER_DIR_IN) { - char *mem; - struct scatterlist *sgl, *sg; - unsigned char *p, *to; - unsigned int sg_size; - int i; - - /* copy back read RDMA to unaligned sg */ - mem = data->copy_buf; - - sgl = data->sg; - sg_size = data->size; - - p = mem; - for_each_sg(sgl, sg, sg_size, i) { - to = kmap_atomic(sg_page(sg)); - memcpy(to + sg->offset, - p, - sg->length); - kunmap_atomic(to); - p += sg->length; - } - } - - cmd_data_len = data->data_len; - - if (cmd_data_len > ISER_KMALLOC_THRESHOLD) - free_pages((unsigned long)data->copy_buf, - ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); - else - kfree(data->copy_buf); + if (cmd_dir == ISER_DIR_IN) + iser_copy_from_bounce(data); - data->copy_buf = NULL; + iser_free_bounce_sg(data); } #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) -- cgit 1.2.3-korg From 59d2d18cc4e9ba30b370db18d0e02d792699da96 Mon Sep 17 00:00:00 2001 From: Honggang LI Date: Wed, 15 Apr 2015 16:36:15 +0800 Subject: mlx5: wrong page mask if CONFIG_ARCH_DMA_ADDR_T_64BIT enabled for 32Bit architectures If CONFIG_ARCH_DMA_ADDR_T_64BIT enabled for x86 systems and physical memory is more than 4GB, dma_map_page may return a valid memory address which greater than 0xffffffff. As a result, the mlx5 device page allocator RB tree will be initialized with valid addresses greater than 0xfffffff. However, (addr & PAGE_MASK) set the high four bytes to zeros. So, it's impossible for the function, free_4k, to release the pages whose addresses greater than 4GB. Memory leaks. And mlx5_ib module can't release the pages when user try to remove the module, as a result, system hang. 
[root@rdma05 root]# dmesg | grep addr | head addr = 3fe384000 addr & PAGE_MASK = fe384000 [root@rdma05 root]# rmmod mlx5_ib <---- hang on ---------------------- cosnole log ----------------- mlx5_ib 0000:04:00.0: irq 138 for MSI/MSI-X alloc irq_desc for 139 on node -1 alloc kstat_irqs on node -1 mlx5_ib 0000:04:00.0: irq 139 for MSI/MSI-X 0000:04:00.0:free_4k:221:(pid 1519): page not found 0000:04:00.0:free_4k:221:(pid 1519): page not found 0000:04:00.0:free_4k:221:(pid 1519): page not found 0000:04:00.0:free_4k:221:(pid 1519): page not found ---------------------- cosnole log ----------------- Fixes: bf0bf77f6519 ('mlx5: Support communicating arbitrary host page size to firmware') Signed-off-by: Honggang Li Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index df2238372ea73..8a64542abc161 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -211,26 +211,28 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) return 0; } +#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) + static void free_4k(struct mlx5_core_dev *dev, u64 addr) { struct fw_page *fwp; int n; - fwp = find_fw_page(dev, addr & PAGE_MASK); + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); if (!fwp) { mlx5_core_warn(dev, "page not found\n"); return; } - n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; + n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; fwp->free_count++; set_bit(n, &fwp->bitmask); if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count != 1) list_del(&fwp->list); - dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); } else if (fwp->free_count == 1) { -- cgit 1.2.3-korg
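To make the mask arithmetic in the patch above concrete, here is a standalone sketch (not kernel code) that reproduces the truncation with the address from the dmesg snippet; uint32_t stands in for the 32-bit unsigned long that PAGE_MASK is built from on such systems, and PAGE_SHIFT is assumed to be 12 (4K pages).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
/* How a 32-bit arch effectively builds PAGE_MASK: a 32-bit unsigned value. */
#define PAGE_MASK_32 (~((uint32_t)(1UL << PAGE_SHIFT) - 1))
/* The patch's 64-bit-safe mask. */
#define MLX5_U64_4K_PAGE_MASK ((~(uint64_t)0U) << PAGE_SHIFT)

int main(void)
{
	uint64_t addr = 0x3fe384000ULL;		/* DMA address above 4GB */

	/* The 32-bit mask zero-extends to 0x00000000fffff000, wiping the
	 * upper dword of the address. */
	printf("addr & PAGE_MASK(32-bit)     = 0x%llx\n",
	       (unsigned long long)(addr & PAGE_MASK_32));
	/* The u64 mask keeps the upper bits intact. */
	printf("addr & MLX5_U64_4K_PAGE_MASK = 0x%llx\n",
	       (unsigned long long)(addr & MLX5_U64_4K_PAGE_MASK));
	return 0;
}

The first line prints 0xfe384000, the truncated key that free_4k() looks up (and never finds) in the page RB tree; the second prints 0x3fe384000, matching the address that was actually inserted.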