/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#include <linux/io.h>
#include <linux/of.h>
#include <linux/blkdev.h>
+#include <linux/spinlock.h>
#include <crypto/ice.h>
#include "ufs-qcom-ice.h"
+/*
+ * Deferred ICE configuration worker. Scheduled from the I/O dispatch
+ * path when config_start could not run in atomic context; performs the
+ * (possibly sleeping) config_start call for the deferred request, then
+ * clears req_pending under ice_work_lock and resumes SCSI request
+ * processing on the host.
+ */
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
+ unsigned long flags;
struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
+/*
+ * NOTE(review): req_pending is read here without holding ice_work_lock
+ * and the config_start return value is ignored — assumed safe because
+ * requests stay blocked until this worker runs and failures are handled
+ * by retrying the request; confirm against the dispatch path.
+ */
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, &ice_set, false);
+/* Clear the slot under the lock so a new request may be deferred again. */
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ qcom_host->req_pending = NULL;
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
/*
* Resume with requests processing. We assume config_start has been
* successful, but even if it wasn't we still must resume in order to
* allow for the request to be retried.
*/
ufshcd_scsi_unblock_requests(qcom_host->hba);
+
}
/**
struct ice_data_setting ice_set;
char cmd_op = cmd->cmnd[0];
int err;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- qcom_host->req_pending = cmd->request;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
+ qcom_host->hba);
+ return err;
+ }
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
} else {
- dev_err(qcom_host->hba->dev,
- "%s: error in ice_vops->config %d\n",
- __func__, err);
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
return err;
unsigned int bypass = 0;
struct request *req;
char cmd_op;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
* request processing.
*/
if (err == -EAGAIN) {
- qcom_host->req_pending = req;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ dev_dbg(qcom_host->hba->dev,
+ "%s: scheduling task for ice setup\n",
+ __func__);
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
+ qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
qcom_host->hba);
+ return err;
+ }
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
+ } else {
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
- goto out;
+
+ return err;
}
}
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
struct task_struct *thread_pending;
enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
int scm_error;
};
if (entry_exists) {
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
+
+ if (async)
+ entry->loaded_ref_cnt++;
+
break;
}
case (FREE):
entry->scm_error = ret;
pr_err("%s: key load error (%d)\n", __func__, ret);
} else {
- entry->state = ACTIVE_ICE_LOADED;
kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+ * only increase ref cnt for async calls,
+ * sync calls from within work thread do not pass
+ * requests further to HW
+ */
+ if (async)
+ entry->loaded_ref_cnt++;
+
}
break;
case (ACTIVE_ICE_PRELOAD):
break;
case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry);
+
+ if (async)
+ entry->loaded_ref_cnt++;
+
break;
case(SCM_ERROR):
ret = entry->scm_error;
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
if (!kc_is_ready())
return;
if (!entry) {
kc_spin_unlock();
pr_err("internal error, there should an entry to unlock\n");
+
return;
}
- entry->state = INACTIVE;
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
- /* wake-up invalidation if it's waiting for the entry to be released */
- if (entry->thread_pending) {
- wake_up_process(entry->thread_pending);
- entry->thread_pending = NULL;
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
}
kc_spin_unlock();