Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/fw.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 407
 1 file changed, 233 insertions(+), 174 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 872066317fa5..45cb4f476e76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* CPU2 paging CSS
* CPU2 paging image (including instruction and data)
*/
- for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+ for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
sec_idx++;
break;
@@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* If paging is enabled there should be at least 2 more sections left
* (one for CSS and one for Paging data)
*/
- if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+ if (sec_idx >= image->num_sec - 1) {
IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
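
Both hunks above replace compile-time bounds (IWL_UCODE_SECTION_MAX and ARRAY_SIZE(image->sec)) with the runtime section count image->num_sec, so the scan for PAGING_SEPARATOR_SECTION only walks sections the loaded image actually contains, and the follow-up "at least CSS plus paging data" check is made against that same count.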
@@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
mvm->fw_paging_db[0].fw_paging_size);
+ dma_sync_single_for_device(mvm->trans->dev,
+ mvm->fw_paging_db[0].fw_paging_phys,
+ mvm->fw_paging_db[0].fw_paging_size,
+ DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d CSS bytes to first block\n",
@@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
* the loop stops at num_of_paging_blk since the last block is not full.
*/
for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
- memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+ memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
- mvm->fw_paging_db[idx].fw_paging_size);
+ block->fw_paging_size);
+ dma_sync_single_for_device(mvm->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+ DMA_BIDIRECTIONAL);
+
IWL_DEBUG_FW(mvm,
"Paging: copied %d paging bytes to block %d\n",
@@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
/* copy the last paging block */
if (mvm->num_of_pages_in_last_blk > 0) {
- memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+ struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+ memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset,
FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+ dma_sync_single_for_device(mvm->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+ DMA_BIDIRECTIONAL);
IWL_DEBUG_FW(mvm,
"Paging: copied %d pages in the last block %d\n",
@@ -259,9 +276,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
{
struct page *block;
dma_addr_t phys = 0;
- int blk_idx = 0;
- int order, num_of_pages;
- int dma_enabled;
+ int blk_idx, order, num_of_pages, size, dma_enabled;
if (mvm->fw_paging_db[0].fw_paging_block)
return 0;
@@ -272,9 +287,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
- mvm->num_of_paging_blk = ((num_of_pages - 1) /
- NUM_OF_PAGE_PER_GROUP) + 1;
-
+ mvm->num_of_paging_blk =
+ DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
mvm->num_of_pages_in_last_blk =
num_of_pages -
NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
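
DIV_ROUND_UP(n, d) is the kernel's (((n) + (d) - 1) / (d)), which agrees with the removed open-coded ((n - 1) / d) + 1 for every positive n, so this hunk is purely cosmetic. A standalone sanity check in plain user-space C (the group size of 8 pages is an assumption, matching a 32 KB block of 4 KB pages):

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define NUM_OF_PAGE_PER_GROUP 8  /* assumed: 32 KB block / 4 KB page */

    int main(void)
    {
            for (int n = 1; n <= (1 << 20); n++)
                    assert(DIV_ROUND_UP(n, NUM_OF_PAGE_PER_GROUP) ==
                           ((n - 1) / NUM_OF_PAGE_PER_GROUP) + 1);
            return 0;
    }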
@@ -284,46 +298,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
mvm->num_of_paging_blk,
mvm->num_of_pages_in_last_blk);
- /* allocate block of 4Kbytes for paging CSS */
- order = get_order(FW_PAGING_SIZE);
- block = alloc_pages(GFP_KERNEL, order);
- if (!block) {
- /* free all the previous pages since we failed */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
-
- mvm->fw_paging_db[blk_idx].fw_paging_block = block;
- mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
-
- if (dma_enabled) {
- phys = dma_map_page(mvm->trans->dev, block, 0,
- PAGE_SIZE << order, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(mvm->trans->dev, phys)) {
- /*
- * free the previous pages and the current one since
- * we failed to map_page.
- */
- iwl_free_fw_paging(mvm);
- return -ENOMEM;
- }
- mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
- } else {
- mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
- blk_idx << BLOCK_2_EXP_SIZE;
- }
-
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
- order);
-
/*
- * allocate blocks in dram.
- * since that CSS allocated in fw_paging_db[0] loop start from index 1
+ * Allocate CSS and paging blocks in DRAM.
*/
- for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- /* allocate block of PAGING_BLOCK_SIZE (32K) */
- order = get_order(PAGING_BLOCK_SIZE);
+ for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+ /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+ size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+ order = get_order(size);
block = alloc_pages(GFP_KERNEL, order);
if (!block) {
/* free all the previous pages since we failed */
@@ -332,7 +313,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
}
mvm->fw_paging_db[blk_idx].fw_paging_block = block;
- mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+ mvm->fw_paging_db[blk_idx].fw_paging_size = size;
if (dma_enabled) {
phys = dma_map_page(mvm->trans->dev, block, 0,
@@ -353,9 +334,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
blk_idx << BLOCK_2_EXP_SIZE;
}
- IWL_DEBUG_FW(mvm,
- "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
- order);
+ if (!blk_idx)
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+ order);
+ else
+ IWL_DEBUG_FW(mvm,
+ "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+ order);
}
return 0;
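
Folding the CSS allocation into the main loop removes roughly thirty duplicated lines; the only per-index differences left are the requested size (and hence the order handed to alloc_pages(): 0 for the 4 KB CSS block, 3 for the 32 KB paging blocks, assuming 4 KB pages) and the debug string. Since dma_map_page() still maps PAGE_SIZE << order bytes, each block's mapping exactly covers its allocation in both cases.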
@@ -475,80 +461,60 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
- struct mvm_alive_resp_ver1 *palive1;
- struct mvm_alive_resp_ver2 *palive2;
+ struct mvm_alive_resp_v3 *palive3;
struct mvm_alive_resp *palive;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u16 status;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+ } else {
+ palive3 = (void *)pkt->data;
+ umac = &palive3->umac_data;
+ lmac1 = &palive3->lmac_data;
+ status = le16_to_cpu(palive3->status);
+ }
- if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
- palive1 = (void *)pkt->data;
+ mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+ if (lmac2)
+ mvm->error_event_table[1] =
+ le32_to_cpu(lmac2->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+ mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
+ mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
- mvm->support_umac_log = false;
- mvm->error_event_table =
- le32_to_cpu(palive1->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive1->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
+ mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
- alive_data->valid = le16_to_cpu(palive1->status) ==
- IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive1->status), palive1->ver_type,
- palive1->ver_subtype, palive1->flags);
- } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
- palive2 = (void *)pkt->data;
-
- mvm->error_event_table =
- le32_to_cpu(palive2->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive2->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
- mvm->umac_error_event_table =
- le32_to_cpu(palive2->error_info_addr);
- mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
- mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
-
- alive_data->valid = le16_to_cpu(palive2->status) ==
- IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
+ alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+ if (mvm->umac_error_event_table)
+ mvm->support_umac_log = true;
- IWL_DEBUG_FW(mvm,
- "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive2->status), palive2->ver_type,
- palive2->ver_subtype, palive2->flags);
+ IWL_DEBUG_FW(mvm,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
- IWL_DEBUG_FW(mvm,
- "UMAC version: Major - 0x%x, Minor - 0x%x\n",
- palive2->umac_major, palive2->umac_minor);
- } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
- palive = (void *)pkt->data;
+ if (lmac2)
+ IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
- mvm->error_event_table =
- le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table =
- le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
- mvm->umac_error_event_table =
- le32_to_cpu(palive->error_info_addr);
- mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
- mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
-
- alive_data->valid = le16_to_cpu(palive->status) ==
- IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
- IWL_DEBUG_FW(mvm,
- "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ return true;
+}
- IWL_DEBUG_FW(mvm,
- "UMAC version: Major - 0x%x, Minor - 0x%x\n",
- le32_to_cpu(palive->umac_major),
- le32_to_cpu(palive->umac_minor));
- }
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
return true;
}
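
The rewritten alive handler collapses three near-identical version branches into one by normalizing both supported layouts into umac/lmac1/lmac2 pointers up front, still selecting the layout by payload length; only the newer, larger struct carries the second LMAC used by CDB devices. The dispatch shape, reduced to a self-contained sketch with hypothetical struct layouts:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layouts: the newer one appends a second LMAC block. */
    struct lmac_alive { uint32_t error_event_table_ptr; };
    struct alive_v3 { uint16_t status; struct lmac_alive lmac; };
    struct alive_v4 { uint16_t status; struct lmac_alive lmac[2]; };

    static void parse_alive(const void *data, size_t len)
    {
            const struct lmac_alive *lmac1, *lmac2 = NULL;
            uint16_t status;

            if (len == sizeof(struct alive_v4)) {
                    const struct alive_v4 *a = data;
                    lmac1 = &a->lmac[0];
                    lmac2 = &a->lmac[1];  /* only the newer layout has it */
                    status = a->status;
            } else {
                    const struct alive_v3 *a = data;
                    lmac1 = &a->lmac;
                    status = a->status;
            }
            /* One common tail consumes lmac1, the optional lmac2, status. */
            (void)lmac1; (void)lmac2; (void)status;
    }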
@@ -568,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
+static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
+{
+ const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
+ int ret;
+
+ /*
+ * Configure and operate fw paging mechanism.
+ * The driver configures the paging flow only once.
+ * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+ */
+ if (!fw->paging_mem_size)
+ return 0;
+
+ /*
+ * When dma is not enabled, the driver needs to copy / write
+ * the downloaded / uploaded page to / from the smem.
+ * This gets the location of the place where the pages are
+ * stored.
+ */
+ if (!is_device_dma_capable(mvm->trans->dev)) {
+ ret = iwl_trans_get_paging_item(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "failed to get FW paging item\n");
+ return ret;
+ }
+ }
+
+ ret = iwl_save_fw_paging(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to save the FW paging image\n");
+ return ret;
+ }
+
+ ret = iwl_send_paging_cmd(mvm, fw);
+ if (ret) {
+ IWL_ERR(mvm, "failed to send the paging cmd\n");
+ iwl_free_fw_paging(mvm);
+ return ret;
+ }
+
+ return 0;
+}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
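
Extracting iwl_mvm_init_paging() (the body moved verbatim out of iwl_mvm_load_ucode_wait_alive(), as the deletion hunk below shows) lets the new unified-image flow in iwl_run_unified_mvm_ucode() and the legacy flow in iwl_mvm_load_rt_fw() share the same paging bring-up instead of duplicating it.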
@@ -639,40 +647,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
- * configure and operate fw paging mechanism.
- * driver configures the paging flow only once, CPU2 paging image
- * included in the IWL_UCODE_INIT image.
- */
- if (fw->paging_mem_size) {
- /*
- * When dma is not enabled, the driver needs to copy / write
- * the downloaded / uploaded page to / from the smem.
- * This gets the location of the place were the pages are
- * stored.
- */
- if (!is_device_dma_capable(mvm->trans->dev)) {
- ret = iwl_trans_get_paging_item(mvm);
- if (ret) {
- IWL_ERR(mvm, "failed to get FW paging item\n");
- return ret;
- }
- }
-
- ret = iwl_save_fw_paging(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to save the FW paging image\n");
- return ret;
- }
-
- ret = iwl_send_paging_cmd(mvm, fw);
- if (ret) {
- IWL_ERR(mvm, "failed to send the paging cmd\n");
- iwl_free_fw_paging(mvm);
- return ret;
- }
- }
-
- /*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
* could be stopped, so wake them up. In firmware restart,
@@ -829,6 +803,75 @@ out:
return ret;
}
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ /* TODO: remove when integrating context info */
+ ret = iwl_mvm_init_paging(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to init paging: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (read_nvm) {
+ /* Read nvm */
+ ret = iwl_nvm_init(mvm, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto error;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name)
+ iwl_mvm_load_nvm_to_nic(mvm);
+
+ ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+ if (WARN_ON(ret))
+ goto error;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE), 0,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
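
iwl_run_unified_mvm_ucode() above follows the driver's standard notification-wait lifecycle: register the waiter before anything that could trigger the notification, then either consume it with iwl_wait_notification() or tear it down with iwl_remove_notification() on every error path. The skeleton, with the firmware-triggering step abstracted into a hypothetical trigger_fw():

    /* Sketch of the notification-wait lifecycle; trigger_fw() is a
     * placeholder for whatever provokes the awaited notification. */
    static int wait_for_init_complete(struct iwl_mvm *mvm)
    {
            static const u16 cmds[] = { INIT_COMPLETE_NOTIF };
            struct iwl_notification_wait wait;
            int ret;

            /* Register first: a notification racing the trigger is caught. */
            iwl_init_notification_wait(&mvm->notif_wait, &wait, cmds,
                                       ARRAY_SIZE(cmds),
                                       iwl_wait_init_complete, NULL);

            ret = trigger_fw(mvm);  /* hypothetical */
            if (ret) {
                    /* Error path: never leave a registered waiter behind. */
                    iwl_remove_notification(&mvm->notif_wait, &wait);
                    return ret;
            }

            /* Block until the notification arrives or the timeout hits. */
            return iwl_wait_notification(&mvm->notif_wait, &wait,
                                         MVM_UCODE_ALIVE_TIMEOUT);
    }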
@@ -1089,23 +1132,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_up(struct iwl_mvm *mvm)
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
- int ret, i;
- struct ieee80211_channel *chan;
- struct cfg80211_chan_def chandef;
-
- lockdep_assert_held(&mvm->mutex);
+ int ret;
- ret = iwl_trans_start_hw(mvm->trans);
- if (ret)
- return ret;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_run_unified_mvm_ucode(mvm, false);
- /*
- * If we haven't completed the run of the init ucode during
- * module loading, load init ucode now
- * (for example, if we were in RFKILL)
- */
ret = iwl_run_init_mvm_ucode(mvm, false);
if (iwlmvm_mod_params.init_dbg)
@@ -1116,7 +1149,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
- goto error;
+ return ret;
}
/*
@@ -1127,9 +1160,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
_iwl_trans_stop_device(mvm->trans, false);
ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
- goto error;
+ return ret;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_init_paging(mvm);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def chandef;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
goto error;
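
With this split, iwl_mvm_up() no longer needs to know which flow applies: iwl_mvm_load_rt_fw() routes devices using the new TX API (unified images) to iwl_run_unified_mvm_ucode() and keeps the init-then-runtime two-step, including the transport stop/restart in between, for everything else. The phy_db/phy_cfg commands below are likewise skipped on the unified path, presumably because the unified image handles that setup itself.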
@@ -1156,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Send phy db control command and then phy db calibration*/
- ret = iwl_send_phy_db_data(mvm->phy_db);
- if (ret)
- goto error;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
- ret = iwl_send_phy_cfg_cmd(mvm);
- if (ret)
- goto error;
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+ }
/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
@@ -1348,4 +1402,9 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->image_size));
}
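
As with the alive handler, the new image-size read is gated on the payload length, so older firmware whose MFUART notification lacks the image_size field can never cause an out-of-bounds access.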