Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 236
1 file changed, 105 insertions, 131 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4622c4ea2e46..99933e89717a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -38,8 +38,6 @@
 const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "4.1.0-k"
-const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2018 Intel Corporation.";
@@ -81,7 +79,6 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -249,8 +246,9 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
 /**
  * ixgbevf_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
+ * @txqueue: transmit queue hanging (unused)
  **/
-static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -783,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
-static inline bool ixgbevf_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbevf_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
@@ -869,10 +862,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data);
+
 	/* Note, we get here by enabling legacy-rx via:
 	 *
 	 *    ethtool --set-priv-flags <dev> legacy-rx on
@@ -950,13 +941,10 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
 	 * have a consumer accessing first few bytes of meta data,
 	 * and then actual data.
 	 */
-	prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
-	prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -1066,7 +1054,6 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
 	struct bpf_prog *xdp_prog;
 	u32 act;
 
-	rcu_read_lock();
 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 
 	if (!xdp_prog)
@@ -1079,35 +1066,49 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
 	case XDP_TX:
 		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
 		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+		if (result == IXGBEVF_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	default:
-		bpf_warn_invalid_xdp_action(act);
-		/* fallthrough */
+		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+		fallthrough;
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		/* fallthrough -- handle aborts by dropping packet */
+		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
	}
xdp_out:
-	rcu_read_unlock();
	return ERR_PTR(-result);
}

+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+					      unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+	truesize = ring_uses_build_skb(rx_ring) ?
+		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		   SKB_DATA_ALIGN(size);
+#endif
+	return truesize;
+}
+
 static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
				   struct ixgbevf_rx_buffer *rx_buffer,
				   unsigned int size)
 {
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
 
+#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
 #else
-	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
-				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
-				SKB_DATA_ALIGN(size);
-
	rx_buffer->page_offset += truesize;
 #endif
 }
@@ -1116,14 +1117,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	bool xdp_xmit = false;
	struct xdp_buff xdp;
 
-	xdp.rxq = &rx_ring->xdp_rxq;
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
	while (likely(total_rx_packets < budget)) {
		struct ixgbevf_rx_buffer *rx_buffer;
@@ -1151,13 +1156,16 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
		/* retrieve a buffer from the ring */
		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      ixgbevf_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
-
+			unsigned int offset = ixgbevf_rx_offset(rx_ring);
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
		}
@@ -1976,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;
 
-	set_ring_build_skb_enabled(rx_ring);
+	if (PAGE_SIZE < 8192)
+		if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+			set_ring_uses_large_buffer(rx_ring);
 
-	if (PAGE_SIZE < 8192) {
-		if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
-			return;
+	/* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+	if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+		return;
 
-		set_ring_uses_large_buffer(rx_ring);
-	}
+	set_ring_build_skb_enabled(rx_ring);
 }
 
 /**
@@ -2258,6 +2267,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
	struct ixgbe_hw *hw = &adapter->hw;
	static const int api[] = {
+		ixgbe_mbox_api_15,
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
@@ -2276,13 +2286,21 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
		idx++;
	}
 
+	if (hw->api_version >= ixgbe_mbox_api_15) {
+		hw->mbx.ops.init_params(hw);
+		memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+		       sizeof(struct ixgbe_mbx_operations));
+	}
+
	spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
	struct ixgbe_hw *hw = &adapter->hw;
+	bool state;
 
	ixgbevf_configure_msix(adapter);
@@ -2295,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
	spin_unlock_bh(&adapter->mbx_lock);
 
+	state = adapter->link_state;
+	hw->mac.ops.get_link_state(hw, &adapter->link_state);
+	if (state && state != adapter->link_state)
+		dev_info(&pdev->dev, "VF is administratively disabled\n");
+
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);
@@ -2509,8 +2532,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 {
-	WARN_ON(in_interrupt());
-
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);
 
@@ -2534,7 +2555,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
	}
 
	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
-		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}
 
@@ -2585,7 +2606,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
  * important, starting with the "most" number of features turned on at once,
  * and ending with the smallest set of features.  This way large combinations
  * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
  *
 **/
 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
@@ -2621,6 +2642,7 @@
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
+		case ixgbe_mbox_api_15:
			if (adapter->xdp_prog &&
			    hw->mac.max_tx_queues == rss)
				rss = rss > 3 ? 2 : 1;
@@ -2628,6 +2650,7 @@
			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
+			break;
		default:
			break;
		}
@@ -2710,7 +2733,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
		return -ENOMEM;
 
	/* initialize NAPI */
-	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
 
	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
@@ -2737,7 +2760,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
		ring->reg_idx = reg_idx;
 
		/* assign ring to adapter */
-		 adapter->tx_ring[txr_idx] = ring;
+		adapter->tx_ring[txr_idx] = ring;
 
		/* update count and index */
		txr_count--;
@@ -3047,7 +3070,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
-		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+		eth_hw_addr_set(netdev, hw->mac.addr);
	}
 
	if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -3065,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
 
+	adapter->link_state = true;
+
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;
@@ -3297,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
 
	ixgbevf_watchdog_update_link(adapter);
 
-	if (adapter->link_up)
+	if (adapter->link_up && adapter->link_state)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);
@@ -3482,7 +3507,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
 
	/* XDP RX-queue info */
	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-			     rx_ring->queue_index) < 0)
+			     rx_ring->queue_index, 0) < 0)
		goto err;
 
	rx_ring->xdp_prog = adapter->xdp_prog;
@@ -3808,7 +3833,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 
	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
 
	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -3833,15 +3858,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
	return 1;
 }
 
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
-	unsigned int offset = 0;
-
-	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
-	return offset == skb_checksum_start_offset(skb);
-}
-
 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first,
			    struct ixgbevf_ipsec_tx_data *itd)
@@ -3857,19 +3873,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		/* fall through */
+		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
-		if (((first->protocol == htons(ETH_P_IP)) &&
-		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
-		    ((first->protocol == htons(ETH_P_IPV6)) &&
-		     ixgbevf_ipv6_csum_is_sctp(skb))) {
+		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
-		/* fall through */
+		fallthrough;
	default:
		skb_checksum_help(skb);
		goto no_csum;
@@ -4236,7 +4249,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 
	ether_addr_copy(hw->mac.addr, addr->sa_data);
	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+	eth_hw_addr_set(netdev, addr->sa_data);
 
	return 0;
 }
@@ -4280,13 +4293,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
	return 0;
 }
 
-static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_PM
-	int retval = 0;
-#endif
 
	rtnl_lock();
	netif_device_detach(netdev);
@@ -4297,37 +4307,16 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
	ixgbevf_clear_interrupt_scheme(adapter);
	rtnl_unlock();
 
-#ifdef CONFIG_PM
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-#endif
-	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
-		pci_disable_device(pdev);
-
	return 0;
 }
 
-#ifdef CONFIG_PM
-static int ixgbevf_resume(struct pci_dev *pdev)
+static int __maybe_unused ixgbevf_resume(struct device *dev_d)
 {
+	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;
 
-	pci_restore_state(pdev);
-	/* pci_restore_state clears dev->state_saved so call
-	 * pci_save_state to restore it.
-	 */
-	pci_save_state(pdev);
-
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
-		return err;
-	}
-
	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
@@ -4348,10 +4337,9 @@ static int ixgbevf_resume(struct pci_dev *pdev)
	return err;
 }
 
-#endif /* CONFIG_PM */
 static void ixgbevf_shutdown(struct pci_dev *pdev)
 {
-	ixgbevf_suspend(pdev, PMSG_SUSPEND);
+	ixgbevf_suspend(&pdev->dev);
 }
 
 static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
@@ -4485,15 +4473,9 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 
 static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(dev);
-
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ixgbevf_xdp_setup(dev, xdp->prog);
-	case XDP_QUERY_PROG:
-		xdp->prog_id = adapter->xdp_prog ?
-			       adapter->xdp_prog->aux->id : 0;
-		return 0;
	default:
		return -EINVAL;
	}
@@ -4539,22 +4521,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
-	int err, pci_using_dac;
	bool disable_dev = false;
+	int err;
 
	err = pci_enable_device(pdev);
	if (err)
		return err;
 
-	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
-		pci_using_dac = 1;
-	} else {
-		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
-			goto err_dma;
-		}
-		pci_using_dac = 0;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+		goto err_dma;
	}
 
	err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4601,7 +4578,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;
 
-	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
	       sizeof(struct ixgbe_mbx_operations));
 
	/* setup the private structure */
@@ -4634,10 +4611,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IXGBEVF_GSO_PARTIAL_FEATURES;
 
-	netdev->features = netdev->hw_features;
-
-	if (pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
 
	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_SG |
@@ -4661,6 +4635,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_15:
		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
				  (ETH_HLEN + ETH_FCS_LEN);
		break;
@@ -4812,7 +4787,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
		pci_disable_device(pdev);
	rtnl_unlock();
 
-	/* Request a slot slot reset. */
+	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4871,16 +4846,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
	.resume = ixgbevf_io_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+
 static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
-#ifdef CONFIG_PM
+
	/* Power Management Hooks */
-	.suspend	= ixgbevf_suspend,
-	.resume		= ixgbevf_resume,
-#endif
+	.driver.pm	= &ixgbevf_pm_ops,
+
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
 };
@@ -4893,9 +4869,7 @@ static struct pci_driver ixgbevf_driver = {
  **/
 static int __init ixgbevf_init_module(void)
 {
-	pr_info("%s - version %s\n", ixgbevf_driver_string,
-		ixgbevf_driver_version);
-
+	pr_info("%s\n", ixgbevf_driver_string);
	pr_info("%s\n", ixgbevf_copyright);
	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
	if (!ixgbevf_wq) {
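
A note on the Rx page-reuse hunk above: the removed ixgbevf_page_is_reserved() is the logical inverse of the generic dev_page_is_reusable() helper from <linux/mm.h> that the driver now calls. To my understanding, the mainline helper reads approximately as follows; treat this as a sketch for orientation, not a copy of the driver's code:

/* A page may be recycled into the Rx ring only if it is local to this
 * NUMA node and was not handed out from the pfmemalloc emergency
 * reserves (those must be returned to the page allocator promptly).
 */
static inline bool dev_page_is_reusable(const struct page *page)
{
	return likely(page_to_nid(page) == numa_mem_id() &&
		      !page_is_pfmemalloc(page));
}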
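
The ixgbevf_clean_rx_irq() hunks replace open-coded xdp_buff field assignments with the xdp_init_buff()/xdp_prepare_buff() helpers from <net/xdp.h>. A minimal sketch of the split between per-poll and per-frame setup; the wrapper function and its parameters are illustrative, not driver code:

#include <net/xdp.h>

static void example_setup_xdp_buff(struct xdp_buff *xdp,
				   struct xdp_rxq_info *rxq, u32 frame_sz,
				   unsigned char *hard_start,
				   unsigned int headroom, unsigned int size)
{
	/* once per poll loop: bind the buff to its rxq and record the
	 * per-buffer truesize (frame_sz)
	 */
	xdp_init_buff(xdp, frame_sz, rxq);

	/* per received frame: derives data and data_end from
	 * hard_start + headroom; meta_valid=true sets data_meta == data
	 * so an XDP program may prepend metadata
	 */
	xdp_prepare_buff(xdp, hard_start, headroom, size, true);
}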
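
The repeated ether_addr_copy(netdev->dev_addr, ...) to eth_hw_addr_set() conversions track netdev->dev_addr becoming const in the core: drivers must no longer write the field directly. Illustrative usage only; the surrounding function is hypothetical:

#include <linux/etherdevice.h>

static void example_assign_mac(struct net_device *netdev, const u8 *mac)
{
	/* writes dev_addr through the core helper instead of memcpy'ing
	 * into it, keeping the kernel's address tracking consistent
	 */
	eth_hw_addr_set(netdev, mac);
}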
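
Finally, the suspend/resume rework is the standard migration from legacy PCI power management to generic dev_pm_ops: the callbacks take a struct device, the PCI core performs pci_save_state()/pci_enable_device_mem() itself (which is why those calls disappear from the driver), and SIMPLE_DEV_PM_OPS() wires the pair into .driver.pm. A bare sketch of the pattern under those assumptions, with all names illustrative:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	netif_device_detach(netdev);	/* quiesce; PCI core powers down */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	netif_device_attach(netdev);	/* PCI core already restored state */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name      = "example",
	.driver.pm = &example_pm_ops,	/* replaces legacy .suspend/.resume */
};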