/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (C) 2020-21 Intel Corporation.
 */

#ifndef IOSM_IPC_PCIE_H
#define IOSM_IPC_PCIE_H

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "iosm_ipc_irq.h"

/* Device ID */
#define INTEL_CP_DEVICE_7560_ID 0x7560
#define INTEL_CP_DEVICE_7360_ID 0x7360

/* Defines for BAR area usage */
#define IPC_DOORBELL_BAR0 0
#define IPC_SCRATCHPAD_BAR2 2

/* Defines for DOORBELL register information */
#define IPC_DOORBELL_CH_OFFSET BIT(5)
#define IPC_WRITE_PTR_REG_0 BIT(4)
#define IPC_CAPTURE_PTR_REG_0 BIT(3)

/* Number of MSI vectors used for IPC */
#define IPC_MSI_VECTORS 1

/* Total maximum number of IRQ vectors used for IPC */
#define IPC_IRQ_VECTORS IPC_MSI_VECTORS

/**
 * enum ipc_pcie_sleep_state - Enum type for the different sleep state transitions
 * @IPC_PCIE_D0L12:	Put the sleep state in D0L12
 * @IPC_PCIE_D3L2:	Put the sleep state in D3L2
 */
enum ipc_pcie_sleep_state {
	IPC_PCIE_D0L12,
	IPC_PCIE_D3L2,
};

/**
 * struct iosm_pcie - IPC_PCIE struct.
 * @pci:			Address of the device description
 * @dev:			Pointer to generic device structure
 * @ipc_regs:			Remapped CP doorbell address of the irq register
 *				set, to fire the doorbell irq.
 * @scratchpad:			Remapped CP scratchpad address, to send the
 *				configuration tuple and the IPC descriptors
 *				to CP in the ROM phase. The config tuple
 *				information is saved on the MSI scratchpad.
 * @imem:			Pointer to imem data struct
 * @ipc_regs_bar_nr:		BAR number to be used for IPC doorbell
 * @scratchpad_bar_nr:		BAR number to be used for Scratchpad
 * @nvec:			Number of requested irq vectors
 * @doorbell_reg_offset:	Offset between per-channel doorbell registers
 * @doorbell_write:		Doorbell write register
 * @doorbell_capture:		Doorbell capture register
 * @suspend:			S2IDLE sleep/active
 * @d3l2_support:		Read WWAN RTD3 BIOS setting for D3L2 support
 */
struct iosm_pcie {
	struct pci_dev *pci;
	struct device *dev;
	void __iomem *ipc_regs;
	void __iomem *scratchpad;
	struct iosm_imem *imem;
	int ipc_regs_bar_nr;
	int scratchpad_bar_nr;
	int nvec;
	u32 doorbell_reg_offset;
	u32 doorbell_write;
	u32 doorbell_capture;
	unsigned long suspend;
	enum ipc_pcie_sleep_state d3l2_support;
};

/**
 * struct ipc_skb_cb - Struct definition of the control data which is mapped to
 *		       the cb field of the skb
 * @mapping:	Store physical or IOVA mapped address of the skb virtual address
 * @direction:	DMA direction
 * @len:	Length of the DMA mapped region
 * @op_type:	Expected values are defined in enum ipc_ul_usr_op.
 */
struct ipc_skb_cb {
	dma_addr_t mapping;
	int direction;
	int len;
	u8 op_type;
};

/**
 * enum ipc_ul_usr_op - Control operation to execute the right action on
 *			the user interface.
 * @UL_USR_OP_BLOCKED:	The uplink app was blocked until CP confirms that the
 *			uplink buffer was consumed, triggered by the IRQ.
 * @UL_MUX_OP_ADB:	In MUX mode the UL ADB shall be added to the free list.
 * @UL_DEFAULT:		SKB in non-muxing mode
 */
enum ipc_ul_usr_op {
	UL_USR_OP_BLOCKED,
	UL_MUX_OP_ADB,
	UL_DEFAULT,
};

/**
 * ipc_pcie_addr_map - Maps the kernel's virtual address to either IOVA
 *		       address space or physical address space; the mapping is
 *		       stored in the skb's cb.
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @data:	Skb mem containing data
 * @size:	Data size
 * @mapping:	Dma mapping address
 * @direction:	Data direction
 *
 * Returns: 0 on success and failure value on error
 */
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
		      size_t size, dma_addr_t *mapping, int direction);

/**
 * ipc_pcie_addr_unmap - Unmaps the skb memory region from IOVA address space
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @size:	Data size
 * @mapping:	Dma mapping address
 * @direction:	Data direction
 */
void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
			 dma_addr_t mapping, int direction);

/**
 * ipc_pcie_alloc_skb - Allocate an uplink SKB for the given size.
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @size:	Size of the SKB required.
 * @flags:	Allocation flags
 * @mapping:	Copies either the mapped IOVA or the converted physical address
 * @direction:	DMA data direction
 * @headroom:	Header data offset
 *
 * Returns: Pointer to ipc_skb on success, NULL on failure.
 */
struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
				   gfp_t flags, dma_addr_t *mapping,
				   int direction, size_t headroom);

/**
 * ipc_pcie_alloc_local_skb - Allocate a local SKB for the given size.
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @flags:	Allocation flags
 * @size:	Size of the SKB required.
 *
 * Returns: Pointer to ipc_skb on success, NULL on failure.
 */
struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
					 gfp_t flags, size_t size);

/**
 * ipc_pcie_kfree_skb - Free skb allocated by ipc_pcie_alloc_*_skb().
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @skb:	Pointer to the skb
 */
void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb);

/**
 * ipc_pcie_check_data_link_active - Check Data Link Layer Active
 * @ipc_pcie:	Pointer to struct iosm_pcie
 *
 * Returns: true if active, otherwise false
 */
bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie);

/**
 * ipc_pcie_suspend - Callback invoked by pm_runtime_suspend. It decrements
 *		      the device's usage count and then carries out a suspend,
 *		      either synchronous or asynchronous.
 * @ipc_pcie:	Pointer to struct iosm_pcie
 *
 * Returns: 0 on success and failure value on error
 */
int ipc_pcie_suspend(struct iosm_pcie *ipc_pcie);

/**
 * ipc_pcie_resume - Callback invoked by pm_runtime_resume. It increments
 *		     the device's usage count and then carries out a resume,
 *		     either synchronous or asynchronous.
 * @ipc_pcie:	Pointer to struct iosm_pcie
 *
 * Returns: 0 on success and failure value on error
 */
int ipc_pcie_resume(struct iosm_pcie *ipc_pcie);

/**
 * ipc_pcie_check_aspm_enabled - Check if ASPM L1 is already enabled
 * @ipc_pcie:	Pointer to struct iosm_pcie
 * @parent:	True if checking ASPM L1 for the parent, else false
 *
 * Returns: true if ASPM is already enabled, else false
 */
bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
				 bool parent);

/**
 * ipc_pcie_config_aspm - Configure ASPM L1
 * @ipc_pcie:	Pointer to struct iosm_pcie
 */
void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie);

#endif
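A minimal, hypothetical usage sketch (not part of the header above): it illustrates how a caller might pair ipc_pcie_alloc_skb() with ipc_pcie_kfree_skb() to obtain, fill, and release a DMA-mapped uplink buffer, mirroring the alloc/fill/free pattern the kernel-doc comments describe. The function ipc_pcie_example_send() and its parameters are illustrative assumptions; only ipc_pcie_alloc_skb(), skb_put_data() and ipc_pcie_kfree_skb() are interfaces declared or used by this driver header.

#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>

#include "iosm_ipc_pcie.h"

/*
 * Hypothetical example: allocate a DMA-mapped uplink skb, copy a payload
 * into it, and release it again. In the real driver the skb would be handed
 * to the transfer layer instead of being freed immediately.
 */
static int ipc_pcie_example_send(struct iosm_pcie *ipc_pcie,
				 const void *buf, size_t len)
{
	dma_addr_t mapping;
	struct sk_buff *skb;

	/* Allocate an skb of len bytes, mapped for device reads (uplink). */
	skb = ipc_pcie_alloc_skb(ipc_pcie, len, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb)
		return -ENOMEM;

	/* Copy the payload into the allocated buffer. */
	skb_put_data(skb, buf, len);

	/*
	 * ... hand the skb to the transfer layer here ...
	 *
	 * A single call unmaps the buffer and frees the skb again.
	 */
	ipc_pcie_kfree_skb(ipc_pcie, skb);

	return 0;
}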