// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"

/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
	dev_dbg(ipc_imem->dev, "%s if id: %d",
		ipc_imem_phase_get_string(ipc_imem->phase), if_id);

	/* The network interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
			ipc_imem_phase_get_string(ipc_imem->phase));
		return -EIO;
	}

	return ipc_mux_open_session(ipc_imem->mux, if_id);
}

/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
			     int channel_id)
{
	/* Only IP MUX sessions in the valid interface-id range are closed;
	 * anything else is silently ignored (channel_id is unused here).
	 */
	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
	    if_id <= IP_MUX_SESSION_END)
		ipc_mux_close_session(ipc_imem->mux, if_id);
}

/* Tasklet call to do uplink transfer.
 *
 * Runs in tasklet context; clears the pending flag set by
 * ipc_imem_call_cdev_write() before kicking the uplink send path.
 */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem_ul_send(ipc_imem);

	return 0;
}

/* Through tasklet to do sio write.
 *
 * ev_cdev_write_pending acts as a coalescing guard: if a write task is
 * already queued and not yet executed, do not queue another one.
 * Returns 0 on success, -1 if a task is already pending, or the error
 * from ipc_task_queue_send_task().
 */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	if (ipc_imem->ev_cdev_write_pending)
		return -1;

	ipc_imem->ev_cdev_write_pending = true;

	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}

/* Function for transfer UL data
 *
 * Routes an uplink skb for the given interface through the IP MUX layer.
 * Returns 0 on success, -EINVAL on bad arguments, -EIO if CP is not in
 * the run phase, or the error from ipc_mux_ul_trigger_encode().
 * NOTE(review): on failure the caller apparently keeps ownership of the
 * skb — confirm against the callers of this function.
 */
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
			       int if_id, int channel_id, struct sk_buff *skb)
{
	int ret = -EINVAL;

	if (!ipc_imem || channel_id < 0)
		goto out;

	/* Is CP Running? */
	if (ipc_imem->phase != IPC_P_RUN) {
		dev_dbg(ipc_imem->dev, "phase %s transmit",
			ipc_imem_phase_get_string(ipc_imem->phase));
		ret = -EIO;
		goto out;
	}

	/* Route the UL packet through IP MUX Layer */
	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
	return ret;
}

/* Initialize wwan channel
 *
 * Reads the CP version from MMIO, sets up the WWAN data channel
 * configuration and registers the wwan net-device interfaces.
 * Errors are logged but not propagated (void return).
 */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	/* nr_of_channels indexes the next free channel configuration slot. */
	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}

/* Map SKB to DMA for transfer
 *
 * Maps skb->data for device access (uplink direction) and records the
 * mapping details in the skb control buffer so the transfer layer can
 * unmap it later. Returns 0 on success or the ipc_pcie_addr_map() error.
 */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
				   struct sk_buff *skb)
{
	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
	char *buf = skb->data;
	int len = skb->len;
	dma_addr_t mapping;
	int ret;

	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);

	if (ret)
		goto err;

	/* Compile-time guarantee that the IPC control block fits in skb->cb. */
	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

	IPC_CB(skb)->mapping = mapping;
	IPC_CB(skb)->direction = DMA_TO_DEVICE;
	IPC_CB(skb)->len = len;
	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;

err:
	return ret;
}

/* return true if channel is ready for use
 *
 * In PSI/EBL/RUN phases any channel may proceed to the ACTIVE-state
 * check; in ROM phase only a RESERVED channel is usable (it bypasses
 * the ACTIVE check entirely); all other phases refuse uplink activity.
 */
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
				       struct ipc_mem_channel *channel)
{
	enum ipc_phase phase;

	/* Update the current operation phase. */
	phase = ipc_imem->phase;

	/* Select the operation depending on the execution stage. */
	switch (phase) {
	case IPC_P_RUN:
	case IPC_P_PSI:
	case IPC_P_EBL:
		break;

	case IPC_P_ROM:
		/* Prepare the PSI image for the CP ROM driver and
		 * suspend the flash app.
		 */
		if (channel->state != IMEM_CHANNEL_RESERVED) {
			dev_err(ipc_imem->dev,
				"ch[%d]:invalid channel state %d,expected %d",
				channel->channel_id, channel->state,
				IMEM_CHANNEL_RESERVED);
			goto channel_unavailable;
		}
		goto channel_available;

	default:
		/* Ignore uplink actions in all other phases. */
		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
			channel->channel_id, phase);
		goto channel_unavailable;
	}
	/* Check the full availability of the channel. */
	if (channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
			channel->channel_id, channel->state);
		goto channel_unavailable;
	}

channel_available:
	return true;

channel_unavailable:
	return false;
}

/* Release a sio link to CP.
 *
 * Waits (bounded by IPC_PEND_DATA_TIMEOUT) for pending UL and DL data
 * to drain, then closes both pipes and frees the channel.
 */
void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
{
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If current phase is IPC_P_OFF or SIO ID is -ve then
	 * channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}

/* Open a PORT link to CP and return the channel
 *
 * Allocates a control channel for the given channel id and opens it
 * with the given hp_id. Returns the channel on success or NULL on any
 * failure (wrong phase, no free channel id, or open failure).
 */
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
					       int chl_id, int hp_id)
{
	struct ipc_mem_channel *channel;
	int ch_id;

	/* The PORT interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
			ipc_imem_phase_get_string(ipc_imem->phase));
		return NULL;
	}

	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);

	if (ch_id < 0) {
		dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
		return NULL;
	}

	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);

	if (!channel) {
		dev_err(ipc_imem->dev, "PORT channel id open failed");
		return NULL;
	}

	return channel;
}

/* transfer skb to modem
 *
 * Maps the skb for DMA, queues it on the channel uplink list and
 * triggers the write tasklet. Returns 0 on success or a negative errno;
 * on tasklet-queue failure the skb is removed from the list again.
 * NOTE(review): on the dequeue path the skb's DMA mapping is not
 * visibly unmapped here — presumably the caller frees/unmaps; confirm.
 */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	int ret = -EIO;

	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
	    ipc_imem->phase == IPC_P_OFF_REQ)
		goto out;

	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);

	if (ret)
		goto out;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	ret = ipc_imem_call_cdev_write(ipc_imem);

	if (ret) {
		skb_dequeue_tail(&channel->ul_list);
		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
			ipc_cdev->channel->channel_id);
	}
out:
	return ret;
}