/*
* Copyright 2020-2023 NXP
*
* SPDX-License-Identifier: Apache-2.0
*/
#define DT_DRV_COMPAT nxp_imx_flexspi
#include <zephyr/logging/log.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/pm/device.h>
#include <soc.h>
#include "memc_mcux_flexspi.h"
/*
* NOTE: If CONFIG_FLASH_MCUX_FLEXSPI_XIP is selected, any external functions
* called while interacting with the FlexSPI MUST be relocated to SRAM or ITCM
* at runtime, so that the chip does not fetch program instructions from the
* FlexSPI while it is being written to.
*/
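/*
 * Illustrative sketch only, not part of this driver: applications commonly
 * meet the relocation requirement above at build time, e.g. with
 * CONFIG_CODE_DATA_RELOCATION and a CMakeLists.txt line such as
 *
 *   zephyr_code_relocate(FILES src/flash_ops.c LOCATION RAM)
 *
 * (the file name here is hypothetical), or by tagging individual functions
 * with __ramfunc on architectures that support it.
 */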
#if defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP) && (CONFIG_MEMC_LOG_LEVEL > 0)
#warning "Enabling memc driver logging and XIP mode simultaneously can cause \
read-while-write hazards. This configuration is not recommended."
#endif
#define FLEXSPI_MAX_LUT 64U
LOG_MODULE_REGISTER(memc_flexspi, CONFIG_MEMC_LOG_LEVEL);
struct memc_flexspi_buf_cfg {
uint16_t prefetch;
uint16_t priority;
uint16_t master_id;
uint16_t buf_size;
} __packed;
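/* The fields above mirror the rx-buffer-config devicetree property, a flat
 * array of uint16 cells consumed four at a time in the order
 * <prefetch priority master_id buf_size>. An illustrative (not normative)
 * fragment might look like:
 *
 *   &flexspi {
 *           rx-buffer-config = <0 7 0 512>, <1 0 2 256>;
 *   };
 *
 * See the nxp,imx-flexspi binding for the authoritative description.
 */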
/* Structure tracking LUT offset and usage per each port */
struct port_lut {
uint8_t lut_offset;
uint8_t lut_used;
};
/* flexspi device data should be stored in RAM to avoid read-while-write hazards */
struct memc_flexspi_data {
FLEXSPI_Type *base;
uint8_t *ahb_base;
bool xip;
bool ahb_bufferable;
bool ahb_cacheable;
bool ahb_prefetch;
bool ahb_read_addr_opt;
bool combination_mode;
bool sck_differential_clock;
flexspi_read_sample_clock_t rx_sample_clock;
const struct pinctrl_dev_config *pincfg;
size_t size[kFLEXSPI_PortCount];
struct port_lut port_luts[kFLEXSPI_PortCount];
struct memc_flexspi_buf_cfg *buf_cfg;
uint8_t buf_cfg_cnt;
const struct device *clock_dev;
clock_control_subsys_t clock_subsys;
};
void memc_flexspi_wait_bus_idle(const struct device *dev)
{
struct memc_flexspi_data *data = dev->data;
while (false == FLEXSPI_GetBusIdleStatus(data->base)) {
}
}
bool memc_flexspi_is_running_xip(const struct device *dev)
{
struct memc_flexspi_data *data = dev->data;
return data->xip;
}
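/* Reclock the FlexSPI and update the DLL configuration for the given port.
 * IRQs are locked so that no XIP fetches occur while the module is
 * reconfigured.
 */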
int memc_flexspi_update_clock(const struct device *dev,
flexspi_device_config_t *device_config,
flexspi_port_t port, uint32_t freq_hz)
{
struct memc_flexspi_data *data = dev->data;
uint32_t rate;
uint32_t key;
int ret;
/* To reclock the FlexSPI, we should:
* - disable the module
* - set the new clock
* - reenable the module
* - reset the module
* We CANNOT XIP at any point during this process
*/
key = irq_lock();
memc_flexspi_wait_bus_idle(dev);
ret = clock_control_set_rate(data->clock_dev, data->clock_subsys,
(clock_control_subsys_rate_t)freq_hz);
if (ret < 0) {
irq_unlock(key);
return ret;
}
/*
* We need to update the DLL value before we call clock_control_get_rate,
* because this will cause XIP (flash reads) to occur. Although the
* true flash clock is not known, assume the set_rate function programmed
* a value close to what we requested.
*/
device_config->flexspiRootClk = freq_hz;
FLEXSPI_UpdateDllValue(data->base, device_config, port);
memc_flexspi_reset(dev);
memc_flexspi_wait_bus_idle(dev);
ret = clock_control_get_rate(data->clock_dev, data->clock_subsys, &rate);
if (ret < 0) {
irq_unlock(key);
return ret;
}
device_config->flexspiRootClk = rate;
FLEXSPI_UpdateDllValue(data->base, device_config, port);
memc_flexspi_reset(dev);
irq_unlock(key);
return 0;
}
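/* Apply the flash device configuration and load the LUT sequences for the
 * given port. All ports share a single LUT table, so each port records the
 * offset at which its sequences were placed.
 */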
int memc_flexspi_set_device_config(const struct device *dev,
const flexspi_device_config_t *device_config,
const uint32_t *lut_array,
uint8_t lut_count,
flexspi_port_t port)
{
flexspi_device_config_t tmp_config;
uint32_t tmp_lut[FLEXSPI_MAX_LUT];
struct memc_flexspi_data *data = dev->data;
const uint32_t *lut_ptr = lut_array;
uint8_t lut_used = 0U;
unsigned int key = 0;
if (port >= kFLEXSPI_PortCount) {
LOG_ERR("Invalid port number");
return -EINVAL;
}
if (data->port_luts[port].lut_used < lut_count) {
/* We cannot reuse the existing LUT slot; check if the new LUT
 * table will fit into the remaining LUT slots.
 */
for (uint8_t i = 0; i < kFLEXSPI_PortCount; i++) {
lut_used += data->port_luts[i].lut_used;
}
if ((lut_used + lut_count) > FLEXSPI_MAX_LUT) {
return -ENOBUFS;
}
}
data->size[port] = device_config->flashSize * KB(1);
if (memc_flexspi_is_running_xip(dev)) {
/* We need to avoid flash access while configuring the FlexSPI.
* To do this, we will copy the LUT array into stack-allocated
* temporary memory
*/
memcpy(tmp_lut, lut_array, lut_count * MEMC_FLEXSPI_CMD_SIZE);
lut_ptr = tmp_lut;
}
memcpy(&tmp_config, device_config, sizeof(tmp_config));
/* Update FlexSPI AWRSEQID and ARDSEQID values based on where the LUT
* array will actually be loaded.
*/
if (data->port_luts[port].lut_used < lut_count) {
/* Update lut offset with new value */
data->port_luts[port].lut_offset = lut_used;
}
data->port_luts[port].lut_used = lut_count;
tmp_config.ARDSeqIndex += data->port_luts[port].lut_offset;
tmp_config.AWRSeqIndex += data->port_luts[port].lut_offset;
/* Lock IRQs before reconfiguring FlexSPI, to prevent XIP */
key = irq_lock();
FLEXSPI_SetFlashConfig(data->base, &tmp_config, port);
FLEXSPI_UpdateLUT(data->base, data->port_luts[port].lut_offset,
lut_ptr, lut_count);
irq_unlock(key);
return 0;
}
int memc_flexspi_reset(const struct device *dev)
{
struct memc_flexspi_data *data = dev->data;
FLEXSPI_SoftwareReset(data->base);
return 0;
}
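/* Execute a blocking IP command transfer, offsetting the sequence index by
 * the LUT offset previously assigned to the transfer's port.
 */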
int memc_flexspi_transfer(const struct device *dev,
flexspi_transfer_t *transfer)
{
struct memc_flexspi_data *data = dev->data;
status_t status;
/* Adjust transfer LUT index based on port */
transfer->seqIndex += data->port_luts[transfer->port].lut_offset;
status = FLEXSPI_TransferBlocking(data->base, transfer);
if (status != kStatus_Success) {
LOG_ERR("Transfer error: %d", status);
return -EIO;
}
return 0;
}
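/* Return the AHB-mapped address of the given offset within the flash device
 * attached to the given port. Devices on lower-numbered ports are mapped
 * first, so their sizes are added to the offset.
 */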
void *memc_flexspi_get_ahb_address(const struct device *dev,
flexspi_port_t port, off_t offset)
{
struct memc_flexspi_data *data = dev->data;
int i;
if (port >= kFLEXSPI_PortCount) {
LOG_ERR("Invalid port number: %u", port);
return NULL;
}
for (i = 0; i < port; i++) {
offset += data->size[i];
}
return data->ahb_base + offset;
}
static int memc_flexspi_init(const struct device *dev)
{
struct memc_flexspi_data *data = dev->data;
flexspi_config_t flexspi_config;
/* we should not configure the device we are running on */
if (memc_flexspi_is_running_xip(dev)) {
LOG_DBG("XIP active on %s, skipping init", dev->name);
return 0;
}
/*
* SoCs such as the RT1064 and RT1024 have internal flash and no pinmux
* settings, so continue if no pinctrl state is found.
*/
int ret;
ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
FLEXSPI_GetDefaultConfig(&flexspi_config);
flexspi_config.ahbConfig.enableAHBBufferable = data->ahb_bufferable;
flexspi_config.ahbConfig.enableAHBCachable = data->ahb_cacheable;
flexspi_config.ahbConfig.enableAHBPrefetch = data->ahb_prefetch;
flexspi_config.ahbConfig.enableReadAddressOpt = data->ahb_read_addr_opt;
#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN) && \
FSL_FEATURE_FLEXSPI_HAS_NO_MCR0_COMBINATIONEN)
flexspi_config.enableCombination = data->combination_mode;
#endif
#if !(defined(FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT) && \
FSL_FEATURE_FLEXSPI_HAS_NO_MCR2_SCKBDIFFOPT)
flexspi_config.enableSckBDiffOpt = data->sck_differential_clock;
#endif
flexspi_config.rxSampleClock = data->rx_sample_clock;
/* Configure AHB RX buffers, if any configuration settings are present */
__ASSERT(data->buf_cfg_cnt < FSL_FEATURE_FLEXSPI_AHB_BUFFER_COUNT,
"Maximum RX buffer configuration count exceeded");
for (uint8_t i = 0; i < data->buf_cfg_cnt; i++) {
/* Should AHB prefetch up to buffer size? */
flexspi_config.ahbConfig.buffer[i].enablePrefetch = data->buf_cfg[i].prefetch;
/* AHB access priority (used for suspending control of AHB prefetching) */
flexspi_config.ahbConfig.buffer[i].priority = data->buf_cfg[i].priority;
/* AHB master index, SOC specific */
flexspi_config.ahbConfig.buffer[i].masterIndex = data->buf_cfg[i].master_id;
/* RX buffer allocation (total available buffer space is instance/SOC specific) */
flexspi_config.ahbConfig.buffer[i].bufferSize = data->buf_cfg[i].buf_size;
}
FLEXSPI_Init(data->base, &flexspi_config);
return 0;
}
#ifdef CONFIG_PM_DEVICE
static int memc_flexspi_pm_action(const struct device *dev, enum pm_device_action action)
{
struct memc_flexspi_data *data = dev->data;
int ret;
switch (action) {
case PM_DEVICE_ACTION_RESUME:
ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_DEFAULT);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
break;
case PM_DEVICE_ACTION_SUSPEND:
ret = pinctrl_apply_state(data->pincfg, PINCTRL_STATE_SLEEP);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
break;
default:
return -ENOTSUP;
}
return 0;
}
#endif
#if defined(CONFIG_XIP) && defined(CONFIG_FLASH_MCUX_FLEXSPI_XIP)
/* Checks if image flash base address is in the FlexSPI AHB base region */
#define MEMC_FLEXSPI_CFG_XIP(node_id) \
(((CONFIG_FLASH_BASE_ADDRESS) >= DT_REG_ADDR_BY_IDX(node_id, 1)) && \
((CONFIG_FLASH_BASE_ADDRESS) < (DT_REG_ADDR_BY_IDX(node_id, 1) + \
DT_REG_SIZE_BY_IDX(node_id, 1))))
#else
#define MEMC_FLEXSPI_CFG_XIP(node_id) false
#endif
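/* Per-instance data and device definition, expanded below for every enabled
 * nxp,imx-flexspi node in the devicetree.
 */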
#define MEMC_FLEXSPI(n) \
PINCTRL_DT_INST_DEFINE(n); \
static uint16_t buf_cfg_##n[] = \
DT_INST_PROP_OR(n, rx_buffer_config, {0}); \
\
static struct memc_flexspi_data \
memc_flexspi_data_##n = { \
.base = (FLEXSPI_Type *) DT_INST_REG_ADDR(n), \
.xip = MEMC_FLEXSPI_CFG_XIP(DT_DRV_INST(n)), \
.ahb_base = (uint8_t *) DT_INST_REG_ADDR_BY_IDX(n, 1), \
.ahb_bufferable = DT_INST_PROP(n, ahb_bufferable), \
.ahb_cacheable = DT_INST_PROP(n, ahb_cacheable), \
.ahb_prefetch = DT_INST_PROP(n, ahb_prefetch), \
.ahb_read_addr_opt = DT_INST_PROP(n, ahb_read_addr_opt),\
.combination_mode = DT_INST_PROP(n, combination_mode), \
.sck_differential_clock = DT_INST_PROP(n, sck_differential_clock), \
.rx_sample_clock = DT_INST_PROP(n, rx_clock_source), \
.buf_cfg = (struct memc_flexspi_buf_cfg *)buf_cfg_##n, \
.buf_cfg_cnt = sizeof(buf_cfg_##n) / \
sizeof(struct memc_flexspi_buf_cfg), \
.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
.clock_subsys = (clock_control_subsys_t) \
DT_INST_CLOCKS_CELL(n, name), \
}; \
\
PM_DEVICE_DT_INST_DEFINE(n, memc_flexspi_pm_action); \
\
DEVICE_DT_INST_DEFINE(n, \
memc_flexspi_init, \
PM_DEVICE_DT_INST_GET(n), \
&memc_flexspi_data_##n, \
NULL, \
POST_KERNEL, \
CONFIG_MEMC_INIT_PRIORITY, \
NULL);
DT_INST_FOREACH_STATUS_OKAY(MEMC_FLEXSPI)