Commit 7c306cb1 authored by Greg Kroah-Hartman

Merge tag 'fpga-for-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga into char-misc-next

Xu writes:

FPGA Manager changes for 6.3-rc1

Microchip:

- Ivan's reliability improvements for the Microchip PolarFire FPGA

FPGA DFL doc:

- Randy's and Yilun's kernel-doc fixes.
  The two patches "fpga: dfl: more kernel-doc corrections" and
  "fpga: dfl: kernel-doc corrections" conflict with Matthew's FPGA
  patch "fpga: dfl: add basic support for DFHv1" on tty-next. Yilun
  resolved the conflicts on:
  --branch for-next https://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga.git/
  On that branch, Matthew's patch is applied first and the kernel-doc
  fixes follow.

Intel M10 BMC MFD & sub-devices:

- Lee's topic branch is merged to support a new BMC board type with a new
  PMCI interface to the host, as well as its new sub-devices.

Signed-off-by: Xu Yilun <yilun.xu@intel.com>

* tag 'fpga-for-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga:
  fpga: bridge: return errors in the show() method of the "state" attribute
  fpga: dfl: more kernel-doc corrections
  fpga: dfl: kernel-doc corrections
  fpga: microchip-spi: separate data frame write routine
  fpga: microchip-spi: rewrite status polling in a time measurable way
  fpga: microchip-spi: move SPI I/O buffers out of stack
  mfd: intel-m10-bmc: Add PMCI driver
  fpga: m10bmc-sec: Make rsu status type specific
  fpga: m10bmc-sec: Create helpers for rsu status/progress checks
  mfd: intel-m10-bmc: Prefix register defines with M10BMC_N3000
  fpga: intel-m10-bmc: Rework flash read/write
  mfd: intel-m10-bmc: Support multiple CSR register layouts
  mfd: intel-m10-bmc: Split into core and spi specific parts
  mfd: intel-m10-bmc: Rename the local variables
  mfd: intel-m10-bmc: Create m10bmc_platform_info for type specific info
  mfd: intel-m10-bmc: Add missing includes to header
What: /sys/bus/spi/devices/.../bmc_version
What: /sys/bus/.../drivers/intel-m10-bmc/.../bmc_version
Date: June 2020
KernelVersion: 5.10
Contact: Xu Yilun <yilun.xu@intel.com>
......@@ -6,7 +6,7 @@ Description: Read only. Returns the hardware build version of Intel
MAX10 BMC chip.
Format: "0x%x".
What: /sys/bus/spi/devices/.../bmcfw_version
What: /sys/bus/.../drivers/intel-m10-bmc/.../bmcfw_version
Date: June 2020
KernelVersion: 5.10
Contact: Xu Yilun <yilun.xu@intel.com>
......@@ -14,7 +14,7 @@ Description: Read only. Returns the firmware version of Intel MAX10
BMC chip.
Format: "0x%x".
What: /sys/bus/spi/devices/.../mac_address
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_address
Date: January 2021
KernelVersion: 5.12
Contact: Russ Weight <russell.h.weight@intel.com>
......@@ -25,7 +25,7 @@ Description: Read only. Returns the first MAC address in a block
space.
Format: "%02x:%02x:%02x:%02x:%02x:%02x".
What: /sys/bus/spi/devices/.../mac_count
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_count
Date: January 2021
KernelVersion: 5.12
Contact: Russ Weight <russell.h.weight@intel.com>
......
......@@ -10575,7 +10575,7 @@ S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
F: Documentation/hwmon/intel-m10-bmc-hwmon.rst
F: drivers/hwmon/intel-m10-bmc-hwmon.c
F: drivers/mfd/intel-m10-bmc.c
F: drivers/mfd/intel-m10-bmc*
F: include/linux/mfd/intel-m10-bmc.h
INTEL MENLOW THERMAL DRIVER
......
......@@ -246,7 +246,7 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
depends on MFD_INTEL_M10_BMC
depends on MFD_INTEL_M10_BMC_CORE
select FW_LOADER
select FW_UPLOAD
help
......
......@@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
* @pdata: afu platform device's pdata.
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
......
......@@ -41,7 +41,7 @@ struct dfl_afu_mmio_region {
};
/**
* struct fpga_afu_dma_region - afu DMA region data structure
* struct dfl_afu_dma_region - afu DMA region data structure
*
* @user_addr: region userspace virtual address.
* @length: region length.
......
......@@ -141,7 +141,7 @@
* @fab_port_id: used to indicate current working mode of fabric counters.
* @fab_lock: lock to protect fabric counters working mode.
* @cpu: active CPU to which the PMU is bound for accesses.
* @cpuhp_node: node for CPU hotplug notifier link.
* @node: node for CPU hotplug notifier link.
* @cpuhp_state: state for CPU hotplug notification;
*/
struct fme_perf_priv {
......
......@@ -164,7 +164,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
*
* @feature: sub feature info
* @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
......@@ -273,7 +273,7 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
}
/**
* dfl_fme_destroy_bridge - destroy all fpga bridge platform device
* dfl_fme_destroy_bridges - destroy all fpga bridge platform device
* @pdata: fme platform device's pdata
*/
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
......
......@@ -58,7 +58,7 @@ struct dfl_fme_bridge {
};
/**
* struct dfl_fme_bridge_pdata - platform data for FME bridge platform device.
* struct dfl_fme_br_pdata - platform data for FME bridge platform device.
*
* @cdev: container device.
* @port_id: port id.
......
......@@ -45,7 +45,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
};
/**
* dfl_dev_info - dfl feature device information.
* struct dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
* @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
* @id: idr id of the feature dev.
......@@ -67,7 +67,7 @@ static struct dfl_dev_info dfl_devs[] = {
};
/**
* dfl_chardev_info - chardev information of dfl feature device
* struct dfl_chardev_info - chardev information of dfl feature device
* @name: name string of the char device.
* @devt: devt of the char device.
*/
......@@ -708,6 +708,7 @@ struct build_feature_devs_info {
* struct dfl_feature_info - sub feature info collected during feature dev build
*
* @fid: id of this sub feature.
* @revision: revision value of this sub feature.
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
......
......@@ -231,6 +231,7 @@ struct dfl_feature_irq_ctx {
*
* @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
* @revision: revision value of this sub feature.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
* feature dev (platform device)'s resources.
......
......@@ -293,12 +293,15 @@ static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fpga_bridge *bridge = to_fpga_bridge(dev);
int enable = 1;
int state = 1;
if (bridge->br_ops && bridge->br_ops->enable_show)
enable = bridge->br_ops->enable_show(bridge);
if (bridge->br_ops && bridge->br_ops->enable_show) {
state = bridge->br_ops->enable_show(bridge);
if (state < 0)
return state;
}
return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
}
static DEVICE_ATTR_RO(name);
......
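The state_show() rework above only matters when a bridge driver's enable_show() callback can itself fail. A minimal sketch of such a callback (hypothetical driver, with invented names and helpers, not part of this series) shows the path the new error handling covers; the negative errno returned here now reaches userspace instead of being reported as "enabled":

static int demo_br_enable_show(struct fpga_bridge *bridge)
{
	struct demo_br_priv *priv = bridge->priv;	/* hypothetical driver data */
	u32 ctrl;
	int ret;

	/* hypothetical register read that can fail, e.g. on a dead bus */
	ret = demo_br_read_ctrl(priv, &ctrl);
	if (ret)
		return ret;	/* propagated by state_show() after this change */

	return !!(ctrl & DEMO_BR_ENABLE_BIT);	/* hypothetical enable bit */
}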
......@@ -14,6 +14,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
struct m10bmc_sec;
struct m10bmc_sec_ops {
int (*rsu_status)(struct m10bmc_sec *sec);
};
struct m10bmc_sec {
struct device *dev;
struct intel_m10bmc *m10bmc;
......@@ -21,6 +27,7 @@ struct m10bmc_sec {
char *fw_name;
u32 fw_name_id;
bool cancel_request;
const struct m10bmc_sec_ops *ops;
};
static DEFINE_XARRAY_ALLOC(fw_upload_xa);
......@@ -31,6 +38,65 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
{
struct intel_m10bmc *m10bmc = sec->m10bmc;
unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
u32 write_count = size / stride;
u32 leftover_offset = write_count * stride;
u32 leftover_size = size - leftover_offset;
u32 leftover_tmp = 0;
int ret;
if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
return -EINVAL;
ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
buf + offset, write_count);
if (ret)
return ret;
/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
if (leftover_size) {
memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
leftover_tmp);
if (ret)
return ret;
}
return 0;
}
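As a worked example of the arithmetic above, with the 4-byte regmap stride used by these BMC regmaps: a 10-byte write gives write_count = 2, leftover_offset = 8 and leftover_size = 2, so bytes 0-7 go out through regmap_bulk_write() and the last two bytes are zero-padded into leftover_tmp and written with a single regmap_write(). m10bmc_sec_read() below applies the same split to reads.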
static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
{
struct intel_m10bmc *m10bmc = sec->m10bmc;
unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
u32 read_count = size / stride;
u32 leftover_offset = read_count * stride;
u32 leftover_size = size - leftover_offset;
u32 leftover_tmp;
int ret;
if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
return -EINVAL;
ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
if (ret)
return ret;
/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
if (leftover_size) {
ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
if (ret)
return ret;
memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
}
return 0;
}
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
u32 prog_addr, u32 reh_addr, char *buf)
......@@ -38,11 +104,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
struct m10bmc_sec *sec = dev_get_drvdata(dev);
int sha_num_bytes, i, ret, cnt = 0;
u8 hash[REH_SHA384_SIZE];
unsigned int stride;
u32 magic;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
if (ret)
return ret;
......@@ -50,19 +114,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return sysfs_emit(buf, "hash not programmed\n");
sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
if ((sha_num_bytes % stride) ||
(sha_num_bytes != REH_SHA256_SIZE &&
sha_num_bytes != REH_SHA384_SIZE)) {
if (sha_num_bytes != REH_SHA256_SIZE &&
sha_num_bytes != REH_SHA384_SIZE) {
dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
sha_num_bytes);
return -EINVAL;
}
ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
hash, sha_num_bytes / stride);
ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
if (ret) {
dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
reh_addr, sha_num_bytes / stride, ret);
dev_err(dev, "failed to read root entry hash\n");
return ret;
}
......@@ -73,16 +134,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return cnt;
}
#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
{ \
struct m10bmc_sec *sec = dev_get_drvdata(dev); \
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
return show_root_entry_hash(dev, csr_map->_name##_magic, \
csr_map->_name##_prog_addr, \
csr_map->_name##_reh_addr, \
buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)
DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
DEVICE_ATTR_SEC_REH_RO(bmc);
DEVICE_ATTR_SEC_REH_RO(sr);
DEVICE_ATTR_SEC_REH_RO(pr);
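For reference, DEVICE_ATTR_SEC_REH_RO(bmc) above expands to roughly the following, so the per-board magic value and flash addresses now come from the csr_map rather than from macro arguments:

static ssize_t bmc_root_entry_hash_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;

	return show_root_entry_hash(dev, csr_map->bmc_magic,
				    csr_map->bmc_prog_addr,
				    csr_map->bmc_reh_addr, buf);
}
static DEVICE_ATTR_RO(bmc_root_entry_hash);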
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
......@@ -90,27 +159,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
struct m10bmc_sec *sec = dev_get_drvdata(dev);
DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
__le32 csk_le32[CSK_32ARRAY_SIZE];
u32 csk32[CSK_32ARRAY_SIZE];
int ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (size % stride) {
dev_err(sec->dev,
"CSK vector size (0x%x) not aligned to stride (0x%x)\n",
size, stride);
WARN_ON_ONCE(1);
return -EINVAL;
}
ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
size / stride);
ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
if (ret) {
dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
addr, size / stride, ret);
dev_err(sec->dev, "failed to read CSK vector\n");
return ret;
}
......@@ -122,18 +180,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ return show_canceled_csk(dev, _addr, buf); } \
{ \
struct m10bmc_sec *sec = dev_get_drvdata(dev); \
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
return show_canceled_csk(dev, \
csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)
#define CSK_VEC_OFFSET 0x34
DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);
#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
......@@ -141,31 +206,21 @@ static ssize_t flash_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct m10bmc_sec *sec = dev_get_drvdata(dev);
unsigned int stride, num_bits;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned int num_bits;
u8 *flash_buf;
int cnt, ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
if (FLASH_COUNT_SIZE % stride) {
dev_err(sec->dev,
"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
FLASH_COUNT_SIZE, stride);
WARN_ON_ONCE(1);
return -EINVAL;
}
flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
if (!flash_buf)
return -ENOMEM;
ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
flash_buf, FLASH_COUNT_SIZE / stride);
ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
FLASH_COUNT_SIZE);
if (ret) {
dev_err(sec->dev,
"failed to read flash count: %x cnt %x: %d\n",
STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
dev_err(sec->dev, "failed to read flash count\n");
goto exit_free;
}
cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
......@@ -200,25 +255,81 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return ret;
return FIELD_GET(DRBL_RSU_STATUS, doorbell);
}
static bool rsu_status_ok(u32 status)
{
return (status == RSU_STAT_NORMAL ||
status == RSU_STAT_NIOS_OK ||
status == RSU_STAT_USER_OK ||
status == RSU_STAT_FACTORY_OK);
}
static bool rsu_progress_done(u32 progress)
{
return (progress == RSU_PROG_IDLE ||
progress == RSU_PROG_RSU_DONE);
}
static bool rsu_progress_busy(u32 progress)
{
return (progress == RSU_PROG_AUTHENTICATING ||
progress == RSU_PROG_COPYING ||
progress == RSU_PROG_UPDATE_CANCEL ||
progress == RSU_PROG_PROGRAM_KEY_HASH);
}
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
u32 *progress, u32 *status)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
if (ret)
return ret;
ret = sec->ops->rsu_status(sec);
if (ret < 0)
return ret;
*status = ret;
*progress = rsu_prog(*doorbell_reg);
return 0;
}
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
if (!rsu_progress_done(rsu_prog(doorbell))) {
log_error_regs(sec, doorbell);
return FW_UPLOAD_ERR_BUSY;
}
......@@ -226,19 +337,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_NONE;
}
static inline bool rsu_start_done(u32 doorbell)
static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
u32 status, progress;
if (doorbell & DRBL_RSU_REQUEST)
if (doorbell_reg & DRBL_RSU_REQUEST)
return false;
status = rsu_stat(doorbell);
if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
return true;
progress = rsu_prog(doorbell);
if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
if (!rsu_progress_done(progress))
return true;
return false;
......@@ -246,11 +353,12 @@ static inline bool rsu_start_done(u32 doorbell)
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
u32 doorbell, status;
int ret;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell_reg, progress, status;
int ret, err;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
DRBL_RSU_REQUEST |
FIELD_PREP(DRBL_HOST_STATUS,
......@@ -258,26 +366,25 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
doorbell,
rsu_start_done(doorbell),
ret = read_poll_timeout(m10bmc_sec_progress_status, err,
err < 0 || rsu_start_done(doorbell_reg, progress, status),
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
NIOS_HANDSHAKE_TIMEOUT_US,
false,
sec, &doorbell_reg, &progress, &status);
if (ret == -ETIMEDOUT) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
} else if (err) {
return FW_UPLOAD_ERR_RW_ERROR;
}
status = rsu_stat(doorbell);
if (status == RSU_STAT_WEAROUT) {
dev_warn(sec->dev, "Excessive flash update count detected\n");
return FW_UPLOAD_ERR_WEAROUT;
} else if (status == RSU_STAT_ERASE_FAIL) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
......@@ -286,11 +393,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned long poll_timeout;
u32 doorbell, progress;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
......@@ -300,7 +408,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
if (time_after(jiffies, poll_timeout))
break;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
......@@ -319,11 +427,12 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
u32 doorbell;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell_reg, status;
int ret;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_WRITE_DONE));
......@@ -331,68 +440,58 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
doorbell,
rsu_prog(doorbell) != RSU_PROG_READY,
csr_map->base + csr_map->doorbell,
doorbell_reg,
rsu_prog(doorbell_reg) != RSU_PROG_READY,
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
log_error_regs(sec, doorbell);
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
}
switch (rsu_stat(doorbell)) {
case RSU_STAT_NORMAL:
case RSU_STAT_NIOS_OK:
case RSU_STAT_USER_OK:
case RSU_STAT_FACTORY_OK:
break;
default:
log_error_regs(sec, doorbell);
ret = sec->ops->rsu_status(sec);
if (ret < 0)
return ret;
status = ret;
if (!rsu_status_ok(status)) {
log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
return FW_UPLOAD_ERR_NONE;
}
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
u32 progress, status;
if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
return -EIO;
switch (rsu_stat(*doorbell)) {
case RSU_STAT_NORMAL:
case RSU_STAT_NIOS_OK:
case RSU_STAT_USER_OK:
case RSU_STAT_FACTORY_OK:
break;
default:
if (!rsu_status_ok(status))
return -EINVAL;
}
switch (rsu_prog(*doorbell)) {
case RSU_PROG_IDLE:
case RSU_PROG_RSU_DONE:
if (rsu_progress_done(progress))
return 0;
case RSU_PROG_AUTHENTICATING:
case RSU_PROG_COPYING:
case RSU_PROG_UPDATE_CANCEL:
case RSU_PROG_PROGRAM_KEY_HASH:
if (rsu_progress_busy(progress))
return -EAGAIN;
default:
return -EINVAL;
}
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
......@@ -400,7 +499,7 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_BUSY;
ret = regmap_update_bits(sec->m10bmc->regmap,
M10BMC_SYS_BASE + M10BMC_DOORBELL,
csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_ABORT_RSU));
......@@ -441,19 +540,19 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
u32 offset, u32 size, u32 *written)
{
struct m10bmc_sec *sec = fwl->dd_handle;
u32 blk_size, doorbell, extra_offset;
unsigned int stride, extra = 0;
const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
struct intel_m10bmc *m10bmc = sec->m10bmc;
u32 blk_size, doorbell;
int ret;
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (sec->cancel_request)
return rsu_cancel(sec);
ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
......@@ -461,27 +560,11 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
return FW_UPLOAD_ERR_HW_ERROR;
}
WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
ret = regmap_bulk_write(sec->m10bmc->regmap,
M10BMC_STAGING_BASE + offset,
(void *)data + offset,
blk_size / stride);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
/*
* If blk_size is not aligned to stride, then handle the extra
* bytes with regmap_write.
*/
if (blk_size % stride) {
extra_offset = offset + ALIGN_DOWN(blk_size, stride);
memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
ret = regmap_write(sec->m10bmc->regmap,
M10BMC_STAGING_BASE + extra_offset, extra);
ret = m10bmc_sec_write(sec, data, offset, blk_size);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
*written = blk_size;
return FW_UPLOAD_ERR_NONE;
......@@ -543,12 +626,16 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
static const struct fw_upload_ops m10bmc_ops = {
.prepare = m10bmc_sec_prepare,
.write = m10bmc_sec_write,
.write = m10bmc_sec_fw_write,
.poll_complete = m10bmc_sec_poll_complete,
.cancel = m10bmc_sec_cancel,
.cleanup = m10bmc_sec_cleanup,
};
static const struct m10bmc_sec_ops m10sec_n3000_ops = {
.rsu_status = m10bmc_sec_n3000_rsu_status,
};
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
......@@ -564,6 +651,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
sec->dev = &pdev->dev;
sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
dev_set_drvdata(&pdev->dev, sec);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
......@@ -604,9 +692,11 @@ static int m10bmc_sec_remove(struct platform_device *pdev)
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "d5005bmc-sec-update",
.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{ }
};
......
......@@ -6,6 +6,7 @@
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
......@@ -33,7 +34,7 @@
#define MPF_BITS_PER_COMPONENT_SIZE 22
#define MPF_STATUS_POLL_RETRIES 10000
#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC)
#define MPF_STATUS_BUSY BIT(0)
#define MPF_STATUS_READY BIT(1)
#define MPF_STATUS_SPI_VIOLATION BIT(2)
......@@ -42,46 +43,55 @@
struct mpf_priv {
struct spi_device *spi;
bool program_mode;
u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
u8 rx;
};
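Keeping the one-byte tx/rx buffers in the driver data rather than on the stack (as the old code did) follows the SPI core's expectation that transfer buffers be DMA-safe; the __aligned(ARCH_KMALLOC_MINALIGN) on tx also keeps the I/O bytes from sharing a cache line with the rest of the structure.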
static int mpf_read_status(struct spi_device *spi)
static int mpf_read_status(struct mpf_priv *priv)
{
u8 status = 0, status_command = MPF_SPI_READ_STATUS;
struct spi_transfer xfers[2] = { 0 };
int ret;
/*
* HW status is returned on MISO in the first byte after CS went
* active. However, first reading can be inadequate, so we submit
* two identical SPI transfers and use result of the later one.
*/
xfers[0].tx_buf = &status_command;
xfers[1].tx_buf = &status_command;
xfers[0].rx_buf = &status;
xfers[1].rx_buf = &status;
xfers[0].len = 1;
xfers[1].len = 1;
xfers[0].cs_change = 1;
struct spi_transfer xfers[2] = {
{
.tx_buf = &priv->tx,
.rx_buf = &priv->rx,
.len = 1,
.cs_change = 1,
}, {
.tx_buf = &priv->tx,
.rx_buf = &priv->rx,
.len = 1,
},
};
u8 status;
int ret;
priv->tx = MPF_SPI_READ_STATUS;
ret = spi_sync_transfer(spi, xfers, 2);
ret = spi_sync_transfer(priv->spi, xfers, 2);
if (ret)
return ret;
status = priv->rx;
if ((status & MPF_STATUS_SPI_VIOLATION) ||
(status & MPF_STATUS_SPI_ERROR))
ret = -EIO;
return -EIO;
return ret ? : status;
return status;
}
static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
{
struct mpf_priv *priv = mgr->priv;
struct spi_device *spi;
bool program_mode;
int status;
spi = priv->spi;
program_mode = priv->program_mode;
status = mpf_read_status(spi);
status = mpf_read_status(priv);
if (!program_mode && !status)
return FPGA_MGR_STATE_OPERATING;
......@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
return 0;
}
/* Poll HW status until busy bit is cleared and mask bits are set. */
static int mpf_poll_status(struct spi_device *spi, u8 mask)
static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
{
int status, retries = MPF_STATUS_POLL_RETRIES;
int ret, status;
while (retries--) {
status = mpf_read_status(spi);
if (status < 0)
return status;
if (status & MPF_STATUS_BUSY)
continue;
/*
* Busy poll HW status. Polling stops if any of the following
* conditions are met:
* - timeout is reached
* - mpf_read_status() returns an error
* - busy bit is cleared AND mask bits are set
*/
ret = read_poll_timeout(mpf_read_status, status,
(status < 0) ||
((status & (MPF_STATUS_BUSY | mask)) == mask),
0, MPF_STATUS_POLL_TIMEOUT, false, priv);
if (ret < 0)
return ret;
if (!mask || (status & mask))
return status;
}
return -EBUSY;
}
static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
{
int status = mpf_poll_status(spi, 0);
int status = mpf_poll_status(priv, 0);
if (status < 0)
return status;
return spi_write(spi, buf, buf_size);
return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
}
static int mpf_spi_write_then_read(struct spi_device *spi,
static int mpf_spi_write_then_read(struct mpf_priv *priv,
const void *txbuf, size_t txbuf_size,
void *rxbuf, size_t rxbuf_size)
{
const u8 read_command[] = { MPF_SPI_READ_DATA };
int ret;
ret = mpf_spi_write(spi, txbuf, txbuf_size);
ret = mpf_spi_write(priv, txbuf, txbuf_size);
if (ret)
return ret;
ret = mpf_poll_status(spi, MPF_STATUS_READY);
ret = mpf_poll_status(priv, MPF_STATUS_READY);
if (ret < 0)
return ret;
return spi_write_then_read(spi, read_command, sizeof(read_command),
return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
rxbuf, rxbuf_size);
}
......@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
struct spi_device *spi;
u32 isc_ret = 0;
int ret;
......@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EOPNOTSUPP;
}
spi = priv->spi;
ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
&isc_ret, sizeof(isc_ret));
if (ret || isc_ret) {
dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
......@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EFAULT;
}
ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
if (ret) {
dev_err(dev, "Failed to enter program mode: %d\n", ret);
return ret;
......@@ -272,13 +280,32 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return 0;
}
static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf)
{
struct spi_transfer xfers[2] = {
{
.tx_buf = &priv->tx,
.len = 1,
}, {
.tx_buf = buf,
.len = MPF_SPI_FRAME_SIZE,
},
};
int ret;
ret = mpf_poll_status(priv, 0);
if (ret < 0)
return ret;
priv->tx = MPF_SPI_FRAME;
return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
}
static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
u8 spi_frame_command[] = { MPF_SPI_FRAME };
struct spi_transfer xfers[2] = { 0 };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
struct spi_device *spi;
int ret, i;
if (count % MPF_SPI_FRAME_SIZE) {
......@@ -287,19 +314,8 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
return -EINVAL;
}
spi = priv->spi;
xfers[0].tx_buf = spi_frame_command;
xfers[0].len = sizeof(spi_frame_command);
for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
xfers[1].len = MPF_SPI_FRAME_SIZE;
ret = mpf_poll_status(spi, 0);
if (ret >= 0)
ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE);
if (ret) {
dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
i, count / MPF_SPI_FRAME_SIZE);
......@@ -317,12 +333,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
const u8 release_command[] = { MPF_SPI_RELEASE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
struct spi_device *spi;
int ret;
spi = priv->spi;
ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
if (ret) {
dev_err(dev, "Failed to disable ISC: %d\n", ret);
return ret;
......@@ -330,7 +343,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
usleep_range(1000, 2000);
ret = mpf_spi_write(spi, release_command, sizeof(release_command));
ret = mpf_spi_write(priv, release_command, sizeof(release_command));
if (ret) {
dev_err(dev, "Failed to exit program mode: %d\n", ret);
return ret;
......
......@@ -2341,7 +2341,7 @@ config SENSORS_XGENE
config SENSORS_INTEL_M10_BMC_HWMON
tristate "Intel MAX10 BMC Hardware Monitoring"
depends on MFD_INTEL_M10_BMC
depends on MFD_INTEL_M10_BMC_CORE
help
This driver provides support for the hardware monitoring functionality
on Intel MAX10 BMC chip.
......
......@@ -2224,11 +2224,17 @@ config SGI_MFD_IOC3
If you have an SGI Origin, Octane, or a PCI IOC3 card,
then say Y. Otherwise say N.
config MFD_INTEL_M10_BMC
tristate "Intel MAX 10 Board Management Controller"
config MFD_INTEL_M10_BMC_CORE
tristate
select MFD_CORE
select REGMAP
default n
config MFD_INTEL_M10_BMC_SPI
tristate "Intel MAX 10 Board Management Controller with SPI"
depends on SPI_MASTER
select MFD_INTEL_M10_BMC_CORE
select REGMAP_SPI_AVMM
select MFD_CORE
help
Support for the Intel MAX 10 board management controller using the
SPI interface.
......@@ -2237,6 +2243,18 @@ config MFD_INTEL_M10_BMC
additional drivers must be enabled in order to use the functionality
of the device.
config MFD_INTEL_M10_BMC_PMCI
tristate "Intel MAX 10 Board Management Controller with PMCI"
depends on FPGA_DFL
select MFD_INTEL_M10_BMC_CORE
select REGMAP
help
Support for the Intel MAX 10 board management controller via PMCI.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
of the device.
config MFD_RSMU_I2C
tristate "Renesas Synchronization Management Unit with I2C"
depends on I2C && OF
......
......@@ -269,7 +269,10 @@ obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
obj-$(CONFIG_MFD_SMPRO) += smpro-core.o
obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_CORE) += intel-m10-bmc-core.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_SPI) += intel-m10-bmc-spi.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
......
// SPDX-License-Identifier: GPL-2.0
/*
* Intel MAX 10 Board Management Controller chip - common code
*
* Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
static ssize_t bmc_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->build_version, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmc_version);
static ssize_t bmcfw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->fw_version, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmcfw_version);
static ssize_t mac_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int macaddr_low, macaddr_high;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_low, &macaddr_low);
if (ret)
return ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE1, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE2, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE3, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE4, macaddr_low),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE5, macaddr_high),
(u8)FIELD_GET(M10BMC_N3000_MAC_BYTE6, macaddr_high));
}
static DEVICE_ATTR_RO(mac_address);
static ssize_t mac_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int macaddr_high;
int ret;
ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%u\n", (u8)FIELD_GET(M10BMC_N3000_MAC_COUNT, macaddr_high));
}
static DEVICE_ATTR_RO(mac_count);
static struct attribute *m10bmc_attrs[] = {
&dev_attr_bmc_version.attr,
&dev_attr_bmcfw_version.attr,
&dev_attr_mac_address.attr,
&dev_attr_mac_count.attr,
NULL,
};
static const struct attribute_group m10bmc_group = {
.attrs = m10bmc_attrs,
};
const struct attribute_group *m10bmc_dev_groups[] = {
&m10bmc_group,
NULL,
};
EXPORT_SYMBOL_GPL(m10bmc_dev_groups);
int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info)
{
int ret;
m10bmc->info = info;
dev_set_drvdata(m10bmc->dev, m10bmc);
ret = devm_mfd_add_devices(m10bmc->dev, PLATFORM_DEVID_AUTO,
info->cells, info->n_cells,
NULL, 0, NULL);
if (ret)
dev_err(m10bmc->dev, "Failed to register sub-devices: %d\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(m10bmc_dev_init);
MODULE_DESCRIPTION("Intel MAX 10 BMC core driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0
/*
* MAX10 BMC Platform Management Component Interface (PMCI) based
* interface.
*
* Copyright (C) 2020-2023 Intel Corporation.
*/
#include <linux/device.h>
#include <linux/dfl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
#include <linux/regmap.h>
struct m10bmc_pmci_device {
void __iomem *base;
struct intel_m10bmc m10bmc;
};
/*
* Intel FPGA indirect register access via hardware controller/bridge.
*/
#define INDIRECT_CMD_OFF 0
#define INDIRECT_CMD_CLR 0
#define INDIRECT_CMD_RD BIT(0)
#define INDIRECT_CMD_WR BIT(1)
#define INDIRECT_CMD_ACK BIT(2)
#define INDIRECT_ADDR_OFF 0x4
#define INDIRECT_RD_OFF 0x8
#define INDIRECT_WR_OFF 0xc
#define INDIRECT_INT_US 1
#define INDIRECT_TIMEOUT_US 10000
struct indirect_ctx {
void __iomem *base;
struct device *dev;
};
static int indirect_clear_cmd(struct indirect_ctx *ctx)
{
unsigned int cmd;
int ret;
writel(INDIRECT_CMD_CLR, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, cmd,
cmd == INDIRECT_CMD_CLR,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "timed out waiting clear cmd (residual cmd=0x%x)\n", cmd);
return ret;
}
static int indirect_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct indirect_ctx *ctx = context;
unsigned int cmd, ack, tmpval;
int ret, ret2;
cmd = readl(ctx->base + INDIRECT_CMD_OFF);
if (cmd != INDIRECT_CMD_CLR)
dev_warn(ctx->dev, "residual cmd 0x%x on read entry\n", cmd);
writel(reg, ctx->base + INDIRECT_ADDR_OFF);
writel(INDIRECT_CMD_RD, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
(ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "read timed out on reg 0x%x ack 0x%x\n", reg, ack);
else
tmpval = readl(ctx->base + INDIRECT_RD_OFF);
ret2 = indirect_clear_cmd(ctx);
if (ret)
return ret;
if (ret2)
return ret2;
*val = tmpval;
return 0;
}
static int indirect_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct indirect_ctx *ctx = context;
unsigned int cmd, ack;
int ret, ret2;
cmd = readl(ctx->base + INDIRECT_CMD_OFF);
if (cmd != INDIRECT_CMD_CLR)
dev_warn(ctx->dev, "residual cmd 0x%x on write entry\n", cmd);
writel(val, ctx->base + INDIRECT_WR_OFF);
writel(reg, ctx->base + INDIRECT_ADDR_OFF);
writel(INDIRECT_CMD_WR, ctx->base + INDIRECT_CMD_OFF);
ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
(ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
if (ret)
dev_err(ctx->dev, "write timed out on reg 0x%x ack 0x%x\n", reg, ack);
ret2 = indirect_clear_cmd(ctx);
if (ret)
return ret;
return ret2;
}
static const struct regmap_range m10bmc_pmci_regmap_range[] = {
regmap_reg_range(M10BMC_N6000_SYS_BASE, M10BMC_N6000_SYS_END),
};
static const struct regmap_access_table m10bmc_pmci_access_table = {
.yes_ranges = m10bmc_pmci_regmap_range,
.n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range),
};
static struct regmap_config m10bmc_pmci_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.wr_table = &m10bmc_pmci_access_table,
.rd_table = &m10bmc_pmci_access_table,
.reg_read = &indirect_reg_read,
.reg_write = &indirect_reg_write,
.max_register = M10BMC_N6000_SYS_END,
};
static struct mfd_cell m10bmc_pmci_n6000_bmc_subdevs[] = {
{ .name = "n6000bmc-hwmon" },
};
static const struct m10bmc_csr_map m10bmc_n6000_csr_map = {
.base = M10BMC_N6000_SYS_BASE,
.build_version = M10BMC_N6000_BUILD_VER,
.fw_version = NIOS2_N6000_FW_VERSION,
.mac_low = M10BMC_N6000_MAC_LOW,
.mac_high = M10BMC_N6000_MAC_HIGH,
.doorbell = M10BMC_N6000_DOORBELL,
.auth_result = M10BMC_N6000_AUTH_RESULT,
.bmc_prog_addr = M10BMC_N6000_BMC_PROG_ADDR,
.bmc_reh_addr = M10BMC_N6000_BMC_REH_ADDR,
.bmc_magic = M10BMC_N6000_BMC_PROG_MAGIC,
.sr_prog_addr = M10BMC_N6000_SR_PROG_ADDR,
.sr_reh_addr = M10BMC_N6000_SR_REH_ADDR,
.sr_magic = M10BMC_N6000_SR_PROG_MAGIC,
.pr_prog_addr = M10BMC_N6000_PR_PROG_ADDR,
.pr_reh_addr = M10BMC_N6000_PR_REH_ADDR,
.pr_magic = M10BMC_N6000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N6000_STAGING_FLASH_COUNT,
};
static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000 = {
.cells = m10bmc_pmci_n6000_bmc_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_pmci_n6000_bmc_subdevs),
.csr_map = &m10bmc_n6000_csr_map,
};
static int m10bmc_pmci_probe(struct dfl_device *ddev)
{
struct device *dev = &ddev->dev;
struct m10bmc_pmci_device *pmci;
struct indirect_ctx *ctx;
pmci = devm_kzalloc(dev, sizeof(*pmci), GFP_KERNEL);
if (!pmci)
return -ENOMEM;
pmci->m10bmc.dev = dev;
pmci->base = devm_ioremap_resource(dev, &ddev->mmio_res);
if (IS_ERR(pmci->base))
return PTR_ERR(pmci->base);
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->base = pmci->base + M10BMC_N6000_INDIRECT_BASE;
ctx->dev = dev;
indirect_clear_cmd(ctx);
pmci->m10bmc.regmap = devm_regmap_init(dev, NULL, ctx, &m10bmc_pmci_regmap_config);
if (IS_ERR(pmci->m10bmc.regmap))
return PTR_ERR(pmci->m10bmc.regmap);
return m10bmc_dev_init(&pmci->m10bmc, &m10bmc_pmci_n6000);
}
#define FME_FEATURE_ID_M10BMC_PMCI 0x12
static const struct dfl_device_id m10bmc_pmci_ids[] = {
{ FME_ID, FME_FEATURE_ID_M10BMC_PMCI },
{ }
};
MODULE_DEVICE_TABLE(dfl, m10bmc_pmci_ids);
static struct dfl_driver m10bmc_pmci_driver = {
.drv = {
.name = "intel-m10-bmc",
.dev_groups = m10bmc_dev_groups,
},
.id_table = m10bmc_pmci_ids,
.probe = m10bmc_pmci_probe,
};
module_dfl_driver(m10bmc_pmci_driver);
MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
......@@ -5,39 +5,18 @@
* Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/dev_printk.h>
#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
enum m10bmc_type {
M10_N3000,
M10_D5005,
M10_N5010,
};
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
{ .name = "d5005bmc-sec-update" }
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
{ .name = "n3000bmc-sec-update" },
};
static struct mfd_cell m10bmc_n5010_subdevs[] = {
{ .name = "n5010bmc-hwmon" },
};
static const struct regmap_range m10bmc_regmap_range[] = {
regmap_reg_range(M10BMC_LEGACY_BUILD_VER, M10BMC_LEGACY_BUILD_VER),
regmap_reg_range(M10BMC_SYS_BASE, M10BMC_SYS_END),
regmap_reg_range(M10BMC_FLASH_BASE, M10BMC_FLASH_END),
regmap_reg_range(M10BMC_N3000_LEGACY_BUILD_VER, M10BMC_N3000_LEGACY_BUILD_VER),
regmap_reg_range(M10BMC_N3000_SYS_BASE, M10BMC_N3000_SYS_END),
regmap_reg_range(M10BMC_N3000_FLASH_BASE, M10BMC_N3000_FLASH_END),
};
static const struct regmap_access_table m10bmc_access_table = {
......@@ -51,89 +30,9 @@ static struct regmap_config intel_m10bmc_regmap_config = {
.reg_stride = 4,
.wr_table = &m10bmc_access_table,
.rd_table = &m10bmc_access_table,
.max_register = M10BMC_MEM_END,
.max_register = M10BMC_N3000_MEM_END,
};
static ssize_t bmc_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, M10BMC_BUILD_VER, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmc_version);
static ssize_t bmcfw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *ddata = dev_get_drvdata(dev);
unsigned int val;
int ret;
ret = m10bmc_sys_read(ddata, NIOS2_FW_VERSION, &val);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", val);
}
static DEVICE_ATTR_RO(bmcfw_version);
static ssize_t mac_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *max10 = dev_get_drvdata(dev);
unsigned int macaddr_low, macaddr_high;
int ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_LOW, &macaddr_low);
if (ret)
return ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
(u8)FIELD_GET(M10BMC_MAC_BYTE1, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE2, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE3, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE4, macaddr_low),
(u8)FIELD_GET(M10BMC_MAC_BYTE5, macaddr_high),
(u8)FIELD_GET(M10BMC_MAC_BYTE6, macaddr_high));
}
static DEVICE_ATTR_RO(mac_address);
static ssize_t mac_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_m10bmc *max10 = dev_get_drvdata(dev);
unsigned int macaddr_high;
int ret;
ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
if (ret)
return ret;
return sysfs_emit(buf, "%u\n",
(u8)FIELD_GET(M10BMC_MAC_COUNT, macaddr_high));
}
static DEVICE_ATTR_RO(mac_count);
static struct attribute *m10bmc_attrs[] = {
&dev_attr_bmc_version.attr,
&dev_attr_bmcfw_version.attr,
&dev_attr_mac_address.attr,
&dev_attr_mac_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(m10bmc);
static int check_m10bmc_version(struct intel_m10bmc *ddata)
{
unsigned int v;
......@@ -142,16 +41,16 @@ static int check_m10bmc_version(struct intel_m10bmc *ddata)
/*
* This check is to filter out the very old legacy BMC versions. In the
* old BMC chips, the BMC version info is stored in the old version
* register (M10BMC_LEGACY_BUILD_VER), so its read out value would have
* not been M10BMC_VER_LEGACY_INVALID (0xffffffff). But in new BMC
* register (M10BMC_N3000_LEGACY_BUILD_VER), so its read out value would have
* not been M10BMC_N3000_VER_LEGACY_INVALID (0xffffffff). But in new BMC
* chips that the driver supports, the value of this register should be
* M10BMC_VER_LEGACY_INVALID.
* M10BMC_N3000_VER_LEGACY_INVALID.
*/
ret = m10bmc_raw_read(ddata, M10BMC_LEGACY_BUILD_VER, &v);
ret = m10bmc_raw_read(ddata, M10BMC_N3000_LEGACY_BUILD_VER, &v);
if (ret)
return -ENODEV;
if (v != M10BMC_VER_LEGACY_INVALID) {
if (v != M10BMC_N3000_VER_LEGACY_INVALID) {
dev_err(ddata->dev, "bad version M10BMC detected\n");
return -ENODEV;
}
......@@ -162,19 +61,19 @@ static int check_m10bmc_version(struct intel_m10bmc *ddata)
static int intel_m10_bmc_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct intel_m10bmc_platform_info *info;
struct device *dev = &spi->dev;
struct mfd_cell *cells;
struct intel_m10bmc *ddata;
int ret, n_cell;
int ret;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
info = (struct intel_m10bmc_platform_info *)id->driver_data;
ddata->dev = dev;
ddata->regmap =
devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
ddata->regmap = devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
if (IS_ERR(ddata->regmap)) {
ret = PTR_ERR(ddata->regmap);
dev_err(dev, "Failed to allocate regmap: %d\n", ret);
......@@ -189,35 +88,66 @@ static int intel_m10_bmc_spi_probe(struct spi_device *spi)
return ret;
}
switch (id->driver_data) {
case M10_N3000:
cells = m10bmc_pacn3000_subdevs;
n_cell = ARRAY_SIZE(m10bmc_pacn3000_subdevs);
break;
case M10_D5005:
cells = m10bmc_d5005_subdevs;
n_cell = ARRAY_SIZE(m10bmc_d5005_subdevs);
break;
case M10_N5010:
cells = m10bmc_n5010_subdevs;
n_cell = ARRAY_SIZE(m10bmc_n5010_subdevs);
break;
default:
return -ENODEV;
}
return m10bmc_dev_init(ddata, info);
}
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cell,
NULL, 0, NULL);
if (ret)
dev_err(dev, "Failed to register sub-devices: %d\n", ret);
static const struct m10bmc_csr_map m10bmc_n3000_csr_map = {
.base = M10BMC_N3000_SYS_BASE,
.build_version = M10BMC_N3000_BUILD_VER,
.fw_version = NIOS2_N3000_FW_VERSION,
.mac_low = M10BMC_N3000_MAC_LOW,
.mac_high = M10BMC_N3000_MAC_HIGH,
.doorbell = M10BMC_N3000_DOORBELL,
.auth_result = M10BMC_N3000_AUTH_RESULT,
.bmc_prog_addr = M10BMC_N3000_BMC_PROG_ADDR,
.bmc_reh_addr = M10BMC_N3000_BMC_REH_ADDR,
.bmc_magic = M10BMC_N3000_BMC_PROG_MAGIC,
.sr_prog_addr = M10BMC_N3000_SR_PROG_ADDR,
.sr_reh_addr = M10BMC_N3000_SR_REH_ADDR,
.sr_magic = M10BMC_N3000_SR_PROG_MAGIC,
.pr_prog_addr = M10BMC_N3000_PR_PROG_ADDR,
.pr_reh_addr = M10BMC_N3000_PR_REH_ADDR,
.pr_magic = M10BMC_N3000_PR_PROG_MAGIC,
.rsu_update_counter = M10BMC_N3000_STAGING_FLASH_COUNT,
};
return ret;
}
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
{ .name = "d5005bmc-sec-update" },
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
{ .name = "n3000bmc-hwmon" },
{ .name = "n3000bmc-retimer" },
{ .name = "n3000bmc-sec-update" },
};
static struct mfd_cell m10bmc_n5010_subdevs[] = {
{ .name = "n5010bmc-hwmon" },
};
static const struct intel_m10bmc_platform_info m10bmc_spi_n3000 = {
.cells = m10bmc_pacn3000_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_pacn3000_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_d5005 = {
.cells = m10bmc_d5005_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_d5005_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct intel_m10bmc_platform_info m10bmc_spi_n5010 = {
.cells = m10bmc_n5010_subdevs,
.n_cells = ARRAY_SIZE(m10bmc_n5010_subdevs),
.csr_map = &m10bmc_n3000_csr_map,
};
static const struct spi_device_id m10bmc_spi_id[] = {
{ "m10-n3000", M10_N3000 },
{ "m10-d5005", M10_D5005 },
{ "m10-n5010", M10_N5010 },
{ "m10-n3000", (kernel_ulong_t)&m10bmc_spi_n3000 },
{ "m10-d5005", (kernel_ulong_t)&m10bmc_spi_d5005 },
{ "m10-n5010", (kernel_ulong_t)&m10bmc_spi_n5010 },
{ }
};
MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
......@@ -225,14 +155,14 @@ MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
static struct spi_driver intel_m10bmc_spi_driver = {
.driver = {
.name = "intel-m10-bmc",
.dev_groups = m10bmc_groups,
.dev_groups = m10bmc_dev_groups,
},
.probe = intel_m10_bmc_spi_probe,
.id_table = m10bmc_spi_id,
};
module_spi_driver(intel_m10bmc_spi_driver);
MODULE_DESCRIPTION("Intel MAX 10 BMC Device Driver");
MODULE_DESCRIPTION("Intel MAX 10 BMC SPI bus interface");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:intel-m10-bmc");
......@@ -7,40 +7,43 @@
#ifndef __MFD_INTEL_M10_BMC_H
#define __MFD_INTEL_M10_BMC_H
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/regmap.h>
#define M10BMC_LEGACY_BUILD_VER 0x300468
#define M10BMC_SYS_BASE 0x300800
#define M10BMC_SYS_END 0x300fff
#define M10BMC_FLASH_BASE 0x10000000
#define M10BMC_FLASH_END 0x1fffffff
#define M10BMC_MEM_END M10BMC_FLASH_END
#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
#define M10BMC_N3000_SYS_BASE 0x300800
#define M10BMC_N3000_SYS_END 0x300fff
#define M10BMC_N3000_FLASH_BASE 0x10000000
#define M10BMC_N3000_FLASH_END 0x1fffffff
#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
#define M10BMC_STAGING_BASE 0x18000000
#define M10BMC_STAGING_SIZE 0x3800000
/* Register offset of system registers */
#define NIOS2_FW_VERSION 0x0
#define M10BMC_MAC_LOW 0x10
#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
#define M10BMC_MAC_HIGH 0x14
#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
#define M10BMC_MAC_COUNT GENMASK(23, 16)
#define M10BMC_TEST_REG 0x3c
#define M10BMC_BUILD_VER 0x68
#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
#define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_VER_LEGACY_INVALID 0xffffffff
#define NIOS2_N3000_FW_VERSION 0x0
#define M10BMC_N3000_MAC_LOW 0x10
#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
#define M10BMC_N3000_MAC_HIGH 0x14
#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
#define M10BMC_N3000_TEST_REG 0x3c
#define M10BMC_N3000_BUILD_VER 0x68
#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
/* Secure update doorbell register, in system register region */
#define M10BMC_DOORBELL 0x400
#define M10BMC_N3000_DOORBELL 0x400
/* Authorization Result register, in system register region */
#define M10BMC_AUTH_RESULT 0x404
#define M10BMC_N3000_AUTH_RESULT 0x404
/* Doorbell register fields */
#define DRBL_RSU_REQUEST BIT(0)
......@@ -88,7 +91,6 @@
#define HOST_STATUS_ABORT_RSU 0x2
#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell)
/* interval 100ms and timeout 5s */
#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
......@@ -103,29 +105,94 @@
#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
/* Addresses for security related data in FLASH */
#define BMC_REH_ADDR 0x17ffc004
#define BMC_PROG_ADDR 0x17ffc000
#define BMC_PROG_MAGIC 0x5746
#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
#define SR_REH_ADDR 0x17ffd004
#define SR_PROG_ADDR 0x17ffd000
#define SR_PROG_MAGIC 0x5253
#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
#define PR_REH_ADDR 0x17ffe004
#define PR_PROG_ADDR 0x17ffe000
#define PR_PROG_MAGIC 0x5250
#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
/* Address of 4KB inverted bit vector containing staging area FLASH count */
#define STAGING_FLASH_COUNT 0x17ffb000
#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
#define M10BMC_N6000_INDIRECT_BASE 0x400
#define M10BMC_N6000_SYS_BASE 0x0
#define M10BMC_N6000_SYS_END 0xfff
#define M10BMC_N6000_DOORBELL 0x1c0
#define M10BMC_N6000_AUTH_RESULT 0x1c4
#define M10BMC_N6000_BUILD_VER 0x0
#define NIOS2_N6000_FW_VERSION 0x4
#define M10BMC_N6000_MAC_LOW 0x20
#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
/* Addresses for security related data in FLASH */
#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
/**
* struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
*/
struct m10bmc_csr_map {
unsigned int base;
unsigned int build_version;
unsigned int fw_version;
unsigned int mac_low;
unsigned int mac_high;
unsigned int doorbell;
unsigned int auth_result;
unsigned int bmc_prog_addr;
unsigned int bmc_reh_addr;
unsigned int bmc_magic;
unsigned int sr_prog_addr;
unsigned int sr_reh_addr;
unsigned int sr_magic;
unsigned int pr_prog_addr;
unsigned int pr_reh_addr;
unsigned int pr_magic;
unsigned int rsu_update_counter;
};
/**
* struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
* @cells: MFD cells
* @n_cells: MFD cells ARRAY_SIZE()
* @csr_map: the mappings for register definition of MAX10 BMC
*/
struct intel_m10bmc_platform_info {
struct mfd_cell *cells;
int n_cells;
const struct m10bmc_csr_map *csr_map;
};
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
* @info: the platform information for MAX10 BMC
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
const struct intel_m10bmc_platform_info *info;
};
/*
......@@ -152,11 +219,22 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
* The base of the system registers could be configured by HW developers, and
* in HW SPEC, the base is not added to the addresses of the system registers.
*
* This macro helps to simplify the accessing of the system registers. And if
* This function helps to simplify the accessing of the system registers. And if
* the base is reconfigured in HW, SW developers could simply change the
* M10BMC_SYS_BASE accordingly.
* csr_map's base accordingly.
*/
static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
unsigned int *val)
{
const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
}
/*
* MAX10 BMC Core support
*/
#define m10bmc_sys_read(m10bmc, offset, val) \
m10bmc_raw_read(m10bmc, M10BMC_SYS_BASE + (offset), val)
int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
extern const struct attribute_group *m10bmc_dev_groups[];
#endif /* __MFD_INTEL_M10_BMC_H */
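A minimal usage sketch of the new inline helper, mirroring what the sec-update driver above does: a sub-driver holding a struct intel_m10bmc pointer reads a system register through the per-board csr_map offset, and the same code serves both the N3000 (SPI) and N6000 (PMCI) layouts because only the csr_map contents differ:

	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return ret;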