Commit 22cdbd1d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (108 commits)
  ehea: Fixing statistics
  bonding: Fix lockdep warning after bond_vlan_rx_register()
  tunnels: Fix tunnels change rcu protection
  caif-u5500: Build config for CAIF shared mem driver
  caif-u5500: CAIF shared memory mailbox interface
  caif-u5500: CAIF shared memory transport protocol
  caif-u5500: Adding shared memory include
  drivers/isdn: delete double assignment
  drivers/net/typhoon.c: delete double assignment
  drivers/net/sb1000.c: delete double assignment
  qlcnic: define valid vlan id range
  qlcnic: reduce rx ring size
  qlcnic: fix mac learning
  ehea: fix use after free
  inetpeer: __rcu annotations
  fib_rules: __rcu annotates ctarget
  tunnels: add __rcu annotations
  net: add __rcu annotations to protocol
  ipv4: add __rcu annotations to routes.c
  qlge: bugfix: Restoring the vlan setting.
  ...
......@@ -177,18 +177,6 @@ Doing it all yourself
A convenience function to print out the PHY status neatly.
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
Clear the PHY's interrupt, and configure which ones are allowed,
respectively. Currently only supports all on, or all off.
int phy_enable_interrupts(struct phy_device *phydev);
int phy_disable_interrupts(struct phy_device *phydev);
Functions which enable/disable PHY interrupts, clearing them
before and after, respectively.
int phy_start_interrupts(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
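For context, a minimal sketch (not part of this merge; my_open/my_stop, struct my_priv and its phydev field are hypothetical) of the usual phylib pattern these two helpers support, where the driver lets phylib own the PHY interrupt:

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	struct my_priv {
		struct phy_device *phydev;
	};

	static int my_open(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		int err;

		/* request the PHY IRQ and enable interrupts in the PHY */
		err = phy_start_interrupts(priv->phydev);
		if (err)
			return err;
		phy_start(priv->phydev);
		return 0;
	}

	static int my_stop(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		phy_stop(priv->phydev);
		/* disable interrupts in the PHY and free the IRQ */
		return phy_stop_interrupts(priv->phydev);
	}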
......@@ -213,12 +201,6 @@ Doing it all yourself
Fills the phydev structure with up-to-date information about the current
settings in the PHY.
void phy_sanitize_settings(struct phy_device *phydev)
Resolves differences between currently desired settings, and
supported settings for the given PHY device. Does not make
the changes in the hardware, though.
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
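These two are typically thin pass-throughs from a driver's ethtool_ops; a hedged sketch, reusing the hypothetical struct my_priv from the sketch above:

	static int my_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	{
		struct my_priv *priv = netdev_priv(dev);

		return phy_ethtool_gset(priv->phydev, cmd);
	}

	static int my_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	{
		struct my_priv *priv = netdev_priv(dev);

		return phy_ethtool_sset(priv->phydev, cmd);
	}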
......
......@@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
if (readl(&eprom->magic) != ENI155_MAGIC) {
printk("\n");
printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
"magic - expected 0x%x, got 0x%x\n",dev->number,
ENI155_MAGIC,(unsigned) readl(&eprom->magic));
printk(KERN_ERR DEV_LABEL
"(itf %d): bad magic - expected 0x%x, got 0x%x\n",
dev->number, ENI155_MAGIC,
(unsigned)readl(&eprom->magic));
error = -EINVAL;
goto unmap;
}
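For context (general printk behaviour of this era, not taken from the patch): the log-level macros are plain string prefixes, so doubling one embeds the second marker literally in the message, which is what the hunk above fixes:

	printk(KERN_ERR KERN_ERR "bad magic\n");
	/* expands to printk("<3><3>bad magic\n"): the first "<3>" selects the
	 * log level, the second is printed as part of the message text */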
......
......@@ -31,48 +31,6 @@
#include <linux/connector.h>
#include <linux/delay.h>
/*
* This job is sent to the kevent workqueue.
* Until the first event is sent to any callback, the connector workqueue
* is not created, to avoid a useless idle kernel task.
* Once the first event is received, we create this dedicated workqueue which
* is necessary because the flow of data can be high and we don't want
* to encumber keventd with that.
*/
static void cn_queue_create(struct work_struct *work)
{
struct cn_queue_dev *dev;
dev = container_of(work, struct cn_queue_dev, wq_creation);
dev->cn_queue = create_singlethread_workqueue(dev->name);
/* If we fail, we will use keventd for all following connector jobs */
WARN_ON(!dev->cn_queue);
}
/*
* Queue data to be sent to a callback.
* If the connector workqueue is already created, we queue the job on it.
* Otherwise, we queue the job to kevent and queue the connector workqueue
* creation too.
*/
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
struct cn_queue_dev *pdev = cbq->pdev;
if (likely(pdev->cn_queue))
return queue_work(pdev->cn_queue, work);
/* Don't create the connector workqueue twice */
if (atomic_inc_return(&pdev->wq_requested) == 1)
schedule_work(&pdev->wq_creation);
else
atomic_dec(&pdev->wq_requested);
return schedule_work(work);
}
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
......@@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
/* The first jobs have been sent to kevent, flush them too */
flush_scheduled_work();
if (cbq->pdev->cn_queue)
flush_workqueue(cbq->pdev->cn_queue);
flush_workqueue(cbq->pdev->cn_queue);
kfree(cbq);
}
......@@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
init_waitqueue_head(&dev->wq_created);
dev->nls = nls;
INIT_WORK(&dev->wq_creation, cn_queue_create);
dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
if (!dev->cn_queue) {
kfree(dev);
return NULL;
}
return dev;
}
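Taken together, the hunks above reduce the connector's workqueue handling to the plain pattern below (a sketch assembled from this diff, not new code):

	/* allocation, in cn_queue_alloc_dev(): one ordered workqueue, created eagerly */
	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);

	/* submission: queue directly, no lazy-creation indirection */
	queue_work(dev->cn_queue, &cbq->work);

	/* teardown: flush, then destroy */
	flush_workqueue(dev->cn_queue);
	destroy_workqueue(dev->cn_queue);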
......@@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
long timeout;
DEFINE_WAIT(wait);
/* Flush the first pending jobs queued on kevent */
flush_scheduled_work();
/* If the connector workqueue creation is still pending, wait for it */
prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
timeout = schedule_timeout(HZ * 2);
if (!timeout && !dev->cn_queue)
WARN_ON(1);
}
finish_wait(&dev->wq_created, &wait);
if (dev->cn_queue) {
flush_workqueue(dev->cn_queue);
destroy_workqueue(dev->cn_queue);
}
flush_workqueue(dev->cn_queue);
destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
......
......@@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
if (queue_cn_work(__cbq, &__cbq->work))
if (queue_work(dev->cbdev->cn_queue,
&__cbq->work))
err = 0;
else
err = -EINVAL;
......@@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
d->callback = __cbq->data.callback;
d->free = __new_cbq;
__new_cbq->pdev = __cbq->pdev;
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
if (queue_cn_work(__new_cbq,
&__new_cbq->work))
if (queue_work(dev->cbdev->cn_queue,
&__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);
......
......@@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
hw->ipac.hscx[0].slot = 0x23;
hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);
......
......@@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
char tmp[80];
struct sk_buff *skb = arg;
p = skb->data;
/* Channel Identification */
p = skb->data;
if ((p = findie(p, skb->len, WE0_chanID, 0))) {
p = findie(skb->data, skb->len, WE0_chanID, 0);
if (p) {
if (p[1] != 1) {
l3_1tr6_error(pc, "setup wrong chanID len", skb);
return;
......
......@@ -631,8 +631,6 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
extern int atl1c_up(struct atl1c_adapter *adapter);
extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);
......
......@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
static int atl1c_up(struct atl1c_adapter *adapter);
static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
......@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
return err;
}
int atl1c_up(struct atl1c_adapter *adapter)
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
......@@ -2351,7 +2353,7 @@ int atl1c_up(struct atl1c_adapter *adapter)
return err;
}
void atl1c_down(struct atl1c_adapter *adapter)
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
......
......@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
static const struct ethtool_ops atl1_ethtool_ops;
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
......@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
......@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
* 1. calculate a 32-bit CRC for the multicast address
* 2. reverse the CRC, MSB to LSB
*/
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
......@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
......@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
return 0;
}
void atl1_set_mac_addr(struct atl1_hw *hw)
static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
......@@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
return 0;
}
const struct ethtool_ops atl1_ethtool_ops = {
static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
......
......@@ -56,16 +56,13 @@ struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
void atl1_set_mac_addr(struct atl1_hw *hw);
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
extern const struct ethtool_ops atl1_ethtool_ops;
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
......
......@@ -41,6 +41,10 @@
#include "atlx.h"
static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
static void atlx_set_mac_addr(struct atl1_hw *hw);
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
......
......@@ -1471,42 +1471,6 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
return status;
}
/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
u8 *connector)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_port_type *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
OPCODE_COMMON_READ_TRANSRECV_DATA);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
req->port = cpu_to_le32(port);
req->page_num = cpu_to_le32(TR_PAGE_A0);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
*connector = resp->data.connector;
}
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
......
......@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
......
......@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_mcast_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
if (ip_version) {
tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
}
ipv6_chk = (ip_version && (tcpf || udpf));
ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
/* Ignore ipcksm for ipv6 pkts */
return l4_cksm && (ipcksm || ipv6);
}
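A summary of the new predicate, derived from the code above (ipv6 holds the raw ip_version field, nonzero for IPv6):

	/*
	 *   l4_cksm  ipcksm  ipv6  ->  csum_passed()
	 *      0        x      x       false  (L4 checksum bad or absent)
	 *      1        1      x       true   (L4 and IP checksums good)
	 *      1        0      1       true   (IPv6 has no header checksum)
	 *      1        0      0       false  (IPv4 header checksum bad)
	 */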
static struct be_rx_page_info *
......@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
skb_checksum_none_assert(skb);
else
if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
......@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
return (tcp_frame && !err) ? true : false;
}
int be_poll_rx(struct napi_struct *napi, int budget)
static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
......@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
int mcc_compl, status = 0;
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
goto reschedule;
}
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
......@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
......@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
cancel_delayed_work_sync(&adapter->work);
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
......@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
......@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
......@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
......@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
cancel_delayed_work_sync(&adapter->work);
unregister_netdev(adapter->netdev);
be_clear(adapter);
......@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
......
......@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
......@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
......
......@@ -25,6 +25,7 @@
#include "bnx2x_init.h"
static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
......@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
}
int bnx2x_setup_irqs(struct bnx2x *bp)
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {
......
......@@ -116,13 +116,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
*/
void bnx2x_int_enable(struct bnx2x *bp);
/**
* Disable HW interrupts.
*
* @param bp
*/
void bnx2x_int_disable(struct bnx2x *bp);
/**
* Disable interrupts. This function ensures that there are no
* ISRs or SP DPCs (sp_task) are running after it returns.
......@@ -191,17 +184,6 @@ void bnx2x_free_mem(struct bnx2x *bp);
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
* Bring down an eth client.
*
* @param bp
* @param p
*
* @return int
*/
int bnx2x_stop_fw_client(struct bnx2x *bp,
struct bnx2x_client_ramrod_params *p);
/**
* Set number of queues according to mode
*
......@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
* MAC(s). The function will wait until the ramrod completion
* returns.
*
* @param bp driver handle
* @param set set or clear the CAM entry
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
#endif
/**
* Initialize status block in FW and HW
*
* @param bp driver handle
* @param dma_addr_t mapping
* @param int sb_id
* @param int vfid
* @param u8 vf_valid
* @param int fw_sb_id
* @param int igu_sb_id
*/
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
/**
* Set MAC filtering configurations.
*
......@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
......@@ -395,14 +348,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
*/
int bnx2x_enable_msi(struct bnx2x *bp);
/**
* Request IRQ vectors from OS.
*
* @param bp
*
* @return int
*/
int bnx2x_setup_irqs(struct bnx2x *bp);
/**
* NAPI callback
*
......
......@@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
......@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
return rc;
}
int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
......@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
}
}
void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
......@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
}
}
void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
struct ilt_client_info *ilt_cli, u8 initop)
static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
struct bnx2x_ilt *ilt,
struct ilt_client_info *ilt_cli,
u8 initop)
{
int i;
......@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
void bnx2x_ilt_client_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli, u8 initop)
static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
......@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
......@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
* called during init common stage, ilt clients should be initialized
* prior to calling this function
*/
void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
......@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
u8 initop)
static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
int port = BP_PORT(bp);
......@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
}
/* called during init common stage */
void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
u8 initop)
static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
......@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
****************************************************************************/
/* called during init func stage */
void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
......
......@@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 *ret_val);
static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 val);
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
......@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
u8 bnx2x_bmac_enable(struct link_params *params,
static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
......@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
/*
*------------------------------------------------------------------------
* bnx2x_override_led_value -
*
* Override the led value of the requested led
*
*------------------------------------------------------------------------
*/
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
u32 led_idx, u32 value)
{
u32 reg_val;
/* If port 0 then use EMAC0, else use EMAC1*/
u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
DP(NETIF_MSG_LINK,
"bnx2x_override_led_value() port %x led_idx %d value %d\n",
port, led_idx, value);
switch (led_idx) {
case 0: /* 10MB led */
/* Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 10M_OVERRIDE bit,
otherwise reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
(reg_val & ~EMAC_LED_10MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 1: /*100MB led */
/*Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 100M_OVERRIDE bit,
otherwise reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
(reg_val & ~EMAC_LED_100MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 2: /* 1000MB led */
/* Read the current value of the LED register in the
EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 1000M_OVERRIDE bit, otherwise
reset it. */
reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
(reg_val & ~EMAC_LED_1000MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 3: /* 2500MB led */
/* Read the current value of the LED register in the
EMAC block*/
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 2500M_OVERRIDE bit, otherwise
reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
(reg_val & ~EMAC_LED_2500MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 4: /*10G led */
if (port == 0) {
REG_WR(bp, NIG_REG_LED_10G_P0,
value);
} else {
REG_WR(bp, NIG_REG_LED_10G_P1,
value);
}
break;
case 5: /* TRAFFIC led */
/* Find if the traffic control is via BMAC or EMAC */
if (port == 0)
reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
else
reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
/* Override the traffic led in the EMAC:*/
if (reg_val == 1) {
/* Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base +
EMAC_REG_EMAC_LED);
/* Set the TRAFFIC_OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the TRAFFIC bit, otherwise
reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
(reg_val & ~EMAC_LED_TRAFFIC);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
} else { /* Override the traffic led in the BMAC: */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
value);
}
break;
default:
DP(NETIF_MSG_LINK,
"bnx2x_override_led_value() unknown led index %d "
"(should be 0-5)\n", led_idx);
return -EINVAL;
}
return 0;
}
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
......@@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
......@@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
{
if (phy_idx < params->num_phys)
return params->phy[phy_idx].supported;
return 0;
}
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;
......
......@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 *ret_val);
u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and updates the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
......@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
......@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external SFX7101 PHY */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
/* Returns the aggregative supported attributes of the phys on board */
u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
......
......@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
/* used only at init
* locking is done by mcp
*/
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
......@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
......@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type)
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
......@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
......@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
......@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
void bnx2x_int_disable(struct bnx2x *bp)
static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
......@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
}
/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
......@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
bp->mac_filters.unmatched_unicast & ~mask;
}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
......@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
void bnx2x_pf_init(struct bnx2x *bp)
static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
......@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
......@@ -6021,6 +6024,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/*
* Init service functions
*/
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
......@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
WAIT_RAMROD_COMMON);
}
int bnx2x_func_stop(struct bnx2x *bp)
static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
......@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags)
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
......@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
......@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
......@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
ETH_CONNECTION_TYPE);
}
int bnx2x_setup_fw_client(struct bnx2x *bp,
struct bnx2x_client_init_params *params,
u8 activate,
struct client_init_ramrod_data *data,
dma_addr_t data_mapping)
static int bnx2x_setup_fw_client(struct bnx2x *bp,
struct bnx2x_client_init_params *params,
u8 activate,
struct client_init_ramrod_data *data,
dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
......@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
static int bnx2x_stop_fw_client(struct bnx2x *bp,
struct bnx2x_client_ramrod_params *p)
{
int rc;
......@@ -7440,7 +7447,7 @@ static void bnx2x_reset_task(struct work_struct *work)
* Init service functions
*/
u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
......
......@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
struct slave *slave;
int i;
write_lock(&bond->lock);
write_lock_bh(&bond->lock);
bond->vlgrp = grp;
write_unlock(&bond->lock);
write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;
......
......@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
config CAIF_SHM
tristate "CAIF shared memory protocol driver"
depends on CAIF && U5500_MBOX
default n
---help---
The CAIF shared memory protocol driver for the STE UX5500 platform.
......@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
# Shared memory
caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
obj-$(CONFIG_CAIF_SHM) += caif_shm.o
/*
* Copyright (C) ST-Ericsson AB 2010
* Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
* Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <mach/mbox.h>
#include <net/caif/caif_shm.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
#define MAX_SHM_INSTANCES 1
enum {
MBX_ACC0,
MBX_ACC1,
MBX_DSP
};
static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
static unsigned int shm_start;
static unsigned int shm_size;
module_param(shm_size, uint, 0440);
MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
module_param(shm_start, uint, 0440);
MODULE_PARM_DESC(shm_start, "Start address of SHM shared memory");
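Both values are expected at module load time; an illustrative invocation (the addresses are hypothetical):

	modprobe caif_shm shm_start=0x7ff00000 shm_size=0x100000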
static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
{
/* Always block until msg is written successfully */
mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
return 0;
}
static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
void *pshm_drv)
{
/*
* For UX5500, we have only 1 SHM instance which uses MBX0
* for communication with the peer modem
*/
pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
if (!pshm_dev->hmbx)
return -ENODEV;
else
return 0;
}
static int __init caif_shmdev_init(void)
{
int i, result;
/* Loop is currently overkill, there is only one instance */
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
shmdev_lyr[i].shm_base_addr = shm_start;
shmdev_lyr[i].shm_total_sz = shm_size;
if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
|| (shmdev_lyr[i].shm_total_sz <= 0)) {
pr_warn("ERROR,"
"Shared memory Address and/or Size incorrect"
", Bailing out ...\n");
result = -EINVAL;
goto clean;
}
pr_info("SHM AREA (instance %d) STARTS"
" AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
shmdev_lyr[i].shm_id = i;
shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
/*
* Finally, CAIF core module is called with details in place:
* 1. SHM base address
* 2. SHM size
* 3. MBX handle
*/
result = caif_shmcore_probe(&shmdev_lyr[i]);
if (result) {
pr_warn("ERROR[%d],"
"Could not probe SHM core (instance %d)"
" Bailing out ...\n", result, i);
goto clean;
}
}
return 0;
clean:
/*
* For now, we assume that even if one instance of SHM fails, we bail
* out of the driver support completely. For this, we need to release
* any memory allocated and unregister any instance of SHM net device.
*/
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
if (shmdev_lyr[i].pshm_netdev)
unregister_netdev(shmdev_lyr[i].pshm_netdev);
}
return result;
}
static void __exit caif_shmdev_exit(void)
{
int i;
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
kfree((void *)shmdev_lyr[i].shm_base_addr);
}
}
module_init(caif_shmdev_init);
module_exit(caif_shmdev_exit);
/*
* Copyright (C) ST-Ericsson AB 2010
* Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
* Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
* Daniel Martensson / daniel.martensson@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>
#define NR_TX_BUF 6
#define NR_RX_BUF 6
#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000
#define CAIF_NEEDED_HEADROOM 32
#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0
#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4
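/*
 * Flow-control hysteresis (see caif_shmdrv_rx_cb() and shm_tx_work_func()
 * below): CAIF flow is switched off when fewer than LOW_WATERMARK empty
 * Tx buffers remain, and switched back on once more than HIGH_WATERMARK
 * are available again.
 */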
/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10
/*
* Size in bytes of the descriptor area
* (With end of descriptor signalling)
*/
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
sizeof(struct shm_pck_desc))
/*
* Offset to the first CAIF frame within a shared memory buffer.
* Aligned on 32 bytes.
*/
#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1
/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4
#define CAIF_MAX_MTU 4096
#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK (0x0F << 0)
#define SHM_EMPTY_MASK (0x0F << 4)
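/*
 * Worked example of the nibble encoding (illustrative values): marking
 * buffer 2 "full" and buffer 5 "empty" in one mailbox message gives
 *
 *   msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);  == 0x63
 *   SHM_GET_FULL(msg)  == 2   (low nibble holds index + 1)
 *   SHM_GET_EMPTY(msg) == 5   (high nibble holds index + 1)
 *
 * Indices are stored as index + 1 so that a zero nibble means
 * "no buffer signalled".
 */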
struct shm_pck_desc {
/*
* Offset from start of shared memory area to start of
* shared memory CAIF frame.
*/
u32 frm_ofs;
u32 frm_len;
};
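/*
 * Illustrative layout of one shared memory buffer, as implied by the
 * constants above and the Tx/Rx code below (not to scale):
 *
 *   offset 0            descriptor area: up to SHM_MAX_FRMS_PER_BUF
 *                       shm_pck_desc entries, terminated by an entry
 *                       with frm_ofs == 0
 *   SHM_CAIF_FRM_OFS    first frame: hdr_ofs byte (SHM_HDR_LEN),
 *                       optional padding, then the CAIF frame itself,
 *                       padded to a multiple of SHM_FRM_PAD_LEN
 *   ...                 further frames, back to back
 */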
struct buf_list {
unsigned char *desc_vptr;
u32 phy_addr;
u32 index;
u32 len;
u32 frames;
u32 frm_ofs;
struct list_head list;
};
struct shm_caif_frm {
/* Number of bytes of padding before the CAIF frame. */
u8 hdr_ofs;
};
struct shmdrv_layer {
/* caif_dev_common must always be first in the structure*/
struct caif_dev_common cfdev;
u32 shm_tx_addr;
u32 shm_rx_addr;
u32 shm_base_addr;
u32 tx_empty_available;
spinlock_t lock;
struct list_head tx_empty_list;
struct list_head tx_pend_list;
struct list_head tx_full_list;
struct list_head rx_empty_list;
struct list_head rx_pend_list;
struct list_head rx_full_list;
struct workqueue_struct *pshm_tx_workqueue;
struct workqueue_struct *pshm_rx_workqueue;
struct work_struct shm_tx_work;
struct work_struct shm_rx_work;
struct sk_buff_head sk_qhead;
struct shmdev_layer *pshm_dev;
};
static int shm_netdev_open(struct net_device *shm_netdev)
{
netif_wake_queue(shm_netdev);
return 0;
}
static int shm_netdev_close(struct net_device *shm_netdev)
{
netif_stop_queue(shm_netdev);
return 0;
}
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
struct buf_list *pbuf;
struct shmdrv_layer *pshm_drv;
struct list_head *pos;
u32 avail_emptybuff = 0;
unsigned long flags = 0;
pshm_drv = (struct shmdrv_layer *)priv;
/* Check for received buffers. */
if (mbx_msg & SHM_FULL_MASK) {
int idx;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check whether we have any outstanding buffers. */
if (list_empty(&pshm_drv->rx_empty_list)) {
/* Release spin lock. */
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* We print even in IRQ context... */
pr_warn("No empty Rx buffers to fill: "
"mbx_msg:%x\n", mbx_msg);
/* Bail out. */
goto err_sync;
}
pbuf =
list_entry(pshm_drv->rx_empty_list.next,
struct buf_list, list);
idx = pbuf->index;
/* Check buffer synchronization. */
if (idx != SHM_GET_FULL(mbx_msg)) {
/* We print even in IRQ context... */
pr_warn(
"phyif_shm_mbx_msg_cb: RX full out of sync:"
" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
idx, mbx_msg, SHM_GET_FULL(mbx_msg));
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Bail out. */
goto err_sync;
}
list_del_init(&pbuf->list);
list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule RX work queue. */
if (!work_pending(&pshm_drv->shm_rx_work))
queue_work(pshm_drv->pshm_rx_workqueue,
&pshm_drv->shm_rx_work);
}
/* Check for emptied buffers. */
if (mbx_msg & SHM_EMPTY_MASK) {
int idx;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check whether we have any outstanding buffers. */
if (list_empty(&pshm_drv->tx_full_list)) {
/* We print even in IRQ context... */
pr_warn("No TX to empty: msg:%x\n", mbx_msg);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Bail out. */
goto err_sync;
}
pbuf =
list_entry(pshm_drv->tx_full_list.next,
struct buf_list, list);
idx = pbuf->index;
/* Check buffer synchronization. */
if (idx != SHM_GET_EMPTY(mbx_msg)) {
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* We print even in IRQ context... */
pr_warn("TX empty "
"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
/* Bail out. */
goto err_sync;
}
list_del_init(&pbuf->list);
/* Reset buffer parameters. */
pbuf->frames = 0;
pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
/* Check whether we have to wake up the transmitter. */
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue, if required */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue,
&pshm_drv->shm_tx_work);
} else
spin_unlock_irqrestore(&pshm_drv->lock, flags);
}
return 0;
err_sync:
return -EIO;
}
static void shm_rx_work_func(struct work_struct *rx_work)
{
struct shmdrv_layer *pshm_drv;
struct buf_list *pbuf;
unsigned long flags = 0;
struct sk_buff *skb;
char *p;
int ret;
pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
while (1) {
struct shm_pck_desc *pck_desc;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check for received buffers. */
if (list_empty(&pshm_drv->rx_full_list)) {
spin_unlock_irqrestore(&pshm_drv->lock, flags);
break;
}
pbuf =
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
/*
* Check whether descriptor contains a CAIF shared memory
* frame.
*/
while (pck_desc->frm_ofs) {
unsigned int frm_buf_ofs;
unsigned int frm_pck_ofs;
unsigned int frm_pck_len;
/*
* Check whether offset is within buffer limits
* (lower).
*/
if (pck_desc->frm_ofs <
(pbuf->phy_addr - pshm_drv->shm_base_addr))
break;
/*
* Check whether offset is within buffer limits
* (higher).
*/
if (pck_desc->frm_ofs >
((pbuf->phy_addr - pshm_drv->shm_base_addr) +
pbuf->len))
break;
/* Calculate offset from start of buffer. */
frm_buf_ofs =
pck_desc->frm_ofs - (pbuf->phy_addr -
pshm_drv->shm_base_addr);
/*
* Calculate offset and length of CAIF packet while
* taking care of the shared memory header.
*/
frm_pck_ofs =
frm_buf_ofs + SHM_HDR_LEN +
(*(pbuf->desc_vptr + frm_buf_ofs));
frm_pck_len =
(pck_desc->frm_len - SHM_HDR_LEN -
(*(pbuf->desc_vptr + frm_buf_ofs)));
/* Check whether CAIF packet is within buffer limits */
if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
break;
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
BUG_ON(skb == NULL);
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
skb->dev = pshm_drv->pshm_dev->pshm_netdev;
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
if (!ret) {
pshm_drv->pshm_dev->pshm_netdev->stats.
rx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.
rx_bytes += pck_desc->frm_len;
} else
++pshm_drv->pshm_dev->pshm_netdev->stats.
rx_dropped;
/* Move to next packet descriptor. */
pck_desc++;
}
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
}
/* Schedule the work queue, if required */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}
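A worked example of the Rx offset arithmetic above, with illustrative (hypothetical) numbers:

	/*
	 * shm_base_addr = 0x1000 and pbuf->phy_addr = 0x3000, so this buffer
	 * starts 0x2000 into the shared area. For a descriptor with
	 * frm_ofs = 0x2040, frm_len = 0x30 and a per-frame hdr_ofs byte of 2:
	 *
	 *   frm_buf_ofs = 0x2040 - 0x2000        = 0x40  (offset inside buffer)
	 *   frm_pck_ofs = 0x40 + SHM_HDR_LEN + 2 = 0x43  (start of CAIF packet)
	 *   frm_pck_len = 0x30 - SHM_HDR_LEN - 2 = 0x2d  (CAIF packet length)
	 */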
static void shm_tx_work_func(struct work_struct *tx_work)
{
u32 mbox_msg;
unsigned int frmlen, avail_emptybuff, append = 0;
unsigned long flags = 0;
struct buf_list *pbuf = NULL;
struct shmdrv_layer *pshm_drv;
struct shm_caif_frm *frm;
struct sk_buff *skb;
struct shm_pck_desc *pck_desc;
struct list_head *pos;
pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
do {
/* Initialize mailbox message. */
mbox_msg = 0x00;
avail_emptybuff = 0;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check for pending receive buffers. */
if (!list_empty(&pshm_drv->rx_pend_list)) {
pbuf = list_entry(pshm_drv->rx_pend_list.next,
struct buf_list, list);
list_del_init(&pbuf->list);
list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
/*
* Value index is never changed,
* so read access should be safe.
*/
mbox_msg |= SHM_SET_EMPTY(pbuf->index);
}
skb = skb_peek(&pshm_drv->sk_qhead);
if (skb == NULL)
goto send_msg;
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
if ((avail_emptybuff < LOW_WATERMARK) &&
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
}
/*
* We simply return back to the caller if we do not have space
* either in Tx pending list or Tx empty list. In this case,
* we hold the received skb in the skb list, waiting to
* be transmitted once Tx buffers become available
*/
if (list_empty(&pshm_drv->tx_empty_list))
goto send_msg;
/* Get the first free Tx buffer. */
pbuf = list_entry(pshm_drv->tx_empty_list.next,
struct buf_list, list);
do {
if (append) {
skb = skb_peek(&pshm_drv->sk_qhead);
if (skb == NULL)
break;
}
frm = (struct shm_caif_frm *)
(pbuf->desc_vptr + pbuf->frm_ofs);
frm->hdr_ofs = 0;
frmlen = 0;
frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
/* Add tail padding if needed. */
if (frmlen % SHM_FRM_PAD_LEN)
frmlen += SHM_FRM_PAD_LEN -
(frmlen % SHM_FRM_PAD_LEN);
/*
* Verify that packet, header and additional padding
* can fit within the buffer frame area.
*/
if (frmlen >= (pbuf->len - pbuf->frm_ofs))
break;
if (!append) {
list_del_init(&pbuf->list);
append = 1;
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
frm->hdr_ofs, skb->len);
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
dev_kfree_skb(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
/* Forward to current frame. */
pck_desc += pbuf->frames;
pck_desc->frm_ofs = (pbuf->phy_addr -
pshm_drv->shm_base_addr) +
pbuf->frm_ofs;
pck_desc->frm_len = frmlen;
/* Terminate packet descriptor area. */
pck_desc++;
pck_desc->frm_ofs = 0;
/* Update buffer parameters. */
pbuf->frames++;
pbuf->frm_ofs += frmlen + (frmlen % 32);
} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
/* Assign buffer as full. */
list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
append = 0;
mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
spin_unlock_irqrestore(&pshm_drv->lock, flags);
if (mbox_msg)
pshm_drv->pshm_dev->pshmdev_mbxsend
(pshm_drv->pshm_dev->shm_id, mbox_msg);
} while (mbox_msg);
}
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
spin_lock_irqsave(&pshm_drv->lock, flags);
skb_queue_tail(&pshm_drv->sk_qhead, skb);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule Tx work queue for deferred processing of skbs */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
return 0;
}
static const struct net_device_ops netdev_ops = {
.ndo_open = shm_netdev_open,
.ndo_stop = shm_netdev_close,
.ndo_start_xmit = shm_netdev_tx,
};
static void shm_netdev_setup(struct net_device *pshm_netdev)
{
struct shmdrv_layer *pshm_drv;
pshm_netdev->netdev_ops = &netdev_ops;
pshm_netdev->mtu = CAIF_MAX_MTU;
pshm_netdev->type = ARPHRD_CAIF;
pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
pshm_netdev->tx_queue_len = 0;
pshm_netdev->destructor = free_netdev;
pshm_drv = netdev_priv(pshm_netdev);
/* Initialize structures in a clean state. */
memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
int result, j;
struct shmdrv_layer *pshm_drv = NULL;
pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
"cfshm%d", shm_netdev_setup);
if (!pshm_dev->pshm_netdev)
return -ENOMEM;
pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
pshm_drv->pshm_dev = pshm_dev;
/*
* Initialization starts with verifying the availability of the
* MBX driver by calling its setup function. The MBX driver must
* be available by this time for the SHM driver to function properly.
*/
if ((pshm_dev->pshmdev_mbxsetup
(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
pr_warn("Could not config. SHM Mailbox,"
" Bailing out.....\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENODEV;
}
skb_queue_head_init(&pshm_drv->sk_qhead);
pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
" INSTANCE AT pshm_drv =0x%p\n",
pshm_drv->pshm_dev->shm_id, pshm_drv);
if (pshm_dev->shm_total_sz <
(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
pr_warn("ERROR, Amount of available"
" Phys. SHM cannot accomodate current SHM "
"driver configuration, Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
if (pshm_dev->shm_loopback)
pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
else
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
INIT_LIST_HEAD(&pshm_drv->rx_full_list);
INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
pshm_drv->pshm_tx_workqueue =
create_singlethread_workqueue("shm_tx_work");
pshm_drv->pshm_rx_workqueue =
create_singlethread_workqueue("shm_rx_work");
for (j = 0; j < NR_TX_BUF; j++) {
struct buf_list *tx_buf =
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (tx_buf == NULL) {
pr_warn("ERROR, Could not"
" allocate dynamic mem. for tx_buf,"
" Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
tx_buf->index = j;
tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
tx_buf->len = TX_BUF_SZ;
tx_buf->frames = 0;
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
}
for (j = 0; j < NR_RX_BUF; j++) {
struct buf_list *rx_buf =
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (rx_buf == NULL) {
pr_warn("ERROR, Could not"
" allocate dynamic mem.for rx_buf,"
" Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
rx_buf->index = j;
rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
}
pshm_drv->tx_empty_available = 1;
result = register_netdev(pshm_dev->pshm_netdev);
if (result)
pr_warn("ERROR[%d], SHM could not, "
"register with NW FRMWK Bailing out ...\n", result);
return result;
}
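For orientation, the buffer layout that caif_shmcore_probe() carves out of the shared region can be sketched in userspace as follows. The NR_TX_BUF/NR_RX_BUF and buffer-size values, like the base address, are illustrative assumptions, not the driver's actual configuration:

/*
 * Minimal layout sketch (assumed counts/sizes, hypothetical base):
 * TX buffers start at the base of the shared region and RX buffers
 * follow them, exactly as computed in the probe above.
 */
#include <stdio.h>

#define NR_TX_BUF 6        /* assumption for illustration */
#define NR_RX_BUF 6        /* assumption for illustration */
#define TX_BUF_SZ 0x2000   /* assumption for illustration */
#define RX_BUF_SZ 0x2000   /* assumption for illustration */

int main(void)
{
	unsigned long base = 0x80000000UL;                     /* hypothetical */
	unsigned long tx_addr = base;                          /* shm_tx_addr */
	unsigned long rx_addr = base + NR_TX_BUF * TX_BUF_SZ;  /* shm_rx_addr */
	int j;

	for (j = 0; j < NR_TX_BUF; j++)
		printf("tx_buf[%d] at 0x%lx\n", j, tx_addr + (unsigned long)TX_BUF_SZ * j);
	for (j = 0; j < NR_RX_BUF; j++)
		printf("rx_buf[%d] at 0x%lx\n", j, rx_addr + (unsigned long)RX_BUF_SZ * j);
	return 0;
}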
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
struct buf_list *pbuf;
struct shmdrv_layer *pshm_drv = NULL;
pshm_drv = netdev_priv(pshm_netdev);
while (!(list_empty(&pshm_drv->tx_pend_list))) {
pbuf =
list_entry(pshm_drv->tx_pend_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->tx_full_list))) {
pbuf =
list_entry(pshm_drv->tx_full_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->tx_empty_list))) {
pbuf =
list_entry(pshm_drv->tx_empty_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_full_list))) {
pbuf =
list_entry(pshm_drv->tx_full_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_pend_list))) {
pbuf =
list_entry(pshm_drv->tx_pend_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_empty_list))) {
pbuf =
list_entry(pshm_drv->rx_empty_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
/* Destroy work queues. */
destroy_workqueue(pshm_drv->pshm_tx_workqueue);
destroy_workqueue(pshm_drv->pshm_rx_workqueue);
unregister_netdev(pshm_netdev);
}
......@@ -82,6 +82,14 @@ config CAN_FLEXCAN
---help---
Say Y here if you want support for the Freescale FlexCAN.
config PCH_CAN
tristate "PCH CAN"
depends on CAN_DEV && PCI
---help---
This driver is for the PCH CAN controller of Topcliff, an IOH
(I/O Hub) for x86 embedded processors. It provides access to
the CAN bus.
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
......
......@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
......@@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@linutronix.de>
* (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
* (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
......@@ -40,7 +40,6 @@
#include <mach/board.h>
#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
......@@ -172,6 +171,7 @@ struct at91_priv {
};
static struct can_bittiming_const at91_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
......@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
return readl(priv->reg_base + reg);
return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
writel(value, priv->reg_base + reg);
__raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
......@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
/* reset acceptance mask and id register */
for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
at91_write(priv, AT91_MAM(i), 0x0);
at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
}
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
......@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
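The reg_br change above deserves a note: CAN_CTRLMODE_3_SAMPLES is a mask within ctrlmode, so shifting the masked value left by 24 only lands on the intended AT91_BR bit if the flag happens to be bit 0. A standalone check, assuming the usual flag value of 0x04 from linux/can/netlink.h:

#include <assert.h>

#define CAN_CTRLMODE_3_SAMPLES 0x04 /* assumed, per linux/can/netlink.h */

int main(void)
{
	unsigned int ctrlmode = CAN_CTRLMODE_3_SAMPLES;
	/* old expression: puts the flag at bit 26, not at AT91_BR bit 24 */
	unsigned int old_br = (ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24;
	/* fixed expression: bit 24 set iff triple sampling is requested */
	unsigned int new_br = (ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1u << 24 : 0;

	assert(old_br == 1u << 26);
	assert(new_br == 1u << 24);
	return 0;
}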
static int at91_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_ecr = at91_read(priv, AT91_ECR);
bec->rxerr = reg_ecr & 0xff;
bec->txerr = reg_ecr >> 16;
return 0;
}
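As the new helper shows, the AT91 error counter register packs the receive error count in its low byte and the transmit error count from bit 16 up. A quick unpacking sketch with a made-up register value:

#include <assert.h>

int main(void)
{
	unsigned int reg_ecr = (7u << 16) | 3u; /* hypothetical: TEC=7, REC=3 */

	assert((reg_ecr & 0xff) == 3); /* bec->rxerr */
	assert((reg_ecr >> 16) == 7);  /* bec->txerr */
	return 0;
}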
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
......@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
......@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
dev_err(dev->dev.parent,
"BUG! TX buffer full when queue awake!\n");
netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
......@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
struct sk_buff *skb;
struct can_frame *cf;
dev_dbg(dev->dev.parent, "RX buffer overflow\n");
netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
......@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
/* allow RX of extended frames */
at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
......@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
dev_info(dev->dev.parent,
"order of incoming frames cannot be guaranteed\n");
netdev_info(dev,
"order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
......@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
dev_dbg(dev->dev.parent, "CERR irq\n");
netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
......@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
dev_dbg(dev->dev.parent, "SERR irq\n");
netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
......@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
dev_dbg(dev->dev.parent, "AERR irq\n");
netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
dev_dbg(dev->dev.parent, "FERR irq\n");
netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
......@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
dev_dbg(dev->dev.parent, "BERR irq\n");
netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
......@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
u32 reg_idr, reg_ier, reg_ecr;
u8 tec, rec;
u32 reg_idr = 0, reg_ier = 0;
struct can_berr_counter bec;
reg_ecr = at91_read(priv, AT91_ECR);
rec = reg_ecr & 0xff;
tec = reg_ecr >> 16;
at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
......@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (tec > rec) ?
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
......@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (tec > rec) ?
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
......@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
dev_dbg(dev->dev.parent, "restarted\n");
netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
......@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
dev_dbg(dev->dev.parent, "Error Active\n");
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
......@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
cf->can_id |= CAN_ERR_BUSOFF;
dev_dbg(dev->dev.parent, "bus-off\n");
netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
......@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
......@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
static int __init at91_can_probe(struct platform_device *pdev)
static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
......@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
......@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
return 0;
exit_free:
free_netdev(dev);
free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
......@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
free_candev(dev);
return 0;
}
......@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
.name = DRV_NAME,
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
......@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
......@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
clk_put(priv->clk);
free_candev(dev);
return 0;
}
......
......@@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
......@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
......
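The RXBSIDL_SRR hunk above makes the driver flag standard-frame remote requests: for standard IDs, the SRR bit in RXBSIDL carries the RTR information. A decode sketch with a made-up register dump (CAN_RTR_FLAG as in linux/can.h):

#include <assert.h>

#define RXBSIDH_SHIFT 3
#define RXBSIDL_SHIFT 5
#define RXBSIDL_SRR   0x10
#define CAN_RTR_FLAG  0x40000000U /* per linux/can.h */

int main(void)
{
	unsigned char sidh = 0x24, sidl = 0x70; /* hypothetical dump, SRR set */
	unsigned int can_id = (sidh << RXBSIDH_SHIFT) | (sidl >> RXBSIDL_SHIFT);

	if (sidl & RXBSIDL_SRR)
		can_id |= CAN_RTR_FLAG; /* remote transmission request */

	assert((can_id & ~CAN_RTR_FLAG) == 0x123);
	assert(can_id & CAN_RTR_FLAG);
	return 0;
}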
......@@ -58,4 +58,16 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
help
This driver is for Technologic Systems' TS-CAN1 PC104 boards.
http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
SJA1000 IO base addresses are chosen heuristically (first that works).
endif
......@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
/*
* tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
*
* Copyright 2010 Andre B. Oliveira
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* References:
* - Getting started with TS-CAN1, Technologic Systems, Jun 2009
* http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "sja1000.h"
MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
MODULE_LICENSE("GPL");
/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
#define TSCAN1_MAXDEV 4
/* PLD registers address offsets */
#define TSCAN1_ID1 0
#define TSCAN1_ID2 1
#define TSCAN1_VERSION 2
#define TSCAN1_LED 3
#define TSCAN1_PAGE 4
#define TSCAN1_MODE 5
#define TSCAN1_JUMPERS 6
/* PLD board identifier registers magic values */
#define TSCAN1_ID1_VALUE 0xf6
#define TSCAN1_ID2_VALUE 0xb9
/* PLD mode register SJA1000 IO enable bit */
#define TSCAN1_MODE_ENABLE 0x40
/* PLD jumpers register bits */
#define TSCAN1_JP4 0x10
#define TSCAN1_JP5 0x20
/* PLD IO base addresses start */
#define TSCAN1_PLD_ADDRESS 0x150
/* PLD register space size */
#define TSCAN1_PLD_SIZE 8
/* SJA1000 register space size */
#define TSCAN1_SJA1000_SIZE 32
/* SJA1000 crystal frequency (16MHz) */
#define TSCAN1_SJA1000_XTAL 16000000
/* SJA1000 IO base addresses */
static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
};
/* Read SJA1000 register */
static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
{
return inb((unsigned long)priv->reg_base + reg);
}
/* Write SJA1000 register */
static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
{
outb(val, (unsigned long)priv->reg_base + reg);
}
/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
static int __devinit tscan1_probe(struct device *dev, unsigned id)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
int irq, i;
pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
return -EBUSY;
if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENODEV;
}
switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
case TSCAN1_JP4:
irq = 6;
break;
case TSCAN1_JP5:
irq = 7;
break;
case TSCAN1_JP4 | TSCAN1_JP5:
irq = 5;
break;
default:
dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
release_region(pld_base, TSCAN1_PLD_SIZE);
return -EINVAL;
}
netdev = alloc_sja1000dev(0);
if (!netdev) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENOMEM;
}
dev_set_drvdata(dev, netdev);
SET_NETDEV_DEV(netdev, dev);
netdev->base_addr = pld_base;
netdev->irq = irq;
priv = netdev_priv(netdev);
priv->read_reg = tscan1_read;
priv->write_reg = tscan1_write;
priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
priv->cdr = CDR_CBP | CDR_CLK_OFF;
priv->ocr = OCR_TX0_PUSHPULL;
/* Select the first SJA1000 IO address that is free and that works */
for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
sja1000_base = tscan1_sja1000_addresses[i];
if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
dev_name(dev)))
continue;
/* Set SJA1000 IO base address and enable it */
outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
priv->reg_base = (void __iomem *)sja1000_base;
if (!register_sja1000dev(netdev)) {
/* SJA1000 probe succeeded; turn LED off and return */
outb(0, pld_base + TSCAN1_LED);
netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
pld_base, sja1000_base, irq);
return 0;
}
/* SJA1000 probe failed; release and try next address */
outb(0, pld_base + TSCAN1_MODE);
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
}
dev_err(dev, "failed to assign SJA1000 IO address\n");
dev_set_drvdata(dev, NULL);
free_sja1000dev(netdev);
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENXIO;
}
static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
netdev = dev_get_drvdata(dev);
unregister_sja1000dev(netdev);
dev_set_drvdata(dev, NULL);
priv = netdev_priv(netdev);
pld_base = netdev->base_addr;
sja1000_base = (unsigned long)priv->reg_base;
outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
release_region(pld_base, TSCAN1_PLD_SIZE);
free_sja1000dev(netdev);
return 0;
}
static struct isa_driver tscan1_isa_driver = {
.probe = tscan1_probe,
.remove = __devexit_p(tscan1_remove),
.driver = {
.name = "tscan1",
},
};
static int __init tscan1_init(void)
{
return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
}
module_init(tscan1_init);
static void __exit tscan1_exit(void)
{
isa_unregister_driver(&tscan1_isa_driver);
}
module_exit(tscan1_exit);
......@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
}
if (!(adap->flags & QUEUES_BOUND)) {
err = bind_qsets(adap);
if (err) {
CH_ERR(adap, "failed to bind qsets, err %d\n", err);
int ret = bind_qsets(adap);
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;
......
......@@ -281,7 +281,6 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
......
......@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
pi->vlan_grp != NULL, true);
!!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
......@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
int err;
unsigned long old_feat = dev->features;
err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
if (err)
return err;
if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
const struct port_info *pi = netdev_priv(dev);
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
true);
if (err)
dev->features = old_feat;
}
return err;
}
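The set_flags() rework keys off a feature delta: XOR of the old and new feature words isolates the bits that actually changed, so the rx-mode update only runs when hardware VLAN stripping was toggled. The bit value below is illustrative:

#include <assert.h>

#define NETIF_F_HW_VLAN_RX 0x100UL /* illustrative value */

int main(void)
{
	unsigned long old_feat = 0x1UL;
	unsigned long features = old_feat | NETIF_F_HW_VLAN_RX; /* toggled on */
	unsigned long unchanged = features;

	assert((old_feat ^ features) & NETIF_F_HW_VLAN_RX);     /* changed */
	assert(!((features ^ unchanged) & NETIF_F_HW_VLAN_RX)); /* no change */
	return 0;
}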
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
......@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
grp != NULL, true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
......@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
......@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
......@@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
__set_bit(i, &adapter->registered_device_map);
adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {
......
......@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
struct vlan_group *grp = pi->vlan_grp;
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
if (likely(grp)) {
ret = vlan_gro_frags(&rxq->rspq.napi, grp,
ntohs(pkt->vlan));
goto stats;
}
}
ret = napi_gro_frags(&rxq->rspq.napi);
stats: if (ret == GRO_HELD)
if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
......@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
if (likely(grp))
vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
else
dev_kfree_skb_any(skb);
} else
netif_receive_skb(skb);
}
netif_receive_skb(skb);
return 0;
}
......
......@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
void e1000_reinit_safe(struct e1000_adapter *adapter)
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
......
......@@ -396,7 +396,9 @@ struct ehea_port_res {
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
......
......@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
u64 hret, rx_packets, tx_packets;
u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
......@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
for (i = 0; i < port->num_def_qps; i++)
for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
rx_bytes += port->port_res[i].rx_bytes;
}
tx_packets = 0;
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
tx_bytes += port->port_res[i].tx_bytes;
}
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
stats->rx_bytes = cb2->rxo;
stats->tx_bytes = cb2->txo;
stats->rx_bytes = rx_bytes;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
......@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
......@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_rq3++;
}
processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
......@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
......@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
tx_bytes = pr->tx_bytes;
tx_packets = pr->tx_packets;
rx_bytes = pr->rx_bytes;
rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
pr->tx_bytes = rx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
......@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
if (vlan_tx_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
swqe->vlan_tag = vlan_tx_tag_get(skb);
}
pr->tx_packets++;
pr->tx_bytes += skb->len;
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
......@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
if (vlan_tx_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
swqe->vlan_tag = vlan_tx_tag_get(skb);
}
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
......@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
ehea_post_swqe(pr->qp, swqe);
pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
......
......@@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
__skb_queue_head(&priv->rx_recycle, skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
dev_kfree_skb_any(skb);
......@@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
skb = __skb_dequeue(&priv->rx_recycle);
skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
......@@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
__skb_queue_head(&priv->rx_recycle, skb);
skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
......
......@@ -1623,12 +1623,12 @@ jme_open(struct net_device *netdev)
return rc;
}
#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
......@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
phylink = jme_linkstat_from_phy(jme);
}
}
#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
......@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
static void
jme_powersave_phy(struct jme_adapter *jme)
{
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
} else {
jme_phy_off(jme);
}
}
static int
jme_close(struct net_device *netdev)
{
......@@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
}
static void
jme_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
jme_powersave_phy(jme);
pci_pme_active(pdev, true);
}
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
......@@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
pci_enable_wake(pdev, PCI_D3cold, true);
} else {
jme_phy_off(jme);
}
pci_set_power_state(pdev, PCI_D3cold);
jme_powersave_phy(jme);
pci_enable_wake(jme->pdev, PCI_D3hot, true);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
......@@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
.shutdown = jme_shutdown,
};
static int __init
......
......@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
if (work_done < budget)
if (work_done < budget) {
napi_complete(napi);
/*
* We've done what we can to clean the buffers. Make sure we
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
/*
* We've done what we can to clean the buffers. Make sure we
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
}
/* TODO: Handle errors */
......@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
/*
* There's no point taking any more interrupts
* until we have processed the buffers
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
......
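The macb change applies the standard NAPI idiom: only the context that actually wins the scheduling race masks the RX interrupt, so the interrupt can never be left disabled without a poll pending. A userspace sketch of that hand-off, with an atomic flag standing in for the NAPI scheduled bit:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag scheduled = ATOMIC_FLAG_INIT;
static bool rx_irq_masked;

static void rx_interrupt(void)
{
	/* like napi_schedule_prep(): returns the winner exactly once */
	if (!atomic_flag_test_and_set(&scheduled))
		rx_irq_masked = true; /* like writing IDR, then __napi_schedule() */
}

int main(void)
{
	rx_interrupt();                /* first IRQ schedules and masks */
	assert(rx_irq_masked);
	rx_interrupt();                /* already scheduled: nothing to do */
	atomic_flag_clear(&scheduled); /* like napi_complete() in macb_poll() */
	return 0;
}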
......@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
__be64 *inbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
inbox[0] = cpu_to_be64(virt);
inbox[1] = cpu_to_be64(dma_addr);
err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
if (!err)
mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
(unsigned long long) dma_addr, (unsigned long long) virt);
return err;
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
......
......@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
......
......@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
......@@ -224,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
......
......@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
*
* Returns 0 on success on < 0 on error.
*/
int phy_clear_interrupt(struct phy_device *phydev)
static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
......@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success on < 0 on error.
*/
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
......@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
void phy_sanitize_settings(struct phy_device *phydev)
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
......@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
......@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
int phy_enable_interrupts(struct phy_device *phydev)
static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
......@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
int phy_disable_interrupts(struct phy_device *phydev)
static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
......@@ -574,7 +572,6 @@ int phy_disable_interrupts(struct phy_device *phydev)
return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device
......
......@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
......@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
static struct phy_device* phy_device_create(struct mii_bus *bus,
int addr, int phy_id)
{
struct phy_device *dev;
......@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
return dev;
}
EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
......@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
* If you want to monitor your own link state, don't call
* this function.
*/
void phy_prepare_link(struct phy_device *phydev,
static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
......@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
......@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
......@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
int genphy_config_advert(struct phy_device *phydev)
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
......@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
return changed;
}
EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
......@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
int genphy_setup_forced(struct phy_device *phydev)
static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
......
......@@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
......@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
......@@ -970,6 +973,8 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
u16 max_rxd;
u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
......@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
......
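The IS_VALID_VLAN tightening above excludes MAX_VLAN_ID itself. Assuming the 802.1Q numbering where VID 4095 is reserved (the MIN_VLAN_ID/MAX_VLAN_ID values below are assumptions, not the driver's), the corrected macro behaves as follows:

#include <assert.h>

#define MIN_VLAN_ID 0     /* assumed for illustration */
#define MAX_VLAN_ID 4095  /* assumed: the reserved 802.1Q VID */
#define IS_VALID_VLAN(vlan) ((vlan) >= MIN_VLAN_ID && (vlan) < MAX_VLAN_ID)

int main(void)
{
	assert(IS_VALID_VLAN(1));
	assert(IS_VALID_VLAN(4094));
	assert(!IS_VALID_VLAN(4095)); /* rejected by the '<' fix */
	return 0;
}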
......@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
if (adapter->ahw.port_type == QLCNIC_GBE) {
ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
} else {
ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
}
ring->rx_max_pending = adapter->max_rxd;
ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
......@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
if (adapter->ahw.port_type == QLCNIC_GBE) {
max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
}
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
"rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
......
......@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
if (adapter->ahw.port_type == QLCNIC_XGBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
} else {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
}
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
......@@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies >
(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, src_addr, vlan_id,
tx_ring);
tmp_fil->ftime = jiffies;
return;
}
......
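The qlcnic_send_filter() hunk re-programs a MAC-learning entry whose last refresh is older than QLCNIC_READD_AGE seconds; the jiffies arithmetic works out as below (the HZ value and timestamps are illustrative):

#include <assert.h>

#define HZ 100              /* illustrative tick rate */
#define QLCNIC_READD_AGE 20 /* seconds, as defined in the header hunk above */

int main(void)
{
	unsigned long jiffies = 5000;
	unsigned long ftime = 2000; /* entry last touched 30 s ago at HZ=100 */

	assert(jiffies > QLCNIC_READD_AGE * HZ + ftime); /* needs re-add */
	return 0;
}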
......@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
......@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
void qlge_set_multicast_list(struct net_device *ndev);
#if 1
#define QL_ALL_DUMP
#define QL_REG_DUMP
#define QL_DEV_DUMP
#define QL_CB_DUMP
/* #define QL_ALL_DUMP */
/* #define QL_REG_DUMP */
/* #define QL_DEV_DUMP */
/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
......
......@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore enforces exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
......@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
if (qdev->vlgrp) {
u16 vid;
for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(qdev->vlgrp, vid))
continue;
qlge_vlan_rx_add_vid(qdev->ndev, vid);
}
}
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
......@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
......@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
/* Restore vlan setting. */
qlge_restore_vlan(qdev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
......@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
return &ndev->stats;
}
void qlge_set_multicast_list(struct net_device *ndev)
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;
......
......@@ -87,7 +87,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
return status;
}
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
......@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send an ACK mailbox command to the firmware to
* let it continue with the change.
*/
int ql_mb_idc_ack(struct ql_adapter *qdev)
static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
......@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
......
......@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
lp->rx_session_id[0] = 0x48;
lp->rx_session_id[0] = 0x44;
lp->rx_session_id[0] = 0x42;
lp->rx_session_id[1] = 0x48;
lp->rx_session_id[2] = 0x44;
lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
......
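The sb1000 hunk fixes a copy-paste bug: the old code assigned all four session-ID bytes to rx_session_id[0], leaving indices 1..3 stale. A trivial before/after check:

#include <assert.h>

int main(void)
{
	unsigned char before[4] = {0}, after[4] = {0};

	/* old code: every assignment hit index 0 */
	before[0] = 0x50;
	before[0] = 0x48;
	before[0] = 0x44;
	before[0] = 0x42;
	assert(before[1] == 0 && before[3] == 0); /* stale entries */

	/* fixed code fills each slot */
	after[0] = 0x50;
	after[1] = 0x48;
	after[2] = 0x44;
	after[3] = 0x42;
	assert(after[2] == 0x44 && after[3] == 0x42);
	return 0;
}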
......@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
err = -EAGAIN;
return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
......
......@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
return 0;
}
/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
#else /* CONFIG_INET */
int
slhc_toss(struct slcompress *comp)
{
......@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
#endif /* CONFIG_INET */
/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
......@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");
......@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
spin_lock_bh(&tp->lock);
if (wol->wolopts & WAKE_MAGIC) {
if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
device_set_wakeup_enable(dp, true);
} else {
else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
device_set_wakeup_enable(dp, false);
}
spin_unlock_bh(&tp->lock);
return 0;
}
......
......@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
udelay(time);
mdelay(time / 1000);
#endif
}
......
......@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
/* features */
enum {
UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
UPT1_F_RSS = 0x0002,
UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
UPT1_F_LRO = 0x0008,
UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
UPT1_F_RSS = cpu_to_le64(0x0002),
UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
......@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
* filters */
#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
* filters */
struct Vmxnet3_PM_PktFilter {
......
......@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
set_flag_le64(
&adapter->shared->devRead.misc.uptFeatures,
UPT1_F_RXCSUM);
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
else
reset_flag_le64(
&adapter->shared->devRead.misc.uptFeatures,
UPT1_F_RXCSUM);
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
......@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
cpu_to_le64(UPT1_F_LRO);
UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
cpu_to_le64(~UPT1_F_LRO);
~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
......
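The vmxnet3 hunks fold the byte-swapping into the UPT1_F_* and VMXNET3_PM_WAKEUP_* constants themselves: these fields live in little-endian device-shared memory, so pre-swapped constants let plain |= and &= replace the old set/reset helpers even on big-endian hosts. A userspace sketch of the idea, using glibc's <endian.h> for illustration:

#include <assert.h>
#include <endian.h>   /* htole64()/le64toh(), glibc */
#include <stdint.h>

int main(void)
{
	const uint64_t UPT1_F_LRO = htole64(0x0008); /* pre-swapped constant */
	uint64_t uptFeatures = 0;                    /* device-shared field */

	uptFeatures |= UPT1_F_LRO;  /* plain bit op, already in device order */
	assert(le64toh(uptFeatures) & 0x0008);

	uptFeatures &= ~UPT1_F_LRO; /* clearing works the same way */
	assert(uptFeatures == 0);
	return 0;
}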
......@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
void initialize_ethtool_ops(struct net_device *ndev)
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
......@@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
common->ah = sc->ah;
common->hw = hw;
common->cachelsz = csz << 2; /* convert to bytes */
spin_lock_init(&common->cc_lock);
/* Initialize device */
ret = ath5k_hw_attach(sc);
......