mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-23 11:18:54 +00:00
Add mlx5 and mlx5en driver(s) for ConnectX-4 and ConnectX-4LX cards
from Mellanox Technologies. The current driver supports ethernet speeds up to and including 100 GBit/s. Infiniband support will be done later. The code added is not compiled by default, which will be done by a separate commit. Sponsored by: Mellanox Technologies MFC after: 2 weeks
This commit is contained in:
parent
081432a88f
commit
dc7e38ac4d
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=290650
169
sys/dev/mlx5/cq.h
Normal file
169
sys/dev/mlx5/cq.h
Normal file
@ -0,0 +1,169 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_CORE_CQ_H
|
||||
#define MLX5_CORE_CQ_H
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/mlx5_ifc.h>
|
||||
|
||||
|
||||
/*
 * Core completion queue (CQ) state, one instance per hardware CQ.
 * Shared between the mlx5 core and its consumers (ethernet/IB).
 */
struct mlx5_core_cq {
	u32			cqn;		/* hardware CQ number */
	int			cqe_sz;		/* CQE size in bytes */
	__be32		       *set_ci_db;	/* doorbell record: consumer index */
	__be32		       *arm_db;		/* doorbell record: arm state */
	atomic_t		refcount;	/* active references to this CQ */
	struct completion	free;		/* completed when last ref is dropped */
	unsigned		vector;		/* completion vector backing this CQ */
	int			irqn;		/* interrupt number of that vector */
	void (*comp) (struct mlx5_core_cq *);		/* completion callback */
	void (*event) (struct mlx5_core_cq *, int);	/* async event callback */
	struct mlx5_uar *uar;			/* UAR page used for doorbell rings */
	u32			cons_index;	/* software consumer index */
	unsigned		arm_sn;		/* arm sequence number (low 2 bits used) */
	struct mlx5_rsc_debug	*dbg;		/* debugfs state, if registered */
	int			pid;		/* creating process id, for debugging */
};
|
||||
|
||||
|
||||
enum {
|
||||
MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
|
||||
MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
|
||||
MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
|
||||
MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
|
||||
MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
|
||||
MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
|
||||
MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
|
||||
MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
|
||||
MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
|
||||
MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
|
||||
MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
|
||||
MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
|
||||
MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_CQE_OWNER_MASK = 1,
|
||||
MLX5_CQE_REQ = 0,
|
||||
MLX5_CQE_RESP_WR_IMM = 1,
|
||||
MLX5_CQE_RESP_SEND = 2,
|
||||
MLX5_CQE_RESP_SEND_IMM = 3,
|
||||
MLX5_CQE_RESP_SEND_INV = 4,
|
||||
MLX5_CQE_RESIZE_CQ = 5,
|
||||
MLX5_CQE_SIG_ERR = 12,
|
||||
MLX5_CQE_REQ_ERR = 13,
|
||||
MLX5_CQE_RESP_ERR = 14,
|
||||
MLX5_CQE_INVALID = 15,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_CQ_MODIFY_PERIOD = 1 << 0,
|
||||
MLX5_CQ_MODIFY_COUNT = 1 << 1,
|
||||
MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_CQ_OPMOD_RESIZE = 1,
|
||||
MLX5_MODIFY_CQ_MASK_LOG_SIZE = 1 << 0,
|
||||
MLX5_MODIFY_CQ_MASK_PG_OFFSET = 1 << 1,
|
||||
MLX5_MODIFY_CQ_MASK_PG_SIZE = 1 << 2,
|
||||
};
|
||||
|
||||
struct mlx5_cq_modify_params {
|
||||
int type;
|
||||
union {
|
||||
struct {
|
||||
u32 page_offset;
|
||||
u8 log_cq_size;
|
||||
} resize;
|
||||
|
||||
struct {
|
||||
} moder;
|
||||
|
||||
struct {
|
||||
} mapping;
|
||||
} params;
|
||||
};
|
||||
|
||||
/*
 * Translate a CQE size in bytes (64 or 128) into the hardware
 * encoding (CQE_SIZE_64 / CQE_SIZE_128) used in the CQ context.
 * Any size other than 64 is treated as 128.
 */
static inline int cqe_sz_to_mlx_sz(u8 size)
{
	if (size == 64)
		return CQE_SIZE_64;
	return CQE_SIZE_128;
}
|
||||
|
||||
/*
 * Publish the software consumer index to the CQ doorbell record so
 * the device knows which CQEs have been consumed.  Only the low
 * 24 bits of the index are significant to the hardware.
 */
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}
|
||||
|
||||
enum {
|
||||
MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
|
||||
MLX5_CQ_DB_REQ_NOT = 0 << 24
|
||||
};
|
||||
|
||||
/*
 * Arm the CQ to raise the next completion event.
 *
 * cmd is MLX5_CQ_DB_REQ_NOT (any completion) or MLX5_CQ_DB_REQ_NOT_SOL
 * (solicited only).  The arm request is first recorded in the doorbell
 * record in host memory, then rung on the device's UAR page via a
 * 64-bit MMIO write; wmb() orders the two so the device never reads a
 * stale record.  doorbell_lock serializes the MMIO write on platforms
 * where a 64-bit write is not atomic (see mlx5_write64()).
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;	/* arm sequence number, low 2 bits */
	u32 ci;	/* consumer index, low 24 bits */

	sn = cq->arm_sn & 3;
	ci = cons_index & 0xffffff;

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/* Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
|
||||
|
||||
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
|
||||
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
|
||||
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
struct mlx5_create_cq_mbox_in *in, int inlen);
|
||||
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
|
||||
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
struct mlx5_query_cq_mbox_out *out);
|
||||
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
struct mlx5_modify_cq_mbox_in *in, int in_sz);
|
||||
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_cq *cq, u16 cq_period,
|
||||
u16 cq_max_count);
|
||||
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
|
||||
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
|
||||
|
||||
#endif /* MLX5_CORE_CQ_H */
|
1187
sys/dev/mlx5/device.h
Normal file
1187
sys/dev/mlx5/device.h
Normal file
File diff suppressed because it is too large
Load Diff
74
sys/dev/mlx5/doorbell.h
Normal file
74
sys/dev/mlx5/doorbell.h
Normal file
@ -0,0 +1,74 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_DOORBELL_H
|
||||
#define MLX5_DOORBELL_H
|
||||
|
||||
#define MLX5_BF_OFFSET 0x800
|
||||
#define MLX5_CQ_DOORBELL 0x20
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
/* Assume that we can just write a 64-bit doorbell atomically. s390
|
||||
* actually doesn't have writeq() but S/390 systems don't even have
|
||||
* PCI so we won't worry about it.
|
||||
*/
|
||||
|
||||
#define MLX5_DECLARE_DOORBELL_LOCK(name)
|
||||
#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
|
||||
#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
|
||||
|
||||
/*
 * Ring a doorbell: write both big-endian words as a single atomic
 * 64-bit MMIO store.  On 64-bit platforms no locking is required, so
 * doorbell_lock is ignored (callers pass the value returned by
 * MLX5_GET_DOORBELL_LOCK(), which is NULL here).
 */
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
				spinlock_t *doorbell_lock)
{
	__raw_writeq(*(u64 *)val, dest);
}
|
||||
|
||||
#else
|
||||
|
||||
/* Just fall back to a spinlock to protect the doorbell if
|
||||
* BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
|
||||
* MMIO writes.
|
||||
*/
|
||||
|
||||
#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
|
||||
#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
|
||||
#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
|
||||
|
||||
/*
 * Ring a doorbell on a 32-bit platform: the two big-endian words are
 * written as separate 32-bit MMIO stores, so doorbell_lock must be
 * held across both to keep them atomic with respect to other writers
 * of the same doorbell.  Interrupts are disabled while the lock is
 * held since doorbells may be rung from interrupt context.
 */
static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
				spinlock_t *doorbell_lock)
{
	unsigned long flags;

	spin_lock_irqsave(doorbell_lock, flags);
	__raw_writel((__force u32) val[0], dest);
	__raw_writel((__force u32) val[1], dest + 4);
	spin_unlock_irqrestore(doorbell_lock, flags);
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* MLX5_DOORBELL_H */
|
941
sys/dev/mlx5/driver.h
Normal file
941
sys/dev/mlx5/driver.h
Normal file
@ -0,0 +1,941 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_DRIVER_H
|
||||
#define MLX5_DRIVER_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/radix-tree.h>
|
||||
|
||||
#include <dev/mlx5/device.h>
|
||||
#include <dev/mlx5/doorbell.h>
|
||||
|
||||
enum {
|
||||
MLX5_BOARD_ID_LEN = 64,
|
||||
MLX5_MAX_NAME_LEN = 16,
|
||||
};
|
||||
|
||||
enum {
|
||||
/* one minute for the sake of bringup. Generally, commands must always
|
||||
* complete and we may need to increase this timeout value
|
||||
*/
|
||||
MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
|
||||
MLX5_CMD_WQ_MAX_NAME = 32,
|
||||
};
|
||||
|
||||
enum {
|
||||
CMD_OWNER_SW = 0x0,
|
||||
CMD_OWNER_HW = 0x1,
|
||||
CMD_STATUS_SUCCESS = 0,
|
||||
};
|
||||
|
||||
enum mlx5_sqp_t {
|
||||
MLX5_SQP_SMI = 0,
|
||||
MLX5_SQP_GSI = 1,
|
||||
MLX5_SQP_IEEE_1588 = 2,
|
||||
MLX5_SQP_SNIFFER = 3,
|
||||
MLX5_SQP_SYNC_UMR = 4,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_MAX_PORTS = 2,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_EQ_VEC_PAGES = 0,
|
||||
MLX5_EQ_VEC_CMD = 1,
|
||||
MLX5_EQ_VEC_ASYNC = 2,
|
||||
MLX5_EQ_VEC_COMP_BASE,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_MAX_IRQ_NAME = 32
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
|
||||
MLX5_ATOMIC_MODE_CX = 2 << 16,
|
||||
MLX5_ATOMIC_MODE_8B = 3 << 16,
|
||||
MLX5_ATOMIC_MODE_16B = 4 << 16,
|
||||
MLX5_ATOMIC_MODE_32B = 5 << 16,
|
||||
MLX5_ATOMIC_MODE_64B = 6 << 16,
|
||||
MLX5_ATOMIC_MODE_128B = 7 << 16,
|
||||
MLX5_ATOMIC_MODE_256B = 8 << 16,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_REG_QETCR = 0x4005,
|
||||
MLX5_REG_QPDP = 0x4007,
|
||||
MLX5_REG_QTCT = 0x400A,
|
||||
MLX5_REG_PCAP = 0x5001,
|
||||
MLX5_REG_PMTU = 0x5003,
|
||||
MLX5_REG_PTYS = 0x5004,
|
||||
MLX5_REG_PAOS = 0x5006,
|
||||
MLX5_REG_PFCC = 0x5007,
|
||||
MLX5_REG_PPCNT = 0x5008,
|
||||
MLX5_REG_PMAOS = 0x5012,
|
||||
MLX5_REG_PUDE = 0x5009,
|
||||
MLX5_REG_PPTB = 0x500B,
|
||||
MLX5_REG_PBMC = 0x500C,
|
||||
MLX5_REG_PMPE = 0x5010,
|
||||
MLX5_REG_PELC = 0x500e,
|
||||
MLX5_REG_PVLC = 0x500f,
|
||||
MLX5_REG_PMLP = 0x5002,
|
||||
MLX5_REG_NODE_DESC = 0x6001,
|
||||
MLX5_REG_HOST_ENDIANNESS = 0x7004,
|
||||
MLX5_REG_MCIA = 0x9014,
|
||||
};
|
||||
|
||||
enum dbg_rsc_type {
|
||||
MLX5_DBG_RSC_QP,
|
||||
MLX5_DBG_RSC_EQ,
|
||||
MLX5_DBG_RSC_CQ,
|
||||
};
|
||||
|
||||
struct mlx5_field_desc {
|
||||
struct dentry *dent;
|
||||
int i;
|
||||
};
|
||||
|
||||
struct mlx5_rsc_debug {
|
||||
struct mlx5_core_dev *dev;
|
||||
void *object;
|
||||
enum dbg_rsc_type type;
|
||||
struct dentry *root;
|
||||
struct mlx5_field_desc fields[0];
|
||||
};
|
||||
|
||||
enum mlx5_dev_event {
|
||||
MLX5_DEV_EVENT_SYS_ERROR,
|
||||
MLX5_DEV_EVENT_PORT_UP,
|
||||
MLX5_DEV_EVENT_PORT_DOWN,
|
||||
MLX5_DEV_EVENT_PORT_INITIALIZED,
|
||||
MLX5_DEV_EVENT_LID_CHANGE,
|
||||
MLX5_DEV_EVENT_PKEY_CHANGE,
|
||||
MLX5_DEV_EVENT_GUID_CHANGE,
|
||||
MLX5_DEV_EVENT_CLIENT_REREG,
|
||||
MLX5_DEV_EVENT_VPORT_CHANGE,
|
||||
};
|
||||
|
||||
enum mlx5_port_status {
|
||||
MLX5_PORT_UP = 1 << 0,
|
||||
MLX5_PORT_DOWN = 1 << 1,
|
||||
};
|
||||
|
||||
enum mlx5_link_mode {
|
||||
MLX5_1000BASE_CX_SGMII = 0,
|
||||
MLX5_1000BASE_KX = 1,
|
||||
MLX5_10GBASE_CX4 = 2,
|
||||
MLX5_10GBASE_KX4 = 3,
|
||||
MLX5_10GBASE_KR = 4,
|
||||
MLX5_20GBASE_KR2 = 5,
|
||||
MLX5_40GBASE_CR4 = 6,
|
||||
MLX5_40GBASE_KR4 = 7,
|
||||
MLX5_56GBASE_R4 = 8,
|
||||
MLX5_10GBASE_CR = 12,
|
||||
MLX5_10GBASE_SR = 13,
|
||||
MLX5_10GBASE_ER = 14,
|
||||
MLX5_40GBASE_SR4 = 15,
|
||||
MLX5_40GBASE_LR4 = 16,
|
||||
MLX5_100GBASE_CR4 = 20,
|
||||
MLX5_100GBASE_SR4 = 21,
|
||||
MLX5_100GBASE_KR4 = 22,
|
||||
MLX5_100GBASE_LR4 = 23,
|
||||
MLX5_100BASE_TX = 24,
|
||||
MLX5_1000BASE_T = 25,
|
||||
MLX5_10GBASE_T = 26,
|
||||
MLX5_25GBASE_CR = 27,
|
||||
MLX5_25GBASE_KR = 28,
|
||||
MLX5_25GBASE_SR = 29,
|
||||
MLX5_50GBASE_CR2 = 30,
|
||||
MLX5_50GBASE_KR2 = 31,
|
||||
MLX5_LINK_MODES_NUMBER,
|
||||
};
|
||||
|
||||
#define MLX5_PROT_MASK(link_mode) (1 << link_mode)
|
||||
|
||||
struct mlx5_uuar_info {
|
||||
struct mlx5_uar *uars;
|
||||
int num_uars;
|
||||
int num_low_latency_uuars;
|
||||
unsigned long *bitmap;
|
||||
unsigned int *count;
|
||||
struct mlx5_bf *bfs;
|
||||
|
||||
/*
|
||||
* protect uuar allocation data structs
|
||||
*/
|
||||
struct mutex lock;
|
||||
u32 ver;
|
||||
};
|
||||
|
||||
struct mlx5_bf {
|
||||
void __iomem *reg;
|
||||
void __iomem *regreg;
|
||||
int buf_size;
|
||||
struct mlx5_uar *uar;
|
||||
unsigned long offset;
|
||||
int need_lock;
|
||||
/* protect blue flame buffer selection when needed
|
||||
*/
|
||||
spinlock_t lock;
|
||||
|
||||
/* serialize 64 bit writes when done as two 32 bit accesses
|
||||
*/
|
||||
spinlock_t lock32;
|
||||
int uuarn;
|
||||
};
|
||||
|
||||
struct mlx5_cmd_first {
|
||||
__be32 data[4];
|
||||
};
|
||||
|
||||
struct mlx5_cmd_msg {
|
||||
struct list_head list;
|
||||
struct cache_ent *cache;
|
||||
u32 len;
|
||||
struct mlx5_cmd_first first;
|
||||
struct mlx5_cmd_mailbox *next;
|
||||
};
|
||||
|
||||
struct mlx5_cmd_debug {
|
||||
struct dentry *dbg_root;
|
||||
struct dentry *dbg_in;
|
||||
struct dentry *dbg_out;
|
||||
struct dentry *dbg_outlen;
|
||||
struct dentry *dbg_status;
|
||||
struct dentry *dbg_run;
|
||||
void *in_msg;
|
||||
void *out_msg;
|
||||
u8 status;
|
||||
u16 inlen;
|
||||
u16 outlen;
|
||||
};
|
||||
|
||||
struct cache_ent {
|
||||
/* protect block chain allocations
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct list_head head;
|
||||
};
|
||||
|
||||
struct cmd_msg_cache {
|
||||
struct cache_ent large;
|
||||
struct cache_ent med;
|
||||
|
||||
};
|
||||
|
||||
struct mlx5_cmd_stats {
|
||||
u64 sum;
|
||||
u64 n;
|
||||
struct dentry *root;
|
||||
struct dentry *avg;
|
||||
struct dentry *count;
|
||||
/* protect command average calculations */
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct mlx5_cmd {
|
||||
void *cmd_alloc_buf;
|
||||
dma_addr_t alloc_dma;
|
||||
int alloc_size;
|
||||
void *cmd_buf;
|
||||
dma_addr_t dma;
|
||||
u16 cmdif_rev;
|
||||
u8 log_sz;
|
||||
u8 log_stride;
|
||||
int max_reg_cmds;
|
||||
int events;
|
||||
u32 __iomem *vector;
|
||||
|
||||
/* protect command queue allocations
|
||||
*/
|
||||
spinlock_t alloc_lock;
|
||||
|
||||
/* protect token allocations
|
||||
*/
|
||||
spinlock_t token_lock;
|
||||
u8 token;
|
||||
unsigned long bitmask;
|
||||
char wq_name[MLX5_CMD_WQ_MAX_NAME];
|
||||
struct workqueue_struct *wq;
|
||||
struct semaphore sem;
|
||||
struct semaphore pages_sem;
|
||||
int mode;
|
||||
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
|
||||
struct pci_pool *pool;
|
||||
struct mlx5_cmd_debug dbg;
|
||||
struct cmd_msg_cache cache;
|
||||
int checksum_disabled;
|
||||
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
|
||||
int moving_to_polling;
|
||||
};
|
||||
|
||||
struct mlx5_port_caps {
|
||||
int gid_table_len;
|
||||
int pkey_table_len;
|
||||
u8 ext_port_cap;
|
||||
};
|
||||
|
||||
struct mlx5_cmd_mailbox {
|
||||
void *buf;
|
||||
dma_addr_t dma;
|
||||
struct mlx5_cmd_mailbox *next;
|
||||
};
|
||||
|
||||
struct mlx5_buf_list {
|
||||
void *buf;
|
||||
dma_addr_t map;
|
||||
};
|
||||
|
||||
struct mlx5_buf {
|
||||
struct mlx5_buf_list direct;
|
||||
struct mlx5_buf_list *page_list;
|
||||
int nbufs;
|
||||
int npages;
|
||||
int size;
|
||||
u8 page_shift;
|
||||
};
|
||||
|
||||
struct mlx5_eq {
|
||||
struct mlx5_core_dev *dev;
|
||||
__be32 __iomem *doorbell;
|
||||
u32 cons_index;
|
||||
struct mlx5_buf buf;
|
||||
int size;
|
||||
u8 irqn;
|
||||
u8 eqn;
|
||||
int nent;
|
||||
u64 mask;
|
||||
struct list_head list;
|
||||
int index;
|
||||
struct mlx5_rsc_debug *dbg;
|
||||
};
|
||||
|
||||
struct mlx5_core_psv {
|
||||
u32 psv_idx;
|
||||
struct psv_layout {
|
||||
u32 pd;
|
||||
u16 syndrome;
|
||||
u16 reserved;
|
||||
u16 bg;
|
||||
u16 app_tag;
|
||||
u32 ref_tag;
|
||||
} psv;
|
||||
};
|
||||
|
||||
struct mlx5_core_sig_ctx {
|
||||
struct mlx5_core_psv psv_memory;
|
||||
struct mlx5_core_psv psv_wire;
|
||||
#if (__FreeBSD_version >= 1100000)
|
||||
struct ib_sig_err err_item;
|
||||
#endif
|
||||
bool sig_status_checked;
|
||||
bool sig_err_exists;
|
||||
u32 sigerr_count;
|
||||
};
|
||||
|
||||
struct mlx5_core_mr {
|
||||
u64 iova;
|
||||
u64 size;
|
||||
u32 key;
|
||||
u32 pd;
|
||||
};
|
||||
|
||||
enum mlx5_res_type {
|
||||
MLX5_RES_QP,
|
||||
MLX5_RES_SRQ,
|
||||
MLX5_RES_XSRQ,
|
||||
};
|
||||
|
||||
struct mlx5_core_rsc_common {
|
||||
enum mlx5_res_type res;
|
||||
atomic_t refcount;
|
||||
struct completion free;
|
||||
};
|
||||
|
||||
struct mlx5_core_srq {
|
||||
struct mlx5_core_rsc_common common; /* must be first */
|
||||
u32 srqn;
|
||||
int max;
|
||||
int max_gs;
|
||||
int max_avail_gather;
|
||||
int wqe_shift;
|
||||
void (*event)(struct mlx5_core_srq *, int);
|
||||
atomic_t refcount;
|
||||
struct completion free;
|
||||
};
|
||||
|
||||
struct mlx5_eq_table {
|
||||
void __iomem *update_ci;
|
||||
void __iomem *update_arm_ci;
|
||||
struct list_head comp_eqs_list;
|
||||
struct mlx5_eq pages_eq;
|
||||
struct mlx5_eq async_eq;
|
||||
struct mlx5_eq cmd_eq;
|
||||
int num_comp_vectors;
|
||||
/* protect EQs list
|
||||
*/
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
struct mlx5_uar {
|
||||
u32 index;
|
||||
struct list_head bf_list;
|
||||
unsigned free_bf_bmap;
|
||||
void __iomem *bf_map;
|
||||
void __iomem *map;
|
||||
};
|
||||
|
||||
|
||||
struct mlx5_core_health {
|
||||
struct mlx5_health_buffer __iomem *health;
|
||||
__be32 __iomem *health_counter;
|
||||
struct timer_list timer;
|
||||
struct list_head list;
|
||||
u32 prev;
|
||||
int miss_counter;
|
||||
};
|
||||
|
||||
#define MLX5_CQ_LINEAR_ARRAY_SIZE 1024
|
||||
|
||||
struct mlx5_cq_linear_array_entry {
|
||||
spinlock_t lock;
|
||||
struct mlx5_core_cq * volatile cq;
|
||||
};
|
||||
|
||||
struct mlx5_cq_table {
|
||||
/* protect radix tree
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE];
|
||||
};
|
||||
|
||||
struct mlx5_qp_table {
|
||||
/* protect radix tree
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
struct mlx5_srq_table {
|
||||
/* protect radix tree
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
struct mlx5_mr_table {
|
||||
/* protect radix tree
|
||||
*/
|
||||
rwlock_t lock;
|
||||
struct radix_tree_root tree;
|
||||
};
|
||||
|
||||
struct mlx5_irq_info {
|
||||
char name[MLX5_MAX_IRQ_NAME];
|
||||
};
|
||||
|
||||
struct mlx5_priv {
|
||||
char name[MLX5_MAX_NAME_LEN];
|
||||
struct mlx5_eq_table eq_table;
|
||||
struct msix_entry *msix_arr;
|
||||
struct mlx5_irq_info *irq_info;
|
||||
struct mlx5_uuar_info uuari;
|
||||
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
|
||||
|
||||
struct io_mapping *bf_mapping;
|
||||
|
||||
/* pages stuff */
|
||||
struct workqueue_struct *pg_wq;
|
||||
struct rb_root page_root;
|
||||
int fw_pages;
|
||||
int reg_pages;
|
||||
struct list_head free_list;
|
||||
|
||||
struct mlx5_core_health health;
|
||||
|
||||
struct mlx5_srq_table srq_table;
|
||||
|
||||
/* start: qp staff */
|
||||
struct mlx5_qp_table qp_table;
|
||||
struct dentry *qp_debugfs;
|
||||
struct dentry *eq_debugfs;
|
||||
struct dentry *cq_debugfs;
|
||||
struct dentry *cmdif_debugfs;
|
||||
/* end: qp staff */
|
||||
|
||||
/* start: cq staff */
|
||||
struct mlx5_cq_table cq_table;
|
||||
/* end: cq staff */
|
||||
|
||||
/* start: mr staff */
|
||||
struct mlx5_mr_table mr_table;
|
||||
/* end: mr staff */
|
||||
|
||||
/* start: alloc staff */
|
||||
int numa_node;
|
||||
|
||||
struct mutex pgdir_mutex;
|
||||
struct list_head pgdir_list;
|
||||
/* end: alloc staff */
|
||||
struct dentry *dbg_root;
|
||||
|
||||
/* protect mkey key part */
|
||||
spinlock_t mkey_lock;
|
||||
u8 mkey_key;
|
||||
|
||||
struct list_head dev_list;
|
||||
struct list_head ctx_list;
|
||||
spinlock_t ctx_lock;
|
||||
};
|
||||
|
||||
struct mlx5_special_contexts {
|
||||
int resd_lkey;
|
||||
};
|
||||
|
||||
struct mlx5_core_dev {
|
||||
struct pci_dev *pdev;
|
||||
char board_id[MLX5_BOARD_ID_LEN];
|
||||
struct mlx5_cmd cmd;
|
||||
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
|
||||
u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
|
||||
u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
|
||||
struct mlx5_init_seg __iomem *iseg;
|
||||
void (*event) (struct mlx5_core_dev *dev,
|
||||
enum mlx5_dev_event event,
|
||||
unsigned long param);
|
||||
struct mlx5_priv priv;
|
||||
struct mlx5_profile *profile;
|
||||
atomic_t num_qps;
|
||||
u32 issi;
|
||||
struct mlx5_special_contexts special_contexts;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_WOL_DISABLE = 0,
|
||||
MLX5_WOL_SECURED_MAGIC = 1 << 1,
|
||||
MLX5_WOL_MAGIC = 1 << 2,
|
||||
MLX5_WOL_ARP = 1 << 3,
|
||||
MLX5_WOL_BROADCAST = 1 << 4,
|
||||
MLX5_WOL_MULTICAST = 1 << 5,
|
||||
MLX5_WOL_UNICAST = 1 << 6,
|
||||
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
|
||||
};
|
||||
|
||||
struct mlx5_db {
|
||||
__be32 *db;
|
||||
union {
|
||||
struct mlx5_db_pgdir *pgdir;
|
||||
struct mlx5_ib_user_db_page *user_page;
|
||||
} u;
|
||||
dma_addr_t dma;
|
||||
int index;
|
||||
};
|
||||
|
||||
struct mlx5_net_counters {
|
||||
u64 packets;
|
||||
u64 octets;
|
||||
};
|
||||
|
||||
struct mlx5_ptys_reg {
|
||||
u8 local_port;
|
||||
u8 proto_mask;
|
||||
u32 eth_proto_cap;
|
||||
u16 ib_link_width_cap;
|
||||
u16 ib_proto_cap;
|
||||
u32 eth_proto_admin;
|
||||
u16 ib_link_width_admin;
|
||||
u16 ib_proto_admin;
|
||||
u32 eth_proto_oper;
|
||||
u16 ib_link_width_oper;
|
||||
u16 ib_proto_oper;
|
||||
u32 eth_proto_lp_advertise;
|
||||
};
|
||||
|
||||
struct mlx5_pvlc_reg {
|
||||
u8 local_port;
|
||||
u8 vl_hw_cap;
|
||||
u8 vl_admin;
|
||||
u8 vl_operational;
|
||||
};
|
||||
|
||||
struct mlx5_pmtu_reg {
|
||||
u8 local_port;
|
||||
u16 max_mtu;
|
||||
u16 admin_mtu;
|
||||
u16 oper_mtu;
|
||||
};
|
||||
|
||||
struct mlx5_vport_counters {
|
||||
struct mlx5_net_counters received_errors;
|
||||
struct mlx5_net_counters transmit_errors;
|
||||
struct mlx5_net_counters received_ib_unicast;
|
||||
struct mlx5_net_counters transmitted_ib_unicast;
|
||||
struct mlx5_net_counters received_ib_multicast;
|
||||
struct mlx5_net_counters transmitted_ib_multicast;
|
||||
struct mlx5_net_counters received_eth_broadcast;
|
||||
struct mlx5_net_counters transmitted_eth_broadcast;
|
||||
struct mlx5_net_counters received_eth_unicast;
|
||||
struct mlx5_net_counters transmitted_eth_unicast;
|
||||
struct mlx5_net_counters received_eth_multicast;
|
||||
struct mlx5_net_counters transmitted_eth_multicast;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_COMP_EQ_SIZE = 1024,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_PTYS_IB = 1 << 0,
|
||||
MLX5_PTYS_EN = 1 << 2,
|
||||
};
|
||||
|
||||
struct mlx5_db_pgdir {
|
||||
struct list_head list;
|
||||
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
|
||||
__be32 *db_page;
|
||||
dma_addr_t db_dma;
|
||||
};
|
||||
|
||||
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
|
||||
|
||||
struct mlx5_cmd_work_ent {
|
||||
struct mlx5_cmd_msg *in;
|
||||
struct mlx5_cmd_msg *out;
|
||||
void *uout;
|
||||
int uout_size;
|
||||
mlx5_cmd_cbk_t callback;
|
||||
void *context;
|
||||
int idx;
|
||||
struct completion done;
|
||||
struct mlx5_cmd *cmd;
|
||||
struct work_struct work;
|
||||
struct mlx5_cmd_layout *lay;
|
||||
int ret;
|
||||
int page_queue;
|
||||
u8 status;
|
||||
u8 token;
|
||||
u64 ts1;
|
||||
u64 ts2;
|
||||
u16 op;
|
||||
};
|
||||
|
||||
struct mlx5_pas {
|
||||
u64 pa;
|
||||
u8 log_sz;
|
||||
};
|
||||
|
||||
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
|
||||
{
|
||||
if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
|
||||
return buf->direct.buf + offset;
|
||||
else
|
||||
return buf->page_list[offset >> PAGE_SHIFT].buf +
|
||||
(offset & (PAGE_SIZE - 1));
|
||||
}
|
||||
|
||||
|
||||
extern struct workqueue_struct *mlx5_core_wq;
|
||||
|
||||
#define STRUCT_FIELD(header, field) \
|
||||
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
|
||||
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
|
||||
|
||||
/*
 * Recover the mlx5 core device from a PCI device; the core device is
 * stored as the PCI driver data at probe time.
 */
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
|
||||
|
||||
extern struct dentry *mlx5_debugfs_root;
|
||||
|
||||
/*
 * Firmware major revision: low 16 bits of the fw_rev word in the
 * device's initialization segment.
 */
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}
|
||||
|
||||
/*
 * Firmware minor revision: high 16 bits of the fw_rev word in the
 * device's initialization segment.
 */
static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}
|
||||
|
||||
/*
 * Firmware sub-minor revision: low 16 bits of the cmdif_rev_fw_sub
 * word in the device's initialization segment.
 */
static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
|
||||
|
||||
/*
 * Command interface revision: high 16 bits of the cmdif_rev_fw_sub
 * word in the device's initialization segment.
 */
static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
|
||||
|
||||
/*
 * Convert the log-encoded gid_table_size capability field into an
 * entry count: param encodes 8 * 2^param entries.  Valid encodings
 * are 0..4 (8..128 entries); anything larger is rejected.
 *
 * Returns the number of GID table entries, or 0 when the encoding
 * reported by firmware is out of range.
 */
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		/* Previous message claimed the length "is zero", which is
		 * not what this check detects; report the real problem.
		 */
		printf("M4_CORE_DRV_NAME: WARN: ""invalid gid table length encoding %d, max is 4\n",
		    param);
		return 0;
	}

	return 8 * (1 << param);
}
|
||||
|
||||
static inline void *mlx5_vzalloc(unsigned long size)
|
||||
{
|
||||
void *rtn;
|
||||
|
||||
rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
|
||||
return rtn;
|
||||
}
|
||||
|
||||
/*
 * Strip the low 8 variable-key bits from a memory key, leaving the
 * base index used to look the mkey up in the MR table.
 */
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
|
||||
|
||||
int mlx5_cmd_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
|
||||
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
|
||||
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
|
||||
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
|
||||
int mlx5_cmd_status_to_err_v2(void *ptr);
|
||||
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
|
||||
enum mlx5_cap_mode cap_mode);
|
||||
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
||||
int out_size);
|
||||
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
|
||||
void *out, int out_size, mlx5_cmd_cbk_t callback,
|
||||
void *context);
|
||||
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
|
||||
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
|
||||
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
|
||||
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
|
||||
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
|
||||
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
|
||||
void mlx5_health_cleanup(void);
|
||||
void __init mlx5_health_init(void);
|
||||
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
|
||||
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
|
||||
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct,
|
||||
struct mlx5_buf *buf, int node);
|
||||
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
|
||||
struct mlx5_buf *buf);
|
||||
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
|
||||
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in, int inlen,
|
||||
int is_xrc);
|
||||
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
|
||||
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_query_srq_mbox_out *out);
|
||||
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
|
||||
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm, int is_srq);
|
||||
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
|
||||
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
|
||||
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
||||
struct mlx5_create_mkey_mbox_in *in, int inlen,
|
||||
mlx5_cmd_cbk_t callback, void *context,
|
||||
struct mlx5_create_mkey_mbox_out *out);
|
||||
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
|
||||
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
||||
struct mlx5_query_mkey_mbox_out *out, int outlen);
|
||||
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
||||
u32 *mkey);
|
||||
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
|
||||
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
|
||||
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
|
||||
u16 opmod, u8 port);
|
||||
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
|
||||
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
|
||||
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
|
||||
s32 npages);
|
||||
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
|
||||
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
|
||||
void mlx5_register_debugfs(void);
|
||||
void mlx5_unregister_debugfs(void);
|
||||
int mlx5_eq_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
|
||||
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
|
||||
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
|
||||
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
|
||||
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
|
||||
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
|
||||
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
|
||||
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
|
||||
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
|
||||
int nent, u64 mask, const char *name, struct mlx5_uar *uar);
|
||||
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
int mlx5_start_eqs(struct mlx5_core_dev *dev);
|
||||
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
|
||||
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
|
||||
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
|
||||
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
|
||||
|
||||
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
|
||||
int size_in, void *data_out, int size_out,
|
||||
u16 reg_num, int arg, int write);
|
||||
|
||||
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
|
||||
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
|
||||
int ptys_size, int proto_mask);
|
||||
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
|
||||
u32 *proto_cap, int proto_mask);
|
||||
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
|
||||
u32 *proto_admin, int proto_mask);
|
||||
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
|
||||
int proto_mask);
|
||||
int mlx5_set_port_status(struct mlx5_core_dev *dev,
|
||||
enum mlx5_port_status status);
|
||||
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
|
||||
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
|
||||
u32 rx_pause, u32 tx_pause);
|
||||
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
|
||||
u32 *rx_pause, u32 *tx_pause);
|
||||
|
||||
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
|
||||
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
|
||||
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
|
||||
|
||||
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
|
||||
int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num,
|
||||
int device_addr, int size, int module_num, u32 *data,
|
||||
int *size_read);
|
||||
|
||||
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
|
||||
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
||||
struct mlx5_query_eq_mbox_out *out, int outlen);
|
||||
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
|
||||
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
|
||||
int node);
|
||||
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
|
||||
|
||||
const char *mlx5_command_str(int command);
|
||||
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
|
||||
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
|
||||
int npsvs, u32 *sig_index);
|
||||
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
|
||||
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
|
||||
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
|
||||
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
|
||||
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
|
||||
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
|
||||
struct mlx5_pvlc_reg *pvlc, int write);
|
||||
int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
|
||||
struct mlx5_ptys_reg *ptys, int write);
|
||||
int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
|
||||
struct mlx5_pmtu_reg *pmtu, int write);
|
||||
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port);
|
||||
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port);
|
||||
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
|
||||
int priority, int *is_enable);
|
||||
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
|
||||
int priority, int enable);
|
||||
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
|
||||
void *out, int out_size);
|
||||
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
|
||||
void *in, int in_size);
|
||||
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
|
||||
void *out, int out_size);
|
||||
/* Strip the low variant byte off an mkey, leaving the table index. */
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return (mkey >> 8);
}
|
||||
|
||||
/* Build an mkey from a table index; the variant byte is left zero. */
static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return (mkey_idx << 8);
}
|
||||
|
||||
/* Extract the variant, carried in the low byte of an mkey. */
static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return (mkey & 0xff);
}
|
||||
|
||||
/* Bit selectors for struct mlx5_profile::mask. */
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};
|
||||
|
||||
/* Capacity of struct mlx5_profile::mr_cache. */
enum {
	MAX_MR_CACHE_ENTRIES	= 16,
};
|
||||
|
||||
/* Values for struct mlx5_interface::protocol. */
enum {
	MLX5_INTERFACE_PROTOCOL_IB	= 0,
	MLX5_INTERFACE_PROTOCOL_ETH	= 1,
};
|
||||
|
||||
/*
 * Registration record for a protocol driver layered on top of
 * mlx5_core, hooked in via mlx5_register_interface().
 */
struct mlx5_interface {
	/* Bind to a device; the return value is the driver's context. */
	void * (*add)(struct mlx5_core_dev *dev);
	/* Unbind from a device; 'context' is the value add() returned. */
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	/* Deliver an asynchronous device event to the protocol driver. */
	void (*event)(struct mlx5_core_dev *dev, void *context,
		      enum mlx5_dev_event event, unsigned long param);
	void * (*get_dev)(void *context);
	int protocol;		/* MLX5_INTERFACE_PROTOCOL_* */
	struct list_head list;	/* linkage in the interface list */
};
|
||||
|
||||
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
|
||||
int mlx5_register_interface(struct mlx5_interface *intf);
|
||||
void mlx5_unregister_interface(struct mlx5_interface *intf);
|
||||
|
||||
/*
 * Tunable resource profile.  'mask' is a bitmap of MLX5_PROF_MASK_*
 * values selecting which of the remaining fields are valid.
 */
struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
|
||||
|
||||
|
||||
#define MLX5_EEPROM_MAX_BYTES 48
|
||||
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
|
||||
#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
|
||||
#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
|
||||
#endif /* MLX5_DRIVER_H */
|
50
sys/dev/mlx5/flow_table.h
Normal file
50
sys/dev/mlx5/flow_table.h
Normal file
@ -0,0 +1,50 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_FLOW_TABLE_H
|
||||
#define MLX5_FLOW_TABLE_H
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
|
||||
/*
 * Description of one group inside a flow table, passed to
 * mlx5_create_flow_table().
 */
struct mlx5_flow_table_group {
	u8 log_sz;
	u8 match_criteria_enable;
	u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
|
||||
|
||||
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
|
||||
u16 vport,
|
||||
u16 num_groups,
|
||||
struct mlx5_flow_table_group *group);
|
||||
void mlx5_destroy_flow_table(void *flow_table);
|
||||
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
|
||||
void *match_criteria, void *flow_context,
|
||||
u32 *flow_index);
|
||||
void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
|
||||
u32 mlx5_get_flow_table_id(void *flow_table);
|
||||
|
||||
#endif /* MLX5_FLOW_TABLE_H */
|
256
sys/dev/mlx5/mlx5_core/mlx5_alloc.c
Normal file
256
sys/dev/mlx5/mlx5_core/mlx5_alloc.c
Normal file
@ -0,0 +1,256 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/* Handling for queue buffers -- we allocate a bunch of memory and
|
||||
* register it in a memory region at HCA virtual address 0. If the
|
||||
* requested size is > max_direct, we split the allocation into
|
||||
* multiple pages, so we don't require too much contiguous memory.
|
||||
*/
|
||||
|
||||
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
|
||||
size_t size, dma_addr_t *dma_handle,
|
||||
int node)
|
||||
{
|
||||
void *cpu_handle;
|
||||
|
||||
cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
|
||||
dma_handle, GFP_KERNEL);
|
||||
return cpu_handle;
|
||||
}
|
||||
|
||||
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct,
|
||||
struct mlx5_buf *buf, int node)
|
||||
{
|
||||
dma_addr_t t;
|
||||
|
||||
buf->size = size;
|
||||
if (size <= max_direct) {
|
||||
buf->nbufs = 1;
|
||||
buf->npages = 1;
|
||||
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
|
||||
buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
|
||||
&t, node);
|
||||
if (!buf->direct.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
buf->direct.map = t;
|
||||
|
||||
while (t & ((1 << buf->page_shift) - 1)) {
|
||||
--buf->page_shift;
|
||||
buf->npages *= 2;
|
||||
}
|
||||
} else {
|
||||
int i;
|
||||
|
||||
buf->direct.buf = NULL;
|
||||
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
buf->npages = buf->nbufs;
|
||||
buf->page_shift = PAGE_SHIFT;
|
||||
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
|
||||
GFP_KERNEL);
|
||||
|
||||
for (i = 0; i < buf->nbufs; i++) {
|
||||
buf->page_list[i].buf =
|
||||
mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
|
||||
&t, node);
|
||||
|
||||
buf->page_list[i].map = t;
|
||||
}
|
||||
|
||||
if (BITS_PER_LONG == 64) {
|
||||
struct page **pages;
|
||||
|
||||
pages = kmalloc(sizeof(*pages) * (buf->nbufs + 1),
|
||||
GFP_KERNEL);
|
||||
for (i = 0; i < buf->nbufs; i++)
|
||||
pages[i] = virt_to_page(buf->page_list[i].buf);
|
||||
pages[buf->nbufs] = pages[0];
|
||||
buf->direct.buf = vmap(pages, buf->nbufs + 1, VM_MAP,
|
||||
PAGE_KERNEL);
|
||||
kfree(pages);
|
||||
if (!buf->direct.buf)
|
||||
goto err_free;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
mlx5_buf_free(dev, buf);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
|
||||
struct mlx5_buf *buf)
|
||||
{
|
||||
return mlx5_buf_alloc_node(dev, size, max_direct,
|
||||
buf, dev->priv.numa_node);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
|
||||
|
||||
|
||||
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
|
||||
{
|
||||
if (buf->nbufs == 1)
|
||||
dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
|
||||
buf->direct.map);
|
||||
else {
|
||||
int i;
|
||||
if (BITS_PER_LONG == 64 && buf->direct.buf)
|
||||
vunmap(buf->direct.buf);
|
||||
|
||||
for (i = 0; i < buf->nbufs; i++)
|
||||
if (buf->page_list[i].buf)
|
||||
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
|
||||
buf->page_list[i].buf,
|
||||
buf->page_list[i].map);
|
||||
kfree(buf->page_list);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_buf_free);
|
||||
|
||||
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
|
||||
int node)
|
||||
{
|
||||
struct mlx5_db_pgdir *pgdir;
|
||||
|
||||
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
|
||||
|
||||
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
|
||||
|
||||
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
|
||||
&pgdir->db_dma, node);
|
||||
if (!pgdir->db_page) {
|
||||
kfree(pgdir);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pgdir;
|
||||
}
|
||||
|
||||
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
|
||||
struct mlx5_db *db)
|
||||
{
|
||||
int offset;
|
||||
int i;
|
||||
|
||||
i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
|
||||
if (i >= MLX5_DB_PER_PAGE)
|
||||
return -ENOMEM;
|
||||
|
||||
__clear_bit(i, pgdir->bitmap);
|
||||
|
||||
db->u.pgdir = pgdir;
|
||||
db->index = i;
|
||||
offset = db->index * L1_CACHE_BYTES;
|
||||
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
|
||||
db->dma = pgdir->db_dma + offset;
|
||||
|
||||
db->db[0] = 0;
|
||||
db->db[1] = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Allocate a doorbell record for 'db', preferring memory on NUMA node
 * 'node'.  Existing directory pages are scanned for a free slot first;
 * a new page is allocated only when all are full.
 * Returns 0 on success or -ENOMEM.
 */
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	/* pgdir_mutex serializes all access to the pgdir list and bitmaps. */
	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
|
||||
|
||||
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
|
||||
{
|
||||
return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
|
||||
|
||||
/*
 * Return the doorbell slot held by 'db' to its page directory.  When
 * the page becomes entirely free it is released and unlinked.
 */
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	/* Mark this slot free again. */
	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		/* Last user gone -- release the whole directory page. */
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
|
||||
|
||||
|
||||
/*
 * Write the big-endian DMA address of every page backing 'buf' into the
 * page-address array 'pas'.
 */
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	int i;

	for (i = 0; i < buf->npages; i++) {
		u64 addr = (buf->nbufs == 1) ?
		    buf->direct.map + ((u64)i << buf->page_shift) :
		    buf->page_list[i].map;

		pas[i] = cpu_to_be64(addr);
	}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
|
1571
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
Normal file
1571
sys/dev/mlx5/mlx5_core/mlx5_cmd.c
Normal file
File diff suppressed because it is too large
Load Diff
92
sys/dev/mlx5/mlx5_core/mlx5_core.h
Normal file
92
sys/dev/mlx5/mlx5_core/mlx5_core.h
Normal file
@ -0,0 +1,92 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef __MLX5_CORE_H__
|
||||
#define __MLX5_CORE_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#define DRIVER_NAME "mlx5_core"
|
||||
#define DRIVER_VERSION "1.23.0 (03 Mar 2015)"
|
||||
#define DRIVER_RELDATE "03 Mar 2015"
|
||||
|
||||
extern int mlx5_core_debug_mask;
|
||||
|
||||
#define mlx5_core_dbg(dev, format, ...) \
|
||||
pr_debug("%s:%s:%d:(pid %d): " format, \
|
||||
(dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
#define mlx5_core_dbg_mask(dev, mask, format, ...) \
|
||||
do { \
|
||||
if ((mask) & mlx5_core_debug_mask) \
|
||||
mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define mlx5_core_err(dev, format, ...) \
|
||||
printf("mlx5_core: ERR: ""%s:%s:%d:(pid %d): " format, \
|
||||
(dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
#define mlx5_core_warn(dev, format, ...) \
|
||||
printf("mlx5_core: WARN: ""%s:%s:%d:(pid %d): " format, \
|
||||
(dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
|
||||
##__VA_ARGS__)
|
||||
|
||||
enum {
|
||||
MLX5_CMD_DATA, /* print command payload only */
|
||||
MLX5_CMD_TIME, /* print command execution time */
|
||||
};
|
||||
|
||||
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
|
||||
int mlx5_query_board_id(struct mlx5_core_dev *dev);
|
||||
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
|
||||
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
|
||||
|
||||
void mlx5e_init(void);
|
||||
void mlx5e_cleanup(void);
|
||||
|
||||
/*
 * Execute a firmware command and fold the outbox status into the return
 * value: a transport-level error wins, otherwise the command status is
 * translated to an errno.
 */
static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
					     int in_size, u32 *out,
					     int out_size)
{
	int err = mlx5_cmd_exec(dev, in, in_size, out, out_size);

	if (err != 0)
		return err;

	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
|
||||
|
||||
int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name);
|
||||
|
||||
#endif /* __MLX5_CORE_H__ */
|
282
sys/dev/mlx5/mlx5_core/mlx5_cq.c
Normal file
282
sys/dev/mlx5/mlx5_core/mlx5_cq.c
Normal file
@ -0,0 +1,282 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <dev/mlx5/cq.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Dispatch a completion event for 'cqn' to the owning CQ's comp()
 * handler.  Low-numbered CQs go through a per-entry linear-array fast
 * path; the rest fall back to the radix tree, holding a reference
 * across the callback.
 */
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cqn];
		/* The entry lock keeps the CQ pointer valid for comp(). */
		spin_lock(&entry->lock);
		cq = entry->cq;
		if (cq == NULL) {
			mlx5_core_warn(dev,
			    "Completion event for bogus CQ 0x%x\n", cqn);
		} else {
			++cq->arm_sn;
			cq->comp(cq);
		}
		spin_unlock(&entry->lock);
		return;
	}

	/* Slow path: radix-tree lookup plus refcount pin. */
	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	/* Drop the lookup reference; last put wakes the destroyer. */
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
|
||||
|
||||
/*
 * Dispatch an asynchronous event for 'cqn' to the owning CQ's event()
 * handler, holding a reference across the callback.
 */
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq != NULL)
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (cq == NULL) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	/* Drop the lookup reference; last put wakes the destroyer. */
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
|
||||
|
||||
|
||||
/*
 * Create a CQ in firmware from the caller-built mailbox 'in', then
 * register it in the radix tree (and, for low CQNs, the linear array)
 * so events can be routed to it.  On a tree-insert failure the firmware
 * object is destroyed again.  Returns 0 or a negative errno.
 */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen)
{
	int err;
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_create_cq_mbox_out out;
	struct mlx5_destroy_cq_mbox_in din;
	struct mlx5_destroy_cq_mbox_out dout;

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
	memset(&out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	/* The CQ number is a 24-bit field. */
	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
	cq->cons_index = 0;
	cq->arm_sn = 0;
	/* Creation reference, dropped in mlx5_core_destroy_cq(). */
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	/* Publish low CQNs in the completion fast-path array as well. */
	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = cq;
		spin_unlock_irq(&entry->lock);
	}

	cq->pid = curthread->td_proc->p_pid;

	return 0;

err_cmd:
	/* Undo the firmware-side creation; best effort, error ignored. */
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	din.cqn = cpu_to_be32(cq->cqn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
|
||||
|
||||
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
|
||||
{
|
||||
struct mlx5_cq_table *table = &dev->priv.cq_table;
|
||||
struct mlx5_destroy_cq_mbox_in in;
|
||||
struct mlx5_destroy_cq_mbox_out out;
|
||||
struct mlx5_core_cq *tmp;
|
||||
int err;
|
||||
|
||||
if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
|
||||
struct mlx5_cq_linear_array_entry *entry;
|
||||
|
||||
entry = &table->linear_array[cq->cqn];
|
||||
spin_lock_irq(&entry->lock);
|
||||
entry->cq = NULL;
|
||||
spin_unlock_irq(&entry->lock);
|
||||
}
|
||||
|
||||
spin_lock_irq(&table->lock);
|
||||
tmp = radix_tree_delete(&table->tree, cq->cqn);
|
||||
spin_unlock_irq(&table->lock);
|
||||
if (!tmp) {
|
||||
mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (tmp != cq) {
|
||||
mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
|
||||
in.cqn = cpu_to_be32(cq->cqn);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out.hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out.hdr);
|
||||
|
||||
synchronize_irq(cq->irqn);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
wait_for_completion(&cq->free);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_destroy_cq);
|
||||
|
||||
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
struct mlx5_query_cq_mbox_out *out)
|
||||
{
|
||||
struct mlx5_query_cq_mbox_in in;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(*out));
|
||||
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
|
||||
in.cqn = cpu_to_be32(cq->cqn);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out->hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out->hdr);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_query_cq);
|
||||
|
||||
|
||||
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
|
||||
struct mlx5_modify_cq_mbox_in *in, int in_sz)
|
||||
{
|
||||
struct mlx5_modify_cq_mbox_out out;
|
||||
int err;
|
||||
|
||||
memset(&out, 0, sizeof(out));
|
||||
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
|
||||
err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out.hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out.hdr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_modify_cq);
|
||||
|
||||
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_cq *cq,
|
||||
u16 cq_period,
|
||||
u16 cq_max_count)
|
||||
{
|
||||
struct mlx5_modify_cq_mbox_in in;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
|
||||
in.cqn = cpu_to_be32(cq->cqn);
|
||||
in.ctx.cq_period = cpu_to_be16(cq_period);
|
||||
in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
|
||||
in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
|
||||
MLX5_CQ_MODIFY_COUNT);
|
||||
|
||||
return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
|
||||
}
|
||||
|
||||
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_cq_table *table = &dev->priv.cq_table;
|
||||
int err;
|
||||
int x;
|
||||
|
||||
spin_lock_init(&table->lock);
|
||||
for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
|
||||
spin_lock_init(&table->linear_array[x].lock);
|
||||
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
|
||||
err = 0;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Teardown counterpart of mlx5_init_cq_table(); currently a no-op. */
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}
|
592
sys/dev/mlx5/mlx5_core/mlx5_eq.c
Normal file
592
sys/dev/mlx5/mlx5_core/mlx5_eq.c
Normal file
@ -0,0 +1,592 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/mlx5_ifc.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/* EQE geometry and the ownership value written into freshly created EQEs. */
enum {
	MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

/*
 * EQ sizing: MLX5_NUM_SPARE_EQE extra entries are added to every EQ so the
 * consumer index only has to be written back that often (see mlx5_eq_int());
 * the other two are the depths of the async and command EQs.
 */
enum {
	MLX5_NUM_SPARE_EQE = 0x80,
	MLX5_NUM_ASYNC_EQE = 0x100,
	MLX5_NUM_CMD_EQE = 32,
};

/* Byte offset of the EQ doorbell registers within the UAR page. */
enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
|
||||
|
||||
/*
 * Default event mask for the async EQ: one bit per event type the driver
 * wants delivered (QP/SRQ state changes and errors, CQ errors, port and
 * vport change notifications).  The port-module event bit is OR'ed in
 * separately in mlx5_start_eqs() when the device reports the capability.
 */
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
			       (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
|
||||
|
||||
/*
 * Minimal mailbox layouts used by the EQ map/unmap commands.
 * NOTE(review): field meanings inferred from names only — the structs are
 * not referenced in this chunk; confirm against the command definitions.
 */
struct map_eq_in {
	u64 mask;
	u32 reserved;
	u32 unmap_eqn;
};

struct cre_des_eq {
	u8 reserved[15];
	u8 eqn;
};

/* Forward declaration: handler for PORT_MODULE (cable/SFP) events. */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe);
|
||||
|
||||
/*
 * Issue a DESTROY_EQ firmware command for event queue 'eqn'.
 * Returns 0 on success or a negative error from the command layer.
 */
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
|
||||
|
||||
/* Return a pointer to EQE number 'entry' within the EQ buffer. */
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	u32 offset = entry * MLX5_EQE_SIZE;

	return mlx5_buf_offset(&eq->buf, offset);
}
|
||||
|
||||
/*
 * Return the EQE at the current consumer index if hardware has handed it
 * to software, or NULL when no new entry is available.
 *
 * Each EQE's owner bit flips on every full pass over the queue; comparing
 * it with the pass-parity bit of cons_index (eq->nent is a power of two,
 * see mlx5_create_map_eq()) tells whether the entry is valid yet.
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
|
||||
|
||||
/*
 * Map an EQE type code to a printable name for debug/warning messages.
 * Returns a static string; "Unrecognized event" for unknown codes.
 */
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	default:
		return "Unrecognized event";
	}
}
|
||||
|
||||
/*
 * Translate a PORT_CHANGE EQE subtype into the corresponding
 * mlx5_dev_event delivered to dev->event().  Returns -1 (cast to the
 * enum) for subtypes not handled here; callers in mlx5_eq_int() only
 * invoke this for the known subtypes, so the -1 path is defensive.
 */
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
|
||||
|
||||
/*
 * Write the consumer index (and EQ number in the top byte) to the EQ
 * doorbell.  With 'arm' non-zero the first doorbell register is used,
 * which also re-arms the EQ for interrupts; otherwise the register two
 * words further only updates the index.  NOTE(review): register layout
 * inferred from the +0/+2 offsets — confirm against the PRM.
 */
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
|
||||
|
||||
/*
 * Poll an EQ and dispatch every new EQE to the appropriate handler
 * (CQ completions, QP/SRQ events, command completions, port changes,
 * page requests, module and vport events).  Runs from the MSI-X handler.
 * Returns non-zero when at least one EQE was consumed.
 */
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;	/* EQEs consumed since the last doorbell write */
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		/* QP/WQ affiliated events and errors. */
		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			/* Firmware command completion vector. */
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				/* Firmware requests (npages > 0) or returns pages. */
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			{
				struct mlx5_eqe_vport_change *vc_eqe =
						&eqe->data.vport_change;
				u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

				if (dev->event)
					dev->event(dev,
					     MLX5_DEV_EVENT_VPORT_CHANGE,
					     (unsigned long)vport_num);
			}
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events. We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	/* Final index write also re-arms the EQ for the next interrupt. */
	eq_update_ci(eq, 1);

	return eqes_found;
}
|
||||
|
||||
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
|
||||
{
|
||||
struct mlx5_eq *eq = eq_ptr;
|
||||
struct mlx5_core_dev *dev = eq->dev;
|
||||
|
||||
mlx5_eq_int(dev, eq);
|
||||
|
||||
/* MSI-X vectors always belong to us */
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void init_eq_buf(struct mlx5_eq *eq)
|
||||
{
|
||||
struct mlx5_eqe *eqe;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < eq->nent; i++) {
|
||||
eqe = get_eqe(eq, i);
|
||||
eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Create an event queue in firmware and hook its MSI-X vector.
 *
 * Allocates the EQ buffer (rounded up to a power of two, plus
 * MLX5_NUM_SPARE_EQE slack), issues CREATE_EQ, requests the interrupt
 * for 'vecidx' and arms the EQ.  On failure every step is unwound in
 * reverse order via the goto chain.  Returns 0 or a negative errno.
 */
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	/* Power-of-two size is required by next_eqe_sw()'s parity check. */
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	/* Mailbox carries one PAS entry per buffer page. */
	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	eq->eqn = out.eq_number;
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
|
||||
|
||||
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
|
||||
{
|
||||
int err;
|
||||
|
||||
free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
|
||||
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
|
||||
if (err)
|
||||
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
|
||||
eq->eqn);
|
||||
mlx5_buf_free(dev, &eq->buf);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
|
||||
|
||||
int mlx5_eq_init(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int err;
|
||||
|
||||
spin_lock_init(&dev->priv.eq_table.lock);
|
||||
|
||||
err = 0;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
/* Teardown counterpart of mlx5_eq_init(); currently nothing to release. */
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}
|
||||
|
||||
/*
 * Bring up the three driver EQs in order: command EQ first (then switch
 * the command interface from polling to event mode), async EQ, and the
 * page-request EQ.  The async event mask gains the port-module bit only
 * when the device advertises that capability.  On failure, previously
 * created EQs are destroyed and command polling mode is restored.
 */
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	/* Command completions can now arrive as events instead of polling. */
	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
|
||||
|
||||
/*
 * Reverse of mlx5_start_eqs(): destroy the pages and async EQs, switch
 * the command interface back to polling, then destroy the command EQ.
 * If the command EQ cannot be destroyed, event mode is re-enabled so the
 * command interface stays usable.  Returns the first/last command error.
 */
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	/* Must poll for completions once the command EQ is gone. */
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}
|
||||
|
||||
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
|
||||
struct mlx5_query_eq_mbox_out *out, int outlen)
|
||||
{
|
||||
struct mlx5_query_eq_mbox_in in;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(out, 0, outlen);
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
|
||||
in.eqn = eq->eqn;
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out->hdr.status)
|
||||
err = mlx5_cmd_status_to_err(&out->hdr);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
|
||||
|
||||
/*
 * Map a port-module error code (from a PORT_MODULE EQE) to a printable
 * description for the log messages in mlx5_port_module_event().
 */
static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
	switch (error_type) {
	case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
		return "Power Budget Exceeded";
	case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
		return "Long Range for non MLNX cable/module";
	case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
		return "Bus stuck(I2C or data shorted)";
	case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
		return "No EEPROM/retry timeout";
	case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
		return "Enforce part number list";
	case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
		return "Unknown identifier";
	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
		return "High Temperature";

	default:
		return "Unknown error type";
	}
}
|
||||
|
||||
/*
 * Handle a PORT_MODULE EQE: decode the module number, status and error
 * type, and log a human-readable message.  Purely informational — no
 * driver state is changed here.
 */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe)
{
	unsigned int module_num;
	unsigned int module_status;
	unsigned int error_type;
	struct mlx5_eqe_port_module_event *module_event_eqe;
	struct pci_dev *pdev = dev->pdev;

	module_event_eqe = &eqe->data.port_module_event;

	module_num = (unsigned int)module_event_eqe->module;
	/* Status and error share bytes with reserved bits; mask them off. */
	module_status = (unsigned int)module_event_eqe->module_status &
			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
	error_type = (unsigned int)module_event_eqe->error_type &
		     PORT_MODULE_EVENT_ERROR_TYPE_MASK;

	switch (module_status) {
	case MLX5_MODULE_STATUS_PLUGGED:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged", module_num);
		break;

	case MLX5_MODULE_STATUS_UNPLUGGED:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged", module_num);
		break;

	case MLX5_MODULE_STATUS_ERROR:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s", module_num, mlx5_port_module_event_error_type_to_string(error_type));
		break;

	default:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status", module_num);
	}
}
|
||||
|
432
sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
Normal file
432
sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
Normal file
@ -0,0 +1,432 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/flow_table.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * One flow group within a flow table: the caller-supplied group
 * description plus the firmware-assigned group id and the index of the
 * group's first entry within the table-wide bitmap.
 */
struct mlx5_ftg {
	struct mlx5_flow_table_group g;
	u32 id;
	u32 start_ix;
};

/*
 * Driver-side state of one firmware flow table.  'bitmap' tracks which
 * of the 'size' flow entries are in use; allocation is serialized by
 * 'mutex'.  'vport' non-zero means the table is created on another
 * vport (the other_vport command bit is derived from it).
 */
struct mlx5_flow_table {
	struct mlx5_core_dev *dev;
	u8 level;
	u8 type;
	u32 id;		/* firmware table id, set by CREATE_FLOW_TABLE */
	u16 vport;
	struct mutex mutex; /* sync bitmap alloc */
	u16 num_groups;
	struct mlx5_ftg *group;
	unsigned long *bitmap;
	u32 size;
};
|
||||
|
||||
/*
 * Program one flow-table entry via SET_FLOW_TABLE_ENTRY.
 *
 * 'flow_context' is a caller-built flow_context struct followed by its
 * destination list; the mailbox is sized to include both.  The group id
 * of group 'group_ix' is patched into the copied context.  Returns 0 or
 * a negative errno (-ENOMEM or a command-layer error).
 */
static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
				   u32 flow_index, void *flow_context)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)];
	u32 *in;
	void *in_flow_context;
	/* Extra bytes for the trailing destination list. */
	int fcdls =
		MLX5_GET(flow_context, flow_context, destination_list_size) *
		MLX5_ST_SZ_BYTES(dest_format_struct);
	int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport, !!ft->vport);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, flow_index);
	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	memcpy(in_flow_context, flow_context,
	       MLX5_ST_SZ_BYTES(flow_context) + fcdls);

	MLX5_SET(flow_context, in_flow_context, group_id, ft->group[group_ix].id);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
					 sizeof(out));
	kvfree(in);

	return err;
}
|
||||
|
||||
/*
 * Remove one flow-table entry via DELETE_FLOW_TABLE_ENTRY.  Best-effort:
 * the command status is checked by the command layer but not propagated.
 */
static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

/* Shorthand for this command's MLX5_SET calls. */
#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
	MLX5_SET_DFTEI(in, vport_number, ft->vport);
	MLX5_SET_DFTEI(in, other_vport, !!ft->vport);
	MLX5_SET_DFTEI(in, table_type, ft->type);
	MLX5_SET_DFTEI(in, table_id, ft->id);
	MLX5_SET_DFTEI(in, flow_index, flow_index);
	MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);

	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Destroy flow group number 'i' of the table via DESTROY_FLOW_GROUP.
 * Best-effort: errors are logged by the command layer only.
 */
static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

/* Shorthand for this command's MLX5_SET calls. */
#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
	MLX5_SET_DFGI(in, vport_number, ft->vport);
	MLX5_SET_DFGI(in, other_vport, !!ft->vport);
	MLX5_SET_DFGI(in, table_type, ft->type);
	MLX5_SET_DFGI(in, table_id, ft->id);
	MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET_DFGI(in, group_id, ft->group[i].id);
	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create flow group 'i' of the table via CREATE_FLOW_GROUP, covering
 * flow indices [start_ix, start_ix + 2^log_sz - 1] with the group's
 * match criteria.  On success the firmware-assigned group id is stored
 * in ft->group[i].id.  Returns 0 or a negative errno.
 */
static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
	u32 *in;
	void *in_match_criteria;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_group *g = &ft->group[i].g;
	u32 start_ix = ft->group[i].start_ix;
	u32 end_ix = start_ix + (1 << g->log_sz) - 1;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}
	in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
					 match_criteria);

	memset(out, 0, sizeof(out));

/* Shorthand for this command's MLX5_SET calls. */
#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
	MLX5_SET_CFGI(in, vport_number, ft->vport);
	MLX5_SET_CFGI(in, other_vport, !!ft->vport);
	MLX5_SET_CFGI(in, table_type, ft->type);
	MLX5_SET_CFGI(in, table_id, ft->id);
	MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET_CFGI(in, start_flow_index, start_ix);
	MLX5_SET_CFGI(in, end_flow_index, end_ix);
	MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);

	memcpy(in_match_criteria, g->match_criteria,
	       MLX5_ST_SZ_BYTES(fte_match_param));

	err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
					 sizeof(out));
	if (!err)
		ft->group[i].id = MLX5_GET(create_flow_group_out, out,
					   group_id);

	kvfree(in);

	return err;
}
|
||||
|
||||
static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ft->num_groups; i++)
|
||||
mlx5_destroy_flow_group_cmd(ft, i);
|
||||
}
|
||||
|
||||
static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ft->num_groups; i++) {
|
||||
err = mlx5_create_flow_group_cmd(ft, i);
|
||||
if (err)
|
||||
goto err_destroy_flow_table_groups;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_destroy_flow_table_groups:
|
||||
for (i--; i >= 0; i--)
|
||||
mlx5_destroy_flow_group_cmd(ft, i);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Create the flow table object in firmware via CREATE_FLOW_TABLE and
 * record the assigned table id in ft->id.  The log size sent to firmware
 * is the ceiling log2 of the requested entry count.  Returns 0 or a
 * negative errno.
 */
static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport, !!ft->vport);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, level, ft->level);
	MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		return err;

	ft->id = MLX5_GET(create_flow_table_out, out, table_id);

	return 0;
}
|
||||
|
||||
/*
 * Destroy the firmware flow-table object via DESTROY_FLOW_TABLE.
 * Best-effort: errors are handled by the command layer only.
 */
static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

/* Shorthand for this command's MLX5_SET calls. */
#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
	MLX5_SET_DFTI(in, vport_number, ft->vport);
	MLX5_SET_DFTI(in, other_vport, !!ft->vport);
	MLX5_SET_DFTI(in, table_type, ft->type);
	MLX5_SET_DFTI(in, table_id, ft->id);
	MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);

	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Find the flow group whose match criteria exactly equal the caller's.
 *
 * A group matches when its match_criteria_enable flags are identical and
 * every enabled criteria section (outer headers, misc parameters, inner
 * headers) compares byte-equal.  On success stores the group's index in
 * *group_ix and returns 0; returns -EINVAL when no group matches.
 */
static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
			   u32 *match_criteria, int *group_ix)
{
	void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
				      outer_headers);
	void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
				     misc_parameters);
	void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
				      inner_headers);
	int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
	int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
	int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
	int i;

	for (i = 0; i < ft->num_groups; i++) {
		struct mlx5_flow_table_group *g = &ft->group[i].g;
		void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
					       g->match_criteria,
					       outer_headers);
		void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
					      g->match_criteria,
					      misc_parameters);
		void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
					       g->match_criteria,
					       inner_headers);

		if (g->match_criteria_enable != match_criteria_enable)
			continue;

		if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
			if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
				continue;

		if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
			if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
				continue;

		if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
			if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
				continue;

		*group_ix = i;
		return 0;
	}

	return -EINVAL;
}
|
||||
|
||||
/*
 * Allocate a free flow index within group 'group_ix' of the table.
 * Scans the table-wide bitmap from the group's first slot; fails with
 * -ENOSPC when the group's range is exhausted.  Serialized by ft->mutex.
 */
static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
{
	struct mlx5_ftg *grp = &ft->group[group_ix];
	u32 limit = grp->start_ix + (1 << grp->g.log_sz);
	int ret = 0;

	mutex_lock(&ft->mutex);

	*ix = find_next_zero_bit(ft->bitmap, ft->size, grp->start_ix);
	if (*ix < limit)
		__set_bit(*ix, ft->bitmap);
	else
		ret = -ENOSPC;

	mutex_unlock(&ft->mutex);

	return ret;
}
|
||||
|
||||
/*
 * Release a flow index back to the table's bitmap.  NOTE(review):
 * runs without ft->mutex and uses non-atomic __clear_bit — presumably
 * safe against concurrent alloc_flow_index(); verify.
 */
static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
{
	__clear_bit(ix, ft->bitmap);
}
|
||||
|
||||
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
|
||||
void *match_criteria, void *flow_context,
|
||||
u32 *flow_index)
|
||||
{
|
||||
struct mlx5_flow_table *ft = flow_table;
|
||||
int group_ix;
|
||||
int err;
|
||||
|
||||
err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
|
||||
&group_ix);
|
||||
if (err) {
|
||||
mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
err = alloc_flow_index(ft, group_ix, flow_index);
|
||||
if (err) {
|
||||
mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
|
||||
if (err)
|
||||
mlx5_free_flow_index(ft, *flow_index);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_add_flow_table_entry);
|
||||
|
||||
/*
 * Remove a flow-table entry: delete it in firmware, then return its
 * index to the table's allocation bitmap.
 */
void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
{
	struct mlx5_flow_table *ft = flow_table;

	mlx5_del_flow_entry_cmd(ft, flow_index);
	mlx5_free_flow_index(ft, flow_index);
}
EXPORT_SYMBOL(mlx5_del_flow_table_entry);
|
||||
|
||||
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
|
||||
u16 vport,
|
||||
u16 num_groups,
|
||||
struct mlx5_flow_table_group *group)
|
||||
{
|
||||
struct mlx5_flow_table *ft;
|
||||
u32 start_ix = 0;
|
||||
u32 ft_size = 0;
|
||||
void *gr;
|
||||
void *bm;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_groups; i++)
|
||||
ft_size += (1 << group[i].log_sz);
|
||||
|
||||
ft = kzalloc(sizeof(*ft), GFP_KERNEL);
|
||||
gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
|
||||
bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
|
||||
|
||||
ft->group = gr;
|
||||
ft->bitmap = bm;
|
||||
ft->num_groups = num_groups;
|
||||
ft->level = level;
|
||||
ft->vport = vport;
|
||||
ft->type = table_type;
|
||||
ft->size = ft_size;
|
||||
ft->dev = dev;
|
||||
mutex_init(&ft->mutex);
|
||||
|
||||
for (i = 0; i < ft->num_groups; i++) {
|
||||
memcpy(&ft->group[i].g, &group[i], sizeof(*group));
|
||||
ft->group[i].start_ix = start_ix;
|
||||
start_ix += 1 << group[i].log_sz;
|
||||
}
|
||||
|
||||
err = mlx5_create_flow_table_cmd(ft);
|
||||
if (err)
|
||||
goto err_free_ft;
|
||||
|
||||
err = mlx5_create_flow_table_groups(ft);
|
||||
if (err)
|
||||
goto err_destroy_flow_table_cmd;
|
||||
|
||||
return ft;
|
||||
|
||||
err_destroy_flow_table_cmd:
|
||||
mlx5_destroy_flow_table_cmd(ft);
|
||||
|
||||
err_free_ft:
|
||||
mlx5_core_warn(dev, "failed to alloc flow table\n");
|
||||
kfree(bm);
|
||||
kfree(gr);
|
||||
kfree(ft);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_create_flow_table);
|
||||
|
||||
/*
 * Tear down a flow table created by mlx5_create_flow_table: destroy the
 * groups and the table in firmware, then free the host-side bitmap,
 * group array and table object.
 */
void mlx5_destroy_flow_table(void *flow_table)
{
	struct mlx5_flow_table *ft = flow_table;

	mlx5_destroy_flow_table_groups(ft);
	mlx5_destroy_flow_table_cmd(ft);
	kfree(ft->bitmap);
	kfree(ft->group);
	kfree(ft);
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
|
||||
|
||||
/* Return the firmware-assigned id of a flow table handle. */
u32 mlx5_get_flow_table_id(void *flow_table)
{
	struct mlx5_flow_table *ft = flow_table;

	return ft->id;
}
EXPORT_SYMBOL(mlx5_get_flow_table_id);
|
237
sys/dev/mlx5/mlx5_core/mlx5_fw.c
Normal file
237
sys/dev/mlx5/mlx5_core/mlx5_fw.c
Normal file
@ -0,0 +1,237 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <linux/module.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Issue the QUERY_ADAPTER command, writing the firmware reply into the
 * caller-supplied buffer of outlen bytes.  Returns 0 on success or a
 * negative error from the command interface.
 */
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
				  int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
}
|
||||
|
||||
int mlx5_query_board_id(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 *out;
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
|
||||
int err;
|
||||
|
||||
out = kzalloc(outlen, GFP_KERNEL);
|
||||
|
||||
err = mlx5_cmd_query_adapter(dev, out, outlen);
|
||||
if (err)
|
||||
goto out_out;
|
||||
|
||||
memcpy(dev->board_id,
|
||||
MLX5_ADDR_OF(query_adapter_out, out,
|
||||
query_adapter_struct.vsd_contd_psid),
|
||||
MLX5_FLD_SZ_BYTES(query_adapter_out,
|
||||
query_adapter_struct.vsd_contd_psid));
|
||||
|
||||
out_out:
|
||||
kfree(out);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Query the adapter and return its IEEE vendor id in *vendor_id.
 * Returns 0 on success or a negative errno.
 *
 * Fix over original: the kzalloc result was used without a NULL check;
 * an allocation failure now returns -ENOMEM instead of dereferencing
 * NULL inside the command path.
 */
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_cmd_query_adapter(mdev, out, outlen);
	if (err)
		goto out_out;

	*vendor_id = MLX5_GET(query_adapter_out, out,
			      query_adapter_struct.ieee_vendor_id);

out_out:
	kfree(out);

	return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);
|
||||
|
||||
static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
|
||||
int err;
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(query_special_contexts_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
|
||||
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
|
||||
sizeof(out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
|
||||
out, resd_lkey);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, pg)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, atomic)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, roce)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, nic_flow_table)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (
|
||||
MLX5_CAP_GEN(dev, eswitch_flow_table)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, vport_group_manager)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
|
||||
HCA_CAP_OPMOD_GET_CUR);
|
||||
if (err)
|
||||
return err;
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
|
||||
HCA_CAP_OPMOD_GET_MAX);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_core_query_special_contexts(dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(init_hca_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(init_hca_out)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
}
|
183
sys/dev/mlx5/mlx5_core/mlx5_health.c
Normal file
183
sys/dev/mlx5/mlx5_core/mlx5_health.c
Normal file
@ -0,0 +1,183 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/mlx5_ifc.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/* Interval between periodic reads of the device health counter. */
#define MLX5_HEALTH_POLL_INTERVAL (2 * HZ)
/* Consecutive unchanged counter reads before the device is declared bad. */
#define MAX_MISSES 3

/* Global list of devices with compromised health, protected by
 * health_lock and drained by health_work (health_care). */
static DEFINE_SPINLOCK(health_lock);
static LIST_HEAD(health_list);
static struct work_struct health_work;
|
||||
|
||||
/*
 * Workqueue handler: drain the global list of devices whose health was
 * declared compromised by poll_health().  The list is spliced to a
 * local list under health_lock so entries can be processed without
 * holding the lock.  Actual recovery is not implemented yet — each
 * device is only logged and removed from the list.
 */
static void health_care(struct work_struct *work)
{
	struct mlx5_core_health *health, *n;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	LIST_HEAD(tlist);

	spin_lock_irq(&health_lock);
	list_splice_init(&health_list, &tlist);

	spin_unlock_irq(&health_lock);

	list_for_each_entry_safe(health, n, &tlist, list) {
		/* Recover the owning device from the embedded health struct. */
		priv = container_of(health, struct mlx5_priv, health);
		dev = container_of(priv, struct mlx5_core_dev, priv);
		mlx5_core_warn(dev, "handling bad device here\n");
		/* nothing yet */
		spin_lock_irq(&health_lock);
		list_del_init(&health->list);
		spin_unlock_irq(&health_lock);
	}
}
|
||||
|
||||
/*
 * Map a health-buffer syndrome code to a human-readable description.
 *
 * Fix over original: the MLX5_HEALTH_SYNDR_HW_FTL_ERR string contained
 * a stray trailing "\n" ("HW fatal error\n") unlike every sibling
 * string; the caller (print_health_info) appends its own newline, so
 * the embedded one produced a blank line in the log.
 */
static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	default:
		return "unrecognized error";
	}
}
|
||||
|
||||
/*
 * Read a big-endian 16-bit field from the device health buffer and
 * convert it to host order.
 * NOTE(review): uses readl (a 32-bit MMIO read) through a u16 pointer
 * and truncates the result — presumably intentional for this device's
 * register access rules, but an ioread16be-style accessor would be the
 * natural fit; confirm against the hardware access requirements.
 */
static u16 read_be16(__be16 __iomem *p)
{
	return swab16(readl((__force u16 __iomem *) p));
}
|
||||
|
||||
/*
 * Read a big-endian 32-bit field from the device health buffer and
 * convert it to host order.
 */
static u32 read_be32(__be32 __iomem *p)
{
	return swab32(readl((__force u32 __iomem *) p));
}
|
||||
|
||||
/*
 * Dump the device's MMIO health buffer (assert variables, firmware
 * version, hardware id, syndrome, ...) to the console.  Called when
 * the health poller detects a stuck health counter.
 */
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	int i;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));

	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", read_be32(&h->assert_callra));
	printf("mlx5_core: INFO: ""fw_ver 0x%08x\n", read_be32(&h->fw_ver));
	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", read_be32(&h->hw_id));
	printf("mlx5_core: INFO: ""irisc_index %d\n", readb(&h->irisc_index));
	printf("mlx5_core: INFO: ""synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
	printf("mlx5_core: INFO: ""ext_sync 0x%04x\n", read_be16(&h->ext_sync));
}
|
||||
|
||||
/*
 * Timer callback: sample the device health counter.  If the counter has
 * not advanced for MAX_MISSES consecutive polls, declare the device
 * compromised, log the health buffer and hand it to the health_care
 * work item; otherwise re-arm the timer with a small random jitter
 * (0..HZ-1 ticks) added to the base poll interval.
 *
 * Fix over original: the random jitter was computed in a signed int, so
 * get_random_bytes() could yield a negative value whose "% HZ" stayed
 * negative, producing an already-expired timer deadline.  The jitter is
 * now derived from an unsigned value and is always in [0, HZ).
 */
static void poll_health(uintptr_t data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned int rand_jitter;
	int next;
	u32 count;

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised\n");
		print_health_info(dev);
		spin_lock_irq(&health_lock);
		list_add_tail(&health->list, &health_list);
		spin_unlock_irq(&health_lock);

		if (!queue_work(mlx5_core_wq, &health_work))
			mlx5_core_warn(dev, "failed to queue health work\n");
	} else {
		get_random_bytes(&rand_jitter, sizeof(rand_jitter));
		next = rand_jitter % HZ;
		next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
		mod_timer(&health->timer, next);
	}
}
|
||||
|
||||
/*
 * Start periodic health polling for a device: point the health struct
 * at the MMIO health buffer/counter in the init segment and arm the
 * poll timer.
 * NOTE(review): init_timer() looks redundant here since setup_timer()
 * also initializes the timer — presumably harmless; confirm against
 * the LinuxKPI timer implementation before removing.
 */
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	INIT_LIST_HEAD(&health->list);
	init_timer(&health->timer);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	setup_timer(&health->timer, poll_health, (uintptr_t)dev);
	mod_timer(&health->timer,
		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
}
|
||||
|
||||
/*
 * Stop health polling for a device: cancel the timer (waiting for a
 * running callback to finish) and unlink the device from the global
 * compromised-health list if it was queued there.
 */
void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	del_timer_sync(&health->timer);

	spin_lock_irq(&health_lock);
	if (!list_empty(&health->list))
		list_del_init(&health->list);
	spin_unlock_irq(&health_lock);
}
|
||||
|
||||
/* Module-level health teardown; currently nothing to release. */
void mlx5_health_cleanup(void)
{
}
|
||||
|
||||
/* Module-level health setup: prepare the shared health_care work item. */
void __init mlx5_health_init(void)
{

	INIT_WORK(&health_work, health_care);
}
|
66
sys/dev/mlx5/mlx5_core/mlx5_mad.c
Normal file
66
sys/dev/mlx5/mlx5_core/mlx5_mad.c
Normal file
@ -0,0 +1,66 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
|
||||
u16 opmod, u8 port)
|
||||
{
|
||||
struct mlx5_mad_ifc_mbox_in *in = NULL;
|
||||
struct mlx5_mad_ifc_mbox_out *out = NULL;
|
||||
int err;
|
||||
|
||||
in = kzalloc(sizeof(*in), GFP_KERNEL);
|
||||
|
||||
out = kzalloc(sizeof(*out), GFP_KERNEL);
|
||||
|
||||
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
|
||||
in->hdr.opmod = cpu_to_be16(opmod);
|
||||
in->port = port;
|
||||
|
||||
memcpy(in->data, inb, sizeof(in->data));
|
||||
|
||||
err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (out->hdr.status) {
|
||||
err = mlx5_cmd_status_to_err(&out->hdr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(outb, out->data, sizeof(out->data));
|
||||
|
||||
out:
|
||||
kfree(out);
|
||||
kfree(in);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
|
1126
sys/dev/mlx5/mlx5_core/mlx5_main.c
Normal file
1126
sys/dev/mlx5/mlx5_core/mlx5_main.c
Normal file
File diff suppressed because it is too large
Load Diff
68
sys/dev/mlx5/mlx5_core/mlx5_mcg.c
Normal file
68
sys/dev/mlx5/mlx5_core/mlx5_mcg.c
Normal file
@ -0,0 +1,68 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Attach QP qpn to the multicast group identified by mgid via the
 * ATTACH_TO_MCG firmware command.  Returns 0 or a negative errno.
 */
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
	void *gid_fld;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);

	gid_fld = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid_fld, mgid, sizeof(*mgid));

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
|
||||
|
||||
/*
 * Detach QP qpn from the multicast group identified by mgid via the
 * DETACH_FROM_MCG firmware command.  Returns 0 or a negative errno.
 */
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
	void *gid_fld;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);

	gid_fld = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid_fld, mgid, sizeof(*mgid));

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
|
237
sys/dev/mlx5/mlx5_core/mlx5_mr.c
Normal file
237
sys/dev/mlx5/mlx5_core/mlx5_mr.c
Normal file
@ -0,0 +1,237 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Initialize the device's memory-region table: the rwlock and the
 * radix tree that maps base mkeys to mlx5_core_mr objects.
 */
void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;

	rwlock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
|
||||
|
||||
/* MR table teardown; currently nothing to release. */
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
{
}
|
||||
|
||||
/*
 * Create a memory key (mkey) in firmware and register it in the MR
 * radix tree.  A per-device rolling 8-bit "key" is mixed into the mkey
 * so recycled mkey indices are distinguishable.
 *
 * If callback is non-NULL the command is issued asynchronously and this
 * function returns immediately after mlx5_cmd_exec_cb(); note the
 * radix-tree insertion below is then skipped — presumably the caller's
 * callback completes the bookkeeping (TODO confirm at call sites).
 *
 * Returns 0 on success or a negative errno; on radix-tree insertion
 * failure the firmware mkey is destroyed again before returning.
 */
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
			  struct mlx5_create_mkey_mbox_in *in, int inlen,
			  mlx5_cmd_cbk_t callback, void *context,
			  struct mlx5_create_mkey_mbox_out *out)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	struct mlx5_create_mkey_mbox_out lout;
	int err;
	u8 key;
	unsigned long irql;

	memset(&lout, 0, sizeof(lout));
	/* Grab the next key variant under the mkey lock. */
	spin_lock_irq(&dev->priv.mkey_lock);
	key = dev->priv.mkey_key++;
	spin_unlock_irq(&dev->priv.mkey_lock);
	in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
	if (callback) {
		/* Asynchronous path: completion handled by the callback. */
		err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
				       callback, context);
		return err;
	} else {
		err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
	}

	if (err) {
		mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
		return err;
	}

	if (lout.hdr.status) {
		mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
		return mlx5_cmd_status_to_err(&lout.hdr);
	}

	/* Record the MR attributes; the low byte of mr->key is the variant. */
	mr->iova = be64_to_cpu(in->seg.start_addr);
	mr->size = be64_to_cpu(in->seg.len);
	mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
	mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;

	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
		      be32_to_cpu(lout.mkey), key, mr->key);

	/* connect to MR tree */
	write_lock_irqsave(&table->lock, irql);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
	write_unlock_irqrestore(&table->lock, irql);
	if (err) {
		mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
			       mlx5_base_mkey(mr->key), err);
		mlx5_core_destroy_mkey(dev, mr);
	}

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
|
||||
|
||||
/*
 * Destroy a memory key: remove it from the MR radix tree first (so no
 * new lookups can find it), then issue the DESTROY_MKEY firmware
 * command.  Returns -ENOENT if the mkey was not registered in the tree,
 * otherwise the firmware command's result.
 */
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)];
	struct mlx5_core_mr *deleted_mr;
	unsigned long flags;

	memset(in, 0, sizeof(in));

	write_lock_irqsave(&table->lock, flags);
	deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
	write_unlock_irqrestore(&table->lock, flags);
	if (!deleted_mr) {
		mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
			       mlx5_base_mkey(mr->key));
		return -ENOENT;
	}

	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->key));

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
|
||||
|
||||
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
||||
struct mlx5_query_mkey_mbox_out *out, int outlen)
|
||||
{
|
||||
struct mlx5_query_mkey_mbox_in in;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(out, 0, outlen);
|
||||
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
|
||||
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out->hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out->hdr);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_query_mkey);
|
||||
|
||||
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
|
||||
u32 *mkey)
|
||||
{
|
||||
struct mlx5_query_special_ctxs_mbox_in in;
|
||||
struct mlx5_query_special_ctxs_mbox_out out;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out.hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out.hdr);
|
||||
|
||||
*mkey = be32_to_cpu(out.dump_fill_mkey);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
|
||||
|
||||
/*
 * Create npsvs protection/signature values (PSVs) on protection domain
 * pdn.  On success the PSV indices are written into sig_index[].
 * Rejects requests for more than MLX5_MAX_PSVS with -EINVAL.
 * Returns 0 on success or a negative errno.
 */
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index)
{
	struct mlx5_allocate_psv_in in;
	struct mlx5_allocate_psv_out out;
	int i, err;

	if (npsvs > MLX5_MAX_PSVS)
		return -EINVAL;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
	/* Count in the top nibble, PD number in the low 24 bits. */
	in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err) {
		mlx5_core_err(dev, "cmd exec failed %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_err(dev, "create_psv bad status %d\n",
			      out.hdr.status);
		return mlx5_cmd_status_to_err(&out.hdr);
	}

	/* PSV indices are 24-bit values. */
	for (i = 0; i < npsvs; i++)
		sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_psv);
|
||||
|
||||
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
|
||||
{
|
||||
struct mlx5_destroy_psv_in in;
|
||||
struct mlx5_destroy_psv_out out;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
|
||||
in.psv_number = cpu_to_be32(psv_num);
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
|
||||
if (err) {
|
||||
mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (out.hdr.status) {
|
||||
mlx5_core_err(dev, "destroy_psv bad status %d\n",
|
||||
out.hdr.status);
|
||||
err = mlx5_cmd_status_to_err(&out.hdr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_destroy_psv);
|
494
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
Normal file
494
sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
Normal file
@ -0,0 +1,494 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
struct mlx5_pages_req {
|
||||
struct mlx5_core_dev *dev;
|
||||
u16 func_id;
|
||||
s32 npages;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct fw_page {
|
||||
struct rb_node rb_node;
|
||||
u64 addr;
|
||||
struct page *page;
|
||||
u16 func_id;
|
||||
unsigned long bitmask;
|
||||
struct list_head list;
|
||||
unsigned free_count;
|
||||
};
|
||||
|
||||
struct mlx5_manage_pages_inbox {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be16 rsvd;
|
||||
__be16 func_id;
|
||||
__be32 num_entries;
|
||||
__be64 pas[0];
|
||||
};
|
||||
|
||||
struct mlx5_manage_pages_outbox {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
__be32 num_entries;
|
||||
u8 rsvd[4];
|
||||
__be64 pas[0];
|
||||
};
|
||||
|
||||
enum {
|
||||
MAX_RECLAIM_TIME_MSECS = 5000,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
|
||||
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
|
||||
};
|
||||
|
||||
/*
 * Track a freshly DMA-mapped system page in the driver's red-black tree
 * (keyed by bus address) and put it on the free list so alloc_4k() can
 * carve MLX5_ADAPTER_PAGE_SIZE chunks out of it.
 *
 * Returns 0 on success, -EEXIST if the address is already tracked, or
 * -ENOMEM if the tracking node cannot be allocated.
 */
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	/* Walk the tree to find the insertion point; bail on duplicates. */
	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp) {
		/* BUGFIX: the original dereferenced nfp without checking
		 * the allocation result, crashing on OOM. */
		return -ENOMEM;
	}

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	/* Mark every 4K sub-chunk of the page as available. */
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}
|
||||
|
||||
/*
 * Look up the fw_page tracking entry with base bus address @addr in the
 * per-device red-black tree; returns NULL when the address is untracked.
 */
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_node *node = dev->priv.page_root.rb_node;
	struct fw_page *fwp;

	/* Same (inverted-looking) ordering as insert_page(): entries with
	 * smaller addresses live in the left subtree. */
	while (node != NULL) {
		fwp = rb_entry(node, struct fw_page, rb_node);
		if (fwp->addr == addr)
			return fwp;
		node = (fwp->addr < addr) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
|
||||
|
||||
/*
 * Ask firmware how many pages it wants for the given stage.
 * @boot selects MLX5_BOOT_PAGES vs MLX5_INIT_PAGES in op_mod.
 * On success *npages (may be negative: FW returns pages) and *func_id
 * are filled from the QUERY_PAGES response; returns 0 or a command error.
 */
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)];
	u32 out[MLX5_ST_SZ_DW(query_pages_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod,
		 boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return 0;
}
|
||||
|
||||
/*
 * Hand out one MLX5_ADAPTER_PAGE_SIZE (4K) chunk from the first page on
 * the free list. Returns -ENOMEM when no tracked page has free chunks
 * (caller then maps a fresh system page), -ENOENT on bitmap corruption.
 */
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	/* A page on the free list must have at least one set bit. */
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	/* Fully consumed pages leave the free list (still in the rb-tree). */
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}
|
||||
|
||||
/*
 * Return one 4K chunk at bus address @addr to its tracking page.
 * When the whole system page becomes free it is untracked, unmapped and
 * released; when it goes from "full" to "one chunk free" it rejoins the
 * free list so alloc_4k() can find it again.
 */
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	/* Chunk index within the system page. */
	n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		/* Only on the free list if it had free chunks before this
		 * call; free_count == 1 here means it was full until now. */
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		/* Transition full -> partially free: rejoin the free list. */
		list_add(&fwp->list, &dev->priv.free_list);
	}
}
|
||||
|
||||
/*
 * Allocate one system page, DMA-map it bidirectionally and register it
 * with the page tracker for @func_id. Uses goto-based unwind so the page
 * is unmapped/freed on any failure. Returns 0 or a negative errno.
 */
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);
	return err;
}
|
||||
/*
 * Give @npages 4K pages to firmware on behalf of @func_id via the
 * MANAGE_PAGES/GIVE command. On failure, if @notify_fail is set, a
 * best-effort PAGES_CANT_GIVE notification is sent before all pages
 * allocated here are returned to the pool. Returns 0 or negative errno.
 */
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	struct mlx5_manage_pages_inbox *in;
	struct mlx5_manage_pages_outbox out;
	struct mlx5_manage_pages_inbox *nin;
	int inlen;
	u64 addr;
	int err;
	int i;

	inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		return -ENOMEM;
	}
	memset(&out, 0, sizeof(out));

	/* Fill the PAS array; on chunk exhaustion map a fresh system page
	 * and retry the same index. */
	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		in->pas[i] = cpu_to_be64(addr);
	}

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
	in->func_id = cpu_to_be16(func_id);
	in->num_entries = cpu_to_be32(npages);
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_alloc;
	}
	dev->priv.fw_pages += npages;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		if (err) {
			mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
				       func_id, npages, out.hdr.status);
			goto out_alloc;
		}
	}

	mlx5_core_dbg(dev, "err %d\n", err);

	goto out_free;

out_alloc:
	if (notify_fail) {
		nin = kzalloc(sizeof(*nin), GFP_KERNEL);
		if (!nin) {
			/* BUGFIX: original dereferenced nin without a NULL
			 * check — a guaranteed crash exactly when memory is
			 * tight, which is when this path runs. */
			mlx5_core_warn(dev, "page notify failed\n");
			goto out_4k;
		}
		memset(&out, 0, sizeof(out));
		nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
		nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
		if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
			mlx5_core_warn(dev, "page notify failed\n");
		kfree(nin);
	}

out_4k:
	/* Return every page staged into the PAS array (indices < i). */
	for (i--; i >= 0; i--)
		free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
	kvfree(in);
	return err;
}
|
||||
|
||||
/*
 * Ask firmware to hand back up to @npages pages for @func_id
 * (MANAGE_PAGES/TAKE) and free every page address it returns.
 * If @nclaimed is non-NULL it receives the number actually reclaimed.
 * Returns 0 or a negative errno.
 */
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	struct mlx5_manage_pages_inbox in;
	struct mlx5_manage_pages_outbox *out;
	int num_claimed;
	int outlen;
	u64 addr;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	memset(&in, 0, sizeof(in));
	/* Outbox must be big enough for npages returned addresses. */
	outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
	in.func_id = cpu_to_be16(func_id);
	in.num_entries = cpu_to_be32(npages);
	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages\n");
		goto out_free;
	}

	if (out->hdr.status) {
		err = mlx5_cmd_status_to_err(&out->hdr);
		goto out_free;
	}

	/* FW may return fewer pages than requested. */
	num_claimed = be32_to_cpu(out->num_entries);
	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;

	for (i = 0; i < num_claimed; i++) {
		addr = be64_to_cpu(out->pas[i]);
		free_4k(dev, addr);
	}

out_free:
	kvfree(out);
	return err;
}
|
||||
|
||||
/*
 * Workqueue handler for firmware page requests: a negative npages means
 * firmware is returning pages (reclaim), positive means it wants pages
 * (give). The request is freed here regardless of outcome.
 */
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}
|
||||
|
||||
/*
 * Queue an asynchronous page give/reclaim request for @func_id.
 * Called from event (atomic) context, hence GFP_ATOMIC; the request is
 * processed and freed by pages_work_handler() on the pg_wq workqueue.
 * Failures are logged and dropped — firmware will re-request.
 */
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	if (!queue_work(dev->priv.pg_wq, &req->work))
		mlx5_core_warn(dev, "failed to queue pages handler work\n");
}
|
||||
|
||||
/*
 * Query how many pages firmware needs for the boot/init stage and give
 * them synchronously (no failure notification — startup callers handle
 * the error directly). Returns 0 or a negative errno.
 */
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}
|
||||
|
||||
enum {
|
||||
MLX5_BLKS_FOR_RECLAIM_PAGES = 12
|
||||
};
|
||||
|
||||
/*
 * Largest number of page addresses that fit in one MANAGE_PAGES outbox
 * spanning the immediate out area plus MLX5_BLKS_FOR_RECLAIM_PAGES
 * command mailbox blocks — i.e. the optimal batch size per reclaim.
 */
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       sizeof(struct mlx5_manage_pages_outbox)) /
	       FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

	return ret;
}
|
||||
|
||||
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
|
||||
{
|
||||
int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
|
||||
struct fw_page *fwp;
|
||||
struct rb_node *p;
|
||||
int nclaimed = 0;
|
||||
int err;
|
||||
|
||||
do {
|
||||
p = rb_first(&dev->priv.page_root);
|
||||
if (p) {
|
||||
fwp = rb_entry(p, struct fw_page, rb_node);
|
||||
err = reclaim_pages(dev, fwp->func_id,
|
||||
optimal_reclaimed_pages(),
|
||||
&nclaimed);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
if (nclaimed)
|
||||
end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
|
||||
}
|
||||
if (time_after(jiffies, end)) {
|
||||
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
|
||||
break;
|
||||
}
|
||||
} while (p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Initialize the page tracker: empty rb-tree and empty free list. */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}
|
||||
|
||||
/*
 * Counterpart of mlx5_pagealloc_init(); intentionally empty — pages are
 * released via mlx5_reclaim_startup_pages()/free_4k() before teardown.
 */
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}
|
||||
|
||||
/*
 * Create the single-threaded workqueue that serializes asynchronous
 * firmware page requests. Returns 0 or -ENOMEM.
 */
int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}
|
||||
|
||||
/* Flush and destroy the page-request workqueue (waits for pending work). */
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}
|
67
sys/dev/mlx5/mlx5_core/mlx5_pd.c
Normal file
67
sys/dev/mlx5/mlx5_core/mlx5_pd.c
Normal file
@ -0,0 +1,67 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Allocate a protection domain; on success *pdn holds the new PD number.
 * Returns 0 or a command-translated negative errno.
 */
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)];
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return 0;
}
EXPORT_SYMBOL(mlx5_core_alloc_pd);
|
||||
|
||||
/* Release protection domain @pdn. Returns 0 or a negative errno. */
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
|
718
sys/dev/mlx5/mlx5_core/mlx5_port.c
Normal file
718
sys/dev/mlx5/mlx5_core/mlx5_port.c
Normal file
@ -0,0 +1,718 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Generic ACCESS_REG wrapper: read (@write == 0) or write (@write != 0)
 * hardware register @reg_num. @data_in/@data_out are the raw register
 * payloads; @arg goes into the mailbox 'arg' field. On a successful read
 * the register contents are copied to @data_out. Uses goto-based unwind
 * for the two vzalloc'd mailboxes. Returns 0 or a negative errno.
 */
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write)
{
	struct mlx5_access_reg_mbox_in *in = NULL;
	struct mlx5_access_reg_mbox_out *out = NULL;
	int err = -ENOMEM;

	in = mlx5_vzalloc(sizeof(*in) + size_in);
	if (!in)
		return -ENOMEM;

	out = mlx5_vzalloc(sizeof(*out) + size_out);
	if (!out)
		goto ex1;

	memcpy(in->data, data_in, size_in);
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
	/* opmod 1 == query, 0 == write. */
	in->hdr.opmod = cpu_to_be16(!write);
	in->arg = cpu_to_be32(arg);
	in->register_id = cpu_to_be16(reg_num);
	err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
			    sizeof(*out) + size_out);
	if (err)
		goto ex2;

	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	if (!err)
		memcpy(data_out, out->data, size_out);

ex2:
	kvfree(out);
ex1:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
|
||||
|
||||
|
||||
struct mlx5_reg_pcap {
|
||||
u8 rsvd0;
|
||||
u8 port_num;
|
||||
u8 rsvd1[2];
|
||||
__be32 caps_127_96;
|
||||
__be32 caps_95_64;
|
||||
__be32 caps_63_32;
|
||||
__be32 caps_31_0;
|
||||
};
|
||||
|
||||
/*
 * Write port capability bits 127..96 for @port_num via the PCAP register.
 * Returns 0 or a negative errno.
 */
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
	struct mlx5_reg_pcap in;
	struct mlx5_reg_pcap out;
	int err;

	memset(&in, 0, sizeof(in));
	in.caps_127_96 = cpu_to_be32(caps);
	in.port_num = port_num;

	err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_PCAP, 0, 1);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
|
||||
|
||||
/*
 * Read the PTYS (port type/speed) register for local port 1 with the
 * given protocol mask (IB or EN) into @ptys (@ptys_size bytes).
 */
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
			 int ptys_size, int proto_mask)
{
	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(ptys_reg, in, local_port, 1);
	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);

	err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
				   ptys_size, MLX5_REG_PTYS, 0, 0);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
|
||||
|
||||
/*
 * Query the port's protocol capability bitmap: Ethernet capabilities
 * when @proto_mask is MLX5_PTYS_EN, InfiniBand otherwise.
 */
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
			      u32 *proto_cap, int proto_mask)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int err;

	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
	if (err)
		return err;

	if (proto_mask == MLX5_PTYS_EN)
		*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
	else
		*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
|
||||
|
||||
/*
 * Query the port's administratively-enabled protocol bitmap (Ethernet
 * for MLX5_PTYS_EN, InfiniBand otherwise).
 */
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
				u32 *proto_admin, int proto_mask)
{
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int err;

	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
	if (err)
		return err;

	if (proto_mask == MLX5_PTYS_EN)
		*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
	else
		*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
|
||||
|
||||
/*
 * Set the administratively-enabled protocol bitmap on local port 1 via a
 * PTYS register write (Ethernet or IB field selected by @proto_mask).
 */
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
			int proto_mask)
{
	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(ptys_reg, in, local_port, 1);
	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
	if (proto_mask == MLX5_PTYS_EN)
		MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
	else
		MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_PTYS, 0, 1);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
|
||||
|
||||
/*
 * Set the administrative port status (up/down) on local port 1 via the
 * PAOS register; 'ase' requests an event on status change.
 */
int mlx5_set_port_status(struct mlx5_core_dev *dev,
			 enum mlx5_port_status status)
{
	u32 in[MLX5_ST_SZ_DW(paos_reg)];
	u32 out[MLX5_ST_SZ_DW(paos_reg)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(paos_reg, in, local_port, 1);

	MLX5_SET(paos_reg, in, admin_status, status);
	MLX5_SET(paos_reg, in, ase, 1);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_PAOS, 0, 1);
	return err;
}
|
||||
|
||||
/* Read the operational port status of local port 1 (PAOS register). */
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
{
	u32 in[MLX5_ST_SZ_DW(paos_reg)];
	u32 out[MLX5_ST_SZ_DW(paos_reg)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(paos_reg, in, local_port, 1);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_PAOS, 0, 0);
	if (err)
		return err;

	*status = MLX5_GET(paos_reg, out, oper_status);
	return err;
}
|
||||
|
||||
/*
 * Read the PMTU register of local port 1; each of @admin_mtu, @max_mtu
 * and @oper_mtu may be NULL if the caller is not interested in it.
 */
static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
			       int *admin_mtu, int *max_mtu, int *oper_mtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(pmtu_reg, in, local_port, 1);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_PMTU, 0, 0);
	if (err)
		return err;

	if (max_mtu)
		*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
	if (oper_mtu)
		*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
	if (admin_mtu)
		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);

	return err;
}
|
||||
|
||||
/* Set the administrative MTU of local port 1 (PMTU register write). */
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];

	memset(in, 0, sizeof(in));

	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
	MLX5_SET(pmtu_reg, in, local_port, 1);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_PMTU, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
|
||||
|
||||
/* Convenience wrapper: query only the hardware maximum MTU. */
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
{
	return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
|
||||
|
||||
/*
 * Configure global RX/TX pause (flow control) for @port via the PFCC
 * register; @rx_pause/@tx_pause are treated as booleans.
 */
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
			u32 rx_pause, u32 tx_pause)
{
	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(pfcc_reg, in, local_port, port);
	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
	MLX5_SET(pfcc_reg, in, pprx, rx_pause);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_PFCC, 0, 1);
}
|
||||
|
||||
/*
 * Read the current global RX/TX pause configuration of @port (PFCC
 * register). Both output pointers must be non-NULL.
 */
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
			  u32 *rx_pause, u32 *tx_pause)
{
	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(pfcc_reg, in, local_port, port);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_PFCC, 0, 0);
	if (err)
		return err;

	*rx_pause = MLX5_GET(pfcc_reg, out, pprx);
	*tx_pause = MLX5_GET(pfcc_reg, out, pptx);

	return 0;
}
|
||||
|
||||
/* Convenience wrapper: query only the operational MTU. */
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
{
	return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
|
||||
|
||||
/*
 * Build a bitmask of the Wake-on-LAN modes the device supports, from the
 * wol_* general capability bits.
 */
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev)
{
	u8 wol_supported = 0;

	if (MLX5_CAP_GEN(dev, wol_s))
		wol_supported |= MLX5_WOL_SECURED_MAGIC;
	if (MLX5_CAP_GEN(dev, wol_g))
		wol_supported |= MLX5_WOL_MAGIC;
	if (MLX5_CAP_GEN(dev, wol_a))
		wol_supported |= MLX5_WOL_ARP;
	if (MLX5_CAP_GEN(dev, wol_b))
		wol_supported |= MLX5_WOL_BROADCAST;
	if (MLX5_CAP_GEN(dev, wol_m))
		wol_supported |= MLX5_WOL_MULTICAST;
	if (MLX5_CAP_GEN(dev, wol_u))
		wol_supported |= MLX5_WOL_UNICAST;
	if (MLX5_CAP_GEN(dev, wol_p))
		wol_supported |= MLX5_WOL_PHY_ACTIVITY;

	return wol_supported;
}
EXPORT_SYMBOL_GPL(mlx5_is_wol_supported);
|
||||
|
||||
/* Program the device Wake-on-LAN mode via the SET_WOL_ROL command. */
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode)
{
	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_wol);
|
||||
|
||||
/*
 * Read (@write == 0) or write the PVLC (virtual lane) register for the
 * port in pvlc->local_port. On read, the decoded fields are written back
 * into @pvlc; on write only vl_admin is programmed.
 */
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
			  struct mlx5_pvlc_reg *pvlc, int write)
{
	int sz = MLX5_ST_SZ_BYTES(pvlc_reg);
	u8 in[MLX5_ST_SZ_BYTES(pvlc_reg)];
	u8 out[MLX5_ST_SZ_BYTES(pvlc_reg)];
	int err;

	memset(out, 0, sizeof(out));
	memset(in, 0, sizeof(in));

	MLX5_SET(pvlc_reg, in, local_port, pvlc->local_port);
	if (write)
		MLX5_SET(pvlc_reg, in, vl_admin, pvlc->vl_admin);

	err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PVLC, 0,
				   !!write);
	if (err)
		return err;

	if (!write) {
		pvlc->local_port = MLX5_GET(pvlc_reg, out, local_port);
		pvlc->vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
		pvlc->vl_admin = MLX5_GET(pvlc_reg, out, vl_admin);
		pvlc->vl_operational = MLX5_GET(pvlc_reg, out, vl_operational);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_pvlc);
|
||||
|
||||
int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
|
||||
struct mlx5_ptys_reg *ptys, int write)
|
||||
{
|
||||
int sz = MLX5_ST_SZ_BYTES(ptys_reg);
|
||||
void *out = NULL;
|
||||
void *in = NULL;
|
||||
int err;
|
||||
|
||||
in = mlx5_vzalloc(sz);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
out = mlx5_vzalloc(sz);
|
||||
if (!out) {
|
||||
kfree(in);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
MLX5_SET(ptys_reg, in, local_port, ptys->local_port);
|
||||
MLX5_SET(ptys_reg, in, proto_mask, ptys->proto_mask);
|
||||
if (write) {
|
||||
MLX5_SET(ptys_reg, in, eth_proto_capability,
|
||||
ptys->eth_proto_cap);
|
||||
MLX5_SET(ptys_reg, in, ib_link_width_capability,
|
||||
ptys->ib_link_width_cap);
|
||||
MLX5_SET(ptys_reg, in, ib_proto_capability,
|
||||
ptys->ib_proto_cap);
|
||||
MLX5_SET(ptys_reg, in, eth_proto_admin, ptys->eth_proto_admin);
|
||||
MLX5_SET(ptys_reg, in, ib_link_width_admin,
|
||||
ptys->ib_link_width_admin);
|
||||
MLX5_SET(ptys_reg, in, ib_proto_admin, ptys->ib_proto_admin);
|
||||
MLX5_SET(ptys_reg, in, eth_proto_oper, ptys->eth_proto_oper);
|
||||
MLX5_SET(ptys_reg, in, ib_link_width_oper,
|
||||
ptys->ib_link_width_oper);
|
||||
MLX5_SET(ptys_reg, in, ib_proto_oper, ptys->ib_proto_oper);
|
||||
MLX5_SET(ptys_reg, in, eth_proto_lp_advertise,
|
||||
ptys->eth_proto_lp_advertise);
|
||||
}
|
||||
|
||||
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PTYS, 0,
|
||||
!!write);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (!write) {
|
||||
ptys->local_port = MLX5_GET(ptys_reg, out, local_port);
|
||||
ptys->proto_mask = MLX5_GET(ptys_reg, out, proto_mask);
|
||||
ptys->eth_proto_cap = MLX5_GET(ptys_reg, out,
|
||||
eth_proto_capability);
|
||||
ptys->ib_link_width_cap = MLX5_GET(ptys_reg, out,
|
||||
ib_link_width_capability);
|
||||
ptys->ib_proto_cap = MLX5_GET(ptys_reg, out,
|
||||
ib_proto_capability);
|
||||
ptys->eth_proto_admin = MLX5_GET(ptys_reg, out,
|
||||
eth_proto_admin);
|
||||
ptys->ib_link_width_admin = MLX5_GET(ptys_reg, out,
|
||||
ib_link_width_admin);
|
||||
ptys->ib_proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
|
||||
ptys->eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
|
||||
ptys->ib_link_width_oper = MLX5_GET(ptys_reg, out,
|
||||
ib_link_width_oper);
|
||||
ptys->ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
|
||||
ptys->eth_proto_lp_advertise = MLX5_GET(ptys_reg, out,
|
||||
eth_proto_lp_advertise);
|
||||
}
|
||||
|
||||
out:
|
||||
kvfree(in);
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_access_ptys);
|
||||
|
||||
/*
 * Translate a byte MTU (256..4096, powers of two) into its IB-encoded
 * MTU value (1..5). Logs a warning and returns -1 for any other value.
 */
static int mtu_to_ib_mtu(int mtu)
{
	int code;
	int val;

	for (code = 1, val = 256; val <= 4096; val *= 2, code++) {
		if (mtu == val)
			return code;
	}

	printf("mlx5_core: WARN: ""invalid mtu\n");
	return -1;
}
|
||||
|
||||
int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
|
||||
struct mlx5_pmtu_reg *pmtu, int write)
|
||||
{
|
||||
int sz = MLX5_ST_SZ_BYTES(pmtu_reg);
|
||||
void *out = NULL;
|
||||
void *in = NULL;
|
||||
int err;
|
||||
|
||||
in = mlx5_vzalloc(sz);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
out = mlx5_vzalloc(sz);
|
||||
if (!out) {
|
||||
kfree(in);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
MLX5_SET(pmtu_reg, in, local_port, pmtu->local_port);
|
||||
if (write)
|
||||
MLX5_SET(pmtu_reg, in, admin_mtu, pmtu->admin_mtu);
|
||||
|
||||
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PMTU, 0,
|
||||
!!write);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (!write) {
|
||||
pmtu->local_port = MLX5_GET(pmtu_reg, out, local_port);
|
||||
pmtu->max_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
|
||||
max_mtu));
|
||||
pmtu->admin_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
|
||||
admin_mtu));
|
||||
pmtu->oper_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
|
||||
oper_mtu));
|
||||
}
|
||||
|
||||
out:
|
||||
kvfree(in);
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_access_pmtu);
|
||||
|
||||
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
|
||||
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
|
||||
int lane = 0;
|
||||
int err;
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(pmlp_reg, in, local_port, 1);
|
||||
|
||||
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
|
||||
sizeof(out), MLX5_REG_PMLP, 0, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
lane = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
|
||||
*module_num = lane & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_module_num);
|
||||
|
||||
/*
 * Read up to MLX5_EEPROM_MAX_BYTES of module EEPROM through the MCIA
 * register.  On success the bytes are copied into 'data' and the number
 * actually requested is stored in *size_read.  Returns 0, a negative
 * errno, or a positive MCIA status code.
 */
int mlx5_query_eeprom(struct mlx5_core_dev *dev,
          int i2c_addr, int page_num, int device_addr,
          int size, int module_num, u32 *data, int *size_read)
{
    u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {0};
    u32 out[MLX5_ST_SZ_DW(mcia_reg)];
    int status;
    int err;

    /* Clamp the request to what one MCIA access can return. */
    size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);

    MLX5_SET(mcia_reg, in, l, 0);
    MLX5_SET(mcia_reg, in, module, module_num);
    MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
    MLX5_SET(mcia_reg, in, page_number, page_num);
    MLX5_SET(mcia_reg, in, device_address, device_addr);
    MLX5_SET(mcia_reg, in, size, size);

    err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                   sizeof(out), MLX5_REG_MCIA, 0, 0);
    if (err != 0)
        return err;

    status = MLX5_GET(mcia_reg, out, status);
    if (status != 0)
        return status;

    memcpy(data, MLX5_ADDR_OF(mcia_reg, out, dword_0), size);
    *size_read = size;
    return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_eeprom);
|
||||
|
||||
/* Register a VXLAN UDP destination port with the device. */
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port)
{
    u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
    int err;

    MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
         MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
    MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);

    err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
    if (err != 0)
        mlx5_core_err(dev, "Failed %s, port %u, err - %d",
                  mlx5_command_str(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT),
                  port, err);

    return err;
}
|
||||
|
||||
/* Unregister a previously added VXLAN UDP destination port. */
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port)
{
    u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
    int err;

    MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
         MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
    MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);

    err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
    if (err != 0)
        mlx5_core_err(dev, "Failed %s, port %u, err - %d",
                  mlx5_command_str(MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT),
                  port, err);

    return err;
}
|
||||
|
||||
/* Query the device's current wake-on-LAN mode. */
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode)
{
    u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
    int err;

    MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);

    err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
    if (err != 0)
        return err;

    *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
    return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_wol);
|
||||
|
||||
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
|
||||
int priority, int *is_enable)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_cong_status_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(query_cong_status_out)];
|
||||
int err;
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
*is_enable = 0;
|
||||
|
||||
MLX5_SET(query_cong_status_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_CONG_STATUS);
|
||||
MLX5_SET(query_cong_status_in, in, cong_protocol, protocol);
|
||||
MLX5_SET(query_cong_status_in, in, priority, priority);
|
||||
|
||||
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
if (!err)
|
||||
*is_enable = MLX5_GET(query_cong_status_out, out, enable);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
|
||||
int priority, int enable)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(modify_cong_status_in, in, opcode,
|
||||
MLX5_CMD_OP_MODIFY_CONG_STATUS);
|
||||
MLX5_SET(modify_cong_status_in, in, cong_protocol, protocol);
|
||||
MLX5_SET(modify_cong_status_in, in, priority, priority);
|
||||
MLX5_SET(modify_cong_status_in, in, enable, enable);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
|
||||
void *out, int out_size)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_cong_params_in)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(query_cong_params_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_CONG_PARAMS);
|
||||
MLX5_SET(query_cong_params_in, in, cong_protocol, protocol);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, out_size);
|
||||
}
|
||||
|
||||
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
|
||||
void *in, int in_size)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)];
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(modify_cong_params_in, in, opcode,
|
||||
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
|
||||
}
|
||||
|
||||
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
|
||||
void *out, int out_size)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(query_cong_statistics_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_CONG_STATISTICS);
|
||||
MLX5_SET(query_cong_statistics_in, in, clear, clear);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, out_size);
|
||||
}
|
308
sys/dev/mlx5/mlx5_core/mlx5_qp.c
Normal file
308
sys/dev/mlx5/mlx5_core/mlx5_qp.c
Normal file
@ -0,0 +1,308 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <dev/mlx5/qp.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
|
||||
#include "mlx5_core.h"
|
||||
|
||||
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
|
||||
u32 rsn)
|
||||
{
|
||||
struct mlx5_qp_table *table = &dev->priv.qp_table;
|
||||
struct mlx5_core_rsc_common *common;
|
||||
|
||||
spin_lock(&table->lock);
|
||||
|
||||
common = radix_tree_lookup(&table->tree, rsn);
|
||||
if (common)
|
||||
atomic_inc(&common->refcount);
|
||||
|
||||
spin_unlock(&table->lock);
|
||||
|
||||
if (!common) {
|
||||
mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
|
||||
rsn);
|
||||
return NULL;
|
||||
}
|
||||
return common;
|
||||
}
|
||||
|
||||
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
|
||||
{
|
||||
if (atomic_dec_and_test(&common->refcount))
|
||||
complete(&common->free);
|
||||
}
|
||||
|
||||
/*
 * Dispatch an async event to the resource identified by 'rsn'.
 * Currently only QP resources have an event callback.
 */
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
    struct mlx5_core_rsc_common *common;
    struct mlx5_core_qp *qp;

    common = mlx5_get_rsc(dev, rsn);
    if (common == NULL)
        return;

    switch (common->res) {
    case MLX5_RES_QP:
        qp = (struct mlx5_core_qp *)common;
        qp->event(qp, event_type);
        break;
    default:
        mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        break;
    }

    mlx5_core_put_rsc(common);
}
|
||||
|
||||
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_qp *qp,
|
||||
struct mlx5_create_qp_mbox_in *in,
|
||||
int inlen)
|
||||
{
|
||||
struct mlx5_qp_table *table = &dev->priv.qp_table;
|
||||
struct mlx5_create_qp_mbox_out out;
|
||||
struct mlx5_destroy_qp_mbox_in din;
|
||||
struct mlx5_destroy_qp_mbox_out dout;
|
||||
int err;
|
||||
void *qpc;
|
||||
|
||||
memset(&out, 0, sizeof(out));
|
||||
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
|
||||
if (dev->issi) {
|
||||
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
|
||||
/* 0xffffff means we ask to work with cqe version 0 */
|
||||
MLX5_SET(qpc, qpc, user_index, 0xffffff);
|
||||
}
|
||||
|
||||
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "ret %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (out.hdr.status) {
|
||||
mlx5_core_warn(dev, "current num of QPs 0x%x\n",
|
||||
atomic_read(&dev->num_qps));
|
||||
return mlx5_cmd_status_to_err(&out.hdr);
|
||||
}
|
||||
|
||||
qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
|
||||
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
|
||||
|
||||
qp->common.res = MLX5_RES_QP;
|
||||
spin_lock_irq(&table->lock);
|
||||
err = radix_tree_insert(&table->tree, qp->qpn, qp);
|
||||
spin_unlock_irq(&table->lock);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "err %d\n", err);
|
||||
goto err_cmd;
|
||||
}
|
||||
|
||||
qp->pid = curthread->td_proc->p_pid;
|
||||
atomic_set(&qp->common.refcount, 1);
|
||||
atomic_inc(&dev->num_qps);
|
||||
init_completion(&qp->common.free);
|
||||
|
||||
return 0;
|
||||
|
||||
err_cmd:
|
||||
memset(&din, 0, sizeof(din));
|
||||
memset(&dout, 0, sizeof(dout));
|
||||
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
|
||||
din.qpn = cpu_to_be32(qp->qpn);
|
||||
mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
|
||||
|
||||
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_qp *qp)
|
||||
{
|
||||
struct mlx5_destroy_qp_mbox_in in;
|
||||
struct mlx5_destroy_qp_mbox_out out;
|
||||
struct mlx5_qp_table *table = &dev->priv.qp_table;
|
||||
unsigned long flags;
|
||||
int err;
|
||||
|
||||
|
||||
spin_lock_irqsave(&table->lock, flags);
|
||||
radix_tree_delete(&table->tree, qp->qpn);
|
||||
spin_unlock_irqrestore(&table->lock, flags);
|
||||
|
||||
mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
|
||||
wait_for_completion(&qp->common.free);
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
|
||||
in.qpn = cpu_to_be32(qp->qpn);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out.hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out.hdr);
|
||||
|
||||
atomic_dec(&dev->num_qps);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
|
||||
|
||||
/*
 * Move a QP from cur_state to new_state by issuing the corresponding
 * firmware opcode.  The caller fills 'in' with the state-specific
 * attributes; opcode and qpn are set here.  Returns -EINVAL for an
 * unsupported transition, a negative errno on command failure, or the
 * translated firmware status.
 *
 * NOTE(review): 'sqd_event' is accepted but never used here — confirm
 * whether callers expect it to be encoded into the mailbox.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
            enum mlx5_qp_state new_state,
            struct mlx5_modify_qp_mbox_in *in, int sqd_event,
            struct mlx5_core_qp *qp)
{
    /* Legal transition table: optab[cur][new] is the firmware opcode,
     * 0 marks a forbidden transition. */
    static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
        [MLX5_QP_STATE_RST] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
            [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
        },
        [MLX5_QP_STATE_INIT] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
            [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
            [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
        },
        [MLX5_QP_STATE_RTR] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
            [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
        },
        [MLX5_QP_STATE_RTS] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
            [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
        },
        [MLX5_QP_STATE_SQD] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
        },
        [MLX5_QP_STATE_SQER] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
            [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
        },
        [MLX5_QP_STATE_ERR] = {
            [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
            [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
        }
    };

    struct mlx5_modify_qp_mbox_out out;
    int err = 0;
    u16 op;

    /* Reject out-of-range states and transitions the table forbids. */
    if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
        !optab[cur_state][new_state])
        return -EINVAL;

    memset(&out, 0, sizeof(out));
    op = optab[cur_state][new_state];
    in->hdr.opcode = cpu_to_be16(op);
    in->qpn = cpu_to_be32(qp->qpn);
    err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
    if (err)
        return err;

    return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
|
||||
|
||||
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_qp_table *table = &dev->priv.qp_table;
|
||||
|
||||
spin_lock_init(&table->lock);
|
||||
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
/* Counterpart of mlx5_init_qp_table(); intentionally empty. */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}
|
||||
|
||||
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
|
||||
struct mlx5_query_qp_mbox_out *out, int outlen)
|
||||
{
|
||||
struct mlx5_query_qp_mbox_in in;
|
||||
int err;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(out, 0, outlen);
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
|
||||
in.qpn = cpu_to_be32(qp->qpn);
|
||||
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (out->hdr.status)
|
||||
return mlx5_cmd_status_to_err(&out->hdr);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
|
||||
|
||||
/* Allocate an XRC domain; on success the new xrcd number is returned
 * through *xrcdn. */
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
    u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
    int err;

    MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);

    err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
    if (err != 0)
        return err;

    *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
    return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
|
||||
|
||||
/* Release an XRC domain previously obtained from mlx5_core_xrcd_alloc(). */
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
    u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
    u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};

    MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
    MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);

    return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                      out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
|
457
sys/dev/mlx5/mlx5_core/mlx5_srq.c
Normal file
457
sys/dev/mlx5/mlx5_core/mlx5_srq.c
Normal file
@ -0,0 +1,457 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/srq.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include "mlx5_core.h"
|
||||
#include "transobj.h"
|
||||
|
||||
/*
 * Deliver an async event to the SRQ identified by 'srqn'.  A reference
 * is held across the callback and released afterwards; the last
 * reference dropper wakes anyone waiting on srq->free.
 */
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
    struct mlx5_srq_table *table = &dev->priv.srq_table;
    struct mlx5_core_srq *srq;

    spin_lock(&table->lock);
    srq = radix_tree_lookup(&table->tree, srqn);
    if (srq != NULL)
        atomic_inc(&srq->refcount);
    spin_unlock(&table->lock);

    if (srq == NULL) {
        mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
        return;
    }

    srq->event(srq, event_type);

    if (atomic_dec_and_test(&srq->refcount))
        complete(&srq->free);
}
|
||||
|
||||
/*
 * Translate between the SRQ context layout (srqc) and the RMP context
 * layout (rmpc).  Direction is selected by srqc_to_rmpc.  Field pairs
 * are mirrored exactly; note the +/-4 adjustment on the stride field
 * and the 32/2-bit split of the doorbell-record address.
 */
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
    void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

    if (srqc_to_rmpc) {
        /* srqc state -> rmpc state */
        switch (MLX5_GET(srqc, srqc, state)) {
        case MLX5_SRQC_STATE_GOOD:
            MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
            break;
        case MLX5_SRQC_STATE_ERROR:
            MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
            break;
        default:
            printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
        }

        MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
        MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
        /* rmpc strides are log2 of bytes; srqc stores log2(stride)-4 */
        MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
        MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
        MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
        MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
        MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
        /* Reassemble the 64-bit doorbell address from its high 32 bits
         * and its low bits stored shifted right by 2. */
        MLX5_SET64(wq, wq, dbr_addr,
               ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
               ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
    } else {
        /* rmpc state -> srqc state */
        switch (MLX5_GET(rmpc, rmpc, state)) {
        case MLX5_RMPC_STATE_RDY:
            MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
            break;
        case MLX5_RMPC_STATE_ERR:
            MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
            break;
        default:
            printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
        }

        MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
        MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
        MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
        MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
        MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
        MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
        MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
        /* Split the 64-bit doorbell address back into high/low parts. */
        MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
        MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
    }
}
|
||||
|
||||
/*
 * Look up an SRQ by number, taking a reference under the table lock.
 * Returns NULL if the SRQ is unknown.
 */
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
    struct mlx5_srq_table *table = &dev->priv.srq_table;
    struct mlx5_core_srq *srq;

    spin_lock(&table->lock);
    srq = radix_tree_lookup(&table->tree, srqn);
    if (srq != NULL)
        atomic_inc(&srq->refcount);
    spin_unlock(&table->lock);

    return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);
|
||||
|
||||
static int get_pas_size(void *srqc)
|
||||
{
|
||||
u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
|
||||
u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
|
||||
u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
|
||||
u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
|
||||
u32 po_quanta = 1 << (log_page_size - 6);
|
||||
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
|
||||
u32 page_size = 1 << log_page_size;
|
||||
u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
|
||||
u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
|
||||
|
||||
return rq_num_pas * sizeof(u64);
|
||||
|
||||
}
|
||||
|
||||
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in, int srq_inlen)
|
||||
{
|
||||
void *create_in;
|
||||
void *rmpc;
|
||||
void *srqc;
|
||||
int pas_size;
|
||||
int inlen;
|
||||
int err;
|
||||
|
||||
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
|
||||
pas_size = get_pas_size(srqc);
|
||||
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
|
||||
create_in = mlx5_vzalloc(inlen);
|
||||
if (!create_in)
|
||||
return -ENOMEM;
|
||||
|
||||
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
|
||||
|
||||
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
|
||||
rmpc_srqc_reformat(srqc, rmpc, true);
|
||||
|
||||
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
|
||||
|
||||
kvfree(create_in);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Destroy the RMP object backing this SRQ. */
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
             struct mlx5_core_srq *srq)
{
    return mlx5_core_destroy_rmp(dev, srq->srqn);
}
|
||||
|
||||
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_query_srq_mbox_out *out)
|
||||
{
|
||||
u32 *rmp_out;
|
||||
void *rmpc;
|
||||
void *srqc;
|
||||
int err;
|
||||
|
||||
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
|
||||
if (!rmp_out)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
|
||||
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
|
||||
rmpc_srqc_reformat(srqc, rmpc, false);
|
||||
|
||||
out:
|
||||
kvfree(rmp_out);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Arm the RMP backing this SRQ with the given limit watermark. */
static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
    return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}
|
||||
|
||||
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in,
|
||||
int srq_inlen)
|
||||
{
|
||||
void *create_in;
|
||||
void *srqc;
|
||||
void *xrc_srqc;
|
||||
void *pas;
|
||||
int pas_size;
|
||||
int inlen;
|
||||
int err;
|
||||
|
||||
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
|
||||
pas_size = get_pas_size(srqc);
|
||||
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
|
||||
create_in = mlx5_vzalloc(inlen);
|
||||
if (!create_in)
|
||||
return -ENOMEM;
|
||||
|
||||
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
|
||||
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
|
||||
|
||||
memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
|
||||
memcpy(pas, in->pas, pas_size);
|
||||
/* 0xffffff means we ask to work with cqe version 0 */
|
||||
MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
|
||||
|
||||
err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
out:
|
||||
kvfree(create_in);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Destroy the XRC SRQ object backing this SRQ. */
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
             struct mlx5_core_srq *srq)
{
    return mlx5_core_destroy_xsrq(dev, srq->srqn);
}
|
||||
|
||||
/*
 * Query the XRC SRQ object backing this SRQ.
 *
 * NOTE(review): unlike query_rmp_cmd(), the queried context is never
 * copied or reformatted into 'out' — on success the caller's mailbox is
 * left untouched.  This looks incomplete; confirm against the XRC SRQ
 * consumers before relying on the query result.
 */
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
                 struct mlx5_core_srq *srq,
                 struct mlx5_query_srq_mbox_out *out)
{
    u32 *xrcsrq_out;
    int err;

    xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
    if (!xrcsrq_out)
        return -ENOMEM;

    err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
    if (err)
        goto out;

out:
    kvfree(xrcsrq_out);
    return err;
}
|
||||
|
||||
/* Arm the XRC SRQ object backing this SRQ with the given watermark. */
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
             struct mlx5_core_srq *srq, u16 lwm)
{
    return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}
|
||||
|
||||
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in, int inlen)
|
||||
{
|
||||
struct mlx5_create_srq_mbox_out out;
|
||||
int err;
|
||||
|
||||
memset(&out, 0, sizeof(out));
|
||||
|
||||
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
|
||||
|
||||
err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
|
||||
|
||||
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_srq *srq)
|
||||
{
|
||||
struct mlx5_destroy_srq_mbox_in in;
|
||||
struct mlx5_destroy_srq_mbox_out out;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
|
||||
in.srqn = cpu_to_be32(srq->srqn);
|
||||
|
||||
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
|
||||
}
|
||||
|
||||
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_query_srq_mbox_out *out)
|
||||
{
|
||||
struct mlx5_query_srq_mbox_in in;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
|
||||
in.srqn = cpu_to_be32(srq->srqn);
|
||||
|
||||
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
|
||||
}
|
||||
|
||||
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm, int is_srq)
|
||||
{
|
||||
struct mlx5_arm_srq_mbox_in in;
|
||||
struct mlx5_arm_srq_mbox_out out;
|
||||
|
||||
memset(&in, 0, sizeof(in));
|
||||
memset(&out, 0, sizeof(out));
|
||||
|
||||
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
|
||||
in.hdr.opmod = cpu_to_be16(!!is_srq);
|
||||
in.srqn = cpu_to_be32(srq->srqn);
|
||||
in.lwm = cpu_to_be16(lwm);
|
||||
|
||||
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
|
||||
}
|
||||
|
||||
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in, int inlen,
|
||||
int is_xrc)
|
||||
{
|
||||
if (!dev->issi)
|
||||
return create_srq_cmd(dev, srq, in, inlen);
|
||||
else if (srq->common.res == MLX5_RES_XSRQ)
|
||||
return create_xrc_srq_cmd(dev, srq, in, inlen);
|
||||
else
|
||||
return create_rmp_cmd(dev, srq, in, inlen);
|
||||
}
|
||||
|
||||
static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
|
||||
{
|
||||
if (!dev->issi)
|
||||
return destroy_srq_cmd(dev, srq);
|
||||
else if (srq->common.res == MLX5_RES_XSRQ)
|
||||
return destroy_xrc_srq_cmd(dev, srq);
|
||||
else
|
||||
return destroy_rmp_cmd(dev, srq);
|
||||
}
|
||||
|
||||
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_create_srq_mbox_in *in, int inlen,
|
||||
int is_xrc)
|
||||
{
|
||||
int err;
|
||||
struct mlx5_srq_table *table = &dev->priv.srq_table;
|
||||
|
||||
srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
|
||||
|
||||
err = create_srq_split(dev, srq, in, inlen, is_xrc);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
atomic_set(&srq->refcount, 1);
|
||||
init_completion(&srq->free);
|
||||
|
||||
spin_lock_irq(&table->lock);
|
||||
err = radix_tree_insert(&table->tree, srq->srqn, srq);
|
||||
spin_unlock_irq(&table->lock);
|
||||
if (err) {
|
||||
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
|
||||
goto err_destroy_srq_split;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_destroy_srq_split:
|
||||
destroy_srq_split(dev, srq);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_create_srq);
|
||||
|
||||
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
|
||||
{
|
||||
struct mlx5_srq_table *table = &dev->priv.srq_table;
|
||||
struct mlx5_core_srq *tmp;
|
||||
int err;
|
||||
|
||||
spin_lock_irq(&table->lock);
|
||||
tmp = radix_tree_delete(&table->tree, srq->srqn);
|
||||
spin_unlock_irq(&table->lock);
|
||||
if (!tmp) {
|
||||
mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (tmp != srq) {
|
||||
mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = destroy_srq_split(dev, srq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (atomic_dec_and_test(&srq->refcount))
|
||||
complete(&srq->free);
|
||||
wait_for_completion(&srq->free);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_destroy_srq);
|
||||
|
||||
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
struct mlx5_query_srq_mbox_out *out)
|
||||
{
|
||||
if (!dev->issi)
|
||||
return query_srq_cmd(dev, srq, out);
|
||||
else if (srq->common.res == MLX5_RES_XSRQ)
|
||||
return query_xrc_srq_cmd(dev, srq, out);
|
||||
else
|
||||
return query_rmp_cmd(dev, srq, out);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_query_srq);
|
||||
|
||||
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
|
||||
u16 lwm, int is_srq)
|
||||
{
|
||||
if (!dev->issi)
|
||||
return arm_srq_cmd(dev, srq, lwm, is_srq);
|
||||
else if (srq->common.res == MLX5_RES_XSRQ)
|
||||
return arm_xrc_srq_cmd(dev, srq, lwm);
|
||||
else
|
||||
return arm_rmp_cmd(dev, srq, lwm);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_core_arm_srq);
|
||||
|
||||
/*
 * Initialize the per-device SRQ table: the spinlock-protected radix
 * tree mapping SRQ numbers to their mlx5_core_srq objects.
 */
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
|
||||
|
||||
/*
 * Tear down the SRQ table.  Intentionally empty: by the time this runs
 * every SRQ is expected to have been destroyed, so there is nothing to
 * free; the function exists to mirror mlx5_init_srq_table().
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
|
393
sys/dev/mlx5/mlx5_core/mlx5_transobj.c
Normal file
393
sys/dev/mlx5/mlx5_core/mlx5_transobj.c
Normal file
@ -0,0 +1,393 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
|
||||
#include "mlx5_core.h"
|
||||
#include "transobj.h"
|
||||
|
||||
/*
 * Ask firmware for a new transport domain; on success the domain
 * number is stored in *tdn.  Returns 0 or a negative errno.
 */
int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
	return 0;
}
|
||||
|
||||
/*
 * Return a transport domain to firmware.  Best effort: the command
 * status is checked internally but not propagated to the caller.
 */
void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create an RQ from the caller-built mailbox "in" (inlen bytes); on
 * success the new RQ number is stored in *rqn.
 */
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
	u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	int err;

	/* Stamp the opcode into the caller-provided mailbox. */
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*rqn = MLX5_GET(create_rq_out, out, rqn);
	return 0;
}
|
||||
|
||||
/* Issue MODIFY_RQ with the caller-built mailbox "in" (inlen bytes). */
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
|
||||
|
||||
/* Destroy RQ "rqn".  Best effort: command status is not propagated. */
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create an SQ from the caller-built mailbox "in" (inlen bytes); on
 * success the new SQ number is stored in *sqn.
 */
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*sqn = MLX5_GET(create_sq_out, out, sqn);
	return 0;
}
|
||||
|
||||
/* Issue MODIFY_SQ with the caller-built mailbox "in" (inlen bytes). */
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
|
||||
|
||||
/* Destroy SQ "sqn".  Best effort: command status is not propagated. */
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create a TIR (transport interface receive) from "in" (inlen bytes);
 * on success the new TIR number is stored in *tirn.
 */
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	int err;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*tirn = MLX5_GET(create_tir_out, out, tirn);
	return 0;
}
|
||||
|
||||
/* Destroy TIR "tirn".  Best effort: command status is not propagated. */
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create a TIS (transport interface send) from "in" (inlen bytes);
 * on success the new TIS number is stored in *tisn.
 */
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*tisn = MLX5_GET(create_tis_out, out, tisn);
	return 0;
}
|
||||
|
||||
/* Destroy TIS "tisn".  Best effort: command status is not propagated. */
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Create an RMP (receive memory pool) from "in" (inlen bytes); on
 * success the new RMP number is stored in *rmpn.
 */
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn)
{
	u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
	int err;

	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
	return 0;
}
|
||||
|
||||
/* Issue MODIFY_RMP with the caller-built mailbox "in" (inlen bytes). */
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};

	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
|
||||
|
||||
/* Destroy RMP "rmpn", propagating the command status to the caller. */
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
||||
|
||||
/*
 * Query RMP "rmpn" into "out", which the caller must size to at least
 * query_rmp_out bytes.
 */
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};

	MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, in, rmpn, rmpn);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  MLX5_ST_SZ_BYTES(query_rmp_out));
}
|
||||
|
||||
/*
 * Arm RMP "rmpn" by setting its limit water mark to "lwm" via
 * MODIFY_RMP.  Returns 0 or a negative errno.
 */
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
{
	void *in;
	void *rmpc;
	void *wq;
	void *bitmask;
	int err;

	/* Mailbox is larger than comfortable for the stack; vzalloc it. */
	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
	if (!in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
	/* New water mark lives in the WQ section of the RMP context. */
	MLX5_SET(wq, wq, lwm, lwm);
	/* Bitmask selects which context fields the firmware should apply. */
	MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

	kvfree(in);

	return err;
}
|
||||
|
||||
/*
 * Create an XRC SRQ from "in" (inlen bytes); on success the new XRC
 * SRQ number is stored in *xsrqn.
 */
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xsrqn)
{
	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
	int err;

	MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
	return 0;
}
|
||||
|
||||
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
|
||||
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
|
||||
|
||||
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
|
||||
sizeof(out));
|
||||
}
|
||||
|
||||
/*
 * Query XRC SRQ "xsrqn" into "out" (caller sized for
 * query_xrc_srq_out), then relocate the context so "out" can also be
 * read through the plain query_srq_out layout.
 *
 * Fix: the source (xrc_srq_context_entry) and destination
 * (srq_context_entry) both point into the same "out" buffer, so the
 * regions may overlap (or even coincide); memcpy on overlapping
 * regions is undefined behavior — use memmove.
 */
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
	void *srqc;
	void *xrc_srqc;
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					 out,
					 MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!err) {
		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
					xrc_srq_context_entry);
		srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
		/* Overlap-safe copy: both views alias the same buffer. */
		memmove(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
	}

	return err;
}
|
||||
|
||||
/*
 * Arm XRC SRQ "xsrqn" with limit water mark "lwm", propagating the
 * command status to the caller.
 */
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
					  sizeof(out));
}
|
||||
|
||||
/*
 * Create an RQT (RQ table) from "in" (inlen bytes); on success the new
 * table number is stored in *rqtn.
 */
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
	return 0;
}
|
||||
|
||||
/* Issue MODIFY_RQT for table "rqtn" with the caller-built mailbox. */
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
			 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};

	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);

	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
|
||||
|
||||
/* Destroy RQT "rqtn".  Best effort: command status is not propagated. */
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);

	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
|
214
sys/dev/mlx5/mlx5_core/mlx5_uar.c
Normal file
214
sys/dev/mlx5/mlx5_core/mlx5_uar.c
Normal file
@ -0,0 +1,214 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/io-mapping.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
enum {
	NUM_DRIVER_UARS = 4,	/* UAR pages reserved for driver use */
	NUM_LOW_LAT_UUARS = 4,	/* micro-UARs set aside for low-latency use */
};
|
||||
|
||||
/*
 * Ask firmware for a UAR page; on success the UAR index is stored in
 * *uarn.  Returns 0 or a negative errno.
 */
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
|
||||
|
||||
/* Return UAR index "uarn" to firmware, propagating the command status. */
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);

	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
|
||||
|
||||
static int need_uuar_lock(int uuarn)
|
||||
{
|
||||
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
|
||||
|
||||
if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
|
||||
{
|
||||
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
|
||||
struct mlx5_bf *bf;
|
||||
phys_addr_t addr;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
uuari->num_uars = NUM_DRIVER_UARS;
|
||||
uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
|
||||
|
||||
mutex_init(&uuari->lock);
|
||||
uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
|
||||
|
||||
uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
|
||||
|
||||
uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
|
||||
GFP_KERNEL);
|
||||
|
||||
uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
|
||||
|
||||
for (i = 0; i < uuari->num_uars; i++) {
|
||||
err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
|
||||
if (err)
|
||||
goto out_count;
|
||||
|
||||
addr = pci_resource_start(dev->pdev, 0) +
|
||||
((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
|
||||
uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
|
||||
if (!uuari->uars[i].map) {
|
||||
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
|
||||
err = -ENOMEM;
|
||||
goto out_count;
|
||||
}
|
||||
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
|
||||
uuari->uars[i].index, uuari->uars[i].map);
|
||||
}
|
||||
|
||||
for (i = 0; i < tot_uuars; i++) {
|
||||
bf = &uuari->bfs[i];
|
||||
|
||||
bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
|
||||
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
|
||||
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
|
||||
bf->reg = NULL; /* Add WC support */
|
||||
bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
|
||||
(1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
|
||||
MLX5_BF_OFFSET;
|
||||
bf->need_lock = need_uuar_lock(i);
|
||||
spin_lock_init(&bf->lock);
|
||||
spin_lock_init(&bf->lock32);
|
||||
bf->uuarn = i;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_count:
|
||||
for (i--; i >= 0; i--) {
|
||||
iounmap(uuari->uars[i].map);
|
||||
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
|
||||
}
|
||||
kfree(uuari->count);
|
||||
|
||||
kfree(uuari->bitmap);
|
||||
|
||||
kfree(uuari->bfs);
|
||||
|
||||
kfree(uuari->uars);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
|
||||
{
|
||||
int i = uuari->num_uars;
|
||||
|
||||
for (i--; i >= 0; i--) {
|
||||
iounmap(uuari->uars[i].map);
|
||||
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
|
||||
}
|
||||
|
||||
kfree(uuari->count);
|
||||
kfree(uuari->bitmap);
|
||||
kfree(uuari->bfs);
|
||||
kfree(uuari->uars);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
|
||||
{
|
||||
phys_addr_t pfn;
|
||||
phys_addr_t uar_bar_start;
|
||||
int err;
|
||||
|
||||
err = mlx5_cmd_alloc_uar(mdev, &uar->index);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
uar_bar_start = pci_resource_start(mdev->pdev, 0);
|
||||
pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
|
||||
uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
|
||||
if (!uar->map) {
|
||||
mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
|
||||
err = -ENOMEM;
|
||||
goto err_free_uar;
|
||||
}
|
||||
|
||||
if (mdev->priv.bf_mapping)
|
||||
uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
|
||||
uar->index << PAGE_SHIFT);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_uar:
|
||||
mlx5_cmd_free_uar(mdev, uar->index);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_alloc_map_uar);
|
||||
|
||||
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
|
||||
{
|
||||
io_mapping_unmap(uar->bf_map);
|
||||
iounmap(uar->map);
|
||||
mlx5_cmd_free_uar(mdev, uar->index);
|
||||
}
|
||||
EXPORT_SYMBOL(mlx5_unmap_free_uar);
|
796
sys/dev/mlx5/mlx5_core/mlx5_vport.c
Normal file
796
sys/dev/mlx5/mlx5_core/mlx5_vport.c
Normal file
@ -0,0 +1,796 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/vport.h>
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/*
 * Query the vport state; "opmod" selects which port/vport flavor is
 * queried.  Returns the firmware "state" field.
 *
 * Fix: "out" was never zeroed, yet on command failure the function
 * still decodes the state field from it — returning uninitialized
 * stack bytes.  Zero the output mailbox so the failure path yields a
 * deterministic 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
|
||||
|
||||
/*
 * Fetch the NIC vport context of "vport" into "out" (outlen bytes).
 * vport 0 is our own function; any other number targets another vport
 * and requires the other_vport flag.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}
|
||||
|
||||
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_in)];
|
||||
int err;
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(alloc_q_counter_in, in, opcode,
|
||||
MLX5_CMD_OP_ALLOC_Q_COUNTER);
|
||||
|
||||
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
|
||||
counter_set_id);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
|
||||
int counter_set_id)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
|
||||
u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(dealloc_q_counter_in, in, opcode,
|
||||
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
|
||||
MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
|
||||
counter_set_id);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, sizeof(out));
|
||||
}
|
||||
|
||||
static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
|
||||
int counter_set_id,
|
||||
int reset,
|
||||
void *out,
|
||||
int out_size)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
|
||||
MLX5_SET(query_q_counter_in, in, clear, reset);
|
||||
MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
|
||||
|
||||
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
|
||||
out, out_size);
|
||||
}
|
||||
|
||||
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
|
||||
int counter_set_id,
|
||||
u32 *out_of_rx_buffer)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
|
||||
int err;
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
|
||||
sizeof(out));
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
|
||||
out_of_buffer);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Read the permanent MAC address of "vport" into addr (6 bytes).
 * Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u32 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	/* The full context is larger than comfortable for the stack. */
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	/*
	 * The field is 8 bytes with the 6-byte MAC in the low-order
	 * bytes; skip the 2 leading pad bytes.
	 */
	ether_addr_copy(addr, &out_addr[2]);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
|
||||
|
||||
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
|
||||
u64 *system_image_guid)
|
||||
{
|
||||
u32 *out;
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
|
||||
int err;
|
||||
|
||||
out = mlx5_vzalloc(outlen);
|
||||
if (!out)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
|
||||
nic_vport_context.system_image_guid);
|
||||
out:
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
|
||||
|
||||
/*
 * Read this function's node GUID from its NIC vport context.
 * Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int ctxlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *ctx;
	int ret;

	ctx = mlx5_vzalloc(ctxlen);
	if (!ctx)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, ctx, ctxlen);
	if (!ret)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, ctx,
					nic_vport_context.node_guid);

	kvfree(ctx);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
|
||||
|
||||
/*
 * Read this function's port GUID from its NIC vport context.
 * Returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
{
	int ctxlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *ctx;
	int ret;

	ctx = mlx5_vzalloc(ctxlen);
	if (!ctx)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, ctx, ctxlen);
	if (!ret)
		*port_guid = MLX5_GET64(query_nic_vport_context_out, ctx,
					nic_vport_context.port_guid);

	kvfree(ctx);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
|
||||
|
||||
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
|
||||
u16 *qkey_viol_cntr)
|
||||
{
|
||||
u32 *out;
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
|
||||
int err;
|
||||
|
||||
out = mlx5_vzalloc(outlen);
|
||||
if (!out)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
|
||||
nic_vport_context.qkey_violation_counter);
|
||||
|
||||
out:
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
|
||||
|
||||
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
|
||||
int inlen)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
|
||||
|
||||
MLX5_SET(modify_nic_vport_context_in, in, opcode,
|
||||
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
|
||||
}
|
||||
|
||||
static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
|
||||
int enable_disable)
|
||||
{
|
||||
void *in;
|
||||
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
|
||||
int err;
|
||||
|
||||
in = mlx5_vzalloc(inlen);
|
||||
if (!in) {
|
||||
mlx5_core_warn(mdev, "failed to allocate inbox\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
|
||||
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
|
||||
enable_disable);
|
||||
|
||||
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
|
||||
|
||||
kvfree(in);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Set the current unicast MAC address of "vport" by writing a
 * single-entry allowed-address list into its NIC vport context.
 * "other_vport" must be true when targeting a vport other than our
 * own.  Returns 0 or a negative errno.
 */
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	/* One mac_address_layout entry is appended after the context. */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8 *mac_layout;
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	/* field_select picks the addresses list for modification. */
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	/* allowed_list_type 0 with a single entry: the current UC MAC. */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type, 0);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	/* The 6-byte MAC starts at the mac_addr_47_32 field. */
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
|
||||
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
|
||||
u8 *addr)
|
||||
{
|
||||
void *in;
|
||||
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
|
||||
u8 *mac_ptr;
|
||||
int err;
|
||||
|
||||
in = mlx5_vzalloc(inlen);
|
||||
if (!in) {
|
||||
mlx5_core_warn(mdev, "failed to allocate inbox\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
MLX5_SET(modify_nic_vport_context_in, in,
|
||||
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
|
||||
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
|
||||
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
|
||||
MLX5_SET(modify_nic_vport_context_in, in,
|
||||
field_select.permanent_address, 1);
|
||||
mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
|
||||
nic_vport_context.permanent_address.mac_addr_47_32);
|
||||
ether_addr_copy(mac_ptr, addr);
|
||||
|
||||
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
|
||||
|
||||
kvfree(in);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
|
||||
|
||||
/* Enable RoCE on the local NIC vport.  Returns 0 or a negative errno. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
|
||||
|
||||
/* Disable RoCE on the local NIC vport.  Returns 0 or a negative errno. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
|
||||
|
||||
/*
 * Execute QUERY_HCA_VPORT_CONTEXT for (port_num, vport_num), writing the
 * raw command output into "out".  Querying any vport other than our own
 * requires the group-manager capability.  Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);

	MLX5_SET(query_hca_vport_context_in, in, opcode,
	    MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num != 0) {
		if (!is_group_manager)
			return -EPERM;
		MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
		MLX5_SET(query_hca_vport_context_in, in, vport_number,
		    vport_num);
	}

	/* The port number is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}
|
||||
|
||||
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
|
||||
u64 *system_image_guid)
|
||||
{
|
||||
u32 *out;
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
|
||||
int err;
|
||||
|
||||
out = mlx5_vzalloc(outlen);
|
||||
if (!out)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
|
||||
hca_vport_context.system_image_guid);
|
||||
|
||||
out:
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
|
||||
|
||||
/*
 * Read the node GUID from the HCA vport context of port 1, vport 0.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *out;
	int err;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err == 0)
		*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
		    hca_vport_context.node_guid);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
|
||||
|
||||
/*
 * Query one GID of a HCA vport and copy it into "gid".  gid_index 0xffff
 * asks the firmware for the whole table, but only the first returned
 * entry is copied back to the caller.  Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	union ib_gid *gid_entry;
	void *out = NULL;
	void *in = NULL;
	int is_group_manager;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff == full-table query; size the mailbox accordingly. */
	nout = (gid_index == 0xffff) ? tbsz : 1;
	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (in == NULL || out == NULL) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
	    MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num != 0) {
		/* Only the group manager may query other vports. */
		if (!is_group_manager) {
			err = -EPERM;
			goto out;
		}
		MLX5_SET(query_hca_vport_gid_in, in, vport_number, vport_num);
		MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	gid_entry = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out,
	    gid);
	gid->global.subnet_prefix = gid_entry->global.subnet_prefix;
	gid->global.interface_id = gid_entry->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
|
||||
|
||||
/*
 * Query one P_Key of a HCA vport, or the whole P_Key table when
 * pkey_index == 0xffff.  "pkey" must have room for the requested number
 * of entries (tbsz entries for a full-table query).  Returns 0 on
 * success or a negative errno.
 *
 * Fix: the kzalloc() results were previously used unchecked, so an
 * allocation failure dereferenced NULL inside MLX5_SET; both mailboxes
 * are now validated (kfree(NULL) is a no-op, so a single exit is fine).
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff == full-table query. */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
	    MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		/* Only the group manager may query other vports. */
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
			    vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	/* Copy each returned pkey entry into the caller's array. */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++,
	    pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
|
||||
|
||||
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
|
||||
u16 vport, void *in, int inlen)
|
||||
{
|
||||
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
|
||||
int err;
|
||||
|
||||
memset(out, 0, sizeof(out));
|
||||
|
||||
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
|
||||
if (vport)
|
||||
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
|
||||
|
||||
MLX5_SET(modify_esw_vport_context_in, in, opcode,
|
||||
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
|
||||
|
||||
err = mlx5_cmd_exec_check_status(mdev, in, inlen,
|
||||
out, sizeof(out));
|
||||
if (err)
|
||||
mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
|
||||
u8 insert_mode, u8 strip_mode,
|
||||
u16 vlan, u8 cfi, u8 pcp)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
|
||||
|
||||
memset(in, 0, sizeof(in));
|
||||
|
||||
if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
|
||||
MLX5_SET(modify_esw_vport_context_in, in,
|
||||
esw_vport_context.cvlan_cfi, cfi);
|
||||
MLX5_SET(modify_esw_vport_context_in, in,
|
||||
esw_vport_context.cvlan_pcp, pcp);
|
||||
MLX5_SET(modify_esw_vport_context_in, in,
|
||||
esw_vport_context.cvlan_id, vlan);
|
||||
}
|
||||
|
||||
MLX5_SET(modify_esw_vport_context_in, in,
|
||||
esw_vport_context.vport_cvlan_insert, insert_mode);
|
||||
|
||||
MLX5_SET(modify_esw_vport_context_in, in,
|
||||
esw_vport_context.vport_cvlan_strip, strip_mode);
|
||||
|
||||
MLX5_SET(modify_esw_vport_context_in, in, field_select,
|
||||
MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
|
||||
MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
|
||||
|
||||
return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
|
||||
|
||||
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
|
||||
u8 port_num, u16 vport_num,
|
||||
void *out, int out_size)
|
||||
{
|
||||
int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
|
||||
int is_group_manager;
|
||||
void *in;
|
||||
int err;
|
||||
|
||||
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
|
||||
|
||||
in = mlx5_vzalloc(in_sz);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
MLX5_SET(query_vport_counter_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
|
||||
if (vport_num) {
|
||||
if (is_group_manager) {
|
||||
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
|
||||
MLX5_SET(query_vport_counter_in, in, vport_number,
|
||||
vport_num);
|
||||
} else {
|
||||
err = -EPERM;
|
||||
goto ex;
|
||||
}
|
||||
}
|
||||
if (MLX5_CAP_GEN(dev, num_ports) == 2)
|
||||
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
|
||||
|
||||
err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
|
||||
if (err)
|
||||
goto ex;
|
||||
err = mlx5_cmd_status_to_err_v2(out);
|
||||
if (err)
|
||||
goto ex;
|
||||
|
||||
ex:
|
||||
kvfree(in);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
|
||||
|
||||
/*
 * Query the local vport counters for "port_num" and unpack every
 * packet/octet pair into the caller's mlx5_vport_counters structure.
 * Returns 0 on success or a negative errno.
 */
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (out == NULL)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	/* Copy one 64-bit packets/octets counter pair out of the mailbox. */
#define	MLX5_GET_CTR(cntr) do {						\
	vc->cntr.packets = MLX5_GET64(query_vport_counter_out,		\
	    out, cntr.packets);						\
	vc->cntr.octets = MLX5_GET64(query_vport_counter_out,		\
	    out, cntr.octets);						\
} while (0)

	MLX5_GET_CTR(received_errors);
	MLX5_GET_CTR(transmit_errors);
	MLX5_GET_CTR(received_ib_unicast);
	MLX5_GET_CTR(transmitted_ib_unicast);
	MLX5_GET_CTR(received_ib_multicast);
	MLX5_GET_CTR(transmitted_ib_multicast);
	MLX5_GET_CTR(received_eth_broadcast);
	MLX5_GET_CTR(transmitted_eth_broadcast);
	MLX5_GET_CTR(received_eth_unicast);
	MLX5_GET_CTR(transmitted_eth_unicast);
	MLX5_GET_CTR(received_eth_multicast);
	MLX5_GET_CTR(transmitted_eth_multicast);
#undef MLX5_GET_CTR

ex:
	kvfree(out);
	return err;
}
|
187
sys/dev/mlx5/mlx5_core/mlx5_wq.c
Normal file
187
sys/dev/mlx5/mlx5_core/mlx5_wq.c
Normal file
@ -0,0 +1,187 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include "wq.h"
|
||||
#include "mlx5_core.h"
|
||||
|
||||
/* Number of entries in a cyclic work queue (size mask + 1). */
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->sz_m1 + 1;
}

/* Number of entries in a completion queue ring. */
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->sz_m1 + 1;
}

/* Number of entries in a linked-list work queue. */
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->sz_m1 + 1;
}

/* Buffer size in bytes: entry count times 2^log_stride. */
static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
}

/* Buffer size in bytes of the CQ ring. */
static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_get_size(wq) << wq->log_stride;
}

/* Buffer size in bytes of the linked-list work queue. */
static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
{
	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
}
|
||||
|
||||
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_cyc *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
int max_direct = param->linear ? INT_MAX : 0;
|
||||
int err;
|
||||
|
||||
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
|
||||
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
|
||||
max_direct, &wq_ctrl->buf,
|
||||
param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->buf = wq_ctrl->buf.direct.buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
wq_ctrl->mdev = mdev;
|
||||
|
||||
return 0;
|
||||
|
||||
err_db_free:
|
||||
mlx5_db_free(mdev, &wq_ctrl->db);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *cqc, struct mlx5_cqwq *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
int max_direct = param->linear ? INT_MAX : 0;
|
||||
int err;
|
||||
|
||||
wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
|
||||
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
|
||||
wq->sz_m1 = (1 << wq->log_sz) - 1;
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
|
||||
max_direct, &wq_ctrl->buf,
|
||||
param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->buf = wq_ctrl->buf.direct.buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
wq_ctrl->mdev = mdev;
|
||||
|
||||
return 0;
|
||||
|
||||
err_db_free:
|
||||
mlx5_db_free(mdev, &wq_ctrl->db);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_ll *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
struct mlx5_wqe_srq_next_seg *next_seg;
|
||||
int max_direct = param->linear ? INT_MAX : 0;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
|
||||
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
|
||||
max_direct, &wq_ctrl->buf,
|
||||
param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->buf = wq_ctrl->buf.direct.buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
for (i = 0; i < wq->sz_m1; i++) {
|
||||
next_seg = mlx5_wq_ll_get_wqe(wq, i);
|
||||
next_seg->next_wqe_index = cpu_to_be16(i + 1);
|
||||
}
|
||||
next_seg = mlx5_wq_ll_get_wqe(wq, i);
|
||||
wq->tail_next = &next_seg->next_wqe_index;
|
||||
|
||||
wq_ctrl->mdev = mdev;
|
||||
|
||||
return 0;
|
||||
|
||||
err_db_free:
|
||||
mlx5_db_free(mdev, &wq_ctrl->db);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Release the DMA buffer and doorbell allocated by any of the _create calls. */
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
|
63
sys/dev/mlx5/mlx5_core/transobj.h
Normal file
63
sys/dev/mlx5/mlx5_core/transobj.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef __TRANSOBJ_H__
#define __TRANSOBJ_H__

/*
 * Prototypes for the transport-object helpers (transport domain, RQ, SQ,
 * TIR, TIS, RMP, XSRQ, RQT).  All "in" mailboxes are caller-built per
 * the mlx5_ifc layouts; int-returning functions return 0 or a negative
 * errno.
 */

/* Transport domain allocation. */
int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);

/* Receive queues (RQ). */
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
			u32 *rqn);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);

/* Send queues (SQ). */
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
			u32 *sqn);
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);

/* Transport interface receive (TIR) and send (TIS) objects. */
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *tirn);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *tisn);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);

/* Receive memory pools (RMP) and extended shared RQs (XSRQ). */
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn);
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn);
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);

/* RQ tables (RQT) — presumably RSS indirection tables; confirm usage. */
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
			 u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
			 int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);

#endif /* __TRANSOBJ_H__ */
|
166
sys/dev/mlx5/mlx5_core/wq.h
Normal file
166
sys/dev/mlx5/mlx5_core/wq.h
Normal file
@ -0,0 +1,166 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef __MLX5_WQ_H__
|
||||
#define __MLX5_WQ_H__
|
||||
|
||||
#include <dev/mlx5/mlx5_ifc.h>
|
||||
|
||||
/* Allocation parameters shared by all work-queue create calls. */
struct mlx5_wq_param {
	int linear;		/* request one contiguous buffer (max_direct = INT_MAX) */
	int buf_numa_node;	/* NUMA node for the queue buffer */
	int db_numa_node;	/* NUMA node for the doorbell record */
};

/* Resources backing a work queue; filled by *_create, freed by mlx5_wq_destroy. */
struct mlx5_wq_ctrl {
	struct mlx5_core_dev *mdev;	/* owning device, used at destroy time */
	struct mlx5_buf buf;		/* DMA buffer backing the queue */
	struct mlx5_db db;		/* doorbell record */
};

/* Cyclic work queue: entries are addressed modulo the (power-of-two) size. */
struct mlx5_wq_cyc {
	void *buf;
	__be32 *db;
	u16 sz_m1;	/* size - 1, used as an index mask */
	u8 log_stride;	/* log2 of entry size in bytes */
};

/* Completion queue ring. */
struct mlx5_cqwq {
	void *buf;
	__be32 *db;
	u32 sz_m1;	/* size - 1, used as an index mask */
	u32 cc;	/* consumer counter */
	u8 log_sz;	/* log2 of ring size */
	u8 log_stride;	/* log2 of CQE size in bytes */
};

/* Linked-list work queue: free WQEs are chained via next_wqe_index. */
struct mlx5_wq_ll {
	void *buf;
	__be32 *db;
	__be16 *tail_next;	/* next pointer of the current list tail */
	u16 sz_m1;	/* size - 1 */
	u16 head;	/* index of the next WQE to post */
	u16 wqe_ctr;	/* producer counter published to the doorbell */
	u16 cur_sz;	/* number of posted (in-flight) WQEs */
	u8 log_stride;	/* log2 of entry size in bytes */
};
|
||||
|
||||
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_cyc *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl);
|
||||
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
|
||||
|
||||
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *cqc, struct mlx5_cqwq *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl);
|
||||
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
|
||||
|
||||
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_ll *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl);
|
||||
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
|
||||
|
||||
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
|
||||
|
||||
/* Map a free-running counter to a ring index (size is a power of two). */
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
	return ctr & wq->sz_m1;
}

/* Address of WQE "ix" in a cyclic work queue (stride = 2^log_stride). */
static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
	return wq->buf + (ix << wq->log_stride);
}
|
||||
|
||||
/*
 * Wrap-aware comparison of two 16-bit consumer counters: non-zero iff
 * cc1 is strictly ahead of cc2 in modulo-2^16 arithmetic.
 */
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
	int is_equal = (cc1 == cc2);
	int is_behind = 0x8000 & (cc1 - cc2);

	return !is_equal && !is_behind;
}
|
||||
|
||||
/* Current consumer index within the CQ ring. */
static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
	return wq->cc & wq->sz_m1;
}

/* Address of CQE "ix" (stride = 2^log_stride bytes). */
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
	return wq->buf + (ix << wq->log_stride);
}

/* Number of times the consumer counter has wrapped the ring. */
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
	return wq->cc >> wq->log_sz;
}

/* Consume one CQE. */
static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
{
	wq->cc++;
}

/* Publish the consumer counter to the 24-bit CQ doorbell record. */
static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
{
	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}

/* Full at sz_m1 — apparently one entry stays reserved as the list tail;
 * NOTE(review): confirm against the producers in mlx5_en. */
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
	return wq->cur_sz == wq->sz_m1;
}

/* True when no WQEs are currently posted. */
static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
{
	return !wq->cur_sz;
}

/* Post one WQE: advance head to its successor and bump both counters. */
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
	wq->head = head_next;
	wq->wqe_ctr++;
	wq->cur_sz++;
}

/*
 * Return WQE "ix" to the free list: append it after the current tail and
 * make its own next pointer the new tail link.
 */
static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
				  __be16 *next_tail_next)
{
	*wq->tail_next = ix;
	wq->tail_next = next_tail_next;
	wq->cur_sz--;
}
/* Publish the producer (WQE) counter to the doorbell record. */
static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
{
	*wq->db = cpu_to_be32(wq->wqe_ctr);
}

/* Address of WQE "ix" in the linked-list work queue. */
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
	return wq->buf + (ix << wq->log_stride);
}

#endif /* __MLX5_WQ_H__ */
|
781
sys/dev/mlx5/mlx5_en/en.h
Normal file
781
sys/dev/mlx5/mlx5_en/en.h
Normal file
@ -0,0 +1,781 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _MLX5_EN_H_
|
||||
#define _MLX5_EN_H_
|
||||
|
||||
#include <linux/kmod.h>
|
||||
#include <linux/page.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
|
||||
#include <netinet/in_systm.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/if_ether.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/ip6.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <netinet/tcp_lro.h>
|
||||
#include <netinet/udp.h>
|
||||
#include <net/ethernet.h>
|
||||
#include <sys/buf_ring.h>
|
||||
|
||||
#include <machine/bus.h>
|
||||
|
||||
#ifdef HAVE_TURBO_LRO
|
||||
#include "tcp_tlro.h"
|
||||
#endif
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/qp.h>
|
||||
#include <dev/mlx5/cq.h>
|
||||
#include <dev/mlx5/vport.h>
|
||||
|
||||
#include <dev/mlx5/mlx5_core/wq.h>
|
||||
#include <dev/mlx5/mlx5_core/transobj.h>
|
||||
#include <dev/mlx5/mlx5_core/mlx5_core.h>
|
||||
|
||||
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
|
||||
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
|
||||
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
|
||||
|
||||
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
|
||||
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
|
||||
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
|
||||
|
||||
/* freeBSD HW LRO is limited by 16KB - the size of max mbuf */
|
||||
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ MJUM16BYTES
|
||||
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
|
||||
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
|
||||
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
|
||||
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
|
||||
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
|
||||
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
|
||||
#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
|
||||
#define MLX5E_CACHELINE_SIZE CACHE_LINE_SIZE
|
||||
#define MLX5E_HW2SW_MTU(hwmtu) \
|
||||
((hwmtu) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
|
||||
#define MLX5E_SW2HW_MTU(swmtu) \
|
||||
((swmtu) + (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
|
||||
#define MLX5E_SW2MB_MTU(swmtu) \
|
||||
(MLX5E_SW2HW_MTU(swmtu) + MLX5E_NET_IP_ALIGN)
|
||||
#define MLX5E_MTU_MIN 72 /* Min MTU allowed by the kernel */
|
||||
#define MLX5E_MTU_MAX MIN(ETHERMTU_JUMBO, MJUM16BYTES) /* Max MTU of Ethernet jumbo frames */
|
||||
|
||||
#define MLX5E_BUDGET_MAX 8192 /* RX and TX */
|
||||
#define MLX5E_RX_BUDGET_MAX 256
|
||||
#define MLX5E_SQ_BF_BUDGET 16
|
||||
#define MLX5E_SQ_TX_QUEUE_SIZE 4096 /* SQ drbr queue size */
|
||||
|
||||
#define MLX5E_MAX_TX_NUM_TC 8 /* units */
|
||||
#define MLX5E_MAX_TX_HEADER 128 /* bytes */
|
||||
#define MLX5E_MAX_TX_PAYLOAD_SIZE 65536 /* bytes */
|
||||
#define MLX5E_MAX_TX_MBUF_SIZE 65536 /* bytes */
|
||||
#define MLX5E_MAX_TX_MBUF_FRAGS \
|
||||
((MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - \
|
||||
(MLX5E_MAX_TX_HEADER / MLX5_SEND_WQE_DS)) /* units */
|
||||
#define MLX5E_MAX_TX_INLINE \
|
||||
(MLX5E_MAX_TX_HEADER - sizeof(struct mlx5e_tx_wqe) + \
|
||||
sizeof(((struct mlx5e_tx_wqe *)0)->eth.inline_hdr_start)) /* bytes */
|
||||
|
||||
MALLOC_DECLARE(M_MLX5EN);
|
||||
|
||||
struct mlx5_core_dev;
|
||||
struct mlx5e_cq;
|
||||
|
||||
typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *);
|
||||
|
||||
#define MLX5E_STATS_COUNT(a,b,c,d) a
|
||||
#define MLX5E_STATS_VAR(a,b,c,d) b;
|
||||
#define MLX5E_STATS_DESC(a,b,c,d) c, d,
|
||||
|
||||
#define MLX5E_VPORT_STATS(m) \
|
||||
/* HW counters */ \
|
||||
m(+1, u64 rx_packets, "rx_packets", "Received packets") \
|
||||
m(+1, u64 rx_bytes, "rx_bytes", "Received bytes") \
|
||||
m(+1, u64 tx_packets, "tx_packets", "Transmitted packets") \
|
||||
m(+1, u64 tx_bytes, "tx_bytes", "Transmitted bytes") \
|
||||
m(+1, u64 rx_error_packets, "rx_error_packets", "Received error packets") \
|
||||
m(+1, u64 rx_error_bytes, "rx_error_bytes", "Received error bytes") \
|
||||
m(+1, u64 tx_error_packets, "tx_error_packets", "Transmitted error packets") \
|
||||
m(+1, u64 tx_error_bytes, "tx_error_bytes", "Transmitted error bytes") \
|
||||
m(+1, u64 rx_unicast_packets, "rx_unicast_packets", "Received unicast packets") \
|
||||
m(+1, u64 rx_unicast_bytes, "rx_unicast_bytes", "Received unicast bytes") \
|
||||
m(+1, u64 tx_unicast_packets, "tx_unicast_packets", "Transmitted unicast packets") \
|
||||
m(+1, u64 tx_unicast_bytes, "tx_unicast_bytes", "Transmitted unicast bytes") \
|
||||
m(+1, u64 rx_multicast_packets, "rx_multicast_packets", "Received multicast packets") \
|
||||
m(+1, u64 rx_multicast_bytes, "rx_multicast_bytes", "Received multicast bytes") \
|
||||
m(+1, u64 tx_multicast_packets, "tx_multicast_packets", "Transmitted multicast packets") \
|
||||
m(+1, u64 tx_multicast_bytes, "tx_multicast_bytes", "Transmitted multicast bytes") \
|
||||
m(+1, u64 rx_broadcast_packets, "rx_broadcast_packets", "Received broadcast packets") \
|
||||
m(+1, u64 rx_broadcast_bytes, "rx_broadcast_bytes", "Received broadcast bytes") \
|
||||
m(+1, u64 tx_broadcast_packets, "tx_broadcast_packets", "Transmitted broadcast packets") \
|
||||
m(+1, u64 tx_broadcast_bytes, "tx_broadcast_bytes", "Transmitted broadcast bytes") \
|
||||
/* SW counters */ \
|
||||
m(+1, u64 tso_packets, "tso_packets", "Transmitted TSO packets") \
|
||||
m(+1, u64 tso_bytes, "tso_bytes", "Transmitted TSO bytes") \
|
||||
m(+1, u64 lro_packets, "lro_packets", "Received LRO packets") \
|
||||
m(+1, u64 lro_bytes, "lro_bytes", "Received LRO bytes") \
|
||||
m(+1, u64 sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO") \
|
||||
m(+1, u64 sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO") \
|
||||
m(+1, u64 rx_csum_good, "rx_csum_good", "Received checksum valid packets") \
|
||||
m(+1, u64 rx_csum_none, "rx_csum_none", "Received no checksum packets") \
|
||||
m(+1, u64 tx_csum_offload, "tx_csum_offload", "Transmit checksum offload packets") \
|
||||
m(+1, u64 tx_queue_dropped, "tx_queue_dropped", "Transmit queue dropped") \
|
||||
m(+1, u64 tx_defragged, "tx_defragged", "Transmit queue defragged") \
|
||||
m(+1, u64 rx_wqe_err, "rx_wqe_err", "Receive WQE errors")
|
||||
|
||||
#define MLX5E_VPORT_STATS_NUM (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT))
|
||||
|
||||
struct mlx5e_vport_stats {
|
||||
struct sysctl_ctx_list ctx;
|
||||
u64 arg [0];
|
||||
MLX5E_VPORT_STATS(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
#define MLX5E_PPORT_IEEE802_3_STATS(m) \
|
||||
m(+1, u64 frames_tx, "frames_tx", "Frames transmitted") \
|
||||
m(+1, u64 frames_rx, "frames_rx", "Frames received") \
|
||||
m(+1, u64 check_seq_err, "check_seq_err", "Sequence errors") \
|
||||
m(+1, u64 alignment_err, "alignment_err", "Alignment errors") \
|
||||
m(+1, u64 octets_tx, "octets_tx", "Bytes transmitted") \
|
||||
m(+1, u64 octets_received, "octets_received", "Bytes received") \
|
||||
m(+1, u64 multicast_xmitted, "multicast_xmitted", "Multicast transmitted") \
|
||||
m(+1, u64 broadcast_xmitted, "broadcast_xmitted", "Broadcast transmitted") \
|
||||
m(+1, u64 multicast_rx, "multicast_rx", "Multicast received") \
|
||||
m(+1, u64 broadcast_rx, "broadcast_rx", "Broadcast received") \
|
||||
m(+1, u64 in_range_len_errors, "in_range_len_errors", "In range length errors") \
|
||||
m(+1, u64 out_of_range_len, "out_of_range_len", "Out of range length errors") \
|
||||
m(+1, u64 too_long_errors, "too_long_errors", "Too long errors") \
|
||||
m(+1, u64 symbol_err, "symbol_err", "Symbol errors") \
|
||||
m(+1, u64 mac_control_tx, "mac_control_tx", "MAC control transmitted") \
|
||||
m(+1, u64 mac_control_rx, "mac_control_rx", "MAC control received") \
|
||||
m(+1, u64 unsupported_op_rx, "unsupported_op_rx", "Unsupported operation received") \
|
||||
m(+1, u64 pause_ctrl_rx, "pause_ctrl_rx", "Pause control received") \
|
||||
m(+1, u64 pause_ctrl_tx, "pause_ctrl_tx", "Pause control transmitted")
|
||||
|
||||
#define MLX5E_PPORT_RFC2819_STATS(m) \
|
||||
m(+1, u64 drop_events, "drop_events", "Dropped events") \
|
||||
m(+1, u64 octets, "octets", "Octets") \
|
||||
m(+1, u64 pkts, "pkts", "Packets") \
|
||||
m(+1, u64 broadcast_pkts, "broadcast_pkts", "Broadcast packets") \
|
||||
m(+1, u64 multicast_pkts, "multicast_pkts", "Multicast packets") \
|
||||
m(+1, u64 crc_align_errors, "crc_align_errors", "CRC alignment errors") \
|
||||
m(+1, u64 undersize_pkts, "undersize_pkts", "Undersized packets") \
|
||||
m(+1, u64 oversize_pkts, "oversize_pkts", "Oversized packets") \
|
||||
m(+1, u64 fragments, "fragments", "Fragments") \
|
||||
m(+1, u64 jabbers, "jabbers", "Jabbers") \
|
||||
m(+1, u64 collisions, "collisions", "Collisions")
|
||||
|
||||
#define MLX5E_PPORT_RFC2819_STATS_DEBUG(m) \
|
||||
m(+1, u64 p64octets, "p64octets", "Bytes") \
|
||||
m(+1, u64 p65to127octets, "p65to127octets", "Bytes") \
|
||||
m(+1, u64 p128to255octets, "p128to255octets", "Bytes") \
|
||||
m(+1, u64 p256to511octets, "p256to511octets", "Bytes") \
|
||||
m(+1, u64 p512to1023octets, "p512to1023octets", "Bytes") \
|
||||
m(+1, u64 p1024to1518octets, "p1024to1518octets", "Bytes") \
|
||||
m(+1, u64 p1519to2047octets, "p1519to2047octets", "Bytes") \
|
||||
m(+1, u64 p2048to4095octets, "p2048to4095octets", "Bytes") \
|
||||
m(+1, u64 p4096to8191octets, "p4096to8191octets", "Bytes") \
|
||||
m(+1, u64 p8192to10239octets, "p8192to10239octets", "Bytes")
|
||||
|
||||
#define MLX5E_PPORT_RFC2863_STATS_DEBUG(m) \
|
||||
m(+1, u64 in_octets, "in_octets", "In octets") \
|
||||
m(+1, u64 in_ucast_pkts, "in_ucast_pkts", "In unicast packets") \
|
||||
m(+1, u64 in_discards, "in_discards", "In discards") \
|
||||
m(+1, u64 in_errors, "in_errors", "In errors") \
|
||||
m(+1, u64 in_unknown_protos, "in_unknown_protos", "In unknown protocols") \
|
||||
m(+1, u64 out_octets, "out_octets", "Out octets") \
|
||||
m(+1, u64 out_ucast_pkts, "out_ucast_pkts", "Out unicast packets") \
|
||||
m(+1, u64 out_discards, "out_discards", "Out discards") \
|
||||
m(+1, u64 out_errors, "out_errors", "Out errors") \
|
||||
m(+1, u64 in_multicast_pkts, "in_multicast_pkts", "In multicast packets") \
|
||||
m(+1, u64 in_broadcast_pkts, "in_broadcast_pkts", "In broadcast packets") \
|
||||
m(+1, u64 out_multicast_pkts, "out_multicast_pkts", "Out multicast packets") \
|
||||
m(+1, u64 out_broadcast_pkts, "out_broadcast_pkts", "Out broadcast packets")
|
||||
|
||||
#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m) \
|
||||
m(+1, u64 time_since_last_clear, "time_since_last_clear", \
|
||||
"Time since the last counters clear event (msec)") \
|
||||
m(+1, u64 symbol_errors, "symbol_errors", "Symbol errors") \
|
||||
m(+1, u64 sync_headers_errors, "sync_headers_errors", "Sync header error counter") \
|
||||
m(+1, u64 bip_errors_lane0, "edpl_bip_errors_lane0", \
|
||||
"Indicates the number of PRBS errors on lane 0") \
|
||||
m(+1, u64 bip_errors_lane1, "edpl_bip_errors_lane1", \
|
||||
"Indicates the number of PRBS errors on lane 1") \
|
||||
m(+1, u64 bip_errors_lane2, "edpl_bip_errors_lane2", \
|
||||
"Indicates the number of PRBS errors on lane 2") \
|
||||
m(+1, u64 bip_errors_lane3, "edpl_bip_errors_lane3", \
|
||||
"Indicates the number of PRBS errors on lane 3") \
|
||||
m(+1, u64 fc_corrected_blocks_lane0, "fc_corrected_blocks_lane0", \
|
||||
"FEC correctable block counter lane 0") \
|
||||
m(+1, u64 fc_corrected_blocks_lane1, "fc_corrected_blocks_lane1", \
|
||||
"FEC correctable block counter lane 1") \
|
||||
m(+1, u64 fc_corrected_blocks_lane2, "fc_corrected_blocks_lane2", \
|
||||
"FEC correctable block counter lane 2") \
|
||||
m(+1, u64 fc_corrected_blocks_lane3, "fc_corrected_blocks_lane3", \
|
||||
"FEC correctable block counter lane 3") \
|
||||
m(+1, u64 rs_corrected_blocks, "rs_corrected_blocks", \
|
||||
"FEC correcable block counter") \
|
||||
m(+1, u64 rs_uncorrectable_blocks, "rs_uncorrectable_blocks", \
|
||||
"FEC uncorrecable block counter") \
|
||||
m(+1, u64 rs_no_errors_blocks, "rs_no_errors_blocks", \
|
||||
"The number of RS-FEC blocks received that had no errors") \
|
||||
m(+1, u64 rs_single_error_blocks, "rs_single_error_blocks", \
|
||||
"The number of corrected RS-FEC blocks received that had" \
|
||||
"exactly 1 error symbol") \
|
||||
m(+1, u64 rs_corrected_symbols_total, "rs_corrected_symbols_total", \
|
||||
"Port FEC corrected symbol counter") \
|
||||
m(+1, u64 rs_corrected_symbols_lane0, "rs_corrected_symbols_lane0", \
|
||||
"FEC corrected symbol counter lane 0") \
|
||||
m(+1, u64 rs_corrected_symbols_lane1, "rs_corrected_symbols_lane1", \
|
||||
"FEC corrected symbol counter lane 1") \
|
||||
m(+1, u64 rs_corrected_symbols_lane2, "rs_corrected_symbols_lane2", \
|
||||
"FEC corrected symbol counter lane 2") \
|
||||
m(+1, u64 rs_corrected_symbols_lane3, "rs_corrected_symbols_lane3", \
|
||||
"FEC corrected symbol counter lane 3") \
|
||||
|
||||
#define MLX5E_PPORT_Q_CONTERS(m) \
|
||||
m(+1, u64 out_of_rx_buffer, "out_of_rx_buffer", "out of rx buffers aka no recv wqes events")
|
||||
|
||||
/*
|
||||
* Make sure to update mlx5e_update_pport_counters()
|
||||
* when adding a new MLX5E_PPORT_STATS block
|
||||
*/
|
||||
#define MLX5E_PPORT_STATS(m) \
|
||||
MLX5E_PPORT_IEEE802_3_STATS(m) \
|
||||
MLX5E_PPORT_RFC2819_STATS(m) \
|
||||
MLX5E_PPORT_Q_CONTERS(m)
|
||||
|
||||
#define MLX5E_PORT_STATS_DEBUG(m) \
|
||||
MLX5E_PPORT_RFC2819_STATS_DEBUG(m) \
|
||||
MLX5E_PPORT_RFC2863_STATS_DEBUG(m) \
|
||||
MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)
|
||||
|
||||
#define MLX5E_PPORT_IEEE802_3_STATS_NUM \
|
||||
(0 MLX5E_PPORT_IEEE802_3_STATS(MLX5E_STATS_COUNT))
|
||||
#define MLX5E_PPORT_RFC2819_STATS_NUM \
|
||||
(0 MLX5E_PPORT_RFC2819_STATS(MLX5E_STATS_COUNT))
|
||||
#define MLX5E_PPORT_STATS_NUM \
|
||||
(0 MLX5E_PPORT_STATS(MLX5E_STATS_COUNT))
|
||||
|
||||
#define MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM \
|
||||
(0 MLX5E_PPORT_RFC2819_STATS_DEBUG(MLX5E_STATS_COUNT))
|
||||
#define MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM \
|
||||
(0 MLX5E_PPORT_RFC2863_STATS_DEBUG(MLX5E_STATS_COUNT))
|
||||
#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
|
||||
(0 MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(MLX5E_STATS_COUNT))
|
||||
#define MLX5E_PORT_STATS_DEBUG_NUM \
|
||||
(0 MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_COUNT))
|
||||
|
||||
struct mlx5e_pport_stats {
|
||||
struct sysctl_ctx_list ctx;
|
||||
u64 arg [0];
|
||||
MLX5E_PPORT_STATS(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
struct mlx5e_port_stats_debug {
|
||||
struct sysctl_ctx_list ctx;
|
||||
u64 arg [0];
|
||||
MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
#define MLX5E_RQ_STATS(m) \
|
||||
m(+1, u64 packets, "packets", "Received packets") \
|
||||
m(+1, u64 csum_none, "csum_none", "Received packets") \
|
||||
m(+1, u64 lro_packets, "lro_packets", "Received packets") \
|
||||
m(+1, u64 lro_bytes, "lro_bytes", "Received packets") \
|
||||
m(+1, u64 sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO") \
|
||||
m(+1, u64 sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO") \
|
||||
m(+1, u64 wqe_err, "wqe_err", "Received packets")
|
||||
|
||||
#define MLX5E_RQ_STATS_NUM (0 MLX5E_RQ_STATS(MLX5E_STATS_COUNT))
|
||||
|
||||
struct mlx5e_rq_stats {
|
||||
struct sysctl_ctx_list ctx;
|
||||
u64 arg [0];
|
||||
MLX5E_RQ_STATS(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
#define MLX5E_SQ_STATS(m) \
|
||||
m(+1, u64 packets, "packets", "Transmitted packets") \
|
||||
m(+1, u64 tso_packets, "tso_packets", "Transmitted packets") \
|
||||
m(+1, u64 tso_bytes, "tso_bytes", "Transmitted bytes") \
|
||||
m(+1, u64 csum_offload_none, "csum_offload_none", "Transmitted packets") \
|
||||
m(+1, u64 defragged, "defragged", "Transmitted packets") \
|
||||
m(+1, u64 dropped, "dropped", "Transmitted packets") \
|
||||
m(+1, u64 nop, "nop", "Transmitted packets")
|
||||
|
||||
#define MLX5E_SQ_STATS_NUM (0 MLX5E_SQ_STATS(MLX5E_STATS_COUNT))
|
||||
|
||||
struct mlx5e_sq_stats {
|
||||
struct sysctl_ctx_list ctx;
|
||||
u64 arg [0];
|
||||
MLX5E_SQ_STATS(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
struct mlx5e_stats {
|
||||
struct mlx5e_vport_stats vport;
|
||||
struct mlx5e_pport_stats pport;
|
||||
struct mlx5e_port_stats_debug port_stats_debug;
|
||||
};
|
||||
|
||||
struct mlx5e_params {
|
||||
u8 log_sq_size;
|
||||
u8 log_rq_size;
|
||||
u16 num_channels;
|
||||
u8 default_vlan_prio;
|
||||
u8 num_tc;
|
||||
u8 rx_cq_moderation_mode;
|
||||
u16 rx_cq_moderation_usec;
|
||||
u16 rx_cq_moderation_pkts;
|
||||
u16 tx_cq_moderation_usec;
|
||||
u16 tx_cq_moderation_pkts;
|
||||
u16 min_rx_wqes;
|
||||
bool hw_lro_en;
|
||||
u32 lro_wqe_sz;
|
||||
u16 rx_hash_log_tbl_sz;
|
||||
};
|
||||
|
||||
#define MLX5E_PARAMS(m) \
|
||||
m(+1, u64 tx_pauseframe_control, "tx_pauseframe_control", "Set to enable TX pause frames. Clear to disable.") \
|
||||
m(+1, u64 rx_pauseframe_control, "rx_pauseframe_control", "Set to enable RX pause frames. Clear to disable.") \
|
||||
m(+1, u64 tx_queue_size_max, "tx_queue_size_max", "Max send queue size") \
|
||||
m(+1, u64 rx_queue_size_max, "rx_queue_size_max", "Max receive queue size") \
|
||||
m(+1, u64 tx_queue_size, "tx_queue_size", "Default send queue size") \
|
||||
m(+1, u64 rx_queue_size, "rx_queue_size", "Default receive queue size") \
|
||||
m(+1, u64 channels, "channels", "Default number of channels") \
|
||||
m(+1, u64 coalesce_usecs_max, "coalesce_usecs_max", "Maximum usecs for joining packets") \
|
||||
m(+1, u64 coalesce_pkts_max, "coalesce_pkts_max", "Maximum packets to join") \
|
||||
m(+1, u64 rx_coalesce_usecs, "rx_coalesce_usecs", "Limit in usec for joining rx packets") \
|
||||
m(+1, u64 rx_coalesce_pkts, "rx_coalesce_pkts", "Maximum number of rx packets to join") \
|
||||
m(+1, u64 rx_coalesce_mode, "rx_coalesce_mode", "0: EQE mode 1: CQE mode") \
|
||||
m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
|
||||
m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
|
||||
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro")
|
||||
|
||||
#define MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))
|
||||
|
||||
struct mlx5e_params_ethtool {
|
||||
u64 arg [0];
|
||||
MLX5E_PARAMS(MLX5E_STATS_VAR)
|
||||
};
|
||||
|
||||
/* EEPROM Standards for plug in modules */
|
||||
#ifndef MLX5E_ETH_MODULE_SFF_8472
|
||||
#define MLX5E_ETH_MODULE_SFF_8472 0x1
|
||||
#define MLX5E_ETH_MODULE_SFF_8472_LEN 128
|
||||
#endif
|
||||
|
||||
#ifndef MLX5E_ETH_MODULE_SFF_8636
|
||||
#define MLX5E_ETH_MODULE_SFF_8636 0x2
|
||||
#define MLX5E_ETH_MODULE_SFF_8636_LEN 256
|
||||
#endif
|
||||
|
||||
#ifndef MLX5E_ETH_MODULE_SFF_8436
|
||||
#define MLX5E_ETH_MODULE_SFF_8436 0x3
|
||||
#define MLX5E_ETH_MODULE_SFF_8436_LEN 256
|
||||
#endif
|
||||
|
||||
/* EEPROM I2C Addresses */
|
||||
#define MLX5E_I2C_ADDR_LOW 0x50
|
||||
#define MLX5E_I2C_ADDR_HIGH 0x51
|
||||
|
||||
#define MLX5E_EEPROM_LOW_PAGE 0x0
|
||||
#define MLX5E_EEPROM_HIGH_PAGE 0x3
|
||||
|
||||
#define MLX5E_EEPROM_HIGH_PAGE_OFFSET 128
|
||||
#define MLX5E_EEPROM_PAGE_LENGTH 256
|
||||
|
||||
#define MLX5E_EEPROM_INFO_BYTES 0x3
|
||||
|
||||
struct mlx5e_cq {
|
||||
/* data path - accessed per cqe */
|
||||
struct mlx5_cqwq wq;
|
||||
|
||||
/* data path - accessed per HW polling */
|
||||
struct mlx5_core_cq mcq;
|
||||
struct mlx5e_channel *channel;
|
||||
|
||||
/* control */
|
||||
struct mlx5_wq_ctrl wq_ctrl;
|
||||
} __aligned(MLX5E_CACHELINE_SIZE);
|
||||
|
||||
struct mlx5e_rq_mbuf {
|
||||
bus_dmamap_t dma_map;
|
||||
caddr_t data;
|
||||
struct mbuf *mbuf;
|
||||
};
|
||||
|
||||
struct mlx5e_rq {
|
||||
/* data path */
|
||||
struct mlx5_wq_ll wq;
|
||||
struct mtx mtx;
|
||||
bus_dma_tag_t dma_tag;
|
||||
u32 wqe_sz;
|
||||
struct mlx5e_rq_mbuf *mbuf;
|
||||
struct device *pdev;
|
||||
struct ifnet *ifp;
|
||||
struct mlx5e_rq_stats stats;
|
||||
struct mlx5e_cq cq;
|
||||
#ifdef HAVE_TURBO_LRO
|
||||
struct tlro_ctrl lro;
|
||||
#else
|
||||
struct lro_ctrl lro;
|
||||
#endif
|
||||
volatile int enabled;
|
||||
int ix;
|
||||
|
||||
/* control */
|
||||
struct mlx5_wq_ctrl wq_ctrl;
|
||||
u32 rqn;
|
||||
struct mlx5e_channel *channel;
|
||||
} __aligned(MLX5E_CACHELINE_SIZE);
|
||||
|
||||
struct mlx5e_sq_mbuf {
|
||||
bus_dmamap_t dma_map;
|
||||
struct mbuf *mbuf;
|
||||
u32 num_bytes;
|
||||
u32 num_wqebbs;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5E_SQ_READY,
|
||||
MLX5E_SQ_FULL
|
||||
};
|
||||
|
||||
struct mlx5e_sq {
|
||||
/* data path */
|
||||
struct mtx lock;
|
||||
bus_dma_tag_t dma_tag;
|
||||
struct mtx comp_lock;
|
||||
|
||||
/* dirtied @completion */
|
||||
u16 cc;
|
||||
|
||||
/* dirtied @xmit */
|
||||
u16 pc __aligned(MLX5E_CACHELINE_SIZE);
|
||||
u16 bf_offset;
|
||||
struct mlx5e_sq_stats stats;
|
||||
|
||||
struct mlx5e_cq cq;
|
||||
struct task sq_task;
|
||||
struct taskqueue *sq_tq;
|
||||
|
||||
/* pointers to per packet info: write@xmit, read@completion */
|
||||
struct mlx5e_sq_mbuf *mbuf;
|
||||
struct buf_ring *br;
|
||||
|
||||
/* read only */
|
||||
struct mlx5_wq_cyc wq;
|
||||
void __iomem *uar_map;
|
||||
void __iomem *uar_bf_map;
|
||||
u32 sqn;
|
||||
u32 bf_buf_size;
|
||||
struct device *pdev;
|
||||
u32 mkey_be;
|
||||
|
||||
/* control path */
|
||||
struct mlx5_wq_ctrl wq_ctrl;
|
||||
struct mlx5_uar uar;
|
||||
struct mlx5e_channel *channel;
|
||||
int tc;
|
||||
unsigned int queue_state;
|
||||
} __aligned(MLX5E_CACHELINE_SIZE);
|
||||
|
||||
static inline bool
|
||||
mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
|
||||
{
|
||||
return ((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n ||
|
||||
sq->cc == sq->pc);
|
||||
}
|
||||
|
||||
struct mlx5e_channel {
|
||||
/* data path */
|
||||
struct mlx5e_rq rq;
|
||||
struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
|
||||
struct device *pdev;
|
||||
struct ifnet *ifp;
|
||||
u32 mkey_be;
|
||||
u8 num_tc;
|
||||
|
||||
/* control */
|
||||
struct mlx5e_priv *priv;
|
||||
int ix;
|
||||
int cpu;
|
||||
} __aligned(MLX5E_CACHELINE_SIZE);
|
||||
|
||||
enum mlx5e_traffic_types {
|
||||
MLX5E_TT_IPV4_TCP,
|
||||
MLX5E_TT_IPV6_TCP,
|
||||
MLX5E_TT_IPV4_UDP,
|
||||
MLX5E_TT_IPV6_UDP,
|
||||
MLX5E_TT_IPV4_IPSEC_AH,
|
||||
MLX5E_TT_IPV6_IPSEC_AH,
|
||||
MLX5E_TT_IPV4_IPSEC_ESP,
|
||||
MLX5E_TT_IPV6_IPSEC_ESP,
|
||||
MLX5E_TT_IPV4,
|
||||
MLX5E_TT_IPV6,
|
||||
MLX5E_TT_ANY,
|
||||
MLX5E_NUM_TT,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5E_RQT_SPREADING = 0,
|
||||
MLX5E_RQT_DEFAULT_RQ = 1,
|
||||
MLX5E_NUM_RQT = 2,
|
||||
};
|
||||
|
||||
struct mlx5e_eth_addr_info {
|
||||
u8 addr [ETH_ALEN + 2];
|
||||
u32 tt_vec;
|
||||
u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
|
||||
};
|
||||
|
||||
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
|
||||
|
||||
struct mlx5e_eth_addr_hash_node;
|
||||
|
||||
struct mlx5e_eth_addr_hash_head {
|
||||
struct mlx5e_eth_addr_hash_node *lh_first;
|
||||
};
|
||||
|
||||
struct mlx5e_eth_addr_db {
|
||||
struct mlx5e_eth_addr_hash_head if_uc[MLX5E_ETH_ADDR_HASH_SIZE];
|
||||
struct mlx5e_eth_addr_hash_head if_mc[MLX5E_ETH_ADDR_HASH_SIZE];
|
||||
struct mlx5e_eth_addr_info broadcast;
|
||||
struct mlx5e_eth_addr_info allmulti;
|
||||
struct mlx5e_eth_addr_info promisc;
|
||||
bool broadcast_enabled;
|
||||
bool allmulti_enabled;
|
||||
bool promisc_enabled;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
|
||||
MLX5E_STATE_OPENED,
|
||||
};
|
||||
|
||||
struct mlx5e_vlan_db {
|
||||
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
|
||||
u32 active_vlans_ft_ix[VLAN_N_VID];
|
||||
u32 untagged_rule_ft_ix;
|
||||
u32 any_vlan_rule_ft_ix;
|
||||
bool filter_disabled;
|
||||
};
|
||||
|
||||
struct mlx5e_flow_table {
|
||||
void *vlan;
|
||||
void *main;
|
||||
};
|
||||
|
||||
struct mlx5e_priv {
|
||||
/* priv data path fields - start */
|
||||
int order_base_2_num_channels;
|
||||
int queue_mapping_channel_mask;
|
||||
int num_tc;
|
||||
int default_vlan_prio;
|
||||
/* priv data path fields - end */
|
||||
|
||||
unsigned long state;
|
||||
int gone;
|
||||
#define PRIV_LOCK(priv) sx_xlock(&(priv)->state_lock)
|
||||
#define PRIV_UNLOCK(priv) sx_xunlock(&(priv)->state_lock)
|
||||
#define PRIV_LOCKED(priv) sx_xlocked(&(priv)->state_lock)
|
||||
struct sx state_lock; /* Protects Interface state */
|
||||
struct mlx5_uar cq_uar;
|
||||
u32 pdn;
|
||||
u32 tdn;
|
||||
struct mlx5_core_mr mr;
|
||||
|
||||
struct mlx5e_channel * volatile *channel;
|
||||
u32 tisn[MLX5E_MAX_TX_NUM_TC];
|
||||
u32 rqtn;
|
||||
u32 tirn[MLX5E_NUM_TT];
|
||||
|
||||
struct mlx5e_flow_table ft;
|
||||
struct mlx5e_eth_addr_db eth_addr;
|
||||
struct mlx5e_vlan_db vlan;
|
||||
|
||||
struct mlx5e_params params;
|
||||
struct mlx5e_params_ethtool params_ethtool;
|
||||
struct mtx async_events_mtx; /* sync hw events */
|
||||
struct work_struct update_stats_work;
|
||||
struct work_struct update_carrier_work;
|
||||
struct work_struct set_rx_mode_work;
|
||||
|
||||
struct mlx5_core_dev *mdev;
|
||||
struct ifnet *ifp;
|
||||
struct sysctl_ctx_list sysctl_ctx;
|
||||
struct sysctl_oid *sysctl_ifnet;
|
||||
struct sysctl_oid *sysctl_hw;
|
||||
int sysctl_debug;
|
||||
struct mlx5e_stats stats;
|
||||
int counter_set_id;
|
||||
|
||||
eventhandler_tag vlan_detach;
|
||||
eventhandler_tag vlan_attach;
|
||||
struct ifmedia media;
|
||||
int media_status_last;
|
||||
int media_active_last;
|
||||
|
||||
struct callout watchdog;
|
||||
};
|
||||
|
||||
#define MLX5E_NET_IP_ALIGN 2
|
||||
|
||||
struct mlx5e_tx_wqe {
|
||||
struct mlx5_wqe_ctrl_seg ctrl;
|
||||
struct mlx5_wqe_eth_seg eth;
|
||||
};
|
||||
|
||||
struct mlx5e_rx_wqe {
|
||||
struct mlx5_wqe_srq_next_seg next;
|
||||
struct mlx5_wqe_data_seg data;
|
||||
};
|
||||
|
||||
struct mlx5e_eeprom {
|
||||
int lock_bit;
|
||||
int i2c_addr;
|
||||
int page_num;
|
||||
int device_addr;
|
||||
int module_num;
|
||||
int len;
|
||||
int type;
|
||||
int page_valid;
|
||||
u32 *data;
|
||||
};
|
||||
|
||||
enum mlx5e_link_mode {
|
||||
MLX5E_1000BASE_CX_SGMII = 0,
|
||||
MLX5E_1000BASE_KX = 1,
|
||||
MLX5E_10GBASE_CX4 = 2,
|
||||
MLX5E_10GBASE_KX4 = 3,
|
||||
MLX5E_10GBASE_KR = 4,
|
||||
MLX5E_20GBASE_KR2 = 5,
|
||||
MLX5E_40GBASE_CR4 = 6,
|
||||
MLX5E_40GBASE_KR4 = 7,
|
||||
MLX5E_56GBASE_R4 = 8,
|
||||
MLX5E_10GBASE_CR = 12,
|
||||
MLX5E_10GBASE_SR = 13,
|
||||
MLX5E_10GBASE_ER = 14,
|
||||
MLX5E_40GBASE_SR4 = 15,
|
||||
MLX5E_40GBASE_LR4 = 16,
|
||||
MLX5E_100GBASE_CR4 = 20,
|
||||
MLX5E_100GBASE_SR4 = 21,
|
||||
MLX5E_100GBASE_KR4 = 22,
|
||||
MLX5E_100GBASE_LR4 = 23,
|
||||
MLX5E_100BASE_TX = 24,
|
||||
MLX5E_100BASE_T = 25,
|
||||
MLX5E_10GBASE_T = 26,
|
||||
MLX5E_25GBASE_CR = 27,
|
||||
MLX5E_25GBASE_KR = 28,
|
||||
MLX5E_25GBASE_SR = 29,
|
||||
MLX5E_50GBASE_CR2 = 30,
|
||||
MLX5E_50GBASE_KR2 = 31,
|
||||
MLX5E_LINK_MODES_NUMBER,
|
||||
};
|
||||
|
||||
#define MLX5E_PROT_MASK(link_mode) (1 << (link_mode))
|
||||
#define MLX5E_FLD_MAX(typ, fld) ((1ULL << __mlx5_bit_sz(typ, fld)) - 1ULL)
|
||||
|
||||
int mlx5e_xmit(struct ifnet *, struct mbuf *);
|
||||
|
||||
int mlx5e_open_locked(struct ifnet *);
|
||||
int mlx5e_close_locked(struct ifnet *);
|
||||
|
||||
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
|
||||
void mlx5e_rx_cq_comp(struct mlx5_core_cq *);
|
||||
void mlx5e_tx_cq_comp(struct mlx5_core_cq *);
|
||||
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
|
||||
void mlx5e_tx_que(void *context, int pending);
|
||||
|
||||
int mlx5e_open_flow_table(struct mlx5e_priv *priv);
|
||||
void mlx5e_close_flow_table(struct mlx5e_priv *priv);
|
||||
void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
|
||||
void mlx5e_set_rx_mode_work(struct work_struct *work);
|
||||
|
||||
void mlx5e_vlan_rx_add_vid(void *, struct ifnet *, u16);
|
||||
void mlx5e_vlan_rx_kill_vid(void *, struct ifnet *, u16);
|
||||
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
|
||||
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
|
||||
int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
|
||||
void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
|
||||
|
||||
static inline void
|
||||
mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
|
||||
struct mlx5e_tx_wqe *wqe, int bf_sz)
|
||||
{
|
||||
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
|
||||
|
||||
/* ensure wqe is visible to device before updating doorbell record */
|
||||
wmb();
|
||||
|
||||
*sq->wq.db = cpu_to_be32(sq->pc);
|
||||
|
||||
/*
|
||||
* Ensure the doorbell record is visible to device before ringing
|
||||
* the doorbell:
|
||||
*/
|
||||
wmb();
|
||||
|
||||
if (bf_sz) {
|
||||
__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
|
||||
|
||||
/* flush the write-combining mapped buffer */
|
||||
wmb();
|
||||
|
||||
} else {
|
||||
mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
|
||||
}
|
||||
|
||||
sq->bf_offset ^= sq->bf_buf_size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
mlx5e_cq_arm(struct mlx5e_cq *cq)
|
||||
{
|
||||
struct mlx5_core_cq *mcq;
|
||||
|
||||
mcq = &cq->mcq;
|
||||
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
|
||||
}
|
||||
|
||||
extern const struct ethtool_ops mlx5e_ethtool_ops;
|
||||
void mlx5e_create_ethtool(struct mlx5e_priv *);
|
||||
void mlx5e_create_stats(struct sysctl_ctx_list *,
|
||||
struct sysctl_oid_list *, const char *,
|
||||
const char **, unsigned, u64 *);
|
||||
void mlx5e_send_nop(struct mlx5e_sq *, u32, bool);
|
||||
|
||||
#endif /* _MLX5_EN_H_ */
|
493
sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
Normal file
493
sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
Normal file
@ -0,0 +1,493 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include "en.h"
|
||||
#include <net/sff8472.h>
|
||||
|
||||
void
|
||||
mlx5e_create_stats(struct sysctl_ctx_list *ctx,
|
||||
struct sysctl_oid_list *parent, const char *buffer,
|
||||
const char **desc, unsigned num, u64 * arg)
|
||||
{
|
||||
struct sysctl_oid *node;
|
||||
unsigned x;
|
||||
|
||||
sysctl_ctx_init(ctx);
|
||||
|
||||
node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO,
|
||||
buffer, CTLFLAG_RD, NULL, "Statistics");
|
||||
if (node == NULL)
|
||||
return;
|
||||
for (x = 0; x != num; x++) {
|
||||
SYSCTL_ADD_UQUAD(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
|
||||
desc[2 * x], CTLFLAG_RD, arg + x, desc[2 * x + 1]);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
struct mlx5e_priv *priv = arg1;
|
||||
uint64_t value;
|
||||
int was_opened;
|
||||
int error;
|
||||
|
||||
PRIV_LOCK(priv);
|
||||
value = priv->params_ethtool.arg[arg2];
|
||||
error = sysctl_handle_64(oidp, &value, 0, req);
|
||||
if (error || req->newptr == NULL ||
|
||||
value == priv->params_ethtool.arg[arg2])
|
||||
goto done;
|
||||
|
||||
/* assign new value */
|
||||
priv->params_ethtool.arg[arg2] = value;
|
||||
|
||||
/* check if device is gone */
|
||||
if (priv->gone) {
|
||||
error = ENXIO;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (&priv->params_ethtool.arg[arg2] == &priv->params_ethtool.rx_pauseframe_control ||
|
||||
&priv->params_ethtool.arg[arg2] == &priv->params_ethtool.tx_pauseframe_control) {
|
||||
/* range check parameters */
|
||||
priv->params_ethtool.rx_pauseframe_control =
|
||||
priv->params_ethtool.rx_pauseframe_control ? 1 : 0;
|
||||
priv->params_ethtool.tx_pauseframe_control =
|
||||
priv->params_ethtool.tx_pauseframe_control ? 1 : 0;
|
||||
|
||||
/* update firmware */
|
||||
error = -mlx5_set_port_pause(priv->mdev, 1,
|
||||
priv->params_ethtool.rx_pauseframe_control,
|
||||
priv->params_ethtool.tx_pauseframe_control);
|
||||
goto done;
|
||||
}
|
||||
|
||||
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
|
||||
if (was_opened)
|
||||
mlx5e_close_locked(priv->ifp);
|
||||
|
||||
/* import TX queue size */
|
||||
if (priv->params_ethtool.tx_queue_size <
|
||||
(1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
|
||||
priv->params_ethtool.tx_queue_size =
|
||||
(1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
|
||||
} else if (priv->params_ethtool.tx_queue_size >
|
||||
priv->params_ethtool.tx_queue_size_max) {
|
||||
priv->params_ethtool.tx_queue_size =
|
||||
priv->params_ethtool.tx_queue_size_max;
|
||||
}
|
||||
priv->params.log_sq_size =
|
||||
order_base_2(priv->params_ethtool.tx_queue_size);
|
||||
|
||||
/* import RX queue size */
|
||||
if (priv->params_ethtool.rx_queue_size <
|
||||
(1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
|
||||
priv->params_ethtool.rx_queue_size =
|
||||
(1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
|
||||
} else if (priv->params_ethtool.rx_queue_size >
|
||||
priv->params_ethtool.rx_queue_size_max) {
|
||||
priv->params_ethtool.rx_queue_size =
|
||||
priv->params_ethtool.rx_queue_size_max;
|
||||
}
|
||||
priv->params.log_rq_size =
|
||||
order_base_2(priv->params_ethtool.rx_queue_size);
|
||||
|
||||
priv->params.min_rx_wqes = min_t (u16,
|
||||
priv->params_ethtool.rx_queue_size - 1,
|
||||
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
|
||||
|
||||
/* import number of channels */
|
||||
if (priv->params_ethtool.channels < 1)
|
||||
priv->params_ethtool.channels = 1;
|
||||
else if (priv->params_ethtool.channels >
|
||||
(u64) priv->mdev->priv.eq_table.num_comp_vectors) {
|
||||
priv->params_ethtool.channels =
|
||||
(u64) priv->mdev->priv.eq_table.num_comp_vectors;
|
||||
}
|
||||
priv->params.num_channels = priv->params_ethtool.channels;
|
||||
|
||||
/* import RX mode */
|
||||
if (priv->params_ethtool.rx_coalesce_mode != 0)
|
||||
priv->params_ethtool.rx_coalesce_mode = 1;
|
||||
priv->params.rx_cq_moderation_mode = priv->params_ethtool.rx_coalesce_mode;
|
||||
|
||||
/* import RX coal time */
|
||||
if (priv->params_ethtool.rx_coalesce_usecs < 1)
|
||||
priv->params_ethtool.rx_coalesce_usecs = 0;
|
||||
else if (priv->params_ethtool.rx_coalesce_usecs >
|
||||
MLX5E_FLD_MAX(cqc, cq_period)) {
|
||||
priv->params_ethtool.rx_coalesce_usecs =
|
||||
MLX5E_FLD_MAX(cqc, cq_period);
|
||||
}
|
||||
priv->params.rx_cq_moderation_usec = priv->params_ethtool.rx_coalesce_usecs;
|
||||
|
||||
/* import RX coal pkts */
|
||||
if (priv->params_ethtool.rx_coalesce_pkts < 1)
|
||||
priv->params_ethtool.rx_coalesce_pkts = 0;
|
||||
else if (priv->params_ethtool.rx_coalesce_pkts >
|
||||
MLX5E_FLD_MAX(cqc, cq_max_count)) {
|
||||
priv->params_ethtool.rx_coalesce_pkts =
|
||||
MLX5E_FLD_MAX(cqc, cq_max_count);
|
||||
}
|
||||
priv->params.rx_cq_moderation_pkts = priv->params_ethtool.rx_coalesce_pkts;
|
||||
|
||||
/* import TX coal time */
|
||||
if (priv->params_ethtool.tx_coalesce_usecs < 1)
|
||||
priv->params_ethtool.tx_coalesce_usecs = 0;
|
||||
else if (priv->params_ethtool.tx_coalesce_usecs >
|
||||
MLX5E_FLD_MAX(cqc, cq_period)) {
|
||||
priv->params_ethtool.tx_coalesce_usecs =
|
||||
MLX5E_FLD_MAX(cqc, cq_period);
|
||||
}
|
||||
priv->params.tx_cq_moderation_usec = priv->params_ethtool.tx_coalesce_usecs;
|
||||
|
||||
/* import TX coal pkts */
|
||||
if (priv->params_ethtool.tx_coalesce_pkts < 1)
|
||||
priv->params_ethtool.tx_coalesce_pkts = 0;
|
||||
else if (priv->params_ethtool.tx_coalesce_pkts >
|
||||
MLX5E_FLD_MAX(cqc, cq_max_count)) {
|
||||
priv->params_ethtool.tx_coalesce_pkts = MLX5E_FLD_MAX(cqc, cq_max_count);
|
||||
}
|
||||
priv->params.tx_cq_moderation_pkts = priv->params_ethtool.tx_coalesce_pkts;
|
||||
|
||||
/* we always agree to turn off HW LRO - but not always to turn on */
|
||||
if (priv->params_ethtool.hw_lro) {
|
||||
if (priv->params_ethtool.hw_lro != 1) {
|
||||
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
|
||||
error = EINVAL;
|
||||
goto done;
|
||||
}
|
||||
if (priv->ifp->if_capenable & IFCAP_LRO)
|
||||
priv->params.hw_lro_en = !!MLX5_CAP_ETH(priv->mdev, lro_cap);
|
||||
}
|
||||
else {
|
||||
priv->params.hw_lro_en = false;
|
||||
}
|
||||
|
||||
if (was_opened)
|
||||
mlx5e_open_locked(priv->ifp);
|
||||
done:
|
||||
PRIV_UNLOCK(priv);
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the first three bytes of the eeprom in order to get the needed info
|
||||
* for the whole reading.
|
||||
* Byte 0 - Identifier byte
|
||||
* Byte 1 - Revision byte
|
||||
* Byte 2 - Status byte
|
||||
*/
|
||||
/*
 * Query the module number and the first bytes of the cable EEPROM to
 * classify the module (SFP/QSFP/QSFP+/QSFP28) and fill in eeprom->type,
 * eeprom->len and eeprom->page_valid for the subsequent full read.
 * Returns 0 on success or an error code.
 */
static int
mlx5e_get_eeprom_info(struct mlx5e_priv *priv, struct mlx5e_eeprom *eeprom)
{
	struct mlx5_core_dev *dev = priv->mdev;
	u32 data = 0;		/* receives identifier/revision/status bytes */
	int size_read = 0;
	int ret;

	ret = mlx5_query_module_num(dev, &eeprom->module_num);
	if (ret) {
		if_printf(priv->ifp, "%s:%d: Failed query module error=%d\n",
		    __func__, __LINE__, ret);
		return (ret);
	}

	/* Read the first three bytes to get Identifier, Revision and Status */
	ret = mlx5_query_eeprom(dev, eeprom->i2c_addr, eeprom->page_num,
	    eeprom->device_addr, MLX5E_EEPROM_INFO_BYTES, eeprom->module_num, &data,
	    &size_read);
	if (ret) {
		if_printf(priv->ifp, "%s:%d: Failed query eeprom module error=0x%x\n",
		    __func__, __LINE__, ret);
		return (ret);
	}

	/* Byte 0 (identifier) selects the SFF management specification. */
	switch (data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) {
	case SFF_8024_ID_QSFP:
		eeprom->type = MLX5E_ETH_MODULE_SFF_8436;
		eeprom->len = MLX5E_ETH_MODULE_SFF_8436_LEN;
		break;
	case SFF_8024_ID_QSFPPLUS:
	case SFF_8024_ID_QSFP28:
		/*
		 * QSFP28, and QSFP+ with revision id >= 0x3 (byte 1,
		 * shifted down by 8), use the SFF-8636 layout.
		 */
		if ((data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) == SFF_8024_ID_QSFP28 ||
		    ((data & MLX5_EEPROM_REVISION_ID_BYTE_MASK) >> 8) >= 0x3) {
			eeprom->type = MLX5E_ETH_MODULE_SFF_8636;
			eeprom->len = MLX5E_ETH_MODULE_SFF_8636_LEN;
		} else {
			eeprom->type = MLX5E_ETH_MODULE_SFF_8436;
			eeprom->len = MLX5E_ETH_MODULE_SFF_8436_LEN;
		}
		/*
		 * A cleared "page 3 valid" bit means the upper page can
		 * be read as well (bit semantics are inverted here -
		 * NOTE(review): presumably a "flat memory" flag; confirm
		 * against the SFF-8636 status byte definition).
		 */
		if ((data & MLX5_EEPROM_PAGE_3_VALID_BIT_MASK) == 0)
			eeprom->page_valid = 1;
		break;
	case SFF_8024_ID_SFP:
		eeprom->type = MLX5E_ETH_MODULE_SFF_8472;
		eeprom->len = MLX5E_ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		if_printf(priv->ifp, "%s:%d: Not recognized cable type = 0x%x\n",
		    __func__, __LINE__, data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK);
		return (EINVAL);
	}
	return (0);
}
|
||||
|
||||
/* Read both low and high pages of the eeprom */
|
||||
/*
 * Read the module EEPROM contents described by "ee" (filled in by
 * mlx5e_get_eeprom_info()) into ee->data: first the low page(s), then,
 * if ee->page_valid is set, the upper page.  The upper page bytes are
 * stored starting at byte offset ee->len in the buffer.
 * Returns 0 on success or an error code.
 */
static int
mlx5e_get_eeprom(struct mlx5e_priv *priv, struct mlx5e_eeprom *ee)
{
	struct mlx5_core_dev *dev = priv->mdev;
	int size_read = 0;	/* bytes returned by the last query */
	int ret;

	/* Caller must have classified the module first. */
	if (ee->len == 0)
		return (EINVAL);

	/* Read low page of the eeprom */
	while (ee->device_addr < ee->len) {
		/*
		 * Each query may return fewer bytes than requested;
		 * advance by the amount actually read.
		 * NOTE(review): a successful zero-byte read would loop
		 * forever - presumably firmware always makes progress;
		 * verify.
		 */
		ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num, ee->device_addr,
		    ee->len - ee->device_addr, ee->module_num,
		    ee->data + (ee->device_addr/4), &size_read);
		if (ret) {
			if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
			    "error = 0x%02x\n", __func__, __LINE__, ret);
			return (ret);
		}
		ee->device_addr += size_read;
	}

	/* Read high page of the eeprom */
	if (ee->page_valid) {
		ee->device_addr = MLX5E_EEPROM_HIGH_PAGE_OFFSET;
		ee->page_num = MLX5E_EEPROM_HIGH_PAGE;
		size_read = 0;
		while (ee->device_addr < MLX5E_EEPROM_PAGE_LENGTH) {
			/* Upper page data goes right after the low page. */
			ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num,
			    ee->device_addr, MLX5E_EEPROM_PAGE_LENGTH - ee->device_addr,
			    ee->module_num, ee->data + (ee->len/4) +
			    ((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET)/4),
			    &size_read);
			if (ret) {
				if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
				    "error = 0x%02x\n", __func__, __LINE__, ret);
				return (ret);
			}
			ee->device_addr += size_read;
		}
	}
	return (0);
}
|
||||
|
||||
static void
|
||||
mlx5e_print_eeprom(struct mlx5e_eeprom *eeprom)
|
||||
{
|
||||
int i, j = 0;
|
||||
int row = 0;
|
||||
|
||||
printf("\nOffset\t\tValues\n");
|
||||
printf("------\t\t------\n");
|
||||
while (row < eeprom->len) {
|
||||
printf("0x%04x\t\t",row);
|
||||
for (i = 0; i < 16; i++) {
|
||||
printf("%02x ", ((u8*)eeprom->data)[j]);
|
||||
j++;
|
||||
row++;
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
if (eeprom->page_valid) {
|
||||
row = MLX5E_EEPROM_HIGH_PAGE_OFFSET;
|
||||
printf("\nUpper Page 0x03\n");
|
||||
printf("\nOffset\t\tValues\n");
|
||||
printf("------\t\t------\n");
|
||||
while (row < MLX5E_EEPROM_PAGE_LENGTH) {
|
||||
printf("0x%04x\t\t",row);
|
||||
for (i = 0; i < 16; i++) {
|
||||
printf("%02x ", ((u8*)eeprom->data)[j]);
|
||||
j++;
|
||||
row++;
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Read cable EEPROM module information by first inspecting the first
|
||||
* three bytes to get the initial information for a whole reading.
|
||||
* Information will be printed to dmesg.
|
||||
*/
|
||||
/*
 * Sysctl handler behind "conf.eeprom_info".  Writing the value 1
 * triggers a full read of the cable EEPROM and dumps it to the console;
 * any other value is ignored.  Read failures are logged but reported to
 * userland as success so partial information can still be printed.
 */
static int
mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_priv *priv = arg1;
	struct mlx5e_eeprom eeprom;
	int error;
	int result = 0;

	PRIV_LOCK(priv);
	error = sysctl_handle_int(oidp, &result, 0, req);
	/* Done on reads and errors. */
	if (error || !req->newptr)
		goto done;

	/* Check if device is gone */
	if (priv->gone) {
		error = ENXIO;
		goto done;
	}

	if (result == 1) {
		/* Start at the low page of the two-wire interface. */
		eeprom.i2c_addr = MLX5E_I2C_ADDR_LOW;
		eeprom.device_addr = 0;
		eeprom.page_num = MLX5E_EEPROM_LOW_PAGE;
		eeprom.page_valid = 0;

		/* Read three first bytes to get important info */
		error = mlx5e_get_eeprom_info(priv, &eeprom);
		if (error) {
			if_printf(priv->ifp, "%s:%d: Failed reading eeprom's "
			    "initial information\n", __func__, __LINE__);
			/* Logged only; do not fail the sysctl. */
			error = 0;
			goto done;
		}

		/* Allocate needed length buffer and additional space for the 3rd */
		eeprom.data = malloc(eeprom.len + MLX5E_EEPROM_PAGE_LENGTH,
		    M_MLX5EN, M_WAITOK | M_ZERO);

		/* Read the whole eeprom information */
		error = mlx5e_get_eeprom(priv, &eeprom);
		if (error) {
			if_printf(priv->ifp, "%s:%d: Failed reading eeprom\n",
			    __func__, __LINE__);
			error = 0;
			/* Continue printing partial information in case of an error */
		}

		mlx5e_print_eeprom(&eeprom);
		free(eeprom.data, M_MLX5EN);
	}
done:
	PRIV_UNLOCK(priv);
	return (error);
}
|
||||
|
||||
/* Name/description string pairs for the "conf" sysctl parameters. */
static const char *mlx5e_params_desc[] = {
	MLX5E_PARAMS(MLX5E_STATS_DESC)
};

/* Name/description string pairs for the extended debug statistics. */
static const char *mlx5e_port_stats_debug_desc[] = {
	MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_DESC)
};
|
||||
|
||||
static int
|
||||
mlx5e_ethtool_debug_stats(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
struct mlx5e_priv *priv = arg1;
|
||||
int error;
|
||||
int sys_debug;
|
||||
|
||||
sys_debug = priv->sysctl_debug;
|
||||
error = sysctl_handle_int(oidp, &priv->sysctl_debug, 0, req);
|
||||
if (error || !req->newptr)
|
||||
return (error);
|
||||
priv->sysctl_debug = !!priv->sysctl_debug;
|
||||
if (sys_debug == priv->sysctl_debug)
|
||||
return (error);
|
||||
if (priv->sysctl_debug)
|
||||
mlx5e_create_stats(&priv->stats.port_stats_debug.ctx,
|
||||
SYSCTL_CHILDREN(priv->sysctl_ifnet), "debug_stats",
|
||||
mlx5e_port_stats_debug_desc, MLX5E_PORT_STATS_DEBUG_NUM,
|
||||
priv->stats.port_stats_debug.arg);
|
||||
else
|
||||
sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
 * Build the per-interface "conf" sysctl tree: seed the ethtool shadow
 * parameters from the current driver configuration, then register one
 * sysctl per tunable (read-only for *_max limits, RWTUN otherwise), the
 * debug statistics toggle, the PCI device name, and the EEPROM dump
 * trigger.  Called once at attach time.
 */
void
mlx5e_create_ethtool(struct mlx5e_priv *priv)
{
	struct sysctl_oid *node;
	const char *pnameunit;
	unsigned x;

	/* set some defaults */
	priv->params_ethtool.tx_queue_size_max = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
	priv->params_ethtool.rx_queue_size_max = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
	priv->params_ethtool.tx_queue_size = 1 << priv->params.log_sq_size;
	priv->params_ethtool.rx_queue_size = 1 << priv->params.log_rq_size;
	priv->params_ethtool.channels = priv->params.num_channels;
	priv->params_ethtool.coalesce_pkts_max = MLX5E_FLD_MAX(cqc, cq_max_count);
	priv->params_ethtool.coalesce_usecs_max = MLX5E_FLD_MAX(cqc, cq_period);
	priv->params_ethtool.rx_coalesce_mode = priv->params.rx_cq_moderation_mode;
	priv->params_ethtool.rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
	priv->params_ethtool.rx_coalesce_pkts = priv->params.rx_cq_moderation_pkts;
	priv->params_ethtool.tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
	priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
	priv->params_ethtool.hw_lro = priv->params.hw_lro_en;

	/* create root node */
	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "conf", CTLFLAG_RW, NULL, "Configuration");
	if (node == NULL)
		return;
	for (x = 0; x != MLX5E_PARAMS_NUM; x++) {
		/* check for read-only parameter */
		if (strstr(mlx5e_params_desc[2 * x], "_max") != NULL) {
			SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
			    mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RD |
			    CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
			    mlx5e_params_desc[2 * x + 1]);
		} else {
			/* tunable parameters use a common handler */
			SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
			    mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RWTUN |
			    CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
			    mlx5e_params_desc[2 * x + 1]);
		}
	}

	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
	    "debug_stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv,
	    0, &mlx5e_ethtool_debug_stats, "I", "Extended debug statistics");

	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(node),
	    OID_AUTO, "device_name", CTLFLAG_RD,
	    __DECONST(void *, pnameunit), 0,
	    "PCI device name");

	/* EEPROM support */
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO, "eeprom_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx5e_read_eeprom, "I", "EEPROM information");
}
|
||||
|
870
sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
Normal file
870
sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
Normal file
@ -0,0 +1,870 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include "en.h"
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <dev/mlx5/flow_table.h>
|
||||
|
||||
/* Filter matching mode for a receive flow rule. */
enum {
	MLX5E_FULLMATCH = 0,	/* match one full destination MAC */
	MLX5E_ALLMULTI = 1,	/* match all multicast traffic */
	MLX5E_PROMISC = 2,	/* match everything */
};

/* Classification of an ethernet destination address. */
enum {
	MLX5E_UC = 0,		/* unicast */
	MLX5E_MC_IPV4 = 1,	/* IPv4-mapped multicast (01:00:5e:xx) */
	MLX5E_MC_IPV6 = 2,	/* IPv6-mapped multicast (33:33:xx) */
	MLX5E_MC_OTHER = 3,	/* any other multicast */
};

/* Pending synchronization action for an address hash entry. */
enum {
	MLX5E_ACTION_NONE = 0,	/* entry is in sync with hardware */
	MLX5E_ACTION_ADD = 1,	/* entry must be added to hardware */
	MLX5E_ACTION_DEL = 2,	/* entry must be removed from hardware */
};

/* One tracked ethernet address, linked into a hash bucket. */
struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;	/* bucket linkage */
	u8 action;		/* one of MLX5E_ACTION_* */
	struct mlx5e_eth_addr_info ai;	/* address and flow table state */
};
|
||||
|
||||
static inline int
|
||||
mlx5e_hash_eth_addr(const u8 * addr)
|
||||
{
|
||||
return (addr[5]);
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
|
||||
const u8 * addr)
|
||||
{
|
||||
struct mlx5e_eth_addr_hash_node *hn;
|
||||
int ix = mlx5e_hash_eth_addr(addr);
|
||||
|
||||
LIST_FOREACH(hn, &hash[ix], hlist) {
|
||||
if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
|
||||
if (hn->action == MLX5E_ACTION_DEL)
|
||||
hn->action = MLX5E_ACTION_NONE;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
|
||||
if (hn == NULL)
|
||||
return;
|
||||
|
||||
ether_addr_copy(hn->ai.addr, addr);
|
||||
hn->action = MLX5E_ACTION_ADD;
|
||||
|
||||
LIST_INSERT_HEAD(&hash[ix], hn, hlist);
|
||||
}
|
||||
|
||||
/*
 * Unlink a hash node from its bucket and release its memory.
 */
static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}
|
||||
|
||||
static void
|
||||
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
|
||||
struct mlx5e_eth_addr_info *ai)
|
||||
{
|
||||
void *ft = priv->ft.main;
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
|
||||
|
||||
if (ai->tt_vec & (1 << MLX5E_TT_ANY))
|
||||
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
|
||||
}
|
||||
|
||||
static int
|
||||
mlx5e_get_eth_addr_type(const u8 * addr)
|
||||
{
|
||||
if (ETHER_IS_MULTICAST(addr) == 0)
|
||||
return (MLX5E_UC);
|
||||
|
||||
if ((addr[0] == 0x01) &&
|
||||
(addr[1] == 0x00) &&
|
||||
(addr[2] == 0x5e) &&
|
||||
!(addr[3] & 0x80))
|
||||
return (MLX5E_MC_IPV4);
|
||||
|
||||
if ((addr[0] == 0x33) &&
|
||||
(addr[1] == 0x33))
|
||||
return (MLX5E_MC_IPV6);
|
||||
|
||||
return (MLX5E_MC_OTHER);
|
||||
}
|
||||
|
||||
static u32
|
||||
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
|
||||
{
|
||||
int eth_addr_type;
|
||||
u32 ret;
|
||||
|
||||
switch (type) {
|
||||
case MLX5E_FULLMATCH:
|
||||
eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
|
||||
switch (eth_addr_type) {
|
||||
case MLX5E_UC:
|
||||
ret =
|
||||
(1 << MLX5E_TT_IPV4_TCP) |
|
||||
(1 << MLX5E_TT_IPV6_TCP) |
|
||||
(1 << MLX5E_TT_IPV4_UDP) |
|
||||
(1 << MLX5E_TT_IPV6_UDP) |
|
||||
(1 << MLX5E_TT_IPV4) |
|
||||
(1 << MLX5E_TT_IPV6) |
|
||||
(1 << MLX5E_TT_ANY) |
|
||||
0;
|
||||
break;
|
||||
|
||||
case MLX5E_MC_IPV4:
|
||||
ret =
|
||||
(1 << MLX5E_TT_IPV4_UDP) |
|
||||
(1 << MLX5E_TT_IPV4) |
|
||||
0;
|
||||
break;
|
||||
|
||||
case MLX5E_MC_IPV6:
|
||||
ret =
|
||||
(1 << MLX5E_TT_IPV6_UDP) |
|
||||
(1 << MLX5E_TT_IPV6) |
|
||||
0;
|
||||
break;
|
||||
|
||||
default:
|
||||
ret =
|
||||
(1 << MLX5E_TT_ANY) |
|
||||
0;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case MLX5E_ALLMULTI:
|
||||
ret =
|
||||
(1 << MLX5E_TT_IPV4_UDP) |
|
||||
(1 << MLX5E_TT_IPV6_UDP) |
|
||||
(1 << MLX5E_TT_IPV4) |
|
||||
(1 << MLX5E_TT_IPV6) |
|
||||
(1 << MLX5E_TT_ANY) |
|
||||
0;
|
||||
break;
|
||||
|
||||
default: /* MLX5E_PROMISC */
|
||||
ret =
|
||||
(1 << MLX5E_TT_IPV4_TCP) |
|
||||
(1 << MLX5E_TT_IPV6_TCP) |
|
||||
(1 << MLX5E_TT_IPV4_UDP) |
|
||||
(1 << MLX5E_TT_IPV6_UDP) |
|
||||
(1 << MLX5E_TT_IPV4) |
|
||||
(1 << MLX5E_TT_IPV6) |
|
||||
(1 << MLX5E_TT_ANY) |
|
||||
0;
|
||||
break;
|
||||
}
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
 * Build and install the flow table entries for one address (or for the
 * allmulti/promisc catch-alls).  The caller provides zeroed
 * flow_context and match_criteria scratch buffers; this function fills
 * in the match fields for the requested rule type and then installs one
 * entry per traffic type selected by mlx5e_get_tt_vec(), steering each
 * to the corresponding TIR.  Successfully installed entries are
 * recorded in ai->tt_vec / ai->ft_ix; on any failure all entries added
 * so far are removed again and the error is returned.
 */
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;			/* DMAC field inside match_value */
	u8 *match_criteria_dmac;	/* DMAC mask inside match_criteria */
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;		/* TIR numbers indexed by MLX5E_TT_* */
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
	    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
	    outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* All rules forward to a single TIR destination. */
	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		/* Match the complete destination MAC. */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* Match only the multicast bit of the DMAC. */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* No match criteria: catch everything. */
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* Catch-all rule: no ethertype/protocol match yet. */
	if (tt_vec & (1 << MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_ANY);
	}

	/* From here on also match the ethertype. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ethertype);

	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
	}

	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
	}

	/* From here on also match the IP protocol; first UDP. */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_UDP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
	}

	/* Then TCP. */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
	    IPPROTO_TCP);

	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
	}
	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
		    ETHERTYPE_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
		    tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
		    match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
		if (err) {
			mlx5e_del_eth_addr_from_flow_table(priv, ai);
			return (err);
		}
		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
	}
	return (0);
}
|
||||
|
||||
static int
|
||||
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
|
||||
struct mlx5e_eth_addr_info *ai, int type)
|
||||
{
|
||||
u32 *flow_context;
|
||||
u32 *match_criteria;
|
||||
int err;
|
||||
|
||||
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
|
||||
MLX5_ST_SZ_BYTES(dest_format_struct));
|
||||
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
|
||||
if (!flow_context || !match_criteria) {
|
||||
if_printf(priv->ifp, "%s: alloc failed\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto add_eth_addr_rule_out;
|
||||
}
|
||||
|
||||
err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
|
||||
match_criteria);
|
||||
if (err)
|
||||
if_printf(priv->ifp, "%s: failed\n", __func__);
|
||||
|
||||
add_eth_addr_rule_out:
|
||||
kvfree(match_criteria);
|
||||
kvfree(flow_context);
|
||||
return (err);
|
||||
}
|
||||
|
||||
/* Kinds of VLAN flow rules that can be installed. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* match untagged frames */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* match any VLAN tag */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* match one specific VLAN ID */
};
|
||||
|
||||
/*
 * Install one VLAN flow rule of the given type into the VLAN flow
 * table; matching frames are forwarded to the main flow table.  The
 * flow table index of the new entry is stored in the per-rule-type slot
 * inside priv->vlan so mlx5e_del_vlan_rule() can remove it later.
 * "vid" is only used for MLX5E_VLAN_RULE_TYPE_MATCH_VID.
 * Returns 0 on success or a negative error code.
 */
static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;		/* where to record the new entry's index */
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
	    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Forward matching frames to the main flow table. */
	MLX5_SET(flow_context, flow_context, action,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
	    MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
	    mlx5_get_flow_table_id(priv->ft.main));

	/* All rule types match on the presence/absence of a VLAN tag. */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
	    outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* vlan_tag stays 0 in the match value: untagged only. */
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
		    1);
		/* Additionally match the exact VLAN ID. */
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
		    outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
		    vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
	    match_criteria, flow_context, ft_ix);
	if (err)
		if_printf(priv->ifp, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return (err);
}
|
||||
|
||||
static void
|
||||
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
|
||||
enum mlx5e_vlan_rule_type rule_type, u16 vid)
|
||||
{
|
||||
switch (rule_type) {
|
||||
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
|
||||
mlx5_del_flow_table_entry(priv->ft.vlan,
|
||||
priv->vlan.untagged_rule_ft_ix);
|
||||
break;
|
||||
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
|
||||
mlx5_del_flow_table_entry(priv->ft.vlan,
|
||||
priv->vlan.any_vlan_rule_ft_ix);
|
||||
break;
|
||||
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
|
||||
mlx5_del_flow_table_entry(priv->ft.vlan,
|
||||
priv->vlan.active_vlans_ft_ix[vid]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
|
||||
{
|
||||
if (priv->vlan.filter_disabled) {
|
||||
priv->vlan.filter_disabled = false;
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
|
||||
0);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
|
||||
{
|
||||
if (!priv->vlan.filter_disabled) {
|
||||
priv->vlan.filter_disabled = true;
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
|
||||
0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * VLAN registration callback.  Records "vid" as active and, if the
 * interface is up, installs the matching hardware flow rule.
 * NOTE(review): a failure of mlx5e_add_vlan_rule() is ignored here -
 * presumably intentional since this callback cannot return an error.
 */
void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	/* Ignore events meant for another interface. */
	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	set_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}
|
||||
|
||||
void
|
||||
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
|
||||
{
|
||||
struct mlx5e_priv *priv = arg;
|
||||
|
||||
if (ifp != priv->ifp)
|
||||
return;
|
||||
|
||||
PRIV_LOCK(priv);
|
||||
clear_bit(vid, priv->vlan.active_vlans);
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
|
||||
PRIV_UNLOCK(priv);
|
||||
}
|
||||
|
||||
int
|
||||
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
|
||||
{
|
||||
u16 vid;
|
||||
int err;
|
||||
|
||||
for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
|
||||
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
|
||||
vid);
|
||||
if (err)
|
||||
return (err);
|
||||
}
|
||||
|
||||
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
|
||||
if (err)
|
||||
return (err);
|
||||
|
||||
if (priv->vlan.filter_disabled) {
|
||||
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
|
||||
0);
|
||||
if (err)
|
||||
return (err);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
|
||||
{
|
||||
u16 vid;
|
||||
|
||||
if (priv->vlan.filter_disabled)
|
||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
|
||||
|
||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
|
||||
|
||||
for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
|
||||
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
|
||||
}
|
||||
|
||||
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
|
||||
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
|
||||
LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
|
||||
|
||||
static void
|
||||
mlx5e_execute_action(struct mlx5e_priv *priv,
|
||||
struct mlx5e_eth_addr_hash_node *hn)
|
||||
{
|
||||
switch (hn->action) {
|
||||
case MLX5E_ACTION_ADD:
|
||||
mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
|
||||
hn->action = MLX5E_ACTION_NONE;
|
||||
break;
|
||||
|
||||
case MLX5E_ACTION_DEL:
|
||||
mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
|
||||
mlx5e_del_eth_addr_from_hash(hn);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct ifnet *ifp = priv->ifp;
|
||||
struct ifaddr *ifa;
|
||||
struct ifmultiaddr *ifma;
|
||||
|
||||
/* XXX adding this entry might not be needed */
|
||||
mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
|
||||
LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));
|
||||
|
||||
if_addr_rlock(ifp);
|
||||
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
|
||||
if (ifa->ifa_addr->sa_family != AF_LINK)
|
||||
continue;
|
||||
mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
|
||||
LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
|
||||
}
|
||||
if_addr_runlock(ifp);
|
||||
|
||||
if_maddr_rlock(ifp);
|
||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
||||
if (ifma->ifma_addr->sa_family != AF_LINK)
|
||||
continue;
|
||||
mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
|
||||
LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
|
||||
}
|
||||
if_maddr_runlock(ifp);
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_eth_addr_hash_node *hn;
|
||||
struct mlx5e_eth_addr_hash_node *tmp;
|
||||
int i;
|
||||
|
||||
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
|
||||
mlx5e_execute_action(priv, hn);
|
||||
|
||||
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
|
||||
mlx5e_execute_action(priv, hn);
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_eth_addr_hash_node *hn;
|
||||
struct mlx5e_eth_addr_hash_node *tmp;
|
||||
int i;
|
||||
|
||||
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
|
||||
hn->action = MLX5E_ACTION_DEL;
|
||||
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
|
||||
hn->action = MLX5E_ACTION_DEL;
|
||||
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_sync_ifp_addr(priv);
|
||||
|
||||
mlx5e_apply_ifp_addr(priv);
|
||||
}
|
||||
|
||||
/*
 * Synchronize the hardware RX filtering state (promiscuous, allmulti,
 * broadcast and per-address rules) with the interface flags.  New
 * catch-all rules are installed before the per-address reconciliation
 * and stale ones removed after it, so matching traffic is never
 * dropped during the transition.  Caller holds PRIV_LOCK.
 */
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	/* desired state: everything is off while the interface is down */
	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* edge detection against the previously applied state */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	/* install newly required catch-all rules first */
	if (enable_promisc)
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	/* reconcile the per-address unicast/multicast rules */
	mlx5e_handle_ifp_addr(priv);

	/* remove rules that are no longer wanted */
	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

	/* remember what was applied for the next edge detection */
	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;
}
|
||||
|
||||
void
|
||||
mlx5e_set_rx_mode_work(struct work_struct *work)
|
||||
{
|
||||
struct mlx5e_priv *priv =
|
||||
container_of(work, struct mlx5e_priv, set_rx_mode_work);
|
||||
|
||||
PRIV_LOCK(priv);
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_set_rx_mode_core(priv);
|
||||
PRIV_UNLOCK(priv);
|
||||
}
|
||||
|
||||
/*
 * Create the main RX flow table with nine match groups, ordered from
 * most to least specific so that exact matches win over wildcards:
 *
 *  g[0..2]  wildcard DMAC:   ethertype+protocol, ethertype only, any
 *  g[3..5]  full DMAC match: ethertype+protocol, ethertype only, DMAC only
 *  g[6..8]  multicast bit (dmac[0] == 0x01): same three specificities
 *
 * Returns 0 on success or -ENOMEM when table creation fails.
 */
static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;
	u8 *dmac;

	g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc(9) does not return NULL; this
	 * check looks redundant - confirm before removing. */
	if (g == NULL)
		return (-ENOMEM);

	/* group 0: match ethertype + IP protocol, any DMAC */
	g[0].log_sz = 2;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.ip_protocol);

	/* group 1: match ethertype only */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.ethertype);

	/* group 2: match anything (single catch-all entry) */
	g[2].log_sz = 0;

	/* group 3: full DMAC + ethertype + IP protocol */
	g[3].log_sz = 14;
	g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
	    outer_headers.ip_protocol);

	/* group 4: full DMAC + ethertype */
	g[4].log_sz = 13;
	g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
	    outer_headers.ethertype);

	/* group 5: full DMAC only */
	g[5].log_sz = 11;
	g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
	    outer_headers.dmac_47_16);
	memset(dmac, 0xff, ETH_ALEN);

	/* group 6: multicast bit + ethertype + IP protocol */
	g[6].log_sz = 2;
	g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
	    outer_headers.ip_protocol);

	/* group 7: multicast bit + ethertype */
	g[7].log_sz = 1;
	g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
	    outer_headers.ethertype);

	/* group 8: multicast bit only */
	g[8].log_sz = 0;
	g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
	    outer_headers.dmac_47_16);
	dmac[0] = 0x01;
	priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       0, 9, g);
	free(g, M_MLX5EN);

	return (priv->ft.main ? 0 : -ENOMEM);
}
|
||||
|
||||
/* Destroy the main RX flow table and clear the stale handle. */
static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
	priv->ft.main = NULL;
}
|
||||
|
||||
/*
 * Create the VLAN RX flow table with two match groups: group 0 matches
 * on vlan_tag + first_vid (per-VID rules), group 1 matches on vlan_tag
 * alone (untagged and "any VLAN id" rules).
 *
 * Returns 0 on success or -ENOMEM when table creation fails.
 */
static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_group *g;

	g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc(9) does not return NULL; this
	 * check looks redundant - confirm before removing. */
	if (g == NULL)
		return (-ENOMEM);

	/* per-VID group: vlan_tag + first_vid */
	g[0].log_sz = 12;
	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
	    outer_headers.first_vid);

	/* untagged + any vlan id */
	g[1].log_sz = 1;
	g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
	    outer_headers.vlan_tag);

	priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
					       MLX5_FLOW_TABLE_TYPE_NIC_RCV,
					       0, 2, g);
	free(g, M_MLX5EN);

	return (priv->ft.vlan ? 0 : -ENOMEM);
}
|
||||
|
||||
/* Destroy the VLAN RX flow table and clear the stale handle. */
static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
	priv->ft.vlan = NULL;
}
|
||||
|
||||
/*
 * Create the main and VLAN RX flow tables.  On partial failure any
 * already-created table is torn down again.
 *
 * Returns 0 on success or the first creation error.
 */
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int error;

	error = mlx5e_create_main_flow_table(priv);
	if (error != 0)
		return (error);

	error = mlx5e_create_vlan_flow_table(priv);
	if (error != 0) {
		/* unwind: the main table must not outlive this failure */
		mlx5e_destroy_main_flow_table(priv);
		return (error);
	}
	return (0);
}
|
||||
|
||||
/* Tear down both RX flow tables in reverse order of creation. */
void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}
|
2902
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
Normal file
2902
sys/dev/mlx5/mlx5_en/mlx5_en_main.c
Normal file
File diff suppressed because it is too large
Load Diff
340
sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
Normal file
340
sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
Normal file
@ -0,0 +1,340 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include "en.h"
|
||||
#include <machine/in_cksum.h>
|
||||
|
||||
/*
 * Allocate and DMA-map a receive buffer (mbuf cluster) for RX WQE "ix"
 * and write its bus address into the WQE.  A no-op when slot "ix"
 * already holds an mbuf.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation or
 * mapping failure).
 */
static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
    struct mlx5e_rx_wqe *wqe, u16 ix)
{
	bus_dma_segment_t segs[1];
	struct mbuf *mb;
	int nsegs;
	int err;

	/* slot already populated - nothing to do */
	if (rq->mbuf[ix].mbuf != NULL)
		return (0);

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
	if (unlikely(!mb))
		return (-ENOMEM);

	/* set initial mbuf length */
	mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;

	/* get IP header aligned */
	m_adj(mb, MLX5E_NET_IP_ALIGN);

	/* negate: bus_dma returns positive errno, caller expects negative */
	err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0)
		goto err_free_mbuf;
	if (unlikely(nsegs != 1)) {
		/* buffer must be physically contiguous for a single WQE */
		bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
		err = -ENOMEM;
		goto err_free_mbuf;
	}
	wqe->data.addr = cpu_to_be64(segs[0].ds_addr);

	rq->mbuf[ix].mbuf = mb;
	/* remember the payload start for the small-packet copy path */
	rq->mbuf[ix].data = mb->m_data;

	bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
	    BUS_DMASYNC_PREREAD);
	return (0);

err_free_mbuf:
	m_freem(mb);
	return (err);
}
|
||||
|
||||
static void
|
||||
mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
|
||||
{
|
||||
if (unlikely(rq->enabled == 0))
|
||||
return;
|
||||
|
||||
while (!mlx5_wq_ll_is_full(&rq->wq)) {
|
||||
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);
|
||||
|
||||
if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head)))
|
||||
break;
|
||||
|
||||
mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
|
||||
}
|
||||
|
||||
/* ensure wqes are visible to device before updating doorbell record */
|
||||
wmb();
|
||||
|
||||
mlx5_wq_ll_update_db_record(&rq->wq);
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
|
||||
{
|
||||
/* TODO: consider vlans, ip options, ... */
|
||||
struct ether_header *eh;
|
||||
uint16_t eh_type;
|
||||
struct ip6_hdr *ip6 = NULL;
|
||||
struct ip *ip4 = NULL;
|
||||
struct tcphdr *th;
|
||||
uint32_t *ts_ptr;
|
||||
|
||||
eh = mtod(mb, struct ether_header *);
|
||||
eh_type = ntohs(eh->ether_type);
|
||||
|
||||
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
|
||||
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
|
||||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
|
||||
|
||||
/* TODO: consider vlan */
|
||||
u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
|
||||
|
||||
switch (eh_type) {
|
||||
case ETHERTYPE_IP:
|
||||
ip4 = (struct ip *)(eh + 1);
|
||||
th = (struct tcphdr *)(ip4 + 1);
|
||||
break;
|
||||
case ETHERTYPE_IPV6:
|
||||
ip6 = (struct ip6_hdr *)(eh + 1);
|
||||
th = (struct tcphdr *)(ip6 + 1);
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
ts_ptr = (uint32_t *)(th + 1);
|
||||
|
||||
if (get_cqe_lro_tcppsh(cqe))
|
||||
th->th_flags |= TH_PUSH;
|
||||
|
||||
if (tcp_ack) {
|
||||
th->th_flags |= TH_ACK;
|
||||
th->th_ack = cqe->lro_ack_seq_num;
|
||||
th->th_win = cqe->lro_tcp_win;
|
||||
|
||||
/* FreeBSD handles only 32bit aligned timestamp
|
||||
* right after the TCP hdr
|
||||
* +--------+--------+--------+--------+
|
||||
* | NOP | NOP | TSopt | 10 |
|
||||
* +--------+--------+--------+--------+
|
||||
* | TSval timestamp |
|
||||
* +--------+--------+--------+--------+
|
||||
* | TSecr timestamp |
|
||||
* +--------+--------+--------+--------+
|
||||
*/
|
||||
if (get_cqe_lro_timestamp_valid(cqe) &&
|
||||
(__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
|
||||
TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
|
||||
TCPOLEN_TIMESTAMP))) {
|
||||
/* cqe->timestamp is 64bit long.
|
||||
* [0-31] - timestamp.
|
||||
* [32-64] - timestamp echo replay.
|
||||
*/
|
||||
ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
|
||||
ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (ip4) {
|
||||
ip4->ip_ttl = cqe->lro_min_ttl;
|
||||
ip4->ip_len = cpu_to_be16(tot_len);
|
||||
ip4->ip_sum = 0;
|
||||
ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
|
||||
} else {
|
||||
ip6->ip6_hlim = cqe->lro_min_ttl;
|
||||
ip6->ip6_plen = cpu_to_be16(tot_len -
|
||||
sizeof(struct ip6_hdr));
|
||||
}
|
||||
/* TODO: handle tcp checksum */
|
||||
}
|
||||
|
||||
/*
 * Finalize a received mbuf from completion-entry metadata: fix LRO
 * headers for multi-segment sessions, set the packet length, RSS
 * flow id, checksum-offload flags and any VLAN tag.
 */
static inline void
mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
    struct mlx5e_rq *rq, struct mbuf *mb,
    u32 cqe_bcnt)
{
	struct ifnet *ifp = rq->ifp;
	int lro_num_seg;	/* HW LRO session aggregated packets counter */

	/* segment count is carried in the top byte of the srqn field */
	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(mb, cqe);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
	/* check if a Toeplitz hash was computed */
	if (cqe->rss_hash_type != 0)
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
	else
		/* fall back to the ring index so the flow stays pinned */
		mb->m_pkthdr.flowid = rq->ix;
	M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
	mb->m_pkthdr.rcvif = ifp;

	/* mark checksums valid only when L2, L3 and L4 all verified OK */
	if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
	    ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
	    (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
		mb->m_pkthdr.csum_flags =
		    CSUM_IP_CHECKED | CSUM_IP_VALID |
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mb->m_pkthdr.csum_data = htons(0xffff);
	} else {
		rq->stats.csum_none++;
	}

	/* propagate a hardware-stripped VLAN tag, if any */
	if (cqe_has_vlan(cqe)) {
		mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
		mb->m_flags |= M_VLANTAG;
	}
}
|
||||
|
||||
/*
 * Drain up to "budget" completions from the RX completion queue,
 * handing finished packets to LRO or directly to the stack, and
 * recycling the corresponding receive WQEs.
 *
 * Returns the number of completions processed (< budget means the
 * CQ is empty).
 */
static int
mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
{
#ifndef HAVE_TURBO_LRO
	struct lro_entry *queued;
#endif
	int i;

	for (i = 0; i < budget; i++) {
		struct mlx5e_rx_wqe *wqe;
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		__be16 wqe_counter_be;
		u16 wqe_counter;
		u32 byte_cnt;

		cqe = mlx5e_get_cqe(&rq->cq);
		if (!cqe)
			break;

		/* keep the big-endian counter for the later wq_ll_pop */
		wqe_counter_be = cqe->wqe_counter;
		wqe_counter = be16_to_cpu(wqe_counter_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
		byte_cnt = be32_to_cpu(cqe->byte_cnt);

		/* make the DMA'd packet data visible to the CPU */
		bus_dmamap_sync(rq->dma_tag,
		    rq->mbuf[wqe_counter].dma_map,
		    BUS_DMASYNC_POSTREAD);

		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
			rq->stats.wqe_err++;
			/* still recycle the WQE slot */
			goto wq_ll_pop;
		}

		/*
		 * Small packets are copied into a fresh header mbuf so
		 * the large cluster stays attached to the ring slot;
		 * large packets hand over the cluster itself.
		 */
		if (MHLEN >= byte_cnt &&
		    (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
			bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
			    byte_cnt);
		} else {
			mb = rq->mbuf[wqe_counter].mbuf;
			rq->mbuf[wqe_counter].mbuf = NULL;	/* safety clear */

			bus_dmamap_unload(rq->dma_tag,
			    rq->mbuf[wqe_counter].dma_map);
		}

		mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
		rq->stats.packets++;
#ifdef HAVE_TURBO_LRO
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
		    rq->lro.mbuf == NULL) {
			/* normal input */
			rq->ifp->if_input(rq->ifp, mb);
		} else {
			tcp_tlro_rx(&rq->lro, mb);
		}
#else
		/* try software LRO; on rejection fall back to if_input */
		if (mb->m_pkthdr.csum_flags == 0 ||
		    (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
		    rq->lro.lro_cnt == 0 ||
		    tcp_lro_rx(&rq->lro, mb, 0) != 0) {
			rq->ifp->if_input(rq->ifp, mb);
		}
#endif
wq_ll_pop:
		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		    &wqe->next.next_wqe_index);
	}

	mlx5_cqwq_update_db_record(&rq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();
#ifndef HAVE_TURBO_LRO
	/* flush any LRO sessions accumulated during this poll */
	while ((queued = SLIST_FIRST(&rq->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&rq->lro.lro_active, next);
		tcp_lro_flush(&rq->lro, queued);
	}
#endif
	return (i);
}
|
||||
|
||||
/*
 * RX completion-queue interrupt handler: repeatedly poll the CQ and
 * repost receive WQEs until the CQ runs dry or the overall budget is
 * exhausted, then re-arm the CQ for the next interrupt.
 */
void
mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
	int i = 0;

#ifdef HAVE_PER_CQ_EVENT_PACKET
	struct mbuf *mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
	if (mb != NULL) {
		/* this code is used for debugging purpose only */
		mb->m_pkthdr.len = mb->m_len = 15;
		memset(mb->m_data, 255, 14);
		mb->m_data[14] = rq->ix;
		mb->m_pkthdr.rcvif = rq->ifp;
		rq->ifp->if_input(rq->ifp, mb);
	}
#endif

	mtx_lock(&rq->mtx);

	/*
	 * Polling the entire CQ without posting new WQEs results in
	 * lack of receive WQEs during heavy traffic scenarios.
	 */
	while (1) {
		if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
		    MLX5E_RX_BUDGET_MAX)
			break;
		i += MLX5E_RX_BUDGET_MAX;
		/* cap total work per invocation */
		if (i >= MLX5E_BUDGET_MAX)
			break;
		mlx5e_post_rx_wqes(rq);
	}
	/* final refill before re-arming the CQ */
	mlx5e_post_rx_wqes(rq);
	mlx5e_cq_arm(&rq->cq);
#ifdef HAVE_TURBO_LRO
	tcp_tlro_flush(&rq->lro, 1);
#endif
	mtx_unlock(&rq->mtx);
}
|
485
sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
Normal file
485
sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
Normal file
@ -0,0 +1,485 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include "en.h"
|
||||
#include <machine/atomic.h>
|
||||
|
||||
/*
 * Post a NOP work request of "ds_cnt" data segments to the send queue,
 * typically used to pad the ring so a real WQE does not wrap around
 * the queue edge.  When "notify_hw" is set the doorbell is rung
 * immediately.
 */
void
mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt, bool notify_hw)
{
	u16 pi = sq->pc & sq->wq.sz_m1;	/* producer index within the ring */
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* no mbuf or byte accounting for a NOP */
	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += sq->mbuf[pi].num_wqebbs;
	if (notify_hw)
		mlx5e_tx_notify_hw(sq, wqe, 0);
}
|
||||
|
||||
#if (__FreeBSD_version >= 1100000)
|
||||
static uint32_t mlx5e_hash_value;
|
||||
|
||||
static void
|
||||
mlx5e_hash_init(void *arg)
|
||||
{
|
||||
mlx5e_hash_value = m_ether_tcpip_hash_init();
|
||||
}
|
||||
|
||||
/* Make kernel call mlx5e_hash_init after the random stack finished initializing */
|
||||
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
|
||||
#endif
|
||||
|
||||
/*
 * Pick the send queue for an outgoing mbuf: the traffic class comes
 * from the VLAN priority bits (or the default), the channel from the
 * mbuf flow id or a computed TCP/IP hash.
 *
 * Returns NULL when channels are not (yet) available.
 */
static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	u32 ch;
	u32 tc;

	/* check if channels are successfully opened */
	if (unlikely(priv->channel == NULL))
		return (NULL);

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		/* top 3 bits of the VLAN tag are the priority (PCP) */
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
	} else {
		tc = priv->default_vlan_prio;
	}

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
		ch = (mb->m_pkthdr.flowid % 128) % ch;
	} else {
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
#else
		/*
		 * m_ether_tcpip_hash not present in stable, so just
		 * throw unhashed mbufs on queue 0
		 */
		ch = 0;
#endif
	}

	/* check if channel is allocated */
	if (unlikely(priv->channel[ch] == NULL))
		return (NULL);

	return (&priv->channel[ch]->sq[tc]);
}
|
||||
|
||||
static inline u16
|
||||
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
|
||||
{
|
||||
return (MIN(MLX5E_MAX_TX_INLINE, mb->m_len));
|
||||
}
|
||||
|
||||
/*
 * Compute the combined Ethernet + IP + TCP header length of a packet,
 * for TSO header inlining.  Every field accessed must lie within the
 * first mbuf (m_len); on any bounds violation, non-TCP payload or
 * unknown ethertype the function returns 0 (meaning "no TSO header").
 */
static int
mlx5e_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	/* account for an optional 802.1Q tag */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		/* IPv4 header length includes options */
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		/* NOTE(review): extension headers are not walked; a packet
		 * with them yields 0 here - confirm this is intended. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	/* TCP header length includes options */
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
|
||||
|
||||
/* The return value is not going back to the stack because of
|
||||
* the drbr */
|
||||
static int
|
||||
mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
|
||||
{
|
||||
bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
|
||||
struct mlx5_wqe_data_seg *dseg;
|
||||
struct mlx5e_tx_wqe *wqe;
|
||||
struct ifnet *ifp;
|
||||
int nsegs;
|
||||
int err;
|
||||
int x;
|
||||
struct mbuf *mb = *mbp;
|
||||
u16 ds_cnt;
|
||||
u16 ihs;
|
||||
u16 pi;
|
||||
u8 opcode;
|
||||
|
||||
/* Return ENOBUFS if the queue is full, this may trigger reinsertion
|
||||
* of the mbuf into the drbr (see mlx5e_xmit_locked) */
|
||||
if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
|
||||
return (ENOBUFS);
|
||||
}
|
||||
|
||||
/* Align SQ edge with NOPs to avoid WQE wrap around */
|
||||
pi = ((~sq->pc) & sq->wq.sz_m1);
|
||||
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
|
||||
/* send one multi NOP message instead of many */
|
||||
mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
|
||||
pi = ((~sq->pc) & sq->wq.sz_m1);
|
||||
if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
|
||||
m_freem(mb);
|
||||
return (ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
/* Setup local variables */
|
||||
pi = sq->pc & sq->wq.sz_m1;
|
||||
wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
|
||||
ifp = sq->channel->ifp;
|
||||
|
||||
memset(wqe, 0, sizeof(*wqe));
|
||||
|
||||
/* send a copy of the frame to the BPF listener, if any */
|
||||
if (ifp != NULL && ifp->if_bpf != NULL)
|
||||
ETHER_BPF_MTAP(ifp, mb);
|
||||
|
||||
if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
|
||||
wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
|
||||
}
|
||||
if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
|
||||
wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
|
||||
}
|
||||
if ( wqe->eth.cs_flags == 0 ) {
|
||||
sq->stats.csum_offload_none++;
|
||||
}
|
||||
|
||||
if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
|
||||
u32 payload_len;
|
||||
u32 mss = mb->m_pkthdr.tso_segsz;
|
||||
u32 num_pkts;
|
||||
|
||||
wqe->eth.mss = cpu_to_be16(mss);
|
||||
opcode = MLX5_OPCODE_LSO;
|
||||
ihs = mlx5e_get_header_size(mb);
|
||||
payload_len = mb->m_pkthdr.len - ihs;
|
||||
if (payload_len == 0)
|
||||
num_pkts = 1;
|
||||
else
|
||||
num_pkts = DIV_ROUND_UP(payload_len, mss);
|
||||
sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);
|
||||
|
||||
sq->stats.tso_packets++;
|
||||
sq->stats.tso_bytes += payload_len;
|
||||
} else {
|
||||
opcode = MLX5_OPCODE_SEND;
|
||||
ihs = mlx5e_get_inline_hdr_size(sq, mb);
|
||||
sq->mbuf[pi].num_bytes = max_t (unsigned int,
|
||||
mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
|
||||
}
|
||||
if (mb->m_flags & M_VLANTAG) {
|
||||
struct ether_vlan_header *eh =
|
||||
(struct ether_vlan_header *)wqe->eth.inline_hdr_start;
|
||||
/* range checks */
|
||||
if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
|
||||
ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
|
||||
else if (ihs < ETHER_HDR_LEN) {
|
||||
err = EINVAL;
|
||||
goto tx_drop;
|
||||
}
|
||||
m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
|
||||
m_adj(mb, ETHER_HDR_LEN);
|
||||
/* insert 4 bytes VLAN tag into data stream */
|
||||
eh->evl_proto = eh->evl_encap_proto;
|
||||
eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
|
||||
eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
|
||||
/* copy rest of header data, if any */
|
||||
m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
|
||||
m_adj(mb, ihs - ETHER_HDR_LEN);
|
||||
/* extend header by 4 bytes */
|
||||
ihs += ETHER_VLAN_ENCAP_LEN;
|
||||
} else {
|
||||
m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
|
||||
m_adj(mb, ihs);
|
||||
}
|
||||
|
||||
wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
|
||||
|
||||
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
||||
if (likely(ihs > sizeof(wqe->eth.inline_hdr_start))) {
|
||||
ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
|
||||
MLX5_SEND_WQE_DS);
|
||||
}
|
||||
dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
|
||||
|
||||
/* trim off empty mbufs */
|
||||
while (mb->m_len == 0) {
|
||||
mb = m_free(mb);
|
||||
/* check if all data has been inlined */
|
||||
if (mb == NULL)
|
||||
goto skip_dma;
|
||||
}
|
||||
|
||||
err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
|
||||
mb, segs, &nsegs, BUS_DMA_NOWAIT);
|
||||
if (err == EFBIG) {
|
||||
/* Update *mbp before defrag in case it was trimmed in the loop above */
|
||||
*mbp = mb;
|
||||
/* Update statistics */
|
||||
sq->stats.defragged++;
|
||||
/* Too many mbuf fragments */
|
||||
mb = m_defrag(*mbp, M_NOWAIT);
|
||||
if (mb == NULL) {
|
||||
mb = *mbp;
|
||||
goto tx_drop;
|
||||
}
|
||||
/* Try again */
|
||||
err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
|
||||
mb, segs, &nsegs, BUS_DMA_NOWAIT);
|
||||
}
|
||||
/* catch errors */
|
||||
if (err != 0) {
|
||||
goto tx_drop;
|
||||
}
|
||||
*mbp = mb;
|
||||
|
||||
for (x = 0; x != nsegs; x++) {
|
||||
if (segs[x].ds_len == 0)
|
||||
continue;
|
||||
dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
|
||||
dseg->lkey = sq->mkey_be;
|
||||
dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
|
||||
dseg++;
|
||||
}
|
||||
skip_dma:
|
||||
ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));
|
||||
|
||||
wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
|
||||
wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
|
||||
wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
|
||||
|
||||
/* store pointer to mbuf */
|
||||
sq->mbuf[pi].mbuf = mb;
|
||||
sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
||||
sq->pc += sq->mbuf[pi].num_wqebbs;
|
||||
|
||||
/* make sure all mbuf data is written to RAM */
|
||||
if (mb != NULL)
|
||||
bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);
|
||||
|
||||
mlx5e_tx_notify_hw(sq, wqe, 0);
|
||||
|
||||
sq->stats.packets++;
|
||||
return (0);
|
||||
|
||||
tx_drop:
|
||||
sq->stats.dropped++;
|
||||
*mbp = NULL;
|
||||
m_freem(mb);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Reclaim completed transmit work from the send queue's completion
 * queue, processing at most "budget" CQEs.
 *
 * For every completion the corresponding mbuf, if any, is unmapped
 * and freed; NOP completions only update statistics.  After the CQ
 * doorbell record is updated the consumer counter is published, and
 * if the SQ was marked full it is transitioned back to ready and the
 * transmit task is re-queued to resume draining the buf ring.
 */
static void
mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
{
	u16 sqcc;

	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	while (budget--) {
		struct mlx5_cqe64 *cqe;
		struct mbuf *mb;
		u16 ci;

		cqe = mlx5e_get_cqe(&sq->cq);
		if (!cqe)
			break;

		ci = sqcc & sq->wq.sz_m1;
		mb = sq->mbuf[ci].mbuf;
		sq->mbuf[ci].mbuf = NULL;	/* safety clear */

		if (mb == NULL) {
			if (sq->mbuf[ci].num_bytes == 0) {
				/* NOP */
				sq->stats.nop++;
			}
		} else {
			bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

			/* free transmitted mbuf */
			m_freem(mb);
		}
		/* one send may span several WQE building blocks */
		sqcc += sq->mbuf[ci].num_wqebbs;
	}

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;

	/* restart the transmit task if the SQ was previously full */
	if (atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
}
|
||||
|
||||
/*
 * Transmit path; must be called with the SQ lock held.
 *
 * "mb" (may be NULL) is first enqueued on the SQ's buf ring, then the
 * ring is drained by handing packets to mlx5e_sq_xmit() until the
 * ring is empty, the hardware queue fills up, or the interface stops
 * running.  Returns 0, or the drbr_enqueue() error for "mb".
 */
static int
mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
{
	struct mbuf *next;
	int err = 0;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* interface is down; just queue the mbuf, if any */
		if (mb)
			err = drbr_enqueue(ifp, sq->br, mb);
		return (err);
	}

	if (mb != NULL)
		/* If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(ifp, sq->br, mb);

	/* Process the queue */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		if (mlx5e_sq_xmit(sq, &next) != 0) {
			if (next == NULL) {
				/* mbuf was consumed/freed by the xmit path */
				drbr_advance(ifp, sq->br);
			} else {
				/* no room in hardware queue; retry later */
				drbr_putback(ifp, sq->br, next);
				atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
			}
			break;
		}
		drbr_advance(ifp, sq->br);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	return (err);
}
|
||||
|
||||
/*
 * Network stack transmit entry point.
 *
 * Selects a send queue for the mbuf; when the SQ lock is immediately
 * available the packet is transmitted inline, otherwise it is queued
 * on the buf ring and the SQ's taskqueue picks it up.  Returns 0 on
 * success, ENXIO when no valid send queue exists, or the
 * drbr_enqueue()/xmit error.
 */
int
mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
{
	struct mlx5e_sq *sq;
	int ret;

	sq = mlx5e_select_queue(ifp, mb);
	if (unlikely(sq == NULL)) {
		/* invalid send queue */
		m_freem(mb);
		return (ENXIO);
	}

	if (mtx_trylock(&sq->lock)) {
		ret = mlx5e_xmit_locked(ifp, sq, mb);
		mtx_unlock(&sq->lock);
	} else {
		/* lock contended; defer transmission to the taskqueue */
		ret = drbr_enqueue(ifp, sq->br, mb);
		taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
	}

	return (ret);
}
|
||||
|
||||
/*
 * Transmit completion queue callback.  Polls completions under the
 * completion lock and re-arms the CQ for further events.
 */
void
mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
{
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq);
	mtx_unlock(&sq->comp_lock);
}
|
||||
|
||||
/*
 * Deferred transmit task.  Runs when the SQ lock was contended in
 * mlx5e_xmit() or when a full SQ becomes ready again; drains any
 * packets pending on the buf ring while the interface is running.
 */
void
mlx5e_tx_que(void *context, int pending)
{
	struct mlx5e_sq *sq = context;
	struct ifnet *ifp = sq->channel->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sq->lock);
		if (!drbr_empty(ifp, sq->br))
			mlx5e_xmit_locked(ifp, sq, NULL);
		mtx_unlock(&sq->lock);
	}
}
|
58
sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c
Normal file
58
sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c
Normal file
@ -0,0 +1,58 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#include "en.h"
|
||||
|
||||
/*
 * Fetch the next completion queue entry, if any.
 *
 * Returns NULL when the CQE at the current consumer index is still
 * owned by hardware (its ownership bit does not match the current
 * wrap count).  On success the CQ consumer index is advanced and a
 * read barrier is issued so the CQE contents are not read before the
 * ownership bit.
 */
struct mlx5_cqe64 *
mlx5e_get_cqe(struct mlx5e_cq *cq)
{
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));

	if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
		return (NULL);

	mlx5_cqwq_pop(&cq->wq);

	/* ensure cqe content is read after cqe ownership bit */
	rmb();

	return (cqe);
}
|
||||
|
||||
void
|
||||
mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event)
|
||||
{
|
||||
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
|
||||
struct mlx5e_channel *c = cq->channel;
|
||||
struct mlx5e_priv *priv = c->priv;
|
||||
struct ifnet *ifp = priv->ifp;
|
||||
|
||||
if_printf(ifp, "%s: cqn=0x%.6x event=0x%.2x\n",
|
||||
__func__, mcq->cqn, event);
|
||||
}
|
697
sys/dev/mlx5/mlx5_en/tcp_tlro.c
Normal file
697
sys/dev/mlx5/mlx5_en/tcp_tlro.c
Normal file
@ -0,0 +1,697 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include "opt_inet.h"
|
||||
#include "opt_inet6.h"
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/libkern.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/lock.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/malloc.h>
|
||||
#include <sys/kernel.h>
|
||||
#include <sys/endian.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/sockopt.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
#include <net/if.h>
|
||||
#include <net/if_var.h>
|
||||
#include <net/ethernet.h>
|
||||
|
||||
#if defined(INET) || defined(INET6)
|
||||
#include <netinet/in.h>
|
||||
#endif
|
||||
|
||||
#ifdef INET
|
||||
#include <netinet/ip.h>
|
||||
#endif
|
||||
|
||||
#ifdef INET6
|
||||
#include <netinet/ip6.h>
|
||||
#endif
|
||||
|
||||
#include <netinet/tcp_var.h>
|
||||
|
||||
#include "tcp_tlro.h"
|
||||
|
||||
#ifndef M_HASHTYPE_LRO_TCP
|
||||
#ifndef KLD_MODULE
|
||||
#warning "M_HASHTYPE_LRO_TCP is not defined"
|
||||
#endif
|
||||
#define M_HASHTYPE_LRO_TCP 254
|
||||
#endif
|
||||
|
||||
/* sysctl subtree: net.inet.tcp.tlro */
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, tlro,
    CTLFLAG_RW, 0, "TCP turbo LRO parameters");

/* malloc(9) type for the TLRO bookkeeping arrays */
static MALLOC_DEFINE(M_TLRO, "TLRO", "Turbo LRO");

/* minimum rate at which accumulated packets must be flushed upstream */
static int tlro_min_rate = 20;	/* Hz */

SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, min_rate, CTLFLAG_RWTUN,
    &tlro_min_rate, 0, "Minimum serving rate in Hz");

/* upper bound on the size of a combined LRO packet */
static int tlro_max_packet = IP_MAXPACKET;

SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, max_packet, CTLFLAG_RWTUN,
    &tlro_max_packet, 0, "Maximum packet size in bytes");

/* helper type permitting unaligned 32-bit loads while checksumming */
typedef struct {
	uint32_t value;
} __packed uint32_p_t;
|
||||
|
||||
static uint16_t
|
||||
tcp_tlro_csum(const uint32_p_t *p, size_t l)
|
||||
{
|
||||
const uint32_p_t *pend = p + (l / 4);
|
||||
uint64_t cs;
|
||||
|
||||
for (cs = 0; p != pend; p++)
|
||||
cs += le32toh(p->value);
|
||||
while (cs > 0xffff)
|
||||
cs = (cs >> 16) + (cs & 0xffff);
|
||||
return (cs);
|
||||
}
|
||||
|
||||
static void *
|
||||
tcp_tlro_get_header(const struct mbuf *m, const u_int off,
|
||||
const u_int len)
|
||||
{
|
||||
if (m->m_len < (off + len))
|
||||
return (NULL);
|
||||
return (mtod(m, char *) + off);
|
||||
}
|
||||
|
||||
/*
 * If the TCP header carries the well-known "appendix A" timestamp
 * option layout (two NOPs followed by TSopt), record the timestamp
 * and echo-reply values in "pinfo" and return 1; otherwise return 0.
 */
static uint8_t
tcp_tlro_info_save_timestamp(struct tlro_mbuf_data *pinfo)
{
	struct tcphdr *tcp = pinfo->tcp;
	uint32_t *ts_ptr;

	/* header must be long enough to carry the aligned option */
	if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
		return (0);

	ts_ptr = (uint32_t *)(tcp + 1);
	if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
	    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
		return (0);

	/* save timestamps (raw wire order, no byte swap) */
	pinfo->tcp_ts = ts_ptr[1];
	pinfo->tcp_ts_reply = ts_ptr[2];
	return (1);
}
|
||||
|
||||
/*
 * Overwrite the timestamp option of the packet described by "pinfoa"
 * with the values previously saved from "pinfob", so the combined
 * packet carries the newer timestamps.  No-op when "pinfoa" does not
 * carry the expected timestamp option layout.
 */
static void
tcp_tlro_info_restore_timestamp(struct tlro_mbuf_data *pinfoa,
    struct tlro_mbuf_data *pinfob)
{
	struct tcphdr *tcp = pinfoa->tcp;
	uint32_t *ts_ptr;

	if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
		return;

	ts_ptr = (uint32_t *)(tcp + 1);
	if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
	    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
		return;

	/* restore timestamps (values are kept in wire order) */
	ts_ptr[1] = pinfob->tcp_ts;
	ts_ptr[2] = pinfob->tcp_ts_reply;
}
|
||||
|
||||
/*
 * Parse the Ethernet/IP/TCP headers of "m" and fill in "pinfo".
 *
 * The distinguishing header fields (MAC addresses, ethertype,
 * optional VLAN tag, IP addresses, TCP ports, TCP header length and a
 * flag describing the timestamp option) are serialized into
 * pinfo->buf so candidate packets for aggregation can be grouped by a
 * flat memory compare.  "seq" is the arrival sequence number used to
 * keep the sort stable.  On any parse failure pinfo->buf_length is
 * set to zero, marking the packet as "cannot be combined".
 */
static void
tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
{
	uint8_t *phdr = (uint8_t *)pinfo->buf;
	struct ether_header *eh;
	struct ether_vlan_header *vlan;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *tcp;
	uint16_t etype;
	int diff;
	int off;

	/* fill in information */
	pinfo->head = m;
	pinfo->last_tick = ticks;
	pinfo->sequence = seq;
	pinfo->pprev = &m_last(m)->m_next;

	off = sizeof(*eh);
	if (m->m_len < off)
		goto error;
	eh = tcp_tlro_get_header(m, 0, sizeof(*eh));
	if (eh == NULL)
		goto error;
	/* serialize MAC addresses and ethertype into the compare buffer */
	memcpy(phdr, &eh->ether_dhost, ETHER_ADDR_LEN);
	phdr += ETHER_ADDR_LEN;
	memcpy(phdr, &eh->ether_type, sizeof(eh->ether_type));
	phdr += sizeof(eh->ether_type);
	etype = ntohs(eh->ether_type);

	if (etype == ETHERTYPE_VLAN) {
		/* include the VLAN tag in the compare buffer */
		vlan = tcp_tlro_get_header(m, off, sizeof(*vlan));
		if (vlan == NULL)
			goto error;
		memcpy(phdr, &vlan->evl_tag, sizeof(vlan->evl_tag) +
		    sizeof(vlan->evl_proto));
		phdr += sizeof(vlan->evl_tag) + sizeof(vlan->evl_proto);
		etype = ntohs(vlan->evl_proto);
		off += sizeof(*vlan) - sizeof(*eh);
	}
	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		/*
		 * Cannot LRO:
		 * - Non-IP packets
		 * - Fragmented packets
		 * - Packets with IPv4 options
		 * - Non-TCP packets
		 */
		ip = tcp_tlro_get_header(m, off, sizeof(*ip));
		if (ip == NULL ||
		    (ip->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 ||
		    (ip->ip_p != IPPROTO_TCP) ||
		    (ip->ip_hl << 2) != sizeof(*ip))
			goto error;

		/* Legacy IP has a header checksum that needs to be correct */
		if (!(m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)) {
			/* Verify IP header */
			if (tcp_tlro_csum((uint32_p_t *)ip, sizeof(*ip)) != 0xFFFF)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			else
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
				    CSUM_IP_VALID;
		}
		/* Only accept valid checksums */
		if (!(m->m_pkthdr.csum_flags & CSUM_IP_VALID) ||
		    !(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
			goto error;
		memcpy(phdr, &ip->ip_src, sizeof(ip->ip_src) +
		    sizeof(ip->ip_dst));
		phdr += sizeof(ip->ip_src) + sizeof(ip->ip_dst);
		/*
		 * Already-combined LRO packets may carry an IP length
		 * field capped at IP_MAXPACKET; use the mbuf length then.
		 */
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
			pinfo->ip_len = m->m_pkthdr.len - off;
		else
			pinfo->ip_len = ntohs(ip->ip_len);
		pinfo->ip_hdrlen = sizeof(*ip);
		pinfo->ip.v4 = ip;
		pinfo->ip_version = 4;
		off += sizeof(*ip);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		/*
		 * Cannot LRO:
		 * - Non-IP packets
		 * - Packets with IPv6 options
		 * - Non-TCP packets
		 */
		ip6 = tcp_tlro_get_header(m, off, sizeof(*ip6));
		if (ip6 == NULL || ip6->ip6_nxt != IPPROTO_TCP)
			goto error;
		if (!(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
			goto error;
		memcpy(phdr, &ip6->ip6_src, sizeof(struct in6_addr) +
		    sizeof(struct in6_addr));
		phdr += sizeof(struct in6_addr) + sizeof(struct in6_addr);
		if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
			pinfo->ip_len = m->m_pkthdr.len - off;
		else
			pinfo->ip_len = ntohs(ip6->ip6_plen) + sizeof(*ip6);
		pinfo->ip_hdrlen = sizeof(*ip6);
		pinfo->ip.v6 = ip6;
		pinfo->ip_version = 6;
		off += sizeof(*ip6);
		break;
#endif
	default:
		goto error;
	}
	tcp = tcp_tlro_get_header(m, off, sizeof(*tcp));
	if (tcp == NULL)
		goto error;
	memcpy(phdr, &tcp->th_sport, sizeof(tcp->th_sport) +
	    sizeof(tcp->th_dport));
	phdr += sizeof(tcp->th_sport) +
	    sizeof(tcp->th_dport);
	/* store TCP header length */
	*phdr++ = tcp->th_off;
	if (tcp->th_off < (sizeof(*tcp) >> 2))
		goto error;

	/* compute offset to data payload */
	pinfo->tcp_len = (tcp->th_off << 2);
	off += pinfo->tcp_len;

	/* store more info */
	pinfo->data_off = off;
	pinfo->tcp = tcp;

	/* try to save timestamp, if any */
	*phdr++ = tcp_tlro_info_save_timestamp(pinfo);

	/* verify offset and IP/TCP length */
	if (off > m->m_pkthdr.len ||
	    pinfo->ip_len < pinfo->tcp_len)
		goto error;

	/* compute data payload length */
	pinfo->data_len = (pinfo->ip_len - pinfo->tcp_len - pinfo->ip_hdrlen);

	/* trim any padded data */
	diff = (m->m_pkthdr.len - off) - pinfo->data_len;
	if (diff != 0) {
		if (diff < 0)
			goto error;
		else
			m_adj(m, -diff);
	}
	/* compute header length (in bytes) */
	pinfo->buf_length = phdr - (uint8_t *)pinfo->buf;
	/* zero-pad rest of buffer so flat compares are well-defined */
	memset(phdr, 0, TLRO_MAX_HEADER - pinfo->buf_length);
	return;
error:
	pinfo->buf_length = 0;
}
|
||||
|
||||
static int
|
||||
tcp_tlro_cmp64(const uint64_t *pa, const uint64_t *pb)
|
||||
{
|
||||
int64_t diff = 0;
|
||||
unsigned x;
|
||||
|
||||
for (x = 0; x != TLRO_MAX_HEADER / 8; x++) {
|
||||
/*
|
||||
* NOTE: Endianness does not matter in this
|
||||
* comparisation:
|
||||
*/
|
||||
diff = pa[x] - pb[x];
|
||||
if (diff != 0)
|
||||
goto done;
|
||||
}
|
||||
done:
|
||||
if (diff < 0)
|
||||
return (-1);
|
||||
else if (diff > 0)
|
||||
return (1);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
 * qsort() comparator used to group combinable packets.
 *
 * Orders by: flushed entries (head == NULL) last, then by serialized
 * header buffer, then by TCP sequence/ack numbers and finally by
 * arrival order, so packets that can be merged become adjacent and
 * appear in transmission order.
 */
static int
tcp_tlro_compare_header(const void *_ppa, const void *_ppb)
{
	const struct tlro_mbuf_ptr *ppa = _ppa;
	const struct tlro_mbuf_ptr *ppb = _ppb;
	struct tlro_mbuf_data *pinfoa = ppa->data;
	struct tlro_mbuf_data *pinfob = ppb->data;
	int ret;

	/* already-flushed entries (head == NULL) sort to the end */
	ret = (pinfoa->head == NULL) - (pinfob->head == NULL);
	if (ret != 0)
		goto done;

	ret = pinfoa->buf_length - pinfob->buf_length;
	if (ret != 0)
		goto done;
	if (pinfoa->buf_length != 0) {
		ret = tcp_tlro_cmp64(pinfoa->buf, pinfob->buf);
		if (ret != 0)
			goto done;
		/*
		 * NOTE(review): these 32-bit subtractions can overflow
		 * "int" when sequence numbers differ by more than 2^31,
		 * making the relative order of such packets unreliable;
		 * confirm this is acceptable for grouping purposes.
		 */
		ret = ntohl(pinfoa->tcp->th_seq) - ntohl(pinfob->tcp->th_seq);
		if (ret != 0)
			goto done;
		ret = ntohl(pinfoa->tcp->th_ack) - ntohl(pinfob->tcp->th_ack);
		if (ret != 0)
			goto done;
		ret = pinfoa->sequence - pinfob->sequence;
		if (ret != 0)
			goto done;
	}
done:
	return (ret);
}
|
||||
|
||||
static void
|
||||
tcp_tlro_sort(struct tlro_ctrl *tlro)
|
||||
{
|
||||
if (tlro->curr == 0)
|
||||
return;
|
||||
|
||||
qsort(tlro->mbuf, tlro->curr, sizeof(struct tlro_mbuf_ptr),
|
||||
&tcp_tlro_compare_header);
|
||||
}
|
||||
|
||||
static int
|
||||
tcp_tlro_get_ticks(void)
|
||||
{
|
||||
int to = tlro_min_rate;
|
||||
|
||||
if (to < 1)
|
||||
to = 1;
|
||||
to = hz / to;
|
||||
if (to < 1)
|
||||
to = 1;
|
||||
return (to);
|
||||
}
|
||||
|
||||
/*
 * Core aggregation pass over the (sorted) queue of packets.
 *
 * Runs of packets with identical serialized headers are merged into a
 * single TCP segment: payload mbufs are chained onto the first
 * packet, the TCP checksum is recomputed incrementally, and the
 * ack/window/flags/timestamp fields are taken from the newest
 * segment.  A combined (or uncombinable) packet is forwarded to the
 * network layer when "force" is set, the run ended early, it has aged
 * past the minimum serving interval, or it carries no payload.
 * Entries left queued are compacted to the front of the array.
 */
static void
tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
{
	struct tlro_mbuf_data *pinfoa;
	struct tlro_mbuf_data *pinfob;
	uint32_t cs;
	int curr_ticks = ticks;
	int ticks_limit = tcp_tlro_get_ticks();
	unsigned x;
	unsigned y;
	unsigned z;
	int temp;

	if (tlro->curr == 0)
		return;

	for (y = 0; y != tlro->curr;) {
		struct mbuf *m;

		pinfoa = tlro->mbuf[y].data;
		/* find the end [y, x) of the run of identical headers */
		for (x = y + 1; x != tlro->curr; x++) {
			pinfob = tlro->mbuf[x].data;
			if (pinfoa->buf_length != pinfob->buf_length ||
			    tcp_tlro_cmp64(pinfoa->buf, pinfob->buf) != 0)
				break;
		}
		if (pinfoa->buf_length == 0) {
			/* forward traffic which cannot be combined */
			for (z = y; z != x; z++) {
				/* just forward packets */
				pinfob = tlro->mbuf[z].data;

				m = pinfob->head;

				/* reset info structure */
				pinfob->head = NULL;
				pinfob->buf_length = 0;

				/* do stats */
				tlro->lro_flushed++;

				/* input packet to network layer */
				(*tlro->ifp->if_input) (tlro->ifp, m);
			}
			y = z;
			continue;
		}

		/* compute current checksum subtracted some header parts */
		temp = (pinfoa->ip_len - pinfoa->ip_hdrlen);
		cs = ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
		    tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len);

		/* append all fragments into one block */
		for (z = y + 1; z != x; z++) {

			pinfob = tlro->mbuf[z].data;

			/* check for command packets */
			if ((pinfoa->tcp->th_flags & ~(TH_ACK | TH_PUSH)) ||
			    (pinfob->tcp->th_flags & ~(TH_ACK | TH_PUSH)))
				break;

			/* check if there is enough space */
			if ((pinfoa->ip_len + pinfob->data_len) > tlro_max_packet)
				break;

			/* try to append the new segment */
			temp = ntohl(pinfoa->tcp->th_seq) + pinfoa->data_len;
			if (temp != (int)ntohl(pinfob->tcp->th_seq))
				break;

			/* fold the new segment's pseudo-header into the sum */
			temp = pinfob->ip_len - pinfob->ip_hdrlen;
			cs += ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
			    tcp_tlro_csum((uint32_p_t *)pinfob->tcp, pinfob->tcp_len);
			/* remove fields which appear twice */
			cs += (IPPROTO_TCP << 8);
			if (pinfob->ip_version == 4) {
				cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_src, 4);
				cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_dst, 4);
			} else {
				cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_src, 16);
				cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_dst, 16);
			}
			/* remainder computation */
			while (cs > 0xffff)
				cs = (cs >> 16) + (cs & 0xffff);

			/* update window and ack sequence number */
			pinfoa->tcp->th_ack = pinfob->tcp->th_ack;
			pinfoa->tcp->th_win = pinfob->tcp->th_win;

			/* check if we should restore the timestamp */
			tcp_tlro_info_restore_timestamp(pinfoa, pinfob);

			/* accumulate TCP flags */
			pinfoa->tcp->th_flags |= pinfob->tcp->th_flags;

			/* update lengths */
			pinfoa->ip_len += pinfob->data_len;
			pinfoa->data_len += pinfob->data_len;

			/* clear mbuf pointer - packet is accumulated */
			m = pinfob->head;

			/* reset info structure */
			pinfob->head = NULL;
			pinfob->buf_length = 0;

			/* append data to mbuf [y]: strip headers first */
			m_adj(m, pinfob->data_off);
			/* delete mbuf tags, if any */
			m_tag_delete_chain(m, NULL);
			/* clear packet header flag */
			m->m_flags &= ~M_PKTHDR;

			/* concat mbuf(s) to end of list */
			pinfoa->pprev[0] = m;
			m = m_last(m);
			pinfoa->pprev = &m->m_next;
			pinfoa->head->m_pkthdr.len += pinfob->data_len;
		}
		/* compute new TCP header checksum */
		pinfoa->tcp->th_sum = 0;

		temp = pinfoa->ip_len - pinfoa->ip_hdrlen;
		cs = (cs ^ 0xFFFF) +
		    tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len) +
		    ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8);

		/* remainder computation */
		while (cs > 0xffff)
			cs = (cs >> 16) + (cs & 0xffff);

		/* update new checksum */
		pinfoa->tcp->th_sum = ~htole16(cs);

		/* update IP length, if any */
		if (pinfoa->ip_version == 4) {
			if (pinfoa->ip_len > IP_MAXPACKET) {
				/* too large for the 16-bit field: tag instead */
				M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
				pinfoa->ip.v4->ip_len = htons(IP_MAXPACKET);
			} else {
				pinfoa->ip.v4->ip_len = htons(pinfoa->ip_len);
			}
		} else {
			if (pinfoa->ip_len > (IP_MAXPACKET + sizeof(*pinfoa->ip.v6))) {
				M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
				pinfoa->ip.v6->ip6_plen = htons(IP_MAXPACKET);
			} else {
				temp = pinfoa->ip_len - sizeof(*pinfoa->ip.v6);
				pinfoa->ip.v6->ip6_plen = htons(temp);
			}
		}

		temp = curr_ticks - pinfoa->last_tick;
		/* check if packet should be forwarded */
		if (force != 0 || z != x || temp >= ticks_limit ||
		    pinfoa->data_len == 0) {

			/* compute new IPv4 header checksum */
			if (pinfoa->ip_version == 4) {
				pinfoa->ip.v4->ip_sum = 0;
				cs = tcp_tlro_csum((uint32_p_t *)pinfoa->ip.v4,
				    sizeof(*pinfoa->ip.v4));
				pinfoa->ip.v4->ip_sum = ~htole16(cs);
			}
			/* forward packet */
			m = pinfoa->head;

			/* reset info structure */
			pinfoa->head = NULL;
			pinfoa->buf_length = 0;

			/* do stats */
			tlro->lro_flushed++;

			/* input packet to network layer */
			(*tlro->ifp->if_input) (tlro->ifp, m);
		}
		y = z;
	}

	/* cleanup all NULL heads: compact surviving entries to the front */
	for (y = 0; y != tlro->curr; y++) {
		if (tlro->mbuf[y].data->head == NULL) {
			for (z = y + 1; z != tlro->curr; z++) {
				struct tlro_mbuf_ptr ptemp;
				if (tlro->mbuf[z].data->head == NULL)
					continue;
				ptemp = tlro->mbuf[y];
				tlro->mbuf[y] = tlro->mbuf[z];
				tlro->mbuf[z] = ptemp;
				y++;
			}
			break;
		}
	}
	tlro->curr = y;
}
|
||||
|
||||
static void
|
||||
tcp_tlro_cleanup(struct tlro_ctrl *tlro)
|
||||
{
|
||||
while (tlro->curr != 0 &&
|
||||
tlro->mbuf[tlro->curr - 1].data->head == NULL)
|
||||
tlro->curr--;
|
||||
}
|
||||
|
||||
void
|
||||
tcp_tlro_flush(struct tlro_ctrl *tlro, int force)
|
||||
{
|
||||
if (tlro->curr == 0)
|
||||
return;
|
||||
|
||||
tcp_tlro_sort(tlro);
|
||||
tcp_tlro_cleanup(tlro);
|
||||
tcp_tlro_combine(tlro, force);
|
||||
}
|
||||
|
||||
int
|
||||
tcp_tlro_init(struct tlro_ctrl *tlro, struct ifnet *ifp,
|
||||
int max_mbufs)
|
||||
{
|
||||
ssize_t size;
|
||||
uint32_t x;
|
||||
|
||||
/* set zero defaults */
|
||||
memset(tlro, 0, sizeof(*tlro));
|
||||
|
||||
/* compute size needed for data */
|
||||
size = (sizeof(struct tlro_mbuf_ptr) * max_mbufs) +
|
||||
(sizeof(struct tlro_mbuf_data) * max_mbufs);
|
||||
|
||||
/* range check */
|
||||
if (max_mbufs <= 0 || size <= 0 || ifp == NULL)
|
||||
return (EINVAL);
|
||||
|
||||
/* setup tlro control structure */
|
||||
tlro->mbuf = malloc(size, M_TLRO, M_WAITOK | M_ZERO);
|
||||
tlro->max = max_mbufs;
|
||||
tlro->ifp = ifp;
|
||||
|
||||
/* setup pointer array */
|
||||
for (x = 0; x != tlro->max; x++) {
|
||||
tlro->mbuf[x].data = ((struct tlro_mbuf_data *)
|
||||
&tlro->mbuf[max_mbufs]) + x;
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
tcp_tlro_free(struct tlro_ctrl *tlro)
|
||||
{
|
||||
struct tlro_mbuf_data *pinfo;
|
||||
struct mbuf *m;
|
||||
uint32_t y;
|
||||
|
||||
/* check if not setup */
|
||||
if (tlro->mbuf == NULL)
|
||||
return;
|
||||
/* free MBUF array and any leftover MBUFs */
|
||||
for (y = 0; y != tlro->max; y++) {
|
||||
|
||||
pinfo = tlro->mbuf[y].data;
|
||||
|
||||
m = pinfo->head;
|
||||
|
||||
/* reset info structure */
|
||||
pinfo->head = NULL;
|
||||
pinfo->buf_length = 0;
|
||||
|
||||
m_freem(m);
|
||||
}
|
||||
free(tlro->mbuf, M_TLRO);
|
||||
/* reset buffer */
|
||||
memset(tlro, 0, sizeof(*tlro));
|
||||
}
|
||||
|
||||
/*
 * Receive entry point.  Queues the mbuf for aggregation when there is
 * room, otherwise forwards it directly to the network layer (or, with
 * no interface attached, drops it).
 */
void
tcp_tlro_rx(struct tlro_ctrl *tlro, struct mbuf *m)
{
	if (m->m_len > 0 && tlro->curr < tlro->max) {
		/* do stats */
		tlro->lro_queued++;

		/* extract header */
		tcp_tlro_extract_header(tlro->mbuf[tlro->curr++].data,
		    m, tlro->sequence++);
	} else if (tlro->ifp != NULL) {
		/* do stats */
		tlro->lro_flushed++;

		/* input packet to network layer */
		(*tlro->ifp->if_input) (tlro->ifp, m);
	} else {
		/* packet drop */
		m_freem(m);
	}
}
|
83
sys/dev/mlx5/mlx5_en/tcp_tlro.h
Normal file
83
sys/dev/mlx5/mlx5_en/tcp_tlro.h
Normal file
@ -0,0 +1,83 @@
|
||||
/*-
|
||||
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _TCP_TLRO_H_
|
||||
#define _TCP_TLRO_H_
|
||||
|
||||
#define TLRO_MAX_HEADER 64 /* bytes */
|
||||
|
||||
struct ip;
|
||||
struct ip6_hdr;
|
||||
struct tcphdr;
|
||||
|
||||
struct tlro_mbuf_data {
|
||||
union {
|
||||
#ifdef INET
|
||||
struct ip *v4;
|
||||
#endif
|
||||
#ifdef INET6
|
||||
struct ip6_hdr *v6;
|
||||
#endif
|
||||
} ip;
|
||||
struct tcphdr *tcp;
|
||||
struct mbuf *head;
|
||||
struct mbuf **pprev;
|
||||
int last_tick;
|
||||
int sequence;
|
||||
int data_len;
|
||||
int data_off;
|
||||
int ip_hdrlen;
|
||||
int ip_len;
|
||||
uint32_t tcp_ts;
|
||||
uint32_t tcp_ts_reply;
|
||||
uint16_t tcp_len;
|
||||
uint8_t ip_version;
|
||||
uint8_t buf_length; /* in 32-bit words */
|
||||
uint64_t buf[TLRO_MAX_HEADER / 8];
|
||||
} __aligned(256);
|
||||
|
||||
struct tlro_mbuf_ptr {
|
||||
struct tlro_mbuf_data *data;
|
||||
};
|
||||
|
||||
/* NB: This is part of driver structs */
|
||||
struct tlro_ctrl {
|
||||
struct ifnet *ifp;
|
||||
struct tlro_mbuf_ptr *mbuf;
|
||||
uint64_t lro_queued;
|
||||
uint64_t lro_flushed;
|
||||
uint32_t max;
|
||||
uint32_t curr;
|
||||
int sequence;
|
||||
};
|
||||
|
||||
int tcp_tlro_init(struct tlro_ctrl *, struct ifnet *, int);
|
||||
void tcp_tlro_free(struct tlro_ctrl *);
|
||||
void tcp_tlro_flush(struct tlro_ctrl *, int);
|
||||
void tcp_tlro_rx(struct tlro_ctrl *, struct mbuf *);
|
||||
|
||||
#endif /* _TCP_TLRO_H_ */
|
8491
sys/dev/mlx5/mlx5_ifc.h
Normal file
8491
sys/dev/mlx5/mlx5_ifc.h
Normal file
File diff suppressed because it is too large
Load Diff
31
sys/dev/mlx5/mlx5_rdma_if.h
Normal file
31
sys/dev/mlx5/mlx5_rdma_if.h
Normal file
@ -0,0 +1,31 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_RDMA_IF_H
|
||||
#define MLX5_RDMA_IF_H
|
||||
|
||||
#endif /* MLX5_RDMA_IF_H */
|
599
sys/dev/mlx5/qp.h
Normal file
599
sys/dev/mlx5/qp.h
Normal file
@ -0,0 +1,599 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_QP_H
|
||||
#define MLX5_QP_H
|
||||
|
||||
#include <dev/mlx5/device.h>
|
||||
#include <dev/mlx5/driver.h>
|
||||
#include <dev/mlx5/mlx5_ifc.h>
|
||||
|
||||
#define MLX5_INVALID_LKEY 0x100
|
||||
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
|
||||
#define MLX5_DIF_SIZE 8
|
||||
#define MLX5_STRIDE_BLOCK_OP 0x400
|
||||
#define MLX5_CPY_GRD_MASK 0xc0
|
||||
#define MLX5_CPY_APP_MASK 0x30
|
||||
#define MLX5_CPY_REF_MASK 0x0f
|
||||
#define MLX5_BSF_INC_REFTAG (1 << 6)
|
||||
#define MLX5_BSF_INL_VALID (1 << 15)
|
||||
#define MLX5_BSF_REFRESH_DIF (1 << 14)
|
||||
#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
|
||||
#define MLX5_BSF_APPTAG_ESCAPE 0x1
|
||||
#define MLX5_BSF_APPREF_ESCAPE 0x2
|
||||
|
||||
enum mlx5_qp_optpar {
|
||||
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
|
||||
MLX5_QP_OPTPAR_RRE = 1 << 1,
|
||||
MLX5_QP_OPTPAR_RAE = 1 << 2,
|
||||
MLX5_QP_OPTPAR_RWE = 1 << 3,
|
||||
MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
|
||||
MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
|
||||
MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
|
||||
MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
|
||||
MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
|
||||
MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
|
||||
MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
|
||||
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
|
||||
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
|
||||
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
|
||||
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
|
||||
MLX5_QP_OPTPAR_SRQN = 1 << 18,
|
||||
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
|
||||
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
|
||||
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
|
||||
};
|
||||
|
||||
enum mlx5_qp_state {
|
||||
MLX5_QP_STATE_RST = 0,
|
||||
MLX5_QP_STATE_INIT = 1,
|
||||
MLX5_QP_STATE_RTR = 2,
|
||||
MLX5_QP_STATE_RTS = 3,
|
||||
MLX5_QP_STATE_SQER = 4,
|
||||
MLX5_QP_STATE_SQD = 5,
|
||||
MLX5_QP_STATE_ERR = 6,
|
||||
MLX5_QP_STATE_SQ_DRAINING = 7,
|
||||
MLX5_QP_STATE_SUSPENDED = 9,
|
||||
MLX5_QP_NUM_STATE
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_QP_ST_RC = 0x0,
|
||||
MLX5_QP_ST_UC = 0x1,
|
||||
MLX5_QP_ST_UD = 0x2,
|
||||
MLX5_QP_ST_XRC = 0x3,
|
||||
MLX5_QP_ST_MLX = 0x4,
|
||||
MLX5_QP_ST_DCI = 0x5,
|
||||
MLX5_QP_ST_DCT = 0x6,
|
||||
MLX5_QP_ST_QP0 = 0x7,
|
||||
MLX5_QP_ST_QP1 = 0x8,
|
||||
MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
|
||||
MLX5_QP_ST_RAW_IPV6 = 0xa,
|
||||
MLX5_QP_ST_SNIFFER = 0xb,
|
||||
MLX5_QP_ST_SYNC_UMR = 0xe,
|
||||
MLX5_QP_ST_PTP_1588 = 0xd,
|
||||
MLX5_QP_ST_REG_UMR = 0xc,
|
||||
MLX5_QP_ST_MAX
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_NON_ZERO_RQ = 0 << 24,
|
||||
MLX5_SRQ_RQ = 1 << 24,
|
||||
MLX5_CRQ_RQ = 2 << 24,
|
||||
MLX5_ZERO_LEN_RQ = 3 << 24
|
||||
};
|
||||
|
||||
enum {
|
||||
/* params1 */
|
||||
MLX5_QP_BIT_SRE = 1 << 15,
|
||||
MLX5_QP_BIT_SWE = 1 << 14,
|
||||
MLX5_QP_BIT_SAE = 1 << 13,
|
||||
/* params2 */
|
||||
MLX5_QP_BIT_RRE = 1 << 15,
|
||||
MLX5_QP_BIT_RWE = 1 << 14,
|
||||
MLX5_QP_BIT_RAE = 1 << 13,
|
||||
MLX5_QP_BIT_RIC = 1 << 4,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
|
||||
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
|
||||
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_SEND_WQE_DS = 16,
|
||||
MLX5_SEND_WQE_BB = 64,
|
||||
};
|
||||
|
||||
#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
|
||||
|
||||
enum {
|
||||
MLX5_SEND_WQE_MAX_WQEBBS = 16,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
|
||||
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
|
||||
MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
|
||||
MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
|
||||
MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_FENCE_MODE_NONE = 0 << 5,
|
||||
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
|
||||
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
|
||||
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_QP_LAT_SENSITIVE = 1 << 28,
|
||||
MLX5_QP_BLOCK_MCAST = 1 << 30,
|
||||
MLX5_QP_ENABLE_SIG = 1 << 31,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_RCV_DBR = 0,
|
||||
MLX5_SND_DBR = 1,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_FLAGS_INLINE = 1<<7,
|
||||
MLX5_FLAGS_CHECK_FREE = 1<<5,
|
||||
};
|
||||
|
||||
struct mlx5_wqe_fmr_seg {
|
||||
__be32 flags;
|
||||
__be32 mem_key;
|
||||
__be64 buf_list;
|
||||
__be64 start_addr;
|
||||
__be64 reg_len;
|
||||
__be32 offset;
|
||||
__be32 page_size;
|
||||
u32 reserved[2];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_ctrl_seg {
|
||||
__be32 opmod_idx_opcode;
|
||||
__be32 qpn_ds;
|
||||
u8 signature;
|
||||
u8 rsvd[2];
|
||||
u8 fm_ce_se;
|
||||
__be32 imm;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
|
||||
MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
|
||||
MLX5_ETH_WQE_L3_CSUM = 1 << 6,
|
||||
MLX5_ETH_WQE_L4_CSUM = 1 << 7,
|
||||
};
|
||||
|
||||
struct mlx5_wqe_eth_seg {
|
||||
u8 rsvd0[4];
|
||||
u8 cs_flags;
|
||||
u8 rsvd1;
|
||||
__be16 mss;
|
||||
__be32 rsvd2;
|
||||
__be16 inline_hdr_sz;
|
||||
u8 inline_hdr_start[2];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_xrc_seg {
|
||||
__be32 xrc_srqn;
|
||||
u8 rsvd[12];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_masked_atomic_seg {
|
||||
__be64 swap_add;
|
||||
__be64 compare;
|
||||
__be64 swap_add_mask;
|
||||
__be64 compare_mask;
|
||||
};
|
||||
|
||||
struct mlx5_av {
|
||||
union {
|
||||
struct {
|
||||
__be32 qkey;
|
||||
__be32 reserved;
|
||||
} qkey;
|
||||
__be64 dc_key;
|
||||
} key;
|
||||
__be32 dqp_dct;
|
||||
u8 stat_rate_sl;
|
||||
u8 fl_mlid;
|
||||
union {
|
||||
__be16 rlid;
|
||||
__be16 udp_sport;
|
||||
};
|
||||
u8 reserved0[4];
|
||||
u8 rmac[6];
|
||||
u8 tclass;
|
||||
u8 hop_limit;
|
||||
__be32 grh_gid_fl;
|
||||
u8 rgid[16];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_datagram_seg {
|
||||
struct mlx5_av av;
|
||||
};
|
||||
|
||||
struct mlx5_wqe_raddr_seg {
|
||||
__be64 raddr;
|
||||
__be32 rkey;
|
||||
u32 reserved;
|
||||
};
|
||||
|
||||
struct mlx5_wqe_atomic_seg {
|
||||
__be64 swap_add;
|
||||
__be64 compare;
|
||||
};
|
||||
|
||||
struct mlx5_wqe_data_seg {
|
||||
__be32 byte_count;
|
||||
__be32 lkey;
|
||||
__be64 addr;
|
||||
};
|
||||
|
||||
struct mlx5_wqe_umr_ctrl_seg {
|
||||
u8 flags;
|
||||
u8 rsvd0[3];
|
||||
__be16 klm_octowords;
|
||||
__be16 bsf_octowords;
|
||||
__be64 mkey_mask;
|
||||
u8 rsvd1[32];
|
||||
};
|
||||
|
||||
struct mlx5_seg_set_psv {
|
||||
__be32 psv_num;
|
||||
__be16 syndrome;
|
||||
__be16 status;
|
||||
__be32 transient_sig;
|
||||
__be32 ref_tag;
|
||||
};
|
||||
|
||||
struct mlx5_seg_get_psv {
|
||||
u8 rsvd[19];
|
||||
u8 num_psv;
|
||||
__be32 l_key;
|
||||
__be64 va;
|
||||
__be32 psv_index[4];
|
||||
};
|
||||
|
||||
struct mlx5_seg_check_psv {
|
||||
u8 rsvd0[2];
|
||||
__be16 err_coalescing_op;
|
||||
u8 rsvd1[2];
|
||||
__be16 xport_err_op;
|
||||
u8 rsvd2[2];
|
||||
__be16 xport_err_mask;
|
||||
u8 rsvd3[7];
|
||||
u8 num_psv;
|
||||
__be32 l_key;
|
||||
__be64 va;
|
||||
__be32 psv_index[4];
|
||||
};
|
||||
|
||||
struct mlx5_rwqe_sig {
|
||||
u8 rsvd0[4];
|
||||
u8 signature;
|
||||
u8 rsvd1[11];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_signature_seg {
|
||||
u8 rsvd0[4];
|
||||
u8 signature;
|
||||
u8 rsvd1[11];
|
||||
};
|
||||
|
||||
struct mlx5_wqe_inline_seg {
|
||||
__be32 byte_count;
|
||||
};
|
||||
|
||||
enum mlx5_sig_type {
|
||||
MLX5_DIF_CRC = 0x1,
|
||||
MLX5_DIF_IPCS = 0x2,
|
||||
};
|
||||
|
||||
struct mlx5_bsf_inl {
|
||||
__be16 vld_refresh;
|
||||
__be16 dif_apptag;
|
||||
__be32 dif_reftag;
|
||||
u8 sig_type;
|
||||
u8 rp_inv_seed;
|
||||
u8 rsvd[3];
|
||||
u8 dif_inc_ref_guard_check;
|
||||
__be16 dif_app_bitmask_check;
|
||||
};
|
||||
|
||||
struct mlx5_bsf {
|
||||
struct mlx5_bsf_basic {
|
||||
u8 bsf_size_sbs;
|
||||
u8 check_byte_mask;
|
||||
union {
|
||||
u8 copy_byte_mask;
|
||||
u8 bs_selector;
|
||||
u8 rsvd_wflags;
|
||||
} wire;
|
||||
union {
|
||||
u8 bs_selector;
|
||||
u8 rsvd_mflags;
|
||||
} mem;
|
||||
__be32 raw_data_size;
|
||||
__be32 w_bfs_psv;
|
||||
__be32 m_bfs_psv;
|
||||
} basic;
|
||||
struct mlx5_bsf_ext {
|
||||
__be32 t_init_gen_pro_size;
|
||||
__be32 rsvd_epi_size;
|
||||
__be32 w_tfs_psv;
|
||||
__be32 m_tfs_psv;
|
||||
} ext;
|
||||
struct mlx5_bsf_inl w_inl;
|
||||
struct mlx5_bsf_inl m_inl;
|
||||
};
|
||||
|
||||
struct mlx5_klm {
|
||||
__be32 bcount;
|
||||
__be32 key;
|
||||
__be64 va;
|
||||
};
|
||||
|
||||
struct mlx5_stride_block_entry {
|
||||
__be16 stride;
|
||||
__be16 bcount;
|
||||
__be32 key;
|
||||
__be64 va;
|
||||
};
|
||||
|
||||
struct mlx5_stride_block_ctrl_seg {
|
||||
__be32 bcount_per_cycle;
|
||||
__be32 op;
|
||||
__be32 repeat_count;
|
||||
u16 rsvd;
|
||||
__be16 num_entries;
|
||||
};
|
||||
|
||||
struct mlx5_core_qp {
|
||||
struct mlx5_core_rsc_common common; /* must be first */
|
||||
void (*event) (struct mlx5_core_qp *, int);
|
||||
int qpn;
|
||||
struct mlx5_rsc_debug *dbg;
|
||||
int pid;
|
||||
};
|
||||
|
||||
struct mlx5_qp_path {
|
||||
u8 fl_free_ar;
|
||||
u8 rsvd3;
|
||||
__be16 pkey_index;
|
||||
u8 rsvd0;
|
||||
u8 grh_mlid;
|
||||
__be16 rlid;
|
||||
u8 ackto_lt;
|
||||
u8 mgid_index;
|
||||
u8 static_rate;
|
||||
u8 hop_limit;
|
||||
__be32 tclass_flowlabel;
|
||||
union {
|
||||
u8 rgid[16];
|
||||
u8 rip[16];
|
||||
};
|
||||
u8 f_dscp_ecn_prio;
|
||||
u8 ecn_dscp;
|
||||
__be16 udp_sport;
|
||||
u8 dci_cfi_prio_sl;
|
||||
u8 port;
|
||||
u8 rmac[6];
|
||||
};
|
||||
|
||||
struct mlx5_qp_context {
|
||||
__be32 flags;
|
||||
__be32 flags_pd;
|
||||
u8 mtu_msgmax;
|
||||
u8 rq_size_stride;
|
||||
__be16 sq_crq_size;
|
||||
__be32 qp_counter_set_usr_page;
|
||||
__be32 wire_qpn;
|
||||
__be32 log_pg_sz_remote_qpn;
|
||||
struct mlx5_qp_path pri_path;
|
||||
struct mlx5_qp_path alt_path;
|
||||
__be32 params1;
|
||||
u8 reserved2[4];
|
||||
__be32 next_send_psn;
|
||||
__be32 cqn_send;
|
||||
u8 reserved3[8];
|
||||
__be32 last_acked_psn;
|
||||
__be32 ssn;
|
||||
__be32 params2;
|
||||
__be32 rnr_nextrecvpsn;
|
||||
__be32 xrcd;
|
||||
__be32 cqn_recv;
|
||||
__be64 db_rec_addr;
|
||||
__be32 qkey;
|
||||
__be32 rq_type_srqn;
|
||||
__be32 rmsn;
|
||||
__be16 hw_sq_wqe_counter;
|
||||
__be16 sw_sq_wqe_counter;
|
||||
__be16 hw_rcyclic_byte_counter;
|
||||
__be16 hw_rq_counter;
|
||||
__be16 sw_rcyclic_byte_counter;
|
||||
__be16 sw_rq_counter;
|
||||
u8 rsvd0[5];
|
||||
u8 cgs;
|
||||
u8 cs_req;
|
||||
u8 cs_res;
|
||||
__be64 dc_access_key;
|
||||
u8 rsvd1[24];
|
||||
};
|
||||
|
||||
struct mlx5_create_qp_mbox_in {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be32 input_qpn;
|
||||
u8 rsvd0[4];
|
||||
__be32 opt_param_mask;
|
||||
u8 rsvd1[4];
|
||||
struct mlx5_qp_context ctx;
|
||||
u8 rsvd3[16];
|
||||
__be64 pas[0];
|
||||
};
|
||||
|
||||
struct mlx5_create_qp_mbox_out {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
__be32 qpn;
|
||||
u8 rsvd0[4];
|
||||
};
|
||||
|
||||
struct mlx5_destroy_qp_mbox_in {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be32 qpn;
|
||||
u8 rsvd0[4];
|
||||
};
|
||||
|
||||
struct mlx5_destroy_qp_mbox_out {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
u8 rsvd0[8];
|
||||
};
|
||||
|
||||
struct mlx5_modify_qp_mbox_in {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be32 qpn;
|
||||
u8 rsvd1[4];
|
||||
__be32 optparam;
|
||||
u8 rsvd0[4];
|
||||
struct mlx5_qp_context ctx;
|
||||
};
|
||||
|
||||
struct mlx5_modify_qp_mbox_out {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
u8 rsvd0[8];
|
||||
};
|
||||
|
||||
struct mlx5_query_qp_mbox_in {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be32 qpn;
|
||||
u8 rsvd[4];
|
||||
};
|
||||
|
||||
struct mlx5_query_qp_mbox_out {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
u8 rsvd1[8];
|
||||
__be32 optparam;
|
||||
u8 rsvd0[4];
|
||||
struct mlx5_qp_context ctx;
|
||||
u8 rsvd2[16];
|
||||
__be64 pas[0];
|
||||
};
|
||||
|
||||
struct mlx5_conf_sqp_mbox_in {
|
||||
struct mlx5_inbox_hdr hdr;
|
||||
__be32 qpn;
|
||||
u8 rsvd[3];
|
||||
u8 type;
|
||||
};
|
||||
|
||||
struct mlx5_conf_sqp_mbox_out {
|
||||
struct mlx5_outbox_hdr hdr;
|
||||
u8 rsvd[8];
|
||||
};
|
||||
|
||||
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
|
||||
{
|
||||
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
|
||||
}
|
||||
|
||||
static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
|
||||
{
|
||||
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
|
||||
}
|
||||
|
||||
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_qp *qp,
|
||||
struct mlx5_create_qp_mbox_in *in,
|
||||
int inlen);
|
||||
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
|
||||
enum mlx5_qp_state new_state,
|
||||
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
|
||||
struct mlx5_core_qp *qp);
|
||||
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
|
||||
struct mlx5_core_qp *qp);
|
||||
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
|
||||
struct mlx5_query_qp_mbox_out *out, int outlen);
|
||||
|
||||
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
|
||||
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
|
||||
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
|
||||
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
|
||||
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
|
||||
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
|
||||
|
||||
static inline const char *mlx5_qp_type_str(int type)
|
||||
{
|
||||
switch (type) {
|
||||
case MLX5_QP_ST_RC: return "RC";
|
||||
case MLX5_QP_ST_UC: return "C";
|
||||
case MLX5_QP_ST_UD: return "UD";
|
||||
case MLX5_QP_ST_XRC: return "XRC";
|
||||
case MLX5_QP_ST_MLX: return "MLX";
|
||||
case MLX5_QP_ST_QP0: return "QP0";
|
||||
case MLX5_QP_ST_QP1: return "QP1";
|
||||
case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
|
||||
case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
|
||||
case MLX5_QP_ST_SNIFFER: return "SNIFFER";
|
||||
case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
|
||||
case MLX5_QP_ST_PTP_1588: return "PTP_1588";
|
||||
case MLX5_QP_ST_REG_UMR: return "REG_UMR";
|
||||
default: return "Invalid transport type";
|
||||
}
|
||||
}
|
||||
|
||||
static inline const char *mlx5_qp_state_str(int state)
|
||||
{
|
||||
switch (state) {
|
||||
case MLX5_QP_STATE_RST:
|
||||
return "RST";
|
||||
case MLX5_QP_STATE_INIT:
|
||||
return "INIT";
|
||||
case MLX5_QP_STATE_RTR:
|
||||
return "RTR";
|
||||
case MLX5_QP_STATE_RTS:
|
||||
return "RTS";
|
||||
case MLX5_QP_STATE_SQER:
|
||||
return "SQER";
|
||||
case MLX5_QP_STATE_SQD:
|
||||
return "SQD";
|
||||
case MLX5_QP_STATE_ERR:
|
||||
return "ERR";
|
||||
case MLX5_QP_STATE_SQ_DRAINING:
|
||||
return "SQ_DRAINING";
|
||||
case MLX5_QP_STATE_SUSPENDED:
|
||||
return "SUSPENDED";
|
||||
default: return "Invalid QP state";
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* MLX5_QP_H */
|
36
sys/dev/mlx5/srq.h
Normal file
36
sys/dev/mlx5/srq.h
Normal file
@ -0,0 +1,36 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef MLX5_SRQ_H
|
||||
#define MLX5_SRQ_H
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
|
||||
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
|
||||
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
|
||||
|
||||
#endif /* MLX5_SRQ_H */
|
74
sys/dev/mlx5/vport.h
Normal file
74
sys/dev/mlx5/vport.h
Normal file
@ -0,0 +1,74 @@
|
||||
/*-
|
||||
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef __MLX5_VPORT_H__
|
||||
#define __MLX5_VPORT_H__
|
||||
|
||||
#include <dev/mlx5/driver.h>
|
||||
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
|
||||
int *counter_set_id);
|
||||
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
|
||||
int counter_set_id);
|
||||
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
|
||||
int counter_set_id,
|
||||
u32 *out_of_rx_buffer);
|
||||
|
||||
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
|
||||
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
|
||||
u32 vport, u8 *addr);
|
||||
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
|
||||
bool other_vport, u8 *addr);
|
||||
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
|
||||
u8 *addr);
|
||||
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
|
||||
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
|
||||
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
|
||||
u64 *system_image_guid);
|
||||
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
|
||||
int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid);
|
||||
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
|
||||
u16 *qkey_viol_cntr);
|
||||
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
|
||||
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
|
||||
u64 *system_image_guid);
|
||||
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
|
||||
u8 port_num, u8 vport_num, u32 *out,
|
||||
int outlen);
|
||||
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
|
||||
u8 port_num, u16 vf_num, u16 pkey_index,
|
||||
u16 *pkey);
|
||||
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
|
||||
u16 vport_num, u16 gid_index, union ib_gid *gid);
|
||||
int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
|
||||
u8 insert_mode, u8 strip_mode,
|
||||
u16 vlan, u8 cfi, u8 pcp);
|
||||
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
|
||||
u8 port_num, u16 vport_num,
|
||||
void *out, int out_size);
|
||||
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
|
||||
struct mlx5_vport_counters *vc);
|
||||
#endif /* __MLX5_VPORT_H__ */
|
34
sys/modules/mlx5/Makefile
Normal file
34
sys/modules/mlx5/Makefile
Normal file
@ -0,0 +1,34 @@
|
||||
# $FreeBSD$
|
||||
.PATH: ${.CURDIR}/../../dev/mlx5/mlx5_core
|
||||
|
||||
KMOD=mlx5
|
||||
SRCS= \
|
||||
mlx5_alloc.c \
|
||||
mlx5_cmd.c \
|
||||
mlx5_cq.c \
|
||||
mlx5_eq.c \
|
||||
mlx5_flow_table.c \
|
||||
mlx5_fw.c \
|
||||
mlx5_health.c \
|
||||
mlx5_mad.c \
|
||||
mlx5_main.c \
|
||||
mlx5_mcg.c \
|
||||
mlx5_mr.c \
|
||||
mlx5_pagealloc.c \
|
||||
mlx5_pd.c \
|
||||
mlx5_port.c \
|
||||
mlx5_qp.c \
|
||||
mlx5_srq.c \
|
||||
mlx5_transobj.c \
|
||||
mlx5_uar.c \
|
||||
mlx5_vport.c \
|
||||
mlx5_wq.c \
|
||||
device_if.h bus_if.h vnode_if.h pci_if.h \
|
||||
opt_inet.h opt_inet6.h opt_random.h
|
||||
|
||||
CFLAGS+= -I${.CURDIR}/../../ofed/include
|
||||
CFLAGS+= -I${.CURDIR}/../../compat/linuxkpi/common/include
|
||||
|
||||
.include <bsd.kmod.mk>
|
||||
|
||||
CFLAGS+= -Wno-cast-qual -Wno-pointer-arith ${GCC_MS_EXTENSIONS}
|
29
sys/modules/mlx5en/Makefile
Normal file
29
sys/modules/mlx5en/Makefile
Normal file
@ -0,0 +1,29 @@
|
||||
# $FreeBSD$
|
||||
.PATH: ${.CURDIR}/../../dev/mlx5/mlx5_en
|
||||
|
||||
KMOD=mlx5en
|
||||
SRCS= \
|
||||
mlx5_en_ethtool.c \
|
||||
mlx5_en_main.c \
|
||||
mlx5_en_tx.c \
|
||||
mlx5_en_flow_table.c \
|
||||
mlx5_en_rx.c \
|
||||
mlx5_en_txrx.c \
|
||||
device_if.h bus_if.h vnode_if.h pci_if.h \
|
||||
opt_inet.h opt_inet6.h
|
||||
|
||||
.if defined(HAVE_TURBO_LRO)
|
||||
CFLAGS+= -DHAVE_TURBO_LRO
|
||||
SRCS+= tcp_tlro.c
|
||||
.endif
|
||||
|
||||
.if defined(HAVE_PER_CQ_EVENT_PACKET)
|
||||
CFLAGS+= -DHAVE_PER_CQ_EVENT_PACKET
|
||||
.endif
|
||||
|
||||
CFLAGS+= -I${.CURDIR}/../../ofed/include
|
||||
CFLAGS+= -I${.CURDIR}/../../compat/linuxkpi/common/include
|
||||
|
||||
.include <bsd.kmod.mk>
|
||||
|
||||
CFLAGS+= -Wno-cast-qual -Wno-pointer-arith ${GCC_MS_EXTENSIONS}
|
Loading…
Reference in New Issue
Block a user