
qat: Rename to qat_c2xxx and remove support for modern chipsets

A replacement QAT driver will be imported, but it does not support
Atom C2xxx hardware, so the existing driver will be kept around to
provide opencrypto offload support for those chipsets.

Reviewed by:	pauamma, emaste
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D35817
Mark Johnston 2022-07-27 10:55:40 -04:00
parent 60cb4f9a8e
commit f4f56ff43d
40 changed files with 30 additions and 5228 deletions
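For anyone carrying local configuration across this rename, the new spellings are the ones shown in the hunks below: the kernel config line comes from the NOTES change and the loader.conf lines from the updated manual page. A minimal before/after sketch, for illustration only:

	# Kernel configuration file
	device		qat_c2xxx	# was: device qat

	# /boot/loader.conf (was: qat_load="YES" plus per-chipset firmware modules)
	qat_c2xxx_load="YES"
	qat_c2xxxfw_load="YES"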

View File

@@ -452,7 +452,7 @@ MAN= aac.4 \
pty.4 \
puc.4 \
pwmc.4 \
${_qat.4} \
${_qat_c2xxx.4} \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
@@ -830,7 +830,7 @@ _nvme.4= nvme.4
_nvram.4= nvram.4
_padlock.4= padlock.4
_pchtherm.4= pchtherm.4
_qat.4= qat.4
_qat_c2xxx.4= qat_c2xxx.4
_rr232x.4= rr232x.4
_speaker.4= speaker.4
_spkr.4= spkr.4

View File

@@ -24,12 +24,12 @@
.\"
.\" $FreeBSD$
.\"
.Dd May 7, 2021
.Dt QAT 4
.Dd July 21, 2022
.Dt QAT_C2XXX 4
.Os
.Sh NAME
.Nm qat
.Nd Intel QuickAssist Technology (QAT) driver
.Nm qat_c2xxx
.Nd Intel QuickAssist Technology (QAT) driver for Atom C2000 chipsets
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
@@ -44,12 +44,8 @@ Alternatively, to load the driver as a
module at boot time, place the following lines in
.Xr loader.conf 5 :
.Bd -literal -offset indent
qat_load="YES"
qat_c2xxx_load="YES"
qat_c2xxxfw_load="YES"
qat_c3xxxfw_load="YES"
qat_c62xfw_load="YES"
qat_d15xxfw_load="YES"
qat_dh895xccfw_load="YES"
.Ed
.Sh DESCRIPTION
The
@@ -57,20 +53,15 @@ The
driver implements
.Xr crypto 4
support for some of the cryptographic acceleration functions of the Intel
QuickAssist (QAT) device.
The
.Nm
driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon
C620 and D-1500 platforms, and the Intel QAT Adapter 8950.
Other platforms and adapters not listed here may also be supported.
QuickAssist (QAT) device found on Atom C2000 chipsets.
QAT devices are enumerated through PCIe and are thus visible in
.Xr pciconf 8
output.
.Pp
The
.Nm
driver can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes,
and can perform authenticated encryption combining the CBC, CTR and XTS modes
driver can accelerate AES in CBC, CTR, and GCM modes,
and can perform authenticated encryption combining the CBC and CTR modes
with SHA1-HMAC and SHA2-HMAC.
The
.Nm
@@ -84,6 +75,7 @@ requests that do not satisfy this constraint.
.Xr crypto 4 ,
.Xr ipsec 4 ,
.Xr pci 4 ,
.Xr qat 4 ,
.Xr random 4 ,
.Xr crypto 7 ,
.Xr crypto 9
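As the DESCRIPTION above notes, the QAT endpoint is enumerated over PCIe, so its presence can be confirmed with pciconf(8) before the driver is loaded. A sketch of a runtime check and load follows; the module names are assumed to match the loader.conf variables in the SYNOPSIS and are not part of the manual page itself:

	# Confirm the QAT endpoint shows up on the PCI bus
	pciconf -lv | grep -B 3 -i qat

	# Load the firmware and driver modules by hand instead of at boot
	kldload qat_c2xxxfw
	kldload qat_c2xxx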

View File

@@ -467,8 +467,10 @@ device vmd
device pmspcv
#
# Intel QuickAssist
device qat
# Intel QuickAssist driver with OpenCrypto support
#
# Only for legacy Atom C2XXX chipsets.
device qat_c2xxx
#
# SafeNet crypto driver: can be moved to the MI NOTES as soon as

View File

@@ -288,15 +288,10 @@ dev/mana/mana_sysctl.c optional mana
dev/mana/shm_channel.c optional mana
dev/mana/hw_channel.c optional mana
dev/mana/gdma_util.c optional mana
dev/qat/qat.c optional qat
dev/qat/qat_ae.c optional qat
dev/qat/qat_c2xxx.c optional qat
dev/qat/qat_c3xxx.c optional qat
dev/qat/qat_c62x.c optional qat
dev/qat/qat_d15xx.c optional qat
dev/qat/qat_dh895xcc.c optional qat
dev/qat/qat_hw15.c optional qat
dev/qat/qat_hw17.c optional qat
dev/qat_c2xxx/qat.c optional qat_c2xxx
dev/qat_c2xxx/qat_ae.c optional qat_c2xxx
dev/qat_c2xxx/qat_c2xxx.c optional qat_c2xxx
dev/qat_c2xxx/qat_hw15.c optional qat_c2xxx
libkern/x86/crc32_sse42.c standard
#
# x86 shared code between IA32 and AMD64 architectures

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,298 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c3xxxreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
static uint32_t
qat_c3xxx_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C3XXX) &
ACCEL_MASK_C3XXX);
}
static uint32_t
qat_c3xxx_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, me_strap, me_disable, ssms_disabled;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
/* If SSMs are disabled, then disable the corresponding MEs */
ssms_disabled = (~qat_c3xxx_get_accel_mask(sc)) & ACCEL_MASK_C3XXX;
me_disable = 0x3;
while (ssms_disabled) {
if (ssms_disabled & 1)
me_strap |= me_disable;
ssms_disabled >>= 1;
me_disable <<= 2;
}
return (~(fusectl | me_strap)) & AE_MASK_C3XXX;
}
static enum qat_sku
qat_c3xxx_get_sku(struct qat_softc *sc)
{
switch (sc->sc_ae_num) {
case MAX_AE_C3XXX:
return QAT_SKU_4;
}
return QAT_SKU_UNKNOWN;
}
static uint32_t
qat_c3xxx_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse, strap;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4);
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C3XXX)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C3XXX)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
return cap;
}
static const char *
qat_c3xxx_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_C3XXX;
}
static void
qat_c3xxx_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_C3XXX, SMIA0_MASK_C3XXX);
qat_misc_write_4(sc, SMIAPF1_C3XXX, SMIA1_MASK_C3XXX);
}
/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA
};
static void
qat_c3xxx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
int i;
for (i = 1; i < MAX_AE_C3XXX; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
thrd_to_arb_map[i] = 0;
}
*arb_map_config = thrd_to_arb_map;
}
static void
qat_c3xxx_enable_error_interrupts(struct qat_softc *sc)
{
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C3XXX); /* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C3XXX); /* ME4-ME5 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C3XXX); /* SSM2 */
/* Reset everything except VFtoPF1_16. */
qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C3XXX);
/* RI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, RICPPINTCTL_C3XXX, RICPP_EN_C3XXX);
/* TI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, TICPPINTCTL_C3XXX, TICPP_EN_C3XXX);
/* Enable CFC Error interrupts and logging. */
qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C3XXX, CPP_CFC_UE_C3XXX);
}
static void
qat_c3xxx_disable_error_interrupts(struct qat_softc *sc)
{
/* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C3XXX | ERRMSK0_CERR_C3XXX);
/* ME4-ME5 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C3XXX | ERRMSK1_CERR_C3XXX);
/* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C3XXX);
/* SSM2 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C3XXX);
}
static void
qat_c3xxx_enable_error_correction(struct qat_softc *sc)
{
u_int i, mask;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C3XXX(i),
ENABLE_AE_ECC_ERR_C3XXX);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C3XXX(i),
ENABLE_AE_ECC_PARITY_CORR_C3XXX);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C3XXX);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C3XXX);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C3XXX);
}
qat_c3xxx_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_c3xxx = {
.qhw_sram_bar_id = BAR_SRAM_ID_C3XXX,
.qhw_misc_bar_id = BAR_PMISC_ID_C3XXX,
.qhw_etr_bar_id = BAR_ETR_ID_C3XXX,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C3XXX,
.qhw_ae_offset = AE_OFFSET_C3XXX,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_C3XXX,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C3XXX,
.qhw_num_banks = ETR_MAX_BANKS_C3XXX,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_C3XXX,
.qhw_num_engines = MAX_AE_C3XXX,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_C3XXX,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C3XXX,
.qhw_clock_per_sec = CLOCK_PER_SEC_C3XXX,
.qhw_fw_auth = true,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_C3XXX,
.qhw_mmp_fwname = AE_FW_MMP_NAME_C3XXX,
.qhw_prod_type = AE_FW_PROD_TYPE_C3XXX,
.qhw_get_accel_mask = qat_c3xxx_get_accel_mask,
.qhw_get_ae_mask = qat_c3xxx_get_ae_mask,
.qhw_get_sku = qat_c3xxx_get_sku,
.qhw_get_accel_cap = qat_c3xxx_get_accel_cap,
.qhw_get_fw_uof_name = qat_c3xxx_get_fw_uof_name,
.qhw_enable_intr = qat_c3xxx_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_c3xxx_get_arb_mapping,
.qhw_enable_error_correction = qat_c3xxx_enable_error_correction,
.qhw_disable_error_interrupts = qat_c3xxx_disable_error_interrupts,
.qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

View File

@@ -1,178 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c3xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_C3XXXREG_H_
#define _DEV_PCI_QAT_C3XXXREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_C3XXX 3
#define MAX_AE_C3XXX 6
/* PCIe BAR index */
#define BAR_SRAM_ID_C3XXX NO_PCI_REG
#define BAR_PMISC_ID_C3XXX 0
#define BAR_ETR_ID_C3XXX 1
/* BAR PMISC sub-regions */
#define AE_OFFSET_C3XXX 0x20000
#define AE_LOCAL_OFFSET_C3XXX 0x20800
#define CAP_GLOBAL_OFFSET_C3XXX 0x30000
#define SOFTSTRAP_REG_C3XXX 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_C3XXX __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_C3XXX __BIT(24)
#define ACCEL_REG_OFFSET_C3XXX 16
#define ACCEL_MASK_C3XXX 0x7
#define AE_MASK_C3XXX 0x3F
#define SMIAPF0_C3XXX 0x3A028
#define SMIAPF1_C3XXX 0x3A030
#define SMIA0_MASK_C3XXX 0xFFFF
#define SMIA1_MASK_C3XXX 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_C3XXX(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_C3XXX(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_C3XXX __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_C3XXX (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_C3XXX __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_C3XXX (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_C3XXX (0xFFFF << 9)
#define ERRSOU3_VF2PF_C3XXX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_C3XXX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_C3XXX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_C3XXX (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_C3XXX (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_C3XXX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_C3XXX (__BIT(9) | __BIT(1))
#define ERRMSK3_UERR_C3XXX (__BIT(6) | __BIT(5) | __BIT(4) | __BIT(3) | \
__BIT(2) | __BIT(0))
#define ERRMSK5_UERR_C3XXX (__BIT(16))
/* RI CPP control */
#define RICPPINTCTL_C3XXX (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_C3XXX (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_C3XXX (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_C3XXX \
(__BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_C3XXX (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_C3XXX (__BIT(1) | __BIT(0))
#define SLICEPWRDOWN_C3XXX(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_C3XXX \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_C3XXX(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_C3XXX \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_C3XXX (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_C3XXX (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_C3XXX 16
#define ETR_TX_RX_GAP_C3XXX 8
#define ETR_TX_RINGS_MASK_C3XXX 0xFF
#define ETR_BUNDLE_SIZE_C3XXX 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_C3XXX 0x02000000
#define AE_FW_MOF_NAME_C3XXX "qat_c3xxxfw"
#define AE_FW_MMP_NAME_C3XXX "qat_c3xxx_mmp"
#define AE_FW_UOF_NAME_C3XXX "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_C3XXX (685 * 1000000 / 16)
#endif

View File

@@ -1,314 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c62xreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
static uint32_t
qat_c62x_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C62X) &
ACCEL_MASK_C62X);
}
static uint32_t
qat_c62x_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, me_strap, me_disable, ssms_disabled;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
/* If SSMs are disabled, then disable the corresponding MEs */
ssms_disabled = (~qat_c62x_get_accel_mask(sc)) & ACCEL_MASK_C62X;
me_disable = 0x3;
while (ssms_disabled) {
if (ssms_disabled & 1)
me_strap |= me_disable;
ssms_disabled >>= 1;
me_disable <<= 2;
}
return (~(fusectl | me_strap)) & AE_MASK_C62X;
}
static enum qat_sku
qat_c62x_get_sku(struct qat_softc *sc)
{
switch (sc->sc_ae_num) {
case 8:
return QAT_SKU_2;
case MAX_AE_C62X:
return QAT_SKU_4;
}
return QAT_SKU_UNKNOWN;
}
static uint32_t
qat_c62x_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse, strap;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C62X)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C62X)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
return cap;
}
static const char *
qat_c62x_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_C62X;
}
static void
qat_c62x_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_C62X, SMIA0_MASK_C62X);
qat_misc_write_4(sc, SMIAPF1_C62X, SMIA1_MASK_C62X);
}
/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
static void
qat_c62x_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
int i;
for (i = 1; i < MAX_AE_C62X; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
thrd_to_arb_map[i] = 0;
}
*arb_map_config = thrd_to_arb_map;
}
static void
qat_c62x_enable_error_interrupts(struct qat_softc *sc)
{
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C62X); /* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C62X); /* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_C62X); /* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C62X); /* SSM2-SSM4 */
/* Reset everything except VFtoPF1_16. */
qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C62X);
/* Disable Secure RAM correctable error interrupt */
qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_C62X);
/* RI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, RICPPINTCTL_C62X, RICPP_EN_C62X);
/* TI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, TICPPINTCTL_C62X, TICPP_EN_C62X);
/* Enable CFC Error interrupts and logging. */
qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C62X, CPP_CFC_UE_C62X);
/* Enable SecureRAM to fix and log Correctable errors */
qat_misc_write_4(sc, SECRAMCERR_C62X, SECRAM_CERR_C62X);
/* Enable SecureRAM Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_C62X);
/* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_C62X);
}
static void
qat_c62x_disable_error_interrupts(struct qat_softc *sc)
{
/* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C62X | ERRMSK0_CERR_C62X);
/* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C62X | ERRMSK1_CERR_C62X);
/* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C62X | ERRMSK3_CERR_C62X);
/* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_C62X | ERRMSK4_CERR_C62X);
/* SSM2-SSM4 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C62X | ERRMSK5_CERR_C62X);
}
static void
qat_c62x_enable_error_correction(struct qat_softc *sc)
{
u_int i, mask;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C62X(i),
ENABLE_AE_ECC_ERR_C62X);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C62X(i),
ENABLE_AE_ECC_PARITY_CORR_C62X);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C62X);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C62X);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C62X);
}
qat_c62x_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_c62x = {
.qhw_sram_bar_id = BAR_SRAM_ID_C62X,
.qhw_misc_bar_id = BAR_PMISC_ID_C62X,
.qhw_etr_bar_id = BAR_ETR_ID_C62X,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C62X,
.qhw_ae_offset = AE_OFFSET_C62X,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_C62X,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C62X,
.qhw_num_banks = ETR_MAX_BANKS_C62X,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_C62X,
.qhw_num_engines = MAX_AE_C62X,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_C62X,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C62X,
.qhw_clock_per_sec = CLOCK_PER_SEC_C62X,
.qhw_fw_auth = true,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_C62X,
.qhw_mmp_fwname = AE_FW_MMP_NAME_C62X,
.qhw_prod_type = AE_FW_PROD_TYPE_C62X,
.qhw_get_accel_mask = qat_c62x_get_accel_mask,
.qhw_get_ae_mask = qat_c62x_get_ae_mask,
.qhw_get_sku = qat_c62x_get_sku,
.qhw_get_accel_cap = qat_c62x_get_accel_cap,
.qhw_get_fw_uof_name = qat_c62x_get_fw_uof_name,
.qhw_enable_intr = qat_c62x_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_c62x_get_arb_mapping,
.qhw_enable_error_correction = qat_c62x_enable_error_correction,
.qhw_disable_error_interrupts = qat_c62x_disable_error_interrupts,
.qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

View File

@@ -1,201 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_c62xreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_C62XREG_H_
#define _DEV_PCI_QAT_C62XREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_C62X 5
#define MAX_AE_C62X 10
/* PCIe BAR index */
#define BAR_SRAM_ID_C62X 0
#define BAR_PMISC_ID_C62X 1
#define BAR_ETR_ID_C62X 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_C62X 0x20000
#define AE_LOCAL_OFFSET_C62X 0x20800
#define CAP_GLOBAL_OFFSET_C62X 0x30000
#define SOFTSTRAP_REG_C62X 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_C62X __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_C62X __BIT(24)
#define ACCEL_REG_OFFSET_C62X 16
#define ACCEL_MASK_C62X 0x1F
#define AE_MASK_C62X 0x3FF
#define SMIAPF0_C62X 0x3A028
#define SMIAPF1_C62X 0x3A030
#define SMIA0_MASK_C62X 0xFFFF
#define SMIA1_MASK_C62X 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_C62X(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_C62X(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_C62X __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_C62X (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_C62X __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_C62X (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_C62X (0xFFFF << 9)
#define ERRSOU3_VF2PF_C62X(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_C62X(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK3_CERR_C62X (__BIT(7))
#define ERRMSK4_CERR_C62X (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_C62X (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK3_UERR_C62X (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
__BIT(3) | __BIT(2) | __BIT(0))
#define ERRMSK4_UERR_C62X (__BIT(9) | __BIT(1))
#define ERRMSK5_UERR_C62X (__BIT(18) | __BIT(17) | __BIT(16))
/* RI CPP control */
#define RICPPINTCTL_C62X (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_C62X (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_C62X (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_C62X \
(__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_C62X (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_C62X (__BIT(1) | __BIT(0))
/* Correctable SecureRAM Error Reg */
#define SECRAMCERR_C62X (0x3AC00 + 0x00)
/* BIT(3) enables fixing and logging of correctable errors. */
#define SECRAM_CERR_C62X (__BIT(3))
/* Uncorrectable SecureRAM Error Reg */
/*
* BIT(17) enables interrupt.
* BIT(3) enables detecting and logging of uncorrectable errors.
*/
#define SECRAM_UERR_C62X (__BIT(17) | __BIT(3))
/* Miscellaneous Memory Target Errors Register */
/*
* BIT(3) enables detecting and logging push/pull data errors.
* BIT(2) enables interrupt.
*/
#define TGT_UERR_C62X (__BIT(3) | __BIT(2))
#define SLICEPWRDOWN_C62X(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_C62X \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_C62X(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_C62X \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_C62X (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_C62X (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_C62X 16
#define ETR_TX_RX_GAP_C62X 8
#define ETR_TX_RINGS_MASK_C62X 0xFF
#define ETR_BUNDLE_SIZE_C62X 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_C62X 0x01000000
#define AE_FW_MOF_NAME_C62X "qat_c62xfw"
#define AE_FW_MMP_NAME_C62X "qat_c62x_mmp"
#define AE_FW_UOF_NAME_C62X "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_C62X (685 * 1000000 / 16)
#endif

View File

@@ -1,314 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_d15xxreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
static uint32_t
qat_d15xx_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_D15XX) &
ACCEL_MASK_D15XX);
}
static uint32_t
qat_d15xx_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, me_strap, me_disable, ssms_disabled;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
/* If SSMs are disabled, then disable the corresponding MEs */
ssms_disabled = (~qat_d15xx_get_accel_mask(sc)) & ACCEL_MASK_D15XX;
me_disable = 0x3;
while (ssms_disabled) {
if (ssms_disabled & 1)
me_strap |= me_disable;
ssms_disabled >>= 1;
me_disable <<= 2;
}
return (~(fusectl | me_strap)) & AE_MASK_D15XX;
}
static enum qat_sku
qat_d15xx_get_sku(struct qat_softc *sc)
{
switch (sc->sc_ae_num) {
case 8:
return QAT_SKU_2;
case MAX_AE_D15XX:
return QAT_SKU_4;
}
return QAT_SKU_UNKNOWN;
}
static uint32_t
qat_d15xx_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse, strap;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_D15XX)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_D15XX)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
return cap;
}
static const char *
qat_d15xx_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_D15XX;
}
static void
qat_d15xx_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_D15XX, SMIA0_MASK_D15XX);
qat_misc_write_4(sc, SMIAPF1_D15XX, SMIA1_MASK_D15XX);
}
/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};
static void
qat_d15xx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
int i;
for (i = 1; i < MAX_AE_D15XX; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
thrd_to_arb_map[i] = 0;
}
*arb_map_config = thrd_to_arb_map;
}
static void
qat_d15xx_enable_error_interrupts(struct qat_softc *sc)
{
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_D15XX); /* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_D15XX); /* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_D15XX); /* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_D15XX); /* SSM2-SSM4 */
/* Reset everything except VFtoPF1_16. */
qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_D15XX);
/* Disable Secure RAM correctable error interrupt */
qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_D15XX);
/* RI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, RICPPINTCTL_D15XX, RICPP_EN_D15XX);
/* TI CPP bus interface error detection and reporting. */
qat_misc_write_4(sc, TICPPINTCTL_D15XX, TICPP_EN_D15XX);
/* Enable CFC Error interrupts and logging. */
qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_D15XX, CPP_CFC_UE_D15XX);
/* Enable SecureRAM to fix and log Correctable errors */
qat_misc_write_4(sc, SECRAMCERR_D15XX, SECRAM_CERR_D15XX);
/* Enable SecureRAM Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_D15XX);
/* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_D15XX);
}
static void
qat_d15xx_disable_error_interrupts(struct qat_softc *sc)
{
/* ME0-ME3 */
qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_D15XX | ERRMSK0_CERR_D15XX);
/* ME4-ME7 */
qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_D15XX | ERRMSK1_CERR_D15XX);
/* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_D15XX | ERRMSK3_CERR_D15XX);
/* ME8-ME9 */
qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_D15XX | ERRMSK4_CERR_D15XX);
/* SSM2-SSM4 */
qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_D15XX | ERRMSK5_CERR_D15XX);
}
static void
qat_d15xx_enable_error_correction(struct qat_softc *sc)
{
u_int i, mask;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_D15XX(i),
ENABLE_AE_ECC_ERR_D15XX);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_D15XX(i),
ENABLE_AE_ECC_PARITY_CORR_D15XX);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_D15XX);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_D15XX);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_D15XX);
}
qat_d15xx_enable_error_interrupts(sc);
}
const struct qat_hw qat_hw_d15xx = {
.qhw_sram_bar_id = BAR_SRAM_ID_D15XX,
.qhw_misc_bar_id = BAR_PMISC_ID_D15XX,
.qhw_etr_bar_id = BAR_ETR_ID_D15XX,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_D15XX,
.qhw_ae_offset = AE_OFFSET_D15XX,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_D15XX,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_D15XX,
.qhw_num_banks = ETR_MAX_BANKS_D15XX,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_D15XX,
.qhw_num_engines = MAX_AE_D15XX,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_D15XX,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_D15XX,
.qhw_clock_per_sec = CLOCK_PER_SEC_D15XX,
.qhw_fw_auth = true,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_D15XX,
.qhw_mmp_fwname = AE_FW_MMP_NAME_D15XX,
.qhw_prod_type = AE_FW_PROD_TYPE_D15XX,
.qhw_get_accel_mask = qat_d15xx_get_accel_mask,
.qhw_get_ae_mask = qat_d15xx_get_ae_mask,
.qhw_get_sku = qat_d15xx_get_sku,
.qhw_get_accel_cap = qat_d15xx_get_accel_cap,
.qhw_get_fw_uof_name = qat_d15xx_get_fw_uof_name,
.qhw_enable_intr = qat_d15xx_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_d15xx_get_arb_mapping,
.qhw_enable_error_correction = qat_d15xx_enable_error_correction,
.qhw_disable_error_interrupts = qat_d15xx_disable_error_interrupts,
.qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

View File

@@ -1,201 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_d15xxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_D15XXREG_H_
#define _DEV_PCI_QAT_D15XXREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_D15XX 5
#define MAX_AE_D15XX 10
/* PCIe BAR index */
#define BAR_SRAM_ID_D15XX 0
#define BAR_PMISC_ID_D15XX 1
#define BAR_ETR_ID_D15XX 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_D15XX 0x20000
#define AE_LOCAL_OFFSET_D15XX 0x20800
#define CAP_GLOBAL_OFFSET_D15XX 0x30000
#define SOFTSTRAP_REG_D15XX 0x2EC
#define SOFTSTRAP_SS_POWERGATE_CY_D15XX __BIT(23)
#define SOFTSTRAP_SS_POWERGATE_PKE_D15XX __BIT(24)
#define ACCEL_REG_OFFSET_D15XX 16
#define ACCEL_MASK_D15XX 0x1F
#define AE_MASK_D15XX 0x3FF
#define SMIAPF0_D15XX 0x3A028
#define SMIAPF1_D15XX 0x3A030
#define SMIA0_MASK_D15XX 0xFFFF
#define SMIA1_MASK_D15XX 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_D15XX(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_D15XX(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_D15XX __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_D15XX (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_D15XX __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_D15XX (__BIT(2))
/* Mask for VF2PF interrupts */
#define VF2PF1_16_D15XX (0xFFFF << 9)
#define ERRSOU3_VF2PF_D15XX(errsou3) (((errsou3) & 0x01FFFE00) >> 9)
#define ERRMSK3_VF2PF_D15XX(vf_mask) (((vf_mask) & 0xFFFF) << 9)
/* Masks for correctable error interrupts. */
#define ERRMSK0_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK1_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0))
#define ERRMSK3_CERR_D15XX (__BIT(7))
#define ERRMSK4_CERR_D15XX (__BIT(8) | __BIT(0))
#define ERRMSK5_CERR_D15XX (0)
/* Masks for uncorrectable error interrupts. */
#define ERRMSK0_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK1_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1))
#define ERRMSK3_UERR_D15XX (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \
__BIT(3) | __BIT(2) | __BIT(0))
#define ERRMSK4_UERR_D15XX (__BIT(9) | __BIT(1))
#define ERRMSK5_UERR_D15XX (__BIT(18) | __BIT(17) | __BIT(16))
/* RI CPP control */
#define RICPPINTCTL_D15XX (0x3A000 + 0x110)
/*
* BIT(2) enables error detection and reporting on the RI Parity Error.
* BIT(1) enables error detection and reporting on the RI CPP Pull interface.
* BIT(0) enables error detection and reporting on the RI CPP Push interface.
*/
#define RICPP_EN_D15XX (__BIT(2) | __BIT(1) | __BIT(0))
/* TI CPP control */
#define TICPPINTCTL_D15XX (0x3A400 + 0x138)
/*
* BIT(3) enables error detection and reporting on the ETR Parity Error.
* BIT(2) enables error detection and reporting on the TI Parity Error.
* BIT(1) enables error detection and reporting on the TI CPP Pull interface.
* BIT(0) enables error detection and reporting on the TI CPP Push interface.
*/
#define TICPP_EN_D15XX \
(__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0))
/* CFC Uncorrectable Errors */
#define CPP_CFC_ERR_CTRL_D15XX (0x30000 + 0xC00)
/*
* BIT(1) enables interrupt.
* BIT(0) enables detecting and logging of push/pull data errors.
*/
#define CPP_CFC_UE_D15XX (__BIT(1) | __BIT(0))
/* Correctable SecureRAM Error Reg */
#define SECRAMCERR_D15XX (0x3AC00 + 0x00)
/* BIT(3) enables fixing and logging of correctable errors. */
#define SECRAM_CERR_D15XX (__BIT(3))
/* Uncorrectable SecureRAM Error Reg */
/*
* BIT(17) enables interrupt.
* BIT(3) enables detecting and logging of uncorrectable errors.
*/
#define SECRAM_UERR_D15XX (__BIT(17) | __BIT(3))
/* Miscellaneous Memory Target Errors Register */
/*
* BIT(3) enables detecting and logging push/pull data errors.
* BIT(2) enables interrupt.
*/
#define TGT_UERR_D15XX (__BIT(3) | __BIT(2))
#define SLICEPWRDOWN_D15XX(i) ((i) * 0x4000 + 0x2C)
/* Enabling PKE4-PKE0. */
#define MMP_PWR_UP_MSK_D15XX \
(__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16))
/* CPM Uncorrectable Errors */
#define INTMASKSSM_D15XX(i) ((i) * 0x4000 + 0x0)
/* Disabling interrupts for correctable errors. */
#define INTMASKSSM_UERR_D15XX \
(__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1))
/* MMP */
/* BIT(3) enables correction. */
#define CERRSSMMMP_EN_D15XX (__BIT(3))
/* BIT(3) enables logging. */
#define UERRSSMMMP_EN_D15XX (__BIT(3))
/* ETR */
#define ETR_MAX_BANKS_D15XX 16
#define ETR_TX_RX_GAP_D15XX 8
#define ETR_TX_RINGS_MASK_D15XX 0xFF
#define ETR_BUNDLE_SIZE_D15XX 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_D15XX 0x01000000
#define AE_FW_MOF_NAME_D15XX "qat_d15xxfw"
#define AE_FW_MMP_NAME_D15XX "qat_d15xx_mmp"
#define AE_FW_UOF_NAME_D15XX "icp_qat_ae.suof"
/* Clock frequency */
#define CLOCK_PER_SEC_D15XX (685 * 1000000 / 16)
#endif

View File

@ -1,271 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */
/*
* Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 - 2020 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qatvar.h"
#include "qat_hw17reg.h"
#include "qat_hw17var.h"
#include "qat_dh895xccreg.h"
static uint32_t
qat_dh895xcc_get_accel_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_DH895XCC) &
ACCEL_MASK_DH895XCC);
}
static uint32_t
qat_dh895xcc_get_ae_mask(struct qat_softc *sc)
{
uint32_t fusectl, strap;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
return (~(fusectl | strap)) & AE_MASK_DH895XCC;
}
static enum qat_sku
qat_dh895xcc_get_sku(struct qat_softc *sc)
{
uint32_t fusectl, sku;
fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
sku = (fusectl & FUSECTL_SKU_MASK_DH895XCC) >>
FUSECTL_SKU_SHIFT_DH895XCC;
switch (sku) {
case FUSECTL_SKU_1_DH895XCC:
return QAT_SKU_1;
case FUSECTL_SKU_2_DH895XCC:
return QAT_SKU_2;
case FUSECTL_SKU_3_DH895XCC:
return QAT_SKU_3;
case FUSECTL_SKU_4_DH895XCC:
return QAT_SKU_4;
default:
return QAT_SKU_UNKNOWN;
}
}
static uint32_t
qat_dh895xcc_get_accel_cap(struct qat_softc *sc)
{
uint32_t cap, legfuse;
legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
QAT_ACCEL_CAP_CIPHER +
QAT_ACCEL_CAP_AUTHENTICATION +
QAT_ACCEL_CAP_COMPRESSION +
QAT_ACCEL_CAP_ZUC +
QAT_ACCEL_CAP_SHA3;
if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
cap &= ~QAT_ACCEL_CAP_CIPHER;
}
if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
cap &= ~QAT_ACCEL_CAP_COMPRESSION;
if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
cap &= ~QAT_ACCEL_CAP_ZUC;
return cap;
}
static const char *
qat_dh895xcc_get_fw_uof_name(struct qat_softc *sc)
{
return AE_FW_UOF_NAME_DH895XCC;
}
static void
qat_dh895xcc_enable_intr(struct qat_softc *sc)
{
/* Enable bundle and misc interrupts */
qat_misc_write_4(sc, SMIAPF0_DH895XCC, SMIA0_MASK_DH895XCC);
qat_misc_write_4(sc, SMIAPF1_DH895XCC, SMIA1_MASK_DH895XCC);
}
/* Worker-thread-to-service-arbiter mappings, selected by device SKU. */
static uint32_t thrd_to_arb_map_sku4[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static uint32_t thrd_to_arb_map_sku6[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
};
static void
qat_dh895xcc_get_arb_mapping(struct qat_softc *sc,
const uint32_t **arb_map_config)
{
uint32_t *map, sku;
int i;
sku = qat_dh895xcc_get_sku(sc);
switch (sku) {
case QAT_SKU_1:
map = thrd_to_arb_map_sku4;
break;
case QAT_SKU_2:
case QAT_SKU_4:
map = thrd_to_arb_map_sku6;
break;
default:
*arb_map_config = NULL;
return;
}
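/* Zero the map entries for engines absent from the AE mask. */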
for (i = 1; i < MAX_AE_DH895XCC; i++) {
if ((~sc->sc_ae_mask) & (1 << i))
map[i] = 0;
}
*arb_map_config = map;
}
static void
qat_dh895xcc_enable_error_correction(struct qat_softc *sc)
{
uint32_t mask;
u_int i;
/* Enable Accel Engine error detection & correction */
for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_DH895XCC(i),
ENABLE_AE_ECC_ERR_DH895XCC);
qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_DH895XCC(i),
ENABLE_AE_ECC_PARITY_CORR_DH895XCC);
}
/* Enable shared memory error detection & correction */
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_DH895XCC);
qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_DH895XCC);
}
}
const struct qat_hw qat_hw_dh895xcc = {
.qhw_sram_bar_id = BAR_SRAM_ID_DH895XCC,
.qhw_misc_bar_id = BAR_PMISC_ID_DH895XCC,
.qhw_etr_bar_id = BAR_ETR_ID_DH895XCC,
.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_DH895XCC,
.qhw_ae_offset = AE_OFFSET_DH895XCC,
.qhw_ae_local_offset = AE_LOCAL_OFFSET_DH895XCC,
.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_DH895XCC,
.qhw_num_banks = ETR_MAX_BANKS_DH895XCC,
.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
.qhw_num_accel = MAX_ACCEL_DH895XCC,
.qhw_num_engines = MAX_AE_DH895XCC,
.qhw_tx_rx_gap = ETR_TX_RX_GAP_DH895XCC,
.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_DH895XCC,
.qhw_clock_per_sec = CLOCK_PER_SEC_DH895XCC,
.qhw_fw_auth = false,
.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
.qhw_ring_asym_tx = 0,
.qhw_ring_asym_rx = 8,
.qhw_ring_sym_tx = 2,
.qhw_ring_sym_rx = 10,
.qhw_mof_fwname = AE_FW_MOF_NAME_DH895XCC,
.qhw_mmp_fwname = AE_FW_MMP_NAME_DH895XCC,
.qhw_prod_type = AE_FW_PROD_TYPE_DH895XCC,
.qhw_get_accel_mask = qat_dh895xcc_get_accel_mask,
.qhw_get_ae_mask = qat_dh895xcc_get_ae_mask,
.qhw_get_sku = qat_dh895xcc_get_sku,
.qhw_get_accel_cap = qat_dh895xcc_get_accel_cap,
.qhw_get_fw_uof_name = qat_dh895xcc_get_fw_uof_name,
.qhw_enable_intr = qat_dh895xcc_enable_intr,
.qhw_init_admin_comms = qat_adm_mailbox_init,
.qhw_send_admin_init = qat_adm_mailbox_send_init,
.qhw_init_arb = qat_arb_init,
.qhw_get_arb_mapping = qat_dh895xcc_get_arb_mapping,
.qhw_enable_error_correction = qat_dh895xcc_enable_error_correction,
.qhw_check_slice_hang = qat_check_slice_hang,
.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};

View File

@ -1,119 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014-2020 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_DH895XCCREG_H_
#define _DEV_PCI_QAT_DH895XCCREG_H_
/* Max number of accelerators and engines */
#define MAX_ACCEL_DH895XCC 6
#define MAX_AE_DH895XCC 12
/* PCIe BAR index */
#define BAR_SRAM_ID_DH895XCC 0
#define BAR_PMISC_ID_DH895XCC 1
#define BAR_ETR_ID_DH895XCC 2
/* BAR PMISC sub-regions */
#define AE_OFFSET_DH895XCC 0x20000
#define AE_LOCAL_OFFSET_DH895XCC 0x20800
#define CAP_GLOBAL_OFFSET_DH895XCC 0x30000
#define SOFTSTRAP_REG_DH895XCC 0x2EC
#define FUSECTL_SKU_MASK_DH895XCC 0x300000
#define FUSECTL_SKU_SHIFT_DH895XCC 20
#define FUSECTL_SKU_1_DH895XCC 0
#define FUSECTL_SKU_2_DH895XCC 1
#define FUSECTL_SKU_3_DH895XCC 2
#define FUSECTL_SKU_4_DH895XCC 3
#define ACCEL_REG_OFFSET_DH895XCC 13
#define ACCEL_MASK_DH895XCC 0x3F
#define AE_MASK_DH895XCC 0xFFF
#define SMIAPF0_DH895XCC 0x3A028
#define SMIAPF1_DH895XCC 0x3A030
#define SMIA0_MASK_DH895XCC 0xFFFFFFFF
#define SMIA1_MASK_DH895XCC 0x1
/* Error detection and correction */
#define AE_CTX_ENABLES_DH895XCC(i) ((i) * 0x1000 + 0x20818)
#define AE_MISC_CONTROL_DH895XCC(i) ((i) * 0x1000 + 0x20960)
#define ENABLE_AE_ECC_ERR_DH895XCC __BIT(28)
#define ENABLE_AE_ECC_PARITY_CORR_DH895XCC (__BIT(24) | __BIT(12))
#define ERRSSMSH_EN_DH895XCC __BIT(3)
/* BIT(2) enables the logging of push/pull data errors. */
#define PPERR_EN_DH895XCC (__BIT(2))
/* ETR */
#define ETR_MAX_BANKS_DH895XCC 32
#define ETR_TX_RX_GAP_DH895XCC 8
#define ETR_TX_RINGS_MASK_DH895XCC 0xFF
#define ETR_BUNDLE_SIZE_DH895XCC 0x1000
/* AE firmware */
#define AE_FW_PROD_TYPE_DH895XCC 0x00400000
#define AE_FW_MOF_NAME_DH895XCC "qat_dh895xccfw"
#define AE_FW_MMP_NAME_DH895XCC "qat_895xcc_mmp"
#define AE_FW_UOF_NAME_DH895XCC "icp_qat_ae.uof"
/* Clock frequency */
#define CLOCK_PER_SEC_DH895XCC (685 * 1000000 / 16)
#endif

View File

@ -1,674 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <machine/bus.h>
#include <opencrypto/xform.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qatvar.h"
#include "qat_hw17var.h"
int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
void *, void *);
int qat_adm_mailbox_send(struct qat_softc *,
struct fw_init_admin_req *, struct fw_init_admin_resp *);
int qat_adm_mailbox_send_init_me(struct qat_softc *);
int qat_adm_mailbox_send_hb_timer(struct qat_softc *);
int qat_adm_mailbox_send_fw_status(struct qat_softc *);
int qat_adm_mailbox_send_constants(struct qat_softc *);
int
qat_adm_mailbox_init(struct qat_softc *sc)
{
uint64_t addr;
int error;
struct qat_dmamem *qdm;
error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1,
PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
memcpy(qdm->qdm_dma_vaddr,
mailbox_const_tab, sizeof(mailbox_const_tab));
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_PREWRITE);
error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1,
PAGE_SIZE, PAGE_SIZE);
if (error)
return error;
addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
qat_misc_write_4(sc, ADMINMSGLR, addr);
return 0;
}
int
qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
void *in, void *out)
{
struct qat_dmamem *qdm;
uint32_t mailbox;
bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
int offset = ae * ADMINMSG_LEN * 2;
int times, received;
uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;
mailbox = qat_misc_read_4(sc, mb_offset);
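/* A mailbox value of 1 means a previous request is still in flight. */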
if (mailbox == 1)
return EAGAIN;
qdm = &sc->sc_admin_comms.qadc_dma;
memcpy(buf, in, ADMINMSG_LEN);
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
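/* Ring the doorbell; the firmware clears it once the response is ready. */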
qat_misc_write_4(sc, mb_offset, 1);
received = 0;
for (times = 0; times < 50; times++) {
DELAY(20000);
if (qat_misc_read_4(sc, mb_offset) == 0) {
received = 1;
break;
}
}
if (received) {
bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
} else {
device_printf(sc->sc_dev,
"Failed to send admin msg to accelerator\n");
}
return received ? 0 : EFAULT;
}
int
qat_adm_mailbox_send(struct qat_softc *sc,
struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
{
int error;
uint32_t mask;
uint8_t ae;
for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
if (!(mask & 1))
continue;
error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
if (error)
return error;
if (resp->init_resp_hdr.status) {
device_printf(sc->sc_dev,
"Failed to send admin msg: cmd %d\n",
req->init_admin_cmd_id);
return EFAULT;
}
}
return 0;
}
int
qat_adm_mailbox_send_init_me(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_INIT_ME;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
req.heartbeat_ticks =
sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
{
int error;
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_STATUS_GET;
error = qat_adm_mailbox_send(sc, &req, &resp);
if (error)
return error;
return 0;
}
int
qat_adm_mailbox_send_constants(struct qat_softc *sc)
{
struct fw_init_admin_req req;
struct fw_init_admin_resp resp;
memset(&req, 0, sizeof(req));
req.init_admin_cmd_id = FW_CONSTANTS_CFG;
req.init_cfg_sz = 1024;
req.init_cfg_ptr =
sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
return qat_adm_mailbox_send(sc, &req, &resp);
}
int
qat_adm_mailbox_send_init(struct qat_softc *sc)
{
int error;
error = qat_adm_mailbox_send_init_me(sc);
if (error)
return error;
error = qat_adm_mailbox_send_hb_timer(sc);
if (error)
return error;
error = qat_adm_mailbox_send_fw_status(sc);
if (error)
return error;
return qat_adm_mailbox_send_constants(sc);
}
int
qat_arb_init(struct qat_softc *sc)
{
uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
uint32_t arb, i;
const uint32_t *thd_2_arb_cfg;
/* Service arbiters are configured for 32-byte responses and the
* ring flow control check is enabled. */
for (arb = 0; arb < MAX_ARB; arb++)
qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
/* Map worker threads to service arbiters */
sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
if (!thd_2_arb_cfg)
return EINVAL;
for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
return 0;
}
int
qat_set_ssm_wdtimer(struct qat_softc *sc)
{
uint32_t timer;
u_int mask;
int i;
timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
if (!(mask & 1))
continue;
qat_misc_write_4(sc, SSMWDT(i), timer);
qat_misc_write_4(sc, SSMWDTPKE(i), timer);
}
return 0;
}
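/*
 * A small standalone sketch, not part of the driver: both
 * qat_adm_mailbox_send_hb_timer() and qat_set_ssm_wdtimer() above turn a
 * millisecond interval into device clock ticks as clock_per_sec / 1000 *
 * interval.  The 100 ms interval here is assumed; the real QAT_HB_INTERVAL
 * and QAT_SSM_WDT values are defined elsewhere in the driver.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* 685 MHz / 16 = 42,812,500 Hz, as in the register headers above. */
	const uint64_t clock_per_sec = 685ULL * 1000000 / 16;
	const uint64_t interval_ms = 100;	/* assumed stand-in for QAT_SSM_WDT */
	uint64_t ticks = clock_per_sec / 1000 * interval_ms;

	/* ~42,812 ticks per millisecond, so 100 ms programs ~4.28 million ticks. */
	printf("%ju ticks\n", (uintmax_t)ticks);
	return (0);
}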
int
qat_check_slice_hang(struct qat_softc *sc)
{
int handled = 0;
return handled;
}
static uint32_t
qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc,
struct qat_session *qs, uint32_t cd_blk_offset,
struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
(struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
desc->qcd_cipher_offset = cd_blk_offset;
cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);
return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8);
}
static void
qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
union hw_cipher_algo_blk *cipher)
{
const uint8_t *key;
cipher->max.cipher_config.val =
qat_crypto_load_cipher_session(desc, qs);
if (crp != NULL && crp->crp_cipher_key != NULL)
key = crp->crp_cipher_key;
else
key = qs->qs_cipher_key;
memcpy(cipher->max.key, key, qs->qs_cipher_klen);
}
static uint32_t
qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc,
struct qat_session *qs, uint32_t cd_blk_offset,
struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
(struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
struct qat_sym_hash_def const *hash_def;
(void)qat_crypto_load_auth_session(desc, qs, &hash_def);
auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize;
auth_cd_ctrl->inner_state1_sz =
roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
auth_cd_ctrl->inner_state2_sz =
roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
auth_cd_ctrl->inner_state2_offset =
auth_cd_ctrl->hash_cfg_offset +
((sizeof(struct hw_auth_setup) +
auth_cd_ctrl->inner_state1_sz) >> 3);
FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);
desc->qcd_auth_sz = auth_cd_ctrl->final_sz;
desc->qcd_auth_offset = cd_blk_offset;
desc->qcd_gcm_aad_sz_offset1 =
cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) +
auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN;
return roundup(auth_cd_ctrl->inner_state1_sz +
auth_cd_ctrl->inner_state2_sz +
sizeof(struct hw_auth_setup), 8);
}
static void
qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc,
const struct qat_session *qs, const struct cryptop *crp,
union hw_auth_algo_blk *auth)
{
struct qat_sym_hash_def const *hash_def;
uint8_t inner_state1_sz, *state1, *state2;
const uint8_t *key;
auth->max.inner_setup.auth_config.config =
qat_crypto_load_auth_session(desc, qs, &hash_def);
auth->max.inner_setup.auth_counter.counter =
htobe32(hash_def->qshd_qat->qshqi_auth_counter);
inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
state1 = auth->max.state1;
state2 = auth->max.state1 + inner_state1_sz;
switch (qs->qs_auth_algo) {
case HW_AUTH_ALGO_GALOIS_128:
key = NULL;
if (crp != NULL && crp->crp_cipher_key != NULL)
key = crp->crp_cipher_key;
else if (qs->qs_cipher_key != NULL)
key = qs->qs_cipher_key;
if (key != NULL) {
qat_crypto_gmac_precompute(desc, key,
qs->qs_cipher_klen, hash_def, state2);
}
break;
case HW_AUTH_ALGO_SHA1:
case HW_AUTH_ALGO_SHA256:
case HW_AUTH_ALGO_SHA384:
case HW_AUTH_ALGO_SHA512:
switch (qs->qs_auth_mode) {
case HW_AUTH_MODE0:
memcpy(state1, hash_def->qshd_alg->qshai_init_state,
inner_state1_sz);
/* Override for mode 0 hashes. */
auth->max.inner_setup.auth_counter.counter = 0;
break;
case HW_AUTH_MODE1:
if (crp != NULL && crp->crp_auth_key != NULL)
key = crp->crp_auth_key;
else
key = qs->qs_auth_key;
if (key != NULL) {
qat_crypto_hmac_precompute(desc, key,
qs->qs_auth_klen, hash_def, state1, state2);
}
break;
default:
panic("%s: unhandled auth mode %d", __func__,
qs->qs_auth_mode);
}
break;
default:
panic("%s: unhandled auth algorithm %d", __func__,
qs->qs_auth_algo);
}
}
static void
qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
struct fw_la_bulk_req *req)
{
union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
req_hdr->service_cmd_id = desc->qcd_cmd_id;
req_hdr->hdr_flags = FW_COMN_VALID;
req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
req_hdr->serv_specif_flags = 0;
cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
}
void
qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
struct qat_crypto_desc *desc)
{
union hw_cipher_algo_blk *cipher;
union hw_auth_algo_blk *auth;
struct fw_la_bulk_req *req_tmpl;
struct fw_comn_req_hdr *req_hdr;
uint32_t cd_blk_offset = 0;
int i;
uint8_t *cd_blk_ptr;
req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
req_hdr = &req_tmpl->comn_hdr;
cd_blk_ptr = desc->qcd_content_desc;
memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
qat_hw17_init_comn_req_hdr(desc, req_tmpl);
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
cd_blk_offset);
cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc,
qs, cd_blk_offset, req_tmpl,
desc->qcd_slices[i + 1]);
qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL,
cipher);
break;
case FW_SLICE_AUTH:
auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
cd_blk_offset);
cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc,
qs, cd_blk_offset, req_tmpl,
desc->qcd_slices[i + 1]);
qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth);
req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
break;
}
}
req_tmpl->cd_pars.s.content_desc_params_sz =
roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
req_hdr->serv_specif_flags |=
FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS;
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}
static void
qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc,
const struct qat_session *qs, struct qat_sym_cookie *qsc,
struct fw_la_bulk_req *bulk_req, const struct cryptop *crp)
{
union hw_auth_algo_blk *auth;
union hw_cipher_algo_blk *cipher;
uint8_t *cdesc;
int i;
cdesc = qsc->qsc_content_desc;
memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
for (i = 0; i < MAX_FW_SLICE; i++) {
switch (desc->qcd_slices[i]) {
case FW_SLICE_CIPHER:
cipher = (union hw_cipher_algo_blk *)
(cdesc + desc->qcd_cipher_offset);
qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp,
cipher);
break;
case FW_SLICE_AUTH:
auth = (union hw_auth_algo_blk *)
(cdesc + desc->qcd_auth_offset);
qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth);
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
break;
default:
MPASS(0);
}
}
bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr;
}
void
qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
struct qat_session *qs, const struct qat_crypto_desc *desc,
struct qat_sym_cookie *qsc, struct cryptop *crp)
{
struct qat_sym_bulk_cookie *qsbc;
struct fw_la_bulk_req *bulk_req;
struct fw_la_cipher_req_params *cipher_param;
struct fw_la_auth_req_params *auth_param;
bus_addr_t digest_paddr;
uint32_t aad_sz, *aad_szp;
uint8_t *req_params_ptr;
enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
qsbc = &qsc->qsc_bulk_cookie;
bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_obuffer_list_desc_paddr;
} else {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_buffer_list_desc_paddr;
}
if (__predict_false(crp->crp_cipher_key != NULL ||
crp->crp_auth_key != NULL))
qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
digest_paddr = 0;
if (desc->qcd_auth_sz != 0)
digest_paddr = qsc->qsc_auth_res_paddr;
req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;
cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr;
auth_param = (struct fw_la_auth_req_params *)
(req_params_ptr + sizeof(struct fw_la_cipher_req_params));
cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
/*
* The SG list layout is a bit different for GCM and GMAC, so it's
* simpler to handle those cases separately.
*/
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
if (cmd_id != FW_LA_CMD_AUTH) {
/*
* Don't fill out the cipher block if we're doing GMAC
* only.
*/
cipher_param->cipher_offset = 0;
cipher_param->cipher_length = crp->crp_payload_length;
}
auth_param->auth_off = 0;
auth_param->auth_len = crp->crp_payload_length;
auth_param->auth_res_addr = digest_paddr;
auth_param->auth_res_sz = desc->qcd_auth_sz;
auth_param->u1.aad_adr =
crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0;
auth_param->u2.aad_sz =
roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN);
auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3;
/*
* Update the hash state block if necessary. This only occurs
* when the AAD length changes between requests in a session and
* is synchronized by qat_process().
*/
aad_sz = htobe32(crp->crp_aad_length);
aad_szp = (uint32_t *)(
__DECONST(uint8_t *, desc->qcd_content_desc) +
desc->qcd_gcm_aad_sz_offset1);
if (__predict_false(*aad_szp != aad_sz)) {
*aad_szp = aad_sz;
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map,
BUS_DMASYNC_PREWRITE);
}
} else {
if (cmd_id != FW_LA_CMD_AUTH) {
if (crp->crp_aad_length == 0) {
cipher_param->cipher_offset = 0;
} else if (crp->crp_aad == NULL) {
cipher_param->cipher_offset =
crp->crp_payload_start - crp->crp_aad_start;
} else {
cipher_param->cipher_offset =
crp->crp_aad_length;
}
cipher_param->cipher_length = crp->crp_payload_length;
}
if (cmd_id != FW_LA_CMD_CIPHER) {
auth_param->auth_off = 0;
auth_param->auth_len =
crp->crp_payload_length + crp->crp_aad_length;
auth_param->auth_res_addr = digest_paddr;
auth_param->auth_res_sz = desc->qcd_auth_sz;
auth_param->u1.aad_adr = 0;
auth_param->u2.aad_sz = 0;
auth_param->hash_state_sz = 0;
}
}
}
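/*
 * A standalone sketch of the GCM AAD bookkeeping above, not part of the
 * driver: the AAD length is rounded up to the device's AAD alignment and then
 * expressed in 8-byte quadwords for hash_state_sz.  QAT_AES_GCM_AAD_ALIGN is
 * assumed to be 16 here; its real value lives in the driver headers.
 */
#include <stdint.h>
#include <stdio.h>

#define AAD_ALIGN	16	/* assumed stand-in for QAT_AES_GCM_AAD_ALIGN */

/* Same arithmetic as roundup2() from sys/param.h (power-of-two alignment). */
static uint32_t
aad_pad(uint32_t len)
{
	return ((len + AAD_ALIGN - 1) & ~(uint32_t)(AAD_ALIGN - 1));
}

int
main(void)
{
	uint32_t aad_len = 20;		/* example AAD length in bytes */
	uint32_t aad_sz = aad_pad(aad_len);

	/* 20 bytes of AAD round up to 32, i.e. a hash_state_sz of 4 quadwords. */
	printf("aad_sz=%u hash_state_sz=%u\n", (unsigned)aad_sz,
	    (unsigned)(aad_sz >> 3));
	return (0);
}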

File diff suppressed because it is too large

View File

@ -1,80 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/* $NetBSD: qat_hw17var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
/*
* Copyright (c) 2019 Internet Initiative Japan, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright(c) 2014 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* $FreeBSD$ */
#ifndef _DEV_PCI_QAT_HW17VAR_H_
#define _DEV_PCI_QAT_HW17VAR_H_
CTASSERT(CONTENT_DESC_MAX_SIZE >=
roundup(sizeof(union hw_cipher_algo_blk), 8) +
roundup(sizeof(union hw_auth_algo_blk), 8));
int qat_adm_mailbox_init(struct qat_softc *);
int qat_adm_mailbox_send_init(struct qat_softc *);
int qat_arb_init(struct qat_softc *);
int qat_set_ssm_wdtimer(struct qat_softc *);
int qat_check_slice_hang(struct qat_softc *);
void qat_hw17_crypto_setup_desc(struct qat_crypto *,
struct qat_session *, struct qat_crypto_desc *);
void qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *,
struct qat_session *, struct qat_crypto_desc const *,
struct qat_sym_cookie *, struct cryptop *);
#endif

View File

@ -92,21 +92,9 @@ __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
#include "qat_aevar.h"
extern struct qat_hw qat_hw_c2xxx;
extern struct qat_hw qat_hw_c3xxx;
extern struct qat_hw qat_hw_c62x;
extern struct qat_hw qat_hw_d15xx;
extern struct qat_hw qat_hw_dh895xcc;
#define PCI_VENDOR_INTEL 0x8086
#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18
#define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2
#define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3
#define PCI_PRODUCT_INTEL_C620_QAT 0x37c8
#define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9
#define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54
#define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55
#define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435
#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443
static const struct qat_product {
uint16_t qatp_vendor;
@ -118,18 +106,6 @@ static const struct qat_product {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
"Intel C2000 QuickAssist PF",
QAT_CHIP_C2XXX, &qat_hw_c2xxx },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
"Intel C3000 QuickAssist PF",
QAT_CHIP_C3XXX, &qat_hw_c3xxx },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
"Intel C620/Xeon D-2100 QuickAssist PF",
QAT_CHIP_C62X, &qat_hw_c62x },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
"Intel Xeon D-1500 QuickAssist PF",
QAT_CHIP_D15XX, &qat_hw_d15xx },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT,
"Intel 8950 QuickAssist PCIe Adapter PF",
QAT_CHIP_DH895XCC, &qat_hw_dh895xcc },
{ 0, 0, NULL, 0, NULL },
};
@ -2283,12 +2259,12 @@ static device_method_t qat_methods[] = {
};
static driver_t qat_driver = {
.name = "qat",
.name = "qat_c2xxx",
.methods = qat_methods,
.size = sizeof(struct qat_softc),
};
DRIVER_MODULE(qat, pci, qat_driver, 0, 0);
MODULE_VERSION(qat, 1);
MODULE_DEPEND(qat, crypto, 1, 1, 1);
MODULE_DEPEND(qat, pci, 1, 1, 1);
DRIVER_MODULE(qat_c2xxx, pci, qat_driver, 0, 0);
MODULE_VERSION(qat_c2xxx, 1);
MODULE_DEPEND(qat_c2xxx, crypto, 1, 1, 1);
MODULE_DEPEND(qat_c2xxx, pci, 1, 1, 1);

View File

@ -307,7 +307,7 @@ SUBDIR= \
pty \
puc \
pwm \
${_qat} \
${_qat_c2xxx} \
${_qatfw} \
${_qlxge} \
${_qlxgb} \
@ -681,7 +681,7 @@ _mgb= mgb
_nctgpio= nctgpio
_ntb= ntb
_ocs_fc= ocs_fc
_qat= qat
_qat_c2xxx= qat_c2xxx
_qatfw= qatfw
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_rdma= rdma

View File

@ -1,18 +1,13 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/dev/qat
.PATH: ${SRCTOP}/sys/dev/qat_c2xxx
KMOD= qat
KMOD= qat_c2xxx
SRCS= qat.c \
qat_ae.c \
qat_c2xxx.c \
qat_c3xxx.c \
qat_c62x.c \
qat_d15xx.c \
qat_dh895xcc.c \
qat_hw15.c \
qat_hw17.c
qat_hw15.c
SRCS+= bus_if.h cryptodev_if.h device_if.h pci_if.h

View File

@ -1,9 +1,5 @@
# $FreeBSD$
SUBDIR= qat_c2xxx \
qat_c3xxx \
qat_c62x \
qat_d15xx \
qat_dh895xcc
SUBDIR= qat_c2xxx
.include <bsd.subdir.mk>

View File

@ -1,11 +0,0 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/contrib/dev/qat
KMOD= qat_c3xxxfw
IMG1= qat_c3xxx
IMG2= qat_c3xxx_mmp
FIRMWS= ${IMG1}.bin:${KMOD}:111 ${IMG2}.bin:${IMG2}:111
.include <bsd.kmod.mk>

View File

@ -1,11 +0,0 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/contrib/dev/qat
KMOD= qat_c62xfw
IMG1= qat_c62x
IMG2= qat_c62x_mmp
FIRMWS= ${IMG1}.bin:${KMOD}:111 ${IMG2}.bin:${IMG2}:111
.include <bsd.kmod.mk>

View File

@ -1,11 +0,0 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/contrib/dev/qat
KMOD= qat_d15xxfw
IMG1= qat_d15xx
IMG2= qat_d15xx_mmp
FIRMWS= ${IMG1}.bin:${KMOD}:111 ${IMG2}.bin:${IMG2}:111
.include <bsd.kmod.mk>

View File

@ -1,11 +0,0 @@
# $FreeBSD$
.PATH: ${SRCTOP}/sys/contrib/dev/qat
KMOD= qat_dh895xccfw
IMG1= qat_895xcc
IMG2= qat_895xcc_mmp
FIRMWS= ${IMG1}.bin:${KMOD}:111 ${IMG2}.bin:${IMG2}:111
.include <bsd.kmod.mk>