Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Benjamin Herrenschmidt:
 "Some more P8 related bits, a bunch of fixes for our P7+/P8 HW crypto
  drivers, some added workarounds for those radeons that don't do
  proper 64-bit MSIs and a couple of other trivialities by myself."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/pseries: Make 32-bit MSI quirk work on systems lacking firmware support
  powerpc/powernv: Build a zImage.epapr
  powerpc: Make radeon 32-bit MSI quirk work on powernv
  powerpc: Context switch more PMU related SPRs
  powerpc/powernv: Fix condition for when to invalidate the TCE cache
  powerpc/pci: Fix bogus message at boot about empty memory resources
  powerpc: Fix TLB cleanup at boot on POWER8
  drivers/crypto/nx: Fixes for multiple races and issues
commit d4c2456a18
arch/powerpc/include/asm/pci-bridge.h
@@ -174,6 +174,8 @@ struct pci_dn {
 /* Get the pointer to a device_node's pci_dn */
 #define PCI_DN(dn)	((struct pci_dn *) (dn)->data)
 
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+
 extern void * update_dn_pci_info(struct device_node *dn, void *data);
 
 static inline int pci_device_from_OF_node(struct device_node *np,
arch/powerpc/include/asm/processor.h
@@ -284,6 +284,12 @@ struct thread_struct {
 	unsigned long	ebbrr;
 	unsigned long	ebbhr;
 	unsigned long	bescr;
+	unsigned long	siar;
+	unsigned long	sdar;
+	unsigned long	sier;
+	unsigned long	mmcr0;
+	unsigned long	mmcr2;
+	unsigned long	mmcra;
 #endif
 };
 
arch/powerpc/kernel/asm-offsets.c
@@ -127,6 +127,12 @@ int main(void)
 	DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
 	DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
 	DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
+	DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
+	DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
+	DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
+	DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
+	DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
+	DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
arch/powerpc/kernel/cpu_setup_power.S
@@ -135,8 +135,12 @@ __init_HFSCR:
 	blr
 
 __init_TLB:
-	/* Clear the TLB */
-	li	r6,128
+	/*
+	 * Clear the TLB using the "IS 3" form of tlbiel instruction
+	 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
+	 * so we just always do 512
+	 */
+	li	r6,512
 	mtctr	r6
 	li	r7,0xc00	/* IS field = 0b11 */
 	ptesync
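For readers unfamiliar with the "IS 3" form: each tlbiel targets one congruence class, and the loop that follows this hunk in cpu_setup_power.S steps through all of them using the count loaded above. A minimal C model of the traversal (illustrative only; the 0x1000 set-index stride is an assumption based on the loop body in the full file, which the hunk does not show):

#include <stdint.h>
#include <stdio.h>

#define TLBIEL_IS_3		0xc00	/* IS field = 0b11: invalidate by class */
#define TLBIEL_SET_STRIDE	0x1000	/* assumed set-index increment */

static void tlbiel_all(unsigned int nsets)
{
	uint64_t rb = TLBIEL_IS_3;

	for (unsigned int i = 0; i < nsets; i++) {
		/* stand-in for the tlbiel instruction */
		printf("tlbiel rb=0x%llx (congruence class %u)\n",
		       (unsigned long long)rb, i);
		rb += TLBIEL_SET_STRIDE;
	}
}

int main(void)
{
	/* P7 has 128 classes, P8 has 512; always doing 512 is the
	 * conservative choice the comment in the hunk describes. */
	tlbiel_all(512);
	return 0;
}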
arch/powerpc/kernel/entry_64.S
@@ -465,6 +465,20 @@ BEGIN_FTR_SECTION
 	std	r0, THREAD_EBBHR(r3)
 	mfspr	r0, SPRN_EBBRR
 	std	r0, THREAD_EBBRR(r3)
+
+	/* PMU registers made user read/(write) by EBB */
+	mfspr	r0, SPRN_SIAR
+	std	r0, THREAD_SIAR(r3)
+	mfspr	r0, SPRN_SDAR
+	std	r0, THREAD_SDAR(r3)
+	mfspr	r0, SPRN_SIER
+	std	r0, THREAD_SIER(r3)
+	mfspr	r0, SPRN_MMCR0
+	std	r0, THREAD_MMCR0(r3)
+	mfspr	r0, SPRN_MMCR2
+	std	r0, THREAD_MMCR2(r3)
+	mfspr	r0, SPRN_MMCRA
+	std	r0, THREAD_MMCRA(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 #endif
 
@@ -560,6 +574,20 @@ BEGIN_FTR_SECTION
 	ld	r0, THREAD_EBBRR(r4)
 	mtspr	SPRN_EBBRR, r0
+
+	/* PMU registers made user read/(write) by EBB */
+	ld	r0, THREAD_SIAR(r4)
+	mtspr	SPRN_SIAR, r0
+	ld	r0, THREAD_SDAR(r4)
+	mtspr	SPRN_SDAR, r0
+	ld	r0, THREAD_SIER(r4)
+	mtspr	SPRN_SIER, r0
+	ld	r0, THREAD_MMCR0(r4)
+	mtspr	SPRN_MMCR0, r0
+	ld	r0, THREAD_MMCR2(r4)
+	mtspr	SPRN_MMCR2, r0
+	ld	r0, THREAD_MMCRA(r4)
+	mtspr	SPRN_MMCRA, r0
 
 	ld	r0,THREAD_TAR(r4)
 	mtspr	SPRN_TAR,r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
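The two hunks are mirror images: _switch saves each extra SPR into the new thread_struct fields on the way out and restores them on the way in, guarded by the same ARCH_207S feature section. A rough C sketch of the pairing (not the kernel code; the SPR numbers are placeholders and mfspr_sim/mtspr_sim stand in for the mfspr/mtspr instructions):

enum sprn { SPRN_SIAR, SPRN_SDAR, SPRN_SIER,
	    SPRN_MMCR0, SPRN_MMCR2, SPRN_MMCRA, SPRN_MAX };

static unsigned long spr_file[SPRN_MAX];	/* fake register file */

static unsigned long mfspr_sim(enum sprn r)         { return spr_file[r]; }
static void mtspr_sim(enum sprn r, unsigned long v) { spr_file[r] = v; }

struct thread_pmu_state {
	unsigned long siar, sdar, sier;
	unsigned long mmcr0, mmcr2, mmcra;
};

static void save_pmu_sprs(struct thread_pmu_state *t)	/* switch-out */
{
	t->siar  = mfspr_sim(SPRN_SIAR);
	t->sdar  = mfspr_sim(SPRN_SDAR);
	t->sier  = mfspr_sim(SPRN_SIER);
	t->mmcr0 = mfspr_sim(SPRN_MMCR0);
	t->mmcr2 = mfspr_sim(SPRN_MMCR2);
	t->mmcra = mfspr_sim(SPRN_MMCRA);
}

static void restore_pmu_sprs(const struct thread_pmu_state *t)	/* switch-in */
{
	mtspr_sim(SPRN_SIAR,  t->siar);
	mtspr_sim(SPRN_SDAR,  t->sdar);
	mtspr_sim(SPRN_SIER,  t->sier);
	mtspr_sim(SPRN_MMCR0, t->mmcr0);
	mtspr_sim(SPRN_MMCR2, t->mmcr2);
	mtspr_sim(SPRN_MMCRA, t->mmcra);
}

int main(void)
{
	struct thread_pmu_state prev = {0}, next = {0};

	save_pmu_sprs(&prev);		/* outgoing thread */
	restore_pmu_sprs(&next);	/* incoming thread */
	return 0;
}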
arch/powerpc/kernel/pci-common.c
@@ -1520,9 +1520,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
 	for (i = 0; i < 3; ++i) {
 		res = &hose->mem_resources[i];
 		if (!res->flags) {
-			printk(KERN_ERR "PCI: Memory resource 0 not set for "
-			       "host bridge %s (domain %d)\n",
-			       hose->dn->full_name, hose->global_number);
+			if (i == 0)
+				printk(KERN_ERR "PCI: Memory resource 0 not set for "
+				       "host bridge %s (domain %d)\n",
+				       hose->dn->full_name, hose->global_number);
 			continue;
 		}
 		offset = hose->mem_offset[i];
arch/powerpc/kernel/pci_64.c
@@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
+
+static void quirk_radeon_32bit_msi(struct pci_dev *dev)
+{
+	struct pci_dn *pdn = pci_get_pdn(dev);
+
+	if (pdn)
+		pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
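DECLARE_PCI_FIXUP_FINAL hooks the function into the fixup pass that runs after a matching device is probed, so force_32bit_msi is already set by the time a driver enables MSIs. Covering another broken part would be one more line; the device ID below is purely a placeholder, not a known broken device:

/* Hypothetical example only: extend the quirk to another device ID
 * (0x1234 is a placeholder, not a real part number). */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x1234, quirk_radeon_32bit_msi);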
arch/powerpc/kernel/pci_dn.c
@@ -32,6 +32,14 @@
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
 
+struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
+{
+	struct device_node *dn = pci_device_to_OF_node(pdev);
+	if (!dn)
+		return NULL;
+	return PCI_DN(dn);
+}
+
 /*
  * Traverse_func that inits the PCI fields of the device node.
  * NOTE: this *must* be done before read/write config to the device.
arch/powerpc/platforms/powernv/Kconfig
@@ -6,6 +6,7 @@ config PPC_POWERNV
 	select PPC_ICP_NATIVE
 	select PPC_P7_NAP
 	select PPC_PCI_CHOICE if EMBEDDED
+	select EPAPR_BOOT
 	default y
 
 config POWERNV_MSI
arch/powerpc/platforms/powernv/pci-ioda.c
@@ -68,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
-static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
-{
-	struct device_node *np;
-
-	np = pci_device_to_OF_node(dev);
-	if (!np)
-		return NULL;
-	return PCI_DN(np);
-}
-
 static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
 	unsigned long pe;
@@ -110,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
 {
 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
 	struct pnv_phb *phb = hose->private_data;
-	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+	struct pci_dn *pdn = pci_get_pdn(dev);
 
 	if (!pdn)
 		return NULL;
@@ -173,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 
 	/* Add to all parents PELT-V */
 	while (parent) {
-		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+		struct pci_dn *pdn = pci_get_pdn(parent);
 		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
 			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
 					pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
@@ -252,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
 	struct pnv_phb *phb = hose->private_data;
-	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+	struct pci_dn *pdn = pci_get_pdn(dev);
 	struct pnv_ioda_pe *pe;
 	int pe_num;
 
@@ -323,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
-		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+		struct pci_dn *pdn = pci_get_pdn(dev);
 
 		if (pdn == NULL) {
 			pr_warn("%s: No device node associated with device !\n",
@@ -436,7 +426,7 @@ static void pnv_pci_ioda_setup_PEs(void)
 
 static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
 {
-	struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
+	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
 
 	/*
@@ -768,6 +758,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
 				  unsigned int is_64, struct msi_msg *msg)
 {
 	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+	struct pci_dn *pdn = pci_get_pdn(dev);
 	struct irq_data *idata;
 	struct irq_chip *ichip;
 	unsigned int xive_num = hwirq - phb->msi_base;
@@ -783,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
 	if (pe->mve_number < 0)
 		return -ENXIO;
 
+	/* Force 32-bit MSI on some broken devices */
+	if (pdn && pdn->force_32bit_msi)
+		is_64 = 0;
+
 	/* Assign XIVE to PE */
 	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
 	if (rc) {
@@ -1035,7 +1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
 	if (!phb->initialized)
 		return 0;
 
-	pdn = pnv_ioda_get_pdn(dev);
+	pdn = pci_get_pdn(dev);
 	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
 		return -EINVAL;
 
arch/powerpc/platforms/powernv/pci.c
@@ -47,6 +47,10 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
 {
 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
 	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn = pci_get_pdn(pdev);
+
+	if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+		return -ENODEV;
 
 	return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
 }
@@ -367,7 +371,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
 }
 
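The pnv_tce_free() change is a classic copy-and-paste bug: the free path was testing TCE_PCI_SWINV_CREATE, the flag that belongs to the map path, so tables flagged only for invalidate-on-free never had their TCE cache flushed. A schematic of which flag must gate which path (illustrative only; the flag values are placeholders and the real functions are pnv_tce_build()/pnv_tce_free()):

#include <stdio.h>

#define TCE_PCI_SWINV_CREATE	1	/* invalidate after mapping */
#define TCE_PCI_SWINV_FREE	2	/* invalidate after unmapping */

static void tce_invalidate_if_needed(unsigned long it_type, int freeing)
{
	unsigned long needed = freeing ? TCE_PCI_SWINV_FREE
				       : TCE_PCI_SWINV_CREATE;

	if (it_type & needed)
		printf("invalidate TCE cache (%s path)\n",
		       freeing ? "free" : "build");
}

int main(void)
{
	/* a table flagged only for invalidate-on-free: the old code's
	 * CREATE test would have skipped this invalidation */
	tce_invalidate_if_needed(TCE_PCI_SWINV_FREE, 1);
	return 0;
}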
arch/powerpc/platforms/pseries/msi.c
@@ -26,26 +26,6 @@ static int query_token, change_token;
 #define RTAS_CHANGE_MSIX_FN	4
 #define RTAS_CHANGE_32MSI_FN	5
 
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
-	struct device_node *dn;
-	struct pci_dn *pdn;
-
-	dn = pci_device_to_OF_node(pdev);
-	if (!dn) {
-		dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
-		return NULL;
-	}
-
-	pdn = PCI_DN(dn);
-	if (!pdn) {
-		dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
-		return NULL;
-	}
-
-	return pdn;
-}
-
 /* RTAS Helpers */
 
 static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
@@ -91,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
 {
 	struct pci_dn *pdn;
 
-	pdn = get_pdn(pdev);
+	pdn = pci_get_pdn(pdev);
 	if (!pdn)
 		return;
 
@@ -152,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
 	struct pci_dn *pdn;
 	const u32 *req_msi;
 
-	pdn = get_pdn(pdev);
+	pdn = pci_get_pdn(pdev);
 	if (!pdn)
 		return -ENODEV;
 
@@ -394,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev)
 	return 0;
 }
 
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+	u32 addr_hi, addr_lo;
+
+	/*
+	 * We should only get in here for IODA1 configs. This is based on the
+	 * fact that we using RTAS for MSIs, we don't have the 32 bit MSI RTAS
+	 * support, and we are in a PCIe Gen2 slot.
+	 */
+	dev_info(&pdev->dev,
+		 "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+	pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+	addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+	pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+	pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
 static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
 {
 	struct pci_dn *pdn;
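The bit manipulation in rtas_hack_32bit_msi_gen2() folds the top 16 bits of the 64-bit MSI address into bits 19:4 of a 32-bit address under the fixed 0xffff0000 window. A worked example with a made-up firmware address (only the arithmetic matters):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t msi_addr64 = 0x0010000000000000ULL;	/* hypothetical */
	uint32_t addr_hi = (uint32_t)(msi_addr64 >> 32);	/* 0x00100000 */
	uint32_t addr_lo;

	/* keep bits 63:48 of the original address in bits 19:4,
	 * under the fixed 0xffff0000 window */
	addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
	assert(addr_lo == 0xffff0100);
	return 0;
}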
@@ -401,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
 	struct msi_desc *entry;
 	struct msi_msg msg;
 	int nvec = nvec_in;
+	int use_32bit_msi_hack = 0;
 
-	pdn = get_pdn(pdev);
+	pdn = pci_get_pdn(pdev);
 	if (!pdn)
 		return -ENODEV;
 
@@ -428,15 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
 	 */
 again:
 	if (type == PCI_CAP_ID_MSI) {
-		if (pdn->force_32bit_msi)
+		if (pdn->force_32bit_msi) {
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
-		else
+			if (rc < 0) {
+				/*
+				 * We only want to run the 32 bit MSI hack below if
+				 * the max bus speed is Gen2 speed
+				 */
+				if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+					return rc;
+
+				use_32bit_msi_hack = 1;
+			}
+		} else
+			rc = -1;
+
+		if (rc < 0)
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
 
-		if (rc < 0 && !pdn->force_32bit_msi) {
+		if (rc < 0) {
 			pr_debug("rtas_msi: trying the old firmware call.\n");
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
 		}
+
+		if (use_32bit_msi_hack && rc > 0)
+			rtas_hack_32bit_msi_gen2(pdev);
 	} else
 		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
 
@@ -518,12 +532,3 @@ static int rtas_msi_init(void)
 }
 arch_initcall(rtas_msi_init);
 
-static void quirk_radeon(struct pci_dev *dev)
-{
-	struct pci_dn *pdn = get_pdn(dev);
-
-	if (pdn)
-		pdn->force_32bit_msi = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
drivers/crypto/nx/nx-aes-cbc.c
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
 	.cra_blocksize   = AES_BLOCK_SIZE,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
+	.cra_alignmask   = 0xf,
 	.cra_module      = THIS_MODULE,
 	.cra_init        = nx_crypto_ctx_aes_cbc_init,
 	.cra_exit        = nx_crypto_ctx_exit,
drivers/crypto/nx/nx-aes-ecb.c
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
 	.cra_priority    = 300,
 	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize   = AES_BLOCK_SIZE,
+	.cra_alignmask   = 0xf,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
 	.cra_module      = THIS_MODULE,
drivers/crypto/nx/nx-aes-gcm.c
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	if (enc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
-		nbytes -= AES_BLOCK_SIZE;
+		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 
 	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 
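On decrypt, the request length includes the authentication tag, and for GCM the tag length is whatever the tfm was configured with (4 to 16 bytes), not necessarily a full AES block; subtracting AES_BLOCK_SIZE miscounted the ciphertext whenever a shorter tag was in use. A worked illustration with made-up lengths:

#include <assert.h>

int main(void)
{
	unsigned int nbytes = 64 + 8;	/* ciphertext + 8-byte tag */
	unsigned int authsize = 8;	/* crypto_aead_authsize() result */

	/* old (buggy): nbytes -= AES_BLOCK_SIZE, i.e. 72 - 16 = 56 */
	/* new (fixed): */
	nbytes -= authsize;
	assert(nbytes == 64);		/* exactly the ciphertext length */
	return 0;
}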
drivers/crypto/nx/nx-sha256.c
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
 	 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+	if (len + sctx->count < SHA256_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count, data, len);
 		sctx->count += len;
 		goto out;
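The `<=` to `<` change targets the exact-block boundary: with `<=`, an update that brought the total to exactly SHA256_BLOCK_SIZE was buffered instead of being handed to the NX engine, while the post-processing path assumes the leftover is strictly smaller than a block. (The nx-sha512.c hunk below is the same fix, and the new `if (leftover)` guard in the next hunk handles the leftover == 0 case this creates.) A toy model of the decision:

#include <assert.h>

#define BLOCK 64	/* SHA256_BLOCK_SIZE */

/* Toy model of the buffering decision in nx_sha256_update():
 * returns 1 when the data should just be copied into the state
 * buffer, 0 when a block must be sent to the engine. */
static int buffered(unsigned int count, unsigned int len)
{
	return count + len < BLOCK;	/* was "<=" before the fix */
}

int main(void)
{
	assert(buffered(0, 63) == 1);	/* partial block: buffer it */
	/* exactly one block: must reach the engine now, leaving
	 * leftover == 0 for the guard added in the next hunk */
	assert(buffered(0, 64) == 0);
	return 0;
}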
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count = leftover;
 
 	csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 	struct nx_sg *in_sg, *out_sg;
 	int rc;
 
+
 	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-	atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
 		     &(nx_ctx->stats->sha256_bytes));
 	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
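message_bit_length counts bits while the sha256_bytes statistic counts bytes, so the old code inflated the counter eight-fold; the nx-sha512.c hunk below carries the same /8 fix. Trivial, but worth pinning down:

#include <assert.h>

int main(void)
{
	unsigned long long message_bit_length = 1024;	/* a 128-byte message */

	/* old: atomic64_add(1024, &sha256_bytes)  -> 8x too large  */
	/* new: atomic64_add(1024 / 8, ...)        -> correct bytes */
	assert(message_bit_length / 8 == 128);
	return 0;
}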
drivers/crypto/nx/nx-sha512.c
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
 	 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count[0], data, len);
 		sctx->count[0] += len;
 		goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
 
 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count[0] = leftover;
 
 	spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
 		     &(nx_ctx->stats->sha512_bytes));
 
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
drivers/crypto/nx/nx.c
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
 	struct nx_sg *nx_outsg = nx_ctx->out_sg;
-	struct blkcipher_walk walk;
-	int rc;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-	if (rc)
-		goto out;
 
 	if (iv)
-		memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+		memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-	while (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = blkcipher_walk_done(desc, &walk, 0);
-		if (rc)
-			break;
-	}
-
-	if (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = 0;
-	}
+	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
 
 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
 	 * buffers */
 	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-	return rc;
+
+	return 0;
 }
 
 /**
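Note the sign trick retained from the old code: `nx_ctx->in_sg - nx_insg` is start minus end, so op.inlen and op.outlen come out negative, which (per the comment) is how phyp is told the parameter is a scatter/gather list rather than a linear buffer. A self-contained sketch of that convention:

#include <assert.h>
#include <stddef.h>

struct nx_sg { unsigned long addr; unsigned int len; };

int main(void)
{
	struct nx_sg sg[8];
	struct nx_sg *start = sg;
	struct nx_sg *end = sg + 3;	/* as if 3 entries were built */
	long inlen = (start - end) * (long)sizeof(struct nx_sg);

	assert(inlen == -3 * (long)sizeof(struct nx_sg));
	assert(inlen < 0);	/* negative => "this is an sg list" */
	return 0;
}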
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out;
 
+	nx_driver.of.status = NX_OKAY;
+
 	rc = crypto_register_alg(&nx_ecb_aes_alg);
 	if (rc)
 		goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out_unreg_s512;
 
-	nx_driver.of.status = NX_OKAY;
-
 	goto out;
 
 out_unreg_s512:
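These last two hunks are one fix: crypto_register_alg() publishes an algorithm immediately, and the nx driver rejects requests while of.status is not NX_OKAY, so setting the status only after all registrations left a window where early requests against a healthy device would fail. The ordering rule, sketched with stand-in names (not the kernel API):

#include <stdio.h>

enum { NX_NOT_READY, NX_READY };
static int driver_status = NX_NOT_READY;

static int register_alg(const char *name)
{
	/* stand-in for crypto_register_alg(); in the kernel, users may
	 * start calling into the driver as soon as this returns */
	printf("registered %s (status=%d)\n", name, driver_status);
	return 0;
}

int main(void)
{
	driver_status = NX_READY;	/* must precede registration */
	if (register_alg("ecb(aes)"))
		return 1;
	if (register_alg("cbc(aes)"))
		return 1;
	return 0;
}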