[zinc] Add PowerPC-accelerated poly1305 from openssl/cryptogams

Unfortunately I am not seeing a speed-up with this patch,
but it does decrease CPU usage.

Currently this only runs on the outbound path, as the inbound path runs in
interrupt context (where SIMD is unavailable), but that can be fixed in Linux.
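
The sketch below illustrates that constraint; the names are illustrative and
not zinc's actual simd.h API. Anything running in hardirq or softirq context
reports "no SIMD", so the glue code falls back to the integer routines there.

#include <linux/hardirq.h>	/* in_interrupt() */

/* Illustrative sketch only: vector/VSX registers may only be used from
 * process context, so a check along these lines gates the fast path. */
static inline bool may_use_vsx_sketch(void)
{
	return !in_interrupt();
}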

v2: - Do not include the FPU version, as +10% performance on POWER8
    (admittedly better on really old CPUs, like Old World Macs) is not
    worth it, especially when there is a fast VSX version available.
    - Honor CONFIG_VSX.
Signed-off-by: Shawn Landden <shawn@git.icu>

Author: Shawn Landden <shawn@git.icu>
Date: 2019-05-11 14:19:51 -03:00
Parent: 2b2e9c08b3
Commit: cc6513fd7d
7 changed files with 2127 additions and 69 deletions


@@ -36,6 +36,8 @@ zinc-$(CONFIG_ZINC_ARCH_ARM64) += poly1305/poly1305-arm64.o
zinc-$(CONFIG_ZINC_ARCH_MIPS) += poly1305/poly1305-mips.o
AFLAGS_poly1305-mips.o += -O2 # This is required to fill the branch delay slots
zinc-$(CONFIG_ZINC_ARCH_MIPS64) += poly1305/poly1305-mips64.o
zinc-$(CONFIG_ZINC_ARCH_PPC32) += poly1305/poly1305-ppc.o
zinc-$(CONFIG_ZINC_ARCH_PPC64) += poly1305/poly1305-ppc.o
zinc-y += chacha20poly1305.o
@@ -46,16 +48,16 @@ zinc-y += curve25519/curve25519.o
zinc-$(CONFIG_ZINC_ARCH_ARM) += curve25519/curve25519-arm.o
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $< $(perlflags-y) > $@
cmd_perlasm = $(PERL) $(perlflags-y) $< $(perlargs-y) > $@
$(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perlasm)
kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
targets := $(patsubst $(kbuild-dir)/%.pl,%.S,$(wildcard $(patsubst %.o,$(kbuild-dir)/crypto/zinc/%.pl,$(zinc-y) $(zinc-m) $(zinc-))))
perlflags-$(CONFIG_ZINC_ARCH_PPC32) += linux32
perlargs-$(CONFIG_ZINC_ARCH_PPC32) += linux32
ifeq ($(CONFIG_ZINC_ARCH_PPC64),y)
perlflags-$(CONFIG_CPU_BIG_ENDIAN) += linux64
perlflags-$(CONFIG_CPU_LITTLE_ENDIAN) += linux64le
perlargs-$(CONFIG_CPU_BIG_ENDIAN) += linux64
perlargs-$(CONFIG_CPU_LITTLE_ENDIAN) += linux64le
endif
# Old kernels don't set this, which causes trouble.


@@ -6,6 +6,8 @@
# The original headers, including the original license headers, are
# included below for completeness.
#
# Changes: search in more places for ppc-xlate.pl
#
# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
@@ -73,6 +75,7 @@ $LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../perlasm/ppc-xlate.pl" and -f $xlate) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";


@@ -26,71 +26,6 @@ static void __init poly1305_fpu_init(void)
#endif
}
#if defined(CONFIG_ZINC_ARCH_ARM64)
struct poly1305_arch_internal {
union {
u32 h[5];
struct {
u64 h0, h1, h2;
};
};
u64 is_base2_26;
u64 r[2];
};
#elif defined(CONFIG_ZINC_ARCH_ARM)
struct poly1305_arch_internal {
union {
u32 h[5];
struct {
u64 h0, h1;
u32 h2;
} __packed;
};
u32 r[4];
u32 is_base2_26;
};
#endif
/* The NEON code uses base 2^26, while the scalar code uses base 2^64 on 64-bit
* and base 2^32 on 32-bit. If we hit the unfortunate situation of using NEON
* and then having to go back to scalar -- because the user is silly and has
* called the update function from two separate contexts -- then we need to
* convert back to the original base before proceeding. The below function is
* written for 64-bit integers, and so we have to swap words at the end on
* big-endian 32-bit. It is possible to reason that the initial reduction below
* is sufficient given the implementation invariants. However, for an avoidance
* of doubt and because this is not performance critical, we do the full
* reduction anyway.
*/
static void convert_to_base2_64(void *ctx)
{
struct poly1305_arch_internal *state = ctx;
u32 cy;
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !state->is_base2_26)
return;
cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
state->h0 = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
state->h1 = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
state->h2 = state->h[4] >> 24;
if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
state->h0 = rol64(state->h0, 32);
state->h1 = rol64(state->h1, 32);
}
#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
cy = (state->h2 >> 2) + (state->h2 & ~3ULL);
state->h2 &= 3;
state->h0 += cy;
state->h1 += (cy = ULT(state->h0, cy));
state->h2 += ULT(state->h1, cy);
#undef ULT
state->is_base2_26 = 0;
}
static inline bool poly1305_init_arch(void *ctx,
const u8 key[POLY1305_KEY_SIZE])
{


@@ -0,0 +1,60 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2019 Shawn Landden <shawn@git.icu>. All Rights Reserved.
*/
#include <asm/cpufeature.h>
asmlinkage void poly1305_init_int(void *ctx, const u8 key[16]);
asmlinkage void poly1305_blocks_int(void *ctx, const u8 *inp, size_t len,
u32 padbit);
asmlinkage void poly1305_emit_int(void *ctx, u8 mac[16],
const u32 nonce[4]);
asmlinkage void poly1305_blocks_vsx(void *ctx, const u8 *inp, size_t len,
u32 padbit);
static bool *const poly1305_nobs[] __initconst = {};
static void __init poly1305_fpu_init(void) {}
static inline bool poly1305_init_arch(void *ctx,
const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_int(ctx, key);
return true;
}
static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp,
size_t len, const u32 padbit,
simd_context_t *simd_context)
{
/* SIMD disables preemption, so relax after processing each page. */
BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
PAGE_SIZE % POLY1305_BLOCK_SIZE);
if (!IS_ENABLED(CONFIG_VSX) ||
!cpu_have_feature(PPC_MODULE_FEATURE_VEC_CRYPTO) ||
!simd_use(simd_context)) {
convert_to_base2_64(ctx);
poly1305_blocks_int(ctx, inp, len, padbit);
return true;
}
for (;;) {
const size_t bytes = min_t(size_t, len, PAGE_SIZE);
poly1305_blocks_vsx(ctx, inp, bytes, padbit);
len -= bytes;
if (!len)
break;
inp += bytes;
simd_relax(simd_context);
}
return true;
}
static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE],
const u32 nonce[4],
simd_context_t *simd_context)
{
poly1305_emit_int(ctx, mac, nonce);
return true;
}
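
The page-at-a-time loop in poly1305_blocks_arch() above exists because using
VSX keeps preemption disabled for the whole SIMD section. The sketch below is
hedged: it approximates the purpose of simd_relax() rather than reproducing
zinc's exact implementation, but it shows why relaxing after each page bounds
the preempt-off window to one page of Poly1305 work.

#include <linux/sched.h>	/* need_resched() */
/* simd_context_t, simd_get() and simd_put() come from zinc's simd.h. */

static inline void simd_relax_sketch(simd_context_t *ctx)
{
	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched()) {
		simd_put(ctx);	/* close the vector section; allow preemption */
		simd_get(ctx);	/* reopen it and continue hashing */
	}
}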

File diff suppressed because it is too large.


@@ -16,12 +16,81 @@
#include <linux/module.h>
#include <linux/init.h>
#if defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64) || defined(CONFIG_ZINC_ARCH_PPC32) || defined(CONFIG_ZINC_ARCH_PPC64)
#if defined(CONFIG_ZINC_ARCH_ARM64) || defined(CONFIG_ZINC_ARCH_PPC64)
struct poly1305_arch_internal {
union {
u32 h[5];
struct {
u64 h0, h1, h2;
};
};
u64 is_base2_26;
u64 r[2];
};
#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_PPC32)
struct poly1305_arch_internal {
union {
u32 h[5];
struct {
u64 h0, h1;
u32 h2;
} __packed;
};
u32 r[4];
u32 is_base2_26;
};
#endif
/* The NEON and VSX code uses base 2^26, while the scalar code uses base 2^64 on 64-bit
* and base 2^32 on 32-bit. If we hit the unfortunate situation of using NEON or VSX
* and then having to go back to scalar -- because the user is silly and has
* called the update function from two separate contexts -- then we need to
* convert back to the original base before proceeding. The below function is
* written for 64-bit integers, and so we have to swap words at the end on
* big-endian 32-bit. It is possible to reason that the initial reduction below
* is sufficient given the implementation invariants. However, for an avoidance
* of doubt and because this is not performance critical, we do the full
* reduction anyway.
*/
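/*
 * Worked example of the repacking below: the five 26-bit limbs h[0..4]
 * hold a 130-bit value, bits 0..129, which is gathered into 64-bit limbs
 * as
 *   h0 = bits   0..63  = h[0] | h[1] << 26 | (low 12 bits of h[2]) << 52
 *   h1 = bits  64..127 = h[2] >> 12 | h[3] << 14 | (low 24 bits of h[4]) << 40
 *   h2 = bits 128..129 = h[4] >> 24
 * matching the shifts used in the function below.
 */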
static void convert_to_base2_64(void *ctx)
{
struct poly1305_arch_internal *state = ctx;
u32 cy;
if (!(IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || IS_ENABLED(CONFIG_VSX)) || !state->is_base2_26)
return;
cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
state->h0 = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
state->h1 = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
state->h2 = state->h[4] >> 24;
if ((IS_ENABLED(CONFIG_ZINC_ARCH_ARM) || IS_ENABLED(CONFIG_ZINC_ARCH_PPC32)) &&
IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
state->h0 = rol64(state->h0, 32);
state->h1 = rol64(state->h1, 32);
}
#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
cy = (state->h2 >> 2) + (state->h2 & ~3ULL);
state->h2 &= 3;
state->h0 += cy;
state->h1 += (cy = ULT(state->h0, cy));
state->h2 += ULT(state->h1, cy);
#undef ULT
state->is_base2_26 = 0;
}
#endif
#if defined(CONFIG_ZINC_ARCH_X86_64)
#include "poly1305-x86_64-glue.c"
#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64)
#include "poly1305-arm-glue.c"
#elif defined(CONFIG_ZINC_ARCH_MIPS) || defined(CONFIG_ZINC_ARCH_MIPS64)
#include "poly1305-mips-glue.c"
#elif defined(CONFIG_ZINC_ARCH_PPC32) || defined(CONFIG_ZINC_ARCH_PPC64)
#include "poly1305-ppc-glue.c"
#else
static inline bool poly1305_init_arch(void *ctx,
const u8 key[POLY1305_KEY_SIZE])