Commit d7d7b853 authored by Jason A. Donenfeld, committed by Herbert Xu

crypto: x86/poly1305 - wire up faster implementations for kernel



These x86_64 vectorized implementations support AVX, AVX-2, and AVX-512F.
The AVX-512F implementation is disabled on Skylake, due to throttling,
but it is quite fast on >= Cannon Lake.

On the left are cycle counts on a Core i7 6700HQ using the AVX-2
codepath, comparing this implementation ("new") to the implementation in
the current crypto API ("old"). On the right are benchmarks on a Xeon
Gold 5120 using the AVX-512 codepath. The new implementation is faster
on all benchmarks.

        AVX-2                  AVX-512
      ---------              -----------

    size    old     new      size   old     new
    ----    ----    ----     ----   ----    ----
    0       70      68       0      74      70
    16      92      90       16     96      92
    32      134     104      32     136     106
    48      172     120      48     184     124
    64      218     136      64     218     138
    80      254     158      80     260     160
    96      298     174      96     300     176
    112     342     192      112    342     194
    128     388     212      128    384     212
    144     428     228      144    420     226
    160     466     246      160    464     248
    176     510     264      176    504     264
    192     550     282      192    544     282
    208     594     302      208    582     300
    224     628     316      224    624     318
    240     676     334      240    662     338
    256     716     354      256    708     358
    272     764     374      272    748     372
    288     802     352      288    788     358
    304     420     366      304    422     370
    320     428     360      320    432     364
    336     484     378      336    486     380
    352     426     384      352    434     390
    368     478     400      368    480     408
    384     488     394      384    490     398
    400     542     408      400    542     412
    416     486     416      416    492     426
    432     534     430      432    538     436
    448     544     422      448    546     432
    464     600     438      464    600     448
    480     540     448      480    548     456
    496     594     464      496    594     476
    512     602     456      512    606     470
    528     656     476      528    656     480
    544     600     480      544    606     498
    560     650     494      560    652     512
    576     664     490      576    662     508
    592     714     508      592    716     522
    608     656     514      608    664     538
    624     708     532      624    710     552
    640     716     524      640    720     516
    656     770     536      656    772     526
    672     716     548      672    722     544
    688     770     562      688    768     556
    704     774     552      704    778     556
    720     826     568      720    832     568
    736     768     574      736    780     584
    752     822     592      752    826     600
    768     830     584      768    836     560
    784     884     602      784    888     572
    800     828     610      800    838     588
    816     884     628      816    884     604
    832     888     618      832    894     598
    848     942     632      848    946     612
    864     884     644      864    896     628
    880     936     660      880    942     644
    896     948     652      896    952     608
    912     1000    664      912    1004    616
    928     942     676      928    954     634
    944     994     690      944    1000    646
    960     1002    680      960    1008    646
    976     1054    694      976    1062    658
    992     1002    706      992    1012    674
    1008    1052    720      1008   1058    690

This commit wires in the prior implementation from Andy Polyakov, and
makes the following changes to make it suitable for kernel land.

  - Some cosmetic and structural changes, such as renaming labels to
    the .Lname form, adjusting constants, and following other Linux
    conventions, as well as making the code easier for us to maintain
    going forward.

  - CPU feature checking is done in C by the glue code.

  - We avoid jumping into the middle of functions, to appease objtool,
    and instead parameterize shared code.

  - We maintain frame pointers so that stack traces make sense.

  - We remove the dependency on the perl xlate code, which transforms
    the perlasm output for assemblers that we don't care about.

Importantly, none of our changes affect the arithmetic or core code;
they only accommodate the differing environment of kernel space.
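To illustrate the "CPU feature checking is done in C by the glue code"
point above, here is a minimal sketch of that kind of dispatch. The
symbol names (poly1305_blocks_x86_64, poly1305_blocks_avx2,
poly1305_use_avx2, poly1305_simd_mod_init) are hypothetical, not the
commit's actual glue code:

    /* Hypothetical glue-code dispatch sketch; names are illustrative. */
    #include <linux/linkage.h>
    #include <linux/module.h>
    #include <linux/types.h>
    #include <asm/cpufeature.h>
    #include <asm/fpu/api.h>
    #include <crypto/internal/simd.h>

    asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
                                           size_t len, u32 padbit);
    asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp,
                                         size_t len, u32 padbit);

    static bool poly1305_use_avx2 __ro_after_init;

    static int __init poly1305_simd_mod_init(void)
    {
            /* Decide once at init; the hot path just reads a flag. */
            poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
                                boot_cpu_has(X86_FEATURE_AVX2) &&
                                cpu_has_xfeatures(XFEATURE_MASK_SSE |
                                                  XFEATURE_MASK_YMM, NULL);
            return 0;
    }
    module_init(poly1305_simd_mod_init);

    static void poly1305_blocks(void *ctx, const u8 *inp, size_t len,
                                u32 padbit)
    {
            /* Fall back to the integer code when SIMD is unusable. */
            if (poly1305_use_avx2 && crypto_simd_usable()) {
                    kernel_fpu_begin();
                    poly1305_blocks_avx2(ctx, inp, len, padbit);
                    kernel_fpu_end();
            } else {
                    poly1305_blocks_x86_64(ctx, inp, len, padbit);
            }
    }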

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Co-developed-by: Samuel Neves <sneves@dei.uc.pt>
Signed-off-by: Samuel Neves <sneves@dei.uc.pt>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 0896ca2a
+1 −0
poly1305-x86_64.S
arch/x86/crypto/Makefile  +9 −2
@@ -73,6 +73,10 @@ aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
 
 nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
 blake2s-x86_64-y := blake2s-core.o blake2s-glue.o
+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o
+ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),)
+targets += poly1305-x86_64-cryptogams.S
+endif
 
 ifeq ($(avx_supported),yes)
 	camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
@@ -101,10 +105,8 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
-poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
-poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 ifeq ($(sha1_ni_supported),yes)
 sha1-ssse3-y += sha1_ni_asm.o
@@ -118,3 +120,8 @@ sha256-ssse3-y += sha256_ni_asm.o
 endif
 sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
 crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
+
+quiet_cmd_perlasm = PERLASM $@
+      cmd_perlasm = $(PERL) $< > $@
+$(obj)/%.S: $(src)/%.pl FORCE
+	$(call if_changed,perlasm)
arch/x86/crypto/poly1305-avx2-x86_64.S  +0 −390 (deleted)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <linux/linkage.h>

.section	.rodata.cst32.ANMASK, "aM", @progbits, 32
.align 32
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
	.octa 0x0000000003ffffff0000000003ffffff

.section	.rodata.cst32.ORMASK, "aM", @progbits, 32
.align 32
ORMASK:	.octa 0x00000000010000000000000001000000
	.octa 0x00000000010000000000000001000000
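# (1 << 24 in each lane is the 2^128 padding bit of a full 16-byte
# block: after the >> 8 shift applied below, message bits 104..127
# occupy bits 0..23 of the fifth 26-bit limb, so bit 24 stands for
# bit 128)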

.text

#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define w0 0x18(%r8)
#define w1 0x1c(%r8)
#define w2 0x20(%r8)
#define w3 0x24(%r8)
#define w4 0x28(%r8)
#define y0 0x30(%r8)
#define y1 0x34(%r8)
#define y2 0x38(%r8)
#define y3 0x3c(%r8)
#define y4 0x40(%r8)
#define m %rsi
#define hc0 %ymm0
#define hc1 %ymm1
#define hc2 %ymm2
#define hc3 %ymm3
#define hc4 %ymm4
#define hc0x %xmm0
#define hc1x %xmm1
#define hc2x %xmm2
#define hc3x %xmm3
#define hc4x %xmm4
#define t1 %ymm5
#define t2 %ymm6
#define t1x %xmm5
#define t2x %xmm6
#define ruwy0 %ymm7
#define ruwy1 %ymm8
#define ruwy2 %ymm9
#define ruwy3 %ymm10
#define ruwy4 %ymm11
#define ruwy0x %xmm7
#define ruwy1x %xmm8
#define ruwy2x %xmm9
#define ruwy3x %xmm10
#define ruwy4x %xmm11
#define svxz1 %ymm12
#define svxz2 %ymm13
#define svxz3 %ymm14
#define svxz4 %ymm15
#define d0 %r9
#define d1 %r10
#define d2 %r11
#define d3 %r12
#define d4 %r13

SYM_FUNC_START(poly1305_4block_avx2)
	# %rdi: Accumulator h[5]
	# %rsi: 64 byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Quadblock count
	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]

	# This four-block variant uses loop unrolled block processing. It
	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r

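	# zero the upper YMM lanes to avoid AVX/SSE transition penalties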
	vzeroupper
	push		%rbx
	push		%r12
	push		%r13

	# combine r0,u0,w0,y0
	vmovd		y0,ruwy0x
	vmovd		w0,t1x
	vpunpcklqdq	t1,ruwy0,ruwy0
	vmovd		u0,t1x
	vmovd		r0,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy0,ruwy0
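	# ruwy0 now holds [ r0, u0, w0, y0 ] in 64-bit lanes 3..0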

	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
	vmovd		y1,ruwy1x
	vmovd		w1,t1x
	vpunpcklqdq	t1,ruwy1,ruwy1
	vmovd		u1,t1x
	vmovd		r1,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy1,ruwy1
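	# s1 = r1*5, computed as (r1 << 2) + r1; the *5 lets the reduction
	# fold 2^130 back in, since 2^130 = 5 (mod 2^130 - 5)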
	vpslld		$2,ruwy1,svxz1
	vpaddd		ruwy1,svxz1,svxz1

	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
	vmovd		y2,ruwy2x
	vmovd		w2,t1x
	vpunpcklqdq	t1,ruwy2,ruwy2
	vmovd		u2,t1x
	vmovd		r2,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy2,ruwy2
	vpslld		$2,ruwy2,svxz2
	vpaddd		ruwy2,svxz2,svxz2

	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
	vmovd		y3,ruwy3x
	vmovd		w3,t1x
	vpunpcklqdq	t1,ruwy3,ruwy3
	vmovd		u3,t1x
	vmovd		r3,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy3,ruwy3
	vpslld		$2,ruwy3,svxz3
	vpaddd		ruwy3,svxz3,svxz3

	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
	vmovd		y4,ruwy4x
	vmovd		w4,t1x
	vpunpcklqdq	t1,ruwy4,ruwy4
	vmovd		u4,t1x
	vmovd		r4,t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,ruwy4,ruwy4
	vpslld		$2,ruwy4,svxz4
	vpaddd		ruwy4,svxz4,svxz4

.Ldoblock4:
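	# Split each of the four 16-byte blocks into five 26-bit limbs
	# (radix 2^26); the accumulator h is added into lane 0, which
	# holds the first block's limbs.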
	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
	vmovd		0x00(m),hc0x
	vmovd		0x10(m),t1x
	vpunpcklqdq	t1,hc0,hc0
	vmovd		0x20(m),t1x
	vmovd		0x30(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc0,hc0
	vpand		ANMASK(%rip),hc0,hc0
	vmovd		h0,t1x
	vpaddd		t1,hc0,hc0
	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
	vmovd		0x03(m),hc1x
	vmovd		0x13(m),t1x
	vpunpcklqdq	t1,hc1,hc1
	vmovd		0x23(m),t1x
	vmovd		0x33(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc1,hc1
	vpsrld		$2,hc1,hc1
	vpand		ANMASK(%rip),hc1,hc1
	vmovd		h1,t1x
	vpaddd		t1,hc1,hc1
	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
	vmovd		0x06(m),hc2x
	vmovd		0x16(m),t1x
	vpunpcklqdq	t1,hc2,hc2
	vmovd		0x26(m),t1x
	vmovd		0x36(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc2,hc2
	vpsrld		$4,hc2,hc2
	vpand		ANMASK(%rip),hc2,hc2
	vmovd		h2,t1x
	vpaddd		t1,hc2,hc2
	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
	vmovd		0x09(m),hc3x
	vmovd		0x19(m),t1x
	vpunpcklqdq	t1,hc3,hc3
	vmovd		0x29(m),t1x
	vmovd		0x39(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc3,hc3
	vpsrld		$6,hc3,hc3
	vpand		ANMASK(%rip),hc3,hc3
	vmovd		h3,t1x
	vpaddd		t1,hc3,hc3
	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
	vmovd		0x0c(m),hc4x
	vmovd		0x1c(m),t1x
	vpunpcklqdq	t1,hc4,hc4
	vmovd		0x2c(m),t1x
	vmovd		0x3c(m),t2x
	vpunpcklqdq	t2,t1,t1
	vperm2i128	$0x20,t1,hc4,hc4
	vpsrld		$8,hc4,hc4
	vpor		ORMASK(%rip),hc4,hc4
	vmovd		h4,t1x
	vpaddd		t1,hc4,hc4

	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
	vpmuludq	hc0,ruwy0,t1
	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
	vpmuludq	hc1,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
	vpmuludq	hc2,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
	vpmuludq	hc3,svxz2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
	vpmuludq	hc4,svxz1,t2
	vpaddq		t2,t1,t1
	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d0

	# t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ]
	vpmuludq	hc0,ruwy1,t1
	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
	vpmuludq	hc1,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
	vpmuludq	hc2,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
	vpmuludq	hc3,svxz3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
	vpmuludq	hc4,svxz2,t2
	vpaddq		t2,t1,t1
	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d1

	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
	vpmuludq	hc0,ruwy2,t1
	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
	vpmuludq	hc1,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
	vpmuludq	hc2,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
	vpmuludq	hc3,svxz4,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
	vpmuludq	hc4,svxz3,t2
	vpaddq		t2,t1,t1
	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d2

	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
	vpmuludq	hc0,ruwy3,t1
	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
	vpmuludq	hc1,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
	vpmuludq	hc2,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
	vpmuludq	hc3,ruwy0,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
	vpmuludq	hc4,svxz4,t2
	vpaddq		t2,t1,t1
	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d3

	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
	vpmuludq	hc0,ruwy4,t1
	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
	vpmuludq	hc1,ruwy3,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
	vpmuludq	hc2,ruwy2,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
	vpmuludq	hc3,ruwy1,t2
	vpaddq		t2,t1,t1
	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
	vpmuludq	hc4,ruwy0,t2
	vpaddq		t2,t1,t1
	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
	vpermq		$0xee,t1,t2
	vpaddq		t2,t1,t1
	vpsrldq		$8,t1,t2
	vpaddq		t2,t1,t1
	vmovq		t1x,d4

	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
	# integers.  It's true in a single-block implementation, but not here.

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	mov		d4,%rax
	shr		$26,%rax
	lea		(%rax,%rax,4),%rax
	add		%rax,%rbx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	mov		%rbx,%rax
	shr		$26,%rax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0

	add		$0x40,m
	dec		%rcx
	jnz		.Ldoblock4

	vzeroupper
	pop		%r13
	pop		%r12
	pop		%rbx
	ret
SYM_FUNC_END(poly1305_4block_avx2)
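The four-key identity in the comment at the top of poly1305_4block_avx2
is plain Horner algebra. A small self-contained C check, using a toy
61-bit prime in place of the real 2^130 - 5 field, makes the equivalence
concrete:

    /* Toy check that four Horner steps h = (h + m_i) * r equal one
     * unrolled step with precomputed key powers r^4, r^3, r^2, r.
     * Uses a small prime so __int128 arithmetic suffices; this is
     * illustrative algebra, not the real Poly1305 field. */
    #include <stdint.h>
    #include <stdio.h>

    #define P (((uint64_t)1 << 61) - 1) /* toy prime */

    static uint64_t mulmod(uint64_t a, uint64_t b)
    {
            return (uint64_t)(((unsigned __int128)a * b) % P);
    }

    int main(void)
    {
            uint64_t h = 123, r = 0x123456789abcdefULL % P;
            uint64_t m[4] = { 11, 22, 33, 44 };
            uint64_t r2 = mulmod(r, r), r3 = mulmod(r2, r), r4 = mulmod(r3, r);

            uint64_t seq = h; /* h = (h + m) * r, four times */
            for (int i = 0; i < 4; i++)
                    seq = mulmod((seq + m[i]) % P, r);

            /* h = (h + m1)*r^4 + m2*r^3 + m3*r^2 + m4*r */
            uint64_t unr = mulmod((h + m[0]) % P, r4);
            unr = (unr + mulmod(m[1], r3)) % P;
            unr = (unr + mulmod(m[2], r2)) % P;
            unr = (unr + mulmod(m[3], r)) % P;

            printf("%s\n", seq == unr ? "match" : "MISMATCH");
            return 0;
    }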
arch/x86/crypto/poly1305-sse2-x86_64.S  +0 −590 (deleted)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <linux/linkage.h>

.section	.rodata.cst16.ANMASK, "aM", @progbits, 16
.align 16
ANMASK:	.octa 0x0000000003ffffff0000000003ffffff

.section	.rodata.cst16.ORMASK, "aM", @progbits, 16
.align 16
ORMASK:	.octa 0x00000000010000000000000001000000

.text

#define h0 0x00(%rdi)
#define h1 0x04(%rdi)
#define h2 0x08(%rdi)
#define h3 0x0c(%rdi)
#define h4 0x10(%rdi)
#define r0 0x00(%rdx)
#define r1 0x04(%rdx)
#define r2 0x08(%rdx)
#define r3 0x0c(%rdx)
#define r4 0x10(%rdx)
#define s1 0x00(%rsp)
#define s2 0x04(%rsp)
#define s3 0x08(%rsp)
#define s4 0x0c(%rsp)
#define m %rsi
#define h01 %xmm0
#define h23 %xmm1
#define h44 %xmm2
#define t1 %xmm3
#define t2 %xmm4
#define t3 %xmm5
#define t4 %xmm6
#define mask %xmm7
#define d0 %r8
#define d1 %r9
#define d2 %r10
#define d3 %r11
#define d4 %r12

SYM_FUNC_START(poly1305_block_sse2)
	# %rdi: Accumulator h[5]
	# %rsi: 16 byte input block m
	# %rdx: Poly1305 key r[5]
	# %rcx: Block count

	# This single block variant tries to improve performance by doing two
	# multiplications in parallel using SSE instructions. There is quite
	# a bit of quadword packing involved, hence the speedup is marginal.

	push		%rbx
	push		%r12
	sub		$0x10,%rsp

	# s1..s4 = r1..r4 * 5
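	# (multiplying by 5 folds the 2^130 overflow back in during
	# reduction, since 2^130 = 5 (mod 2^130 - 5))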
	mov		r1,%eax
	lea		(%eax,%eax,4),%eax
	mov		%eax,s1
	mov		r2,%eax
	lea		(%eax,%eax,4),%eax
	mov		%eax,s2
	mov		r3,%eax
	lea		(%eax,%eax,4),%eax
	mov		%eax,s3
	mov		r4,%eax
	lea		(%eax,%eax,4),%eax
	mov		%eax,s4

	movdqa		ANMASK(%rip),mask

.Ldoblock:
	# h01 = [0, h1, 0, h0]
	# h23 = [0, h3, 0, h2]
	# h44 = [0, h4, 0, h4]
	movd		h0,h01
	movd		h1,t1
	movd		h2,h23
	movd		h3,t2
	movd		h4,h44
	punpcklqdq	t1,h01
	punpcklqdq	t2,h23
	punpcklqdq	h44,h44

	# h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
	movd		0x00(m),t1
	movd		0x03(m),t2
	psrld		$2,t2
	punpcklqdq	t2,t1
	pand		mask,t1
	paddd		t1,h01
	# h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
	movd		0x06(m),t1
	movd		0x09(m),t2
	psrld		$4,t1
	psrld		$6,t2
	punpcklqdq	t2,t1
	pand		mask,t1
	paddd		t1,h23
	# h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
	mov		0x0c(m),%eax
	shr		$8,%eax
	or		$0x01000000,%eax
	movd		%eax,t1
	pshufd		$0xc4,t1,t1
	paddd		t1,h44

	# t1[0] = h0 * r0 + h2 * s3
	# t1[1] = h1 * s4 + h3 * s2
	movd		r0,t1
	movd		s4,t2
	punpcklqdq	t2,t1
	pmuludq		h01,t1
	movd		s3,t2
	movd		s2,t3
	punpcklqdq	t3,t2
	pmuludq		h23,t2
	paddq		t2,t1
	# t2[0] = h0 * r1 + h2 * s4
	# t2[1] = h1 * r0 + h3 * s3
	movd		r1,t2
	movd		r0,t3
	punpcklqdq	t3,t2
	pmuludq		h01,t2
	movd		s4,t3
	movd		s3,t4
	punpcklqdq	t4,t3
	pmuludq		h23,t3
	paddq		t3,t2
	# t3[0] = h4 * s1
	# t3[1] = h4 * s2
	movd		s1,t3
	movd		s2,t4
	punpcklqdq	t4,t3
	pmuludq		h44,t3
	# d0 = t1[0] + t1[1] + t3[0]
	# d1 = t2[0] + t2[1] + t3[1]
	movdqa		t1,t4
	punpcklqdq	t2,t4
	punpckhqdq	t2,t1
	paddq		t4,t1
	paddq		t3,t1
	movq		t1,d0
	psrldq		$8,t1
	movq		t1,d1

	# t1[0] = h0 * r2 + h2 * r0
	# t1[1] = h1 * r1 + h3 * s4
	movd		r2,t1
	movd		r1,t2
	punpcklqdq 	t2,t1
	pmuludq		h01,t1
	movd		r0,t2
	movd		s4,t3
	punpcklqdq	t3,t2
	pmuludq		h23,t2
	paddq		t2,t1
	# t2[0] = h0 * r3 + h2 * r1
	# t2[1] = h1 * r2 + h3 * r0
	movd		r3,t2
	movd		r2,t3
	punpcklqdq	t3,t2
	pmuludq		h01,t2
	movd		r1,t3
	movd		r0,t4
	punpcklqdq	t4,t3
	pmuludq		h23,t3
	paddq		t3,t2
	# t3[0] = h4 * s3
	# t3[1] = h4 * s4
	movd		s3,t3
	movd		s4,t4
	punpcklqdq	t4,t3
	pmuludq		h44,t3
	# d2 = t1[0] + t1[1] + t3[0]
	# d3 = t2[0] + t2[1] + t3[1]
	movdqa		t1,t4
	punpcklqdq	t2,t4
	punpckhqdq	t2,t1
	paddq		t4,t1
	paddq		t3,t1
	movq		t1,d2
	psrldq		$8,t1
	movq		t1,d3

	# t1[0] = h0 * r4 + h2 * r2
	# t1[1] = h1 * r3 + h3 * r1
	movd		r4,t1
	movd		r3,t2
	punpcklqdq	t2,t1
	pmuludq		h01,t1
	movd		r2,t2
	movd		r1,t3
	punpcklqdq	t3,t2
	pmuludq		h23,t2
	paddq		t2,t1
	# t3[0] = h4 * r0
	movd		r0,t3
	pmuludq		h44,t3
	# d4 = t1[0] + t1[1] + t3[0]
	movdqa		t1,t4
	psrldq		$8,t4
	paddq		t4,t1
	paddq		t3,t1
	movq		t1,d4

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	mov		d4,%rax
	shr		$26,%rax
	lea		(%rax,%rax,4),%rax
	add		%rax,%rbx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	mov		%rbx,%rax
	shr		$26,%rax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0

	add		$0x10,m
	dec		%rcx
	jnz		.Ldoblock

	# Zeroing of key material
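	# (%rcx is zero after the loop above, so these two stores wipe the
	# stacked s1..s4 values)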
	mov		%rcx,0x00(%rsp)
	mov		%rcx,0x08(%rsp)

	add		$0x10,%rsp
	pop		%r12
	pop		%rbx
	ret
SYM_FUNC_END(poly1305_block_sse2)


#define u0 0x00(%r8)
#define u1 0x04(%r8)
#define u2 0x08(%r8)
#define u3 0x0c(%r8)
#define u4 0x10(%r8)
#define hc0 %xmm0
#define hc1 %xmm1
#define hc2 %xmm2
#define hc3 %xmm5
#define hc4 %xmm6
#define ru0 %xmm7
#define ru1 %xmm8
#define ru2 %xmm9
#define ru3 %xmm10
#define ru4 %xmm11
#define sv1 %xmm12
#define sv2 %xmm13
#define sv3 %xmm14
#define sv4 %xmm15
#undef d0
#define d0 %r13

SYM_FUNC_START(poly1305_2block_sse2)
	# %rdi: Accumulator h[5]
	# %rsi: 32 byte input m (two 16 byte blocks per iteration)
	# %rdx: Poly1305 key r[5]
	# %rcx: Doubleblock count
	# %r8:  Poly1305 derived key r^2 u[5]

	# This two-block variant further improves performance by using loop
	# unrolled block processing. This is more straightforward and does
	# less byte shuffling, but requires a second Poly1305 key r^2:
	# h = (h + m) * r    =>    h = (h + m1) * r^2 + m2 * r

	push		%rbx
	push		%r12
	push		%r13

	# combine r0,u0
	movd		u0,ru0
	movd		r0,t1
	punpcklqdq	t1,ru0
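	# ru0 = [ r0, u0 ] (qword lane 1 = r0, lane 0 = u0)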

	# combine r1,u1 and s1=r1*5,v1=u1*5
	movd		u1,ru1
	movd		r1,t1
	punpcklqdq	t1,ru1
	movdqa		ru1,sv1
	pslld		$2,sv1
	paddd		ru1,sv1

	# combine r2,u2 and s2=r2*5,v2=u2*5
	movd		u2,ru2
	movd		r2,t1
	punpcklqdq	t1,ru2
	movdqa		ru2,sv2
	pslld		$2,sv2
	paddd		ru2,sv2

	# combine r3,u3 and s3=r3*5,v3=u3*5
	movd		u3,ru3
	movd		r3,t1
	punpcklqdq	t1,ru3
	movdqa		ru3,sv3
	pslld		$2,sv3
	paddd		ru3,sv3

	# combine r4,u4 and s4=r4*5,v4=u4*5
	movd		u4,ru4
	movd		r4,t1
	punpcklqdq	t1,ru4
	movdqa		ru4,sv4
	pslld		$2,sv4
	paddd		ru4,sv4

.Ldoblock2:
	# hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
	movd		0x00(m),hc0
	movd		0x10(m),t1
	punpcklqdq	t1,hc0
	pand		ANMASK(%rip),hc0
	movd		h0,t1
	paddd		t1,hc0
	# hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
	movd		0x03(m),hc1
	movd		0x13(m),t1
	punpcklqdq	t1,hc1
	psrld		$2,hc1
	pand		ANMASK(%rip),hc1
	movd		h1,t1
	paddd		t1,hc1
	# hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
	movd		0x06(m),hc2
	movd		0x16(m),t1
	punpcklqdq	t1,hc2
	psrld		$4,hc2
	pand		ANMASK(%rip),hc2
	movd		h2,t1
	paddd		t1,hc2
	# hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
	movd		0x09(m),hc3
	movd		0x19(m),t1
	punpcklqdq	t1,hc3
	psrld		$6,hc3
	pand		ANMASK(%rip),hc3
	movd		h3,t1
	paddd		t1,hc3
	# hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
	movd		0x0c(m),hc4
	movd		0x1c(m),t1
	punpcklqdq	t1,hc4
	psrld		$8,hc4
	por		ORMASK(%rip),hc4
	movd		h4,t1
	paddd		t1,hc4

	# t1 = [ hc0[1] * r0, hc0[0] * u0 ]
	movdqa		ru0,t1
	pmuludq		hc0,t1
	# t1 += [ hc1[1] * s4, hc1[0] * v4 ]
	movdqa		sv4,t2
	pmuludq		hc1,t2
	paddq		t2,t1
	# t1 += [ hc2[1] * s3, hc2[0] * v3 ]
	movdqa		sv3,t2
	pmuludq		hc2,t2
	paddq		t2,t1
	# t1 += [ hc3[1] * s2, hc3[0] * v2 ]
	movdqa		sv2,t2
	pmuludq		hc3,t2
	paddq		t2,t1
	# t1 += [ hc4[1] * s1, hc4[0] * v1 ]
	movdqa		sv1,t2
	pmuludq		hc4,t2
	paddq		t2,t1
	# d0 = t1[0] + t1[1]
	movdqa		t1,t2
	psrldq		$8,t2
	paddq		t2,t1
	movq		t1,d0

	# t1 = [ hc0[1] * r1, hc0[0] * u1 ]
	movdqa		ru1,t1
	pmuludq		hc0,t1
	# t1 += [ hc1[1] * r0, hc1[0] * u0 ]
	movdqa		ru0,t2
	pmuludq		hc1,t2
	paddq		t2,t1
	# t1 += [ hc2[1] * s4, hc2[0] * v4 ]
	movdqa		sv4,t2
	pmuludq		hc2,t2
	paddq		t2,t1
	# t1 += [ hc3[1] * s3, hc3[0] * v3 ]
	movdqa		sv3,t2
	pmuludq		hc3,t2
	paddq		t2,t1
	# t1 += [ hc4[1] * s2, hc4[0] * v2 ]
	movdqa		sv2,t2
	pmuludq		hc4,t2
	paddq		t2,t1
	# d1 = t1[0] + t1[1]
	movdqa		t1,t2
	psrldq		$8,t2
	paddq		t2,t1
	movq		t1,d1

	# t1 = [ hc0[1] * r2, hc0[0] * u2 ]
	movdqa		ru2,t1
	pmuludq		hc0,t1
	# t1 += [ hc1[1] * r1, hc1[0] * u1 ]
	movdqa		ru1,t2
	pmuludq		hc1,t2
	paddq		t2,t1
	# t1 += [ hc2[1] * r0, hc2[0] * u0 ]
	movdqa		ru0,t2
	pmuludq		hc2,t2
	paddq		t2,t1
	# t1 += [ hc3[1] * s4, hc3[0] * v4 ]
	movdqa		sv4,t2
	pmuludq		hc3,t2
	paddq		t2,t1
	# t1 += [ hc4[1] * s3, hc4[0] * v3 ]
	movdqa		sv3,t2
	pmuludq		hc4,t2
	paddq		t2,t1
	# d2 = t1[0] + t1[1]
	movdqa		t1,t2
	psrldq		$8,t2
	paddq		t2,t1
	movq		t1,d2

	# t1 = [ hc0[1] * r3, hc0[0] * u3 ]
	movdqa		ru3,t1
	pmuludq		hc0,t1
	# t1 += [ hc1[1] * r2, hc1[0] * u2 ]
	movdqa		ru2,t2
	pmuludq		hc1,t2
	paddq		t2,t1
	# t1 += [ hc2[1] * r1, hc2[0] * u1 ]
	movdqa		ru1,t2
	pmuludq		hc2,t2
	paddq		t2,t1
	# t1 += [ hc3[1] * r0, hc3[0] * u0 ]
	movdqa		ru0,t2
	pmuludq		hc3,t2
	paddq		t2,t1
	# t1 += [ hc4[1] * s4, hc4[0] * v4 ]
	movdqa		sv4,t2
	pmuludq		hc4,t2
	paddq		t2,t1
	# d3 = t1[0] + t1[1]
	movdqa		t1,t2
	psrldq		$8,t2
	paddq		t2,t1
	movq		t1,d3

	# t1 = [ hc0[1] * r4, hc0[0] * u4 ]
	movdqa		ru4,t1
	pmuludq		hc0,t1
	# t1 += [ hc1[1] * r3, hc1[0] * u3 ]
	movdqa		ru3,t2
	pmuludq		hc1,t2
	paddq		t2,t1
	# t1 += [ hc2[1] * r2, hc2[0] * u2 ]
	movdqa		ru2,t2
	pmuludq		hc2,t2
	paddq		t2,t1
	# t1 += [ hc3[1] * r1, hc3[0] * u1 ]
	movdqa		ru1,t2
	pmuludq		hc3,t2
	paddq		t2,t1
	# t1 += [ hc4[1] * r0, hc4[0] * u0 ]
	movdqa		ru0,t2
	pmuludq		hc4,t2
	paddq		t2,t1
	# d4 = t1[0] + t1[1]
	movdqa		t1,t2
	psrldq		$8,t2
	paddq		t2,t1
	movq		t1,d4

	# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
	# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
	# amount.  Careful: we must not assume the carry bits 'd0 >> 26',
	# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
	# integers.  It's true in a single-block implementation, but not here.

	# d1 += d0 >> 26
	mov		d0,%rax
	shr		$26,%rax
	add		%rax,d1
	# h0 = d0 & 0x3ffffff
	mov		d0,%rbx
	and		$0x3ffffff,%ebx

	# d2 += d1 >> 26
	mov		d1,%rax
	shr		$26,%rax
	add		%rax,d2
	# h1 = d1 & 0x3ffffff
	mov		d1,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h1

	# d3 += d2 >> 26
	mov		d2,%rax
	shr		$26,%rax
	add		%rax,d3
	# h2 = d2 & 0x3ffffff
	mov		d2,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h2

	# d4 += d3 >> 26
	mov		d3,%rax
	shr		$26,%rax
	add		%rax,d4
	# h3 = d3 & 0x3ffffff
	mov		d3,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h3

	# h0 += (d4 >> 26) * 5
	mov		d4,%rax
	shr		$26,%rax
	lea		(%rax,%rax,4),%rax
	add		%rax,%rbx
	# h4 = d4 & 0x3ffffff
	mov		d4,%rax
	and		$0x3ffffff,%eax
	mov		%eax,h4

	# h1 += h0 >> 26
	mov		%rbx,%rax
	shr		$26,%rax
	add		%eax,h1
	# h0 = h0 & 0x3ffffff
	andl		$0x3ffffff,%ebx
	mov		%ebx,h0

	add		$0x20,m
	dec		%rcx
	jnz		.Ldoblock2

	pop		%r13
	pop		%r12
	pop		%rbx
	ret
SYM_FUNC_END(poly1305_2block_sse2)
+394 −288 (diff collapsed: preview size limit exceeded)