24384997 Needs more work to fix the 32/64 bits asm instructions and args
author Misaki Miyashita <Misaki.Miyashita@Oracle.COM>
Thu, 18 Aug 2016 07:55:50 -0700
changeset 6629 5cb24dd4b073
parent 6624 c6cfc48dd184
child 6632 b6196fa3160c
24384997 Needs more work to fix the 32/64 bits asm instructions and args
components/openssl/common/patches/049-use-srln.patch
--- a/components/openssl/common/patches/049-use-srln.patch	Wed Aug 17 16:04:09 2016 -0700
+++ b/components/openssl/common/patches/049-use-srln.patch	Thu Aug 18 07:55:50 2016 -0700
@@ -1,115 +1,86 @@
 # This patch fixes the invalid use of 64-bit instruction (srlx) by 32-bit
-# applications.  This was developed in house and submitted to the upstream:
-#    https://github.com/openssl/openssl/pull/1259
---- a/crypto/des/asm/dest4-sparcv9.pl	2016-05-03 06:44:42.000000000 -0700
-+++ b/crypto/des/asm/dest4-sparcv9.pl	2016-06-23 09:34:51.212075615 -0700
-@@ -110,7 +111,7 @@
- 	and		$out, 7, %g4
- 	alignaddrl	$out, %g0, $out
- 	srl		$omask, %g4, $omask
--	srlx		$len, 3, $len
-+	srln		$len, 3, $len
- 	movrz		%g4, 0, $omask
- 	prefetch	[$out], 22
- 
-@@ -211,7 +212,7 @@
- 	and		$out, 7, %g4
- 	alignaddrl	$out, %g0, $out
- 	srl		$omask, %g4, $omask
--	srlx		$len, 3, $len
-+	srln		$len, 3, $len
- 	movrz		%g4, 0, $omask
- 	prefetch	[$out], 22
+# applications.  This patch is from the following commit in the upstream:
+#    https://github.com/openssl/openssl/commit/f198cc43a0eca4bf1a8e7f60c51af560f4346dc8
+--- a/crypto/des/asm/dest4-sparcv9.pl	2016-08-04 14:20:26.610683970 -0700
++++ b/crypto/des/asm/dest4-sparcv9.pl	2016-08-04 14:22:33.339076315 -0700
+@@ -96,7 +96,7 @@
+ des_t4_cbc_encrypt:
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .Lcbc_abort
+-	nop
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	ld		[$ivec + 0], %f0	! load ivec
+ 	ld		[$ivec + 4], %f1
  
-@@ -319,7 +320,7 @@
- 	and		$out, 7, %g4
- 	alignaddrl	$out, %g0, $out
- 	srl		$omask, %g4, $omask
--	srlx		$len, 3, $len
-+	srln		$len, 3, $len
- 	movrz		%g4, 0, $omask
- 	prefetch	[$out], 22
+@@ -197,7 +197,7 @@
+ des_t4_cbc_decrypt:
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .Lcbc_abort
+-	nop
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	ld		[$ivec + 0], %f2	! load ivec
+ 	ld		[$ivec + 4], %f3
  
-@@ -471,7 +472,7 @@
- 	and		$out, 7, %g4
- 	alignaddrl	$out, %g0, $out
- 	srl		$omask, %g4, $omask
--	srlx		$len, 3, $len
-+	srln		$len, 3, $len
- 	movrz		%g4, 0, $omask
- 	prefetch	[$out], 22
+@@ -305,7 +305,7 @@
+ des_t4_ede3_cbc_encrypt:
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .Lcbc_abort
+-	nop
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	ld		[$ivec + 0], %f0	! load ivec
+ 	ld		[$ivec + 4], %f1
  
---- a/crypto/perlasm/sparcv9_modes.pl	2016-05-03 06:44:42.000000000 -0700
-+++ b/crypto/perlasm/sparcv9_modes.pl	2016-06-23 09:34:10.707332695 -0700
-@@ -75,7 +75,7 @@
- 	srl		$omask, $ooff, $omask
- 
- 	alignaddrl	$out, %g0, $out
--	srlx		$len, 4, $len
-+	srln		$len, 4, $len
- 	prefetch	[$out], 22
+@@ -457,7 +457,7 @@
+ des_t4_ede3_cbc_decrypt:
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .Lcbc_abort
+-	nop
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	ld		[$ivec + 0], %f2	! load ivec
+ 	ld		[$ivec + 4], %f3
  
- .L${bits}_cbc_enc_loop:
-@@ -185,7 +185,7 @@
- 	and	$blk_init, 63, $blk_init	! tail
- 	sub	$len, $blk_init, $len
- 	add	$blk_init, 15, $blk_init	! round up to 16n
--	srlx	$len, 4, $len
-+	srln	$len, 4, $len
- 	srl	$blk_init, 4, $blk_init
- 
- .L${bits}_cbc_enc_blk_loop:
-@@ -292,7 +292,7 @@
- 	srl		$omask, $ooff, $omask
+--- a/crypto/modes/asm/ghash-sparcv9.pl	2016-08-04 14:22:59.021798885 -0700
++++ b/crypto/modes/asm/ghash-sparcv9.pl	2016-08-04 14:24:07.947062045 -0700
+@@ -445,6 +445,8 @@
+ .align	32
+ gcm_ghash_vis3:
+ 	save	%sp,-$frame,%sp
++	nop
++	srl	$len,0,$len		! needed on v8+, "nop" on v9
  
- 	andcc		$len, 16, %g0		! is number of blocks even?
--	srlx		$len, 4, $len
-+	srln		$len, 4, $len
- 	alignaddrl	$out, %g0, $out
- 	bz		%icc, .L${bits}_cbc_dec_loop2x
- 	prefetch	[$out], 22
-@@ -517,7 +517,7 @@
- 	and	$blk_init, 63, $blk_init	! tail
- 	sub	$len, $blk_init, $len
- 	add	$blk_init, 15, $blk_init	! round up to 16n
--	srlx	$len, 4, $len
-+	srln	$len, 4, $len
- 	srl	$blk_init, 4, $blk_init
- 	sub	$len, 1, $len
- 	add	$blk_init, 1, $blk_init
-@@ -648,7 +648,7 @@
- 	andcc		$len, 16, %g0		! is number of blocks even?
- 	alignaddrl	$out, %g0, $out
- 	bz		%icc, .L${bits}_ctr32_loop2x
--	srlx		$len, 4, $len
-+	srln		$len, 4, $len
- .L${bits}_ctr32_loop:
- 	ldx		[$inp + 0], %o0
- 	brz,pt		$ileft, 4f
-@@ -819,7 +819,7 @@
- 	and	$blk_init, 63, $blk_init	! tail
- 	sub	$len, $blk_init, $len
- 	add	$blk_init, 15, $blk_init	! round up to 16n
--	srlx	$len, 4, $len
-+	srln	$len, 4, $len
- 	srl	$blk_init, 4, $blk_init
- 	sub	$len, 1, $len
- 	add	$blk_init, 1, $blk_init
-@@ -966,7 +966,7 @@
- $code.=<<___;
- 	alignaddrl	$out, %g0, $out
- 	bz		%icc, .L${bits}_xts_${dir}loop2x
--	srlx		$len, 4, $len
-+	srln		$len, 4, $len
- .L${bits}_xts_${dir}loop:
- 	ldx		[$inp + 0], %o0
- 	brz,pt		$ileft, 4f
-@@ -1172,7 +1172,7 @@
- 	and	$blk_init, 63, $blk_init	! tail
- 	sub	$len, $blk_init, $len
- 	add	$blk_init, 15, $blk_init	! round up to 16n
--	srlx	$len, 4, $len
-+	srln	$len, 4, $len
- 	srl	$blk_init, 4, $blk_init
- 	sub	$len, 1, $len
- 	add	$blk_init, 1, $blk_init
+ 	ldx	[$Xip+8],$C2		! load Xi
+ 	ldx	[$Xip+0],$C3
+--- a/crypto/perlasm/sparcv9_modes.pl	2016-08-04 14:24:29.877624460 -0700
++++ b/crypto/perlasm/sparcv9_modes.pl	2016-08-04 14:27:18.552931245 -0700
+@@ -37,6 +37,7 @@
+ 	save		%sp, -$::frame, %sp
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .L${bits}_cbc_enc_abort
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	sub		$inp, $out, $blk_init	! $inp!=$out
+ ___
+ $::code.=<<___ if (!$::evp);
+@@ -254,6 +255,7 @@
+ 	save		%sp, -$::frame, %sp
+ 	cmp		$len, 0
+ 	be,pn		$::size_t_cc, .L${bits}_cbc_dec_abort
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 	sub		$inp, $out, $blk_init	! $inp!=$out
+ ___
+ $::code.=<<___ if (!$::evp);
+@@ -613,6 +615,7 @@
+ .align	32
+ ${alg}${bits}_t4_ctr32_encrypt:
+ 	save		%sp, -$::frame, %sp
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 
+ 	prefetch	[$inp], 20
+ 	prefetch	[$inp + 63], 20
+@@ -916,6 +919,7 @@
+ .align	32
+ ${alg}${bits}_t4_xts_${dir}crypt:
+ 	save		%sp, -$::frame-16, %sp
++	srln		$len, 0, $len		! needed on v8+, "nop" on v9
+ 
+ 	mov		$ivec, %o0
+ 	add		%fp, $::bias-16, %o1
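
For reference, why the new delay-slot fill works: under the 32-bit (v8+) ABI, $len is a 32-bit size_t passed in a 64-bit register whose upper half is not guaranteed to be clear, so a 32-bit logical shift by zero (srl/srln $len, 0, $len) zero-extends it; under the 64-bit (v9) ABI the same instruction behaves as a nop, exactly as the patch comments say. The sketch below is illustrative only and is not the actual OpenSSL perlasm mechanism; the arch argument, $bits selection, and register name are assumptions. It shows how a generator script could choose between srl and srlx at build time, which is the intent behind the srln pseudo-instruction used here:

    #!/usr/bin/perl
    # Minimal sketch, assuming the generator is told the target ABI up front
    # (hypothetical command-line argument; the real t4 scripts work differently).
    use strict;
    use warnings;

    my $bits = (($ARGV[0] // '') =~ /64|v9$/) ? 64 : 32;  # assumed arch flag
    my $len  = "%o2";                                      # register assumed to hold 'len'

    # v8+ (32-bit ABI): srl ...,0,... clears the upper 32 bits of the register.
    # v9  (64-bit ABI): the value already fits in 32 bits, so the shift is a "nop";
    #                   a 64-bit build would emit srlx where a true 64-bit shift is meant.
    my $insn = ($bits == 64) ? "srlx" : "srl";
    printf "\t%s\t%s, 0, %s\t! needed on v8+, \"nop\" on v9\n", $insn, $len, $len;

Run with a target such as "sparcv9" for a 64-bit build or anything else for 32-bit; either way it prints the single shift instruction that the patch places in the delay slot of the branch (or right after save, where no delay slot is available).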