From aefe424d7695ed13b240673298d340bc552365fe Mon Sep 17 00:00:00 2001
From: Bernd Edlinger <bernd.edlinger@hotmail.de>
Date: Mon, 13 Feb 2023 17:46:41 +0100
Subject: [PATCH] Alternative fix for CVE-2022-4304
This is about a timing leak in the topmost limb
of the internal result of RSA_private_decrypt,
before the padding check.
In fact, at least three bugs combined to cause the
timing leak:
First, and probably most important: the blinding did not
use the constant-time code path at all when the RSA object
was used for a private decrypt, because the Montgomery
context rsa->_method_mod_n was not yet set up in
rsa_ossl_private_decrypt when BN_BLINDING_create_param
needed it, so a NULL context was persisted as
blinding->m_ctx, even though the RSA object creates the
Montgomery context just a bit later.
Second, the infamous bn_correct_top was used on the
secret value right after the blinding was removed.
And third, BN_bn2binpad did not use the constant-time
code path, since the BN_FLG_CONSTTIME flag was not set
on the secret value.
To address the first problem, this patch makes sure that
rsa->_method_mod_n is initialized right before the
blinding context is set up.
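In outline, the reordering looks like this (a simplified
sketch assembled from the rsa_eay.c hunks below, not the
literal diff):

    /* set up the Montgomery context first ... */
    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n,
                                    CRYPTO_LOCK_RSA, rsa->n, ctx))
            goto err;

    /* ... so that rsa_get_blinding() can persist it as
     * blinding->m_ctx, and the constant-time Montgomery
     * path is taken when the blinding is inverted */
    if (!(rsa->flags & RSA_FLAG_NO_BLINDING))
        blinding = rsa_get_blinding(rsa, &local_blinding, ctx);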
To fix the second problem, we add a new utility function
bn_correct_top_consttime, a constant-time variant of
bn_correct_top.
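At its call site in BN_BLINDING_invert_ex, the new helper
replaces the bn_correct_top that used to run on the
unblinded value, and internally it scans all dmax limbs
with masks instead of exiting at the first non-zero limb.
The lines below are from the bn_blind.c and bn_lib.c hunks;
the comments are added here as a reading aid:

    /* bn_blind.c: keep the product at fixed top, then fix
     * n->top without branching on the secret limbs */
    ret = bn_mul_mont_fixed_top(n, n, r, b->m_ctx, ctx);
    bn_correct_top_consttime(n);

    /* bn_lib.c: the branch-free non-zero test, per limb */
    limb |= 0 - limb;       /* top bit set iff limb != 0   */
    limb >>= BN_BITS2 - 1;  /* 1 iff limb != 0, else 0     */
    limb = 0 - limb;        /* all-ones or all-zeros mask  */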
The third problem is solved by setting BN_FLG_CONSTTIME
on the secret value before the blinding is inverted;
since BN_bn2binpad is already constant-time when that
flag is set, this should eliminate the timing oracle
completely.
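Concretely, rsa_blinding_invert now starts by tagging the
secret value (from the rsa_eay.c hunk below):

    /* make sure everything downstream that touches f, in
     * particular BN_bn2binpad, stays constant-time */
    BN_set_flags(f, BN_FLG_CONSTTIME);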
In addition, the no-asm variant may also have branches
that depend on secret values: the last invocation of
bn_sub_words in bn_from_montgomery_word had branches when
the function was compiled by certain gcc versions, due to
the clumsy coding style.
So this patch additionally streamlines the no-asm C code
in order to avoid branches where possible and improve the
resulting code quality.
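For illustration, here is the first unrolled step of
bn_sub_words before and after the change (taken from the
bn_asm.c hunk below); the data-dependent if becomes
straight-line carry arithmetic:

    /* before: conditionally updated borrow */
    t1 = a[0]; t2 = b[0];
    r[0] = (t1 - t2 - c) & BN_MASK2;
    if (t1 != t2)
        c = (t1 < t2);

    /* after: borrow computed unconditionally */
    t1 = a[0];
    t2 = (t1 - c) & BN_MASK2;   /* subtract incoming borrow */
    c  = (t2 > t1);             /* borrow out of a[0] - c   */
    t1 = b[0];
    t1 = (t2 - t1) & BN_MASK2;  /* subtract the b limb      */
    r[0] = t1;
    c  += (t1 > t2);            /* borrow out of t2 - b[0]  */

Note that the two partial borrows can never both be 1 (the
first one requires a[0] == 0 and c == 1, which makes t2 the
maximum word value), so c stays 0 or 1.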
---
CHANGES | 10 +++++
crypto/bn/bn_asm.c | 102 +++++++++++++++++++++++++++++----------------------
crypto/bn/bn_blind.c | 3 +
crypto/bn/bn_lcl.h | 26 ++++++-------
crypto/bn/bn_lib.c | 22 +++++++++++
crypto/rsa/rsa_eay.c | 8 ++--
6 files changed, 111 insertions(+), 60 deletions(-)
--- a/CHANGES
+++ b/CHANGES
@@ -4,6 +4,16 @@
Changes between 0.9.8i and 0.9.8j [07 Jan 2009]
+ *) Reworked the Fix for the Timing Oracle in RSA Decryption (CVE-2022-4304).
+ The previous fix for this timing side channel turned out to cause
+ a severe 2-3x performance regression in the typical use case
+ compared to 1.1.1s. The new fix uses existing constant time
+ code paths, and restores the previous performance level while
+ fully eliminating all existing timing side channels.
+ The fix was developed by Bernd Edlinger with testing support
+ by Hubert Kario.
+ [Bernd Edlinger]
+
*) Properly check EVP_VerifyFinal() and similar return values
(CVE-2008-5077).
[Ben Laurie, Bodo Moeller, Google Security Team]
--- a/crypto/bn/bn_asm.c
+++ b/crypto/bn/bn_asm.c
@@ -392,24 +392,40 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const
for (;;)
{
- t1=a[0]; t2=b[0];
- r[0]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
+ t1=a[0];
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[0];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[0] = t1;
+ c += (t1 > t2);
if (--n <= 0) break;
- t1=a[1]; t2=b[1];
- r[1]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
+ t1=a[1];
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[1];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[1] = t1;
+ c += (t1 > t2);
if (--n <= 0) break;
- t1=a[2]; t2=b[2];
- r[2]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
+ t1=a[2];
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[2];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[2] = t1;
+ c += (t1 > t2);
if (--n <= 0) break;
- t1=a[3]; t2=b[3];
- r[3]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
+ t1=a[3];
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[3];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[3] = t1;
+ c += (t1 > t2);
if (--n <= 0) break;
a+=4;
@@ -440,25 +456,25 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const
t=(BN_ULLONG)a*b; \
t1=(BN_ULONG)Lw(t); \
t2=(BN_ULONG)Hw(t); \
- c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c0=(c0+t1)&BN_MASK2; t2 += ((c0) < t1); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define mul_add_c2(a,b,c0,c1,c2) \
t=(BN_ULLONG)a*b; \
tt=(t+t)&BN_MASK; \
- if (tt < t) c2++; \
+ c2 += (tt < t); \
t1=(BN_ULONG)Lw(tt); \
t2=(BN_ULONG)Hw(tt); \
c0=(c0+t1)&BN_MASK2; \
- if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c2 += ((c0 < t1) && (((++t2)&BN_MASK2) == 0)); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define sqr_add_c(a,i,c0,c1,c2) \
t=(BN_ULLONG)a[i]*a[i]; \
t1=(BN_ULONG)Lw(t); \
t2=(BN_ULONG)Hw(t); \
- c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c0=(c0+t1)&BN_MASK2; t2 += ((c0) < t1); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define sqr_add_c2(a,i,j,c0,c1,c2) \
mul_add_c2((a)[i],(a)[j],c0,c1,c2)
@@ -468,24 +484,24 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const
#define mul_add_c(a,b,c0,c1,c2) { \
BN_ULONG ta=(a),tb=(b); \
BN_UMULT_LOHI(t1,t2,ta,tb); \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
+ c0 += t1; t2 += (c0<t1); \
+ c1 += t2; c2 += (c1<t2); \
}
#define mul_add_c2(a,b,c0,c1,c2) { \
BN_ULONG ta=(a),tb=(b),t0; \
BN_UMULT_LOHI(t0,t1,ta,tb); \
- c0 += t0; t2 = t1+((c0<t0)?1:0);\
- c1 += t2; c2 += (c1<t2)?1:0; \
- c0 += t0; t1 += (c0<t0)?1:0; \
- c1 += t1; c2 += (c1<t1)?1:0; \
+ c0 += t0; t2 = t1+((c0<t0));\
+ c1 += t2; c2 += (c1<t2); \
+ c0 += t0; t1 += (c0<t0); \
+ c1 += t1; c2 += (c1<t1); \
}
#define sqr_add_c(a,i,c0,c1,c2) { \
BN_ULONG ta=(a)[i]; \
BN_UMULT_LOHI(t1,t2,ta,ta); \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
+ c0 += t1; t2 += (c0<t1); \
+ c1 += t2; c2 += (c1<t2); \
}
#define sqr_add_c2(a,i,j,c0,c1,c2) \
@@ -497,26 +513,26 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const
BN_ULONG ta=(a),tb=(b); \
t1 = ta * tb; \
t2 = BN_UMULT_HIGH(ta,tb); \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
+ c0 += t1; t2 += (c0<t1); \
+ c1 += t2; c2 += (c1<t2); \
}
#define mul_add_c2(a,b,c0,c1,c2) { \
BN_ULONG ta=(a),tb=(b),t0; \
t1 = BN_UMULT_HIGH(ta,tb); \
t0 = ta * tb; \
- c0 += t0; t2 = t1+((c0<t0)?1:0);\
- c1 += t2; c2 += (c1<t2)?1:0; \
- c0 += t0; t1 += (c0<t0)?1:0; \
- c1 += t1; c2 += (c1<t1)?1:0; \
+ c0 += t0; t2 = t1+((c0<t0));\
+ c1 += t2; c2 += (c1<t2); \
+ c0 += t0; t1 += (c0<t0); \
+ c1 += t1; c2 += (c1<t1); \
}
#define sqr_add_c(a,i,c0,c1,c2) { \
BN_ULONG ta=(a)[i]; \
t1 = ta * ta; \
t2 = BN_UMULT_HIGH(ta,ta); \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
+ c0 += t1; t2 += (c0<t1); \
+ c1 += t2; c2 += (c1<t2); \
}
#define sqr_add_c2(a,i,j,c0,c1,c2) \
@@ -527,25 +543,25 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const
t1=LBITS(a); t2=HBITS(a); \
bl=LBITS(b); bh=HBITS(b); \
mul64(t1,t2,bl,bh); \
- c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c0=(c0+t1)&BN_MASK2; t2 += ((c0) < t1); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define mul_add_c2(a,b,c0,c1,c2) \
t1=LBITS(a); t2=HBITS(a); \
bl=LBITS(b); bh=HBITS(b); \
mul64(t1,t2,bl,bh); \
- if (t2 & BN_TBIT) c2++; \
+ c2 += !!(t2 & BN_TBIT); \
t2=(t2+t2)&BN_MASK2; \
- if (t1 & BN_TBIT) t2++; \
+ t2 += !!(t1 & BN_TBIT); \
t1=(t1+t1)&BN_MASK2; \
c0=(c0+t1)&BN_MASK2; \
- if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c2 += ((c0 < t1) && (((++t2)&BN_MASK2) == 0)); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define sqr_add_c(a,i,c0,c1,c2) \
sqr64(t1,t2,(a)[i]); \
- c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
- c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;
+ c0=(c0+t1)&BN_MASK2; t2 += ((c0) < t1); \
+ c1=(c1+t2)&BN_MASK2; c2 += ((c1) < t2);
#define sqr_add_c2(a,i,j,c0,c1,c2) \
mul_add_c2((a)[i],(a)[j],c0,c1,c2)
--- a/crypto/bn/bn_blind.c
+++ b/crypto/bn/bn_blind.c
@@ -270,7 +270,8 @@ int BN_BLINDING_invert_ex(BIGNUM *n, con
n->top = (int)(rtop & ~mask) | (ntop & mask);
n->flags |= (BN_FLG_FIXED_TOP & ~mask);
}
- ret = BN_mod_mul_montgomery(n, n, r, b->m_ctx, ctx);
+ ret = bn_mul_mont_fixed_top(n, n, r, b->m_ctx, ctx);
+ bn_correct_top_consttime(n);
} else {
ret = BN_mod_mul(n, n, r, b->mod, ctx);
}
--- a/crypto/bn/bn_lcl.h
+++ b/crypto/bn/bn_lcl.h
@@ -328,10 +328,10 @@ extern "C" {
ret = (r); \
BN_UMULT_LOHI(low,high,w,tmp); \
ret += (c); \
- (c) = (ret<(c))?1:0; \
+ (c) = (ret<(c)); \
(c) += high; \
ret += low; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -340,7 +340,7 @@ extern "C" {
BN_UMULT_LOHI(low,high,w,ta); \
ret = low + (c); \
(c) = high; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -356,10 +356,10 @@ extern "C" {
high= BN_UMULT_HIGH(w,tmp); \
ret += (c); \
low = (w) * tmp; \
- (c) = (ret<(c))?1:0; \
+ (c) = (ret<(c)); \
(c) += high; \
ret += low; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -369,7 +369,7 @@ extern "C" {
high= BN_UMULT_HIGH(w,ta); \
ret = low + (c); \
(c) = high; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -402,10 +402,10 @@ extern "C" {
lt=(bl)*(lt); \
m1=(bl)*(ht); \
ht =(bh)*(ht); \
- m=(m+m1)&BN_MASK2; if (m < m1) ht+=L2HBITS((BN_ULONG)1); \
+ m=(m+m1)&BN_MASK2; ht += L2HBITS((BN_ULONG)(m < m1)); \
ht+=HBITS(m); \
m1=L2HBITS(m); \
- lt=(lt+m1)&BN_MASK2; if (lt < m1) ht++; \
+ lt=(lt+m1)&BN_MASK2; ht += (lt < m1); \
(l)=lt; \
(h)=ht; \
}
@@ -422,7 +422,7 @@ extern "C" {
h*=h; \
h+=(m&BN_MASK2h1)>>(BN_BITS4-1); \
m =(m&BN_MASK2l)<<(BN_BITS4+1); \
- l=(l+m)&BN_MASK2; if (l < m) h++; \
+ l=(l+m)&BN_MASK2; h += (l < m); \
(lo)=l; \
(ho)=h; \
}
@@ -436,9 +436,9 @@ extern "C" {
mul64(l,h,(bl),(bh)); \
\
/* non-multiply part */ \
- l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ l=(l+(c))&BN_MASK2; h += (l < (c)); \
(c)=(r); \
- l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ l=(l+(c))&BN_MASK2; h += (l < (c)); \
(c)=h&BN_MASK2; \
(r)=l; \
}
@@ -452,7 +452,7 @@ extern "C" {
mul64(l,h,(bl),(bh)); \
\
/* non-multiply part */ \
- l+=(c); if ((l&BN_MASK2) < (c)) h++; \
+ l+=(c); h += ((l&BN_MASK2) < (c)); \
(c)=h&BN_MASK2; \
(r)=l&BN_MASK2; \
}
@@ -482,7 +482,7 @@ BN_ULONG bn_add_part_words(BN_ULONG *r,
BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int cl, int dl);
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
-
+void bn_correct_top_consttime(BIGNUM *a);
#ifdef __cplusplus
}
#endif
--- a/crypto/bn/bn_lib.c
+++ b/crypto/bn/bn_lib.c
@@ -66,6 +66,7 @@
#include <stdio.h>
#include "cryptlib.h"
#include "bn_lcl.h"
+#include "constant_time_locl.h"
const char BN_version[]="Big Number" OPENSSL_VERSION_PTEXT;
@@ -957,3 +958,24 @@ void BN_consttime_swap(BN_ULONG conditio
#undef BN_CONSTTIME_SWAP
}
+void bn_correct_top_consttime(BIGNUM *a)
+{
+ int j, atop;
+ BN_ULONG limb;
+ unsigned int mask;
+
+ for (j = 0, atop = 0; j < a->dmax; j++) {
+ limb = a->d[j];
+ limb |= 0 - limb;
+ limb >>= BN_BITS2 - 1;
+ limb = 0 - limb;
+ mask = (unsigned int)limb;
+ mask &= constant_time_msb(j - a->top);
+ atop = constant_time_select_int(mask, j + 1, atop);
+ }
+
+ mask = constant_time_eq_int(atop, 0);
+ a->top = atop;
+ a->neg = constant_time_select_int(mask, 0, a->neg);
+ a->flags &= ~BN_FLG_FIXED_TOP;
+}
--- a/crypto/rsa/rsa_eay.c
+++ b/crypto/rsa/rsa_eay.c
@@ -328,6 +328,7 @@ static int rsa_blinding_convert(BN_BLIND
static int rsa_blinding_invert(BN_BLINDING *b, int local, BIGNUM *f,
BIGNUM *r, BN_CTX *ctx)
{
+ BN_set_flags(f, BN_FLG_CONSTTIME);
if (local)
return BN_BLINDING_invert_ex(f, NULL, b, ctx);
else
@@ -510,6 +511,10 @@ static int RSA_eay_private_decrypt(int f
goto err;
}
+ if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
+ if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, CRYPTO_LOCK_RSA, rsa->n, ctx))
+ goto err;
+
if (!(rsa->flags & RSA_FLAG_NO_BLINDING))
{
blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
@@ -547,9 +552,6 @@ static int RSA_eay_private_decrypt(int f
else
d = rsa->d;
- if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
- if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, CRYPTO_LOCK_RSA, rsa->n, ctx))
- goto err;
if (!rsa->meth->bn_mod_exp(ret,f,d,rsa->n,ctx,
rsa->_method_mod_n))
goto err;