#pragma once
// libsodium is typeless, which I find confusing. Everything is unsigned
// bytes pointed at by naked pointers. It is well written assembly language,
// written in C. It is not even C written in C, let alone C++ written in C.
//
// NaCl provides a high level C++ interface to the low level C libsodium
// interface, but I just don't like the people running NaCl and suspect them
// of being in bed with my enemies, whereas I do like the people running
// LibSodium.
//
// It is nonetheless probably stupid to write my own high level interface to
// LibSodium, when one has already been written, but I am going to do so
// anyway, because elliptic curves are just way cool, and because ...
// because ... I am having trouble thinking up a good excuse ... ah yes,
// because when I read the NaCl documentation it says "X has not been
// implemented yet" when X is something that is pretty easy to implement
// if one has direct access to elliptic curve objects, and our Merkle patricia
// blockchain is going to be a great big pile of elliptic curve objects.
// NaCl suffers from the chronic disease both of open source projects,
// and also of high level interfaces to low level data structures, of getting
// fine tuned to the implementer's pet projects, which fine tuning is apt to
// get in the way of someone else's pet project.
//
// LibSodium, being assembly language written in C, rather than C written
// in C, or C++ written in C, has the problems:
// If you mix up the pointer to one kind of object with the pointer to
// another kind of object, you are sol.
// If you mix up a pointer to a thirty two byte object with a pointer to a
// sixty four byte object, you are really sol.
// If you reference something that has been de-allocated, you are sol.
// If you reference something that has not been properly initialized,
// you are sol.
// It is all raw memory, and structure exists only in the head of the
// programmer. The compiler knows nothing of these structures.
//
// That said, there is an immense amount of cryptographic knowledge
// encapsulated in this library, and I need to lift that knowledge into the
// language of C++20, from C that is almost assembly.
// Translating that knowledge from what is almost a machine language
// representation to a C++20 representation involves a big pile of stuff.
// We are going to need to lift
// https://paragonie.com/blog/2017/06/libsodium-quick-reference-quick-comparison-similar-functions-and-which-one-use or their ristretto equivalents to C++
// Starting with ristretto points and scalars, but they are useless without a
// pile of other things, many of those other things being in
// https://download.libsodium.org/doc/helpers/
// I went there to find out what the hell "sodium_is_zero" means, but
// found a pile of other things that I am going to need, and got distracted
// by no end of odds and ends that I am going to need to lift to
// C++20 in order for ristretto points and scalars to be put to any use.
void randombytes_buf(std::span<byte> in);
void randombytes_buf(std::span<char > in);
namespace ro {
// Decay to pointer is dangerously convenient,
// but in some situations it is just convenient.
// This class provides an std::array one larger
// than the compile time string size, which decays
// to char*, std::string, and wxString.
// In some code, this is ambiguous, so casts
// must sometimes be explicitly invoked.
template <unsigned int stringlen>
class CompileSizedString : public std::array<char, stringlen + 1>{
public:
static constexpr int length{ stringlen };
CompileSizedString() {
*(this->rbegin()) = '\0';
}
CompileSizedString(const char* psz) {
*(this->rbegin()) = '\0';
if (psz != nullptr) {
auto tsz{ this->begin() };
auto usz = tsz + stringlen;
while (tsz < usz && *psz != '\0')
*tsz++ = *psz++;
*tsz = '\0';
}
}
operator char* () & {
char* pc = &(static_cast<std::array<char, stringlen + 1>*>(this)->operator[](0));
return pc;
}
operator const char* () const& {
const char* pc = &(static_cast<const std::array<char, stringlen + 1>*>(this)->operator[](0));
return pc;
}
operator const char* () const&& {
const char* pc = &(static_cast<const std::array<char, stringlen + 1>*>(this)->operator[](0));
return pc;
}
operator std::string() const& {
return std::string((const char*)*this, this->length);
}
operator std::string() const&& {
return std::string((const char*)*this, this->length);
}
operator wxString() const& {
return wxString::FromUTF8Unchecked((const char*)(*this));
}
operator std::span<byte>() const& {
return std::span<byte>(reinterpret_cast<byte*>((char*)*this), stringlen + 1);
}
operator wxString() const&& {
return wxString::FromUTF8Unchecked((const char*)(*this));
}
operator std::span<byte>() const&& {
return std::span<byte>(reinterpret_cast<byte*>((char*)*this), stringlen + 1);
}
};
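// Illustrative usage sketch (not part of the header): a CompileSizedString
// can stand in for a C string, an std::string, or a wxString; where the
// conversion is ambiguous, an explicit cast picks the intended one,
// as noted above.
//    ro::CompileSizedString<5> greeting("hello");
//    const char* psz = greeting;                           // usable as a C string
//    std::string str(static_cast<const char*>(greeting));
//    wxString    wxs(static_cast<const char*>(greeting));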
// This template generates a span over an indexable byte type,
// such as a C array or an std::array, but not pointers
template < typename T>
std::enable_if_t<
!std::is_pointer<T>::value &&
sizeof(std::declval<T>()[0]) == 1,
std::span<const byte>
> serialize(const T& a) {
return std::span<const byte>(static_cast<const byte *>(static_cast<const void *>(&a[0])), std::size(a));
}
// Compile time test to see if a type has a blob array member
// This can be used in if constexpr (is_blob_field_type<T>::value)
// By convention, blob fields are an std::array of unsigned bytes
// therefore already serializable.
template <class T> struct is_blob_field_type{
template <typename U> static constexpr decltype(std::declval<U>().blob.size(), bool()) test() {
return sizeof(std::declval<U>().blob[0])==1;
}
template <typename U> static constexpr bool test(int = 0) {
return false;
}
static constexpr bool value = is_blob_field_type::template test<T>();
};
// At present our serial classes consist of std::span<uint8_t> and custom classes that publicly inherit from std::span<byte>
// To handle compound objects, add custom classes inheriting from std::span<byte>[n]
// template class that generates a std::span of bytes over the blob
// field of any object containing a blob record, which is normally sufficient
// for a machine independent representation of that object
template <typename T>
std::enable_if_t<
is_blob_field_type<T>::value,
std::span<const byte>
> serialize(const T& pt) {
return serialize(pt.blob);
}
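// Illustrative sketch (widget_id is a hypothetical type, not part of the
// header): any struct exposing a public blob member that is an std::array
// of byte sized elements picks up this serialize overload, and therefore
// becomes hashable, with no further work.
//    struct widget_id {
//        std::array<uint8_t, 16> blob;
//    };
//    static_assert(ro::is_blob_field_type<widget_id>::value);
//    widget_id w{};
//    std::span<const byte> bytes = ro::serialize(w);  // 16 byte view over w.blob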
// method that assumes that any char * pointer points at a null terminated string
// and generates a std::span of bytes over the string including the terminating
// null.
// We assume the string is already machine independent, which is to say, we assume
// it comes from a utf8 locale.
inline auto serialize(const char* sp) { return std::span<const byte>(static_cast<const byte*>(static_cast<const void*>(sp)), strlen(sp) + 1); }
inline auto serialize(const decltype(std::declval<wxString>().ToUTF8()) sz){
return serialize(static_cast<const char*>(sz));
}
/*
inline auto serialize(const wxString& wxstr) {
return serialize(static_cast<const char*>(wxstr.ToUTF8()));
}
If we allowed wxWidgets strings to be serializable, all sorts of surprising things
would be serializable in surprising ways, because wxWidgets can convert all
sorts of things into strings that you were likely not expecting, in ways
unlikely to be machine independent. So if you give an object to be
hashed for which you have not provided some correct means of serializing, C++ is
apt to unhelpfully and unexpectedly turn it into a wxString.
If you make wxStrings hashable, surprising things become hashable.
*/
// data structure containing a serialized unsigned integer.
template<class T, std::enable_if_t<is_standard_unsigned_integer<T>, int> = 0>
class userial : public std::span<byte> {
public:
std::array<byte, (std::numeric_limits<T>::digits + 6) / 7> bblob;
userial(T i) {
byte* p = &bblob[0] + sizeof(bblob);
*(--p) = i & 0x7f;
i >>= 7;
while (i != 0) {
*(--p) = (i & 0x7f) | 0x80;
i >>= 7;
}
assert(p >= &bblob[0]);
*static_cast<std::span<byte>*>(this) = std::span<byte>(p, &bblob[0] + sizeof(bblob));;
}
};
// data structure containing a serialized signed integer.
template<class T, std::enable_if_t<is_standard_signed_integer<T>, int> = 0>
class iserial : public std::span<byte> {
public:
std::array<byte, (std::numeric_limits<T>::digits + 7) / 7> bblob;
iserial(T i) {
// Throw away the repeated leading sign bits.
byte* p = &bblob[0] + sizeof(bblob);
unsigned count;
if (i < 0) {
size_t ui = i;
count = (std::numeric_limits<size_t>::digits - std::countl_one(ui)) / 7;
}
else {
size_t ui = i;
count = (std::numeric_limits<size_t>::digits - std::countl_zero(ui)) / 7;
}
*(--p) = i & 0x7f;
while (count-- != 0) {
i >>= 7;
*(--p) = (i & 0x7f) | 0x80;
}
assert(p >= &bblob[0]);
*static_cast<std::span<byte>*>(this) = std::span<byte>(p, &bblob[0] + sizeof(bblob));;
}
};
// converts a machine dependent representation of an integer
// into a span pointing at a compact machine independent representation of that integer.
// The integer is split into seven bit groups in big endian order, with the high
// order bit of each byte indicating that more bytes are to come.
// For an unsigned integer, all high order bytes of the form 0x80 are left out.
// For a positive signed integer, the same, except that the first byte
// of what is left must have a zero at bit 6.
// For a negative signed integer, all the 0xFF bytes are left out, except
// that the first byte of what is left must have a one at bit 6.
//
// Small numbers get compressed.
// Primarily used by hash and hsh so that the same numbers on different
// machines will generate the same hash.
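//
// Worked examples, derived from userial and iserial above:
//    unsigned 300  ->  { 0x82, 0x2C }   0x82 = continuation bit 0x80 | high group 0b0000010,
//                                       0x2C = final byte, low group 0b0101100
//    signed   300  ->  { 0x82, 0x2C }   bit 6 of the first remaining payload is zero,
//                                       so it decodes as positive
//    signed    -2  ->  { 0x7E }         single byte, bit 6 set marks the sign,
//                                       no continuation bit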
template<typename T> std::enable_if_t<is_standard_unsigned_integer<T>, ro::userial<T> >
serialize(T i) {
return userial<T>(i);
/* we don't need all deserialize functions to have the same name,
indeed they have to be distinct because serialized data contains
no type information, but for the sake of template code we need all
things that may be serialized to be serialized by the serialize
command, so that one template can deal with any
number of serializable types */
}
template<typename T> std::enable_if_t<is_standard_signed_integer<T>, ro::iserial<T> >serialize(T i) {
return iserial<T>(i);
/* we don't need all deserialize functions to have the same name, but for the sake of template code we need all
things that may be serialized to be serialized by the serialize command, so that one template can deal with any
number of serializable types */
}
// Turns a compact machine independent representation of an integer
// into a 64 bit signed integer
template<typename T> std::enable_if_t<is_standard_signed_integer<T>, T >
deserialize(const byte* p) {
auto oldp = p;
T i;
if (*p & 0x40)i = -64;
else i = 0;
while (*p & 0x80) {
i = (i | (*p++ & 0x7F)) << 7;
}
if (p - oldp > (std::numeric_limits<int64_t>::digits + 6) / 7)throw BadDataException();
return i | *p;
}
// Turns a compact machine independent representation of an integer
// into a 64 bit unsigned integer
template<typename T> std::enable_if_t<is_standard_unsigned_integer<T>, T >
deserialize(const byte * p) {
auto oldp = p;
T i{ 0 };
while (*p & 0x80) {
i = (i | (*p++ & 0x7F)) << 7;
}
if (p - oldp > 9)throw BadDataException();
return i | *p;
}
// Turns a compact machine independent representation of an integer
// into a 64 bit signed integer
template<typename T> std::enable_if_t<is_standard_signed_integer<T> || is_standard_unsigned_integer<T>, T >
deserialize(std::span<const byte> g) {
const byte* p = &g[0];
T i{ deserialize<T>(p) };
if (p > &g[0]+g.size())throw BadDataException();
return i;
}
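// Illustrative round trip sketch (not part of the header):
//    uint64_t n{ 300 };
//    auto s = ro::serialize(n);                       // spans the bytes { 0x82, 0x2C }
//    auto m = ro::deserialize<uint64_t>(std::span<const byte>(s));
//    assert(m == n);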
/*
It will be about a thousand years before numbers larger than 64 bits
appear in valid well formed input, and bad data structures have to be
dealt with at a much higher level that knows what the numbers mean,
and deals with them according to their meaning.
Until then the low level code will arbitrarily truncate numbers larger
than sixty four bits, though numbers larger than sixty four bits are
permissible in input and are valid at the lowest level.
We return uint64_t, rather than uint_fast64_t to ensure that all
implementations misinterpret garbage and malicious input in the
same way.
We cannot protect against Machiavelli perverting the input, so we
don't try very hard to prevent Murphy perverting the input,
but we do try to prevent Machiavelli from perverting the input in
ways that will induce peers to disagree.
We use an explicit narrow_cast, rather than simply declaring the
function to be uint64_t, in order to express the intent to uniformly
force possibly garbage data being deserialized to standardized
garbage.
We protect against malicious and ill formed data that would cause the
system to go off the rails at a point of the enemy's choosing,
and we protect against malicious and ill formed data that one party
might interpret in one way, and another party might interpret in a
different way.
Ill formed data that just gets converted into well formed, but
nonsense data can cause no harm that well formed nonsense data
could not cause.
It suffices, therefore, to ensure that all implementations misinterpret
input containing unreasonably large numbers as the same number.
Very large numbers are valid in themselves, but not going to be valid
as part of valid data structures for a thousand years or so.
The largest numbers occurring in well formed valid data will be
currency amounts, and the total number of the smallest unit of
currency is fixed at 2^64-1 which will suffice for a thousand years.
Or we might allow arbitrary precision floating point with powers of
a thousand, so that numbers sensible to a human are represented by
sensible numbers in the actual representation.
Secret keys and scalars are actually much larger numbers, modulo
0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ecU,
but they are represented in a different format, their binary format
being fixed size little endian, as 256 bit numbers, though only
253 bits are actually needed and used, and their human readable
format being 44 digits in a base 58 representation.*/
// a compile time test to check if an object class has a machine independent representation
template <typename T, typename... Args> struct is_serializable{
template <typename U, typename... Args2>
static constexpr decltype(ro::serialize(std::declval<U>()), bool()) test() {
if constexpr (sizeof...(Args2) > 0) {
return is_serializable::template test<Args2...>();
}
else {
return true;
}
}
template <typename U, typename... Args2> static constexpr bool test(int = 0) {
return false;
}
static constexpr bool value = is_serializable::template test<T,Args...>();
};
static_assert(ro::is_serializable<std::span<const char>>::value);
static_assert(ro::is_serializable<std::span<const byte>>::value);
template<class T> ro::CompileSizedString< (2 * sizeof(T))>bin2hex(const T& pt) {
ro::CompileSizedString< (2 * sizeof(T))>sz;
sodium_bin2hex(&sz[0], sizeof(pt.blob) * 2 + 1, &pt.blob[0], pt.blob.size());
return sz;
}
template<class T> T hex2bin(const ro::CompileSizedString< (2 * sizeof(T))>& sz){
T pt;
size_t bin_len{ sizeof(T) };
sodium_hex2bin(
reinterpret_cast <unsigned char* const>(&pt),
sizeof(T),
&sz[0], 2 * sizeof(T),
nullptr, &bin_len, nullptr
);
return pt;
}
template <class T>decltype(std::declval<T>().blob, ro::CompileSizedString < (sizeof(T) * 8 + 5) / 6>()) to_base64_string(const T& p_blob) {
ro::CompileSizedString < (sizeof(T) * 8 + 5) / 6> sz;
bits2base64(
&(p_blob.blob[0]), 0, sizeof(p_blob.blob) * 8,
std::span<char>(sz)
);
return sz;
}
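// Illustrative usage sketch (assumes a blob type such as ristretto255::scalar,
// defined below; for scalars the specializations declared at the end of this
// header apply):
//    auto sk  = ristretto255::scalar::random();
//    auto hex = ro::bin2hex(sk);                        // 64 hex digits, null terminated
//    auto sk2 = ro::hex2bin<ristretto255::scalar>(hex);
//    auto b64 = ro::to_base64_string(sk);               // 43 base64 digits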
} //End ro namespace
namespace ristretto255 {
using ro::to_base64_string, ro::is_serializable;
// a class representing ristretto255 elliptic points
class point;
// a class representing ristretto255 scalars
class scalar;
template<unsigned int>class hsh;
template<unsigned int>class hash;
template <class T>decltype(std::declval<T>().blob, bool()) ConstantTimeEqualityTest(const T& x, const T& y) {
if (T::constant_time_required) {
return 0 == sodium_memcmp(&x.blob[0], &y.blob[0], sizeof(x.blob));
}
else return x == y;
}
template <class T>decltype(std::declval<T>().blob, wxString()) to_wxString(const T& p_blob) {
std::array<char, (sizeof(p_blob.blob) * 8 + 11) / 6> sz;
bits2base64(&(p_blob.blob[0]), 0, sizeof(p_blob.blob) * 8, std::span<char>(sz));
return wxString::FromUTF8Unchecked(&sz[0]);
}
// hsh constructs a partial hash, not yet finalized,
// normally never explicitly used in code, but rather a nameless rvalue on the stack,
// to which more stuff can be added with the << operator.
// usage:
// hash( hsh(a, b) << c << f << g );
// assert( hash(a, b, c, d, e) == hash(hsh(a, b, c) << d << e) );
// hash finalizes hsh.
// Of course you would never use it that way, because you would only
// use it explicitly if you wanted to keep it around.
// Attempting to hash more things into an hsh object that has already
// been finalized will throw an exception.
// Old style C byte array arguments after the first are vulnerable to
// array decay to pointer, so wrap them in std::span(OldTypeCarray)
// or hash them using "<<" rather than putting them in the initializer.
// It is always a wise precaution not to use old style C arrays, or to wrap them
// in a span.
// Old style zero terminated strings work. The trailing zero is included
// in the hash.
template<unsigned int hashsize = 256> class hsh {
public:
static_assert(hashsize > 63 && hashsize % 64 == 0 && crypto_generichash_BYTES_MIN * 8 <= hashsize && hashsize <= crypto_generichash_BYTES_MAX * 8, "Bad hash size.");
static constexpr unsigned int type_indentifier = 2 + (hashsize + 0x90 * 8) / 64;
static_assert(crypto_generichash_BYTES_MAX < 0x90, "Change in max hash has broken our type ids");
crypto_generichash_blake2b_state st;
hsh() {
int i{ crypto_generichash_blake2b_init(
&st,
nullptr,0,
hashsize / 8)
};
assert(i == 0);
}
template<typename T>
typename std::enable_if<
ro::is_serializable<const T>::value,
ristretto255::hsh<hashsize>&
>::type operator << (const T& j) {
int i{ 1 };
if constexpr (std::is_same_v<std::remove_cvref_t<T>, std::span<const byte> >) {
i = crypto_generichash_blake2b_update(
&(this->st),
&j[0],
j.size()
);
}
else if constexpr (std::is_same_v<std::remove_cvref_t<T>, const char*>) {
i = crypto_generichash_blake2b_update(
&(this->st),
(const unsigned char*)(j),
strlen(j) + 1
);
}
else {
static_assert(is_serializable<const T>::value, "don't know a machine and compiler independent representation of this type");
auto sj = ro::serialize(j);
i = crypto_generichash_blake2b_update(
&(this->st),
(const unsigned char*)&sj[0],
sj.size()
);
}
if (i) throw HashReuseException();
return *this;
}
template<typename T, typename... Args,
typename std::enable_if< is_serializable<const T, Args...>::value, int >::type dummy_arg = 0
>explicit hsh(const T first, Args... args) {
int i{ crypto_generichash_blake2b_init(
&st,
nullptr,0,
hashsize / 8)
};
assert(i == 0);
(*this) << first;
if constexpr (sizeof...(args) > 0) {
(*this).hashinto(args...);
}
}
template<typename T, typename... Args,
typename std::enable_if< ro::is_serializable<const T>::value, int >::type dummy_arg = 0
> void hashinto(const T first, Args... args) {
*this << first;
if constexpr (sizeof...(args) > 0) {
(*this).hashinto(args...);
}
}
};
// This constructs a finalized hash.
// If it has one argument, and that argument is hsh (unfinalized hash) object,
// it finalizes the hash. (see hsh)
// Usage
// hash(a, b, c ...);
// hash and hsh serialize all their arguments, converting them into machine
// and compiler independent form. If they don't know how to serialize an
// argument type, you get a compile time error. To serialize a new type,
// create a new overload for the function "serialize"
// to hash a wxString, use its ToUTF8 member
// wxString wxsHello(L"Hello world");
// hash hashHello(wxsHello.ToUTF8());
// C array arguments after the first are vulnerable to array decay to pointer, so use hsh and "<<"
// for them, or wrap them in std::span(OldTypeCarray).
// It is always a wise precaution not to use old style C arrays, or to wrap them
// in a span.
// Old style zero terminated utf8 strings work. The trailing zero is included.
// The program should be running in the UTF8 locale; it attempts to set that
// locale on startup, and tests for success in the unit test.
template<unsigned int hashsize = 256> class hash {
static_assert(
hashsize > 63 && hashsize % 64 == 0
&& crypto_generichash_BYTES_MIN * 8 <= hashsize
&& hashsize <= crypto_generichash_BYTES_MAX * 8,
"Bad hash size."
);
friend point;
friend scalar;
friend hsh<hashsize>;
public:
static constexpr unsigned int type_indentifier = 2 + hashsize / 64;
hash() = default;
std::array<uint8_t, hashsize / 8> blob;
~hash() = default;
hash(hash&&) = default; // Move constructor
hash(const hash&) = default; // Copy constructor
hash& operator=(hash&&) = default; // Move assignment.
hash& operator=(const hash&) = default; // Copy assignment.
explicit hash(hsh<hashsize>& in) {
int i = crypto_generichash_blake2b_final(
&in.st,
&blob[0], hashsize / 8);
assert(i == 0);
if (i) throw HashReuseException();
}
static_assert(!ro::is_serializable<hsh<hashsize> >::value, "Don't want to partially hash partial hashes");
template<typename T, typename... Args,
typename std::enable_if< ro::is_serializable<const T, Args...>::value, int >::type dummy_arg = 0
>explicit hash(const T& first, Args... args) {
hsh<hashsize> in;
in << first;
if constexpr (sizeof...(args) > 0) {
in.hashinto(args...);
}
int i = crypto_generichash_blake2b_final(
&in.st,
&blob[0], hashsize / 8);
assert(i == 0);
}
hash& operator=(hsh<hashsize>&& in) {
int i = crypto_generichash_blake2b_final(
&in.st,
&blob[0], hashsize / 8);
if (i) throw HashReuseException();
return *this;
}
bool operator==(const hash<hashsize>& pt) const& {
return blob == pt.blob; //Do not need constant time equality test on hashes
}
bool operator!=(const hash<hashsize>& pt) const& {
return blob != pt.blob; //Do not need constant time equality test on hashes
}
};
// a class representing ristretto255 elliptic points,
// which are conveniently of prime order.
class point
{
// We will be reading points from the database, as blobs,
// reading them from the network as blobs,
// and reading them from human entered text as base58 encoded blobs.
// Therefore, invalid point initialization data is all too possible.
public:
static constexpr unsigned int type_indentifier = 1;
std::array<uint8_t, crypto_core_ristretto255_BYTES> blob;
static_assert(
std::is_trivially_copyable<std::array<uint8_t, crypto_core_ristretto255_BYTES>>(),
"does not support memcpy init"
);
static_assert(sizeof(blob) == 32,
"watch for size and alignment bugs. "
"Everyone should standarize on 256 bit public keys except for special needs"
);
static point ptZero;
static point ptBase;
explicit point() = default;
// After loading a point as a blob from the network, from the database,
// or from user data typed as text, we have to check for validity.
bool valid(void) const { return 0 != crypto_core_ristretto255_is_valid_point(&blob[0]); }
explicit constexpr point(std::array<uint8_t, crypto_core_ristretto255_BYTES>&& in) :
blob{ std::forward<std::array<uint8_t, crypto_core_ristretto255_BYTES>>(in) } { };
static_assert(crypto_core_ristretto255_BYTES == 32, "256 bit points expected");
~point() = default;
point(point&&) = default; // Move constructor
point(const point&) = default; // Copy constructor
point& operator=(point&&) = default; // Move assignment.
point& operator=(const point&) = default; // Copy assignment.
bool operator==(const point& pt) const& {
return ConstantTimeEqualityTest(*this, pt);
}
point operator+(const point &pt) const& {
point me;
auto i{ crypto_core_ristretto255_add(&me.blob[0], &blob[0], &pt.blob[0]) };
assert(i == 0);
if (i != 0)throw NonRandomScalarException();
return me;
}
point operator-(const point& pt) const& {
point me;
auto i{ crypto_core_ristretto255_sub(&me.blob[0], &blob[0], &pt.blob[0]) };
assert(i == 0);
if (i != 0)throw NonRandomScalarException();
return me;
static_assert(sizeof(blob) == 32, "alignment?");
}
point operator*(const scalar&) const &noexcept;
point operator*(int) const& noexcept;
explicit point(const hash<512>& x) noexcept {
static_assert(crypto_core_ristretto255_HASHBYTES * 8 == 512,
"we need 512 bit randoms to ensure our points and scalars are uniformly distributed"
);
// There should be scalar from hash, not point from hash
int i{
crypto_core_ristretto255_from_hash(&blob[0], &(x.blob)[0]) };
assert(i == 0);
}
static point random(void) {
point me;
crypto_core_ristretto255_random(&(me.blob[0]));
return me;
}
bool operator !() const& {
return 0 != sodium_is_zero(&blob[0], sizeof(blob));
}
static bool constant_time_required;
};
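// Illustrative sketch (wire_bytes is a hypothetical buffer): a point read
// from the network or database is just thirty two bytes until proven
// otherwise, so check valid() before using it.
//    std::array<uint8_t, crypto_core_ristretto255_BYTES> wire_bytes; // filled from the network
//    point pt(std::move(wire_bytes));
//    if (!pt.valid()) throw BadDataException();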
class scalar
{
friend point;
public:
static constexpr unsigned int type_indentifier = 2;
std::array<uint8_t, crypto_core_ristretto255_SCALARBYTES> blob;
static_assert(sizeof(blob) == 32, "watch for size and alignment bugs. Everyone should standardize on 256 bit secret keys except for special needs");
explicit scalar() = default;
~scalar() = default;
explicit constexpr scalar(std::array<uint8_t, crypto_core_ristretto255_BYTES>&& in) : blob{ in } {};
explicit constexpr scalar(std::array<uint8_t, crypto_core_ristretto255_BYTES>* in) :blob(*in) {};
explicit constexpr scalar(uintmax_t k){ for (auto& j : blob) { j = k; k = k >> 8; } }
template <class T>
explicit constexpr scalar(std::enable_if_t < ro::is_standard_unsigned_integer<T>, T> i) :scalar(uintmax_t(i)) {}
template <class T, class U = std::enable_if_t<ro::is_standard_signed_integer<T>, uintmax_t>>
explicit constexpr scalar(T i) : scalar(U(ro::iabs<T>(i))) {
static_assert (ro::is_standard_signed_integer<T>);
if (i < 0) crypto_core_ristretto255_scalar_negate(&blob[0], &blob[0]);
}
scalar(scalar&&) = default; // Move constructor
scalar(const scalar&) = default; // Copy constructor
scalar& operator=(scalar&&) = default; // Move assignment.
scalar& operator=(const scalar&) = default; // Copy assignment.
/* Don't need constant time equality test
bool operator==(const scalar& sc) const& {
return ConstantTimeEqualityTest(*this, sc);
}*/
std::strong_ordering operator <=>(const scalar& sc) const& noexcept;
// strangely, contrary to documentation, compiler generates operator>
// but fails to generate operator==
bool operator==(const scalar& sc) const& = default;
/* {
return (*this <=> sc) == 0;
}*/
//bool operator!=(const scalar& sc) const& {
// return !ConstantTimeEqualityTest(*this, sc);
// }
scalar operator+(const scalar sclr) const& {
scalar me;
crypto_core_ristretto255_scalar_add(&me.blob[0], &blob[0], &sclr.blob[0]);
return me;
}
static_assert(sizeof(scalar::blob) == 32, "compiled");
scalar multiplicative_inverse() const &{
scalar me;
auto i = crypto_core_ristretto255_scalar_invert(&me.blob[0], &blob[0]);
assert(i == 0);
if (i != 0)throw NonRandomScalarException();
return me;
}
scalar operator-(const scalar& sclr) const& {
scalar me;
crypto_core_ristretto255_scalar_sub(&me.blob[0], &blob[0], &sclr.blob[0]);
return me;
}
scalar operator*(const scalar& sclr) const& {
scalar me;
crypto_core_ristretto255_scalar_mul(&me.blob[0], &blob[0], &sclr.blob[0]);
return me;
}
scalar operator/(const scalar sclr) const& {
return operator*(sclr.multiplicative_inverse());
}
scalar operator*(int64_t i) const& {
return operator * (scalar(i));
}
point operator*(const point& pt) const& {
point me;
auto i{ crypto_scalarmult_ristretto255(&me.blob[0], &blob[0], &pt.blob[0]) };
assert(i == 0);
if (i != 0)throw NonRandomScalarException();
return me;
}
point timesBase() const& {
point me;
auto i{ crypto_scalarmult_ristretto255_base(&me.blob[0], &blob[0]) };
assert(i == 0);
if (i != 0)throw NonRandomScalarException();
return me;
}
explicit scalar(const hash<512>& x) {
static_assert(crypto_core_ristretto255_HASHBYTES == 64, "inconsistent use of magic numbers");
crypto_core_ristretto255_scalar_reduce(&blob[0], &(x.blob)[0]);
}
static scalar random(void) {
scalar me;
crypto_core_ristretto255_scalar_random(&me.blob[0]);
return me;
}
bool valid(void) const& {
int x = sodium_is_zero(&blob[0], crypto_core_ed25519_SCALARBYTES);
return x==0 && *this<scOrder;
// The libsodium library allows unreduced scalars, but I do not
// except, of course, for scOrder itself
}
// We don't need constant time equality test on scalars
// since they normally appear in signatures, or are
// checked against a hash or a point
/*bool operator !() const& {
return 0 != sodium_is_zero(&blob[0], sizeof(blob));
}*/
/* explicit operator wxString() const&;
explicit operator std::string() const&;*/
static bool constant_time_required;
static const scalar& scOrder;
private:
void reverse(std::array < uint8_t, crypto_core_ristretto255_SCALARBYTES>const& ac) {
auto p = blob.rbegin();
for (auto x : ac) {
*p++ = x;
}
}
};
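// Illustrative sketch (not part of the header): the usual key pair and
// shared secret constructions fall out of the operators above.
//    auto a = scalar::random();            // Alice's secret key
//    auto b = scalar::random();            // Bob's secret key
//    point A{ a.timesBase() };             // Alice's public key
//    point B{ b.timesBase() };             // Bob's public key
//    assert(a * B == b * A);               // both sides derive the same shared secret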
static_assert(ro::is_blob_field_type<scalar>::value);
static_assert(ro::is_blob_field_type<scalar&>::value);
static_assert(ro::is_blob_field_type<point>::value);
static_assert(ro::is_blob_field_type<hash<256> >::value);
static_assert(false == ro::is_blob_field_type<char*>::value);
static_assert(ro::is_serializable<scalar&>::value);
static_assert(ro::is_serializable<hash<512>&>::value);
static_assert(ro::is_blob_field_type<int>::value == false);
static_assert(ro::is_serializable<unsigned int>::value);
static_assert(ro::is_serializable<char*>::value);
static_assert(ro::is_serializable<uint8_t*>::value == false); //false because uint8_t * has no inband terminator
static_assert(false == ro::is_serializable<wxString>::value && !ro::is_constructible_from_v<hash<256>, wxString>, "wxStrings are apt to convert anything to anything, with surprising and unexpected results");
static_assert(ro::is_serializable<decltype(std::declval<wxString>().ToUTF8())>::value == true);
static_assert(ro::is_constructible_from_all_of<scalar, int, hash<512>, std::array<uint8_t, crypto_core_ristretto255_BYTES>>);
static_assert(ro::is_constructible_from_all_of<hash<256>, char*, short, unsigned short, hash<512>, point, scalar>, "want to be able to hash anything serializable");
static_assert(false == ro::is_constructible_from_any_of<int, scalar, point, hsh<512>, hash<256>>);
static_assert(false == ro::is_constructible_from_any_of <scalar, wxString, hash<256>, byte*>, "do not want indiscriminate casts");
static_assert(false == ro::is_constructible_from_any_of <point, wxString, hash<256>, byte*>, "do not want indiscriminate casts ");
static_assert(false == ro::is_constructible_from_v<hash<256>, float>);
static_assert(ro::is_serializable<float>::value == false);//Need to convert floats to
// their machine independent representation, possibly through ldexp, frexp
// and DBL_MANT_DIG
static_assert(sizeof(decltype(ro::serialize(std::declval<scalar>())[0])) == 1);
static_assert (std::is_standard_layout<scalar>(), "scalar for some reason is not standard layout");
static_assert (std::is_trivial<scalar>(), "scalar for some reason is not trivial");
static_assert(sizeof(point) == 32, "funny alignment");
static_assert(sizeof(scalar) == 32, "funny alignment");
class signed_text {
public:
std::span<char> txt;
scalar c;
scalar s;
point K;
signed_text(
const scalar&, // Signer's secret key
const point&, // Signer's public key
std::span<char> // Text to be signed.
);
bool verify();
};
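// Illustrative usage sketch (the constructor and verify() are implemented
// elsewhere; the message below is hypothetical):
//    auto sk = scalar::random();           // signer's secret key
//    point pk{ sk.timesBase() };           // signer's public key
//    char msg[]{ "pay Bob one unit" };
//    signed_text st(sk, pk, std::span<char>(msg));
//    assert(st.verify());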
class CMasterSecret :public scalar {
public:
CMasterSecret() = default;
CMasterSecret(const scalar& pt) :scalar(pt) {}
CMasterSecret(const scalar&& pt) :scalar(pt) {}
CMasterSecret(CMasterSecret&&) = default; // Move constructor
CMasterSecret(const CMasterSecret&) = default; // Copy constructor
CMasterSecret& operator=(CMasterSecret&&) = default; // Move assignment.
CMasterSecret& operator=(const CMasterSecret&) = default; // Copy assignment.
template<class T> auto operator()(T psz) {
scalar& t(*this);
return scalar(hash<512>(t, psz));
}
};
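// Illustrative sketch (the label string is hypothetical): operator() above
// derives a per purpose secret by hashing the master secret with a label,
// so one master secret can stand behind many keys.
//    CMasterSecret master(scalar::random());
//    scalar site_key{ master("example.com login") };
//    point  site_pub{ site_key.timesBase() };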
} //End ristretto255 namespace
// Ristretto255 scalars are defined to be little endian on all hardware
// regardless of the endianness of the underlying hardware,
// though it is entirely possible that sometime in the future this
// definition will be changed, should big endian hardware ever be
// sufficiently popular for anyone to care.
// So, because scalars are in fact integers, they are displayed
// big endian on all hardware when displayed in hex
// or base64. Everything else gets displayed in memory order.
template<> ristretto255::scalar ro::hex2bin <ristretto255::scalar >(const ro::CompileSizedString< (2 * sizeof(ristretto255::scalar))>&);
template<> ro::CompileSizedString< (2 * sizeof(ristretto255::scalar)) > ro::bin2hex<ristretto255::scalar>(const ristretto255::scalar&);
template<> ro::CompileSizedString< (8 * sizeof(ristretto255::scalar) + 5) / 6> ro::to_base64_string <ristretto255::scalar>(const ristretto255::scalar&);