forked from cheng/wallet
cleanup, and just do not like pdfs
Also, needed to understand Byzantine fault tolerant paxos better. Still do not.
This commit is contained in:
parent e49662106b
commit 5238cda077
37 .gitconfig
@@ -10,23 +10,24 @@
	graph = log --max-count=18 --graph --pretty=format:'%C(auto)%h %s %Cgreen(%cr) %C(bold blue)%cn %G?%Creset' --abbrev-commit
	alias = ! git config --get-regexp ^alias\\. | sed -e s/^alias\\.// -e s/\\ /\\ =\\ / | grep -v ^'alias ' | sort
	fixws = !"\
	    if (! git diff-files --quiet .) && \
	       (! git diff-index --quiet --cached HEAD) ; then \
	        git commit -m FIXWS_SAVE_INDEX && \
	        git add -u :/ && \
	        git commit -m Fix_whitespace && \
	        git rebase --whitespace=fix HEAD~2 && \
	        git reset HEAD~ && \
	        git reset --soft HEAD~ ; \
	    elif (! git diff-files --quiet .) ; then \
	        git add -u :/ && \
	        git commit -m Fix_whitespace && \
	        git rebase --whitespace=fix HEAD~ && \
	        git reset HEAD~ ; \
	    elif (! git diff-index --quiet --cached HEAD) ; then \
	        git commit -m FIXWS_SAVE_INDEX && \
	        git rebase --whitespace=fix HEAD~ && \
	        git reset --soft HEAD~ ; \
	    fi"
	check-whitespace = !"git diff --check $(git hash-object -t tree /dev/null) HEAD"
[commit]
	gpgSign = true
1 ILog.cpp
@@ -38,4 +38,3 @@ void queue_fatal_error(const char* psz) {
	queue_error_message(psz);
	singletonFrame->Close();
}

@@ -93,4 +93,3 @@ void sqlite3_init();
extern "C" {
	int sqlite3_shutdown(void);
}
54 LICENSE.html
@@ -6,38 +6,38 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>LICENSE</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}

table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">

</head>
<body>

@@ -1,5 +1,4 @@
---
generator:
title: LICENSE
---
Copyright © 2021 reaction.la gpg key 154588427F2709CD9D7146B01C99BB982002C39F
54 NOTICE.html
@@ -6,38 +6,38 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>NOTICE</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}

table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">

</head>
<body>
23 README.html
@@ -101,7 +101,6 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">

</head>
<body>
<header id="title-block-header">
@@ -121,20 +120,7 @@ build the program and run unit test for the first time, launch the Visual
Studio X64 native tools command prompt in the cloned directory, then:</p>
<pre class="bat"><code>winConfigure.bat</code></pre>
<p>winConfigure.bat also configures the repository you just created to use
<<<<<<< HEAD
<code>.gitconfig</code> in the repository, causing git to rquire to implement gpg signed
commits – because cryptographic software is under attack from NSA,
entryists, and shills, who seek to introduce backdoors.</p>
<p>This may be inconvenient if you do not have gpg installed and set up.</p>
<p>It adds several git aliases:</p>
<ol type="1">
<li><code>git lg</code> to display the gpg trust information for the las three commits.
For this to be useful you need to import the repository public key
public_key.gpg` into gpg, and locally sign that key.</li>
<li><code>git fixws</code> to standardise white space to the project standards</li>
<li><code>git graph</code> to graph the commit tree, and git alias to display the git aliases.</li>
=======
<code>.gitconfig</code> in the repository, causing git to require to implement GPG signed
<code>.gitconfig</code> in the repository, causing git to to implement GPG signed
commits – because <a href="./docs/contributor_code_of_conduct.html#code-will-be-cryptographically-signed" target="_blank" title="Contributor Code of Conduct">cryptographic software is under attack</a> from NSA
entryists, and shills, who seek to introduce backdoors.</p>
<p>This may be inconvenient if you do not have <code>gpg</code> installed and set up.</p>
@@ -146,13 +132,12 @@ For this to be useful you need to import the repository public key
<li><code>git fixws</code> to standardise white space to the project standards</li>
<li><code>git graph</code> to graph the commit tree</li>
<li><code>git alias</code> to display the git aliases.</li>
>>>>>>> origin/master
</ol>
<div class="sourceCode" id="cb3"><pre class="sourceCode bash"><code class="sourceCode bash"><span id="cb3-1"><a href="#cb3-1" aria-hidden="true" tabindex="-1"></a><span class="co"># To verify that the signature on future pulls is unchanged.</span></span>
<span id="cb3-2"><a href="#cb3-2" aria-hidden="true" tabindex="-1"></a><span class="ex">gpg</span> <span class="at">--import</span> public_key.gpg</span>
<span id="cb3-3"><a href="#cb3-3" aria-hidden="true" tabindex="-1"></a><span class="ex">gpg</span> <span class="at">--lsign</span> 096EAE16FB8D62E75D243199BC4482E49673711C</span>
<span id="cb3-4"><a href="#cb3-4" aria-hidden="true" tabindex="-1"></a><span class="co"># We ignore the Gpg Web of Trust model and instead use</span></span>
<span id="cb3-5"><a href="#cb3-5" aria-hidden="true" tabindex="-1"></a><span class="co"># the Zooko identity model.</span></span>
<span id="cb3-6"><a href="#cb3-6" aria-hidden="true" tabindex="-1"></a><span class="co"># We use Gpg signatures to verify that remote repository</span></span>
<span id="cb3-7"><a href="#cb3-7" aria-hidden="true" tabindex="-1"></a><span class="co"># code is coming from an unchanging entity, not for</span></span>
<span id="cb3-8"><a href="#cb3-8" aria-hidden="true" tabindex="-1"></a><span class="co"># Gpg Web of Trust. Web of Trust is too complicated</span></span>
@@ -165,7 +150,7 @@ For this to be useful you need to import the repository public key
<span id="cb3-15"><a href="#cb3-15" aria-hidden="true" tabindex="-1"></a><span class="co"># or the email of someone whom you do not like.</span></span></code></pre></div>
<p>To build the documentation in its intended html form from the markdown
files, execute the bash script file <code>docs/mkdocs.sh</code>, in an environment where
<code>pandoc</code> is available. On Windows, if Git Bash and Pandoc has bee
<code>pandoc</code> is available. On Windows, if Git Bash and Pandoc has been
installed, you should be able to run a shell file in bash by double clicking on it.</p>
<p><a href="./RELEASE_NOTES.html">Pre alpha release</a>, which means it does not yet work even well enough for
it to be apparent what it would do if it did work.</p>
@@ -6,38 +6,38 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>Release Notes</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}

table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">

</head>
<body>
27 bit_hacks.h
@@ -73,30 +73,3 @@ inline auto trailing_zero_bits(uint64_t v) {
	}
	return c;
}

@@ -10,7 +10,3 @@ private:
	wxBoxSizer* m_lSizer;
	wxBoxSizer* m_rSizer;
};
@@ -1,16 +1,16 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Massive Parallelism</title>
</head>
<body><p>
Digital Ocean, Docker, microservices, Rest, Json and protocol buffers.</p><p>
@@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@@ -15,93 +15,93 @@
<link rel="shortcut icon" href="../rho.ico"><title>May scale of monetary hardness</title>
</head>
<body>
<h1>May scale of monetary hardness</h1>
<p><a href="./index.html"> To Home page</a> </p>
<p>
J.C. May defined the following scale of monetary hardness.
The following is mostly his words, edited to bring them up to
date.</p>
<table border="1" cellpadding="6" cellspacing="0" width="95%">
<tbody>
<tr>
<td colspan="2" style="background-color: #99CC66;
text-align:center;">May Scale of monetary hardness </td>
</tr>
<tr>
<td style="text-align:center;"><b> Hardness</b> </td>
<td> <br/>
</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Hard</td>
</tr>
<tr>
<td class="center"><b>1</b></td>
<td>Street cash, US dollars</td>
</tr>
<tr>
<td class="center"><b>2</b></td>
<td>Street cash, euro currencies, japan</td>
</tr>
<tr>
<td class="center"><b>3</b></td>
<td>Major crypto currencies, such as Bitcoin and Monaro</td>
</tr>
<tr>
<td class="center"><b>4</b></td>
<td>Street cash, other regions</td>
</tr>
<tr>
<td class="center"><b>5</b></td>
<td>Interbank transfers of various sorts (wires etc),
bank checks</td>
</tr>
<tr>
<td class="center"><b>6</b></td>
<td>personal checks</td>
</tr>
<tr>
<td class="center"><b>7</b>
</td>
<td>Consumer-level electronic account transfers (eg
bPay)</td>
</tr>
<tr>
<td class="center"><b>8</b></td>
<td>Business-account-level retail transfer systems</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Soft</td>
</tr>
<tr>
<td class="center"><b>9</b></td>
<td>Paypal and similar 'new money' entities, beenz</td>
</tr>
<tr>
<td class="center"><b>10</b></td>
<td>Credit cards</td>
</tr>
</tbody>
</table>
<h2 class="green">Three essays from different periods follow</h2>
<hr><p>Observe that say stock brokerages definitely do not accept credit cards or
paypal to fund an account. They will only accept instruments that are very hard,
such as wire transfers or certified bank checks.</p><p>

When hard money is required, only money-types with a hardness of about 5
or better will do the job.</p><p>

On the other hand, if you're purchasing an online subscription, or
consumer goods from a large retailer, softer money-types are more acceptable.</p><p>

When dealing with conversions <b>between</b> different types of money,
generally you can only go "downwards" on the May scale.</p><p>

Thus, for example it is very easy to accept cash-dollars, and hand out
paypal-dollars in return. But it would be almost impossible to accept credit cards or
paypal-dollars, and hand out cash in return.</p>

<hr/>
<p><em>It is extremely significant that <b>individuals</b> tend to require harder money in their transactions.</em></p><p>

Corporations and large bodies <b>can get away with</b> using softer money, as they have more political (in the broad sense) power to affect the outcome of dubious or revoked transactions.</p><p>
@@ -128,36 +128,36 @@

<hr>

<p class="green">
Original (oldest) essay, where Tim May first proposed the May Scale of Monetary Hardness:<br/>

This essay was written in the time when e-gold appeared to be successful. E-gold attempted to do what Bitcoin is attempting to, and failed. Bitcoin was inspired in substantial part to fix the problems that killed e-gold. The centralized single-point-of-failure ledgers of e-gold came under attack by the state, by scammers, and by state backed scammers.</p>
<pre>
>Your question provokes us to focus on a major factor inhibiting the growth
>of e-gold – that there’s no common way now to put money into an account fast
>(as in a matter of minutes instead of hours or more likely, days and weeks).
>An ironic situation, considering that e-gold is destined for greatness as
>the currency of the internet.
</pre><p>
It’s worth noting that funding – say – a trading account with your
stock broker is just as "difficult" as buying e-gold. </p><p>

For that matter, funding a new BANK ACCOUNT is just as difficult as
buying e-gold.</p><p>

When you open a stock broking account at etrade or whatever, you
certainly cannot get funds there instantly – your options are wire
and wait days, bank check or cashier’s check and wait a week or a
personal check and wait a couple of weeks.</p><p>

A stock broking account, like buying e-gold, is a very HARD form of
money. Whenever you are trying to buy a very HARD form of money,
using a softer form of money.
</p>
<p>
Here is the "May Scale" of money hardness (comments invited)
</p>
<pre> --hard--
1 street cash, US dollars
2 street cash, euro currencies, Aus, japan
3 egold
@@ -171,16 +171,16 @@
10 credit cards
 --ludicrously soft!--
</pre>
It is not meant to be definitive (eg, 6 and 7 could perhaps be
swapped; I left out cash on call at your stock broker, which is
probably around "2", etc) but gives a framework to think in.<p>

Now if you're a retailer and you're selling VCRs, sure, you can take
poxy money around the May Scale of 8, 9 or 10.</p><p>

But if you're a "retailer" and what you're selling is money itself
– ie, you are selling e-gold, or you are Quick & Reilly – it
is EXCEEDINGLY DIFFICULT to accept anything with May Scale > about 5.</p><p>

(Note that at coconutgold, we simply only accept wires! All the exchange providers for e-gold who accept money on the May Scale of 9 or 10 are very brave, tough, and quite understandably have to charge fairly high premiums to do so!)</p><p>
@@ -1,92 +1,92 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Bitrot, Protocol Negotiation, and the Confused Deputy Problem</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Bitrot and Protocol Negotiation</h1>
<h2>The problem</h2><p>
One particular case of the bitrot problem was the Microsoft Windows
problem known as “DLL Hell”, DLLs being binary dynamically linked
libraries in Microsoft Windows. </p>

<p> Over time these libraries tended to be upgraded, improved, and changed,
and programs written for the old libraries would develop bugs with the new
libraries, sometimes these bugs were crash and burn bugs, “bitrot”,
sometimes there were unexpected interactions between programs using the
same library, which caused one program to accidentally foul up another, or
enabled one program to maliciously manipulate another. </p>

<p> This problem was solved. The solution was “COM”. In COM, dynamic
linking necessarily involves version negotiation. Mandatory version
negotiation largely relieves bitrot. </p>

<p> In COM, in accordance with Zooko’s triangle, each version of a library’s
behavior, each library interface, has three names. Describing those names
and their behavior from the point of view of Zooko’s triangle, which is
not how most Microsoft programmers would describe them or think about
them: </p>

<ol>
<li>The GUID, the globally unique identifier, a very large random number,
a number so large that it was unlikely that any two libraries or two
versions would randomly choose the same number. Compiled software
interacts with other compiled software using this identifier.</li>

<li>The nickname, a human readable user friendly name and version number,
which is not necessarily globally unique. “Nickname” is Zooko’s
terminology, not what Microsoft calls them. Humans writing code to be
interpreted may use the nickname, though the correct behavior would be
for the code writer to use the petname, and for the development
environment to insert the appropriate GUID, if no GUID is specified, and
adjust the petname to its local value if the GUID is specified. </li>

<li>It may, and should, have a petname, its registry key, a humanly
readable user friendly local name which is guaranteed unique on the
particular computer on which the library (ActiveX object) has been
installed, but is not necessarily meaningful to the world at large,
though this is not quite implemented. Again, petname is Zooko’s
terminology, not what Microsoft calls them. The petname, if it
exists, is automatically generated from the nickname. Error
messages should use the petname, though they tend to use the nickname. </li>
</ol>

<p> In order for a program to connect to any COM library (what Microsoft
calls an ActiveX object), it has to do protocol negotiation in order to
get an interface, has to ask for the interface by its globally unique
identifier, so the library always knows what version of the library the
program expects, and will provide that behavior, or, if it cannot provide
that behavior, the program will fail immediately with an error message
explaining the problem. </p>
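The negotiation step described above can be made concrete with a small sketch. The following C++ is the editor's illustration only, not COM's real API: a caller asks a library for an interface by a globally unique identifier and fails cleanly if that exact interface version is not provided (the `Guid`, `ILogger`, and `Library` names are invented for the example).

```cpp
#include <array>
#include <cstdint>
#include <map>
#include <stdexcept>

// A 128-bit identifier standing in for a COM-style GUID.
using Guid = std::array<std::uint8_t, 16>;

struct ILogger {                          // hypothetical interface, one frozen version
    virtual void log(const char* msg) = 0;
    virtual ~ILogger() = default;
};

class Library {
public:
    void expose(const Guid& id, void* iface) { interfaces_[id] = iface; }

    // Protocol negotiation: the caller names the exact interface version it was
    // compiled against; the library either provides it or refuses immediately,
    // instead of silently linking against changed behaviour and bitrotting later.
    void* query_interface(const Guid& id) const {
        auto it = interfaces_.find(id);
        if (it == interfaces_.end())
            throw std::runtime_error("requested interface version not supported");
        return it->second;
    }

private:
    std::map<Guid, void*> interfaces_;
};
```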
<p> <em>This solution worked. It solved DLL hell, solved bitrot. </em>
</p>

<p> Windows implementation of this solution was less successful in dealing
with another problem – library calls often cross thread and process
boundaries. They provided a general purpose threading solution, also part
of COM, which was hideously complicated and failed dismally. But
they fixed bitrot. </p>

<p> Cross thread and cross process interactions usually wind up being
implemented as message streams and message queues. The correct
approach is to make this explicit, to define the interface as a message
protocol, rather than attempting to hide the underlying message queue
behavior as Microsoft did and pretend it is an ordinary synchronous object
method. Where COM runs on top of message queues, as it does whenever
a call crosses thread or process boundaries, the result is intolerable
obscurity, complexity, and inefficiency – which is still a lot better than
the bitrot that it fixed. </p>

<h2>The blockchain solution</h2><p>
@@ -1,16 +1,16 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Block Google Analytics</title>
</head>
<body><a href="./index.html"> To Home page</a>
<h1>Block Google Analytics</h1><p>
644 docs/byzantine_paxos.md Normal file
@@ -0,0 +1,644 @@
---
# katex
# notmine
title: >-
    Practical Byzantine Fault Tolerance
---

::: centre
Appears in the Proceedings of the Third Symposium on Operating Systems Design and Implementation, New Orleans, USA, February 1999

Miguel Castro and Barbara Liskov

Laboratory for Computer Science, Massachusetts Institute of Technology, 545
Technology Square, Cambridge, MA 02139
:::

# Abstract

This paper describes a new replication algorithm that is able to tolerate Byzantine faults. We believe that Byzantine-fault-tolerant algorithms will be increasingly important in the future because malicious attacks and software errors are increasingly common and can cause faulty nodes to exhibit arbitrary behavior. Whereas previous algorithms assumed a synchronous system or were too slow to be used in practice, the algorithm described in this paper is practical: it works in asynchronous environments like the Internet and incorporates several important optimizations that improve the response time of previous algorithms by more than an order of magnitude. We implemented a Byzantine-fault-tolerant NFS service using our algorithm and measured its performance. The results show that our service is only 3% slower than a standard unreplicated NFS.
# Introduction

Malicious attacks and software errors are increasingly common. The growing reliance of industry and government on online information services makes malicious attacks more attractive and makes the consequences of successful attacks more serious. In addition, the number of software errors is increasing due to the growth in size and complexity of software. Since malicious attacks and software errors can cause faulty nodes to exhibit Byzantine (i.e., arbitrary) behavior, Byzantine-fault-tolerant algorithms are increasingly important.

This paper presents a new, *practical* algorithm for state machine replication [17, 34] that tolerates Byzantine faults. The algorithm offers both liveness and safety provided at most $\lfloor\frac{(n-1)}{3}\rfloor$ out of a total of $n$ replicas are simultaneously faulty. This means that clients eventually receive replies to their requests and those replies are correct according to linearizability [14, 4]. The algorithm works in asynchronous systems like the Internet and it incorporates important optimizations that enable it to perform efficiently.

There is a significant body of work on agreement and replication techniques that tolerate Byzantine faults (starting with [19]). However, most earlier work (e.g., [3, 24, 10]) either concerns techniques designed to demonstrate theoretical feasibility that are too inefficient to be used in practice, or assumes synchrony, i.e., relies on known bounds on message delays and process speeds. The systems closest to ours, Rampart [30] and SecureRing [16], were designed to be practical, but they rely on the synchrony assumption for correctness, which is dangerous in the presence of malicious attacks. An attacker may compromise the safety of a service by delaying non-faulty nodes or the communication between them until they are tagged as faulty and excluded from the replica group. Such a denial-of-service attack is generally easier than gaining control over a non-faulty node.

Our algorithm is not vulnerable to this type of attack because it does not rely on synchrony for safety. In addition, it improves the performance of Rampart and SecureRing by more than an order of magnitude as explained in Section 7. It uses only one message round trip to execute read-only operations and two to execute read-write operations. Also, it uses an efficient authentication scheme based on message authentication codes during normal operation; public-key cryptography, which was cited as the major latency [29] and throughput [22] bottleneck in Rampart, is used only when there are faults.

To evaluate our approach, we implemented a replication library and used it to implement a real service: a Byzantine-fault-tolerant distributed file system that supports the NFS protocol. We used the Andrew benchmark [15] to evaluate the performance of our system. The results show that our system is only 3% slower than the standard NFS daemon in the Digital Unix kernel during normal-case operation.

Thus, the paper makes the following contributions:

* It describes the first state-machine replication protocol that correctly survives Byzantine faults in asynchronous networks.

* It describes a number of important optimizations that allow the algorithm to perform well so that it can be used in real systems.

* It describes the implementation of a Byzantine-fault-tolerant distributed file system.

* It provides experimental results that quantify the cost of the replication technique.

The remainder of the paper is organized as follows. We begin by describing our system model, including our failure assumptions. Section 3 describes the problem solved by the algorithm and states correctness conditions. The algorithm is described in Section 4 and some important optimizations are described in Section 5. Section 6 describes our replication library and how we used it to implement a Byzantine-fault-tolerant NFS. Section 7 presents the results of our experiments. Section 8 discusses related work. We conclude with a summary of what we have accomplished and a discussion of future research directions.
# System Model

We assume an asynchronous distributed system where nodes are connected by a network. The network may fail to deliver messages, delay them, duplicate them, or deliver them out of order.

We use a Byzantine failure model, i.e., faulty nodes may behave arbitrarily, subject only to the restriction mentioned below. We assume independent node failures. For this assumption to be true in the presence of malicious attacks, some steps need to be taken, e.g., each node should run different implementations of the service code and operating system and should have a different root password and a different administrator. It is possible to obtain different implementations from the same code base [28] and for low degrees of replication one can buy operating systems from different vendors. N-version programming, i.e., different teams of programmers produce different implementations, is another option for some services.

We use cryptographic techniques to prevent spoofing and replays and to detect corrupted messages. Our messages contain public-key signatures [33], message authentication codes [36], and message digests produced by collision-resistant hash functions [32]. We denote a message $m$ signed by node $i$ as $\{m\}σ_i$ and the digest of message $m$ by $D(m)$. We follow the common practice of signing a digest of a message and appending it to the plaintext of the message rather than signing the full message ($\{m\}σ_i$ should be interpreted in this way). All replicas know the others' public keys to verify signatures.

We allow for a very strong adversary that can coordinate faulty nodes, delay communication, or delay correct nodes in order to cause the most damage to the replicated service. We do assume that the adversary cannot delay correct nodes indefinitely. We also assume that the adversary (and the faulty nodes it controls) are computationally bound so that (with very high probability) it is unable to subvert the cryptographic techniques mentioned above. For example, the adversary cannot produce a valid signature of a non-faulty node, compute the information summarized by a digest from the digest, or find two messages with the same digest. The cryptographic techniques we use are thought to have these properties [33, 36, 32].
# Service Properties

Our algorithm can be used to implement any deterministic replicated *service* with a *state* and some *operations*. The operations are not restricted to simple reads or writes of portions of the service state; they can perform arbitrary deterministic computations using the state and operation arguments. Clients issue requests to the replicated service to invoke operations and block waiting for a reply. The replicated service is implemented by $n$ replicas. Clients and replicas are non-faulty if they follow the algorithm in Section 4 and if no attacker can forge their signature.

The algorithm provides both *safety* and *liveness* assuming no more than $\lfloor\frac{(n-1)}{3}\rfloor$ replicas are faulty. Safety means that the replicated service satisfies linearizability [14] (modified to account for Byzantine-faulty clients [4]): it behaves like a centralized implementation that executes operations atomically one at a time. Safety requires the bound on the number of faulty replicas because a faulty replica can behave arbitrarily, e.g., it can destroy its state.

Safety is provided regardless of how many faulty clients are using the service (even if they collude with faulty replicas): all operations performed by faulty clients are observed in a consistent way by non-faulty clients. In particular, if the service operations are designed to preserve some invariants on the service state, faulty clients cannot break those invariants.

The safety property is insufficient to guard against faulty clients, e.g., in a file system a faulty client can write garbage data to some shared file. However, we limit the amount of damage a faulty client can do by providing access control: we authenticate clients and deny access if the client issuing a request does not have the right to invoke the operation. Also, services may provide operations to change the access permissions for a client. Since the algorithm ensures that the effects of access revocation operations are observed consistently by all clients, this provides a powerful mechanism to recover from attacks by faulty clients.

The algorithm does not rely on synchrony to provide safety. Therefore, it must rely on synchrony to provide liveness; otherwise it could be used to implement consensus in an asynchronous system, which is not possible [9]. We guarantee liveness, i.e., clients eventually receive replies to their requests, provided at most $\lfloor\frac{(n-1)}{3}\rfloor$ replicas are faulty and $delay(t)$ does not grow faster than $t$ indefinitely. Here, $delay(t)$ is the time between the moment $t$ when a message is sent for the first time and the moment when it is received by its destination (assuming the sender keeps retransmitting the message until it is received). (A more precise definition can be found in [4].) This is a rather weak synchrony assumption that is likely to be true in any real system provided network faults are eventually repaired, yet it enables us to circumvent the impossibility result in [9].

The resiliency of our algorithm is optimal: $3f+1$ is the minimum number of replicas that allow an asynchronous system to provide the safety and liveness properties when up to $f$ replicas are faulty (see [2] for a proof). This many replicas are needed because it must be possible to proceed after communicating with $n-f$ replicas, since $f$ replicas might be faulty and not responding. However, it is possible that the replicas that did not respond are not faulty and, therefore, $f$ of those that responded might be faulty. Even so, there must still be enough responses that those from non-faulty replicas outnumber those from faulty ones, i.e., $n-2f>f$. Therefore $n>3f$.
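A worked illustration of the counting argument above (an editor's sketch, not code from the paper): with $n = 3f+1$ replicas a node can wait for $n-f$ responses, and even if $f$ of those are faulty, the $n-2f$ non-faulty responses still outnumber them. The struct and names below are invented for the example.

```cpp
#include <cassert>
#include <cstddef>

// Quorum sizes implied by n = 3f + 1 (names are illustrative, not the paper's).
struct QuorumParams {
    std::size_t n;                                            // total replicas
    std::size_t f() const { return (n - 1) / 3; }             // faults tolerated
    std::size_t reply_quorum() const { return f() + 1; }      // matching client replies
    std::size_t commit_quorum() const { return 2 * f() + 1; } // matching commits per replica
};

int main() {
    QuorumParams q{4};                    // smallest interesting case: f = 1
    assert(q.f() == 1);
    // Progress quorum of n - f responses: non-faulty responders outnumber faulty ones.
    assert(q.n - 2 * q.f() > q.f());      // n - 2f > f, hence n > 3f
    return 0;
}
```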
The algorithm does not address the problem of fault-tolerant privacy: a faulty replica may leak information to an attacker. It is not feasible to offer fault-tolerant privacy in the general case because service operations may perform arbitrary computations using their arguments and the service state; replicas need this information in the clear to execute such operations efficiently. It is possible to use secret sharing schemes [35] to obtain privacy even in the presence of a threshold of malicious replicas [13] for the arguments and portions of the state that are opaque to the service operations. We plan to investigate these techniques in the future.

# The Algorithm

Our algorithm is a form of *state machine* replication [17, 34]: the service is modelled as a state machine that is replicated across different nodes in a distributed system. Each state machine replica maintains the service state and implements the service operations. We denote the set of replicas by $R$ and identify each replica using an integer in $\{0, ..., |R|-1\}$. For simplicity, we assume $|R|=3f+1$ where $f$ is the maximum number of replicas that may be faulty; although there could be more than $3f+1$ replicas, the additional replicas degrade performance (since more and bigger messages are being exchanged) without providing improved resiliency.

The replicas move through a succession of configurations called views. In a view one replica is the primary and the others are backups. Views are numbered consecutively. The primary of a view is replica $p$ such that $p = v \bmod |R|$, where $v$ is the view number. View changes are carried out when it appears that the primary has failed. Viewstamped Replication [26] and Paxos [18] used a similar approach to tolerate benign faults (as discussed in Section 8).
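Since the primary of view $v$ is just $v \bmod |R|$, a view change deterministically rotates leadership. A minimal sketch (the editor's illustration, not the paper's code):

```cpp
#include <cstdint>
#include <iostream>

// Replica ids are 0 .. n-1; the primary of view v is v mod n.
std::uint32_t primary_of_view(std::uint64_t view, std::uint32_t n_replicas) {
    return static_cast<std::uint32_t>(view % n_replicas);
}

int main() {
    const std::uint32_t n = 4;                  // |R| = 3f + 1 with f = 1
    for (std::uint64_t v = 0; v < 6; ++v)       // primaries rotate 0,1,2,3,0,1,...
        std::cout << "view " << v << " -> primary " << primary_of_view(v, n) << '\n';
    return 0;
}
```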
The algorithm works roughly as follows:

1. A client sends a request to invoke a service operation to the primary.

2. The primary multicasts the request to the backups.

3. Replicas execute the request and send a reply to the client.

4. The client waits for $f+1$ replies from different replicas with the same result; this is the result of the operation.

Like all state machine replication techniques [34], we impose two requirements on replicas: they must be *deterministic* (i.e., the execution of an operation in a given state and with a given set of arguments must always produce the same result) and they must start in the same state. Given these two requirements, the algorithm ensures the safety property by guaranteeing that *all non-faulty replicas agree on a total order for the execution of requests despite failures*.

The remainder of this section describes a simplified version of the algorithm. We omit discussion of how nodes recover from faults due to lack of space. We also omit details related to message retransmissions. Furthermore, we assume that message authentication is achieved using digital signatures rather than the more efficient scheme based on message authentication codes; Section 5 discusses this issue further. A detailed formalization of the algorithm using the I/O automaton model [21] is presented in [4].

## The Client

A client $c$ requests the execution of state machine operation $o$ by sending a $\{$REQUEST$,o,t,c\}σ_c$ message to the primary. Timestamp $t$ is used to ensure *exactly-once* semantics for the execution of client requests. Timestamps for $c$'s requests are totally ordered such that later requests have higher timestamps than earlier ones; for example, the timestamp could be the value of the client's local clock when the request is issued.

Each message sent by the replicas to the client includes the current view number, allowing the client to track the view and hence the current primary. A client sends a request to what it believes is the current primary using a point-to-point message. The primary atomically multicasts the request to all the backups using the protocol described in the next section.

A replica sends the reply to the request directly to the client. The reply has the form $\{$REPLY$, v, t, c, i, r\}σ_i$ where $v$ is the current view number, $t$ is the timestamp of the corresponding request, $i$ is the replica number, and $r$ is the result of executing the requested operation.

The client waits for $f+1$ replies with valid signatures from different replicas, and with the same $t$ and $r$ before accepting the result $r$. This ensures that the result is valid, since at most $f$ replicas can be faulty.
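The client-side acceptance rule can be read as a small vote-counting loop. This is the editor's sketch under the stated rule, not the paper's code; the `Reply` and `ReplyCollector` names are invented, and signature checking is assumed to have happened before `add` is called.

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <utility>

// A reply as the client sees it after verifying the replica's signature.
struct Reply {
    std::uint64_t timestamp;   // t from the original REQUEST
    std::uint32_t replica;     // i
    std::string   result;      // r
};

class ReplyCollector {
public:
    explicit ReplyCollector(std::size_t f) : f_(f) {}

    // Accept result r once f + 1 distinct replicas report the same (t, r),
    // since at most f replicas can be faulty.
    std::optional<std::string> add(const Reply& rep) {
        auto& voters = votes_[{rep.timestamp, rep.result}];
        voters.insert(rep.replica);
        if (voters.size() >= f_ + 1) return rep.result;
        return std::nullopt;
    }

private:
    std::size_t f_;
    std::map<std::pair<std::uint64_t, std::string>, std::set<std::uint32_t>> votes_;
};
```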
If the client does not receive replies soon enough, it broadcasts the request to all replicas. If the request has already been processed, the replicas simply re-send the reply; replicas remember the last reply message they sent to each client. Otherwise, if the replica is not the primary, it relays the request to the primary. If the primary does not multicast the request to the group, it will eventually be suspected to be faulty by enough replicas to cause a view change.

In this paper we assume that the client waits for one request to complete before sending the next one. But we can allow a client to make asynchronous requests, yet preserve ordering constraints on them.

## Normal-Case Operation

The state of each replica includes the state of the service, a *message log* containing messages the replica has accepted, and an integer denoting the replica's current view. We describe how to truncate the log in Section 5.3.

When the primary, $p$, receives a client request, $m$, it starts a three-phase protocol to atomically multicast the request to the replicas. The primary starts the protocol immediately unless the number of messages for which the protocol is in progress exceeds a given maximum. In this case, it buffers the request. Buffered requests are multicast later as a group to cut down on message traffic and CPU overheads under heavy load; this optimization is similar to a group commit in transactional systems [11]. For simplicity, we ignore this optimization in the description below.

The three phases are *pre-prepare*, *prepare*, and *commit*. The pre-prepare and prepare phases are used to totally order requests sent in the same view even when the primary, which proposes the ordering of requests, is faulty. The prepare and commit phases are used to ensure that requests that commit are totally ordered across views.

In the pre-prepare phase, the primary assigns a sequence number, $n$, to the request, multicasts a pre-prepare message with $m$ piggybacked to all the backups, and appends the message to its log. The message has the form $\{\{$PRE-PREPARE$, v, n, d\}σ_p,m\}$, where $v$ indicates the view in which the message is being sent, $m$ is the client's request message, and $d$ is $m$'s digest.

Requests are not included in pre-prepare messages to keep them small. This is important because pre-prepare messages are used as a proof that the request was assigned sequence number $n$ in view $v$ in view changes. Additionally, it decouples the protocol to totally order requests from the protocol to transmit the request to the replicas; allowing us to use a transport optimized for small messages for protocol messages and a transport optimized for large messages for large requests.

A backup accepts a pre-prepare message provided:

* the signatures in the request and the pre-prepare message are correct and $d$ is the digest for $m$;

* it is in view $v$;

* it has not accepted a pre-prepare message for view $v$ and sequence number $n$ containing a different digest;

* the sequence number in the pre-prepare message is between a low water mark, $h$, and a high water mark, $H$.

The last condition prevents a faulty primary from exhausting the space of sequence numbers by selecting a very large one. We discuss how $H$ and $h$ advance in Section 5.3.
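Read together, the four conditions form a single acceptance predicate. A hedged C++ sketch follows; field and function names are the editor's, not the paper's, and it assumes the signatures were already verified and that `digest_of_m` was recomputed from the piggybacked request.

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <utility>

struct PrePrepare {
    std::uint64_t view;    // v
    std::uint64_t seq;     // n
    std::string   digest;  // d, claimed to be D(m)
};

struct BackupState {
    std::uint64_t current_view;
    std::uint64_t low_water;   // h
    std::uint64_t high_water;  // H
    // Digest already accepted for a given (view, sequence number), if any.
    std::map<std::pair<std::uint64_t, std::uint64_t>, std::string> accepted;
};

bool accept_pre_prepare(const BackupState& s, const PrePrepare& pp,
                        const std::string& digest_of_m) {
    if (pp.digest != digest_of_m) return false;              // d must equal D(m)
    if (pp.view != s.current_view) return false;             // must be in view v
    auto it = s.accepted.find({pp.view, pp.seq});
    if (it != s.accepted.end() && it->second != pp.digest)
        return false;                                         // no conflicting digest for (v, n)
    return pp.seq > s.low_water && pp.seq <= s.high_water;    // between the water marks
}
```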
||||
|
||||
If backup $i$
|
||||
accepts the $\{$PRE-PREPARE$, v, n, d\}σ_i,m\}$
|
||||
message, it enters the prepare phase by multicasting a\
|
||||
$\{$PREPARE$,v,n,d,i\}σ_i$
|
||||
message to all other replicas and adds both messages to its log. Otherwise, it does nothing.
|
||||
|
||||
A replica (including the primary) accepts prepare messages and adds them to its log provided their signatures are correct, their view number equals the replica's current view, and their sequence number is between $h$ and $H$.
|
||||
|
||||
We define the predicate *prepared*$(m,v,n,i)$ to be true if and only if replica $i$ has inserted in its log: the request $m$, a pre-prepare for $m$ in view $v$ with sequence number $n$, and $2f$ prepares from different backups that match the pre-prepare. The replicas verify whether the prepares match the pre-prepare by checking that they have the same view, sequence number, and digest.
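
A minimal sketch of the *prepared* predicate over a replica's message log follows; the `Log` container and `Prepare` record are assumed shapes for illustration, not the paper's implementation:

```python
from dataclasses import dataclass, field

@dataclass(frozen=True)
class Prepare:
    view: int
    seq: int
    digest: str
    replica: int

@dataclass
class Log:
    requests: set = field(default_factory=set)       # digests of requests received
    pre_prepares: set = field(default_factory=set)   # (view, seq, digest) triples
    prepares: set = field(default_factory=set)       # Prepare messages

def prepared(m_digest: str, v: int, n: int, log: Log, f: int) -> bool:
    """prepared(m, v, n, i): the log holds request m, a matching pre-prepare,
    and 2f prepares from distinct backups with the same view, seq, and digest."""
    if m_digest not in log.requests or (v, n, m_digest) not in log.pre_prepares:
        return False
    backups = {p.replica for p in log.prepares
               if (p.view, p.seq, p.digest) == (v, n, m_digest)}
    return len(backups) >= 2 * f
```
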
|
||||
|
||||
The pre-prepare and prepare phases of the algorithm guarantee that non-faulty replicas agree on a total order for the requests within a view. More precisely, they ensure the following invariant: if *prepared*$(m,v,n,i)$ is true then *prepared*$(m',v,n,j)$ is false for any non-faulty replica $j$ (including $i=j$) and any $m'$ such that $D(m')\not =D(m)$. This is true because *prepared*$(m,v,n,i)$ and $|R|=3f+1$ imply that at least $f+1$ non-faulty replicas have sent a pre-prepare or prepare for $m$ in view $v$ with sequence number $n$. Thus, for *prepared*$(m',v,n,j)$ to be true at least one of these replicas needs to have sent two conflicting prepares (or pre-prepares if it is the primary for $v$), i.e., two prepares with the same view and sequence number and a different digest. But this is not possible because the replica is not faulty. Finally, our assumption about the strength of message digests ensures that the probability that $m\not=m'$ and $D(m)=D(m')$ is negligible.
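
The counting behind this argument can be made explicit. A prepared certificate for $(m,v,n)$ involves $2f+1$ distinct replicas (the primary's pre-prepare plus $2f$ prepares), so two certificates for the same view and sequence number must overlap:

$$|Q_1 \cap Q_2| \ge |Q_1| + |Q_2| - |R| = (2f+1) + (2f+1) - (3f+1) = f+1, \qquad |R| = 3f+1.$$

Since at most $f$ replicas are faulty, the intersection contains a non-faulty replica, and a non-faulty replica never sends two conflicting (pre-)prepares for the same view and sequence number.
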
|
||||
|
||||
Replica $i$ multicasts a $\{$COMMIT$,v,n,D(m),i\}σ_i$ to the other replicas when *prepared*$(m,v,n,i)$ becomes true. This starts the commit phase. Replicas accept commit messages and insert them in their log provided they are properly signed, the view number in the message is equal to the replica's current view, and the sequence number is between $h$ and $H$.
|
||||
|
||||
We define the *committed* and *committed-local* predicates as follows: *committed*$(m,v,n)$ is true if and only if *prepared*$(m,v,n,i)$ is true for all $i$ in some set of $f+1$ non-faulty replicas; and *committed-local*$(m,v,n,i)$ is true if and only if *prepared*$(m,v,n,i)$ is true and $i$ has accepted $2f+1$ commits (possibly including its own) from different replicas that match the pre-prepare for $m$; a commit matches a pre-prepare if they have the same view, sequence number, and digest.
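
Continuing in the same illustrative style, *committed-local* just layers a quorum of matching commits on top of the *prepared* check; the `Commit` record and the `is_prepared` flag (the result of the earlier sketch) are assumptions:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class Commit:
    view: int
    seq: int
    digest: str
    replica: int

def committed_local(m_digest: str, v: int, n: int, commits: set,
                    is_prepared: bool, f: int) -> bool:
    """committed-local(m, v, n, i): prepared(m, v, n, i) holds and replica i
    has logged 2f + 1 matching commits (possibly including its own) from
    distinct replicas."""
    senders = {c.replica for c in commits
               if (c.view, c.seq, c.digest) == (v, n, m_digest)}
    return is_prepared and len(senders) >= 2 * f + 1
```
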
|
||||
|
||||
The commit phase ensures the following invariant: if *committed-local*$(m,v,n,i)$ is true for some non-faulty $i$ then *committed*$(m,v,n)$ is true. This invariant and the view-change protocol described in Section 5.4 ensure that non-faulty replicas agree on the sequence numbers of requests that commit locally even if they commit in different views at each replica. Furthermore, it ensures that any request that commits locally at a non-faulty replica will commit at $f+1$ or more non-faulty replicas eventually.
|
||||
|
||||
Each replica $i$ executes the operation requested by $m$ after *committed-local*$(m,v,n,i)$ is true and $i$'s state reflects the sequential execution of all requests with lower sequence numbers. This ensures that all non-faulty replicas execute requests in the same order as required to provide the safety property. After executing the requested operation, replicas send a reply to the client. Replicas discard requests whose timestamp is lower than the timestamp in the last reply they sent to the client to guarantee exactly-once semantics.
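
A sketch of this execution loop, with an assumed `pending` map of committed-local requests and a per-client `last_reply` cache for the exactly-once check, might look as follows:

```python
def try_execute(state, pending, last_exec, last_reply, service):
    """Execute committed-local requests strictly in sequence-number order.

    `pending` maps sequence number -> (client, timestamp, operation) for
    requests that are committed-local but not yet executed; `last_reply`
    maps client -> (timestamp, reply).  Names are illustrative.
    """
    while last_exec + 1 in pending:
        last_exec += 1
        client, ts, op = pending.pop(last_exec)
        prev = last_reply.get(client)
        if prev and ts <= prev[0]:
            # stale or duplicate request: skip it (a real replica would
            # retransmit the cached reply for a duplicate)
            continue
        reply = service(state, op)        # apply the operation to the service state
        last_reply[client] = (ts, reply)  # remember the last reply per client
    return last_exec
```
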
|
||||
|
||||
We do not rely on ordered message delivery, and therefore it is possible for a replica to commit requests out of order. This does not matter since it keeps the pre-prepare, prepare, and commit messages logged until the corresponding request can be executed.
|
||||
|
||||
Figure 1 shows the operation of the algorithm in the normal case of no primary faults. Replica 0 is the primary, replica 3 is faulty, and C is the client.
|
||||
|
||||
![Normal Case Operation](./images/practical_byzantine_consensus_fig_1.webp){width=100%}
|
||||
|
||||
## Garbage Collection
|
||||
|
||||
This section discusses the mechanism used to discard messages from the log. For the safety condition to hold, messages must be kept in a replica's log until it knows that the requests they concern have been executed by at least $f+1$ non-faulty replicas and it can prove this to others in view changes. In addition, if some replica misses messages that were discarded by all non-faulty replicas, it will need to be brought up to date by transferring all or a portion of the service state. Therefore, replicas also need some proof that the state is correct.
|
||||
|
||||
Generating these proofs after executing every operation would be expensive. Instead, they are generated periodically, when a request with a sequence number divisible by some constant (e.g., 100) is executed. We will refer to the states produced by the execution of these requests as *checkpoints* and we will say that a checkpoint with a proof is a *stable checkpoint*.
|
||||
|
||||
A replica maintains several logical copies of the service state: the last stable checkpoint, zero or more checkpoints that are not stable, and a current state. Copy-on-write techniques can be used to reduce the space overhead to store the extra copies of the state, as discussed in Section 7.3.
|
||||
|
||||
The proof of correctness for a checkpoint is generated as follows. When a replica produces a checkpoint, it multicasts a message $\{$CHECKPOINT$,n,d,i\}σ_i$ to the other replicas, where $n$ is the sequence number of the last request whose execution is reflected in the state and $d$ is the digest of the state. Each replica collects checkpoint messages in its log until it has $2f+1$ of them for sequence number $n$ with the same digest signed by different replicas (including possibly its own such message). These $2f+1$ messages are the proof of correctness for the checkpoint.
|
||||
|
||||
A checkpoint with a proof becomes stable and the replica discards all pre-prepare, prepare, and commit messages with sequence number less than or equal to $n$ from its log; it also discards all earlier checkpoints and checkpoint messages.
|
||||
|
||||
Computing the proofs is efficient because the digest can be computed using incremental cryptography [1] as discussed in Section 7.3, and proofs are generated rarely.

The checkpoint protocol is used to advance the low and high water marks (which limit what messages will be accepted). The low water mark $h$ is equal to the sequence number of the last stable checkpoint. The high water mark $H=h+k$, where $k$ is big enough so that replicas do not stall waiting for a checkpoint to become stable. For example, if checkpoints are taken every 100 requests, $k$ might be 200.
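
The garbage-collection bookkeeping reduces to a small state machine. The sketch below is an illustration under stated assumptions (the window constant `K`, the message containers, and the method names are not the paper's implementation):

```python
from collections import defaultdict

K = 200   # window size, so H = h + K (assumes checkpoints every 100 requests)

class GarbageCollector:
    """Sketch of checkpoint-driven log truncation and water-mark advancement."""

    def __init__(self, f: int):
        self.f = f
        self.h = 0                               # seq of the last stable checkpoint
        self.checkpoint_msgs = defaultdict(set)  # (n, digest) -> set of replica ids
        self.log = {}                            # seq -> protocol messages for that seq

    @property
    def H(self) -> int:
        return self.h + K

    def on_checkpoint(self, n: int, digest: str, replica: int):
        peers = self.checkpoint_msgs[(n, digest)]
        peers.add(replica)
        if len(peers) >= 2 * self.f + 1 and n > self.h:   # 2f+1 messages form the proof
            self.h = n                                    # checkpoint becomes stable
            # discard pre-prepare/prepare/commit messages with seq <= n,
            # plus earlier checkpoints and checkpoint messages
            self.log = {s: m for s, m in self.log.items() if s > n}
            self.checkpoint_msgs = defaultdict(
                set, {k: v for k, v in self.checkpoint_msgs.items() if k[0] > n})
```
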
|
||||
|
||||
## View Changes
|
||||
|
||||
The view-change protocol provides liveness by allowing the system to make progress when the primary fails. View changes are triggered by timeouts that prevent backups from waiting indefinitely for requests to execute. A backup is *waiting* for a request if it received a valid request and has not executed it. A backup starts a timer when it receives a request and the timer is not already running. It stops the timer when it is no longer waiting to execute the request, but restarts it if at that point it is waiting to execute some other request.
|
||||
|
||||
If the timer of backup $i$ expires in view $v$, the backup starts a view change to move the system to view $v+1$. It stops accepting messages (other than checkpoint, view-change, and new-view messages) and multicasts a $\{$VIEW-CHANGE$,v+1,n,C,P,i\}σ_i$ message to all replicas. Here $n$ is the sequence number of the last stable checkpoint $s$ known to $i$, $C$ is a set of $2f+1$ valid checkpoint messages proving the correctness of $s$, and $P$ is a set containing a set $P_m$ for each request $m$ that prepared at $i$ with a sequence number higher than $n$. Each set $P_m$ contains a valid pre-prepare message (without the corresponding client message) and $2f$ matching, valid prepare messages signed by different backups with the same view, sequence number, and the digest of $m$.
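
A view-change message can be assembled from the replica's log roughly as follows; the dictionary shapes and field names are illustrative assumptions rather than the paper's wire format:

```python
def build_view_change(i, new_view, stable_n, stable_proof, log, f):
    """Assemble the VIEW-CHANGE payload described above (names illustrative).

    stable_proof : the 2f+1 checkpoint messages certifying the stable checkpoint
    log          : per-sequence-number records holding the pre-prepare and the
                   prepares collected for that sequence number
    """
    P = {}
    for n, record in log.items():
        if n <= stable_n:
            continue
        pp = record["pre_prepare"]
        prepares = [p for p in record["prepares"]
                    if (p["view"], p["digest"]) == (pp["view"], pp["digest"])]
        if len({p["replica"] for p in prepares}) >= 2 * f:   # request prepared at i
            P[n] = {"pre_prepare": pp,                       # without the client message
                    "prepares": prepares[: 2 * f]}
    return {"type": "VIEW-CHANGE", "view": new_view, "n": stable_n,
            "C": stable_proof, "P": P, "replica": i}         # signed by i in practice
```
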
|
||||
|
||||
When the primary $p$ of view $v+1$ receives $2f$ valid view-change messages for view $v+1$ from other replicas, it multicasts a $\{$NEW-VIEW$,v+1,V,O\}σ_p$ message to all other replicas, where $V$ is a set containing the valid view-change messages received by the primary plus the view-change message for $v+1$ the primary sent (or would have sent), and $O$ is a set of pre-prepare messages (without the piggybacked request). $O$ is computed as follows:
|
||||
|
||||
1. The primary determines the sequence number *min-s* of the latest stable checkpoint in $V$ and the highest sequence number *max-s* in a prepare message in $V$.
|
||||
|
||||
1. The primary creates a new pre-prepare message for view $v+1$ for each sequence number $n$ between *min-s* and *max-s*. There are two cases: (1) there is at least one set in the $P$ component of some view-change message in $V$ with sequence number $n$, or (2) there is no such set. In the first case, the primary creates a new message $\{$PRE-PREPARE$,v+1,n,d\}σ_p$, where $d$ is the request digest in the pre-prepare message for sequence number $n$ with the highest view number in $V$. In the second case, it creates a new pre-prepare message $\{$PRE-PREPARE$,v+1,n,d^{null}\}σ_p$, where $d^{null}$ is the digest of a special *null* request; a null request goes through the protocol like other requests, but its execution is a no-op. (Paxos [18] used a similar technique to fill in gaps.)
|
||||
|
||||
Next the primary appends the messages in $O$ to its log. If *min-s* is greater than the sequence number of its latest stable checkpoint, the primary also inserts the proof of stability for the checkpoint with sequence number *min-s* in its log, and discards information from the log as discussed in Section 5.3. Then it enters view $v+1$: at this point it is able to accept messages for view $v+1$.
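
The computation of $O$ from $V$ can be sketched as below. The message shapes follow the hypothetical view-change sketch above, and the null-request digest is an assumed placeholder:

```python
def compute_O(V, null_digest="NULL"):
    """Compute the pre-prepare set O for view v+1 from the view-change set V.

    Each element of V looks like the dict built in the previous sketch:
    {"view": v+1, "n": stable checkpoint seq, "P": {seq: {"pre_prepare": ...}}}.
    """
    new_view = max(vc["view"] for vc in V)                # all messages are for v+1
    min_s = max(vc["n"] for vc in V)                      # latest stable checkpoint in V
    best = {}                                             # seq -> pre-prepare with highest view
    for vc in V:
        for n, entry in vc["P"].items():
            pp = entry["pre_prepare"]
            if n not in best or pp["view"] > best[n]["view"]:
                best[n] = pp
    max_s = max(best, default=min_s)                      # highest prepared seq in V
    O = []
    for n in range(min_s + 1, max_s + 1):
        digest = best[n]["digest"] if n in best else null_digest
        O.append({"type": "PRE-PREPARE", "view": new_view, "n": n, "d": digest})
    return O
```
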
|
||||
|
||||
A backup accepts a new-view message for view $v+1$ if it is signed properly, if the view-change messages it contains are valid for view $v+1$, and if the set $O$ is correct; it verifies the correctness of $O$ by performing a computation similar to the one used by the primary to create $O$. Then it adds the new information to its log as described for the primary, multicasts a prepare for each message in $O$ to all the other replicas, adds these prepares to its log, and enters view $v+1$.
|
||||
|
||||
Thereafter, the protocol proceeds as described in Section 5.2. Replicas redo the protocol for messages between *min-s* and *max-s* but they avoid re-executing client requests (by using their stored information about the last reply sent to each client).
|
||||
|
||||
A replica may be missing some request message $m$ or a stable checkpoint (since these are not sent in new-view messages). It can obtain missing information from another replica. For example, replica $i$ can obtain a missing checkpoint state $S$ from one of the replicas whose checkpoint messages certified its correctness in $V$. Since $f+1$ of those replicas are correct, replica $i$ will always obtain $S$ or a later certified stable checkpoint. We can avoid sending the entire checkpoint by partitioning the state and stamping each partition with the sequence number of the last request that modified it. To bring a replica up to date, it is only necessary to send it the partitions where it is out of date, rather than the whole checkpoint.
|
||||
|
||||
## Correctness
|
||||
This section sketches the proof that the algorithm provides safety and liveness; details can be found in [4].
|
||||
|
||||
### Safety
|
||||
|
||||
As discussed earlier, the algorithm provides safety if all non-faulty replicas agree on the sequence numbers of requests that commit locally.
|
||||
|
||||
In Section 5.2, we showed that if *prepared*$(m,v,n,i)$ is true, *prepared*$(m',v,n,j)$ is false for any non-faulty replica $j$ (including $i=j$) and any $m'$ such that $D(m')\not=D(m)$. This implies that two non-faulty replicas agree on the sequence number of requests that commit locally in the same view at the two replicas.
|
||||
|
||||
The view-change protocol ensures that non-faulty replicas also agree on the sequence number of requests that commit locally in different views at different replicas. A request $m$ commits locally at a non-faulty replica with sequence number $n$ in view $v$ only if *committed*$(m,v,n)$ is true. This means that there is a set $R_1$ containing at least $f+1$ non-faulty replicas such that *prepared*$(m,v,n,i)$ is true for every replica $i$ in the set.
|
||||
|
||||
Non-faulty replicas will not accept a pre-prepare for view $v'\gt v$ without having received a new-view message for $v'$ (since only at that point do they enter the view). But any correct new-view message for view $v'\gt v$ contains correct view-change messages from every replica $i$ in a set $R_2$ of $2f+1$ replicas. Since there are $3f+1$ replicas, $R_1$ and $R_2$ must intersect in at least one replica $k$ that is not faulty. $k$'s view-change message will ensure that the fact that $m$ prepared in a previous view is propagated to subsequent views, unless the new-view message contains a view-change message with a stable checkpoint with a sequence number higher than $n$. In the first case, the algorithm redoes the three phases of the atomic multicast protocol for $m$ with the same sequence number $n$ and the new view number. This is important because it prevents any different request that was assigned the sequence number $n$ in a previous view from ever committing. In the second case no replica in the new view will accept any message with sequence number lower than $n$. In either case, the replicas will agree on the request that commits locally with sequence number $n$.
|
||||
|
||||
### Liveness
|
||||
|
||||
To provide liveness, replicas must move to a new view if they are unable to execute a request. But it is important to maximize the period of time when at least $2f+1$ non-faulty replicas are in the same view, and to ensure that this period of time increases exponentially until some requested operation executes. We achieve these goals by three means.
|
||||
|
||||
First, to avoid starting a view change too soon, a replica that multicasts a view-change message for view $v+1$ waits for $2f+1$ view-change messages for view $v+1$ and then starts its timer to expire after some time $T$. If the timer expires before it receives a valid new-view message for $v+1$ or before it executes a request in the new view that it had not executed previously, it starts the view change for view $v+2$, but this time it will wait $2T$ before starting a view change for view $v+3$.
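
For illustration, the resulting timer schedule is just exponential back-off on a base timeout; the constant `base_T` and the helper name are assumptions:

```python
def view_change_timeout(base_T: float, attempts: int) -> float:
    """Timeout waited before escalating to the next view change.

    The wait doubles with each unsuccessful attempt (T, 2T, 4T, ...), so the
    period during which 2f+1 non-faulty replicas share a view grows
    exponentially until some requested operation executes.
    """
    return base_T * (2 ** attempts)

# Illustration: the first retry waits T, the next 2T, then 4T.
assert [view_change_timeout(1.0, k) for k in range(3)] == [1.0, 2.0, 4.0]
```
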
|
||||
|
||||
Second, if a replica receives a set of $f+1$ valid view-change messages from other replicas for views greater than its current view, it sends a view-change message for the smallest view in the set, even if its timer has not expired; this prevents it from starting the next view change too late.
|
||||
|
||||
Third, faulty replicas are unable to impede progress by forcing frequent view changes. A faulty replica cannot cause a view change by sending a view-change message, because a view change will happen only if at least $f+1$ replicas send view-change messages, but it can cause a view change when it is the primary (by not sending messages or sending bad messages). However, because the primary of view $v$ is the replica $p$ such that $p=v\mod|R|$, the primary cannot be faulty for more than $f$ consecutive views.
|
||||
|
||||
These three techniques guarantee liveness unless message delays grow faster than the timeout period indefinitely, which is unlikely in a real system.
|
||||
|
||||
## Non-Determinism
|
||||
|
||||
State machine replicas must be deterministic but many services involve some form of non-determinism. For example, the time-last-modified in NFS is set by reading the server's local clock; if this were done independently at each replica, the states of non-faulty replicas would diverge. Therefore, some mechanism to ensure that all replicas select the same value is needed. In general, the client cannot select the value because it does not have enough information; for example, it does not know how its request will be ordered relative to concurrent requests by other clients. Instead, the primary needs to select the value either independently or based on values provided by the backups.
|
||||
|
||||
If the primary selects the non-deterministic value independently, it concatenates the value with the associated request and executes the three phase protocol to ensure that non-faulty replicas agree on a sequence number for the request and value. This prevents a faulty primary from causing replica state to diverge by sending different values to different replicas. However, a faulty primary might send the same, incorrect, value to all replicas. Therefore, replicas must be able to decide deterministically whether the value is correct (and what to do if it is not) based only on the service state.
|
||||
|
||||
This protocol is adequate for most services (including NFS) but occasionally replicas must participate in selecting the value to satisfy a service's specification. This can be accomplished by adding an extra phase to the protocol: the primary obtains authenticated values proposed by the backups, concatenates $2f+1$ of them with the associated request, and starts the three phase protocol for the concatenated message. Replicas choose the value by a deterministic computation on the $2f+1$ values and their state, e.g., taking the median. The extra phase can be optimized away in the common case. For example, if replicas need a value that is "close enough" to that of their local clock, the extra phase can be avoided when their clocks are synchronized within some delta.
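
As a concrete illustration of the deterministic choice, taking the median of the $2f+1$ proposed values bounds the influence of up to $f$ faulty proposers, since the median always lies between two values proposed by non-faulty replicas. The sketch below assumes integer clock readings:

```python
import statistics

def choose_nondeterministic_value(proposals, f: int):
    """Deterministic choice from 2f+1 backup-proposed values (e.g. clock
    readings).  median_low always returns one of the proposed values."""
    assert len(proposals) == 2 * f + 1
    return statistics.median_low(proposals)

# Example with f = 1: one wildly wrong clock reading cannot drag the median far.
print(choose_nondeterministic_value([1002, 1003, 999999], f=1))  # -> 1003
```
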
|
||||
|
||||
# Optimizations
|
||||
|
||||
This section describes some optimizations that improve the performance of the algorithm during normal-case operation. All the optimizations preserve the liveness and safety properties.
|
||||
|
||||
## Reducing Communication
|
||||
|
||||
We use three optimizations to reduce the cost of communication. The first avoids sending most large replies. A client request designates a replica to send the result; all other replicas send replies containing just the digest of the result. The digests allow the client to check the correctness of the result while reducing network bandwidth consumption and CPU overhead significantly for large replies. If the client does not receive a correct result from the designated replica, it retransmits the request as usual, requesting all replicas to send full replies.
|
||||
|
||||
The second optimization reduces the number of message delays for an operation invocation from 5 to 4. Replicas execute a request *tentatively* as soon as the prepared predicate holds for the request, their state reflects the execution of all requests with lower sequence number, and these requests are all known to have committed. After executing the request, the replicas send tentative replies to the client. The client waits for $2f+1$ matching tentative replies. If it receives this many, the request is guaranteed to commit eventually. Otherwise, the client retransmits the request and waits for $f+1$ non-tentative replies.
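
Client-side reply handling for this optimization can be sketched as follows; the reply representation is an assumption, not the replication library's interface:

```python
from collections import Counter

def result_from_replies(replies, f: int):
    """Select a result from (result, tentative_flag) pairs sent by distinct
    replicas: 2f+1 matching tentative replies guarantee the request will
    commit, otherwise f+1 matching non-tentative replies suffice."""
    tentative = Counter(r for r, is_tentative in replies if is_tentative)
    definitive = Counter(r for r, is_tentative in replies if not is_tentative)
    if tentative and tentative.most_common(1)[0][1] >= 2 * f + 1:
        return tentative.most_common(1)[0][0]
    if definitive and definitive.most_common(1)[0][1] >= f + 1:
        return definitive.most_common(1)[0][0]
    return None   # not enough matching replies yet: keep waiting or retransmit
```
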
|
||||
|
||||
A request that has executed tentatively may abort if there is a view change and it is replaced by a null request. In this case the replica reverts its state to the last stable checkpoint in the new-view message or to its last checkpointed state (depending on which one has the higher sequence number).
|
||||
|
||||
The third optimization improves the performance of read-only operations that do not modify the service state. A client multicasts a read-only request to all replicas. Replicas execute the request immediately in their tentative state after checking that the request is properly authenticated, that the client has access, and that the request is in fact read-only. They send the reply only after all requests reflected in the tentative state have committed; this is necessary to prevent the client from observing uncommitted state. The client waits for $2f+1$ replies from different replicas with the same result.
|
||||
The client may be unable to collect $2f+1$ such replies if there are concurrent writes to data that affect the result; in this case, it retransmits the request as a regular read-write request after its retransmission timer expires.
|
||||
|
||||
## Cryptography
|
||||
|
||||
In Section 5, we described an algorithm that uses digital signatures to authenticate all messages. However, we actually use digital signatures only for view-change and new-view messages, which are sent rarely, and authenticate all other messages using message authentication codes (MACs). This eliminates the main performance bottleneck in previous systems [29, 22].
|
||||
|
||||
However, MACs have a fundamental limitation relative to digital signatures -- the inability to prove that a message is authentic to a third party. The algorithm in Section 5 and previous Byzantine-fault-tolerant algorithms [31, 16] for state machine replication rely on the extra power of digital signatures. We modified our algorithm to circumvent the problem by taking advantage of specific invariants, e.g., the invariant that no two different requests prepare with the same view and sequence number at two non-faulty replicas. The modified algorithm is described in [5]. Here we sketch the main implications of using MACs.
|
||||
|
||||
MACs can be computed three orders of magnitude faster than digital signatures. For example, a 200MHz Pentium Pro takes 43ms to generate a 1024-bit modulus RSA signature of an MD5 digest and 0.6ms to verify the signature [37], whereas it takes only 10.3µs to compute the MAC of a 64-byte message on the same hardware in our implementation. There are other public-key cryptosystems that generate signatures faster, e.g., elliptic curve public-key cryptosystems, but signature verification is slower [37] and in our algorithm each signature is verified many times.
|
||||
|
||||
Each node (including active clients) shares a 16-byte secret session key with each replica. We compute message authentication codes by applying MD5 to the concatenation of the message with the secret key. Rather than using the 16 bytes of the final MD5 digest, we use only the 10 least significant bytes. This truncation has the obvious advantage of reducing the size of MACs and it also improves their resilience to certain attacks [27]. This is a variant of the secret suffix method [36], which is secure as long as MD5 is collision resistant [27, 8].
|
||||
|
||||
The digital signature in a reply message is replaced by a single MAC, which is sufficient because these messages have a single intended recipient. The signatures in all other messages (including client requests but excluding view changes) are replaced by vectors of MACs that we call authenticators. An authenticator has an entry for every replica other than the sender; each entry is the MAC computed with the key shared by the sender and the replica corresponding to the entry.
|
||||
|
||||
The time to verify an authenticator is constant but the time to generate one grows linearly with the number of replicas. This is not a problem because we do not expect to have a large number of replicas and there is a huge performance gap between MAC and digital signature computation. Furthermore, we compute authenticators efficiently; MD5 is applied to the message once and the resulting context is used to compute each vector entry by applying MD5 to the corresponding session key. For example, in a system with 37 replicas (i.e., a system that can tolerate 12 simultaneous faults) an authenticator can still be computed much more than two orders of magnitude faster than a 1024-bit modulus RSA signature.
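
A sketch of this construction using Python's `hashlib` is below. It mirrors the description (hash the message once, then fold in each 16-byte session key and truncate to 10 bytes), but the key handling and byte choices are illustrative assumptions; a modern implementation would use HMAC with a current hash function rather than secret-suffix MD5:

```python
import hashlib

MAC_LEN = 10   # keep only 10 bytes of the MD5 output, as described in the text

def authenticator(message: bytes, session_keys: dict) -> dict:
    """Vector of MACs, one entry per receiving replica (secret-suffix variant:
    MD5(message || key), truncated).  The message is hashed once and the MD5
    context is copied for each key, so the per-replica cost is small."""
    base = hashlib.md5(message)              # hash the message once
    macs = {}
    for replica, key in session_keys.items():
        ctx = base.copy()                    # reuse the partially hashed context
        ctx.update(key)                      # append the 16-byte shared secret
        macs[replica] = ctx.digest()[-MAC_LEN:]
    return macs

# Example: three receivers with 16-byte keys (illustrative values only).
keys = {r: bytes([r]) * 16 for r in (1, 2, 3)}
print(authenticator(b"PRE-PREPARE,v,n,d", keys))
```
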
|
||||
|
||||
The size of authenticators grows linearly with the number of replicas but it grows slowly: it is equal to $30\lfloor\frac{n-1}{3}\rfloor$ bytes. An authenticator is smaller than an RSA signature with a 1024-bit modulus for $n\le13$ (i.e., systems that can tolerate up to 4 simultaneous faults), which we expect to be true in most configurations.
|
||||
|
||||
# Implementation
|
||||
|
||||
This section describes our implementation. First we discuss the replication library, which can be used as a basis for any replicated service. In Section 7.2 we describe how we implemented a replicated NFS on top of the replication library. Then we describe how we maintain checkpoints and compute checkpoint digests efficiently.
|
||||
|
||||
## The Replication Library
|
||||
|
||||
The client interface to the replication library consists of a single procedure, *invoke*, with one argument, an input buffer containing a request to invoke a state machine operation. The *invoke* procedure uses our protocol to execute the requested operation at the replicas and select the correct reply from among the replies of the individual replicas. It returns a pointer to a buffer containing the operation result.
|
||||
|
||||
On the server side, the replication code makes a number of upcalls to procedures that the server part of the application must implement. There are procedures to execute requests (*execute*), to maintain checkpoints of the service state (*make checkpoint*, *delete checkpoint*), to obtain the digest of a specified checkpoint (*get digest*), and to obtain missing information (*get checkpoint*, *set checkpoint*). The *execute* procedure receives as input a buffer containing the requested operation, executes the operation, and places the result in an output buffer. The other procedures are discussed further in Sections 7.3 and 7.4.
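
Paraphrased as an interface, the upcalls look roughly like this; the method names follow the text above, but the signatures are assumptions:

```python
from abc import ABC, abstractmethod

class ReplicatedService(ABC):
    """Upcall interface the server side of an application implements
    (paraphrased from the description above, not the library's actual API)."""

    @abstractmethod
    def execute(self, request: bytes) -> bytes:
        """Run the requested operation and return the reply buffer."""

    @abstractmethod
    def make_checkpoint(self, seqno: int) -> None:
        """Record a checkpoint of the service state at sequence number seqno."""

    @abstractmethod
    def delete_checkpoint(self, seqno: int) -> None:
        """Discard a checkpoint that is no longer needed."""

    @abstractmethod
    def get_digest(self, seqno: int) -> bytes:
        """Return the digest of the checkpoint taken at seqno."""

    @abstractmethod
    def get_checkpoint(self, seqno: int) -> bytes:
        """Return (part of) a checkpoint so it can be sent to a lagging replica."""

    @abstractmethod
    def set_checkpoint(self, seqno: int, data: bytes) -> None:
        """Install checkpoint data received from another replica."""
```
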
|
||||
|
||||
Point-to-point communication between nodes is implemented using UDP, and multicast to the group of replicas is implemented using UDP over IP multicast [7]. There is a single IP multicast group for each service, which contains all the replicas. These communication protocols are unreliable; they may duplicate or lose messages or deliver them out of order.
|
||||
|
||||
The algorithm tolerates out-of-order delivery and rejects duplicates. View changes can be used to recover from lost messages, but this is expensive and therefore it is important to perform retransmissions. During normal operation recovery from lost messages is driven by the receiver: backups send negative acknowledgments to the primary when they are out of date and the primary retransmits pre-prepare messages after a long timeout. A reply to a negative acknowledgment may include both a portion of a stable checkpoint and missing messages. During view changes, replicas retransmit view-change messages until they receive a matching new-view message or they move on to a later view.
|
||||
|
||||
The replication library does not implement view changes or retransmissions at present. This does not compromise the accuracy of the results given in Section 7 because the rest of the algorithm is completely implemented (including the manipulation of the timers that trigger view changes) and because we have formalized the complete algorithm and proved its correctness [4].
|
||||
|
||||
## BFS: A Byzantine-Fault-tolerant File System
|
||||
|
||||
We implemented BFS, a Byzantine-fault-tolerant NFS service, using the replication library. Figure 2 shows the architecture of BFS. We opted not to modify the kernel NFS client and server because we did not have the sources for the Digital Unix kernel.
|
||||
|
||||
A file system exported by the fault-tolerant NFS service is mounted on the client machine like any regular NFS file system. Application processes run unmodified and interact with the mounted file system through the NFS client in the kernel. We rely on user level relay processes to mediate communication between the standard NFS client and the replicas. A relay receives NFS protocol requests, calls the invoke procedure of our replication library, and sends the result back to the NFS client.
|
||||
|
||||
![Replicated File System Architecture](./images/practical_byzantine_consensus_fig_2.webp){width=100%}
|
||||
|
||||
Each replica runs a user-level process with the replication library and our NFS V2 daemon, which we will refer to as *snfsd* (for simple *nfsd*). The replication library receives requests from the relay, interacts with *snfsd* by making upcalls, and packages NFS replies into replication protocol replies that it sends to the relay.
|
||||
|
||||
We implemented *snfsd* using a fixed-size memory-mapped file. All the file system data structures, e.g., inodes, blocks and their free lists, are in the mapped file. We rely on the operating system to manage the cache of memory-mapped file pages and to write modified pages to disk asynchronously. The current implementation uses 8KB blocks and inodes contain the NFS status information plus 256 bytes of data, which is used to store directory entries in directories, pointers to blocks in files, and text in symbolic links. Directories and files may also use indirect blocks in a way similar to Unix.
|
||||
|
||||
Our implementation ensures that all state machine replicas start in the same initial state and are deterministic, which are necessary conditions for the correctness of a service implemented using our protocol. The primary proposes the values for time-last-modified and time-last-accessed, and replicas select the larger of the proposed value and one greater than the maximum of all values selected for earlier requests. We do not require synchronous writes to implement NFS V2 protocol semantics because BFS achieves stability of modified data and meta-data through replication [20].
|
||||
|
||||
## Maintaining Checkpoints
|
||||
|
||||
This section describes how *snfsd* maintains checkpoints of the file system state. Recall that each replica maintains several logical copies of the state: the current state, some number of checkpoints that are not yet stable, and the last stable checkpoint.
|
||||
|
||||
*snfsd* executes file system operations directly in the memory mapped file to preserve locality, and it uses copy-on-write to reduce the space and time overhead associated with maintaining checkpoints. *snfsd* maintains a copy-on-write bit for every 512-byte block in the memory mapped file. When the replication code invokes the *make_checkpoint* upcall, *snfsd* sets all the copy-on-write bits and creates a (volatile) checkpoint record, containing the current sequence number, which it receives as an argument to the upcall, and a list of blocks. This list contains the copies of the blocks that were modified since the checkpoint was taken, and therefore, it is initially empty. The record also contains the digest of the current state; we discuss how the digest is computed in Section 7.4.
|
||||
|
||||
When a block of the memory mapped file is modified while executing a client request, *snfsd* checks the copy-on-write bit for the block and, if it is set, stores the block's current contents and its identifier in the checkpoint record for the last checkpoint. Then, it overwrites the block with its new value and resets its copy-on-write bit. *snfsd* retains a checkpoint record until told to discard it via a delete checkpoint upcall, which is made by the replication code when a later checkpoint becomes stable.
|
||||
|
||||
If the replication code requires a checkpoint to send to another replica, it calls the get checkpoint upcall. To obtain the value for a block, *snfsd* first searches for the block in the checkpoint record of the stable checkpoint, and then searches the checkpoint records of any later checkpoints. If the block is not in any checkpoint record, it returns the value from the current state.
|
||||
The use of the copy-on-write technique and the fact that we keep at most 2 checkpoints ensure that the space and time overheads of keeping several logical copies of the state are low. For example, in the Andrew benchmark experiments described in Section 7, the average checkpoint record size is only 182 blocks with a maximum of 500.
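
The copy-on-write bookkeeping described above can be sketched as follows; the block size matches the text, but the container layout and method names are assumptions for illustration:

```python
BLOCK = 512   # copy-on-write granularity used by snfsd

class CowState:
    """Sketch of copy-on-write checkpointing over a block-structured state."""

    def __init__(self, num_blocks: int):
        self.blocks = [bytes(BLOCK)] * num_blocks
        self.cow = [False] * num_blocks          # copy-on-write bit per block
        self.checkpoints = []                    # list of (seqno, {index: old bytes})

    def make_checkpoint(self, seqno: int):
        self.cow = [True] * len(self.blocks)     # protect every block
        self.checkpoints.append((seqno, {}))     # the record starts out empty

    def write_block(self, index: int, data: bytes):
        if self.cow[index] and self.checkpoints:
            _seq, record = self.checkpoints[-1]
            record[index] = self.blocks[index]   # save the pre-image in the last record
            self.cow[index] = False
        self.blocks[index] = data

    def read_checkpoint_block(self, seqno: int, index: int) -> bytes:
        # look in the requested checkpoint record, then any later records,
        # falling back to the current state
        reached = False
        for cp_seq, record in self.checkpoints:
            if cp_seq >= seqno:
                reached = True
            if reached and index in record:
                return record[index]
        return self.blocks[index]
```
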
|
||||
|
||||
## Computing Checkpoint Digests
|
||||
|
||||
*snfsd* computes a digest of a checkpoint state as part of a make checkpoint upcall. Although checkpoints are only taken occasionally, it is important to compute the state digest incrementally because the state may be large. *snfsd* uses an incremental collision-resistant one-way hash function called AdHash [1]. This function divides the state into fixed-size blocks and uses some other hash function (e.g., MD5) to compute the digest of the string obtained by concatenating the block index with the block value for each block. The digest of the state is the sum of the digests of the blocks modulo some large integer. In our current implementation, we use the 512-byte blocks from the copy-on-write technique and compute their digest using MD5.
|
||||
|
||||
To compute the digest for the state incrementally, *snfsd* maintains a table with a hash value for each 512-byte block. This hash value is obtained by applying MD5 to the block index concatenated with the block value at the time of the last checkpoint. When *make_checkpoint* is called, *snfsd* obtains the digest $d$ for the previous checkpoint state (from the associated checkpoint record). It computes new hash values for each block whose copy-on-write bit is reset by applying MD5 to the block index concatenated with the current block value. Then, it adds the new hash value to $d$, subtracts the old hash value from $d$, and updates the table to contain the new hash value. This process is efficient provided the number of modified blocks is small; as mentioned above, on average 182 blocks are modified per checkpoint for the Andrew benchmark.
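
A toy version of the incremental update, hashing (index, block) pairs with MD5 and keeping a running sum modulo a large constant, is shown below; the modulus and encoding are assumptions:

```python
import hashlib

M = 2 ** 128   # large modulus for the AdHash-style running sum

def block_hash(index: int, value: bytes) -> int:
    """MD5 of the block index concatenated with the block value, as an integer."""
    return int.from_bytes(hashlib.md5(index.to_bytes(8, "big") + value).digest(), "big")

def update_digest(d: int, index: int, old: bytes, new: bytes) -> int:
    """Incremental update: subtract the old block hash, add the new one."""
    return (d - block_hash(index, old) + block_hash(index, new)) % M

# The digest of a modified state matches the digest recomputed from scratch.
state = [b"a" * 512, b"b" * 512]
d = sum(block_hash(i, v) for i, v in enumerate(state)) % M
state[1] = b"c" * 512
d = update_digest(d, 1, b"b" * 512, b"c" * 512)
assert d == sum(block_hash(i, v) for i, v in enumerate(state)) % M
```
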
|
||||
|
||||
# Performance Evaluation
|
||||
|
||||
This section evaluates the performance of our system using two benchmarks: a micro-benchmark and the Andrew benchmark [15]. The micro-benchmark provides a service-independent evaluation of the performance of the replication library; it measures the latency to invoke a null operation, i.e., an operation that does nothing.
|
||||
|
||||
The Andrew benchmark is used to compare BFS with two other file systems: one is the NFS V2 implementation in Digital Unix, and the other is identical to BFS except without replication. The first comparison demonstrates that our system is practical by showing that its latency is similar to the latency of a commercial system that is used daily by many users. The second comparison allows us to evaluate the overhead of our algorithm accurately within an implementation of a real service.
|
||||
|
||||
## Experimental Setup
|
||||
|
||||
The experiments measure normal-case behavior (i.e., there are no view changes), because this is the behavior that determines the performance of the system. All experiments ran with one client running two relay processes, and four replicas. Four replicas can tolerate one Byzantine fault; we expect this reliability level to suffice for most applications. The replicas and the client ran on identical DEC 3000/400 Alpha workstations. These workstations have a 133 MHz Alpha 21064 processor, 128 MB of memory, and run Digital Unix version 4.0. The file system was stored by each replica on a DEC RZ26 disk. All the workstations were connected by a 10Mbit/s switched Ethernet and had DEC LANCE Ethernet interfaces. The switch was a DEC EtherWORKS 8T/TX. The experiments were run on an isolated network.
|
||||
|
||||
The interval between checkpoints was 128 requests, which causes garbage collection to occur several times in each of the experiments. The maximum sequence number accepted by replicas in pre-prepare messages was 256 plus the sequence number of the last stable checkpoint.
|
||||
|
||||
## Micro-Benchmark
|
||||
|
||||
The micro-benchmark measures the latency to invoke a null operation. It evaluates the performance of two implementations of a simple service with no state that implements null operations with arguments and results of different sizes. The first implementation is replicated using our library and the second is unreplicated and uses UDP directly. Table 1 reports the response times measured at the client for both read-only and read-write operations. They were obtained by timing 10,000 operation invocations in three separate runs and we report the median value of the three runs. The maximum deviation from the median was always below 0.3% of the reported value. We denote each operation by a/b, where a and b are the sizes of the operation argument and result in KBytes.
|
||||
|
||||
+---------+-------------------+------------+------------+
|arg./res.| replicated        | replicated | without    |
|(KB)     | read-write        | read-only  | replication|
+:=======:+==================:+===========:+===========:+
| 0/0     |       3.35 (309%) | 1.62 (98%) |       0.82 |
+---------+-------------------+------------+------------+
| 4/0     |      14.19 (207%) | 6.98 (51%) |       4.62 |
+---------+-------------------+------------+------------+
| 0/4     |       8.01 ( 72%) | 5.94 (27%) |       4.66 |
+---------+-------------------+------------+------------+

Table 1: Micro-benchmark results (in milliseconds); the percentage overhead is relative to the unreplicated case.
|
||||
|
||||
The overhead introduced by the replication library is due to extra computation and communication. For example, the computation overhead for the read-write 0/0 operation is approximately 1.06ms, which includes 0.55ms spent executing cryptographic operations. The remaining 1.47ms of overhead are due to extra communication; the replication library introduces an extra message round-trip, it sends larger messages, and it increases the number of messages received by each node relative to the service without replication.
|
||||
|
||||
The overhead for read-only operations is significantly lower because the optimization discussed in Section 5.1 reduces both computation and communication overheads. For example, the computation overhead for the read-only 0/0 operation is approximately 0.43ms, which includes 0.23ms spent executing cryptographic operations, and the communication overhead is only 0.37ms because the protocol to execute read-only operations uses a single round-trip.
|
||||
|
||||
Table 1 shows that the relative overhead is lower for the 4/0 and 0/4 operations. This is because a significant fraction of the overhead introduced by the replication library is independent of the size of operation arguments and results. For example, in the read-write 0/4 operation, the large message (the reply) goes over the network only once (as discussed in Section 5.1) and only the cryptographic overhead to process the reply message is increased. The overhead is higher for the read-write 4/0 operation because the large message (the request) goes over the network twice and increases the cryptographic overhead for processing both request and pre-prepare messages.
|
||||
|
||||
It is important to note that this micro-benchmark represents the worst case overhead for our algorithm because the operations perform no work and the unreplicated server provides very weak guarantees. Most services will require stronger guarantees, e.g., authenticated connections, and the overhead introduced by our algorithm relative to a server that implements these guarantees will be lower. For example, the overhead of the replication library relative to a version of the unreplicated service that uses MACs for authentication is only 243% for the read-write 0/0 operation and 4% for the read-only 4/0 operation.
|
||||
|
||||
We can estimate a rough lower bound on the performance gain afforded by our algorithm relative to Rampart [30]. Reiter reports that Rampart has a latency of 45ms for a multi-RPC of a null message in a 10 Mbit/s Ethernet network of 4 SparcStation 10s [30]. The multi-RPC is sufficient for the primary to invoke a state machine operation but for an arbitrary client to invoke an operation it would be necessary to add an extra message delay and an extra RSA signature and verification to authenticate the client; this would lead to a latency of at least 65ms (using the RSA timings reported in [29].) Even if we divide this latency by 1.7, the ratio of the SPECint92 ratings of the DEC 3000/400 and the SparcStation 10, our algorithm still reduces the latency to invoke the read-write and read-only 0/0 operations by factors of more than 10 and 20, respectively. Note that this scaling is conservative because the network accounts for a significant fraction of Rampart's latency [29] and Rampart's results were obtained using 300-bit modulus RSA signatures, which are not considered secure today unless the keys used to generate them are refreshed very frequently.
|
||||
|
||||
There are no published performance numbers for SecureRing [16] but it would be slower than Rampart because its algorithm has more message delays and signature operations in the critical path.
|
||||
|
||||
## Andrew Benchmark
|
||||
|
||||
The Andrew benchmark [15] emulates a software development workload. It has five phases: (1) creates subdirectories recursively; (2) copies a source tree; (3) examines the status of all the files in the tree without examining their data; (4) examines every byte of data in all the files; and (5) compiles and links the files.
|
||||
|
||||
We use the Andrew benchmark to compare BFS with two other file system configurations: NFS-std, which is the NFS V2 implementation in Digital Unix, and BFS-nr, which is identical to BFS but with no replication. BFS-nr ran two simple UDP relays on the client, and on the server it ran a thin veneer linked with a version of *snfsd* from which all the checkpoint management code was removed. This configuration does not write modified file system state to disk before replying to the client. Therefore, it does not implement NFS V2 protocol semantics, whereas both BFS and NFS-std do.
|
||||
|
||||
Out of the 18 operations in the NFS V2 protocol only getattr is read-only because the time-last-accessed attribute of files and directories is set by operations that would otherwise be read-only, e.g., read and lookup. The result is that our optimization for read-only operations can rarely be used. To show the impact of this optimization, we also ran the Andrew benchmark on a second version of BFS that modifies the lookup operation to be read-only. This modification violates strict Unix file system semantics but is unlikely to have adverse effects in practice.
|
||||
|
||||
For all configurations, the actual benchmark code ran at the client workstation using the standard NFS client implementation in the Digital Unix kernel with the same mount options. The most relevant of these options for the benchmark are: UDP transport, 4096-byte read and write buffers, allowing asynchronous client writes, and allowing attribute caching.
|
||||
|
||||
We report the mean of 10 runs of the benchmark for each configuration. The sample standard deviation for the total time to run the benchmark was always below 2.6% of the reported value but it was as high as 14% for the individual times of the first four phases. This high variance was also present in the NFS-std configuration. The estimated error for the reported mean was below 4.5% for the individual phases and 0.8% for the total.
|
||||
|
||||
Table 2 shows the results for BFS and BFS-nr. The comparison between BFS-strict and BFS-nr shows that the overhead of Byzantine fault tolerance for this service is low -- BFS-strict takes only 26% more time to run the complete benchmark.
|
||||
|
||||
+---------+------------+-----------+---------+
| phase   |BFS strict  | BFS r/o   | BFS-nr  |
|         |            | lookup    |         |
+:=======:+===========:+==========:+========:+
| 1       | 0.55 (57%) |0.47 (34%) | 0.35    |
+---------+------------+-----------+---------+
| 2       | 9.24 (82%) |7.91 (56%) | 5.08    |
+---------+------------+-----------+---------+
| 3       | 7.24 (18%) |6.45 (6%)  | 6.11    |
+---------+------------+-----------+---------+
| 4       | 8.77 (18%) |7.87 (6%)  | 7.41    |
+---------+------------+-----------+---------+
| 5       |38.68 (20%) |38.38 (19%)| 32.12   |
+---------+------------+-----------+---------+
| total   |64.48 (26%) |61.07 (20%)| 51.07   |
+---------+------------+-----------+---------+

Table 2: Andrew benchmark: BFS vs BFS-nr. The times are in seconds.

The overhead is lower than what was observed for the micro-benchmarks because the client spends a significant fraction of the elapsed time computing between operations, i.e., between receiving the reply to an operation and issuing the next request, and operations at the server perform some computation. But the overhead is not uniform across the benchmark phases. The main reason for this is a variation in the amount of time the client spends computing between operations; the first two phases have a higher relative overhead because the client spends approximately 40% of the total time computing between operations, whereas it spends approximately 70% during the last three phases.
|
||||
|
||||
The table shows that applying the read-only optimization to lookup improves the performance of BFS significantly and reduces the overhead relative to BFS-nr to 20%. This optimization has a significant impact in the first four phases because the time spent waiting for lookup operations to complete in BFS-strict is at least 20% of the elapsed time for these phases, whereas it is less than 5% of the elapsed time for the last phase.
|
||||
|
||||
+---------+------------+-----------+---------+
| phase   |BFS strict  | BFS r/o   | NFS-std |
|         |            | lookup    |         |
+:=======:+===========:+==========:+========:+
| 1       |0.55 (-69%) |0.47 (-73%)| 1.75    |
+---------+------------+-----------+---------+
| 2       |9.24 ( -2%) |7.91 (-16%)| 9.46    |
+---------+------------+-----------+---------+
| 3       |7.24 (35%)  | 6.45 (20%)| 5.36    |
+---------+------------+-----------+---------+
| 4       |8.77 (32%)  |7.87 (19%) | 6.60    |
+---------+------------+-----------+---------+
| 5       |38.68 (-2%) |38.38 (-2%)| 39.35   |
+---------+------------+-----------+---------+
| total   |64.48 (3%)  |61.07 (-2%)| 62.52   |
+---------+------------+-----------+---------+

Table 3: Andrew benchmark: BFS vs NFS-std. The times are in seconds.
|
||||
|
||||
Table 3 shows the results for BFS vs NFS-std. These results show that BFS can be used in practice -- BFS-strict takes only 3% more time to run the complete benchmark. Thus, one could replace the NFS V2 implementation in Digital Unix, which is used daily by many users, by BFS without affecting the latency perceived by those users. Furthermore, BFS with the read-only optimization for the *lookup* operation is actually 2% faster than NFS-std.
|
||||
|
||||
The overhead of BFS relative to NFS-std is not the same for all phases. Both versions of BFS are faster than NFS-std for phases 1, 2, and 5 but slower for the other phases. This is because during phases 1, 2, and 5 a large fraction (between 21% and 40%) of the operations issued by the client are *synchronous*, i.e., operations that require the NFS implementation to ensure stability of modified file system state before replying to the client. NFS-std achieves stability by writing modified state to disk whereas BFS achieves stability with lower latency using replication (as in Harp [20]). NFS-std is faster than BFS (and BFS-nr) in phases 3 and 4 because the client issues no synchronous operations during these phases.
|
||||
|
||||
# Related Work
|
||||
|
||||
Most previous work on replication techniques ignored Byzantine faults or assumed a synchronous system model (e.g., [17, 26, 18, 34, 6, 10]). Viewstamped replication [26] and Paxos [18] use views with a primary and backups to tolerate benign faults in an asynchronous system. Tolerating Byzantine faults requires a much more complex protocol with cryptographic authentication, an extra pre-prepare phase, and a different technique to trigger view changes and select primaries. Furthermore, our system uses view changes only to select a new primary but never to select a different set of replicas to form the new view as in [26, 18].
|
||||
|
||||
Some agreement and consensus algorithms tolerate Byzantine faults in asynchronous systems (e.g., [2, 3, 24]). However, they do not provide a complete solution for state machine replication, and furthermore, most of them were designed to demonstrate theoretical feasibility and are too slow to be used in practice. Our algorithm during normal-case operation is similar to the Byzantine agreement algorithm in [2] but that algorithm is unable to survive primary failures.
|
||||
|
||||
The two systems that are most closely related to our work are Rampart [29, 30, 31, 22] and SecureRing [16]. They implement state machine replication but are more than an order of magnitude slower than our system and, most importantly, they rely on synchrony assumptions.
|
||||
|
||||
Both Rampart and SecureRing must exclude faulty replicas from the group to make progress (e.g., to remove a faulty primary and elect a new one), and to perform garbage collection. They rely on failure detectors to determine which replicas are faulty. However, failure detectors cannot be accurate in an asynchronous system [21], i.e., they may misclassify a replica as faulty. Since correctness requires that fewer than $\frac13$ of group members be faulty, a misclassification can compromise correctness by removing a non-faulty replica from the group. This opens an avenue of attack: an attacker gains control over a single replica but does not change its behavior in any detectable way; then it slows correct replicas or the communication between them until enough are excluded from the group.
|
||||
|
||||
To reduce the probability of misclassification, failure detectors can be calibrated to delay classifying a replica as faulty. However, for the probability to be negligible the delay must be very large, which is undesirable. For example, if the primary has actually failed, the group will be unable to process client requests until the delay has expired. Our algorithm is not vulnerable to this problem because it never needs to exclude replicas from the group.
|
||||
|
||||
Phalanx [23, 25] applies quorum replication techniques [12] to achieve Byzantine fault-tolerance in asynchronous systems. This work does not provide generic state machine replication; instead, it offers a data repository with operations to read and write individual variables and to acquire locks. The semantics it provides for read and write operations are weaker than those offered by our algorithm; we can implement arbitrary operations that access any number of variables, whereas in Phalanx it would be necessary to acquire and release locks to execute such operations. There are no published performance numbers for Phalanx but we believe our algorithm is faster because it has fewer message delays in the critical path and because of our use of MACs rather than public key cryptography. The approach in Phalanx offers the potential for improved scalability; each operation is processed by only a subset of replicas. But this approach to scalability is expensive: it requires $n\gt4f+1$ to tolerate $f$ faults; each replica needs a copy of the state; and the load on each replica decreases slowly with $n$ (it is $O(1/\sqrt{n})$).
|
||||
|
||||
# Conclusions
|
||||
|
||||
This paper has described a new state-machine replication algorithm that is able to tolerate Byzantine faults and can be used in practice: it is the first to work correctly in an asynchronous system like the Internet and it improves the performance of previous algorithms by more than an order of magnitude.
|
||||
|
||||
The paper also described BFS, a Byzantine-fault tolerant implementation of NFS. BFS demonstrates that it is possible to use our algorithm to implement real services with performance close to that of an unreplicated service -- the performance of BFS is only 3% worse than that of the standard NFS implementation in Digital Unix. This good performance is due to a number of important optimizations, including replacing public-key signatures by vectors of message authentication codes, reducing the size and number of messages, and the incremental checkpoint-management techniques.
|
||||
|
||||
One reason why Byzantine-fault-tolerant algorithms will be important in the future is that they can allow systems to continue to work correctly even when there are software errors. Not all errors are survivable; our approach cannot mask a software error that occurs at all replicas. However, it can mask errors that occur independently at different replicas, including nondeterministic software errors, which are the most problematic and persistent errors since they are the hardest to detect. In fact, we encountered such a software bug while running our system, and our algorithm was able to continue running correctly in spite of it.
|
||||
|
||||
There is still much work to do on improving our system. One problem of special interest is reducing the amount of resources required to implement our algorithm. The number of replicas can be reduced by using $f$ replicas as witnesses that are involved in the protocol only when some full replica fails. We also believe that it is possible to reduce the number of copies of the state to $f+1$ but the details remain to be worked out.
|
||||
|
||||
# Acknowledgments
|
||||
|
||||
We would like to thank Atul Adya, Chandrasekhar Boyapati, Nancy Lynch, Sape Mullender, Andrew Myers, Liuba Shrira, and the anonymous referees for their helpful comments on drafts of this paper.
|
||||
|
||||
# References
|
||||
|
||||
[1] M. Bellare and D. Micciancio. A New Paradigm for Collision-free Hashing: Incrementality at Reduced Cost. In Advances in Cryptology -- Eurocrypt 97, 1997.
|
||||
|
||||
[2] G. Bracha and S. Toueg. Asynchronous Consensus and Broadcast Protocols. Journal of the ACM, 32(4), 1985.
|
||||
|
||||
[3] R. Canetti and T. Rabin. Optimal Asynchronous Byzantine Agreement. Technical Report #92-15, Computer Science Department, Hebrew University, 1992.
|
||||
|
||||
[4] M. Castro and B. Liskov. A Correctness Proof for a Practical Byzantine-Fault-Tolerant Replication Algorithm. Technical Memo MIT/LCS/TM-590, MIT Laboratory for Computer Science, 1999.
|
||||
|
||||
[5] M. Castro and B. Liskov. Authenticated Byzantine Fault Tolerance Without Public-Key Cryptography. Technical Memo MIT/LCS/TM-589, MIT Laboratory for Computer Science, 1999.
|
||||
|
||||
[6] F. Cristian, H. Aghili, H. Strong, and D. Dolev. Atomic Broadcast: From Simple Message Diffusion to Byzantine Agreement. In International Conference on Fault Tolerant Computing, 1985.
|
||||
|
||||
[7] S. Deering and D. Cheriton. Multicast Routing in Datagram Internetworks and Extended LANs. ACM Transactions on Computer Systems, 8(2), 1990.
|
||||
|
||||
[8] H. Dobbertin. The Status of MD5 After a Recent Attack. RSA Laboratories' CryptoBytes, 2(2), 1996.
|
||||
|
||||
[9] M. Fischer, N. Lynch, and M. Paterson. Impossibility of Distributed Consensus With One Faulty Process. Journal of the ACM, 32(2), 1985.
|
||||
|
||||
[10] J. Garay and Y. Moses. Fully Polynomial Byzantine Agreement for $n \gt 3t$ Processors in $t+1$ Rounds. SIAM Journal of Computing, 27(1), 1998.
|
||||
|
||||
[11] D. Gawlick and D. Kinkade. Varieties of Concurrency Control in IMS/VS Fast Path. Database Engineering, 8(2), 1985.
|
||||
|
||||
[12] D. Gifford. Weighted Voting for Replicated Data. In Symposium on Operating Systems Principles, 1979.
|
||||
|
||||
[13] M. Herlihy and J. Tygar. How to make replicated data secure. Advances in Cryptology (LNCS 293), 1988.
|
||||
|
||||
[14] M. Herlihy and J. Wing. Axioms for Concurrent Objects. In ACM Symposium on Principles of Programming Languages, 1987.
|
||||
|
||||
[15] J. Howard et al. Scale and performance in a distributed file system. ACM Transactions on Computer Systems, 6(1), 1988.
|
||||
|
||||
[16] K. Kihlstrom, L. Moser, and P. Melliar-Smith. The SecureRing Protocols for Securing Group Communication. In Hawaii International Conference on System Sciences, 1998.
|
||||
|
||||
[17] L. Lamport. Time, Clocks, and the Ordering of Events in a Distributed System. Commun. ACM, 21(7), 1978.
|
||||
|
||||
[18] L. Lamport. The Part-Time Parliament. Technical Report 49, DEC Systems Research Center, 1989.
|
||||
|
||||
[19] L. Lamport, R. Shostak, and M. Pease. The Byzantine Generals Problem. ACM Transactions on Programming Languages and Systems, 4(3), 1982.
|
||||
|
||||
[20] B. Liskov et al. Replication in the Harp File System. In ACM Symposium on Operating System Principles, 1991.
|
||||
|
||||
[21] N. Lynch. Distributed Algorithms. Morgan Kaufmann Publishers, 1996.
|
||||
|
||||
[22] D. Malkhi and M. Reiter. A High-Throughput Secure Reliable Multicast Protocol. In Computer Security Foundations Workshop, 1996.
|
||||
|
||||
[23] D. Malkhi and M. Reiter. Byzantine Quorum Systems. In ACM Symposium on Theory of Computing, 1997.
|
||||
|
||||
[24] D. Malkhi and M. Reiter. Unreliable Intrusion Detection in Distributed Computations. In Computer Security Foundations Workshop, 1997.
|
||||
|
||||
[25] D. Malkhi and M. Reiter. Secure and Scalable Replication in Phalanx. In IEEE Symposium on Reliable Distributed Systems, 1998.
|
||||
|
||||
[26] B. Oki and B. Liskov. Viewstamped Replication: A New Primary Copy Method to Support Highly-Available Distributed Systems. In ACM Symposium on Principles of Distributed Computing, 1988.
|
||||
|
||||
[27] B. Preneel and P. Oorschot. MDx-MAC and Building Fast MACs from Hash Functions. In Crypto 95, 1995.
|
||||
|
||||
[28] C. Pu, A. Black, C. Cowan, and J. Walpole. A Specialization Toolkit to Increase the Diversity of Operating Systems. In ICMAS Workshop on Immunity-Based Systems, 1996.
|
||||
|
||||
[29] M. Reiter. Secure Agreement Protocols. In ACM Conference on Computer and Communication Security, 1994.
|
||||
|
||||
[30] M. Reiter. The Rampart Toolkit for Building High-Integrity Services. Theory and Practice in Distributed Systems (LNCS 938), 1995.
|
||||
|
||||
[31] M. Reiter. A Secure Group Membership Protocol. IEEE Transactions on Software Engineering, 22(1), 1996.
|
||||
|
||||
[32] R. Rivest. The MD5 Message-Digest Algorithm. Internet RFC--1321, 1992.
|
||||
|
||||
[33] R. Rivest, A. Shamir, and L. Adleman. A Method for Obtaining Digital Signatures and Public-Key Cryptosystems. Communications of the ACM, 21(2), 1978.
|
||||
|
||||
[34] F. Schneider. Implementing Fault-Tolerant Services Using The State Machine Approach: A Tutorial. ACM Computing Surveys, 22(4), 1990.
|
||||
|
||||
[35] A. Shamir. How to share a secret. Communications of the ACM, 22(11), 1979.
|
||||
|
||||
[36] G. Tsudik. Message Authentication with One-Way Hash Functions. ACM Computer Communications Review, 22(5), 1992.
|
||||
|
||||
[37] M. Wiener. Performance Comparison of Public-Key Cryptosystems. RSA Laboratories' CryptoBytes, 4(1), 1998.
|

docs/byzantine_paxos.pdf
File diff suppressed because it is too large
@ -39,4 +39,3 @@ licensed under the <a rel="license" href="http://creativecommons.org/licenses/by
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
|
||||
</body></html>
|
||||
|
@ -6,6 +6,3 @@ do
|
||||
tidy -lang en_us --doctype html5 -utf8 -access 4 -e -q -o $TMP/fred.html "$f"
|
||||
done
|
||||
echo "checked all html files for html5 compliance."
|
||||
|
||||
|
||||
|
||||
|
@ -38,7 +38,6 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a></p>
|
||||
|
@ -9,7 +9,7 @@
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Crypto Currency and the Beast</title> </head>
|
||||
|
||||
<body>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -12,11 +12,11 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Crypto Currency Launch</title>
|
||||
<title>Crypto Currency Launch</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Crypto Currency Launch</h1><p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Crypto Currency Launch</h1><p>
|
||||
|
||||
The total value held in the form of gold is ten trillion. But gold has problems – if you try to transport it through an airport, security will likely take it from you. Hard to travel with it hidden. </p><p>
|
||||
|
||||
|
@ -64,4 +64,3 @@ licensed under the <a rel="license" href="http://creativecommons.org/licenses/by
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
|
||||
</body></html>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
}
|
||||
@ -11,11 +11,11 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Crypto Currency on wide area distributed database</title>
|
||||
<title>Crypto Currency on wide area distributed database</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Crypto Currency on wide area distributed database</h1><p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Crypto Currency on wide area distributed database</h1><p>
|
||||
|
||||
|
||||
Much of this material is shamelessly plagiarized without <a href="http://docplayer.net/14501083-Blockchain-throughput-and-big-data-trent-mcconaghy.html">attribution.</a></p><p>
|
||||
|
@ -1,20 +1,20 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Transaction Volume</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Transaction Volume</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Transaction Volume</h1>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Transaction Volume</h1>
|
||||
<hr/>
|
||||
<h2>Total number of bitcoin transactions </h2>
|
||||
|
||||
@ -38,7 +38,7 @@
|
||||
<p>But our canonical tree is going to have to contain namecoins ordered by name order, rather than transaction order, to enable short proofs of the valid authority over a name.</p>
|
||||
<hr/>
|
||||
<h2>Bandwidth</h2>
|
||||
<p>A bitcoin transaction is typically around 512 bytes, could be a lot less: A transaction needs a transaction type, one or more inputs, one or more outputs. A simple input or output would consist of the type, the amount, the time, and a public key. Type sixteen bits, time forty eight bits, amount sixty four bits, public key two hundred and fifty six bits, total forty eight bytes. A typical transaction has two inputs and two outputs, total 192 bytes. Frequently need another hash to link to relevant information, such as what this payment is for, another thirty two bytes, total 224 bytes. </p>
|
||||
<p>We will frequently store the transaction and the resulting balances, as if together, though likely for internal optimization reasons, actually stored separately, so 196 bytes. </p>
|
||||
<p>Visa handles about 2000 transactions per second. Burst rate about four thousand per second. </p>
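<p>As a quick sanity check on the arithmetic above (a hedged sketch; the field widths and the 2000 transactions per second figure are simply the numbers assumed in this section):</p><pre>
// Re-derives the sizes quoted above from the assumed field widths.
constexpr unsigned type_bits = 16, time_bits = 48, amount_bits = 64, pubkey_bits = 256;
constexpr unsigned input_or_output_bytes = (type_bits + time_bits + amount_bits + pubkey_bits) / 8;
static_assert(input_or_output_bytes == 48, "one input or output is 48 bytes");
constexpr unsigned typical_transaction_bytes = 4 * input_or_output_bytes;  // two inputs, two outputs
static_assert(typical_transaction_bytes == 192, "typical transaction is 192 bytes");
constexpr unsigned with_link_hash = typical_transaction_bytes + 32;        // extra thirty two byte hash
static_assert(with_link_hash == 224, "224 bytes with a linking hash");
constexpr unsigned bytes_per_second = with_link_hash * 2000;               // at a Visa-like 2000 tx/s
static_assert(bytes_per_second == 448000, "under half a megabyte per second");</pre>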
|
||||
|
@ -1,9 +1,9 @@
|
||||
---
|
||||
description: >-
|
||||
“A Cypherpunk’s Manifesto” was written by Eric Hughes and published on March 9, 1993.
|
||||
“A Cypherpunk’s Manifesto” was written by Eric Hughes and published on March 9, 1993.
|
||||
robots: 'index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1'
|
||||
title: >-
|
||||
Eric Hughes: A Cypherpunk’s Manifesto
|
||||
Eric Hughes: A Cypherpunk’s Manifesto
|
||||
viewport: 'width=device-width, initial-scale=1.0'
|
||||
---
|
||||
**Privacy is necessary for an open society in the electronic age. Privacy is not secrecy. A private matter is something one doesn’t want the whole world to know, but a secret matter is something one doesn’t want anybody to know. Privacy is the power to selectively reveal oneself to the world.**
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -12,11 +12,11 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Crypto currency</title>
|
||||
<title>Crypto currency</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Delegated proof of stake</h1>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Delegated proof of stake</h1>
|
||||
|
||||
<h2>The consensus problem</h2>
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
---
|
||||
lang: en
|
||||
title: Install Dovecot on Debian 10
|
||||
---
|
||||
# Purpose
|
||||
@ -129,11 +128,11 @@ Delete the old `service auth` definition, and replace it with:
|
||||
```bash
# Postfix smtp-auth
service auth {
  unix_listener /var/spool/postfix/private/auth {
    mode = 0660
    user = postfix
    group = postfix
  }
}
```
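
For reference, the Postfix side that consumes this socket (configured in Postfix's `main.cf`, which is outside the scope of this guide and shown here only as a hedged reminder of what the listener above is for) typically contains:

```default
smtpd_sasl_type = dovecot
smtpd_sasl_path = private/auth
smtpd_sasl_auth_enable = yes
```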
|
||||
|
||||
@ -160,28 +159,28 @@ Add the line `auto = subscribe` to the special folders entries:
|
||||
|
||||
```default
mailbox Trash {
  auto = subscribe
  special_use = \Trash
}

mailbox Junk {
  auto = subscribe
  special_use = \Junk
}

mailbox Drafts {
  auto = subscribe
  special_use = \Drafts
}

mailbox Sent {
  auto = subscribe
  special_use = \Sent
}
```
|
||||
|
||||
|
@ -94,4 +94,3 @@ the precise type is irrelevant noise and a useless distraction. You
|
||||
generally want to know what is being done, not how it is being done.
|
||||
Further, if you explicitly specify how it is being done, you are likely to
|
||||
get it wrong, resulting in mysterious and disastrous type conversions.
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -12,10 +12,10 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Hello World</title>
|
||||
<title>Hello World</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Hello World</h1>
|
||||
|
||||
<p>In this day and age, a program that lives only on one machine, and a program without a gui (possibly a gui on another machine a thousand kilometers away) really is not much of a program</p>
|
||||
@ -33,7 +33,7 @@
|
||||
// attached to it by the run command, thus the threads
|
||||
// must be created in an inner scope</pre>
|
||||
|
||||
Create a work object work to stop its run() function from exiting if it has nothing else to do:<pre>
|
||||
{ boost::asio::io_service::work work(io_service);
|
||||
// The work object prevents io_context from
|
||||
// returning from the run command when it has
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
title:
|
||||
Identity
|
||||
Identity
|
||||
---
|
||||
# Syntax and semantics of identity
|
||||
|
||||
@ -606,19 +606,19 @@ up to entity to the immediate left of the slash to interpret, and if it
|
||||
contains spaces and suchlike, use windows command line string
|
||||
representation rules, quote marks and escape codes.
|
||||
|
||||
rho:#4397439879483774378943798
|
||||
rho:Bob#4397439879483774378943798
|
||||
Bob@#4397439879483774378943798
|
||||
Receivables.#4397439879483774378943798
|
||||
rho:#4397439879483774378943798
|
||||
rho:Bob#4397439879483774378943798
|
||||
Bob@#4397439879483774378943798
|
||||
Receivables.#4397439879483774378943798
|
||||
|
||||
fit into the Uniform Resource Identifier scheme, poorly.
|
||||
|
||||
#4397439879483774378943798/foo
|
||||
#4397439879483774378943798/foo
|
||||
|
||||
fits into the catchall leftover part of the Uniform Resource Identifier
|
||||
scheme.
|
||||
|
||||
rho:Bob@Carol.Dave#4397439879483774378943798/foo
|
||||
rho:Bob@Carol.Dave#4397439879483774378943798/foo
|
||||
|
||||
Does not fit into it in the slightest, and I think the idea of
|
||||
compatibility with the URN system is a lost cause.
|
||||
@ -742,11 +742,11 @@ between his network address and his public key.
|
||||
|
||||
signed
|
||||
: anyone can check that some data is signed by key, and such data can
|
||||
be passed around in a pool, usenet style.
|
||||
be passed around in a pool, usenet style.
|
||||
|
||||
authenticated
|
||||
: You got the data directly from an entity that has the key. You know
|
||||
it came from that key, but cannot prove it to anyone else.
|
||||
it came from that key, but cannot prove it to anyone else.
|
||||
|
||||
access
|
||||
: A key with authorization from another key does something.
|
||||
@ -756,10 +756,10 @@ authorization
|
||||
|
||||
authority
|
||||
: A key with authority can give other keys authorization. Every key
|
||||
has unlimited authority to do whatever it wants on its own
|
||||
computers, and with its own reputation. It may grant other keys
|
||||
authorization to access certain services on its computers and to
|
||||
perform certain acts in the name of its reputation.
|
||||
has unlimited authority to do whatever it wants on its own
|
||||
computers, and with its own reputation. It may grant other keys
|
||||
authorization to access certain services on its computers and to
|
||||
perform certain acts in the name of its reputation.
|
||||
|
||||
We do not want the key on the server to be the master key that owns the
|
||||
server name, because keys on servers are too easily stolen. So we want
|
||||
@ -793,15 +793,15 @@ So, we need a collection of data akin to
|
||||
|
||||
`/etc/hosts`
|
||||
: public data, the broad consensus, agreed data known to the wider
|
||||
community.
|
||||
community.
|
||||
|
||||
`~/.ssh/known_hosts`
|
||||
: privately known data about the community that cannot be widely
|
||||
shared because others might not trust it, and you might not trust
|
||||
others. You may want to share this with those you trust, and get it
|
||||
from those you trust, but your set of people that you trust is
|
||||
unlikely to agree with someone else’s and needs to be curated by a
|
||||
human.
|
||||
shared because others might not trust it, and you might not trust
|
||||
others. You may want to share this with those you trust, and get it
|
||||
from those you trust, but your set of people that you trust is
|
||||
unlikely to agree with someone else’s and needs to be curated by a
|
||||
human.
|
||||
|
||||
`~/.ssh/config`
|
||||
: And there is data you want to keep secret.
|
||||
|
Binary file not shown.
Before Width: | Height: | Size: 302 KiB |
Binary file not shown.
Before Width: | Height: | Size: 125 KiB |
Binary file not shown.
Before Width: | Height: | Size: 94 KiB |
Binary file not shown.
Before Width: | Height: | Size: 48 KiB |
BIN
docs/images/practical_byzantine_consensus_fig_1.webp
Normal file
BIN
docs/images/practical_byzantine_consensus_fig_1.webp
Normal file
Binary file not shown.
After Width: | Height: | Size: 19 KiB |
BIN
docs/images/practical_byzantine_consensus_fig_2.webp
Normal file
BIN
docs/images/practical_byzantine_consensus_fig_2.webp
Normal file
Binary file not shown.
After Width: | Height: | Size: 19 KiB |
@ -25,41 +25,41 @@ used to write any Interlockedxxx operation. </p>
|
||||
|
||||
<pre>
long InterlockedXxx(
    __inout long volatile *Target,
    , whatever parameters we need for Xxx
    )
{
    long prevValue, prevCopy;

    prevValue = *Target;

    do {
        if the Xxx operation is illegal on prevValue, return with error code

        prevCopy = prevValue;

        //
        // prevValue will be the value that used to be Target if the exchange was made
        // or its current value if the exchange was not made.
        //
        prevValue = InterlockedCompareExchange(Target, Xxx operation on prevCopy, prevValue);

        //
        // If prevCopy == prevValue, then no one updated Target in between the deref at the top
        // and the InterlockedCompareExchange afterward and we are done
        //
    } while (prevCopy != prevValue);

    //
    // [value] can be anything you want, but it is typically either
    // a) The new value stored in Target. This is the type of return value that
    //    InterlockedIncrement returns
    // or
    // b) The previous value that was in Target. This is the type of return
    //    value that InterlockedOr or InterlockedExchange return
    //
    return [value];
}</pre><p>
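For instance, a hypothetical InterlockedMax (not part of the Windows API; shown only as a sketch of the pattern above) could be written as:</p><pre>
long InterlockedMax(long volatile *Target, long value)
{
    long prevValue = *Target;
    long prevCopy;
    do {
        if (prevValue >= value)
            return prevValue;   // Target already holds at least value, nothing to do
        prevCopy = prevValue;
        prevValue = InterlockedCompareExchange(Target, value, prevCopy);
    } while (prevCopy != prevValue);
    return value;               // the new value stored in Target
}</pre><p>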
|
||||
|
||||
Structures larger than a long can be handled by
|
||||
using InterlockedCompareExchange to add to a
|
||||
@ -72,4 +72,3 @@ it for you. </p>
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
</body></html>
|
||||
|
||||
|
@ -398,15 +398,15 @@ Which leads me to digress how we are going to handle protocol updates:
|
||||
1. Distribute software capable of handling the update.
|
||||
1. A proposed protocol update transaction is placed on the blockchain.
|
||||
1. Peers indicate capability to handle the protocol update. Or ignore it,
|
||||
or indicate that they cannot. If a significant number of peers
|
||||
indicate capability, peers that lack capability push their owners for
|
||||
an update.
|
||||
or indicate that they cannot. If a significant number of peers
|
||||
indicate capability, peers that lack capability push their owners for
|
||||
an update.
|
||||
1. A proposal to start emitting data that can only handled by more
|
||||
recent peers is placed on the blockchain.
|
||||
recent peers is placed on the blockchain.
|
||||
1. If a significant number of peers vote yes, older peers push more
|
||||
vigorously for an update.
|
||||
vigorously for an update.
|
||||
1. If a substantial supermajority votes yes by a date specified in the
|
||||
proposal, then they start emitting data in the new format on a date
|
||||
proposal, then they start emitting data in the new format on a date
|
||||
shortly afterwards. If no supermajority by the due date, the
|
||||
proposal is dead.
|
||||
|
||||
@ -712,11 +712,11 @@ wxWidget wraps WSASelect, which is the behavior we need.
|
||||
Microsoft has written the asynch sockets you need, and wxWidgets has wrapped
|
||||
them in an OS independent fashion.
|
||||
|
||||
WSAAsyncSelect
|
||||
WSAAsyncSelect
|
||||
|
||||
WSAEventSelect
|
||||
WSAEventSelect
|
||||
|
||||
select
|
||||
select
|
||||
|
||||
Using wxSockets commits us to having a single thread managing everything. To
|
||||
get around the power limit inherent in that, have multiple peers under
|
||||
|
@ -18,4 +18,3 @@ bool app::OnInit()
|
||||
}
|
||||
|
||||
wxIMPLEMENT_APP(app);
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
title:
|
||||
C++ Automatic Memory Management
|
||||
C++ Automatic Memory Management
|
||||
---
|
||||
# Memory Safety
|
||||
Modern, mostly memory safe C++, is enforced by:\
|
||||
@ -14,7 +14,7 @@ Modern, mostly memory safe C++, is enforced by:\
|
||||
complains about, in practice usually all of them, though I suppose that as
|
||||
the project gets bigger, some will slip through.
|
||||
|
||||
    static_assert(__cplusplus >= 201703, "C++ version out of date");
|
||||
|
||||
The gsl adds span for pointer arithmetic, where the
|
||||
size of the array pointed to is kept with the pointer for safe iteration and
|
||||
@ -29,75 +29,75 @@ std::make_unique, std::make_shared create pointers to memory managed
|
||||
objects. (But single objects, not an array, use spans for pointer
|
||||
arithmetic)
|
||||
|
||||
    auto sp = std::make_shared<int>(42);
    std::weak_ptr<int> wp{sp};
|
||||
|
||||
# Array sizing and allocation
|
||||
|
||||
/* This code creates a bunch of "brown dog" strings on the heap to test automatic memory management. */
|
||||
char ca[]{ "red dog" }; //Automatic array sizing
|
||||
std::array<char,8> arr{"red dog"}; //Requires #include <array>
|
||||
/* No automatic array sizing, going to have to count your initializer list. */
|
||||
/* The pointer of the underlying array is referenced by &arr[0] but arr is not the underlying array, nor a pointer to it. */
|
||||
/* [0] invokes operator[], and operator[] is the member function that accesses the underlying array.*/
|
||||
/* The size of the underlying array is referenced by arr.size();*/
|
||||
/* size known at compile time, array can be returned from a function getting the benefits of stack allocation.*/
|
||||
// can be passed around like POD
|
||||
char *p = new char[10]{ "brown dog" }; //No automatic array
|
||||
// sizing for new
|
||||
std::unique_ptr<char[]>puc{ p }; // Now you do not have
|
||||
// to remember to delete p
|
||||
auto puc2 = std::move(puc); /* No copy constructor. Pass by reference, or pass a view, such as a span.*/
|
||||
std::unique_ptr<char> puc3{ new char[10]{ "brown dog" } };
|
||||
/* Array size unknown at compile or run time, needs a span, and you have to manually count the initialization list. */
|
||||
/* Compiler guards against overflow, but does not default to the correct size.*/
|
||||
/* You can just guess a way too small size, and the compiler in its error message will tell you what the size should be. */
|
||||
auto pu = std::make_unique<char[]>(10); // uninitialized,
|
||||
// needs procedural initialization.
|
||||
/* This code creates a bunch of "brown dog" strings on the heap to test automatic memory management. */
|
||||
char ca[]{ "red dog" }; //Automatic array sizing
|
||||
std::array<char,8> arr{"red dog"}; //Requires #include <array>
|
||||
/* No automatic array sizing, going to have to count your initializer list. */
|
||||
/* The pointer of the underlying array is referenced by &arr[0] but arr is not the underlying array, nor a pointer to it. */
|
||||
/* [0] invokes operator[], and operator[] is the member function that accesses the underlying array.*/
|
||||
/* The size of the underlying array is referenced by arr.size();*/
|
||||
/* size known at compile time, array can be returned from a function getting the benefits of stack allocation.*/
|
||||
// can be passed around like POD
|
||||
char *p = new char[10]{ "brown dog" }; //No automatic array
|
||||
// sizing for new
|
||||
std::unique_ptr<char[]>puc{ p }; // Now you do not have
|
||||
// to remember to delete p
|
||||
auto puc2 = std::move(puc); /* No copy constructor. Pass by reference, or pass a view, such as a span.*/
|
||||
std::unique_ptr<char> puc3{ new char[10]{ "brown dog" } };
|
||||
/* Array size unknown at compile or run time, needs a span, and you have to manually count the initialization list. */
|
||||
/* Compiler guards against overflow, but does not default to the correct size.*/
|
||||
/* You can just guess a way too small size, and the compiler in its error message will tell you what the size should be. */
|
||||
auto pu = std::make_unique<char[]>(10); // uninitialized,
|
||||
// needs procedural initialization.
|
||||
|
||||
/* span can be trivially created from a compile time declared array, an std:array or from a run time std:: vector, but then these things already have the characteristics of a span, and they own their own storage. */
|
||||
/* You would use a span to point into an array, for example a large blob containing smaller blobs.*/
|
||||
/* span can be trivially created from a compile time declared array, an std:array or from a run time std:: vector, but then these things already have the characteristics of a span, and they own their own storage. */
|
||||
/* You would use a span to point into an array, for example a large blob containing smaller blobs.*/
|
||||
|
||||
    // Placement New:
    char *buf = new char[1000]; //pre-allocated buffer
    char *p = buf;
    MyObject *pMyObject = new (p) MyObject();
    p += ((sizeof(MyObject)+7)/8)*8;
    /* Problem is that you will have to explicitly call the destructor on each object before freeing your buffer. */
    /* If your objects are POD plus code for operating on POD, you don’t have to worry about destructors.*/
    // A POD object cannot do run time polymorphism.
    /* The pointer referencing it has to be of the correct compile time type, and it has to explicitly have the default constructor when constructed with no arguments.*/
    /* If, however, you are building a tree in the pre-allocated buffer, no sweat. */
    /* You just destruct the root of the tree, and it recursively destructs all its children. */
    /* If you want an arbitrary graph, just make sure you have owning and non owning pointers, and the owning pointers form a tree. */
    /* Anything you can do with run time polymorphism, you can likely do with a type flag.*/
|
||||
|
||||
static_assert ( std::is_pod<MyType>() , "MyType for some reason is not POD" );
|
||||
class MyClass
|
||||
{
|
||||
public:
|
||||
MyClass()=default; // Otherwise unlikely to be POD
|
||||
MyClass& operator=(const MyClass&) = default; // default assignment Not actually needed, but just a reminder.
|
||||
};
|
||||
static_assert ( std::is_pod<MyType>() , "MyType for some reason is not POD" );
|
||||
class MyClass
|
||||
{
|
||||
public:
|
||||
MyClass()=default; // Otherwise unlikely to be POD
|
||||
MyClass& operator=(const MyClass&) = default; // default assignment Not actually needed, but just a reminder.
|
||||
};
|
||||
|
||||
### alignment
|
||||
### alignment
|
||||
|
||||
```c++
|
||||
// every object of type struct_float will be aligned to alignof(float) boundary
|
||||
```c++
|
||||
// every object of type struct_float will be aligned to alignof(float) boundary
|
||||
// (usually 4)
|
||||
struct alignas(float) struct_float {
|
||||
// your definition here
|
||||
// your definition here
|
||||
};
|
||||
|
||||
// every object of type sse_t will be aligned to 256-byte boundary
|
||||
struct alignas(256) sse_t
|
||||
{
|
||||
float sse_data[4];
|
||||
float sse_data[4];
|
||||
};
|
||||
|
||||
// the array "cacheline" will be aligned to 128-byte boundary
|
||||
alignas(128) char cacheline[128];
|
||||
```
|
||||
```
|
||||
|
||||
# Construction, assignment, and destruction
|
||||
|
||||
@ -119,25 +119,25 @@ deleted.
|
||||
|
||||
Copy constructors
|
||||
|
||||
A(const A& a)
|
||||
A(const A& a)
|
||||
|
||||
Copy assignment
|
||||
|
||||
A& operator=(const A other)
|
||||
A& operator=(const A other)
|
||||
|
||||
Move constructors
|
||||
|
||||
class_name ( class_name && other)
|
||||
A(A&& o)
|
||||
D(D&&) = default;
|
||||
class_name ( class_name && other)
|
||||
A(A&& o)
|
||||
D(D&&) = default;
|
||||
|
||||
Move assignment operator
|
||||
|
||||
V& operator=(V&& other)
|
||||
V& operator=(V&& other)
|
||||
|
||||
Move constructors
|
||||
|
||||
class_name ( class_name && )
|
||||
class_name ( class_name && )
|
||||
|
||||
## rvalue references
|
||||
|
||||
@ -161,21 +161,21 @@ forwarding the resources.
|
||||
|
||||
where `std::forward` is defined as follows:
|
||||
|
||||
template< class T > struct remove_reference {
|
||||
typedef T type;
|
||||
};
|
||||
template< class T > struct remove_reference<T&> {
|
||||
typedef T type;
|
||||
};
|
||||
template< class T > struct remove_reference<T&&> {
|
||||
typedef T type;
|
||||
};
|
||||
template< class T > struct remove_reference {
|
||||
typedef T type;
|
||||
};
|
||||
template< class T > struct remove_reference<T&> {
|
||||
typedef T type;
|
||||
};
|
||||
template< class T > struct remove_reference<T&&> {
|
||||
typedef T type;
|
||||
};
|
||||
|
||||
template<class S>
|
||||
S&& forward(typename std::remove_reference<S>::type& a) noexcept
|
||||
{
|
||||
return static_cast<S&&>(a);
|
||||
}
|
||||
template<class S>
|
||||
S&& forward(typename std::remove_reference<S>::type& a) noexcept
|
||||
{
|
||||
return static_cast<S&&>(a);
|
||||
}
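
A hedged usage sketch of perfect forwarding (the factory below is hypothetical, purely illustrative, and not defined elsewhere in this document):

    #include <memory>
    #include <utility>
    template<class T, class... Args>
    std::unique_ptr<T> make_sketch(Args&&... args) {
        // lvalues are forwarded as lvalues, rvalues as rvalues, to T's constructor
        return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
    }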
|
||||
|
||||
`std::move(t)` and `std::forward(t)` don't actually perform any action
|
||||
in themselves, rather they cause the code referencing `t` to use the intended
|
||||
@ -192,12 +192,12 @@ anyway.
|
||||
When you declare your own constructors, copiers, movers, and deleters,
|
||||
you should generally mark them noexcept.
|
||||
|
||||
struct foo {
|
||||
foo() noexcept {}
|
||||
foo( const foo & ) noexcept { }
|
||||
foo( foo && ) noexcept { }
|
||||
~foo() {}
|
||||
};
|
||||
struct foo {
|
||||
foo() noexcept {}
|
||||
foo( const foo & ) noexcept { }
|
||||
foo( foo && ) noexcept { }
|
||||
~foo() {}
|
||||
};
|
||||
|
||||
Destructors are noexcept by default. If a destructor throws an exception as
|
||||
a result of a destruction caused by an exception, the result is undefined,
|
||||
@ -207,8 +207,8 @@ ways that are unlikely to be satisfactory.
|
||||
If you need to define a copy constructor, probably also need to define
|
||||
an assignment operator.
|
||||
|
||||
t2 = t1; /* calls assignment operator, same as "t2.operator=(t1);" */
|
||||
Test t3 = t1; /* calls copy constructor, same as "Test t3(t1);" */
|
||||
t2 = t1; /* calls assignment operator, same as "t2.operator=(t1);" */
|
||||
Test t3 = t1; /* calls copy constructor, same as "Test t3(t1);" */
|
||||
|
||||
## casts
|
||||
|
||||
@ -219,12 +219,12 @@ in the source class instead of the destination class, hence most useful
|
||||
when you are converting to a generic C type, or to the type of an
|
||||
external library that you do not want to change.
|
||||
|
||||
struct X {
|
||||
int y;
|
||||
operator int(){ return y; }
|
||||
operator const int&(){ return y; } /* C habits would lead you to incorrectly expect "return &y;", which is what is implied under the hood. */
|
||||
operator int*(){ return &y; } // Hood is opened.
|
||||
};
|
||||
struct X {
|
||||
int y;
|
||||
operator int(){ return y; }
|
||||
operator const int&(){ return y; } /* C habits would lead you to incorrectly expect "return &y;", which is what is implied under the hood. */
|
||||
operator int*(){ return &y; } // Hood is opened.
|
||||
};
|
||||
|
||||
Mpir, the Visual Studio skew of GMP infinite precision library, has some
|
||||
useful and ingenious template code for converting C type functions of
|
||||
@ -257,20 +257,20 @@ allocation and redundant copy.
|
||||
|
||||
# Template specialization
|
||||
|
||||
namespace N {
|
||||
template<class T> class Y { /*...*/ }; // primary template
|
||||
template<> class Y<double> ; // forward declare specialization for double
|
||||
}
|
||||
template<>
|
||||
class N::Y<double> { /*...*/ }; // OK: specialization in same namespace
|
||||
namespace N {
|
||||
template<class T> class Y { /*...*/ }; // primary template
|
||||
template<> class Y<double> ; // forward declare specialization for double
|
||||
}
|
||||
template<>
|
||||
class N::Y<double> { /*...*/ }; // OK: specialization in same namespace
|
||||
|
||||
is used when you have sophisticated template code, because you have to
|
||||
use recursion for looping as the Mpir system uses it to evaluate an
|
||||
arbitrarily complex recursive expression – but I think my rather crude
|
||||
implementation will not be nearly so clever.
|
||||
|
||||
extern template int fun(int);
|
||||
/*prevents redundant instantiation of fun in this compilation unit – and thus renders the code for fun unnecessary in this compilation unit.*/
|
||||
extern template int fun(int);
|
||||
/*prevents redundant instantiation of fun in this compilation unit – and thus renders the code for fun unnecessary in this compilation unit.*/
|
||||
|
||||
# Template traits, introspection
|
||||
|
||||
@ -308,34 +308,34 @@ implements that functionality entirely up to the derived class.
|
||||
Interface classes are often named beginning with an I. Here’s a sample
|
||||
interface class:.
|
||||
|
||||
class IErrorLog
|
||||
{
|
||||
public:
|
||||
virtual bool openLog(const char *filename) = 0;
|
||||
virtual bool closeLog() = 0;
|
||||
class IErrorLog
|
||||
{
|
||||
public:
|
||||
virtual bool openLog(const char *filename) = 0;
|
||||
virtual bool closeLog() = 0;
|
||||
|
||||
virtual bool writeError(const char *errorMessage) = 0;
|
||||
virtual bool writeError(const char *errorMessage) = 0;
|
||||
|
||||
virtual ~IErrorLog() {} // make a virtual destructor in case we delete an IErrorLog pointer, so the proper derived destructor is called
|
||||
// Notice that the virtual destructor is declared to be trivial, but not declared =0;
|
||||
};
|
||||
virtual ~IErrorLog() {} // make a virtual destructor in case we delete an IErrorLog pointer, so the proper derived destructor is called
|
||||
// Notice that the virtual destructor is declared to be trivial, but not declared =0;
|
||||
};
|
||||
|
||||
[Override
|
||||
specifier](https://en.cppreference.com/w/cpp/language/override)
|
||||
|
||||
struct A
|
||||
{
|
||||
virtual void foo();
|
||||
void bar();
|
||||
};
|
||||
struct A
|
||||
{
|
||||
virtual void foo();
|
||||
void bar();
|
||||
};
|
||||
|
||||
struct B : A
|
||||
{
|
||||
void foo() const override; // Error: B::foo does not override A::foo
|
||||
// (signature mismatch)
|
||||
void foo() override; // OK: B::foo overrides A::foo
|
||||
void bar() override; // Error: A::bar is not virtual
|
||||
};
|
||||
struct B : A
|
||||
{
|
||||
void foo() const override; // Error: B::foo does not override A::foo
|
||||
// (signature mismatch)
|
||||
void foo() override; // OK: B::foo overrides A::foo
|
||||
void bar() override; // Error: A::bar is not virtual
|
||||
};
|
||||
|
||||
Similarly [Final
|
||||
specifier](https://en.cppreference.com/w/cpp/language/final)
|
||||
@ -344,11 +344,11 @@ specifier](https://en.cppreference.com/w/cpp/language/final)
|
||||
storage](http://www.cplusplus.com/reference/type_traits/aligned_storage/)for
|
||||
use with placement new
|
||||
|
||||
    void* p = std::aligned_alloc(alignof(MyClass), sizeof(MyClass));
    MyClass* pmc = new (p) MyClass; //Placement new.
    // ...
    pmc->~MyClass(); //Explicit call to destructor.
    std::free(p);
|
||||
|
||||
# GSL: Guideline Support Library
|
||||
|
||||
@ -357,10 +357,10 @@ are suggested for use by the C++ Core Guidelines maintained by the
|
||||
Standard C++ Foundation. This repo contains [Microsoft’s implementation
|
||||
of GSL](https://github.com/Microsoft/GSL).
|
||||
|
||||
git clone https://github.com/Microsoft/GSL.git
|
||||
cd gsl
|
||||
git tag
|
||||
git checkout tags/v2.0.0
|
||||
git clone https://github.com/Microsoft/GSL.git
|
||||
cd gsl
|
||||
git tag
|
||||
git checkout tags/v2.0.0
|
||||
|
||||
Which implementation mostly works on gcc/Linux, but is canonical on
|
||||
Visual Studio.
|
||||
@ -393,37 +393,37 @@ makes the relationship between the templated base class or classes and
|
||||
the derived class cyclic, so that the derived class tends to function as
|
||||
real base class. Useful for mixin classes.
|
||||
|
||||
    template <typename T> class Mixin1{
    public:
        // ...
        void doSomething() //using the other mixin classes and the derived class T
        {
            T& derived = static_cast<T&>(*this);
            // use derived...
        }
    private:
        Mixin1(){}; // prevents the class from being used outside the mix
        friend T;
    };

    template <typename T> class Mixin2{
    public:
        // ...
        void doSomethingElse()
        {
            T& derived = static_cast<T&>(*this);
            // use derived...
        }
    private:
        Mixin2(){};
        friend T;
    };

    class composite: public Mixin1<composite>, public Mixin2<composite>{
        composite( int x, char * y): Mixin1(x), Mixin2(y[0]) { ...}
        composite():composite(7,"a" ){ ...}
    };
|
||||
|
||||
# Aggregate initialization
|
||||
|
||||
@ -432,12 +432,12 @@ constructor is implied default.
|
||||
|
||||
A class can be explicitly defined to take aggregate initialization
|
||||
|
||||
    class T{
        T(std::initializer_list<const unsigned char> in){
            for (auto i{in.begin()}; i<in.end(); i++){
                // do stuff with *i
            }
        }
    };
|
||||
|
||||
but that does not make it of aggregate type. Aggregate type has *no*
|
||||
constructors except default and deleted constructors
|
||||
@ -446,7 +446,7 @@ constructors except default and deleted constructors
|
||||
|
||||
To construct a lambda in the heap:
|
||||
|
||||
    auto p = new auto([a,b,c](){});
|
||||
|
||||
Objects inside the lambda are constructed in the heap.
|
||||
|
||||
@ -454,22 +454,22 @@ similarly placement `new`, and `unique_ptr`.
|
||||
|
||||
To template a function that takes a lambda argument:
|
||||
|
||||
    template <typename F>
    void myFunction(F&& lambda){
        //some things
    }
|
||||
|
||||
You can put a lambda in a class using decltype,and pass it around for
|
||||
continuations, though you would probably need to template the class:
|
||||
|
||||
    template<class T>class foo {
    public:
        T func;
        foo(T in) :func{ in } {}
        auto test(int x) { return func(x); }
    };
    ....
    auto bar = [](int x)->int {return x + 1; };
    foo<decltype(bar)> foobar(bar);
|
||||
|
||||
But we had to introduce a name, bar, so that decltype would have
|
||||
something to work with, which lambdas are intended to avoid. If we are
|
||||
@ -480,11 +480,11 @@ is very possibly pod.
|
||||
If we are sticking a lambda around to be called later, might copy it by
|
||||
value into a templated class, or might put it on the heap.
|
||||
|
||||
auto bar = []() {return 5;};
|
||||
auto bar = []() {return 5;};
|
||||
|
||||
You can give it to a std::function:
|
||||
|
||||
auto func_bar = std::function<int()>(bar);
|
||||
auto func_bar = std::function<int()>(bar);
|
||||
|
||||
In this case, it will get a copy of the value of bar. If bar had
|
||||
captured anything by value, there would be two copies of those values on
|
||||
@ -495,9 +495,9 @@ bar, as per the rules of cleaning up stack variables.
|
||||
|
||||
You could just as easily allocate one on the heap:
|
||||
|
||||
    auto bar_ptr = std::make_unique<decltype(bar)>(bar);
|
||||
|
||||
    std::function<int(int)> increm{ [](int arg){ return arg + 1; } };
|
||||
|
||||
presumably does this behind the scenes
|
||||
|
||||
@ -549,43 +549,43 @@ which can result in messy reallocations.
|
||||
One way is to drop back into old style C, and tell C++ not to fuck
|
||||
around.
|
||||
|
||||
    struct Packet
    {
        unsigned int bytelength;
        unsigned int data[];

    private:
        // Will cause compiler error if you misuse this struct
        Packet(const Packet&);
        void operator=(const Packet&);
    };
    Packet* CreatePacket(unsigned int length)
    {
        Packet *output = (Packet*) malloc((length+1)*sizeof(Packet));
        output->bytelength = length;
        return output;
    }
|
||||
|
||||
Another solution is to work around C++’s inability to handle variable
|
||||
sized objects by fixing your hash function to handle disconnected data.
|
||||
|
||||
# for_each
|
||||
|
||||
    template<class InputIterator, class Function>
    Function for_each(InputIterator first, InputIterator last, Function fn){
        while (first!=last) {
            fn (*first);
            ++first;
        }
        return std::move(fn);
    }
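
A hedged usage sketch (the vector here is hypothetical, purely for illustration; requires `<vector>`):

    std::vector<int> v{1, 2, 3};
    int sum = 0;
    for_each(v.begin(), v.end(), [&sum](int x){ sum += x; }); // sum == 6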
|
||||
|
||||
# Range-based for loop
|
||||
|
||||
    for (auto x : temporary_with_begin_and_end_members) { code; }
    for (auto& x : temporary_with_begin_and_end_members) { code; }
    for (auto&& x : temporary_with_begin_and_end_members) { code; }
    for (T thing = foo(); auto& x : thing.items()) { code; }
|
||||
|
||||
The types of the begin_expr and the end_expr do not have to be the same,
|
||||
and in fact the type of the end_expr does not have to be an iterator: it
|
||||
@ -598,14 +598,14 @@ member named begin and a member named end (regardless of the type or
|
||||
accessibility of such member), then begin_expr is \_\_range.begin() and
|
||||
end_expr is \_\_range.end();
|
||||
|
||||
for (T thing = foo(); auto x : thing.items()) { code; }
|
||||
for (T thing = foo(); auto x : thing.items()) { code; }
|
||||
|
||||
Produces code equivalent to:
|
||||
|
||||
    T thing = foo();
    auto bar = thing.items();
    auto enditer = bar.end();
    for (auto iter = bar.begin(); iter != enditer; ++iter) {
        auto x = *iter;
        code;
    }
|
||||
|
@ -287,21 +287,21 @@ is not in fact terribly useful for anything you are interested in doing.
|
||||
|
||||
```C++
|
||||
typedef enum memory_order {
|
||||
memory_order_relaxed, // relaxed
|
||||
memory_order_consume, // consume
|
||||
/* No one, least of all compiler writers, understands what
|
||||
"consume" does.
|
||||
It has consequences which are difficult to understand or predict,
|
||||
and which are apt to be inconsistent between architectures,
|
||||
libraries, and compilers. */
|
||||
memory_order_acquire, // acquire
|
||||
memory_order_release, // release
|
||||
memory_order_acq_rel, // acquire/release
|
||||
memory_order_seq_cst // sequentially consistent
|
||||
/* "sequentially consistent" interacts with the more commonly\
|
||||
used acquire and release in ways difficult to understand or
|
||||
predict, and in ways that compiler and library writers
|
||||
disagree on. */
|
||||
memory_order_relaxed, // relaxed
|
||||
memory_order_consume, // consume
|
||||
/* No one, least of all compiler writers, understands what
|
||||
"consume" does.
|
||||
It has consequences which are difficult to understand or predict,
|
||||
and which are apt to be inconsistent between architectures,
|
||||
libraries, and compilers. */
|
||||
memory_order_acquire, // acquire
|
||||
memory_order_release, // release
|
||||
memory_order_acq_rel, // acquire/release
|
||||
memory_order_seq_cst // sequentially consistent
|
||||
/* "sequentially consistent" interacts with the more commonly\
|
||||
used acquire and release in ways difficult to understand or
|
||||
predict, and in ways that compiler and library writers
|
||||
disagree on. */
|
||||
} memory_order;
|
||||
```
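
A hedged illustration (standard usage, not code from this project) of the publish/consume pattern that acquire and release are meant for:

```C++
#include <atomic>

int payload = 0;
std::atomic<bool> ready{false};

void producer() {
    payload = 42;                                  // ordinary write
    ready.store(true, std::memory_order_release);  // publish: earlier writes may not sink below this store
}

void consumer() {
    while (!ready.load(std::memory_order_acquire)) // pairs with the release store
        ;                                          // spin
    // payload is guaranteed to be observed as 42 here
}
```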
|
||||
|
||||
@ -440,54 +440,54 @@ static_assert(__STDCPP_THREADS__==1, "Needs threads");
|
||||
// As thread resources have to be managed, need to be wrapped in
|
||||
// RAII
|
||||
class ThreadRAII {
|
||||
std::thread & m_thread;
|
||||
std::thread & m_thread;
|
||||
public:
|
||||
// As a thread object is moveable but not copyable, the thread obj
|
||||
// needs to be constructed inside the invocation of the ThreadRAII
|
||||
// constructor. */
|
||||
ThreadRAII(std::thread & threadObj) : m_thread(threadObj){}
|
||||
~ThreadRAII(){
|
||||
// Check if thread is joinable then detach the thread
|
||||
if(m_thread.joinable()){
|
||||
m_thread.detach();
|
||||
}
|
||||
}
|
||||
};
|
||||
ThreadRAII(std::thread & threadObj) : m_thread(threadObj){}
|
||||
~ThreadRAII(){
|
||||
// Check if thread is joinable then detach the thread
|
||||
if(m_thread.joinable()){
|
||||
m_thread.detach();
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
Examples of thread construction
|
||||
|
||||
```C++
|
||||
void foo(char *){
|
||||
…
|
||||
}
|
||||
void foo(char *){
|
||||
…
|
||||
}
|
||||
|
||||
class foo_functor
|
||||
{
|
||||
public:
|
||||
void operator()(char *){
|
||||
…
|
||||
}
|
||||
};
|
||||
class foo_functor
|
||||
{
|
||||
public:
|
||||
void operator()(char *){
|
||||
…
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
int main(){
|
||||
ThreadRAII thread_one(std::thread (foo, "one"));
|
||||
ThreadRAII thread_two(
|
||||
std::thread (
|
||||
(foo_functor()),
|
||||
"two"
|
||||
)
|
||||
);
|
||||
const char three[]{"three"};
|
||||
ThreadRAII thread_lambda(
|
||||
std::thread(
|
||||
[three](){
|
||||
…
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
int main(){
|
||||
ThreadRAII thread_one(std::thread (foo, "one"));
|
||||
ThreadRAII thread_two(
|
||||
std::thread (
|
||||
(foo_functor()),
|
||||
"two"
|
||||
)
|
||||
);
|
||||
const char three[]{"three"};
|
||||
ThreadRAII thread_lambda(
|
||||
std::thread(
|
||||
[three](){
|
||||
…
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
C++ has a bunch of threading facilities that are designed for the case that
|
||||
|
@ -9,7 +9,7 @@ other processes that do the actual work. While git-bash.exe is undocumented, `m
|
||||
|
||||
Example Windows shortcut to bash script: `/x/src/wallet/docs/mkdocs.sh`
|
||||
|
||||
"C:\Program Files\Git\git-bash.exe" --cd=X:\src\wallet --needs-console --no-hide --command=usr\bin\bash.exe --login -i docs/mkdocs.sh
|
||||
"C:\Program Files\Git\git-bash.exe" --cd=X:\src\wallet --needs-console --no-hide --command=usr\bin\bash.exe --login -i docs/mkdocs.sh
|
||||
|
||||
Notice that the paths to the left of the invocation of `bash` are in Windows
|
||||
format, and the paths to the right of the invocation of bash are in gnu
|
||||
|
@ -18,4 +18,3 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../../rho.ico">
|
||||
|
@ -1,45 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
}
|
||||
p.center {
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Scripting</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Scripting</h1><p>
|
||||
|
||||
Initially we intend to implement human to human secret messaging, with money that can be transferred in the message, and the capability to make messages public and provably linked with an identity</p><p>
|
||||
|
||||
But obviously we are eventually going to need bot responses, and bot scripts that can interact with the recipient within a sandbox. Not wanting to repeat the mistakes of the internet, we will want the same bot language generating responses, and interacting with the recipient.</p><p>
|
||||
|
||||
There is a <a href="https://github.com/dbohdan/embedded-scripting-languages">list</a> of embeddable scripting languages.</p><p>
|
||||
|
||||
Lua and python are readily embeddable, but <a href="https://benchmarksgame-team.pages.debian.net/benchmarksgame/">the language shootout</a> tells us they are terribly slow.</p><p>
|
||||
|
||||
Lisp is sort of embeddable, startlingly fast, and is enormously capable, but it is huge, and not all that portable.</p><p>
|
||||
|
||||
ES (javascript) is impressively fast in its node.js implementation, which does not necessarily imply the embeddable versions are fast.</p><p>
|
||||
|
||||
Very few of the scripting languages make promises about sandbox capability, and I know there is enormous grief over sandboxing Javascript. It can be done, but it is a big project.</p><p>
|
||||
|
||||
Angelscript <em>does</em> make promises about sandbox capability, but I have absolutely no information its capability and performance.</p><p>
|
||||
|
||||
Tcl is event loop oriented.</p><p>
|
||||
|
||||
But hell, I have an event loop. I want my events to put data in memory, then launch a script for the event, the script does something with the data, generates some new data, fires some events that will make use of the data, and finishes.</p><p>
|
||||
|
||||
Given that I want programs to be short and quickly terminate, maybe we do not need dynamic memory management and garbage collection. Maybe arkscript would handle it.</p>
|
||||
|
||||
<p style="background-color : #ccffcc; font-size:80%">This document is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">CreativeCommons Attribution-Share Alike 3.0 License</a></p>
|
||||
</body>
|
||||
</html>
|
52
docs/libraries/scripting.md
Normal file
52
docs/libraries/scripting.md
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
title: Scripting
|
||||
---
|
||||
|
||||
Initially we intend to implement human to human secret messaging, with
|
||||
money that can be transferred in the message, and the capability to make
|
||||
messages public and provably linked with an identity
|
||||
|
||||
But obviously we are eventually going to need bot responses, and bot
|
||||
scripts that can interact with the recipient within a sandbox. Not wanting
|
||||
to repeat the mistakes of the internet, we will want the same bot language
|
||||
generating responses, and interacting with the recipient.
|
||||
|
||||
There is a [list](https://github.com/dbohdan/embedded-scripting-languages) of embeddable scripting languages.
|
||||
|
||||
Lua and python are readily embeddable, but [the language shootout](https://benchmarksgame-team.pages.debian.net/benchmarksgame/) tells us
|
||||
they are terribly slow.
|
||||
|
||||
Lua, however, has `LuaJIT`, which is about ten times faster than `Lua`, which
|
||||
makes it only about four or five times slower than JavaScript under
|
||||
`node.js`. It is highly portable, though I get the feeling that porting it to
|
||||
windows is going to be a pain, but then it is never going to be expected to
|
||||
call the windows file and gui operations.
|
||||
|
||||
Lisp is sort of embeddable, startlingly fast, and is enormously capable, but
|
||||
it is huge, and not all that portable.
|
||||
|
||||
ES (JavaScript) is impressively fast in its node.js implementation, which does
|
||||
not necessarily imply the embeddable versions are fast.
|
||||
|
||||
Very few of the scripting languages make promises about sandbox
|
||||
capability, and I know there is enormous grief over sandboxing JavaScript.
|
||||
It can be done, but it is a big project.
|
||||
|
||||
Angelscript *does* make promises about sandbox capability, but I have
|
||||
absolutely no information about its capability and performance.
|
||||
|
||||
Tcl is event loop oriented.
|
||||
|
||||
But hell, I have an event loop. I want my events to put data in memory,
|
||||
then launch a script for the event, the script does something with the data,
|
||||
generates some new data, fires some events that will make use of the data, and
|
||||
finishes.
|
||||
|
||||
Given that I want programs to be short and quickly terminate, maybe we
|
||||
do not need dynamic memory management and garbage collection.
|
||||
|
||||
Lua is slowed by dynamic memory management. But with event
|
||||
orientation, dynamic memory management is complete waste, since your
|
||||
only memory management is allocating continuation objects to be fired on
|
||||
the next event - which is to say, all memory management is explicit, when
|
||||
an event handler detaches.
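
As a rough sketch of that model (assuming the stock Lua 5.x / LuaJIT C API,
which this project has not committed to): give every event its own throwaway
interpreter state, run the script to completion, and tear the whole state
down when the handler detaches, so there is no long-lived garbage collected
heap to worry about.

```cpp
// Sketch only: one disposable Lua state per event. "Event" and its fields
// are hypothetical names, not part of the existing codebase.
#include <lua.hpp>
#include <string>

struct Event {
    std::string script;   // bot script attached to the message
    std::string payload;  // data the event put in memory
};

bool run_event_script(const Event& ev) {
    lua_State* L = luaL_newstate();   // fresh state: nothing shared between events
    if (!L) return false;
    luaL_openlibs(L);                 // or a restricted subset, for sandboxing
    lua_pushlstring(L, ev.payload.data(), ev.payload.size());
    lua_setglobal(L, "payload");      // expose the event data to the script
    bool ok = luaL_dostring(L, ev.script.c_str()) == 0;  // 0 means success
    lua_close(L);                     // explicit teardown: the only memory management needed
    return ok;
}
```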
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -17,11 +17,11 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../../rho.ico">
|
||||
<title>Serialization and Canonical form</title>
|
||||
<title>Serialization and Canonical form</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="../libraries.html"> To Home page</a> </p>
|
||||
<h1>Serialization and Canonical form</h1><p>
|
||||
<p><a href="../libraries.html"> To Home page</a> </p>
|
||||
<h1>Serialization and Canonical form</h1><p>
|
||||
|
||||
On reflection, using a serialization library is massive overkill. We are serializing records that always carry a record type identifier, together with hashes, signatures, and utf8 strings, which should already be in network order. The only thing we actually have to serialize is ints, for which we might as well write our own serialization code, in an object of type serialization buffer<pre>
|
||||
namespace ro {
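    // Hypothetical sketch only; not the project's actual ro:: code, which the
    // diff truncates here. The only values that need byte-order handling are
    // ints; hashes, signatures and utf8 strings are appended unchanged, since
    // they are already in network order. Assumes &lt;cstdint&gt; and &lt;cstring&gt;.
    struct sketch_serialization_buffer {
        unsigned char buf[4096];
        size_t len = 0;
        void append_bytes(const void* p, size_t n) {
            if (n > sizeof(buf) - len) return;   // sketch: real code would signal overflow
            memcpy(buf + len, p, n);
            len += n;
        }
        void append_uint32(uint32_t v) {         // big endian, one byte at a time,
            unsigned char b[4];                  // so host endianness never matters
            b[0] = (unsigned char)(v >> 24); b[1] = (unsigned char)(v >> 16);
            b[2] = (unsigned char)(v >> 8);  b[3] = (unsigned char)v;
            append_bytes(b, 4);
        }
    };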
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
title:
|
||||
Lightning Layer
|
||||
Lightning Layer
|
||||
---
|
||||
# This discussion of the lightning layer may well be obsoleted
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Logon protocol</title> </head>
|
||||
|
||||
<body>
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
title:
|
||||
Merkle-patricia Dac
|
||||
Merkle-patricia Dac
|
||||
# katex
|
||||
---
|
||||
|
||||
@ -913,7 +913,7 @@ solve the problem of the number of items not being a power of two?
|
||||
<use transform="translate(136 -44)" xlink:href="#merkle_vertex"/>
|
||||
<use transform="translate(144)" xlink:href="#height_1_tree"/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g id="blockchain_id" >
|
||||
<ellipse cx="14" cy="249" fill="#80e080" rx="8" ry="5"/>
|
||||
<text>
|
||||
|
@ -3,16 +3,16 @@ set -e
|
||||
cd `dirname $0`
|
||||
|
||||
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
|
||||
osoptions=""
|
||||
osoptions=""
|
||||
elif [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
osoptions=""
|
||||
osoptions=""
|
||||
elif [[ "$OSTYPE" == "cygwin" ]]; then
|
||||
osoptions="--fail-if-warnings --eol=lf "
|
||||
osoptions="--fail-if-warnings --eol=lf "
|
||||
elif [[ "$OSTYPE" == "msys" ]]; then
|
||||
osoptions="--fail-if-warnings --eol=lf "
|
||||
osoptions="--fail-if-warnings --eol=lf "
|
||||
fi
|
||||
templates="./pandoc_templates/"
|
||||
options=$osoptions"--toc -N --toc-depth=5 --wrap=preserve --metadata=lang:en --include-in-header=$templates/header.pandoc --include-before-body=$templates/before.pandoc --include-after-body=$templates/after.pandoc --css=$templates/style.css -o"
|
||||
options=$osoptions"--toc -N --toc-depth=5 --wrap=preserve --metadata=lang:en --include-in-header=$templates/header.pandoc --include-before-body=$templates/before.pandoc --css=$templates/style.css -o"
|
||||
for f in *.md
|
||||
do
|
||||
len=${#f}
|
||||
@ -20,16 +20,22 @@ do
|
||||
if [ $f -nt $base.html ];
|
||||
then
|
||||
katex=""
|
||||
for i in 1 2 3 4
|
||||
mine="--include-after-body=$templates/after.pandoc "
|
||||
for i in 1 2 3 4 5 6
|
||||
do
|
||||
read line
|
||||
read line
|
||||
if [[ $line =~ katex$ ]];
|
||||
then
|
||||
katex=" --katex=./"
|
||||
fi
|
||||
then
|
||||
katex=" --katex=./"
|
||||
fi
|
||||
if [[ $line =~ notmine$ ]];
|
||||
then
|
||||
mine=" "
|
||||
fi
|
||||
done <$f
|
||||
pandoc $katex $options $base.html $base.md
|
||||
pandoc $katex $mine $options $base.html $base.md
|
||||
echo "$base.html from $f"
|
||||
|
||||
#else
|
||||
# echo " $base.html up to date"
|
||||
fi
|
||||
@ -44,13 +50,13 @@ do
|
||||
katex=""
|
||||
for i in 1 2 3 4
|
||||
do
|
||||
read line
|
||||
read line
|
||||
if [[ $line =~ katex ]];
|
||||
then
|
||||
katex=" --katex=./"
|
||||
fi
|
||||
then
|
||||
katex=" --katex=./"
|
||||
fi
|
||||
done <$f
|
||||
pandoc $katex $options $base.html $base.md
|
||||
pandoc $katex $mine $options $base.html $base.md
|
||||
echo "$base.html from $f"
|
||||
#else
|
||||
# echo " $base.html up to date"
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -11,62 +11,62 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Multicore</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Multicore</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Multicore</h1>
|
||||
<p> At the same time as the security crisis has hit, the multicore crisis
|
||||
has hit. A short while ago we were dealing with isolated serial
|
||||
machines, now our programs must utilize a sea of machines. </p>
|
||||
<p> Multithreading is hard. Doing it routinely, doing it with large
|
||||
programs, invariably fails. </p>
|
||||
<p> Intel, NVIDIA, and Google however have this crisis well in hand. </p>
|
||||
<p> Big Businesses are attacking the problem, with competence and success,
|
||||
and we can leave them to it and not worry too much. Google is pioneering
|
||||
the way, and Intel and NVIDIA are making similar tools available to the
|
||||
masses. </p>
|
||||
<p> Since massive parallelism is a hard problem, requiring good people, much
|
||||
thought, and much care, the meta solution is to solve that problem as few
|
||||
times as possible, and re-use the resulting solutions as much as
|
||||
possible. If, for example, one uses the hash table provided by
|
||||
Intel’s threaded building blocks library, the Intel library and Intel
|
||||
compiler takes care of hash table related coordination issues that
|
||||
otherwise the programmer would have to take care of, and would probably
|
||||
foul up. </p>
|
||||
<p> Intel has provided a bunch of utilities that make it a good deal easier,
|
||||
Vtune, thread checker, OpenMP, compiler auto parallelism, Intel Thread
|
||||
Checker, Intel VTune Performance Analyzer, and most importantly, Threaded
|
||||
Building Blocks. It is still hard – but no longer damn near
|
||||
impossible. </p>
|
||||
<p> Back in the days when there was one hardware thread of execution driving
|
||||
multiple software threads, locking worked well. These days, not so
|
||||
well. Rather, it is often more desirable to use a lockless
|
||||
transactional approach to handle any shared state. Shared state is
|
||||
hard, better to share nothing – or to leave any sharing to those utilities
|
||||
that someone else has already written and debugged. If rolling your
|
||||
own, better to use InterlockedXxx than Lock. Note that you construct
|
||||
your <a href="interlockedxxx.html">own InterlockedXxx</a> operation for
|
||||
any Xxx using InterlockedCompareExchange. </p>
|
||||
<p> The big solution, however is that pioneered by Google. Rather than each
|
||||
programmer designing his own multithreading and multicore design, one has
|
||||
a small number of very general massively parallel algorithms embodied in
|
||||
useful software for massaging masses of data. The programmer then
|
||||
calls that software and lets it handle the parallelism. Google’s Map
|
||||
Reduce is the classic example of this, but every database servicing a web
|
||||
application is also an example of this, since one typically has many web
|
||||
servers running many processes all of which might potentially update the
|
||||
same data at the same time, and the database is supposed to sort out any
|
||||
resulting problems, while the developers write in single threaded python
|
||||
or ruby on rails, and let the database handle any problems related to
|
||||
massive parallelism. </p>
|
||||
<p> Google’s “app engine” allows programmers to write straightforward single
|
||||
threaded python code in the easy to use Django framework that can be
|
||||
executed in a massively parallel manner with coordination between many
|
||||
parallel processes being performed by Google’s datastore. </p>
|
||||
<p> In short, the multicore crisis, unlike the other crises I describe in
|
||||
this group of web pages, is well in hand. </p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Multicore</h1>
|
||||
<p> At the same time as the security crisis has hit, the multicore crisis
|
||||
has hit. A short while ago we were dealing with isolated serial
|
||||
machines, now our programs must utilize a sea of machines. </p>
|
||||
<p> Multithreading is hard. Doing it routinely, doing it with large
|
||||
programs, invariably fails. </p>
|
||||
<p> Intel, NVIDIA, and Google however have this crisis well in hand. </p>
|
||||
<p> Big Businesses are attacking the problem, with competence and success,
|
||||
and we can leave them to it and not worry too much. Google is pioneering
|
||||
the way, and Intel and NVIDIA are making similar tools available to the
|
||||
masses. </p>
|
||||
<p> Since massive parallelism is a hard problem, requiring good people, much
|
||||
thought, and much care, the meta solution is to solve that problem as few
|
||||
times as possible, and re-use the resulting solutions as much as
|
||||
possible. If, for example, one uses the hash table provided by
|
||||
Intel’s threaded building blocks library, the Intel library and Intel
|
||||
compiler takes care of hash table related coordination issues that
|
||||
otherwise the programmer would have to take care of, and would probably
|
||||
foul up. </p>
|
||||
<p> Intel has provided a bunch of utilities that make it a good deal easier,
|
||||
Vtune, thread checker, OpenMP, compiler auto parallelism, Intel Thread
|
||||
Checker, Intel VTune Performance Analyzer, and most importantly, Threaded
|
||||
Building Blocks. It is still hard – but no longer damn near
|
||||
impossible. </p>
|
||||
<p> Back in the days when there was one hardware thread of execution driving
|
||||
multiple software threads, locking worked well. These days, not so
|
||||
well. Rather, it is often more desirable to use a lockless
|
||||
transactional approach to handle any shared state. Shared state is
|
||||
hard, better to share nothing – or to leave any sharing to those utilities
|
||||
that someone else has already written and debugged. If rolling your
|
||||
own, better to use InterlockedXxx than Lock. Note that you construct
|
||||
your <a href="interlockedxxx.html">own InterlockedXxx</a> operation for
|
||||
any Xxx using InterlockedCompareExchange. </p>
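<p> A sketch of that construction (InterlockedMax is an invented name used
for illustration, not an existing Windows API; InterlockedCompareExchange is
the real primitive): </p>
<pre>
#include &lt;windows.h&gt;

/* Keep retrying the compare-exchange until the value we observed is the
   value we swapped against. */
LONG InterlockedMax(LONG volatile* target, LONG value)
{
    LONG old = *target;
    for (;;) {
        if (old >= value) return old;   /* already at least value: nothing to do */
        LONG prev = InterlockedCompareExchange(target, value, old);
        if (prev == old) return value;  /* our swap won */
        old = prev;                     /* another thread got in first: retry */
    }
}
</pre>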
|
||||
<p> The big solution, however is that pioneered by Google. Rather than each
|
||||
programmer designing his own multithreading and multicore design, one has
|
||||
a small number of very general massively parallel algorithms embodied in
|
||||
useful software for massaging masses of data. The programmer then
|
||||
calls that software and lets it handle the parallelism. Google’s Map
|
||||
Reduce is the classic example of this, but every database servicing a web
|
||||
application is also an example of this, since one typically has many web
|
||||
servers running many processes all of which might potentially update the
|
||||
same data at the same time, and the database is supposed to sort out any
|
||||
resulting problems, while the developers write in single threaded python
|
||||
or ruby on rails, and let the database handle any problems related to
|
||||
massive parallelism. </p>
|
||||
<p> Google’s “app engine” allows programmers to write straightforward single
|
||||
threaded python code in the easy to use Django framework that can be
|
||||
executed in a massively parallel manner with coordination between many
|
||||
parallel processes being performed by Google’s datastore. </p>
|
||||
<p> In short, the multicore crisis, unlike the other crises I describe in
|
||||
this group of web pages, is well in hand. </p>
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
|
@ -8,7 +8,7 @@
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Networking Protocol</title> </head><body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
|
||||
@ -28,7 +28,7 @@ Thus we need a system with proof of stake, and not only proof of stake, but proo
|
||||
|
||||
If the system handles an enormous number of transactions, peers are going to be big and expensive, thus vulnerable to people like Mueller armed with vague and open ended charges of tax evasion and money laundering. Hence the power of a peer over the currency needs to be proportional to the wealth controlled by the secrets held by that peer’s clients. And that peer’s clients need to be free to move from one peer to the next, and apt to move to peers that make it difficult for Mueller to find their clients. </p><p>
|
||||
|
||||
Need a crypto currency where Bob can prove to the whole world that he paid Ann such and such amount, in accord with such and such a bill, but no one else can prove he paid Ann, nor that there ever was such a bill, except he shows them. Bitcoin is far too traceable. We need controlled traceability, where the parrticipants can prove a transaction to third parties and the world, but the world cannot. And Bob needs to be able to prove what the payment was about, that it was part of a conversation, a meeting of minds. </p><p>
|
||||
Need a crypto currency where Bob can prove to the whole world that he paid Ann such and such amount, in accord with such and such a bill, but no one else can prove he paid Ann, nor that there ever was such a bill, except he shows them. Bitcoin is far too traceable. We need controlled traceability, where the participants can prove a transaction to third parties and the world, but the world cannot. And Bob needs to be able to prove what the payment was about, that it was part of a conversation, a meeting of minds. </p><p>
|
||||
|
||||
The reason we have end user demand for crypto currency is the same as the reason we have end user demand for gold. </p><p>
|
||||
|
||||
@ -65,4 +65,3 @@ licensed under the <a rel="license" href="http://creativecommons.org/licenses/by
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
|
||||
</body></html>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -11,12 +11,12 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Normalizing unicode strings</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Normalizing unicode strings</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Normalizing unicode strings</h1><p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Normalizing unicode strings</h1><p>
|
||||
|
||||
I would like strings that look similar to humans to map to the same item. Obviously trailing and leading whitespace needs to go, and internal whitespace should map to a single space.</p><p>
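<p>The whitespace part of that is mechanical (sketch, standard C++ only, and
it ignores the harder Unicode-similarity problem of confusables and canonical
equivalence): drop leading and trailing whitespace and collapse every internal
run of whitespace to one space.</p>
<pre>
#include &lt;cctype&gt;
#include &lt;string&gt;

/* " foo \t bar " becomes "foo bar". Whitespace only; no Unicode normalization. */
std::string collapse_whitespace(std::string s)
{
    std::string out;
    bool pending_space = false;
    for (unsigned char c : s) {
        if (std::isspace(c)) { pending_space = true; continue; }
        if (pending_space) {
            if (out.size() > 0) out.push_back(' ');   /* no space at the front */
            pending_space = false;
        }
        out.push_back((char)c);
    }
    return out;
}
</pre>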
|
||||
|
||||
|
@ -9,7 +9,7 @@
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Openvpn</title> </head>
|
||||
|
||||
<body>
|
||||
|
@ -1,10 +1,10 @@
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 1em;
|
||||
max-width: 30em;
|
||||
margin-left: 1em;
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
|
||||
div.center {text-align:center;}
|
||||
div.centre {text-align:center;}
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
}
|
||||
@ -18,4 +18,3 @@
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
|
@ -1,21 +1,21 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Passphrases</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Passphrases</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Passphrases</h1>
|
||||
<p>Passphrases are of course stronger and easier to
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Passphrases</h1>
|
||||
<p>Passphrases are of course stronger and easier to
|
||||
remember than passwords, but whitespace causes endless
|
||||
problems, which in turn cause endless support calls</p>
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -11,36 +11,36 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Payments, Contracts, Invoices, and Reputational data</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Payments, Contracts, Invoices, and Reputational data</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Payments, Contracts, Invoices, and Reputational data</h1>
|
||||
<h2>The problem to be solved</h2>
|
||||
<h2>*****</h2>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<h2>*****</h2>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<h2>Value and Exchange</h2>
|
||||
<p>An exchange of value consists of a contract to exchange ("trade"), two
|
||||
transfers of value (value for value), coordination problems
|
||||
("settlement"), and dispute resolution services ("failure").</p>
|
||||
<p> Hence, reliability of exchange is dominated by reliability of transfers.
|
||||
And, transfers of value are dominated by reliability of basic issues of
|
||||
value, including storage.</p>
|
||||
<p> What might be seen as a sort of semantic short-cut is that a value system
|
||||
may be considered reliable if and only if it can participate in an
|
||||
exchange. </p>
|
||||
<p> </p>
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Payments, Contracts, Invoices, and Reputational data</h1>
|
||||
<h2>The problem to be solved</h2>
|
||||
<h2>*****</h2>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<h2>*****</h2>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<p>. </p>
|
||||
<h2>Value and Exchange</h2>
|
||||
<p>An exchange of value consists of a contract to exchange ("trade"), two
|
||||
transfers of value (value for value), coordination problems
|
||||
("settlement"), and dispute resolution services ("failure").</p>
|
||||
<p> Hence, reliability of exchange is dominated by reliability of transfers.
|
||||
And, transfers of value are dominated by reliability of basic issues of
|
||||
value, including storage.</p>
|
||||
<p> What might be seen as a sort of semantic short-cut is that a value system
|
||||
may be considered reliable if and only if it can participate in an
|
||||
exchange. </p>
|
||||
<p> </p>
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
<p>. </p>
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
<p>. </p>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
title:
|
||||
Proof of Stake
|
||||
Proof of Stake
|
||||
---
|
||||
::: {style="background-color : #ffdddd; font-size:120%"}
|
||||
![run!](tealdeer.gif)[TL;DR Map a blockdag algorithm equivalent to the
|
||||
@ -681,4 +681,3 @@ for](trust_and_privacy_on_the_blockchain.html).
|
||||
|
||||
Peers may have human readable names, and wallets may have names of the
|
||||
form `LoginName@PeerName`.
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -11,110 +11,110 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Protocol Negotiation</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Protocol Negotiation</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Protocol Negotiation</h1>
|
||||
<p> Once a protocol is in use, it becomes very hard to change. If one person
|
||||
updates the server, and the client is not updated, everything breaks. </p>
|
||||
<p> And so, we are stuck with a lot of frozen protocols, many of which are
|
||||
in urgent need of change, but to change, requires wide consensus, which
|
||||
requires a big bunch of people showing up at a meeting, but at such
|
||||
meetings very little gets done, and what gets done is stupid.</p>
|
||||
<p> If a standard is successful, more and more people want to be in the
|
||||
committee, many of whom represent business profit centers and government
|
||||
special interests, and who really do not understand much about the
|
||||
technology, except that any change might be adverse to the very important
|
||||
people who sent them there.</p>
|
||||
<p> As the committee gets larger, it gets more unworkable, and as it
|
||||
represents more and more special interests, it gets more unworkable.</p>
|
||||
<p> In order to have a system where the internet’s protocols can be
|
||||
upgraded, and new protocols introduced, without central coordination,
|
||||
protocol negotiation, where client and server first discuss what protocol
|
||||
version they will be using, has to be part of every protocol, all the way
|
||||
down to the level of TCP and UDP.</p>
|
||||
<p>These days everyone builds in protocol negotiation, often on top of SSL, which is on top of TCP, resulting in three additional round trips.</p>
|
||||
<p>And then a widely distributed client or server breaks the protocol negotiation, which no one notices because it interoperates with all existing implementations, until someone tries to introduce a new protocol, whereupon the new code implementing the new protocol is blamed for its failure to interoperate with the existing clients and/or servers, and then we get another layer of protocol negotiation on top of all the existing layers of protocol negotiation.</p>
|
||||
<p>TCP has built in protocol negotiation, eight bits worth, which turned
|
||||
out, unsurprisingly, to be inadequate.</p>
|
||||
<p> For the content of the internet to be free from central control, we need
|
||||
to ensure that the address spaces and protocols are free from central
|
||||
control.</p>
|
||||
<p> When an old protocol is broken, clients and servers that have not
|
||||
upgraded to a new improved protocol will remain forever, so the old
|
||||
defective protocol has to be supported forever – without, however,
|
||||
allowing an attacker a downgrade attack. </p>
|
||||
<p>To prevent a downgrade attack, there has to be some way of disabling
|
||||
protocols in the field, where the signed ban on certain protocols flood
|
||||
fills from one program to the next.</p>
|
||||
<p> Often, it is impossible to support the old clients, because protocol
|
||||
negotiation was never adequately designed in, or because it was designed
|
||||
in but was designed vulnerable to a downgrade attack.</p>
|
||||
<p>But let us suppose the protocol negotiation was well designed: The
|
||||
committee has to assign a code. And of course, they will only assign
|
||||
this code to a protocol that they agree is right – and nothing gets done,
|
||||
for there is always some vested interest that for some strange and obscure
|
||||
reason does not want this protocol to exist.</p>
|
||||
<p>One solution is to have quite large protocol identifiers, or arbitrarily
|
||||
large variable length protocol identifiers, so that anyone can whip up a
|
||||
protocol and assign it an identifier, and hack a client and server to use
|
||||
it, without having to walk it past three dozen members of the committee. </p>
|
||||
<p>But then, of course, we would probably wind up with a lot of protocols.
|
||||
This could potentially lead to a lot of protocol negotiation round
|
||||
trips </p>
|
||||
<blockquote>
|
||||
<p>Do you speak protocol A? No.</p>
|
||||
<p>Do you speak protocol B? No.</p>
|
||||
<p>Do you speak protocol C? No.</p>
|
||||
<p>Do you speak protocol D? No.</p>
|
||||
<p>Do you speak protocol E? Yes. </p>
|
||||
</blockquote>
|
||||
<p>One solution to this problem is to have complete lists of protocols, call
|
||||
it a protocol dictionary, which dictionary maps the long probabilistically
|
||||
globally unique protocol names to short deterministically unique local
|
||||
protocol names, and gives an order of preference. If the client
|
||||
names a dictionary that it supports, and/or the server names a dictionary
|
||||
that it supports, then they can usually come to immediate agreement. <br/>
|
||||
</p>
|
||||
<p>If, for example, the client wants to talk protocol X, it proposes one or
|
||||
more dictionaries of updates to protocol X, implying that it can talk all
|
||||
the updates listed in each dictionary, and an order of preference among
|
||||
dictionaries</p>
|
||||
<p>If the server recognizes one or more of the dictionaries, it then
|
||||
responds with one of the protocols listed in the first dictionary that it
|
||||
recognizes, by its short dictionary name, and the conversation proceeds.</p>
|
||||
<p>An ordered list of dictionaries is identified by a public key and a short
|
||||
human readable type name. The typename is only unique with respect
|
||||
to the dictionaries signed by this public key, thus ftp version 1, ftp
|
||||
version 2, ftp version 4 ... </p>
|
||||
<p>The globally unique identifier of a dictionary is the hash of the rule
|
||||
identifying its public key, plus its typename and version number.</p>
|
||||
<p>If the server recognizes the hash of the rule identifying the dictionary
|
||||
public key, but not the version number, it responds with the highest
|
||||
version number that it does recognize, and the most favored protocol in
|
||||
that dictionary. Thus if the client requests a protocol of
|
||||
dictionary version n, it has to know dictionaries versions 1 to n, and be
|
||||
able to deal with all protocols in versions 1 to n, if only to the extent
|
||||
that it is able to fail the protocol gracefully. </p>
|
||||
<h3>The one true ciphersuite</h3>
|
||||
<p>Why would you want multiple ciphers?</p>
|
||||
<p>In case one turns out to be weak. </p>
|
||||
<p>OK, suppose one turns out to be weak. Oops, Mallory can now launch a
|
||||
downgrade attack.</p>
|
||||
<p>So, if supporting multiple ciphers, you need a floodfill mechanism where
|
||||
you can disable the bad ciphersuite in the field.</p>
|
||||
<p>Each program supporting a set of ciphersuites has a set of signatures it
|
||||
recognizes as authoritative. If another program that it talks to has
|
||||
a revocation of ciphersuite, and it recognizes one of the signatures on the
|
||||
revocation, the revocation floodfills.</p>
|
||||
<p>So, ideally you should support multiple ciphersuites – but if you do,
|
||||
have a mechanism for field revocation.</p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Protocol Negotiation</h1>
|
||||
<p> Once a protocol is in use, it becomes very hard to change. If one person
|
||||
updates the server, and the client is not updated, everything breaks. </p>
|
||||
<p> And so, we are stuck with a lot of frozen protocols, many of which are
|
||||
in urgent need of change, but to change, requires wide consensus, which
|
||||
requires a big bunch of people showing up at a meeting, but at such
|
||||
meetings very little gets done, and what gets done is stupid.</p>
|
||||
<p> If a standard is successful, more and more people want to be in the
|
||||
committee, many of whom represent business profit centers and government
|
||||
special interests, and who really do not understand much about the
|
||||
technology, except that any change might be adverse to the very important
|
||||
people who sent them there.</p>
|
||||
<p> As the committee gets larger, it gets more unworkable, and as it
|
||||
represents more and more special interests, it gets more unworkable.</p>
|
||||
<p> In order to have a system where the internet’s protocols can be
|
||||
upgraded, and new protocols introduced, without central coordination,
|
||||
protocol negotiation, where client and server first discuss what protocol
|
||||
version they will be using, has to be part of every protocol, all the way
|
||||
down to the level of TCP and UDP.</p>
|
||||
<p>These days everyone builds in protocol negotiation, often on top of SSL, which is on top of TCP, resulting in three additional round trips.</p>
|
||||
<p>And then a widely distributed client or server breaks the protocol negotiation, which no one notices because it interoperates with all existing implementations, until someone tries to introduce a new protocol, whereupon the new code implementing the new protocol is blamed for its failure to interoperate with the existing clients and/or servers, and then we get another layer of protocol negotiation on top of all the existing layers of protocol negotiation.</p>
|
||||
<p>TCP has built in protocol negotiation, eight bits worth, which turned
|
||||
out, unsurprisingly, to be inadequate.</p>
|
||||
<p> For the content of the internet to be free from central control, we need
|
||||
to ensure that the address spaces and protocols are free from central
|
||||
control.</p>
|
||||
<p> When an old protocol is broken, clients and servers that have not
|
||||
upgraded to a new improved protocol will remain forever, so the old
|
||||
defective protocol has to be supported forever – without, however,
|
||||
allowing an attacker a downgrade attack. </p>
|
||||
<p>To prevent a downgrade attack, there has to be some way of disabling
|
||||
protocols in the field, where the signed ban on certain protocols flood
|
||||
fills from one program to the next.</p>
|
||||
<p> Often, it is impossible to support the old clients, because protocol
|
||||
negotiation was never adequately designed in, or because it was designed
|
||||
in but was designed vulnerable to a downgrade attack.</p>
|
||||
<p>But let us suppose the protocol negotiation was well designed: The
|
||||
committee has to assign a code. And of course, they will only assign
|
||||
this code to a protocol that they agree is right – and nothing gets done,
|
||||
for there is always some vested interest that for some strange and obscure
|
||||
reason does not want this protocol to exist.</p>
|
||||
<p>One solution is to have quite large protocol identifiers, or arbitrarily
|
||||
large variable length protocol identifiers, so that anyone can whip up a
|
||||
protocol and assign it an identifier, and hack a client and server to use
|
||||
it, without having to walk it past three dozen members of the committee. </p>
|
||||
<p>But then, of course, we would probably wind up with a lot of protocols.
|
||||
This could potentially lead to a lot of protocol negotiation round
|
||||
trips </p>
|
||||
<blockquote>
|
||||
<p>Do you speak protocol A? No.</p>
|
||||
<p>Do you speak protocol B? No.</p>
|
||||
<p>Do you speak protocol C? No.</p>
|
||||
<p>Do you speak protocol D? No.</p>
|
||||
<p>Do you speak protocol E? Yes. </p>
|
||||
</blockquote>
|
||||
<p>One solution to this problem is to have complete lists of protocols, call
|
||||
it a protocol dictionary, which dictionary maps the long probabilistically
|
||||
globally unique protocol names to short deterministically unique local
|
||||
protocol names, and gives an order of preference. If the client
|
||||
names a dictionary that it supports, and/or the server names a dictionary
|
||||
that it supports, then they can usually come to immediate agreement. <br/>
|
||||
</p>
|
||||
<p>If, for example, the client wants to talk protocol X, it proposes one or
|
||||
more dictionaries of updates to protocol X, implying that it can talk all
|
||||
the updates listed in each dictionary, and an order of preference among
|
||||
dictionaries</p>
|
||||
<p>If the server recognizes one or more of the dictionaries, it then
|
||||
responds with one of the protocols listed in the first dictionary that it
|
||||
recognizes, by its short dictionary name, and the conversation proceeds.</p>
|
||||
<p>An ordered list of dictionaries is identified by a public key and a short
|
||||
human readable type name. The typename is only unique with respect
|
||||
to the dictionaries signed by this public key, thus ftp version 1, ftp
|
||||
version 2, ftp version 4 ... </p>
|
||||
<p>The globally unique identifier of a dictionary is the hash of the rule
|
||||
identifying its public key, plus its typename and version number.</p>
|
||||
<p>If the server recognizes the hash of the rule identifying the dictionary
|
||||
public key, but not the version number, it responds with the highest
|
||||
version number that it does recognize, and the most favored protocol in
|
||||
that dictionary. Thus if the client requests a protocol of
|
||||
dictionary version n, it has to know dictionaries versions 1 to n, and be
|
||||
able to deal with all protocols in versions 1 to n, if only to the extent
|
||||
that it is able to fail the protocol gracefully. </p>
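<p>A minimal sketch of the happy path (the type and field names here are
invented for illustration, and the version-fallback case described above is
left out): the client offers dictionary identifiers in order of preference,
and the server picks the first one it also knows.</p>
<pre>
#include &lt;cstring&gt;   /* also assumes &lt;cstdint&gt; and &lt;cstddef&gt; */

struct ProtocolDictionary {
    unsigned char id[32];      /* hash of signing-key rule + typename + version */
    unsigned protocol_count;   /* short local names are just indexes 0 .. count-1 */
};

/* Returns the index into "known" of the first dictionary the client offered
   that the server also knows, or -1 if there is no overlap. */
int choose_dictionary(const ProtocolDictionary* offered, unsigned n_offered,
                      const ProtocolDictionary* known, unsigned n_known)
{
    for (unsigned i = 0; i != n_offered; ++i)
        for (unsigned j = 0; j != n_known; ++j)
            if (memcmp(offered[i].id, known[j].id, 32) == 0)
                return (int)j;   /* agreement: answer with the preferred protocol in it */
    return -1;                   /* no shared dictionary: fall back or fail gracefully */
}
</pre>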
|
||||
<h3>The one true ciphersuite</h3>
|
||||
<p>Why would you want multiple ciphers?</p>
|
||||
<p>In case one turns out to be weak. </p>
|
||||
<p>OK, suppose one turns out to be weak. Oops, Mallory can now launch a
|
||||
downgrade attack.</p>
|
||||
<p>So, if supporting multiple ciphers, you need a floodfill mechanism where
|
||||
you can disable the bad ciphersuite in the field.</p>
|
||||
<p>Each program supporting a set of ciphersuites has a set of signatures it
|
||||
recognizes as authoritative. If another program that it talks to has
|
||||
a revocation of ciphersuite, and it recognizes one of the signatures on the
|
||||
revocation, the revocation floodfills.</p>
|
||||
<p>So, ideally you should support multiple ciphersuites – but if you do,
|
||||
have a mechanism for field revocation.</p>
|
||||
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -11,12 +11,12 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Protocol Specification</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Protocol Specification</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Protocol Specification</h1><p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Protocol Specification</h1><p>
|
||||
|
||||
In general, supposing your server, written in Javascript in the node.js environment, is assembling a response to an end user from services provided by other servers, is there any reasonable alternative to having them all talk JSON?</p><p>
|
||||
|
||||
@ -45,18 +45,18 @@ Your avro is in javascript, and is, I assume, expected to run under node.js Apac
|
||||
|
||||
https://cryptonotestarter.org/create-wallet.html</p><p>
|
||||
|
||||
What is a protocol? Protocols wind up being defined by implementations,
|
||||
which people attempt, not very successfully, to reverse engineer. By trial
|
||||
and error they get their client to work with the existing server, and
|
||||
their server to work with the existing client, and so an ill defined
|
||||
protocol becomes over time even worse defined.</p><p>
|
||||
What is a protocol? Protocols wind up being defined by implementations,
|
||||
which people attempt, not very successfully, to reverse engineer. By trial
|
||||
and error they get their client to work with the existing server, and
|
||||
their server to work with the existing client, and so an ill defined
|
||||
protocol becomes over time even worse defined.</p><p>
|
||||
|
||||
To address this problem, we have ASN.1, ASN.1 PER, and ASN.1 DER</p><p>
|
||||
To address this problem, we have ASN.1, ASN.1 PER, and ASN.1 DER</p><p>
|
||||
|
||||
ASN.1 is a language for describing data.</p><p>
|
||||
|
||||
|
||||
It is also a compiler for generating C code to process the data described. Some people complain that DER is too complex for anyone to get right.</p><p>
|
||||
It is also a compiler for generating C code to process the data described. Some people complain that DER is too complex for anyone to get right.</p><p>
|
||||
|
||||
The library https://github.com/vlm/asn1c supports canonical per, but does not entirely support Visual Studio.</p><p>
|
||||
|
||||
@ -94,52 +94,52 @@ Your avro is in javascript, and is, I assume, expected to run under node.js Apac
|
||||
There is a review of more serializers than you can shake a stick at <a href="https://github.com/thekvs/cpp-serializers">https://github.com/thekvs/cpp-serializers</a>
|
||||
|
||||
If you attempt to hand generate code for processing packets described by
|
||||
ASN.1, you will probably get it wrong and your head will explode.
|
||||
Hence ASN.1 is much cursed and condemned. </p><p>
|
||||
ASN.1, you will probably get it wrong and your head will explode.
|
||||
Hence ASN.1 is much cursed and condemned. </p><p>
|
||||
|
||||
Don’t do that. Don’t hand write code to generate or interpret ASN.1
|
||||
data packets. You are unlikely to succeed, and your code will have
|
||||
mystery bugs.</p>
|
||||
Don’t do that. Don’t hand write code to generate or interpret ASN.1
|
||||
data packets. You are unlikely to succeed, and your code will have
|
||||
mystery bugs.</p>
|
||||
|
||||
<p>ASN.1 PER is ASN.1 data description compiled to produce efficiently
|
||||
compressed data packets that conform to a description in ASN.1, and
|
||||
to decompress them efficiently.</p>
|
||||
<p>ASN.1 PER is ASN.1 data description compiled to produce efficiently
|
||||
compressed data packets that conform to a description in ASN.1, and
|
||||
to decompress them efficiently.</p>
|
||||
|
||||
<p>ASN.1 DER is the data description that generates data packets with a
|
||||
description of what the data packet means, so that if two programs sign
|
||||
the same ASN.1 DER data, they agree not only on the data, but on the
|
||||
meaning of that data, and if one program means the same thing as the other
|
||||
program, the signatures will come out the same.</p>
|
||||
<p>Use it. ASN.1, used right, is what is needed to rigorously define a
|
||||
protocol so that a client written by one person will work with a server
|
||||
written by another.</p>
|
||||
<p>There is much loud cursing about the fact that the data on the wire is
|
||||
humanly incomprehensible, and that the code that converts it into program
|
||||
data structures is humanly incomprehensible. No one should be
|
||||
looking at machine generated code, because machine generated code is
|
||||
notoriously incomprehensible. The question then is, does the
|
||||
compiler work, and is the compiler usable.</p><p>
|
||||
<p>ASN.1 DER is the data description that generates data packets with a
|
||||
description of what the data packet means, so that if two programs sign
|
||||
the same ASN.1 DER data, they agree not only on the data, but on the
|
||||
meaning of that data, and if one program means the same thing as the other
|
||||
program, the signatures will come out the same.</p>
|
||||
<p>Use it. ASN.1, used right, is what is needed to rigorously define a
|
||||
protocol so that a client written by one person will work with a server
|
||||
written by another.</p>
|
||||
<p>There is much loud cursing about the fact that the data on the wire is
|
||||
humanly incomprehensible, and that the code that converts it into program
|
||||
data structures is humanly incomprehensible. No one should be
|
||||
looking at machine generated code, because machine generated code is
|
||||
notoriously incomprehensible. The question then is, does the
|
||||
compiler work, and is the compiler usable.</p><p>
|
||||
There is an <a href="http://lionet.info/asn1c/faq.html">internet tool for compiling asn.1 specifications into C code</a>.</p><p>
|
||||
|
||||
If a program reads DER or BER data, the result is apt to be disastrous. BER and DER can express an arbitrary data structure – and thus can crash the program receiving the data, probably causing it to execute transmitted data as code.</p>
|
||||
<p>You can’t depend on a DER or BER bit string being able to map back into any well-defined ASN.1 object that the program was designed to deal with. </p>
|
||||
<p> Incoming data should be parsed as the expected and bounded size data
|
||||
structure, thus we need something that can generate parsing code from a
|
||||
description of the data at compile time. We need compile time
|
||||
descriptions of the data, not run time descriptions, because the program
|
||||
that uses the incoming data will unavoidably rely on compile time
|
||||
description of the data.
|
||||
<br/>
|
||||
<br/>
|
||||
PER, however, cannot receive unexpected data structures, because the
|
||||
expected data structure is specified at compile time, not run time.
|
||||
Malicious or faulty data will generate an error, not a crash.<br/>
|
||||
<br/>
|
||||
Thus all data should be received as PER or by a format with the properties
|
||||
of PER. </p>
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
<p>You can’t depend on a DER or BER bit string being able to map back into any well-defined ASN.1 object that the program was designed to deal with. </p>
|
||||
<p> Incoming data should be parsed as the expected and bounded size data
|
||||
structure, thus we need something that can generate parsing code from a
|
||||
description of the data at compile time. We need compile time
|
||||
descriptions of the data, not run time descriptions, because the program
|
||||
that uses the incoming data will unavoidably rely on compile time
|
||||
description of the data.
|
||||
<br/>
|
||||
<br/>
|
||||
PER, however, cannot receive unexpected data structures, because the
|
||||
expected data structure is specified at compile time, not run time.
|
||||
Malicious or faulty data will generate an error, not a crash.<br/>
|
||||
<br/>
|
||||
Thus all data should be received as PER or by a format with the properties
|
||||
of PER. </p>
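<p>A sketch of the difference in C++ (the record is invented for
illustration): the receiving code knows the exact, bounded shape of the
record at compile time, so anything else is rejected as an error instead of
being parsed into an arbitrary object.</p>
<pre>
#include &lt;cstring&gt;   /* also assumes &lt;cstdint&gt; and &lt;cstddef&gt; */

struct PaymentRecord {          /* 4 byte tag, 32 byte hash, 1 byte length, name */
    uint32_t      type_tag;
    unsigned char hash[32];
    unsigned char name_len;     /* bounded at compile time: at most 64 */
    char          name[64];
};

/* True only for exactly well formed input: truncated, oversized or malformed
   data is an error, never a crash and never an unexpected data structure. */
bool parse_payment_record(const unsigned char* p, size_t n, PaymentRecord* out)
{
    if (n < 4 + 32 + 1) return false;                          /* truncated */
    out->type_tag = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
                  | ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    memcpy(out->hash, p + 4, 32);
    out->name_len = p[36];
    if (out->name_len > 64) return false;                      /* violates the bound */
    if (n != 4 + 32 + 1 + (size_t)out->name_len) return false; /* short or trailing junk */
    memcpy(out->name, p + 37, out->name_len);
    return true;
}
</pre>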
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
Attribution-Share Alike 3.0 License</a></p>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -1,16 +1,16 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>RPC to the blockchain</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>RPC to the blockchain</title>
|
||||
</head>
|
||||
<body><p><a href="./index.html"> To Home page</a></p>
|
||||
<h1>RPC to the blockchain</h1><p>
|
||||
|
@ -11,7 +11,7 @@
|
||||
p.center {text-align:center;}
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Safe Operating System</title>
|
||||
</head><body>
|
||||
|
||||
|
@ -21,7 +21,7 @@
|
||||
bottom: 0pt;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Configuration Data in a Safe Operating System</title></head>
|
||||
<body>
|
||||
|
||||
@ -97,4 +97,3 @@ message. </p>
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
</body></html>
|
||||
|
||||
|
@ -62,4 +62,3 @@ With forty colors, we have a trillion different composite colors, so we randomly
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
Commons Attribution-Share Alike 3.0 License</a></p>
|
||||
</body></html>
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
@ -16,11 +16,11 @@
|
||||
border: solid 1px black;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Name System</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Name System</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Seed phrase wallets</h1>
|
||||
|
||||
<p>Bitcoin has had big problems with wallet security, and eventually converged on the <a href="https://en.bitcoin.it/wiki/Seed_phrase">seed phrase system</a>. A seed phrase generates a sequence of key pairs.</p>
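<p>A minimal sketch of that idea (an illustration of deterministic derivation
in general, not Bitcoin’s BIP-39/BIP-32 scheme; it assumes libsodium and that
sodium_init() has already been called): hash the master seed together with an
index to get the seed for key pair number i, so the whole sequence can be
regenerated from the phrase alone.</p>
<pre>
#include &lt;sodium.h&gt;
#include &lt;stdint.h&gt;

/* Derive the i-th signing key pair from a 32 byte master seed. Sketch only. */
void derive_keypair(const unsigned char master[32], uint64_t i,
                    unsigned char pk[crypto_sign_PUBLICKEYBYTES],
                    unsigned char sk[crypto_sign_SECRETKEYBYTES])
{
    unsigned char index[8];
    for (int b = 0; b != 8; ++b) index[b] = (unsigned char)(i >> (8 * b));

    unsigned char seed[crypto_sign_SEEDBYTES];
    /* seed_i = keyed-hash(index, key = master); any keyed hash would do */
    crypto_generichash(seed, sizeof seed, index, sizeof index, master, 32);
    crypto_sign_seed_keypair(pk, sk, seed);
}
</pre>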
|
||||
|
@ -12,7 +12,7 @@
|
||||
text-align:center;
|
||||
}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>How to Save the World</title>
|
||||
</head>
|
||||
<body>
|
||||
|
@ -9,7 +9,7 @@
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Spam filtering</title>
|
||||
</head>
|
||||
<body>
|
||||
|
@ -1,20 +1,20 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
|
||||
<style>
|
||||
body {
|
||||
max-width: 30em;
|
||||
margin-left: 2em;
|
||||
}
|
||||
p.center {text-align:center;}
|
||||
</style>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Squaring Zooko’s triangle</title>
|
||||
<link rel="shortcut icon" href="../rho.ico">
|
||||
<title>Squaring Zooko’s triangle</title>
|
||||
</head>
|
||||
<body>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Squaring Zooko’s triangle</h1>
|
||||
<p><a href="./index.html"> To Home page</a> </p>
|
||||
<h1>Squaring Zooko’s triangle</h1>
|
||||
|
||||
<p>
|
||||
Need a system for handing one’s keys around that protects end users from the horrifying sight of actual keys or actual strong hashes of keys.</p>
|
||||
@ -24,89 +24,89 @@ But at the same time the system has to not say, "I can’t deliver your message
|
||||
|
||||
|
||||
|
||||
<p>It seems like the clever bit of CT is the insight that some actions, like
|
||||
a CA signing a cert, are intended to be public, and so should be forced
|
||||
(via clever crypto) to take place in public. This makes me wonder what
|
||||
other crypto actions should also take place in public, in a way that
|
||||
doesn’t permit hiding them from the world. </p>
|
||||
<p>Revocation </p>
|
||||
<p>Software releases </p>
|
||||
<p>Mapping of email address to public key </p>
|
||||
<p>Delegation of DNSSEC keys </p>
|
||||
<p> </p>
|
||||
<p>Of course, globally visible events need to take place at a globally
|
||||
visible time. The most widely available time is GPS time (which is 19
|
||||
seconds off the commonly used time), and which is available from the
|
||||
seldom connected pps line.</p>
|
||||
<p>At present, unfortunately, anyone who wants gps time has to do his own
|
||||
soldering and hack his own software. There is a pre soldered device
|
||||
available, but it is hard to get. </p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p>
|
||||
Imagine skype as originally designed, (central authority maps public and
|
||||
private keys to user names) plus a key continuity feature, plus the seldom
|
||||
used option of doing a zero knowledge shared passphrase to detect man in
|
||||
the middle. </p>
|
||||
<p>
|
||||
The possibility that the zero knowledge check could be used would deter
|
||||
powerful adversaries, even if seldom used in practice. The more powerful,
|
||||
the greater the deterrent effect. </p>
|
||||
<p>
|
||||
It is not totally end to end, central authority can listen in, but the
|
||||
check would limit the amount of listening. </p>
|
||||
<p>
|
||||
It can be made completely end to end for strong passwords. Assume login is
|
||||
by zero knowledge password protocol, which means that the central
|
||||
authority does not know the end user’s password, for strong
|
||||
passwords. </p>
|
||||
<p>
|
||||
The secret key is generated from the strong secret supplied by central
|
||||
authority, plus the password. </p>
|
||||
<p>
|
||||
When you change your password, you generate a certificate mapping your new
|
||||
public key to your old public key, which certificate makes other people’s
|
||||
key continuity check happy. </p>
|
||||
<p>
|
||||
If key continuity fails, people get a warning, but they don’t have to
|
||||
click it away, for that just trains people to click it away. They can just
|
||||
continue right on and not pay attention to it. </p>
|
||||
<p>
|
||||
Or they could use the zero knowledge shared passphrase procedure to detect
|
||||
man in the middle. </p>
|
||||
<p>
|
||||
So, if non paranoid, and using easy passwords, works like skype used to
|
||||
work. No interception except by central authority, and central authority
|
||||
cannot intercept everyone, or even large numbers of people. </p>
|
||||
<p>
|
||||
If paranoid and using strong passwords, provides OTR like end to end
|
||||
capability. </p>
|
||||
<p><br/>
|
||||
</p>
|
||||
<p><br/>
|
||||
</p>
|
||||
<p>Key management is an unsolved problem. In my biased opinion the
|
||||
best<br/>
|
||||
solution was my Crypto Kong, which received limited takeup.<br/>
|
||||
<br/>
|
||||
So, in conclusion, don’t make people manage keys, though that should be an
|
||||
option for the seriously paranoid.<br/>
|
||||
<br/>
|
||||
Instead, autogenerate the keys with zero knowledge passphrase logon.<br/>
|
||||
<br/>
|
||||
If he uses a password weak enough to fall to an offline dictionary attack,
|
||||
this is equivalent to the old skype system, where central authority
|
||||
manages his keys and he has password logon. If he uses a stronger
|
||||
password, equivalent to a salted strong passphrase system.</p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p style="background-color : #ccffcc; font-size:80%">These documents are
|
||||
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
|
||||
<p>It seems like the clever bit of CT is the insight that some actions, like
|
||||
a CA signing a cert, are intended to be public, and so should be forced
|
||||
(via clever crypto) to take place in public. This makes me wonder what
|
||||
other crypto actions should also take place in public, in a way that
|
||||
doesn’t permit hiding them from the world. </p>
|
||||
<p>Revocation </p>
|
||||
<p>Software releases </p>
|
||||
<p>Mapping of email address to public key </p>
|
||||
<p>Delegation of DNSSEC keys </p>
|
||||
<p> </p>
|
||||
<p>Of course, globally visible events need to take place at a globally
|
||||
visible time. The most widely available time is GPS time (which is 19
|
||||
seconds off the commonly used time), and which is available from the
|
||||
seldom connected pps line.</p>
<p>At present, unfortunately, anyone who wants GPS time has to do his own
soldering and hack his own software. There is a pre-soldered device
available, but it is hard to get. </p>
<p> </p>
<p>
Imagine Skype as originally designed (central authority maps public and
private keys to user names), plus a key continuity feature, plus the
seldom used option of doing a zero knowledge shared passphrase check to
detect a man in the middle. </p>
<p>
The possibility that the zero knowledge check could be used would deter
powerful adversaries, even if it were seldom used in practice. The more
powerful the adversary, the greater the deterrent effect. </p>
<p>
It is not totally end to end, since the central authority can listen in,
but the check would limit the amount of listening. </p>
<p>
It can be made completely end to end for strong passwords. Assume login is
by a zero knowledge password protocol, which means that, for strong
passwords, the central authority does not know the end user’s
password. </p>
<p>
The secret key is generated from the strong secret supplied by the central
authority, plus the password. </p>
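<p>A minimal sketch of that derivation, assuming libsodium, with the server
supplied secret used as the salt of a memory hard password hash (the choice
of primitives here is my assumption, not a specification): the server alone
cannot compute the key, and an attacker needs both the server secret and a
successful guess at the password. </p>
<pre><code>
// Derive a signing key pair from (server-supplied secret, user password).
// Illustrative sketch only; call sodium_init() once before using this.
#include <sodium.h>
#include <array>
#include <string>
#include <stdexcept>

struct KeyPair {
    std::array<unsigned char, crypto_sign_PUBLICKEYBYTES> pub;
    std::array<unsigned char, crypto_sign_SECRETKEYBYTES> sec;
};

KeyPair derive_keypair(const std::string& password,
                       const std::array<unsigned char, crypto_pwhash_SALTBYTES>& server_secret) {
    std::array<unsigned char, crypto_sign_SEEDBYTES> seed;
    // Memory-hard hash of the password, salted with the secret the server
    // hands out after a successful (ideally zero knowledge) login.
    if (crypto_pwhash(seed.data(), seed.size(),
                      password.data(), password.size(),
                      server_secret.data(),
                      crypto_pwhash_OPSLIMIT_INTERACTIVE,
                      crypto_pwhash_MEMLIMIT_INTERACTIVE,
                      crypto_pwhash_ALG_DEFAULT) != 0)
        throw std::runtime_error("out of memory in password hash");
    KeyPair kp;
    // Deterministic: same secret plus same password always gives the same key pair.
    crypto_sign_seed_keypair(kp.pub.data(), kp.sec.data(), seed.data());
    return kp;
}
</code></pre>
<p>A weak password falls to a dictionary attack by whoever holds the server
secret, which reproduces the old Skype trust model; a strong password does
not, which gives the end to end case described below. </p>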
<p>
When you change your password, you generate a certificate mapping your new
public key to your old public key, which certificate makes other people’s
key continuity check happy. </p>
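<p>In the simplest form that certificate is just the new public key signed by
the old key; a hedged libsodium sketch (my construction, not a defined wire
format, and a real one would also bind an identity and a date): </p>
<pre><code>
// Key rollover "certificate": the old key signs the new public key, so a peer
// holding the old public key can accept the new one without a continuity warning.
// Illustrative sketch only.
#include <sodium.h>
#include <algorithm>
#include <array>

struct RolloverCert {
    std::array<unsigned char, crypto_sign_PUBLICKEYBYTES> new_pub;
    std::array<unsigned char, crypto_sign_BYTES> sig_by_old_key;
};

RolloverCert make_rollover(const unsigned char* new_pub, const unsigned char* old_sec) {
    RolloverCert cert;
    std::copy(new_pub, new_pub + crypto_sign_PUBLICKEYBYTES, cert.new_pub.begin());
    crypto_sign_detached(cert.sig_by_old_key.data(), nullptr,
                         cert.new_pub.data(), cert.new_pub.size(), old_sec);
    return cert;
}

// Verifier side of the key continuity check.
bool check_rollover(const RolloverCert& cert, const unsigned char* old_pub) {
    return crypto_sign_verify_detached(cert.sig_by_old_key.data(),
                                       cert.new_pub.data(), cert.new_pub.size(),
                                       old_pub) == 0;
}
</code></pre>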
<p>
If key continuity fails, people get a warning, but they don’t have to
click it away, for that just trains people to click it away. They can just
continue right on and not pay attention to it. </p>
<p>
Or they could use the zero knowledge shared passphrase procedure to detect
a man in the middle. </p>
<p>
So, if non paranoid and using easy passwords, it works the way Skype used to
work: no interception except by the central authority, and the central
authority cannot intercept everyone, or even large numbers of people. </p>
<p>
If paranoid and using strong passwords, it provides OTR like end to end
capability. </p>
<p><br/>
</p>
<p><br/>
</p>
<p>Key management is an unsolved problem. In my biased opinion the best<br/>
solution was my Crypto Kong, which received limited takeup.<br/>
<br/>
So, in conclusion, don’t make people manage keys, though that should be an
option for the seriously paranoid.<br/>
<br/>
Instead, autogenerate the keys with zero knowledge passphrase logon.<br/>
<br/>
If he uses a password weak enough to fall to an offline dictionary attack,
this is equivalent to the old Skype system, where the central authority
manages his keys and he has password logon. If he uses a stronger
password, it is equivalent to a salted strong passphrase system.</p>
<p> </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons
Attribution-Share Alike 3.0 License</a></p>
</body>
</html>
@@ -253,7 +253,7 @@ by the Venetian Friar
<a href="http://www-groups.dcs.st-andrews.ac.uk/~history/Mathematicians/Pacioli.html">
Luca Pacioli</a>
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_LP" name="back_LP">LP</a>]</small>.
In his treatise, Pacioli documented many standard
techniques, including a chapter on accounting.

@@ -284,7 +284,7 @@ was never capable of being fielded. The replacement
double entry system was fielded in early 1996 and
has never lost a transaction
(although there have been some close shaves
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_IG1" name="back_IG1">IG1</a>]</small>).
</p>

@@ -417,7 +417,7 @@ effective form of signature, and
<i>public key cryptosystems</i> provide
another form where signers hold a private
key and verifiers hold a public key
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_MB" name="back_MB">MB</a>]</small>.

There are also many ways to attack the

@@ -437,14 +437,14 @@ At first it was suggested that a
variant known as the
<i>blinded signature</i>
would enable digital cash
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_DC" name="back_DC">DC</a>]</small>.
Then, <i>certificates</i> would
circulate as rights or contracts, in much
the same way as the share certificates
of old and thus replace centralised accounting
systems
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_RAH" name="back_RAH">RAH</a>]</small>.

These ideas took financial cryptography part of

@@ -469,31 +469,31 @@ The Initial Role of a Receipt</a></h3>
<center>
<table bgcolor="#99FFFF" border="1">
<tbody><tr><td>
<table cellspacing="5">
<tbody><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.12.25</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing="5">
<tbody><tr>
<td><i>digital signature</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table></center>

@@ -508,7 +508,7 @@ the Internet, the capabilities of cryptography
and the needs of governance
led to the development of the
<i>signed receipt</i>
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_GH" name="back_GH">GH</a>]</small>.
In order to develop this concept, let us assume
a simple three party payment system,

@@ -571,66 +571,66 @@ of risks that we decided to address.
<p align="center"><i>2: A Signed Receipt</i></p>
<center><table bgcolor="#99FFFF" border="1">
<tbody><tr><td>
<table cellspacing="5">
<tbody><tr>
<td>User's Cheque</td>
<td>
<table bgcolor="#FFBBFF" border="1">
<tbody><tr><td>
<table>
<tbody><tr><td>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Qty</td>
<td>100</td>
</tr><tr>
<td>Com</td>
<td>Pens</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing="5">
<tbody><tr>
<td><i>Alice's sig</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.04.10</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing="5">
<tbody><tr>
<td><i>Ivan's signature</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table></center>

@@ -780,7 +780,7 @@ directs that we store the primary records,
in this case the set of receipts, and we
construct derivative records, the accounting
books, on the fly
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_4NF" name="back_4NF">4NF</a>]</small>.
</p>

@@ -982,21 +982,21 @@ Todd Boyle looked at a similar problem from the point
of view of small business needs in an Internet age,
and reached the same conclusion - triple entry
accounting
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_1" name="back_1">1</a>]</small>.

His starting premises were that:
</p>

<ol><li><p>
The major need is not accounting or payments, per se,
but patterns of exchange - complex patterns of trade;
</p></li><li><p>
Small businesses could not afford large complex
systems that understood these patterns;
</p></li><li><p>
They would not lock themselves into proprietary
frameworks;
</p></li></ol>

<p>

@@ -1137,9 +1137,9 @@ that it imposes well recognised.
<p>
Below are the list of requirements that we
believed to be important
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_2" name="back_2">2</a>]</small>
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_3" name="back_3">3</a>]</small>.
</p>

@@ -1310,7 +1310,7 @@ would have shown a clear audit trail of transactions
and thus late timing and otherwise perverted or
dropped transactions would have been clearly
identified or eliminated completely
<small>[<a href="https://iang.org/papers/triple_entry.html#ref_NG" name="back_NG">NG</a>]</small>.
The emerging scandal in the USA known as
<i>Stockgate</i> would have been impossible

@@ -1435,30 +1435,30 @@ A Relational Model of Data for Large Shared Data Banks

<p>
<b><a name="ref_1">[1]</a></b>
Todd Boyle,
"<a href="http://ledgerism.net/GLT-GLR.htm">
GLT and GLR: conceptual architecture for general ledgers</a>,"
Ledgerism.net, 1997-2005.
</p>

<p>
<b><a name="ref_2">[2]</a></b>
Todd Boyle,
"<a href="http://www.ledgerism.net/STR.htm">
STR software specification</a>,"
Goals, 1-5.
This section adopts that numbering convention.
</p>

<p>
<b><a name="ref_3">[3]</a></b>
Ian Grigg,
various design and requirements documents,
Systemics, unpublished.
</p>

@@ -1,6 +1,6 @@
---
title: >-
  Triple Entry Accounting
---
See [Sox accounting], for why we need to replace Sox accounting with triple entry accounting.

@@ -2,19 +2,19 @@
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>
True Names and TCP
</title>
</head><body>
<h1>True Names and TCP
</h1><p>

@@ -73,4 +73,3 @@ Often the server wants to make sure that the client at one end of a connection i
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

@@ -29,4 +29,3 @@ licensed under the <a rel="license" href="http://creativecommons.org/licenses/by
Commons Attribution-Share Alike 3.0 License</a></p>

</body></html>

@@ -28,4 +28,3 @@ licensed under the <a rel="license" href="http://creativecommons.org/licenses/by
Commons Attribution-Share Alike 3.0 License</a></p>

</body></html>

@@ -472,7 +472,7 @@ orange|3.09

Counting spaces to align. Only editable in fixed font

This allows multiline, but visual studio code does not like it. Visual Studio Code only supports tables that can be intelligibly laid out in visual studio code.

-------------------------------------------------------------
Centered Default Right Left

@@ -501,7 +501,7 @@ In this table, edited in a fixed font, you are using whitespace and blank lines
rows.
----------- ------- --------------- -------------------------

## Grid tables

Allows multiline, and alignment, but visual studio does not like it, and you still have to count those spaces

@@ -533,6 +533,3 @@ Alignments can be specified as with pipe tables, by putting colons at the bounda
+--------------:+:--------------+:------------------:+
| Right | Left | Centered |
+---------------+---------------+--------------------+

@@ -456,4 +456,3 @@ bool IsValidUtf8String(const char* sz){
}
return true;
}

@@ -1,5 +1,5 @@
wallet.exe: stdafx.pch app.obj frame.obj Ilog.obj ISqlit3Impl.obj sqlite3.obj
    link /OUT:wallet.exe /LIBPATH:%WXWIN%\lib\vc_x64_lib *.obj kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib Comctl32.lib Rpcrt4.lib

stdafx.pch: ..\stdafx.cpp ..\app.h ..\frame.h ..\stdafx.h ..\ISqlite3.h
    CL.exe /c /ZI /I%ROINCLUDE% /I%WXWIN%\lib\vc_x64_lib\mswud /JMC /W3 /WX- /diagnostics:classic /sdl /Od /D _DEBUG /D _WINDOWS /D _UNICODE /D UNICODE /Gm- /EHsc /RTC1 /MDd /GS /fp:precise /permissive- /Zc:wchar_t /Zc:forScope /Zc:inline /std:c++17 /Yc"stdafx.h" /Gd /TP /FC /errorReport:prompt ..\stdafx.cpp

@@ -17,7 +17,7 @@ ILog.obj: ..\ILog.cpp stdafx.pch
# don't need /I%ROINCLUDE% /I%WXWIN%\lib\vc_x64_lib\mswud because it is included in \Yu"stdafx.h"

ISqlit3Impl.obj: ..\ISqlit3Impl.cpp ..\ISqlite3.h ..\sqlite3.h
    CL.exe /c /ZI /I%ROINCLUDE% /JMC /W3 /WX- /diagnostics:classic /sdl /Od /D _DEBUG /D _WINDOWS /D _UNICODE /D UNICODE /Gm- /EHsc /RTC1 /MDd /GS /fp:precise /permissive- /Zc:wchar_t /Zc:forScope /Zc:inline /std:c++17 /Gd /TP /FC /errorReport:prompt ..\ISqlit3Impl.cpp

sqlite3.obj: ..\sqlite3.c ..\sqlite3.h
    CL.exe /c /ZI /JMC /nologo /W3 /WX- /diagnostics:classic /sdl /Od /D _DEBUG /D _WINDOWS /D _UNICODE /D UNICODE /Gm- /EHsc /RTC1 /MDd /GS /fp:precise /permissive- /Zc:wchar_t /Zc:forScope /Zc:inline /std:c++17 /Gd /TC /FC /errorReport:prompt ..\sqlite3.c

@@ -811,4 +811,3 @@ namespace ristretto255 {
template<> ristretto255::scalar ro::hex2bin <ristretto255::scalar >(const ro::CompileSizedString< (2 * sizeof(ristretto255::scalar))>&);
template<> ro::CompileSizedString< (2 * sizeof(ristretto255::scalar)) > ro::bin2hex<ristretto255::scalar>(const ristretto255::scalar&);
template<> ro::CompileSizedString< (8 * sizeof(ristretto255::scalar) + 5) / 6> ro::to_base64_string <ristretto255::scalar>(const ristretto255::scalar&);

@@ -86,7 +86,4 @@ namespace ro {
snprintf(&retval[0], retval.size(), "%7u.%03u", seconds, milliseconds);
return retval.data();
}
}

4
slash6.h
@@ -31,7 +31,3 @@ std::enable_if_t<
>base64_to_bytes( T& byteRange, const char* base64Numerals) {
return base64_to_bytes(static_cast<std::nullptr_t>(&byteRange[0]), static_cast<uint_fast32_t>(std::size(byteRange)), base64Numerals);
}

@@ -158,4 +158,3 @@ When we hash a char *, we assume the data is zero delimited, and include the del
hash<256> x <<="quick brown "<<"fox";
hash<256> y <<="the quick" <<"brown fox";
will produce two different hashes.

@@ -11,4 +11,3 @@ AAArho ICON "rho.ico"
// this is not always needed but doesn't hurt (except making the executable
// very slightly larger): this file contains the standard icons, cursors, ...
#include "wx/msw/wx.rc"

@@ -143,4 +143,3 @@ welcome_to_rhocoin::~welcome_to_rhocoin() {
assert(singletonWelcome ==this);
singletonWelcome = nullptr;
}

@@ -68,4 +68,3 @@ src=$wxwin/include/wx/msw/setup.h
cat $src | sed 's/^#define\([[:blank:]]\+\)wxUSE_IPV6\([[:blank:]]\+\).*$/#define\1wxUSE_IPV6\21/g'| sed 's/^#define\([[:blank:]]\+\)WXWIN_COMPATIBILITY_3_0\([[:blank:]]\+\).*$/#define\1WXWIN_COMPATIBILITY_3_0\20/g'| sed 's/^#define\([[:blank:]]\+\)wxUSE_COMPILER_TLS\([[:blank:]]\+\).*$/#define\1wxUSE_COMPILER_TLS\22/g'| sed 's/^#define\([[:blank:]]\+\)wxUSE_STD_CONTAINERS\([[:blank:]]\+\).*$/#define\1wxUSE_STD_CONTAINERS\21/g'| sed 's/^#define\([[:blank:]]\+\)wxUSE_DIALUP_MANAGER\([[:blank:]]\+\).*$/#define\1wxUSE_DIALUP_MANAGER\20/g'| sed 's/^#define\([[:blank:]]\+\)WXWIN_COMPATIBILITY_3_0\([[:blank:]]\+\).*$/#define\1WXWIN_COMPATIBILITY_3_0\20/g'| sed 's/^#define\([[:blank:]]\+\)wxUSE_STD_STRING_CONV_IN_WXSTRING\([[:blank:]]\+\).*$/#define\1wxUSE_STD_STRING_CONV_IN_WXSTRING\21/g'> tempx
mv tempx $src
docs/mkdocs.sh