1
0
forked from cheng/wallet

cleanup, and just do not like pdfs

Also, needed to understand Byzantine fault tolerant paxos better.

Still do not.
This commit is contained in:
reaction.la 2022-02-19 06:59:12 +10:00
parent e49662106b
commit 5238cda077
No known key found for this signature in database
GPG Key ID: 99914792148C8388
121 changed files with 9740 additions and 55283 deletions

2
.gitattributes vendored
View File

@ -3,7 +3,7 @@
# and leave all files detected as binary untouched.
* text=auto
# Force the following filetypes to have unix eols and encoding, so that Windows does not break them.
# If a file is going to be used on linux and windows, we want it invariant,
# If a file is going to be used on linux and windows, we want it invariant,
# rather than automatically translated, because automatic translation always screw things up.
.gitignore text eol=lf encoding=utf-8 whitespace=trailing-space,space-before-tab,tabwidth=4
.gitattributes text eol=lf encoding=utf-8 whitespace=trailing-space,space-before-tab,tabwidth=4

View File

@ -10,23 +10,24 @@
graph = log --max-count=18 --graph --pretty=format:'%C(auto)%h %s %Cgreen(%cr) %C(bold blue)%cn %G?%Creset' --abbrev-commit
alias = ! git config --get-regexp ^alias\\. | sed -e s/^alias\\.// -e s/\\ /\\ =\\ / | grep -v ^'alias ' | sort
fixws = !"\
if (! git diff-files --quiet .) && \
(! git diff-index --quiet --cached HEAD) ; then \
git commit -m FIXWS_SAVE_INDEX && \
git add -u :/ && \
git commit -m Fix_whitespace && \
git rebase --whitespace=fix HEAD~2 && \
git reset HEAD~ && \
git reset --soft HEAD~ ; \
elif (! git diff-files --quiet .) ; then \
git add -u :/ && \
git commit -m Fix_whitespace && \
git rebase --whitespace=fix HEAD~ && \
git reset HEAD~ ; \
elif (! git diff-index --quiet --cached HEAD) ; then \
git commit -m FIXWS_SAVE_INDEX && \
git rebase --whitespace=fix HEAD~ && \
git reset --soft HEAD~ ; \
fi"
if (! git diff-files --quiet .) && \
(! git diff-index --quiet --cached HEAD) ; then \
git commit -m FIXWS_SAVE_INDEX && \
git add -u :/ && \
git commit -m Fix_whitespace && \
git rebase --whitespace=fix HEAD~2 && \
git reset HEAD~ && \
git reset --soft HEAD~ ; \
elif (! git diff-files --quiet .) ; then \
git add -u :/ && \
git commit -m Fix_whitespace && \
git rebase --whitespace=fix HEAD~ && \
git reset HEAD~ ; \
elif (! git diff-index --quiet --cached HEAD) ; then \
git commit -m FIXWS_SAVE_INDEX && \
git rebase --whitespace=fix HEAD~ && \
git reset --soft HEAD~ ; \
fi"
check-whitespace = !"git diff --check $(git hash-object -t tree /dev/null) HEAD"
[commit]
gpgSign = true

2
.gitignore vendored
View File

@ -10,7 +10,7 @@
*.sln.docstates
*.exe
*.idb
*.vcxproj.filters
*.vcxproj.filters
*.html
*.htm
wallet.cppcheck

View File

@ -20,7 +20,7 @@ void ILogVerbose(const char* sz) {
void ILogDebug(const char* sz) {
wxLogDebug(_wx("%s"), _wx(sz));
} //is the right function for debug output. It only does anything at all in the
//debug mode(when the preprocessor symbol WXDEBUG is defined) and expands to
//debug mode(when the preprocessor symbol WXDEBUG is defined) and expands to
//nothing in release mode(otherwise).Note that under Windows, you must either
//run the program under debugger or use a 3rd party program such as DebugView
@ -38,4 +38,3 @@ void queue_fatal_error(const char* psz) {
queue_error_message(psz);
singletonFrame->Close();
}

View File

@ -93,4 +93,3 @@ void sqlite3_init();
extern "C" {
int sqlite3_shutdown(void);
}

View File

@ -6,39 +6,39 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>LICENSE</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
</head>
<body>
<header id="title-block-header">

View File

@ -1,5 +1,4 @@
---
generator:
title: LICENSE
---
Copyright © 2021 reaction.la gpg key 154588427F2709CD9D7146B01C99BB982002C39F

View File

@ -6,39 +6,39 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>NOTICE</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
</head>
<body>
<header id="title-block-header">

View File

@ -87,7 +87,7 @@
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
@ -101,7 +101,6 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
</head>
<body>
<header id="title-block-header">
@ -121,20 +120,7 @@ build the program and run unit test for the first time, launch the Visual
Studio X64 native tools command prompt in the cloned directory, then:</p>
<pre class="bat"><code>winConfigure.bat</code></pre>
<p>winConfigure.bat also configures the repository you just created to use
<<<<<<< HEAD
<code>.gitconfig</code> in the repository, causing git to rquire to implement gpg signed
commits because cryptographic software is under attack from NSA,
entryists, and shills, who seek to introduce backdoors.</p>
<p>This may be inconvenient if you do not have gpg installed and set up.</p>
<p>It adds several git aliases:</p>
<ol type="1">
<li><code>git lg</code> to display the gpg trust information for the las three commits.
For this to be useful you need to import the repository public key
public_key.gpg` into gpg, and locally sign that key.</li>
<li><code>git fixws</code> to standardise white space to the project standards</li>
<li><code>git graph</code> to graph the commit tree, and git alias to display the git aliases.</li>
=======
<code>.gitconfig</code> in the repository, causing git to require to implement GPG signed
<code>.gitconfig</code> in the repository, causing git to to implement GPG signed
commits because <a href="./docs/contributor_code_of_conduct.html#code-will-be-cryptographically-signed" target="_blank" title="Contributor Code of Conduct">cryptographic software is under attack</a> from NSA
entryists, and shills, who seek to introduce backdoors.</p>
<p>This may be inconvenient if you do not have <code>gpg</code> installed and set up.</p>
@ -146,13 +132,12 @@ For this to be useful you need to import the repository public key
<li><code>git fixws</code> to standardise white space to the project standards</li>
<li><code>git graph</code> to graph the commit tree</li>
<li><code>git alias</code> to display the git aliases.</li>
>>>>>>> origin/master
</ol>
<div class="sourceCode" id="cb3"><pre class="sourceCode bash"><code class="sourceCode bash"><span id="cb3-1"><a href="#cb3-1" aria-hidden="true" tabindex="-1"></a><span class="co"># To verify that the signature on future pulls is unchanged. </span></span>
<div class="sourceCode" id="cb3"><pre class="sourceCode bash"><code class="sourceCode bash"><span id="cb3-1"><a href="#cb3-1" aria-hidden="true" tabindex="-1"></a><span class="co"># To verify that the signature on future pulls is unchanged.</span></span>
<span id="cb3-2"><a href="#cb3-2" aria-hidden="true" tabindex="-1"></a><span class="ex">gpg</span> <span class="at">--import</span> public_key.gpg</span>
<span id="cb3-3"><a href="#cb3-3" aria-hidden="true" tabindex="-1"></a><span class="ex">gpg</span> <span class="at">--lsign</span> 096EAE16FB8D62E75D243199BC4482E49673711C</span>
<span id="cb3-4"><a href="#cb3-4" aria-hidden="true" tabindex="-1"></a><span class="co"># We ignore the Gpg Web of Trust model and instead use</span></span>
<span id="cb3-5"><a href="#cb3-5" aria-hidden="true" tabindex="-1"></a><span class="co"># the Zooko identity model. </span></span>
<span id="cb3-5"><a href="#cb3-5" aria-hidden="true" tabindex="-1"></a><span class="co"># the Zooko identity model.</span></span>
<span id="cb3-6"><a href="#cb3-6" aria-hidden="true" tabindex="-1"></a><span class="co"># We use Gpg signatures to verify that remote repository</span></span>
<span id="cb3-7"><a href="#cb3-7" aria-hidden="true" tabindex="-1"></a><span class="co"># code is coming from an unchanging entity, not for</span></span>
<span id="cb3-8"><a href="#cb3-8" aria-hidden="true" tabindex="-1"></a><span class="co"># Gpg Web of Trust. Web of Trust is too complicated</span></span>
@ -165,7 +150,7 @@ For this to be useful you need to import the repository public key
<span id="cb3-15"><a href="#cb3-15" aria-hidden="true" tabindex="-1"></a><span class="co"># or the email of someone whom you do not like.</span></span></code></pre></div>
<p>To build the documentation in its intended html form from the markdown
files, execute the bash script file <code>docs/mkdocs.sh</code>, in an environment where
<code>pandoc</code> is available. On Windows, if Git Bash and Pandoc has bee
<code>pandoc</code> is available. On Windows, if Git Bash and Pandoc has been
installed, you should be able to run a shell file in bash by double clicking on it.</p>
<p><a href="./RELEASE_NOTES.html">Pre alpha release</a>, which means it does not yet work even well enough for
it to be apparent what it would do if it did work.</p>

View File

@ -48,11 +48,11 @@ This may be inconvenient if you do not have `gpg` installed and set up.
1. `git alias` to display the git aliases.
```bash
# To verify that the signature on future pulls is unchanged.
# To verify that the signature on future pulls is unchanged.
gpg --import public_key.gpg
gpg --lsign 096EAE16FB8D62E75D243199BC4482E49673711C
# We ignore the Gpg Web of Trust model and instead use
# the Zooko identity model.
# the Zooko identity model.
# We use Gpg signatures to verify that remote repository
# code is coming from an unchanging entity, not for
# Gpg Web of Trust. Web of Trust is too complicated

View File

@ -6,39 +6,39 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>Release Notes</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
span.underline{text-decoration: underline;}
div.column{display: inline-block; vertical-align: top; width: 50%;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
ul.task-list{list-style: none;}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="docs/pandoc_templates//style.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<style>
body {
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
td, th {
border: 1px solid #999;
padding: 0.5rem;
text-align: left;
}
h1.title{
text-align: center; font-size: xxx-large;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
</head>
<body>
<header id="title-block-header">

View File

@ -73,30 +73,3 @@ inline auto trailing_zero_bits(uint64_t v) {
}
return c;
}

View File

@ -10,7 +10,3 @@ private:
wxBoxSizer* m_lSizer;
wxBoxSizer* m_rSizer;
};

View File

@ -1,40 +1,40 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Massive Parallelism</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Massive Parallelism</title>
</head>
<body><p>
Digital Ocean, Docker, microservices, Rest, Json and protocol buffers.</p><p>
The world is drifting towards handling massive parallelism through https microservices.</p><p>
Typically you have an nginx reverse proxy distributing https requests to a swarm of docker instances of node.js</p><p>
These communicate by rest, which means that http get and post map to wrapped database operations. On the wire the data is represented as JSON, protocol buffers, or ASN.1.</p><p>
JSON being by far the most popular, despite its inefficiency. It is a strictly text format, that is in principle human readable, though YAML is JSON variant that is far more human readable.</p><p>
Numerous docker instances keep in agreement through the Mongo database, which handles the syncrhonization of massive parallelism, possibly through a sharded cluster. Mongo communicates in binary, but everyone wants to talk to it in JSON, so it has a sort of bastard binary JSON, and can also talk real JSON.</p><p>
The great use of Mongo is coordinating numerous instances, which rapidly runs into scaling problems. Mongo is shardable, albeit sharding it is non trivial.</p><p>
Each instance of these massively parallel applications are contained in docker containers, which are themselves contained in VMs. A docker container is a sort of lightweight VM, which always represents a live, fully configured, running machine, which provides various services over the network, and any permanent effects are stored on a database that is apt to be external to the docker container, so that the virtual machine can be casually destroyed and restarted.</p><p>
To provide private keys to the docker container, have a volume that only one local user has access to, and mount it to the docker container. It then uses that key to get all the other secret keys, possibly by using crypt. But this seems kind of stupid. How about it generates its own unique private key, and then gets that key blessed in the process of accepting the authority of the blessing key.</p><p>
When launched, hits up a service to get its key blessed and register its availability, and thereafter accepts whatever commands are issued on the key chain issued when first it showed up.
</p><p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -15,149 +15,149 @@
<link rel="shortcut icon" href="../rho.ico"><title>May scale of monetary hardness</title>
</head>
<body>
<h1>May scale of monetary hardness</h1>
<p><a href="./index.html"> To Home page</a> </p>
<p>
J.C. May defined the following scale of monetary hardness.
The following is mostly his words, edited to bring them up to
date.</p>
<table border="1" cellpadding="6" cellspacing="0" width="95%">
<tbody>
<tr>
<td colspan="2" style="background-color: #99CC66;
<h1>May scale of monetary hardness</h1>
<p><a href="./index.html"> To Home page</a> </p>
<p>
J.C. May defined the following scale of monetary hardness.
The following is mostly his words, edited to bring them up to
date.</p>
<table border="1" cellpadding="6" cellspacing="0" width="95%">
<tbody>
<tr>
<td colspan="2" style="background-color: #99CC66;
text-align:center;">May Scale of monetary hardness </td>
</tr>
<tr>
<td style="text-align:center;"><b> Hardness</b> </td>
<td> <br/>
</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Hard</td>
</tr>
<tr>
<td class="center"><b>1</b></td>
<td>Street cash, US dollars</td>
</tr>
<tr>
<td class="center"><b>2</b></td>
<td>Street cash, euro currencies, japan</td>
</tr>
<tr>
<td class="center"><b>3</b></td>
<td>Major crypto currencies, such as Bitcoin and Monaro</td>
</tr>
<tr>
<td class="center"><b>4</b></td>
<td>Street cash, other regions</td>
</tr>
<tr>
<td class="center"><b>5</b></td>
<td>Interbank transfers of various sorts (wires etc),
bank checks</td>
</tr>
<tr>
<td class="center"><b>6</b></td>
<td>personal checks</td>
</tr>
<tr>
<td class="center"><b>7</b>
</td>
<td>Consumer-level electronic account transfers (eg
bPay)</td>
</tr>
<tr>
<td class="center"><b>8</b></td>
<td>Business-account-level retail transfer systems</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Soft</td>
</tr>
<tr>
<td class="center"><b>9</b></td>
<td>Paypal and similar 'new money' entities, beenz</td>
</tr>
<tr>
<td class="center"><b>10</b></td>
<td>Credit cards</td>
</tr>
</tbody>
</table>
</tr>
<tr>
<td style="text-align:center;"><b> Hardness</b> </td>
<td> <br/>
</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Hard</td>
</tr>
<tr>
<td class="center"><b>1</b></td>
<td>Street cash, US dollars</td>
</tr>
<tr>
<td class="center"><b>2</b></td>
<td>Street cash, euro currencies, japan</td>
</tr>
<tr>
<td class="center"><b>3</b></td>
<td>Major crypto currencies, such as Bitcoin and Monaro</td>
</tr>
<tr>
<td class="center"><b>4</b></td>
<td>Street cash, other regions</td>
</tr>
<tr>
<td class="center"><b>5</b></td>
<td>Interbank transfers of various sorts (wires etc),
bank checks</td>
</tr>
<tr>
<td class="center"><b>6</b></td>
<td>personal checks</td>
</tr>
<tr>
<td class="center"><b>7</b>
</td>
<td>Consumer-level electronic account transfers (eg
bPay)</td>
</tr>
<tr>
<td class="center"><b>8</b></td>
<td>Business-account-level retail transfer systems</td>
</tr>
<tr>
<td colspan="2" style=" text-align:center;">Soft</td>
</tr>
<tr>
<td class="center"><b>9</b></td>
<td>Paypal and similar 'new money' entities, beenz</td>
</tr>
<tr>
<td class="center"><b>10</b></td>
<td>Credit cards</td>
</tr>
</tbody>
</table>
<h2 class="green">Three essays from different periods follow</h2>
<hr><p>Observe that say stock brokerages definitely do not accept credit cards or
paypal to fund an account. They will only accept instruments that are very hard,
such as wire transfers or certified bank checks.</p><p>
When hard money is required, only money-types with a hardness of about 5
or better will do the job.</p><p>
On the other hand, if you're purchasing an online subscription, or
consumer goods from a large retailer, softer money-types are more acceptable.</p><p>
When dealing with conversions <b>between</b> different types of money,
generally you can only go "downwards" on the May scale.</p><p>
Thus, for example it is very easy to accept cash-dollars, and handout
paypal-dollars in return. But it would be almost impossible to accept credit cards or
paypal-dollars,and hand out cash in return.</p>
<hr/>
When hard money is required, only money-types with a hardness of about 5
or better will do the job.</p><p>
On the other hand, if you're purchasing an online subscription, or
consumer goods from a large retailer, softer money-types are more acceptable.</p><p>
When dealing with conversions <b>between</b> different types of money,
generally you can only go "downwards" on the May scale.</p><p>
Thus, for example it is very easy to accept cash-dollars, and handout
paypal-dollars in return. But it would be almost impossible to accept credit cards or
paypal-dollars,and hand out cash in return.</p>
<hr/>
<p><em>It is extremely significant that <b>individuals</b> tend to require harder money in their transactions.</em></p><p>
Corporations and large bodies <b>can get away with</b> using softer money, as they have more political (in the broad sense) power to affect the outcome of dubious or revoked transactions.</p><p>
For instance, selling you a car, I could only trust you if you pay me
with a hard money. Say, no softer than 5 on the may scale.
No-one takes a personal check when selling a car.</p><p>
A car dealership, though, can trust you with somewhat softer money .. say up to 7/8 on the May scale (they probably would not take credit cards, though).</p><p>
WalMart can trust you all the way through to 10 when you buy goods at WalMart. (WalMart have more political recourse if a payment repudiates.)</p><p>
<b>We are entering the age of the "sovereign individual" where individuals will have ever-more power.</b> More and more, individuals will be able to behave in ways previously reserved for large government or corporate entities. More and more, individuals will be able to fulfill functions previously dominated by large government or corporate entities.</p><p>
For instance, it would have been in inconceivable in <b>1900</b> for one individual to, say, set up and operate a stock market. That would be and could only be the work of a large, powerful, social-political-corporate group.</p><p>
However in <b>2000</b>, one individual could completely program and operate stock market with a few hours programming and a web site.</p><p>
Money systems that are higher up on the may scale are <b>more suitable for individuals</b>.</p><p>
As we move more and more into the age of the "sovereign individual", where individuals will replace many of the functions of corporate/government entities, <b>there will be more and more demand for money systems that are higher-up on the may scale</b>.</p>
<p class="green"> The above essay turned out to be optimistic, but a successor to bitcoin may accomplish what e-gold failed to accomplish.
<hr>
<p class="green">
Original (oldest) essay, where Tim May first proposed the May Scale of Monetary Hardness:<br/>
This essay was written in the time when e-gold appeared to be successful. E-gold attempted to do what Bitcoin is attempting to, and failed. Bitcoin was inspired in substantial part to fix the problems that killed e-gold. The centralized single-point-of-failure ledgers of e-gold came under attack by the state, by scammers, and by state backed scammers.</p>
<pre>
<p class="green">
Original (oldest) essay, where Tim May first proposed the May Scale of Monetary Hardness:<br/>
This essay was written in the time when e-gold appeared to be successful. E-gold attempted to do what Bitcoin is attempting to, and failed. Bitcoin was inspired in substantial part to fix the problems that killed e-gold. The centralized single-point-of-failure ledgers of e-gold came under attack by the state, by scammers, and by state backed scammers.</p>
<pre>
&gt;Your question provokes us to focus on a major factor inhibiting the growth
&gt;of e-gold that theres no common way now to put money into an account fast
&gt;(as in a matter of minutes instead of hours or more likely, days and weeks).
&gt;An ironic situation, considering that e-gold is destined for greatness as
&gt;the currency of the internet.
</pre><p>
Its worth noting that funding say a trading account with your
stock broker is just as "difficult" as buying e-gold. </p><p>
For that matter, funding a new BANK ACCOUNT is just as difficult as
buying e-gold.</p><p>
When you open a stock broking account at etrade or whatever, you
certainly cannotget funds there instantly your options are wire
and wait days, bank check or cashiers check and wait a week or a
personal check and wait a couple of weeks.</p><p>
A stock broking account, like buying e-gold, is a very HARD form of
money. Whenever you are trying to buy a very HARD form of money,
using a softer form of money.
</p>
<p>
Here is the "May Scale" of money hardness (comments invited)
</p>
<pre> --hard--
Its worth noting that funding say a trading account with your
stock broker is just as "difficult" as buying e-gold. </p><p>
For that matter, funding a new BANK ACCOUNT is just as difficult as
buying e-gold.</p><p>
When you open a stock broking account at etrade or whatever, you
certainly cannotget funds there instantly your options are wire
and wait days, bank check or cashiers check and wait a week or a
personal check and wait a couple of weeks.</p><p>
A stock broking account, like buying e-gold, is a very HARD form of
money. Whenever you are trying to buy a very HARD form of money,
using a softer form of money.
</p>
<p>
Here is the "May Scale" of money hardness (comments invited)
</p>
<pre> --hard--
1 street cash, US dollars
2 street cash, euro currencies, Aus, japan
3 egold
@ -171,16 +171,16 @@
10 credit cards
--ludicrously soft!--
</pre>
It is not meant to be definitive (eg, 6 and 7 could perhaps be
swapped; I left out cash on call at your stock broker, which is
probably around "2", etc) but gives a framework to think in.<p>
It is not meant to be definitive (eg, 6 and 7 could perhaps be
swapped; I left out cash on call at your stock broker, which is
probably around "2", etc) but gives a framework to think in.<p>
Now if you're a retailer and you're selling VCRs, sure, you can take
poxy money around the May Scale of 8, 9 or 10.</p><p>
But if you're a "retailer" and what you're selling is money itself
ie, you are selling e-gold, or you are Quick &amp; Reilly it
is EXCEEDINGLY DIFFICULT to accept anything with May Scale &gt; about 5.</p><p>
ie, you are selling e-gold, or you are Quick &amp; Reilly it
is EXCEEDINGLY DIFFICULT to accept anything with May Scale &gt; about 5.</p><p>
(Note that at coconutgold, we simply only accept wires! All the exchange providers for e-gold who accept money on the May Scale of 9 or 10 are very brave, tough, and quite understandably have to charge fairly high premiums to do so!)</p><p>
@ -188,8 +188,8 @@
Observe that at Bananagold, we TAKE IN #3 and PUT OUT #8 .. so thats a very 'secure' transaction. The #3 transactions is essentially not reversible, whereas the #8 transaction is a joke, we could reverse it anytime with a short argument on the phone.)</p><p>
What a surprise! that banks will only accept money that is at the 1 to 4 end of the May Scale, and they are only really happy giving you money on the 6 to 10 end of the May Scale!</p>
What a surprise! that banks will only accept money that is at the 1 to 4 end of the May Scale, and they are only really happy giving you money on the 6 to 10 end of the May Scale!</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,154 +1,154 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Bitrot, Protocol Negotiation, and the Confused Deputy Problem</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Bitrot, Protocol Negotiation, and the Confused Deputy Problem</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Bitrot and Protocol Negotiation</h1>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Bitrot and Protocol Negotiation</h1>
<h2>The problem</h2><p>
One particular case of the bitrot problem was the Microsoft Windows
problem known as “DLL Hell”, DLLs being binary dynamically linked
libraries in Microsoft Windows.&nbsp; </p>
<p> Over time these libraries tended to be upgraded, improved, and changed,
and programs written for the old libraries would develop bugs with the new
libraries, sometimes these bugs were crash and burn bugs, “bitrot”,
sometimes there were unexpected interactions between programs using the
same library, which caused one program to accidentally foul up another, or
enabled one program to maliciously manipulate another. </p>
<p> This problem was solved. The solution was “COM”.&nbsp; In COM, dynamic
linking necessarily involves version negotiation.&nbsp; Mandatory version
negotiation largely relieves bitrot.&nbsp; </p>
<p> In COM, in accordance with Zooko's triangle, each version of a library's
behavior, each library interface, has three names. Describing those names
and their behavior from the point of view of Zooko's triangle, which is
not how most Microsoft programmers would describe them or think about
them: </p>
<ol>
problem known as “DLL Hell”, DLLs being binary dynamically linked
libraries in Microsoft Windows.&nbsp; </p>
<p> Over time these libraries tended to be upgraded, improved, and changed,
and programs written for the old libraries would develop bugs with the new
libraries, sometimes these bugs were crash and burn bugs, “bitrot”,
sometimes there were unexpected interactions between programs using the
same library, which caused one program to accidentally foul up another, or
enabled one program to maliciously manipulate another. </p>
<p> This problem was solved. The solution was “COM”.&nbsp; In COM, dynamic
linking necessarily involves version negotiation.&nbsp; Mandatory version
negotiation largely relieves bitrot.&nbsp; </p>
<p> In COM, in accordance with Zooko's triangle, each version of a library's
behavior, each library interface, has three names. Describing those names
and their behavior from the point of view of Zooko's triangle, which is
not how most Microsoft programmers would describe them or think about
them: </p>
<ol>
<li>The GUID, the globally unique identifier, a very large random number,
a number so large that it was unlikely that any two libraries or two
versions would randomly choose the same number. Compiled software
interacts with other compiled software using this identifier.</li>
a number so large that it was unlikely that any two libraries or two
versions would randomly choose the same number. Compiled software
interacts with other compiled software using this identifier.</li>
<li>The nickname, a human readable user friendly name and version number,
which is not necessarily globally unique.&nbsp; “Nickname” is Zookos
terminology, not what Microsoft calls them. Humans writing code to be
interpreted may use the nickname, though the correct behavior would be
for the code writer to use the petname, and for the development
environment to insert the appropriate GUID, if no GUID is specified, and
adjust the petname to its local value if the GUID is specified. </li>
which is not necessarily globally unique.&nbsp; “Nickname” is Zookos
terminology, not what Microsoft calls them. Humans writing code to be
interpreted may use the nickname, though the correct behavior would be
for the code writer to use the petname, and for the development
environment to insert the appropriate GUID, if no GUID is specified, and
adjust the petname to its local value if the GUID is specified. </li>
<li>It may, and should, have a petname, its registry key, a humanly
readable user friendly local name which is guaranteed unique on the
particular computer on which the library (ActiveX object) has been
installed, but is not necessarily meaningful to the world at large,
though this is not quite implemented.&nbsp; Again, petname is Zookos
terminology, not what Microsoft calls them.&nbsp; The petname, if it
exists, is automatically generated from the nickname.&nbsp; Error
messages should use the petname, though they tend to use the nickname. </li>
</ol>
<p> In order for a program to connect to any COM library (what Microsoft
calls an ActiveX object), it has to do protocol negotiation in order to
get an interface, has to ask for the interface by its globally unique
identifier, so the library always knows what version of the library the
program expects, and will provide that behavior, or, if it cannot provide
that behavior, the program will fail immediately with an error message
explaining the problem.&nbsp; </p>
<p> <em>This solution worked. It solved DLL hell, solved bitrot.&nbsp;</em>
</p>
<p> Windows implementation of this solution was less successful in dealing
with another problem library calls often cross thread and process
boundaries. They provided a general purpose threading solution, also part
of COM, which was hideously complicated and failed dismally.&nbsp; But
they fixed bitrot.&nbsp; </p>
<p> Cross thread and cross process interactions usually wind up being
implemented as message streams and message queues.&nbsp; The correct
approach is to make this explicit, to define the interface as a message
protocol, rather than attempting to hide the underlying message queue
behavior as Microsoft did and pretend it is an ordinary synchronous object
method.&nbsp; Where COM runs on top of message queues, as it does whenever
a call crosses thread or process boundaries, the result is intolerable
obscurity, complexity, and inefficiency which is still a lot better than
the bitrot that it fixed.&nbsp; </p>
particular computer on which the library (ActiveX object) has been
installed, but is not necessarily meaningful to the world at large,
though this is not quite implemented.&nbsp; Again, petname is Zookos
terminology, not what Microsoft calls them.&nbsp; The petname, if it
exists, is automatically generated from the nickname.&nbsp; Error
messages should use the petname, though they tend to use the nickname. </li>
</ol>
<p> In order for a program to connect to any COM library (what Microsoft
calls an ActiveX object), it has to do protocol negotiation in order to
get an interface, has to ask for the interface by its globally unique
identifier, so the library always knows what version of the library the
program expects, and will provide that behavior, or, if it cannot provide
that behavior, the program will fail immediately with an error message
explaining the problem.&nbsp; </p>
<p> <em>This solution worked. It solved DLL hell, solved bitrot.&nbsp;</em>
</p>
<p> Windows implementation of this solution was less successful in dealing
with another problem library calls often cross thread and process
boundaries. They provided a general purpose threading solution, also part
of COM, which was hideously complicated and failed dismally.&nbsp; But
they fixed bitrot.&nbsp; </p>
<p> Cross thread and cross process interactions usually wind up being
implemented as message streams and message queues.&nbsp; The correct
approach is to make this explicit, to define the interface as a message
protocol, rather than attempting to hide the underlying message queue
behavior as Microsoft did and pretend it is an ordinary synchronous object
method.&nbsp; Where COM runs on top of message queues, as it does whenever
a call crosses thread or process boundaries, the result is intolerable
obscurity, complexity, and inefficiency which is still a lot better than
the bitrot that it fixed.&nbsp; </p>
<h2>The blockchain solution</h2><p>
A pool is a shared collection of transactions with a single schema and protocol, but no global transaction order.</p><p>
A blockchain is a shared collection of transactions with a single schema and protocol with a global order and a sequence number for every transaction. Every blockchain has a pool, and transactions are added from the pool to the blockchain by a process for constructing consensus on order.</p><p>
There will be many blockchains, though we would prefer only one. One is likely to emerge supreme, but there will always be forks and competing blockchains, and forks and competition have to be lived with until they are resolved, which is apt to take a long time.</p><p>
Because establishing a global order is costly, there will be many pools without blockchains. If you don't need a global order on transactions, don't pay the costs of constructing one. Usenet was an immensely valuable pool without global order, and it was a great pity that it died. I want to replace it.</p><p>
There will be necessarily be many schemas and many protocols. A blockchain should assign a globally unique arbitrary precision number to each pool, schema, and protocol, but there will be more than one blockchain, and pools outside any one blockchain.</p><p>
Although the number is in principle arbitrary precision, each peer, host, and client will have an arbitrary limit to the precision of the identifiers that they will handle. They have to be able to handle at least sixty three bits, often sixty four bits, and have to be able to fail gracefully with identifiers that exceed their precision. Anything with an identifier that exceeds their precision limit will not exist for them. In practice, most identifiers are likely to be less than eight bits.</p><p>
A peer continually checks in full that its blockchain follows or follows from the blockchain of its peers, an operation that is costly. It terminates communication with any peer that is forking. A client treats its host peer's version of the blockchain as the authoritative one true version of the blockchain.</p><p>
A client necessarily has communications with many peer hosts. If one of its peer hosts has a block number and root hash for that block of the blockchain that is different from that of another peer host, it has to terminate communications with one peer host or the other, and terminate interactions concerning that blockchain with other clients that rely on a peer host with discrepant block for what is purportedly the same blockchain.</p><p>
Every blockchain, every pool, every schema, and every protocol has a short human readable name, but this name is not necessarily globally unique. Indeed, for schemas and protocols, certainly not globally unique, because schemas and protocols are always being updated, and we don't want to change the name every time, and pools are continually being updated, with no two peers on a pool necessarily having exactly the same pool.</p><p>
A blockchain on one peer is the same as the blockchain on another peer if its root hash is the same, or follows, or follows from, the root hash on the other peer, but for pools, we have no definition of a pool being the same that a computer can check. But a pool has a schema and a protocol, and that the computer can check.</p><p>
Schemas and protocols have version numbers, which are deweydecimal sequences of arbitrary precision integers, but even these are not guaranteed to be globally unique, though any one blockchain may choose to ensure that the list of schemas and protocols for which it provides unique arbitrary precision identifiers have globally unique names and deweydecimal numbers.</p><p>
The globally unique identifier of a schema or protocol is a thirty two byte probabilistically unique number, which may be the twenty byte globally unique identifier of a git commit, preceded by as much of the start of the name and the trailing end of the dewey decimal sequence as fits, albeit likely not much fits in twelve bytes.</p><p>
When establishing communication, the setup relies on a hash value that employs the globally unique identifiers of everything relevant to the communication, so that communication will immediately and gracefully fail if the parties establishing communication are in disagreement about what protocols and schemas they are employing. But first they have to figure out what protocols and schemas the other party is using, relying on identifiers that could potentially have conflicting meanings.</p><p>
When establishing communication in a blockchain context, they rely on blockchain unique arbitrary precision integer identifying the schema or protocol. But there may be no such agreed context, or they may be communicating about a pool that is not on the blockchain, or not on any blockchain, for example a transient pool set up to establish a shared multiparty transaction to mingle crypto currency, which requires a blockchain context, but will not be identified by the blockchain, though it will rely on a schema and protocol that is identified by the blockchain.</p><p>
A transient pool set up to organize a multiparty transaction has a nickname, petname, and deweydecimal number, which deweydecimal is likely to be the number of pools of that name the entity starting the pool has attempted, hence highly likely to not be globally unique, or even blockchain unique. Maybe not even unique to that entity.</p><p>
To identify a pool, a schema, or a protocol in a context where there is no authoritative guarantee of unique integer identifier, the parties send a fragment of the probabilistically unique thirty two byte identifier, consisting of a four bit multiple of sixteen bits of offset into that identifier, and sixty bits of the identifier. Thus each full 256 bit identifier has sixteen possible 64 bit identifiers. The parties systematically or randomly change which of the sixteen they use, in order to escape collisions.</p><p>
The parties look up a hash table of recently used 64 bit identifiers, and if that fails, an associative array of sixty four bit identifiers. If there is a collision between sixty four bit identifiers, neither of the colliding sixty four bit entries are entered into the associative array, but when a twofiftysix bit identifier is used that was not found in the hash table, all sixteen sixty four bit identifiers go into the hash table, so that a seldom used two hundred and fifty six bit identifier that has a colliding sixty four bit identifier has its colliding identifier masked by a frequently used two hundred and fifty six bit identifier.</p><p>
In the unlikely event that a collision exists in the space of all sixtyfour bit identifiers known to a particular entity in the associative array, it cannot collide in the space of sixtyfour bit recently used identifiers in the hash table.</p><p>
Thus a pool, a schema, a blockchain, or a protocol is identified by its far from unique nickname and petname, its name plus its usually but not necessarily unique deweydecimal number, its globally unique 256 bit identifier, and sixteen sixty four bit identifiers, which are overwhelmingly likely to be unique, but can fail (very very rarely). If the sixtyfour bit identifier fails then the communication fails that one time, but will succeed, with at least fifteen sixteenths probability, another time.</p><p>
In the highly unlikely event that an identifier has a sixty four bit collision with a commonly used identifier, this is a problem for the less commonly used identifier, since one in sixteen connections will fail. If a hash table has eight thousand entries, corresponding to five hundred commonly used entities, the likelihood of any one randomly generated long identifier having a collision of one of its sixteen short identifiers with a commonly used identifier is one in 100 000 000 000 000, which is too low to worry about. Potential collisions between two rarely used identifiers do not matter, because if someone is using a rarely used identifier, it is likely to be a commonly used identifier for him and the people he is communicating with.</p><p>
The worst case is a collision between two commonly used identifiers, but the more restrictively we define "common" the less likely such a collision. If a group of people are commonly using fifty entities, then the chance that two of those entities have a collision in one of their sixty four bit identifiers, resulting in one of their entities leading to communication failure one sixteenth of the time, is one in 300 000 000 000 000.</p><p>
When setting up a pool for a multi party transaction, the parties initially communicate with each other in a namespace controlled by a single blockchain, hence have blockchain unique identifiers for protocols and schemas, hence the only collision possible is between pools, and there will not be all that many multiparty transactions going at any one time among the people a party is interacting with, though the number of possible multiparty transactions being organized at any one time is potentially very large.</p><p>
If the user is looking for subscribers to a particular pool among subscribers to a hundred thousand pools, each subscriber subscribing to a hundred pools, it will be enormously rare for him to misidentify a subscriber to a different pool as a subscriber to his own pool and then attempt to communicate with that subscriber, and such misidentification will merely slow things down imperceptibly, not halt things. The communication with that subscriber will simply fail. Failure for other reasons is enormously more probable.</p><p>
Sixtyfour bit identifiers suffice for most practical purposes, provided that the communication protocol is such that misidentification of the corresponding twohundredfiftysix bit identifier results in immediate graceful communication failure.</p><p>
There are some cases where sixty four bits do not suffice, for example identifying a transaction on the blockchain. In such cases, we use the blockchain transaction sequence number, which is globally unique to the blockchain, or the full twofiftysix bit identifier, which is probabilistically unique. An entity in the context of a blockchain is identified by its transaction number, and its output/input number within the transaction, though the blockchain itself is identified merely by a short human readable string, which in the presence of forks and competition, is likely to cause potential misidentification. The sequence number of inputs to a transaction follows, rather than precedes, the sequence number of outputs, which resolves potential ambiguity at an average waste of one bit in the rare case that one references an input, rather than an output.</p>
<h3>Fully general solution</h3><p>
If there was a consensus on what identifiers were common, we could get away with variable length identifiers. But this leads to the TLA problem. Hard to establish a consensus, hard to know what it is. If you have a system that drifts towards consensus, such as the English language, what drives the drift is that collision problems happen. But you don't want collision problems to happen, because then you need rather complex code for handling that case, and a complex data structure to describe estimates of what the consensus likely is.</p><p>
So, we have an identifier that establishes an explicit context, a map of probabilistically unique twofiftysix bit identifiers to indefinite precision integers and to short human readable strings. The blockchain provides such a mapping, and the pool references the blockchain. But a pool cannot contain such a mapping, unless it has its own blockchain.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,22 +1,22 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Block Google Analytics</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Block Google Analytics</title>
</head>
<body><a href="./index.html"> To Home page</a>
<h1>Block Google Analytics</h1><p>
Most internet sites use Google Analytics, which downloads an enormous pile of javascript on your browser, which systematically probes your system for one thousand and one privacy holes and weaknesses and reports back to Google Analytics, which then shares some of their spy data with the site that surreptitiously downloaded their enormous pile of hostile spy attack code onto your computer.</p><p>
Modify your hosts file to include the following lines:</p><pre>
0.0.0.0 google-analytics.com
0.0.0.0 www.google-analytics.com

View File

@ -303,7 +303,7 @@ For a bunch of algorithms that plausibly claim to approach the upload
limit, see:
* [Scalable and probabilistic leaderless bft consensus through metastability](https://files.avalabs.org/papers/consensus.pdf)
This explains the underlying concept, that a peer looks at the dag,
make its best guess as to which way consensus is going, and joins
the seeming consensus, which make it more likely to become the
@ -361,7 +361,7 @@ limit, see:
Another blockdag algorithm, but one whose performance has been tested. Can handle high bandwidth, lots of transactions, and achieves fast Byzantine fault resistant total order consensus in time $O(6λ)$, where λ is the upper bound of the network's gossip period.
* [Blockchain-free cryptocurrencies: A framework for truly decentralised fast transactions](https://eprint.iacr.org/2016/871.pdf)
These transactions are indeed truly decentralized, fast, and free from
blocks, assuming all participants download the entire set of
transactions all the time.
@ -404,7 +404,7 @@ understood as academic illustrations of the idea of the general algorithm
for fast and massive blockdag consensus, and not necessarily intended as
ready to roll implementations of that idea.
Here is an even more vague outline of my variant of this idea, I name
Here is an even more vague outline of my variant of this idea, I name
Yabca “Yet another blockdag consensus algorithm”,
I propose proof of stake. The stake of a peer is not the stake it owns, but

644
docs/byzantine_paxos.md Normal file
View File

@ -0,0 +1,644 @@
---
# katex
# notmine
title: >-
Practical Byzantine Fault Tolerance
---
::: centre
Appears in the Proceedings of the Third Symposium on Operating Systems Design and Implementation, New Orleans, USA, February 1999
Miguel Castro and Barbara Liskov
Laboratory for Computer Science, Massachusetts Institute of Technology, 545
Technology Square, Cambridge, MA 02139
:::
# Abstract
This paper describes a new replication algorithm that is able to tolerate Byzantine faults. We believe that Byzantine-fault-tolerant algorithms will be increasingly important in the future because malicious attacks and software errors are increasingly common and can cause faulty nodes to exhibit arbitrary behavior. Whereas previous algorithms assumed a synchronous system or were too slow to be used in practice, the algorithm described in this paper is practical: it works in asynchronous environments like the Internet and incorporates several important optimizations that improve the response time of previous algorithms by more than an order of magnitude. We implemented a Byzantine-fault-tolerant NFS service using our algorithm and measured its performance. The results show that our service is only 3% slower than a standard unreplicated NFS.
# Introduction
Malicious attacks and software errors are increasingly common. The growing reliance of industry and government on online information services makes malicious attacks more attractive and makes the consequences of successful attacks more serious. In addition, the number of software errors is increasing due to the growth in size and complexity of software. Since malicious attacks and software errors can cause faulty nodes to exhibit Byzantine (i.e., arbitrary) behavior, Byzantine-fault-tolerant algorithms are increasingly important.
This paper presents a new, *practical* algorithm for state machine replication [17, 34] that tolerates Byzantine faults. The algorithm offers both liveness and safety
provided at most $\lfloor\frac{(n-1)}{3}\rfloor$ out of a total of $n$
replicas are simultaneously faulty. This means that clients eventually receive replies to their requests and those replies are correct according to linearizability [14, 4]. The algorithm works in asynchronous systems like the Internet and it incorporates important optimizations that enable it to perform efficiently. \
There is a significant body of work on agreement and replication techniques that tolerate Byzantine faults (starting with [19]). However, most earlier work (e.g., [3, 24, 10]) either concerns techniques designed to demonstrate theoretical feasibility that are too inefficient to be used in practice, or assumes synchrony, i.e., relies on known bounds on message delays and process speeds. The systems closest to ours, Rampart [30] and SecureRing [16], were designed to be practical, but they rely on the synchrony assumption for correctness, which is dangerous in the presence of malicious attacks. An attacker may compromise the safety of a service by delaying non-faulty nodes or the communication between them until they are tagged as faulty and excluded from the replica group. Such a denial-of-service attack is generally easier than gaining control over a non-faulty node.
Our algorithm is not vulnerable to this type of attack because it does not rely on synchrony for safety. In addition, it improves the performance of Rampart and SecureRing by more than an order of magnitude as explained in Section 7. It uses only one message round trip to execute read-only operations and two to execute read-write operations. Also, it uses an efficient authentication scheme based on message authentication codes during normal operation; public-key cryptography, which was cited as the major latency [29] and throughput [22] bottleneck in Rampart, is used only when there are faults.
To evaluate our approach, we implemented a replication library and used it to implement a real service: a Byzantine-fault-tolerant distributed file system that supports the NFS protocol. We used the Andrew benchmark [15] to evaluate the performance of our system. The results show that our system is only 3% slower than the standard NFS daemon in the Digital Unix kernel during normal-case operation.
Thus, the paper makes the following contributions:
* It describes the first state-machine replication protocol that correctly survives Byzantine faults in asynchronous networks.
* It describes a number of important optimizations that allow the algorithm to perform well so that it can be used in real systems.
* It describes the implementation of a Byzantine-fault-tolerant distributed file system.
* It provides experimental results that quantify the cost of the replication technique.
The remainder of the paper is organized as follows. We begin by describing our system model, including our failure assumptions. Section 3 describes the problem solved by the algorithm and states correctness conditions. The algorithm is described in Section 4 and some important optimizations are described in Section 5. Section 6 describes our replication library and how we used it to implement a Byzantine-fault-tolerant NFS. Section 7 presents the results of our experiments. Section 8 discusses related work. We conclude with a summary of what we have accomplished and a discussion of future research directions.
# System Model
We assume an asynchronous distributed system where nodes are connected by a network. The network may fail to deliver messages, delay them, duplicate them, or deliver them out of order.
We use a Byzantine failure model, i.e., faulty nodes may behave arbitrarily, subject only to the restriction mentioned below. We assume independent node failures. For this assumption to be true in the presence of malicious attacks, some steps need to be taken, e.g., each node should run different implementations of the service code and operating system and should have a different root password and a different administrator. It is possible to obtain different implementations from the same code base [28] and for low degrees of replication one can buy operating systems from different vendors. N-version programming, i.e., different teams of programmers produce different implementations, is another option for some services.
We use cryptographic techniques to prevent spoofing and replays and to detect corrupted messages. Our messages contain public-key signatures [33], message authentication codes [36], and message digests produced by collision-resistant hash functions [32]. We denote a message $m$ signed by node $i$ as $\{m\}σ_i$
and the digest of message $m$ by $D(m)$. We follow the common practice of signing a digest of a message and appending it to the plaintext of the message rather than signing the full message ($\{m\}σ_i$ should be interpreted in this way). All replicas know the others' public keys to verify signatures.
We allow for a very strong adversary that can coordinate faulty nodes, delay communication, or delay correct nodes in order to cause the most damage to the replicated service. We do assume that the adversary cannot delay correct nodes indefinitely. We also assume that the adversary (and the faulty nodes it controls) are computationally bound so that (with very high probability) it is unable to subvert the cryptographic techniques mentioned above. For example, the adversary cannot produce a valid signature of a non-faulty node, compute the information summarized by a digest from the digest, or find two messages with the same digest. The cryptographic techniques we use are thought to have these properties [33, 36, 32].
# Service Properties
Our algorithm can be used to implement any deterministic replicated *service* with a *state* and some *operations*. The operations are not restricted to simple reads or writes of portions of the service state; they can perform arbitrary deterministic computations using the state and operation arguments. Clients issue requests to the replicated service to invoke operations and block waiting for a reply. The replicated service is implemented by $n$ replicas. Clients and replicas are non-faulty if they follow the algorithm in Section 4 and if no attacker can forge their signature.
The algorithm provides both *safety* and *liveness* assuming no more than
$\lfloor\frac{(n-1)}{3}\rfloor$ replicas are faulty. Safety means that the replicated service satisfies linearizability [14] (modified to account for Byzantine-faulty clients [4]): it behaves like a centralized implementation that executes operations atomically one at a time. Safety requires the bound on the number of faulty replicas because a faulty replica can behave arbitrarily, e.g., it can destroy its state.
Safety is provided regardless of how many faulty clients are using the service (even if they collude with faulty replicas): all operations performed by faulty clients are observed in a consistent way by non-faulty clients. In particular, if the service operations are designed to preserve some invariants on the service state, faulty clients cannot break those invariants.
The safety property is insufficient to guard against faulty clients, e.g., in a file system a faulty client can write garbage data to some shared file. However, we limit the amount of damage a faulty client can do by providing access control: we authenticate clients and deny access if the client issuing a request does not have the right to invoke the operation. Also, services may provide operations to change the access permissions for a client. Since the algorithm ensures that the effects of access revocation operations are observed consistently by all clients, this provides a powerful mechanism to recover from attacks by faulty clients.
The algorithm does not rely on synchrony to provide safety. Therefore, it must rely on synchrony to provide liveness; otherwise it could be used to implement consensus in an asynchronous system, which is not possible [9]. We guarantee liveness, i.e., clients eventually receive replies to their requests, provided at most $\lfloor\frac{(n-1)}{3}\rfloor$ replicas are faulty and $delay(t)$ does not grow faster than $t$ indefinitely. Here, delay is the time between the moment when a message is sent for the first time and the moment when it is received by its destination (assuming the sender keeps retransmitting the message until it is received). (A more precise definition can be found in [4].) This is a rather weak synchrony assumption that is likely to be true in any real system provided network faults are eventually repaired, yet it enables us to circumvent the impossibility result in [9].
The resiliency of our algorithm is optimal: $3f+1$ is the minimum number of replicas that allow an asynchronous system to provide the safety and liveness properties when up to $f$ replicas are faulty (see [2] for a proof). This many replicas are needed because it must be possible to proceed after communicating with $n-f$ replicas, since $f$ replicas might be faulty and not responding. However, it is possible that the replicas that did not respond are not faulty and, therefore, $f$ of those that responded might be faulty. Even so, there must still be enough responses that those from non-faulty replicas outnumber those from faulty ones, i.e., $n-2f>f$. Therefore $n>3f$.
The algorithm does not address the problem of fault-tolerant privacy: a faulty replica may leak information to an attacker. It is not feasible to offer fault-tolerant privacy in the general case because service operations may perform arbitrary computations using their arguments and the service state; replicas need this information in the clear to execute such operations efficiently. It is possible to use secret sharing schemes [35] to obtain privacy even in the presence of a threshold of malicious replicas [13] for the arguments and portions of the state that are opaque to the service operations. We plan to investigate these techniques in the future.
# The Algorithm
Our algorithm is a form of ${state}\, {machine}$ replication [17, 34]: the service is modelled as a state machine that is replicated across different nodes in a distributed system. Each state machine replica maintains the service state and implements the service operations. We denote the set of replicas by $R$ and identify each replica using an integer in $\{0, ..., |R|-1\}$. For simplicity, we assume
$|R|=3f+1$
where $f$ is the maximum number of replicas that may be faulty; although there could be more than $3f+1$
replicas, the additional replicas degrade performance (since more and bigger messages are being exchanged) without providing improved resiliency.
The replicas move through a succession of configurations called views. In a view one replica is the primary and the others are backups. Views are numbered consecutively. The primary of a view is replica $p$ such that $p=v\mod|R|$, where $v$
is the view number. View changes are carried out when it appears that the primary has failed. View stamped Replication [26] and Paxos [18] used a similar approach to tolerate benign faults (as discussed in Section 8.)
The algorithm works roughly as follows:
1. A client sends a request to invoke a service operation to the primary
1. The primary multicasts the request to the backups
1. Replicas execute the request and send a reply to the client
1. The client waits for $f+1$ replies from different replicas with the same result; this is the result of the operation.
Like all state machine replication techniques [34], we impose two requirements on replicas: they must be *deterministic* (i.e., the execution of an operation in a given state and with a given set of arguments must always produce the same result) and they must start in the same state. Given these two requirements, the algorithm ensures the safety property by guaranteeing that *all non-faulty replicas agree on a total order for the execution of requests despite failures*.
The remainder of this section describes a simplified version of the algorithm. We omit discussion of how nodes recover from faults due to lack of space. We also omit details related to message retransmissions. Furthermore, we assume that message authentication is achieved using digital signatures rather than the more efficient scheme based on message authentication codes; Section 5 discusses this issue further. A detailed formalization of the algorithm using the I/O automaton model [21] is presented in [4].
## The Client
A client $c$ requests the execution of state machine operation $o$ by sending\
$\{$REQUEST$,o,t,c\}σ_c$
message to the primary. Timestamp $t$
is used to ensure *exactly-once* semantics for the execution of client requests. Timestamps for
$c$'s requests are totally ordered such that later requests have higher timestamps than earlier ones; for example, the timestamp could be the value of the client's local clock when the request is issued.
Each message sent by the replicas to the client includes the current view number, allowing the client to track the view and hence the current primary. A client sends a request to what it believes is the current primary using a point-to-point message. The primary atomically multicasts the request to all the backups using the protocol described in the next section.
A replica sends the reply to the request directly to the client. The reply has the form\
$\{$REPLY$, v, t, c, i, r\}σ_i$ where $v$
is the current view number,
$t$ is the timestamp of the corresponding request,
$i$ is the replica number, and
$r$ is the result of executing the requested operation.
The client waits for
$f+1$ replies with valid signatures from different replicas, and with the same $t$ and $r$ before accepting the result $r$. This ensures that the result is valid, since at most $f$ replicas can be faulty.
If the client does not receive replies soon enough, it broadcasts the request to all replicas. If the request has already been processed, the replicas simply re-send the reply; replicas remember the last reply message they sent to each client. Otherwise, if the replica is not the primary, it relays the request to the primary. If the primary does not multicast the request to the group, it will eventually be suspected to be faulty by enough replicas to cause a view change.
In this paper we assume that the client waits for one request to complete before sending the next one. But we can allow a client to make asynchronous requests, yet preserve ordering constraints on them.
## Normal-Case Operation
The state of each replica includes the state of the service, a $message\,log$ containing messages the replica has accepted, and an integer denoting the replica's current view. We describe how to truncate the log in Section 5.3.
When the primary, $p$
, receives a client request, $m$
, it starts a three-phase protocol to atomically multicast the request to the replicas. The primary starts the protocol immediately unless the number of messages for which the protocol is in progress exceeds a given maximum. In this case, it buffers the request. Buffered requests are multicast later as a group to cut down on message traffic and CPU overheads under heavy load;this optimization is similar to a group commit in transactional systems [11]. For simplicity, we ignore this optimization in the description below.
The three phases are *pre-prepare, prepare*, and *commit*. The pre-prepare and prepare phases are used to totally order requests sent in the same view even when the primary, which proposes the ordering of requests, is faulty. The prepare and commit phases are used to ensure that requests that commit are totally ordered across views.
In the pre-prepare phase, the primary assigns a sequence number, $n$
, to the request, multicasts a pre-prepare message with $m$
piggybacked to all the backups, and appends the message to its log. The message has the form $\{\{$PRE-PREPARE$, v, n, d\}σ_p,m\}$, where $v$ indicates the view in which the message is being sent,
$m$ is the client's request message, and
$d$ is $m$'s digest.
Requests are not included in pre-prepare messages to keep them small. This is important because pre-prepare messages are used as a proof that the request was assigned sequence number $n$
in view $v$
in view changes. Additionally, it decouples the protocol to totally order requests from the protocol to transmit the request to the replicas; allowing us to use a transport optimized for small messages for protocol messages and a transport optimized for large messages for large requests.
A backup accepts a pre-prepare message provided:
* the signatures in the request and the pre-prepare message are correct and
$d$ is the digest for $m$:
* it is in view $v$:
* it has not accepted a pre-prepare message for view $v$ and sequence number $n$
containing a different digest;
* the sequence number in the pre-prepare message is between a low water mark, $h$
, and a high water mark, $H$.
The last condition prevents a faulty primary from exhausting the space of sequence numbers by selecting a very large one. We discuss how $H$ and $h$
advance in Section 5.3.
If backup $i$
accepts the $\{\{$PRE-PREPARE$, v, n, d\}σ_p,m\}$
message, it enters the prepare phase by multicasting a\
$\{$PREPARE$,v,n,d,i\}σ_i$
message to all other replicas and adds both messages to its log. Otherwise, it does nothing.
A replica (including the primary) accepts prepare messages and adds them to its log provided their signatures are correct, their view number equals the replica's current view, and their sequence number is between $h$
and $H$.
We define the predicate *prepared*$(m,v,n,i)$
to be true if and only if replica $i$
has inserted in its log: the request $m$, a pre-prepare for $m$
in view $v$
with sequence number $n$
, and $2f$
prepares from different backups that match the pre-prepare. The replicas verify whether the prepares match the pre-prepare by checking that they have the same view, sequence number, and digest.
The pre-prepare and prepare phases of the algorithm guarantee that non-faulty replicas agree on a total order for the requests within a view. More precisely, they ensure the following invariant: if *prepared*$(m,v,n,i)$
is true then *prepared*$(m',v,n,j)$
is false for any non-faulty replica $j$
(including $i=j$) and any $m'$
such that $D(m')\not =D(m)$. This is true because *prepared*$(m,v,n,i)$
and $|R|=3f+1$ imply that at least $f+1$ non-faulty replicas have sent a pre-prepare or prepare for $m$
in view $v$
with sequence number $n$. Thus, for *prepared*$(m',v,n,j)$ to be true at least one of these replicas needs to have sent two conflicting prepares (or pre-prepares if it is the primary for $v$), i.e., two prepares with the same view and sequence number and a different digest. But this is not possible because the replica is not faulty. Finally, our assumption about the strength of message digests ensures that the probability that $m\not=m'$ and $D(m)=D(m')$
is negligible.
Replica $i$ multicasts a $\{$COMMIT$,v,n,D(m),i\}σ_i$ to the other replicas when *prepared*$(m,v,n,i)$ becomes true. This starts the commit phase. Replicas accept commit messages and insert them in their log provided they are properly signed, the view number in the message is equal to the replica's current view, and the sequence number is between $h$ and $H$.
We define the *committed* and *committed-local* predicates as follows: *committed*$(m,v,n,i)$
is true if and only if *prepared*$(m,v,n,i)$
is true for all $i$
in some set of $f+1$ non-faulty replicas; and *committed-local*$(m,v,n,i)$ is true if and only if *prepared*$(m,v,n,i)$
is true and $i$
has accepted $2f+1$ commits (possibly including its own) from different replicas that match the pre-prepare for
$m$; a commit matches a pre-prepare if they have the same view, sequence number, and digest.
The commit phase ensures the following invariant: if *committed-local*$(m,v,n,i)$
is true for some non-faulty $i$ then *committed*$(m,v,n,i)$
is true. This invariant and the view-change protocol described in Section 5.4 ensure that non-faulty replicas agree on the sequence numbers of requests that commit locally even if they commit in different views at each replica. Furthermore, it ensures that any request that commits locally at a non-faulty replica will commit at
$f+1$ or more non-faulty replicas eventually.
Each replica $i$
executes the operation requested by $m$ after *committed-local*$(m,v,n,i)$
is true and
$i$'s state reflects the sequential execution of all requests with lower sequence numbers. This ensures that all non-faulty replicas execute requests in the same order as required to provide the safety property. After executing the requested operation, replicas send a reply to the client. Replicas discard requests whose timestamp is lower than the timestamp in the last reply they sent to the client to guarantee exactly-once semantics.
We do not rely on ordered message delivery, and therefore it is possible for a replica to commit requests out of order. This does not matter since it keeps the pre-prepare, prepare, and commit messages logged until the corresponding request can be executed.
Figure 1 shows the operation of the algorithm in the normal case of no primary faults. Replica 0 is the primary,
![Normal Case Operation](./images/practical_byzantine_consensus_fig_1.webp){width=100%}
## Garbage Collection
This section discusses the mechanism used to discard messages from the log. For the safety condition to hold, messages must be kept in a replica's log until it knows that the requests they concern have been executed by at least $f+1$ non-faulty replicas and it can prove this to others in view changes. In addition, if some replica misses messages that were discarded by all non-faulty replicas, it will need to be brought up to date by transferring all or a portion of the service state. Therefore, replicas also need some proof that the state is correct.
Generating
these proofs after executing every operation would be expensive. Instead, they are generated periodically, when a request with a sequence number divisible by some constant (e.g., 100) is executed. We will refer to the states produced by the execution of these requests as *checkpoints* and we will say that a checkpoint with a proof is a *stable checkpoint*.
A replica maintains several logical copies of the service state: the last stable checkpoint, zero or more checkpoints that are not stable, and a current state. Copy-on-write techniques can be used to reduce the space overhead to store the extra copies of the state, as discussed in Section 7.3.
The proof of correctness for a checkpoint is generated as follows. When a replica
produces a checkpoint, it multicasts a message
$\{$CHECKPOINT$,n,d,i\}σ_i$
to the other replicas, where $n$ is the sequence number of the last request whose execution is reflected in the state and $d$
is the digest of the state. Each replica collects checkpoint messages in its log until it has $2f+1$ of them for sequence number $n$
with the same digest signed by different replicas (including possibly its own such message). These $2f+1$ messages are the proof of correctness for the checkpoint.
A checkpoint with a proof becomes stable and the replica discards all pre-prepare, prepare, and commit messages with sequence number less than or equal to
$n$ from its log; it also discards all earlier checkpoints and checkpoint messages.
Computing the proofs is efficient because the digest
can be computed using incremental cryptography [1] as
discussed in Section 7.3, and proofs are generated rarely.
The checkpoint protocol is used to advance the low
and high water marks (which limit what messages will
be accepted). The low-water mark $h$ is equal to the
sequence number of the last stable checkpoint. The high
water mark $H=h+k$, where $k$ is big enough so that
replicas do not stall waiting for a checkpoint to become
stable. For example, if checkpoints are taken every 100
requests, $k$ might be 200.
## View Changes
The view-change protocol provides liveness by allowing the system to make progress when the primary fails. View changes are triggered by timeouts that prevent backups from waiting indefinitely for requests to execute. A backup is *waiting* for a request if it received a valid request and has not executed it. A backup starts a timer when it receives a request and the timer is not already running. It stops the timer when it is no longer waiting to execute the request, but restarts it if at that point it is waiting to execute some other request.
If the timer of backup $i$ expires in view $v$, the backup starts a view change to move the system to view $v+1$. It stops accepting messages (other than checkpoint, view-change, and new-view messages) and multicasts a $\{$VIEW-CHANGE$,v+1,n,C,P,i\}σ_i$ message to all replicas. Here $n$ is the sequence number of the last stable checkpoint $s$ known to $i$, $C$ is a set of $2f+1$ valid checkpoint messages proving the correctness of $s$, and $P$
is a set containing a set $P_m$ for each request that $m$ prepared at $i$ with a sequence number higher than $n$. Each set $P_m$ contains a valid pre-prepare message (without the corresponding client message) and $2f$ matching, valid prepare messages signed by different backups with the same view, sequence number, and the digest of $m$.
When the primary $p$ of view $v+1$ receives $2f$ valid
view-change messages for view $v+1$ from other replicas,
it multicasts a $\{$NEW-VIEW$,v+1,n,V,O\}σ_i$ message to all
other replicas, where $V$ is a set containing the valid view-change messages received by the primary plus the view-change message for $v+1$ the primary sent (or would have sent), and $O$ is a set of pre-prepare messages (without the piggybacked request). $O$ is computed as follows:
1. The primary determines the sequence number *min-s* of the latest stable checkpoint in $V$ and the highest sequence number *max-s* in a prepare message in $V$.
1. The primary creates a new pre-prepare message for view $v+1$ for each sequence number $n$ between *min-s* and *max-s*. There are two cases: (1) there is at least one set in the $P$ component of some view-change message in $V$ with sequence number $n$, or (2) there is no such set. In the first case, the primary creates a new message $\{$PRE-PREPARE$,v+1,n,d\}σ_p$, where $d$
is the request digest in the pre-prepare message for sequence number $n$ with the highest view number in $V$. In the second case, it creates a new pre-prepare message $\{$PRE-PREPARE$,v+1,n,d^{null}\}σ_p$, where $d^{null}$ is the digest of a special *null* request; a null request goes through the protocol like other requests, but its execution is a no-op. (Paxos [18] used a similar technique to fill in gaps.)
Next the primary appends the messages in $O$ to its log. If *min-s* is greater than the sequence number of its latest stable checkpoint, the primary also inserts the proof of stability for the checkpoint with sequence number *min-s* in its log, and discards information from the log as discussed in Section 5.3. Then it enters view $v+1$: at this point it is able to accept messages for view $v+1$.
A backup accepts a new-view message for view $v+1$ if it is signed properly, if the view-change messages it contains are valid for view $v+1$, and if the set
$O$ is correct; it verifies the correctness of $O$
by performing a computation similar to the one used by the primary to create $O$.
Then it adds the new information to its log as described for the primary, multicasts a prepare for each message in $O$
to all the other replicas, adds these prepares to its log, and enters view $v+1$.
Thereafter, the protocol proceeds as described in Section 5.2. Replicas
redo the protocol for messages between *min-s* and *max-s* but they avoid
re-executing client requests (by using their stored information about the
last reply sent to each client).
A replica may be missing some request message $m$ or a stable checkpoint (since these are not sent in new-view messages.) It can obtain missing information from another replica. For example, replica $i$
can obtain a missing checkpoint state $S$
from one of the replicas whose checkpoint messages certified its correctness
in $V$. Since
$f+1$ of those replicas are correct, replica $i$
will always obtain $S$
or a later certified stable checkpoint. We can avoid sending the entire checkpoint by partitioning the state and stamping each partition with the sequence number of the last request that modified it. To bring a replica up to date, it is only necessary to send it the partitions where it is out of date, rather than the whole checkpoint.
## Correctness
This section sketches the proof that the algorithm provides safety and liveness; details can be found in [4].
### Safety
As discussed earlier, the algorithm provides safety if all non-faulty replicas agree on the sequence numbers of requests that commit locally.
In Section 5.2, we showed that if *prepared*$(m,v,n,i)$ is true,
*prepared*$(m',v,n,j)$
is false for any non-faulty replica $j$
(including $i=j$) and any $m'$
such that $D(m')\not=D(m)$. This implies that two non-faulty replicas agree on the sequence number of requests that commit locally in the same view at the two replicas.
The view-change protocol ensures that non-faulty replicas also agree on the sequence number of requests that commit locally in different views at different replicas. A request $m$
commits locally at a non-faulty replica with sequence number $n$
in view $v$
only if *committed*$(m,v,n)$ is true. This means that there is a set $R_1$
containing at least $f+1$ non-faulty replicas such that *prepared*$(m,v,n,i)$ is true for every replica
in the set.
Non-faulty replicas will not accept a pre-prepare for view $v'\gt v$
without having received a new-view message for $v'$
(since only at that point do they enter the view). But any correct new-view message for view $v'\gt v$
contains correct view-change messages from every replica $i$
in a
set
$R_2$ of $2f+1$ replicas. Since there are $3f+1$ replicas, $R_1$ and
$R_2$ must intersect in at least one replica $k$
that is not faulty.
$k$'s view-change message will ensure that the fact that $m$
prepared in a previous view is propagated to subsequent views, unless the new-view message contains a view-change message with a stable checkpoint with a sequence number higher than $n$.
In the first case, the algorithm redoes the three phases of the atomic multicast protocol for $m$
with the same sequence number $n$
and the new view number. This is important because it prevents any different request that was assigned the sequence number $n$
in a previous view from ever committing. In the second case no replica in the new view will accept any message with sequence number lower than $n$.
In either case, the replicas will agree on the request that commits locally with sequence number $n$.
### Liveness
To provide liveness, replicas must move to a new view if they are unable to execute a request. But it is important to maximize the period of time when at least $2f+1$ non-faulty replicas are in the same view, and to ensure that this period of time increases exponentially until some requested operation executes. We achieve these goals by three means.
First, to avoid starting a view change too soon, a replica that multicasts
a view-change message for view $v+1$ waits for $2f+1$ view-change messages for view $v+1$ and then starts its timer to expire after some time $T$.
If the timer expires before it receives a valid new-view message for
$v+1$ or before it executes a request in the new view that it had not executed previously, it starts the view change for view $v+2$
but this time it will wait $2T$ before starting a view change for view
$v+3$.
Second, if a replica receives a set of $f+1$ valid view-change messages from other replicas for views greater than its current view, it sends a view-change message for the smallest view in the set, even if its timer has not expired; this prevents it from starting the next view change too late.
Third, faulty replicas are unable to impede progress by forcing frequent view changes. A faulty replica cannot cause a view change by sending a view-change message, because a view change will happen only if at least
$f+1$ replicas send view-change messages, but it can cause a view change when it is the primary (by not sending messages or sending bad messages). However, because the primary of view $v$
is the replica $p$
such that $p=v\mod|R|$, the primary cannot be faulty for more than
$f$ consecutive views.
These three techniques guarantee liveness unless message delays grow faster than the timeout period indefinitely, which is unlikely in a real system.
## Non-Determinism
State machine replicas must be deterministic but many services involve some form of non-determinism. For example, the time-last-modified in NFS is set by reading the server's local clock; if this were done independently at each replica, the states of non-faulty replicas would diverge. Therefore, some mechanism to ensure that all replicas select the same value is needed. In general, the client cannot select the value because it does not have enough information; for example, it does not know how its request will be ordered relative to concurrent requests by other clients. Instead, the primary needs to select the value either independently or based on values provided by the backups.
If the primary selects the non-deterministic value independently, it concatenates the value with the associated request and executes the three phase protocol to ensure that non-faulty replicas agree on a sequence number for the request and value. This prevents a faulty primary from causing replica state to diverge by sending different values to different replicas. However, a faulty primary might send the same, incorrect, value to all replicas. Therefore, replicas must be able to decide deterministically whether the value is correct (and what to do if it is not) based only on the service state.
This protocol is adequate for most services (including NFS) but
occasionally replicas must participate in selecting the value to satisfy a
service's specification. This can be accomplished by adding an extra phase
to the protocol: the primary obtains authenticated values proposed by the
backups, concatenates $2f+1$ of them with the associated request, and
starts the three phase protocol for the concatenated message. Replicas
choose the value by a deterministic computation on the $2f+1$ values and
their state, e.g., taking the median. The extra phase can be optimized away
in the common case. For example, if replicas need a value that is "close enough" to that of their local clock, the extra phase can be avoided when their clocks are synchronized within some delta.
# Optimizations
This section describes some optimizations that improve the performance of the algorithm during normal-case operation. All the optimizations preserve the liveness and safety properties.
## Reducing Communication
We use three optimizations to reduce the cost of communication. The first avoids sending most large replies. A client request designates a replica to send the result; all other replicas send replies containing just the digest of the result. The digests allow the client to check the correctness of the result while reducing network bandwidth consumption and CPU overhead significantly for large replies. If the client does not receive a correct result from the designated replica, it retransmits the request as usual, requesting all replicas to send full replies.
The second optimization reduces the number of message delays for an operation invocation from 5 to 4. Replicas execute a request *tentatively* as soon as the prepared predicate holds for the request, their state reflects the execution of all requests with lower sequence number, and these requests are all known to have committed. After executing the request, the replicas send tentative replies to the client. The client waits for $2f+1$ matching tentative replies. If it receives this
many, the request is guaranteed to commit eventually. Otherwise, the client retransmits the request and waits for $f+1$ non-tentative replies.
A request that has executed tentatively may abort if there is a view change and it is replaced by a null request. In this case the replica reverts its state to the last stable checkpoint in the new-view message or to its last checkpointed state (depending on which one has the higher sequence number).
The third optimization improves the performance of read-only operations that do not modify the service state. A client multicasts a read-only request to all replicas. Replicas execute the request immediately in their tentative state after checking that the request is properly authenticated, that the client has access, and that the request is in fact read-only. They send the reply only after all requests reflected in the tentative state have committed; this is necessary to prevent the client from observing uncommitted state. The client waits for $2f+1$ replies from different replicas with the same result.
The client may be unable to collect $2f+1$ such replies if there are concurrent writes to data that affect the result; in this case, it retransmits the request as a regular read-write request after its retransmission timer expires.
## Cryptography
In Section 5, we described an algorithm that uses digital signatures to authenticate all messages. However, we actually use digital signatures only for view-change and new-view messages, which are sent rarely, and authenticate all other messages using message authentication codes (MACs). This eliminates the main performance bottleneck in previous systems [29, 22].
However, MACs have a fundamental limitation relative to digital signatures -- the inability to prove that a message is authentic to a third party. The algorithm in Section 5 and previous Byzantine-fault-tolerant algorithms [31, 16] for state machine replication rely on the extra power of digital signatures. We modified our algorithm to circumvent the problem by taking advantage of specific invariants, e.g., the invariant
that no two different requests prepare with the same view and sequence number at two non-faulty replicas.
The modified algorithm is described in [5]. Here we sketch the main implications of using MACs.
MACs can be computed three orders of magnitude faster than digital signatures. For example, a 200MHz Pentium Pro takes 43ms to generate a 1024-bit modulus RSA signature of an MD5 digest and 0.6ms to verify the signature [37], whereas it takes only 10.3µs to compute the MAC of a 64-byte message on the same hardware in our implementation. There are other public-key cryptosystems that generate signatures faster, e.g., elliptic curve public-key cryptosystems, but signature verification is slower [37] and in our algorithm each signature is verified many times.
Each node (including active clients) shares a 16-byte secret session key with each replica. We compute message authentication codes by applying MD5 to the concatenation of the message with the secret key. Rather than using the 16 bytes of the final MD5 digest, we use only the 10 least significant bytes. This truncation has the obvious advantage of reducing the size of MACs and it also improves their resilience to certain attacks [27]. This is a variant of the secret suffix method [36], which is secure as long as MD5 is collision resistant [27, 8].
The digital signature in a reply message is replaced by a single MAC, which is sufficient because these messages have a single intended recipient. The signatures in all other messages (including client requests but excluding view changes) are replaced by vectors of MACs that we call authenticators. An authenticator has an entry for every replica other than the sender; each entry is the MAC computed with the key shared by the sender and the replica corresponding to the entry.
The time to verify an authenticator is constant but the time to generate one grows linearly with the number of replicas. This is not a problem because we do not expect to have a large number of replicas and there is a huge performance gap between MAC and digital signature computation. Furthermore, we compute authenticators efficiently; MD5 is applied to the message once and the resulting context is used to compute each vector entry by applying MD5 to the corresponding session key. For example, in a system with 37 replicas (i.e., a system that can tolerate 12 simultaneous faults) an authenticator can still be computed much more than two orders of magnitude faster than a 1024-bit modulus RSA signature.
The size of authenticators grows linearly with the number of replicas but it grows slowly: it is equal to $30\times\lfloor\frac{n-1}{3}\rfloor$ bytes. An authenticator is
smaller than an RSA signature with a 1024-bit modulus for
$n\le13$ (i.e., systems that can tolerate up to 4 simultaneous faults), which we expect to be true in most configurations.
# Implementation
This section describes our implementation. First we discuss the replication library, which can be used as a basis for any replicated service. In Section 7.2 we describe how we implemented a replicated NFS on top of the replication library. Then we describe how we maintain checkpoints and compute checkpoint digests efficiently.
## The Replication Library
The client interface to the replication library consists of a single procedure, *invoke*, with one argument, an input buffer containing a request to invoke a state machine operation. The *invoke* procedure uses our protocol to execute the requested operation at the replicas and select the correct reply from among the replies of the individual replicas. It returns a pointer to a buffer containing the operation result.
On the server side, the replication code makes a number of upcalls to procedures that the server part of the application must implement. There are procedures to execute requests (*execute*), to maintain checkpoints of the service state (*make checkpoint*, *delete checkpoint*), to obtain the digest of a specified checkpoint (*get digest*), and to obtain missing information (*get checkpoint, set checkpoint*).
The *execute* procedure receives as input a buffer containing the requested operation, executes the operation, and places the result in an output buffer. The other procedures are discussed further in Sections 7.3 and 7.4.
Point-to-point communication between nodes is implemented using UDP, and multicast to the group of replicas is implemented using UDP over IP multicast [7]. There is a single IP multicast group for each service, which contains all the replicas. These communication protocols are unreliable; they may duplicate or lose messages or deliver them out of order.
The algorithm tolerates out-of-order delivery and rejects duplicates. View changes can be used to recover from lost messages, but this is expensive and therefore it is important to perform retransmissions. During normal operation recovery from lost messages is driven by the receiver: backups send negative acknowledgments to the primary when they are out of date and the primary retransmits pre-prepare messages after a long timeout. A reply to a negative acknowledgment may include both a portion of a stable checkpoint and missing messages. During view changes, replicas retransmit view-change messages until they receive a matching new-view message or they move on to a later view.
The replication library does not implement view changes or retransmissions at present. This does not compromise the accuracy of the results given in Section 7 because the rest of the algorithm is completely implemented (including the manipulation of the timers that trigger view changes) and because we have formalized the complete algorithm and proved its correctness [4].
## BFS: A Byzantine-Fault-tolerant File System
We implemented BFS, a Byzantine-fault-tolerant NFS service, using the replication library. Figure 2 shows the architecture of BFS. We opted not to modify the kernel NFS client and server because we did not have the sources for the Digital Unix kernel.
A file system exported by the fault-tolerant NFS service is mounted on the client machine like any regular NFS file system. Application processes run unmodified and interact with the mounted file system through the NFS client in the kernel. We rely on user level relay processes to mediate communication between the standard NFS client and the replicas. A relay receives NFS protocol requests, calls the invoke procedure of our replication library, and sends the result back to the NFS client.
![Replicated File System Architecture](./images/practical_byzantine_consensus_fig_2.webp){width=100%}
Each replica runs a user-level process with the replication library and our NFS V2 daemon, which we will refer to as *snfsd* (for simple *nfsd*). The replication library receives requests from the relay, interacts with *snfsd* by making upcalls, and packages NFS replies into replication protocol replies that it sends to the relay.
We implemented *snfsd* using a fixed-size memory-mapped file. All the file system data structures, e.g., inodes, blocks and their free lists, are in the mapped file. We rely on the operating system to manage the cache of memory-mapped file pages and to write modified pages to disk asynchronously. The current implementation uses 8KB blocks and inodes contain the NFS status information plus 256 bytes of data, which is used to store directory entries in directories, pointers to blocks in files, and text in symbolic links. Directories and files may also use indirect blocks in a way similar to Unix.
Our implementation ensures that all state machine replicas start in the same initial state and are deterministic, which are necessary conditions for the correctness of a service implemented using our protocol. The primary proposes the values for time-last-modified and time-last-accessed, and replicas select the larger of the proposed value and one greater than the maximum of all values selected for earlier requests. We do not require synchronous writes to implement NFS V2 protocol semantics because BFS achieves stability of modified data and meta-data through replication [20].
## Maintaining Checkpoints
This section describes how *snfsd* maintains checkpoints of the file system state. Recall that each replica maintains several logical copies of the state: the current state, some number of checkpoints that are not yet stable, and the last stable checkpoint.
*snfsd* executes file system operations directly in the memory mapped file to preserve locality, and it uses copy-on-write to reduce the space and time overhead associated with maintaining checkpoints. *snfsd* maintains a copy-on-write bit for every 512-byte block in the memory mapped file. When the replication code invokes the *make_checkpoint* upcall, *snfsd* sets all the copy-on-write bits and creates a (volatile) checkpoint record, containing the current sequence number, which it receives as an argument to the upcall, and a list of blocks. This list contains the copies of the blocks that were modified since the checkpoint was taken, and therefore, it is initially empty. The record also contains the digest of the current state; we discuss how the digest is computed in Section 7.4.
When a block of the memory mapped file is modified while executing a client request, *snfsd* checks the copy-on-write bit for the block and, if it is set, stores the block's current contents and its identifier in the checkpoint record for the last checkpoint. Then, it overwrites the block with its new value and resets its copy-on-write bit. *snfsd* retains a checkpoint record until told to discard it via a delete checkpoint upcall, which is made by the replication code when a later checkpoint becomes stable.
If the replication code requires a checkpoint to send to another replica, it calls the get checkpoint upcall. To obtain the value for a block, *snfsd* first searches for the block in the checkpoint record of the stable checkpoint, and then searches the checkpoint records of any later checkpoints. If the block is not in any checkpoint record, it returns the value from the current state.
The use of the copy-on-write technique and the fact that we keep at most 2 checkpoints ensure that the space and time overheads of keeping several logical copies of the state are low. For example, in the Andrew benchmark experiments described in Section 7, the average checkpoint record size is only 182 blocks with a maximum of 500.
## Computing Checkpoint Digests
*snfsd* computes a digest of a checkpoint state as part of a make checkpoint upcall. Although checkpoints are only taken occasionally, it is important to compute the state digest incrementally because the state may be large. *snfsd* uses an incremental collision-resistant one-way hash function called AdHash [1]. This function divides the state into fixed-size blocks and uses some other hash function (e.g., MD5) to compute the digest of the string obtained by concatenating the block index with the block value for each block. The digest of the state is the sum of the digests of the blocks modulo some large integer. In our current implementation, we use the 512-byte blocks from the copy-on-write technique and compute their digest using MD5.
To compute the digest for the state incrementally, *snfsd* maintains a table with a hash value for each 512-byte block. This hash value is obtained by applying MD5 to the block index concatenated with the block value at the time of the last checkpoint. When make checkpoint is called, *snfsd* obtains the digest $d$
for the previous checkpoint state (from the associated checkpoint record). It computes new hash values for each block whose copy-on-write bit is reset by applying MD5 to the block index concatenated with the current block value. Then, it adds the new hash value to $d$, subtracts the old hash value from $d$, and updates the table to contain the new hash value. This process is efficient provided the number of modified blocks is small; as mentioned above, on average 182 blocks are modified per checkpoint for the Andrew benchmark.
# Performance Evaluation
This section evaluates the performance of our system using two benchmarks: a micro-benchmark and the Andrew benchmark [15]. The micro-benchmark provides a service-independent evaluation of the performance of the replication library; it measures the latency to invoke a null operation, i.e., an operation that does nothing.
The Andrew benchmark is used to compare BFS with two other file systems: one is the NFS V2 implementation in Digital Unix, and the other is identical to BFS except without replication. The first comparison demonstrates that our system is practical by showing that its latency is similar to the latency of a commercial system that is used daily by many users. The second comparison allows us to evaluate the overhead of our algorithm accurately within an implementation of a real service.
## Experimental Setup
The experiments measure normal-case behavior (i.e., there are no view changes), because this is the behavior that determines the performance of the system. All experiments ran with one client running two relay processes, and four replicas. Four replicas can tolerate one Byzantine fault; we expect this reliability level to suffice for most applications. The replicas and the client ran on identical DEC 3000/400 Alpha workstations. These workstations have a 133 MHz Alpha 21064 processor, 128 MB of memory, and run Digital Unix version 4.0. The file system was stored by each replica on a DEC RZ26 disk. All the workstations were connected by a 10Mbit/s switched Ethernet and had DEC LANCE Ethernet interfaces. The switch was a DEC EtherWORKS 8T/TX. The experiments were run on an isolated network.
The interval between checkpoints was 128 requests, which causes garbage collection to occur several times in each of the experiments. The maximum sequence number accepted by replicas in pre-prepare messages was 256 plus the sequence number of the last stable checkpoint.
## Micro-Benchmark
The micro-benchmark measures the latency to invoke a null operation. It evaluates the performance of two implementations of a simple service with no state that implements null operations with arguments and results of different sizes. The first implementation is replicated using our library and the second is unreplicated and uses UDP directly. Table 1 reports the response times measured at the client for both read-only and read-write operations. They were obtained by timing 10,000 operation invocations in three separate runs and we report the median value of the three runs. The maximum deviation from the median was always below 0.3% of the reported value. We denote each operation by a/b, where a and b are the sizes of the operation argument and result in KBytes.
+---------+-------------------+------------+------------+
|arg./res | replicated | replicated | without |
|(KB) | read write | read-only | replication|
+:=======:+==================:+===========:+===========:+
| 0/0 | 3.35 (309%) | 1.62 (98%)| 0.82 |
+---------+-------------------+------------+------------+
| 4/0 | 14.19 (207%) | 6.98 (51%) | 4.62 |
+---------+-------------------+------------+------------+
| 0/4 |8.01 ( 72%) | 5.94 (27%) | 4.66 |
+---------+-------------------+------------+------------+
Table 1: Micro-benchmark results (in milliseconds); the percentage overhead is relative to the unreplicated case.
The overhead introduced by the replication library is due to extra computation and communication. For example, the computation overhead for the read-write 0/0 operation is approximately 1.06ms, which includes 0.55ms spent executing cryptographic operations. The remaining 1.47ms of overhead are due to extra communication; the replication library introduces an extra message round-trip, it sends larger messages, and it increases the number of messages received by each node relative to the service without replication.
The overhead for read-only operations is significantly lower because the optimization discussed in Section 5.1 reduces both computation and communication overheads. For example, the computation overhead for the read-only 0/0 operation is approximately 0.43ms, which includes 0.23ms spent executing cryptographic operations, and the communication overhead is only 0.37ms because the protocol to execute read-only operations uses a single round-trip.
Table 1 shows that the relative overhead is lower for the 4/0 and 0/4 operations. This is because a significant fraction of the overhead introduced by the replication library is independent of the size of operation arguments and results. For example, in the read-write 0/4 operation, the large message (the reply) goes over the network only once (as discussed in Section 5.1) and only the cryptographic overhead to process the reply message is increased. The overhead is higher for the read-write 4/0 operation because the large message (the request) goes over the network twice and increases the cryptographic overhead for processing both request and pre-prepare messages.
It is important to note that this micro-benchmark represents the worst case overhead for our algorithm because the operations perform no work and the unreplicated server provides very weak guarantees. Most services will require stronger guarantees, e.g., authenticated connections, and the overhead introduced by our algorithm relative to a server that implements these guarantees will be lower. For example, the overhead of the replication library relative to a version of the unreplicated service that uses MACs for authentication is only 243% for the read-write 0/0 operation and 4% for the read-only 4/0 operation.
We can estimate a rough lower bound on the performance gain afforded by our algorithm relative to Rampart [30]. Reiter reports that Rampart has a latency of 45ms for a multi-RPC of a null message in a 10 Mbit/s Ethernet network of 4 SparcStation 10s [30]. The multi-RPC is sufficient for the primary to invoke a state machine operation but for an arbitrary client to invoke an operation it would be necessary to add an extra message delay and an extra RSA signature and verification to authenticate the client; this would lead to a latency of at least 65ms (using the RSA timings reported in [29].) Even if we divide this latency by 1.7, the ratio of the SPECint92 ratings of the DEC 3000/400 and the SparcStation 10, our algorithm still reduces the latency to invoke the read-write and read-only 0/0 operations by factors of more than 10 and 20, respectively. Note that this scaling is conservative because the network accounts for a significant fraction of Rampart's latency [29] and Rampart's results were obtained using 300-bit modulus RSA signatures, which are not considered secure today unless the keys used to generate them are refreshed very frequently.
There are no published performance numbers for SecureRing [16] but it would be slower than Rampart because its algorithm has more message delays and signature operations in the critical path.
## Andrew Benchmark
The Andrew benchmark [15] emulates a software development workload. It has five phases: (1) creates subdirectories recursively; (2) copies a source tree; (3) examines the status of all the files in the tree without examining their data; (4) examines every byte of data in all the files; and (5) compiles and links the files.
We use the Andrew benchmark to compare BFS with two other file system configurations: NFS-std, which is the NFS V2 implementation in Digital Unix, and BFS-nr, which is identical to BFS but with no replication. BFS-nr ran two simple UDP relays on the client, and on the server it ran a thin veneer linked with a version of *snfsd* from which all the checkpoint management code was removed. This configuration does not write modified file system state to disk before replying to the client. Therefore, it does not implement NFS V2 protocol semantics, whereas both BFS and NFS-std do.
Out of the 18 operations in the NFS V2 protocol only getattr is read-only because the time-last-accessed attribute of files and directories is set by operations that would otherwise be read-only, e.g., read and lookup. The result is that our optimization for read-only operations can rarely be used. To show the impact of this optimization, we also ran the Andrew benchmark on a second version of BFS that modifies the lookup operation to be read-only. This modification violates strict Unix file system semantics but is unlikely to have adverse effects in practice.
For all configurations, the actual benchmark code ran at the client workstation using the standard NFS client implementation in the Digital Unix kernel with the same mount options. The most relevant of these options for the benchmark are: UDP transport, 4096-byte read and write buffers, allowing asynchronous client writes, and allowing attribute caching.
We report the mean of 10 runs of the benchmark for each configuration. The sample standard deviation for the total time to run the benchmark was always below 2.6% of the reported value but it was as high as 14% for the individual times of the first four phases. This high variance was also present in the NFS-std configuration. The estimated error for the reported mean was below 4.5% for the individual phases and 0.8% for the total.
Table 2 shows the results for BFS and BFS-nr. The comparison between BFS-strict and BFS-nr shows that the overhead of Byzantine fault tolerance for this service is low -- BFS-strict takes only 26% more time to run

Table 2: Andrew benchmark: BFS vs BFS-nr. The times are in seconds.
+---------+------------+-----------+---------+
| phase |BFS strict | BFS r/o | BFS-nr  |
| | | lookup | |
+:=======:+===========:+==========:+========:+
| 1 | 0.55 (57%) |0.47 (34%) | 0.35 |
+---------+------------+-----------+---------+
| 2 | 9.24 (82%) |7.91 (56%) | 5.08 |
+---------+------------+-----------+---------+
| 3 |7.24 (18%) |6.45 (6%) | 6.11 |
+---------+------------+-----------+---------+
| 4 | 8.77 (18%) | 7.87 (6%) | 7.41 |
+---------+------------+-----------+---------+
| 5 |38.68 (20%) |38.38 (19%)| 32.12 |
+---------+------------+-----------+---------+
| total | 64.48 (26%)|61.07 (20%)| 51.07 |
+---------+------------+-----------+---------+
the complete benchmark. The overhead is lower than what was observed for the micro-benchmarks because the client spends a significant fraction of the elapsed time computing between operations, i.e., between receiving the reply to an operation and issuing the next request, and operations at the server perform some computation. But the overhead is not uniform across the benchmark phases. The main reason for this is a variation in the amount of time the client spends computing between operations; the first two phases have a higher relative overhead because the client spends approximately 40% of the total time computing between operations, whereas it spends approximately 70% during the last three phases.
The table shows that applying the read-only optimization to lookup improves the performance of BFS significantly and reduces the overhead relative to BFS-nr to 20%. This optimization has a significant impact in the first four phases because the time spent waiting for lookup operations to complete in BFS-strict is at least 20% of the elapsed time for these phases, whereas it is less than 5% of the elapsed time for the last phase.
+---------+------------+-----------+---------+
| phase |BFS strict | BFS r/o | NFS-std |
| | | lookup | |
+:=======:+===========:+==========:+========:+
| 1 |0.55 (-69%) |0.47 (-73%)| 1.75 |
+---------+------------+-----------+---------+
| 2 |9.24 ( -2%) |7.91 (-16%)| 9.46 |
+---------+------------+-----------+---------+
| 3 |7.24 (35%) | 6.45 (20%)| 5.36 |
+---------+------------+-----------+---------+
| 4 |8.77 (32%) |7.87 (19%) | 6.60 |
+---------+------------+-----------+---------+
| 5 |38.68 (-2%) |38.38 (-2%)| 39.35 |
+---------+------------+-----------+---------+
| total |64.48 (3%) |61.07 (-2%)| 62.52 |
+---------+------------+-----------+---------+
Table 3: Andrew benchmark: BFS vs NFS-std. The times are in seconds.
Table 3 shows the results for BFS vs NFS-std. These results show that BFS can be used in practice -- BFS-strict takes only 3% more time to run the complete benchmark. Thus, one could replace the NFS V2 implementation in Digital Unix, which is used daily by many users, by BFS without affecting the latency perceived by those users. Furthermore, BFS with the read-only optimization for the *lookup* operation is actually 2% faster than NFS-std.
The overhead of BFS relative to NFS-std is not the same for all phases. Both versions of BFS are faster than NFS-std for phases 1, 2, and 5 but slower for the other phases. This is because during phases 1, 2, and 5 a large fraction (between 21% and 40%) of the operations issued by the client are *synchronous*, i.e., operations that require the NFS implementation to ensure stability of modified file system state before replying to the client. NFS-std achieves stability by writing modified state to disk whereas BFS achieves stability with lower latency using replication (as in Harp [20]). NFS-std is faster than BFS (and BFS-nr) in phases 3 and 4 because the client issues no synchronous operations during these phases.
# Related Work
Most previous work on replication techniques ignored Byzantine faults or assumed a synchronous system model (e.g., [17, 26, 18, 34, 6, 10]). Viewstamped replication [26] and Paxos [18] use views with a primary and backups to tolerate benign faults in an asynchronous system. Tolerating Byzantine faults requires a much more complex protocol with cryptographic authentication, an extra pre-prepare phase, and a different technique to trigger view changes and select primaries. Furthermore, our system uses view changes only to select a new primary but never to select a different set of replicas to form the new view as in [26, 18].
Some agreement and consensus algorithms tolerate Byzantine faults in asynchronous systems (e.g., [2, 3, 24]). However, they do not provide a complete solution for state machine replication, and furthermore, most of them were designed to demonstrate theoretical feasibility and are too slow to be used in practice. Our algorithm during normal-case operation is similar to the Byzantine agreement algorithm in [2] but that algorithm is unable to survive primary failures.
The two systems that are most closely related to our work are Rampart [29, 30, 31, 22] and SecureRing [16]. They implement state machine replication but are more than an order of magnitude slower than our system and, most importantly, they rely on synchrony assumptions.
Both Rampart and SecureRing must exclude faulty replicas from the group to make progress (e.g., to remove a faulty primary and elect a new one), and to perform garbage collection. They rely on failure detectors to determine which replicas are faulty. However, failure detectors cannot be accurate in an asynchronous system [21], i.e., they may misclassify a replica as faulty. Since correctness requires that fewer than $\frac{1}{3}$ of group members be faulty, a misclassification can compromise correctness by removing a non-faulty replica from the group. This opens an avenue of attack: an attacker gains control over a single replica but does not change its behavior in any detectable way; then it slows correct replicas or the communication between them until enough are excluded from the group.
To reduce the probability of misclassification, failure detectors can be calibrated to delay classifying a replica as faulty. However, for the probability to be negligible the delay must be very large, which is undesirable. For example, if the primary has actually failed, the group will be unable to process client requests until the delay has expired. Our algorithm is not vulnerable to this problem because it never needs to exclude replicas from the group.
Phalanx [23, 25] applies quorum replication techniques [12] to achieve Byzantine fault-tolerance in asynchronous systems. This work does not provide generic state machine replication; instead, it offers a data repository with operations to read and write individual variables and to acquire locks. The semantics it provides for read and write operations are weaker than those offered by our algorithm; we can implement arbitrary operations that access any number of variables, whereas in Phalanx it would be necessary to acquire and release locks to execute such operations. There are no published performance numbers for Phalanx but we believe our algorithm is faster because it has fewer message delays in the critical path and because of our use of MACs rather than public key cryptography. The approach in Phalanx offers the potential for improved scalability; each operation is processed by only a subset of replicas. But this approach to scalability is expensive: it requires $n > 4f+1$ to tolerate $f$ faults; each replica needs a copy of the state; and the load on each replica decreases slowly with $n$ (it is $O(1/\sqrt{n})$).
# Conclusions
This paper has described a new state-machine replication algorithm that is able to tolerate Byzantine faults and can be used in practice: it is the first to work correctly in an asynchronous system like the Internet and it improves the performance of previous algorithms by more than an order of magnitude.
The paper also described BFS, a Byzantine-fault tolerant implementation of NFS. BFS demonstrates that it is possible to use our algorithm to implement real services with performance close to that of an unreplicated service -- the performance of BFS is only 3% worse than that of the standard NFS implementation in Digital Unix. This good performance is due to a number of important optimizations, including replacing public-key signatures by vectors of message authentication codes, reducing the size and number of messages, and the incremental checkpoint-management techniques.
One reason why Byzantine-fault-tolerant algorithms will be important in the future is that they can allow systems to continue to work correctly even when there are software errors. Not all errors are survivable; our approach cannot mask a software error that occurs at all replicas. However, it can mask errors that occur independently at different replicas, including nondeterministic software errors, which are the most problematic and persistent errors since they are the hardest to detect. In fact, we encountered such a software bug while running our system, and our algorithm was able to continue running correctly in spite of it.
There is still much work to do on improving our system. One problem of special interest is reducing the amount of resources required to implement our algorithm. The number of replicas can be reduced by using $f$ replicas as witnesses that are involved in the protocol only when some full replica fails. We also believe that it is possible to reduce the number of copies of the state to $f+1$ but the details remain to be worked out.
# Acknowledgments
We would like to thank Atul Adya, Chandrasekhar Boyapati, Nancy Lynch, Sape Mullender, Andrew Myers, Liuba Shrira, and the anonymous referees for their helpful comments on drafts of this paper.
# References
[1] M. Bellare and D. Micciancio. A New Paradigm for Collision-free Hashing: Incrementality at Reduced Cost. In Advances in Cryptology -- Eurocrypt 97, 1997.
[2] G. Bracha and S. Toueg. Asynchronous Consensus and Broadcast Protocols. Journal of the ACM, 32(4), 1995.
[3] R. Canetti and T. Rabin. Optimal Asynchronous Byzantine Agreement. Technical Report #92-15, Computer Science Department, Hebrew University, 1992.
[4] M. Castro and B. Liskov. A Correctness Proof for a Practical Byzantine-Fault-Tolerant Replication Algorithm. Technical Memo MIT/LCS/TM-590, MIT Laboratory for Computer Science, 1999.
[5] M. Castro and B. Liskov. Authenticated Byzantine Fault Tolerance Without Public-Key Cryptography. Technical Memo MIT/LCS/TM-589, MIT Laboratory for Computer Science, 1999.
[6] F. Cristian, H. Aghili, H. Strong, and D. Dolev. Atomic Broadcast: From Simple Message Diffusion to Byzantine Agreement. In International Conference on Fault Tolerant Computing, 1985.
[7] S. Deering and D. Cheriton. Multicast Routing in Datagram Internetworks and Extended LANs. ACM Transactions on Computer Systems, 8(2), 1990.
[8] H. Dobbertin. The Status of MD5 After a Recent Attack. RSA Laboratories' CryptoBytes, 2(2), 1996.
[9] M. Fischer, N. Lynch, and M. Paterson. Impossibility of Distributed Consensus With One Faulty Process. Journal of the ACM, 32(2), 1985.
[10] J. Garay and Y. Moses. Fully Polynomial Byzantine Agreement for $n > 3t$ Processors in $t+1$ Rounds. SIAM Journal of Computing, 27(1), 1998.
[11] D. Gawlick and D. Kinkade. Varieties of Concurrency Control in IMS/VS Fast Path. Database Engineering, 8(2), 1985.
[12] D. Gifford. Weighted Voting for Replicated Data. In Symposium on Operating Systems Principles, 1979.
[13] M. Herlihy and J. Tygar. How to make replicated data secure. Advances in Cryptology (LNCS 293), 1988.
[14] M. Herlihy and J. Wing. Axioms for Concurrent Objects. In ACM Symposium on Principles of Programming Languages, 1987.
[15] J. Howard et al. Scale and performance in a distributed file system. ACM Transactions on Computer Systems, 6(1), 1988.
[16] K. Kihlstrom, L. Moser, and P. Melliar-Smith. The SecureRing Protocols for Securing Group Communication. In Hawaii International Conference on System Sciences, 1998.
[17] L. Lamport. Time, Clocks, and the Ordering of Events in a Distributed System. Commun. ACM, 21(7), 1978.
[18] L. Lamport. The Part-Time Parliament. Technical Report 49, DEC Systems Research Center, 1989.
[19] L. Lamport, R. Shostak, and M. Pease. The Byzantine Generals Problem. ACM Transactions on Programming Languages and Systems, 4(3), 1982.
[20] B. Liskov et al. Replication in the Harp File System. In ACM Symposium on Operating System Principles, 1991.
[21] N. Lynch. Distributed Algorithms. Morgan Kaufmann Publishers, 1996.
[22] D. Malkhi and M. Reiter. A High-Throughput Secure Reliable Multicast Protocol. In Computer Security Foundations Workshop, 1996.
[23] D. Malkhi and M. Reiter. Byzantine Quorum Systems. In ACM Symposium on Theory of Computing, 1997.
[24] D. Malkhi and M. Reiter. Unreliable Intrusion Detection in Distributed Computations. In Computer Security Foundations Workshop, 1997.
[25] D. Malkhi and M. Reiter. Secure and Scalable Replication in Phalanx. In IEEE Symposium on Reliable Distributed Systems, 1998.
[26] B. Oki and B. Liskov. Viewstamped Replication: A New Primary Copy Method to Support Highly-Available Distributed Systems. In ACM Symposium on Principles of Distributed Computing, 1988.
[27] B. Preneel and P. Oorschot. MDx-MAC and Building Fast MACs from Hash Functions. In Crypto 95, 1995.
[28] C. Pu, A. Black, C. Cowan, and J. Walpole. A Specialization Toolkit to Increase the Diversity of Operating Systems. In ICMAS Workshop on Immunity-Based Systems, 1996.
[29] M. Reiter. Secure Agreement Protocols. In ACM Conference on Computer and Communication Security, 1994.
[30] M. Reiter. The Rampart Toolkit for Building High-Integrity Services. Theory and Practice in Distributed Systems (LNCS 938), 1995.
[31] M. Reiter. A Secure Group Membership Protocol. IEEE Transactions on Software Engineering, 22(1), 1996.
[32] R. Rivest. The MD5 Message-Digest Algorithm. Internet RFC--1321, 1992.
[33] R. Rivest, A. Shamir, and L. Adleman. A Method for Obtaining Digital Signatures and Public-Key Cryptosystems. Communications of the ACM, 21(2), 1978.
[34] F. Schneider. Implementing Fault-Tolerant Services Using The State Machine Approach: A Tutorial. ACM Computing Surveys, 22(4), 1990.
[35] A. Shamir. How to share a secret. Communications of the ACM, 22(11), 1979.
[36] G. Tsudik. Message Authentication with One-Way Hash Functions. ACM Computer Communications Review, 22(5), 1992.
[37] M. Wiener. Performance Comparison of Public-Key Cryptosystems. RSA Laboratories' CryptoBytes, 4(1), 1998.

File diff suppressed because it is too large Load Diff

View File

@ -12,7 +12,7 @@
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Canonicalizing Human Readable Identifiers</title> </head><body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Canonicalizing Human Readable Identifiers</h1><p>
@ -36,7 +36,6 @@ Isolated homoglyphs, homoglyphs that do not look like any member of the scripts
If these rules result in any changes, the rule set is reapplied until no further changes ensue.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -6,6 +6,3 @@ do
tidy -lang en_us --doctype html5 -utf8 -access 4 -e -q -o $TMP/fred.html "$f"
done
echo "checked all html files for html5 compliance."

View File

@ -786,7 +786,7 @@ that make it possible to have the same API as xchacha20poly1305.
[OCB patents were abandoned in February 2021](https://www.metzdowd.com/pipermail/cryptography/2021-February/036762.html)
One of these days I will produce a fork of libsodium that supports ``crypto_box_ristretto25519aes256ocb.\*easy.\*`, but that is hardly urgent.
One of these days I will produce a fork of libsodium that supports ``crypto_box_ristretto25519aes256ocb.\*easy.\*`, but that is hardly urgent.
Just make sure the protocol negotiation allows new ciphers to be dropped in.
# Getting something up and running

View File

@ -24,7 +24,7 @@
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
@ -38,7 +38,6 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
</head>
<body>
<p><a href="./index.html"> To Home page</a></p>

View File

@ -9,7 +9,7 @@
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Crypto Currency and the Beast</title> </head>
<body>
@ -106,4 +106,4 @@ people who prefer banking in secrecy and do not trust each other all that much.<
licensed under the <a href="http://creativecommons.org/licenses/by-sa/3.0/" rel="license">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>
</html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -12,32 +12,32 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Crypto Currency Launch</title>
<title>Crypto Currency Launch</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Crypto Currency Launch</h1><p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Crypto Currency Launch</h1><p>
The total value held in the form of gold is ten trillion. But gold has problems if you try to transport it through an airport, security will likely take it from you. Hard to travel with it hidden. </p><p>
Hard to transfer it from one person to another, or from one identity to another. Hard to do international transactions in gold, hard to pay for oil with gold, or be paid for oil with gold, because transporting large amounts of gold is slow and dangerous.</p><p>
So, something better than gold, more transportable, more hideable, people would probably keep more than ten trillion in that form.</p><p>
The current value of bitcoin is about three hundred billion. Arguably crypto currency, if it works, if safe against the state, should be rather more than ten trillion. Say thirty trillion. This provides an upside of another hundred fold increase in value. On the other hand, the bitcoin is traceable in ways that gold is not. People are waiting to see what happens when the government cracks down.</p><p>
A crypto currency needs to be totally traceable and totally untraceable. Ann wants to be able to prove to Carol that she paid Bob, and that therefore her debt to Bob is cleared, or Bob has an obligation the Ann. But Ann and Bob likely do not want a powerful hostile party to be able to discover that Ann made a payment to Bob. Existing crypto currencies suffer from total traceability.</p><p>
Money is a store of value, a medium of exchange, and a measure of value. Gold sucks as a medium of exchange, because of transportation risks and costs. Crypto currency is very good as a medium of exchange, better than anything else, because banks are so remarkably incompetent, inefficient, and lawless. </p><p>
As a measure of value, gold has immense and ancient history, which makes it the best for long term measure of value. If you graph the prices of something, such as oil, over decades and centuries, you get far saner and more plausible data when you graph in terms of gold than in dollars, or even supposedly inflation adjusted dollars. Gold is the best measure of value over time. Inflation adjusted dollars give results that smell of politics and propaganda. Bitcoin, because of volatility and extremely rapid deflation, is really bad as a measure of value, but time will slowly fix this.</p><p>
The current price of bitcoin reflects a substantial possibility that it replaces the dollar as the currency of international transactions, in which case the dollar finds itself on the bitcoin standard, like it or not.</p><p>
To attract a significant portion of the wealth of the world, we do not want to have any mining, since this basically a fee against large accounts. We want a per account fee, because every account results in accountancy costs, and a transaction fee, because every transaction results in transaction costs, but not a charge against holding enormous amounts of wealth in an account. Mining is a charge against the value of accounts, which is a bad idea if we want wealth holders to hold their wealth in our crypto currency.</p><p>
We want it to be impossible to find who holds a large account if he does not want to be found, so that he is safe from rubber hose cryptography. We want it to be easy for him to keep control, and hard for anyone else to get control. He should be able to take the wallet that controls the bulk of his money offline, so that it cannot sign anything, because he has the signing key on a scrap of paper hidden somewhere, or on several such scraps of paper.</p><p>
And then, bringing together the scraps of paper that are the secret number that controls his account paper, he can sit down at a computer anywhere in the world, and send that money hither and yon.</p><p>
Gold has problems as the medium of international exchange, because of the problems of moving it. So everyone left their gold in Fort Knox, and moved ownership of that gold around, but it gradually became more and more obvious that America has embezzled all that gold.</p><p>
@ -48,7 +48,7 @@ Bitcoin is moveable. Big advantage over gold.</p><p>
Bitcoin is governed by consensus, which has serious problems because it is a consensus of miners, rather than a consensus of people who hold large amounts of bitcoin, but it has the advantage that the miners are rational, self interested, and competent, and are therefore predictable, while the US government is increasing crazy, self destructive, and criminal, and therefore unpredictable.</p><p>
The coin to invest in needs to be able to scale all the way to wiping out the US$ as a world currency. But it also needs to gain initial critical mass.</p><p>

View File

@ -25,7 +25,7 @@ Trouble with bitcoin is that it is not centerless proof of work winds up bei
Thus we need a system with proof of stake, and not only proof of stake, but proof of client stake the power over the system needs to reside with peers that have a lot of wealthy clients and it needs to be hard to find who the clients are, and where they are keeping their secrets, so that even if Mueller seizes important peers on charges of tax evasion and money laundering, does not thereby gain control. </p><p>
If the system handles an enormous number of transactions, peers are going to be big and expensive, thus vulnerable to people like Mueller armed with vague and open ended charges of tax evasion and money laundering. Hence the power of peer over the currency needs to be proportional to the wealth controlled by the secrets held by that peers clients. And that peers clients need to be free to move from one peer to the next, and apt to move to peers that make it difficult for Mueller to find their clients. </p><p>
If the system handles an enormous number of transactions, peers are going to be big and expensive, thus vulnerable to people like Mueller armed with vague and open ended charges of tax evasion and money laundering. Hence the power of peer over the currency needs to be proportional to the wealth controlled by the secrets held by that peers clients. And that peers clients need to be free to move from one peer to the next, and apt to move to peers that make it difficult for Mueller to find their clients. </p><p>
Need a crypto currency where Bob can prove to the whole world that he paid Ann such and such amount, in accord with such and such a bill, but no one else can prove he paid Ann, nor that there ever was such a bill, except he shows them. Bitcoin is far too traceable. We need controlled traceability, where the parrticipants can prove a transaction to third parties and the world, but the world cannot. And Bob needs to be able to prove what the payment was about, that it was part of a conversation, a meeting of minds. </p><p>
@ -61,7 +61,6 @@ So, to accomplish the goal of shutting down crypto currency requires world wide
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
}
@ -11,12 +11,12 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Crypto Currency on wide area distributed database</title>
<title>Crypto Currency on wide area distributed database</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Crypto Currency on wide area distributed database</h1><p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Crypto Currency on wide area distributed database</h1><p>
Much of this material is shamelessly plagiarized without <a href="http://docplayer.net/14501083-Blockchain-throughput-and-big-data-trent-mcconaghy.html">attribution.</a></p><p>

View File

@ -1,30 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Transaction Volume</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Transaction Volume</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Transaction Volume</h1>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Transaction Volume</h1>
<hr/>
<h2>Total number of bitcoin transactions </h2>
<p>Was four hundred million on 2019-05-04, occupying four hundred gigabytes
on disk. I don't know how many spent and unspent transaction outputs that
adds up to, but probably a billion or two. Thus a hash table mapping coin
public keys to transactions occupies 64 bytes for each coin, so fits in
couple of hundred gigabytes. Not a huge problem today when four terabyte
hard disks are standard, but in early versions of our competing rhocoin
system, simpler just to store all transaction outputs in an sqlite3
system, simpler just to store all transaction outputs in an sqlite3
database, where an unspent transaction output goes in a table with a field
linking it to its transaction, and a spent transaction output goes in
another table with a field linking it to the transaction from which it is
@ -34,12 +34,12 @@
valid or invalid will work by will contain the hash chain deriving from
the transaction, in transactions ordered by block number, and within blocks
by hash.</p>
<p>But our canonical tree is going to have to contain namecoins ordered by name order, rather than transaction order, to enable short proofs of the valid authority over a name.</p>
<hr/>
<h2>Bandwidth</h2>
<p>A bitcoin transaction is typically around 512 bytes, could be a lot less:&nbsp; A transaction needs a transaction type, one or more inputs, one or more outputs. A simple input or output would consist of the type, the amount, the time, and a public key. Type sixteen bits, time forty eight bits bits, amount sixty four bits, public key two hundred and fifty six bits, total forty eight bytes.&nbsp; A typical transaction has two inputs and two outputs, total 192 bytes. Frequently need another hash to link to relevant information, such as what this payment is for, another thirty two bytes, total 224 bytes.&nbsp;</p>
<p>A bitcoin transaction is typically around 512 bytes, could be a lot less:&nbsp; A transaction needs a transaction type, one or more inputs, one or more outputs. A simple input or output would consist of the type, the amount, the time, and a public key. Type sixteen bits, time forty eight bits bits, amount sixty four bits, public key two hundred and fifty six bits, total forty eight bytes.&nbsp; A typical transaction has two inputs and two outputs, total 192 bytes. Frequently need another hash to link to relevant information, such as what this payment is for, another thirty two bytes, total 224 bytes.&nbsp;</p>
<p>We will frequently store the transaction and the resulting balances, as if together, though likely for internal optimization reasons, actually stored separately, so 196 bytes.&nbsp; </p>
<p>Visa handles about 2000 transactions per second.&nbsp; Burst rate about four thousand per second.&nbsp; </p>
<p>Paypal 115 transactions per second.&nbsp; </p>
@ -51,15 +51,15 @@
<p>Common home internet connections are twelve mbps, common small business internet connections are one hundred mbps.&nbsp; So you will be able to peer with even when we reach Visa volumes, but you probably will not want to unless you are moderately wealthy or you are operating a business, so need a high capability connection anyway.</p>
<p>The i2p hidden network can handle about 30 kbps, so we can put the whole thing on the i2p hidden network, until we are wiping the floor with Paypal.&nbsp; Ideally, we want a client download, a host and peer download, and a business host and peer download, where you just say "install and next thing you know you have a website offering “&lt;item name here> for &lt;99.99999&gt; megaros”. (During initial development, we will fix the price at one megaro per US$)</p>
<p>A peer on a laptop can easily handle Paypal volumes on a typical free advertising supported coffee shop connection, which is around 10mbs down, 5mbs up. We need to allow peers to drop in and out. If you are a client, and the peer that hosts you drops out, you can go to another peer and ask another peer to host your wallet. If you are doing seriously anonymous transactions, well, just bring the laptop running your peer to the coffee shop.</p><p>
Assuming a block happens every five minutes, then Visa is three hundred megabyte blocks, 8Mbps.</p><p>
Paypal is ten megabyte blocks, 0.5Mps.</p><p>
Bitcoin is one megabyte blocks, 28Kbps.</p><p>
After two years at bitcoin volumes, our blockchain will be two hundred gigabytes, at which point we might want to think of a custom format for sequential patricia trees as a collection of immutable files, append only files which grow into immutable files, and wal files that get idempotently accumulated into the append only files.</p><p>
Initially, our volume will probably only be one transaction every thirty seconds or so, at which rate it will take two years to reach a gigabyte, and everything fits in memory, making thinking about the efficiency of disk access irrelevant. We just load up everything as necessary, and keep it till shutdown.</p><p>
During initial development, need an ICO, including a market place buying and selling megaros.&nbsp; </p>
<hr/>
<h2>Sorting</h2>
@ -75,89 +75,89 @@
<hr/>
<h2>Storage</h2>
<p>Instead of only keeping transactions, we keep transactions and accounts. Thus peers can place old transactions in offline storage, or just plain dump them. The hash tree ensures that they are immutable, but you don't actually have to keep old transactions and old account balances around. Thus, total chain volume is bounded. We can replace branches of the Merkle tree in old blocks that lead only to used transactions by their hash — we operate with a potentially incomplete Merkle-patricia dac.</p>
<p>On the other hand, perhaps I worry too much about disk storage, which has been doubling every year. Since we only plan to have a thousand or so full peers, when we have seven billion client wallets, they will be able to afford to store every transaction for everyone ever. Visa volume of 2000 transactions per second implies fifty gigabytes per day. So, if when we reach visa volumes, a typical peer has sixteen disks each of sixty four terabytes, it will take fifty years to fill them up, by which time we will likely have figured out a way of ditching old transactions.</p>
<p>Suppose we have a thousand peers, and seven billion clients, and each client wants ten thousand transactions stored forever. Each peer has to store forty thousand terabytes of data. Each peer has seven million clients, so each peer is going to be quite large business, and by that time standard hard disks will probably be about one hundred terabytes, so each peer is going to need about four hundred hard disks on a local network of a hundred computers, twenty thousand clients per hard disk, which is not going to be a burdensome cost other scaling problems will no doubt bite us hard before then. At present two hundred terabyte systems are common, though not among private individuals. Forty thousand is gigantic, though perhaps it will be less gigantic in a few years. OpenZFS handles hundreds of terabytes just fine. Not sure what will happen with tens of thousands of terabytes.</p>
<p>The trouble with disk storage is that sector failure rates have not been falling. When one sector fails you tend to lose everything. One bad sector can take down the disk operating system, and one bad sector will take down an Sqlite database. We can mitigate this problem by having several Sqlite databases, with the most active blocks in solid state drive storage, and the immutable consensus blocks (immutable in having root hashes that have been the subject of consensus, though they can lose some branches) spread over several very large disk drives. Perhaps Sqlite will address this problem, so that bad sectors only cause minor local losses, which can easily be replaced by data from another peer, or perhaps when we get big enough that it is a problem we will then write a system that directly writes Merkle-patricia dac to raw disk storage in such a manner that failed sectors only cause minor losses of a few elements of the Merkle-patricia dac, easily fixed, by getting replacement data from peers.</p><p>
The way Sqlite works at present, having all our data in one big Sqlite database is not going to work when the block chain gets too big, no matter how big disks become, for sooner or later one sector will fail, and we dont want the failure of one sector to require a full rebuild of a potentially gigantic database.</p><p>
A data structure that dies on the loss of one sector becomes unusable when it reaches two terabytes or so, and the trouble is that our existing disk operating systems, and databases such as Sqlite, are frequently fragile to the loss of single sector. To scale to our target size (every transaction, for everyone in the world, forever) we have to have a system that tolerates a few sectors dropping out here and there, and replaces them by requesting the lost data on those few sectors from peers. For the moment, however, we can get by using multiple Sqlite databases, each one limited to about six hundred gigabytes. They will die infrequently, and when they die, the peers should reload the lost data from other peers. The cost of disk sector bitrot increases as the square of the size of the Sqlite database, because the likelihood of the entire database dying of bit rot of a single sector increases proportionally to size, and the cost of recopying the data from peers increases proportionally to the size. Lose one sector of an Sqlite database, you may well lose the entire database, which is an increasing problem as disks and databases get bigger, and likely to become unbearable at around two to four terabytes.</p><p>
But for the moment, three six terabyte drives dont cost all that much, and with split databases, with some blocks in one Sqlite database, and other blocks in another, we could scale to twenty times the current size of bitcoin storage, and when we hit that limit, solve the problem when it becomes a problem. We dont really need to purge old transactions, though we build the architecture so that it is doable.</p>
<p>The architecture is that it is all one gigantic Merkle-patricia dac organized into blocks, with the older blocks immutable, and the way this data is stored on a particular peer is an implementation detail that can differ between one peer and the next. Each peer sees the other peer's Merkle-patricia dac, not the way the other peer's tree is stored on disk. It sees the canonical representation, not the way the particular peer represents the tree internally. This approach enables us to be flexible about storage as technology progresses, and as the amount of data to be stored increases. Maybe we will wind up storing the tree as architecture dependent representations of records written directly to raw disk sectors without any file system other than the tree itself. By the time sector bitrot becomes a problem, Sqlite may well be fixed so that it can lose a bit of data here and there to sector bitrot, and report that data lost and in need of recovery, rather than the entire database dying on its ass. And if not, by the time it becomes a problem, the community will be big enough and wealthy enough to issue a fix, either to Sqlite, or by creating a disk system that represents arbitrarily large Merkle-patricia dacs directly on disk sectors over multiple disks over multiple computers, rather than a database stored on files that are then represented on disk sectors.</p><p>
For a database of two terabytes or so, can keep them in one Sqlite database, though probably more efficient to have the active blocks on a solid state drive, and the older blocks on a disk drive, running on top of a standard operating system. Eight terabytes can store two billion transactions, which will fail horribly at handling all the world's transactions, and can only keep up with visa for a few days, but we can keep up with paypal for long enough to hack up something that can handle massive disk arrays, and ditches stale transactions.</p><p>
Assume clients keep their transactions forever, peers keep their own client transactions for a long time, but dump the transactions of other clients after a month or so.&nbsp; </p>
<p>Then to compete with Bitcoin, need about three gigabytes of storage, ever, about the size of a movie.&nbsp; Typical home hard disks these days are one thousand gigabytes.&nbsp; </p>
<p>Then to compete with Paypal, need about fifty gigabytes of storage, ever, about the size of a television series.&nbsp; Typical home hard disks these days are one thousand gigabytes.&nbsp; </p>
<p>Then to compete with Visa, need about one terabyte of storage, ever, about the size of a typical home consumer hard disk drive.&nbsp; </p>
<p>So when we are wiping the floor with visa, <em>then</em> only wealthy people with good internet connections will be peers.&nbsp; </p>
<p>If keeping only live transactions, and assume each entity has only a hundred live transactions, then an eight terabyte hard drive can support a billion accounts. Paypal has about three hundred million accounts, so we can beat paypal using Sqlite on standard disk drives, without doing anything clever. And then we can start correspondent banking, and start using a custom storage system designed to support Merkle trees directly on raw disks.</p>
<p>If we put a block chain Merkle tree oriented storage system on top of Sqlite, then we can shard, write direct to disk, whatever, without disturbing the rest of the software, allowing us to beat visa. We always query the Merkle tree of the current block, with most of the lower branches of the Merkle tree pointing back into previous blocks. So if you ask, what is the record whose identifier is such and such in the current block, you will probably get the answer, "it is record such and such in a previous block", which will likely go through several steps of recursion, as if we had a billion Sqlite databases. And why not have a billion Sqlite databases, in which a hundred are in one Sqlite database, and a hundred in another? And many processes. If we have one block every 256 seconds, then in ten years we have a million blocks, and a table locating each block in some arbitrary place, associated with some arbitrary channel to some arbitrary process is manageable, even without the trivial optimization of handling ranges of blocks. This implies that once a block is immutable, it is handed off to some process whose job is to resolve read only Merkle queries on blocks on a certain range. So we have a table locating immutable blocks, and the processes that can read them, which allows us to trivially shard to many databases, each on its own hard disk, and near trivially shard to clusters of computers. Assume a per peer table that assigns groups of blocks to directories on particular disks on particular hosts with only one of these groups being mutable. Then we can have shardable storage on day one. And assume we can have multiple processes, some responsible for some blocks and some responsible for others. Each peer could be a large local network over many machines. Of course, we are still potentially bottlenecked on assembling the current block, which will be done by a single cpu attached to single local disk, but we are not bottlenecked for disk space.</p>
<hr/>
<h2>Storage and transmission structure</h2>
<h2>Storage and transmission structure</h2>
<p>We will not have a Bitcoin style chain of blocks, instead a binary Merkle-patricia dac.&nbsp; But operations on this tree will be grouped into 256 second phases so we will be doing something very like a block every 256 seconds. (Or whatever time the global consensus configuration file specifies there is global consensus config data, per peer config data, and per client config data, all of them in yaml.)</p>
<p>For block synchronization we need a tree of transaction events organized by block, hence organized by time, at least for the high order part. To prove balance to a client without giving him the entire tree, need a tree of balances. To prove to a client name key association, need a tree of names. To prove no monkey business with names, need a tree of events affecting names organized by names. To prove no monkey business with balances, need a tree of transactions organized by client. (Leaves on the tree of transactions would consist of the key to the actual transaction.) And, of course, a rather trivial tree to hold the global config data. There will be a chain of leap seconds in anticipation of the day that the standards authorities pull their fingers out of their ass. The leap second data has to be signed by an authority specified in the global configuration data, but this will serve no actual useful function until the day there really is such an authority. For the moment the world represents time as if leap seconds never happened, as for example in communication between mail servers, and no one cares about the precise value of durations — it only matters that time is monotonic and approximately linear.</p>
<p>When we want more precise time comparisons, as when we are synchronizing and worried about the round trip time, we just get data on both computers' clocks as relevant and give up on a global true time. So if one computer ticks a thousand times a second, and the other two hundred and fifty six times a second, then, for round trip concerns, no biggie.</p>
<p>Data will be ordered, both in storage and transmission, by key, where the high order bits of the key are block number, which is the high order bits of the time. Transactions are applied according to the time designated by the sender, rounded to some multiple of a thousand milliseconds, and then the public key of the recipient. Order in which transactions are applied matters for bouncing transactions. Transactions between the same sender and the same recipient shall be limited to one per block period. If someone wants to transact more often than that, has to batch transactions using the microtransaction protocol. For internal optimization, there will be additional indexes, but that is what we are going to hash over, and that will be the order of our hash tree, our storage, and our transmissions.&nbsp; </p>
<p>Our global hash will be a binary Merkle-patricia dac of hashes, with the leaf hashes corresponding to the above.&nbsp; </p>
<p>We use two kinds of time millisecond time modulo 2^32 for managing connections, and second time for times that go into the global consensus block chain.</p>
<p>C++11 <code>std::chrono::</code> seems to be the library that actually fixes these problems, with a steady clock for duration, which we will access in milliseconds modulo 2^32, and a system clock which we will access for global time in seconds since the epoch modulo 2^64</p>
<p>Both kinds of time ignore the leap second issue. Whenever seconds are presented for human readership, then ISO 8601 format which evades the leap second issue. Whenever durations are employed internally, we use milliseconds past the epoch modulo 2^32, and do not assume computer clocks are synchronized, or even both running at the same rate, though we do assume that both are measuring something rather close to milliseconds.</p>
<p>Our global hash will be a binary Merkle-patricia dac of hashes, with the leaf hashes corresponding to the above.&nbsp; </p>
<p>We use two kinds of time millisecond time modulo 2^32 for managing connections, and second time for times that go into the global consensus block chain.</p>
<p>C++11 <code>std::chrono::</code> seems to be the library that actually fixes these problems, with a steady clock for duration, which we will access in milliseconds modulo 2^32, and a system clock which we will access for global time in seconds since the epoch modulo 2^64</p>
<p>Both kinds of time ignore the leap second issue. Whenever seconds are presented for human readership, then ISO 8601 format which evades the leap second issue. Whenever durations are employed internally, we use milliseconds past the epoch modulo 2^32, and do not assume computer clocks are synchronized, or even both running at the same rate, though we do assume that both are measuring something rather close to milliseconds.</p>
<p><code>boost::posix_time::ptime</code> sucks.</p>
<p>We give up on handling leap seconds until the standards people get their act together and make it easy for everyone.</p>
<p>Time will be stored as a binary UT1 value, and not stored as local time. When displayed for humans, time will be displayed as if UTC time, in accordance with <a href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</a> though it is actually an approximation to UT1. If the data is being displayed to the local user momentarily, will be displayed as local time plus the offset to UT1, for example 2019-04-12T23:20:50.52-5:00, representing 2019 April the twelfth, 8:50.52PM UT1 time in New York. If the data is not just being displayed momentarily, but will be recorded in a log file, it will always be recorded as UT1 time, as for example 2019-04-12T15:50.52Z, not UT1 time plus local time offset, because humans are likely to compare log files in different time zones, and when looking at logs long after the event, dont care much what the local time was.</p>
<p>We give up on handling leap seconds until the standards people get their act together and make it easy for everyone.</p>
<p>Time will be stored as a binary UT1 value, and not stored as local time. When displayed for humans, time will be displayed as if UTC time, in accordance with <a href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</a> though it is actually an approximation to UT1. If the data is being displayed to the local user momentarily, will be displayed as local time plus the offset to UT1, for example 2019-04-12T23:20:50.52-5:00, representing 2019 April the twelfth, 8:50.52PM UT1 time in New York. If the data is not just being displayed momentarily, but will be recorded in a log file, it will always be recorded as UT1 time, as for example 2019-04-12T15:50.52Z, not UT1 time plus local time offset, because humans are likely to compare log files in different time zones, and when looking at logs long after the event, dont care much what the local time was.</p>
<p>If we are recording the local time plus offset, remember that confusingly 1996-12-19T15:39:57-08:00 means that to get universal time from local time, we have to add eight hours, not subtract eight hours.<br/>
<code>1996-12-19T15:39:57-08:00</code> is, surprisingly and illogically, the same moment as:<br/>
<code>1996-12-19T23:39:57+00:00</code> add it, dont subtract it.</p><p>
<code>1996-12-19T24:00:00+00:00</code> is, unsurprisingly and logically the same moment as:<br/>
<code>1996-12-20T00:00:00+00:00</code></p>
<p>We can store in any format, and transmit in any mutually agreed format, but the protocol will hash as if in full length binary. Any protocol is transmitting a representation of the binary data, which should be uniquely and reversibly mapped to a human representation of the data, but we do not need global agreement on a human representation of the data. Because multiple human representations of the data are possible, any such representation should contain an identifier of the representation being used. Similarly for storage and transmission.&nbsp; Since global agreement on storage, transmission, and human representations of the data is not guaranteed, format identifiers for storage and transmission formats will be 128 bits in the binary representation. Human readable formats will be in yaml.&nbsp;</p>
<p>For a hash to be well defined, we need a well defined mapping between a yaml text stream, and a binary stream. A single yaml document can have many equivalent text representations, so we have to hash the binary data specified by the yaml document. Which means a yaml document must be stored in an object of specific type defined outside the document, and we hash the canonical form of that object. The object type will be identified by a yaml directive %<code>&lt;typename&gt;</code>, and if the named fields of the yaml document fail to correspond to the location fields of the binary object, it is an error, and the document is rejected.&nbsp;</p>
<p>The yaml document representing a binary object has a human readable type name in the form of a percent directive, and human readable field names in the form of a mapping between names and values, but the binary object it represents does not contain these field names, and its type identifier, if it has one, is an integer.&nbsp; The mapping between human readable names in the yaml document and offset locations within the binary object occurs outside the object, and outside the yaml document.&nbsp;</p>
<p>Different compilers tend to wind up implementing object padding differently, and of course there is always the endianness issue, big endian versus little endian. For a binary object to have one unique representation really requires ASN.1.&nbsp;</p>
<p>ASN.1 can generate aligned Canonical Packed encoding, which is what we are going to need to hash, and aligned packed encoding, which is what we will need for everything except human readability.&nbsp; It can also generate JSON encoding rules, which is type information in JSON, which we manually edit into a human readable description of how the object should look in yaml, and manually edit into a yaml to and from Canonical Aligned packed encoding.
For the data represented by a yaml document to have a unique well defined hash, the yaml document has to reference an ASN.1 specification in a percent directive. Try the III ASN.1 Mozilla library to compile ASN.1 into C++. Our yaml library should generate code to read yaml into and out of an object whose representation in memory is compiler and machine dependent, and our ASN.1 library should generate code to read data into and out of the same object whose representation in memory is compiler and machine dependent.&nbsp;</p>
<p>A connection between two peers will represent the data in an ASN.1 PER format, but not necessarily canonical aligned PER format. The representation will be idiosyncratic to that particular connection. Every connection could use a different representation. They probably all use the same representation, but nothing requires them to do so.&nbsp;A machine could have a hundred connections active, each with a different representation, though it probably will not.&nbsp;</p>
<p>A branch our immutable binary tree, or the portion of the entire global immutable tree leading to a particular branch, can be output as yaml, or input from yaml in order that we can figure out what the machines are doing, and when we read a branch in yaml, we can always immediately learn whether than branch chains to the global consensus, albeit a portion of that chain may run through a local wallet, then to a transaction in that wallet, which transaction is globally known.&nbsp;</p>
<p>Our immutable tree can only contain types known to the code, and compiled into the code. ASN.1 and yaml can represent arbitrary structures, but our code will not be able to handle arbitrary structures, and shall swiftly reject lots of valid, but unexpected, yaml, though a wallet might well know types that are not globally known. ASN.1 canonical aligned PER can only represent types that are expected by the code, thus everything that can be read, has to be expected and capable of being handled by canonical aligned PER.&nbsp;</p>
<p>A conversation involving many people is a tree in reverse temporal order to hash tree. However, we are only going to link such conversations into the block chain in relation to a financial transaction, in which case we want to link it in hash chain order the bill or offer, and any documents explicitly included in the bill or offer. Texts in chat contain a time, an author, a chatroom identifier, and that they are a reply to a previous text. Constructing a tree forward in time involves a search for all replies.&nbsp; It is not stored in tree form, texts do not contain links to what replies to them.&nbsp; Within wallet storage such a text is globally identified by its hash. Which is globally meaningful for any participant in the conversation. Of course for payments, there are usually only two people in the conversation, but if a text is referenced by a transaction in the global consensus chain, you can send it to anyone, and what it is a reply to, and what that it is a reply to, whereupon they become meaningful, and immutable, for the recipients. So you can send a text to a third party that includes another text, or several such texts. If you include a third party text, and what this third party text is a reply to, then the reply link will become meaningful to the recipient. Otherwise, when he clicks on the reply link, he will get an error. The wallet contains a bunch of records that are indexed by hash, the hash globally and uniquely identifying immutable data. Texts are almost free form, having a time, an originator, a title of under 140 characters, which is likely to be the entire content of the text, and a destination, which may be a chatroom, or a particular subtype of chatroom, a chatroom with only two permitted members, and free form text which may contain references to other texts, generally in the form of a quote or title, which links to the actual prior in time text.&nbsp;</p>
<p>Clicking on the link expands it inline with a four tab indent, and a tree outline rooted on the left. If the tree gets too deep, say four tabs deep (configurable) we collapse it by hiding sister expansions, collapsing parent nodes, and applying tail recursion.&nbsp; The tree of a conversation thread is similarly displayed, also in correct time order (collapsing and tail recursing as needed to prevent it drifting too far right) but it is not a hash tree, and you cannot link it into the block chain.&nbsp; You can only link a particular text, and prior texts that it explicitly includes by hash, into the blockchain.&nbsp; Only their root hash is actually stored in the blockchain.&nbsp; The actual texts, though their hash globally identifies them across all wallets, are only stored in some particular wallets, and can easily wind up lost or erased.&nbsp;</p>
<p>When peers negotiate a protocol, they negotiate a representation of the data, which may be different for any pair of peers, but what is being represented is globally agreed.&nbsp; </p>
<p>What is being represented is a global binary Merkle-patricia dac of blocks of variable length binary data. In transmission we generally do not transmit the entire hash if the entire subtree is available, but a sample of the hash, which sample is different in every connection between every pair of peers. This means that a peer cannot easily fake keeping the data. Since the entire tree represents a global consensus, the content of every block must be understandable to every peer so that every peer can determine that every block is compliant with the global rules, so every block begins with a type identifier, which normally defines a fixed length for the block and the rule for generating a hash for that block. The tree never carries opaque data, though it may well carry a hash code that identifies off block chain opaque data.</p>
<p>Whatever protocol is employed, the software can and will routinely express the representation in its global canonical binary form, and in human readable form, so that when disagreements occur in reaching consensus the human masters of the machines can agree on what is going on and what should be done about it, regardless of the representation used internally to store and transmit data.&nbsp; </p>
<p>Although the consensus will be represented by both a human and a robotic board and CEO, there shall be no automatic rule update mechanism, except according to individual human choices. Each new consensus shall represent a consensus of peers managing a majority of the shares, and to change the rules will require that the board and the CEO persuade a substantial majority of the human masters of the peers to install new software, which they may do, or may refrain from doing.&nbsp; The consensus will be continually updated according to robotic choices, but the rules the robots are following are installed by humans or not.</p>
<p>When peers negotiate a protocol, they negotiate a representation of the data, which may be different for any pair of peers, but what is being represented is globally agreed.&nbsp; </p>
<p>What is being represented is a global binary Merkle-patricia dac of blocks of variable length binary data. In transmission we generally do not transmit the entire hash if the entire subtree is available, but a sample of the hash, which sample is different in every connection between every pair of peers. This means that a peer cannot easily fake keeping the data. Since the entire tree represents a global consensus, the content of every block must be understandable to every peer so that every peer can determine that every block is compliant with the global rules, so every block begins with a type identifier, which normally defines a fixed length for the block and the rule for generating a hash for that block. The tree never carries opaque data, though it may well carry a hash code that identifies off block chain opaque data.</p>
<p>Whatever protocol is employed, the software can and will routinely express the representation in its global canonical binary form, and in human readable form, so that when disagreements occur in reaching consensus the human masters of the machines can agree on what is going on and what should be done about it, regardless of the representation used internally to store and transmit data.&nbsp; </p>
<p>Although the consensus will be represented by both a human and a robotic board and CEO, there shall be no automatic rule update mechanism, except according to individual human choices. Each new consensus shall represent a consensus of peers managing a majority of the shares, and to change the rules will require that the board and the CEO persuade a substantial majority of the human masters of the peers to install new software, which they may do, or may refrain from doing.&nbsp; The consensus will be continually updated according to robotic choices, but the rules the robots are following are installed by humans or not.</p>
<p>During each block period, peers will be accumulating and sharing transactions for the current and previous block periods.&nbsp; </p>
<p>They will be trying to reach consensus for earlier block periods by sharing transactions <em>or by excluding transactions that did not get widely shared.&nbsp;</em> </p>
<p>They will attempt the Paxos protocol to announce a hash reflecting a widely shared consensus for block periods before that (more than three block periods before the present), starting with the block for which no widely shared consensus has yet been announced.&nbsp; </p>
@ -184,12 +184,12 @@
<hr/>
<h2>Zookos quadrangle, human readable names</h2>
<p>The root of the identity of every client and peer will be a public key, or the hash thereof. A client can reserve a human readable name, provided that they have &lt;<code>name_reservation_deposit</code>&gt; amount of money in the account.&nbsp; </p>
<p>The client UI will not let the user accidentally spend this money and lose the name, so it will be presented separately from the rest of his balance. A peer has to have such a name, which is also the name of the special client account that controls the peer.&nbsp; </p><p>
Retaining a human readable username require a minimum balance, which may be changed from time to time with due warning. This implies global consensus on a configuration YAML file. We have a rule that a new configuration file takes effect &lt;<code>transition_time</code>&gt; days after being voted in, provided that it is never voted out in the meantime, where &lt;<code>transition_time</code>&gt; is a value set by the configuration file currently in effect.&nbsp; </p><p>
You can reserve as many names as you please, but each requires a not altogether trivial amount of money.&nbsp; </p><p>
You can sell a username to another public key.&nbsp; </p><p>
The blockchain keeps the usernames, the owning key, and keeps the host for the owning key. You have to ask the host for further details, like contacting the actual owner of the user name.&nbsp; Host links you to the wallet, wallet links you to a small graphic, a 140 character text, optionally a larger text, and a web page.&nbsp; The guy who controls the web page will be able to create a websocket connection to his wallet that logs you in, and sends messages to both wallets.&nbsp; </p><p>
@ -207,9 +207,9 @@ It is useless to attempt immutable records of html pages, since they can, and do
To get a logged on browser to server connection between two blockchain identities, your wallet client will talk to the other guys wallet client, which will talk to his webserver, and your wallet will launch the browser with a regular https connection with logon cookie representing that a browser obeying a wallet with one blockchain key is logged on to a webserver that obeys a wallet with the other blockchain key.</p><p>
When the time comes to do a transaction, the wallet clients will exchange encrypted non html messages, in response to websocket messages by browser and webserver, and the payment will be linked to one such signed message, held off blockchain by both parties in their wallets. Logging on will always be done in the blockchain client, not the browser and the keystroke that actually agrees to make the payment will be done in the blockchain client, not the browser.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,9 +1,9 @@
---
description: >-
“A Cypherpunks Manifesto” was written by Eric Hughes and published on March 9, 1993.
“A Cypherpunks Manifesto” was written by Eric Hughes and published on March 9, 1993.
robots: 'index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1'
title: >-
Eric Hughes: A Cypherpunks Manifesto
Eric Hughes: A Cypherpunks Manifesto
viewport: 'width=device-width, initial-scale=1.0'
---
**Privacy is necessary for an open society in the electronic age. Privacy is not secrecy. A private matter is something one doesnt want the whole world to know, but a secret matter is something one doesnt want anybody to know. Privacy is the power to selectively reveal oneself to the world.**

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -12,15 +12,15 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Crypto currency</title>
<title>Crypto currency</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Delegated proof of stake</h1>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Delegated proof of stake</h1>
<h2>The consensus problem</h2>
In blockchains we call it <em>the consensus problem</em>. In the financial cryptography world, its <em>the double spend problem</em>, and in databases, <em>atomicity</em>. Which is to say this is a fundamental problem in all of computing science, not just blockchain.<p>
But with crypto currency, and replacements for the corporate form, we want to solve this problem in ways that prevent someone powerful from capturing the system.</p><p>

View File

@ -11,7 +11,7 @@
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Development Plan</title> </head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
@ -35,7 +35,7 @@ Then we create one where the records are Zooko identities, and the petnames are,
Then we create one where if you query with the private secret, you get the proof for the public key. (This has a separate database table linking keys to Zooko identities, implemented in SQLite.)</p><p>
And one where, for a public key, you get Merkle proof about what network address will serve that public key, or what public key or authoritative petname will serve the network address for that public key.</p><p>
Then a simple command line single threaded single client tcp time server, which we test using Putty in raw mode, and a simple command line client to test it, where the client outputs the result then shuts down the connection immediately.</p><p>
Then, a simple command line multi client tcp time server, and a simple command line client to test it, where the clients shuts down the connection when the user hits carriage return.</p><p>
@ -43,7 +43,7 @@ Then, a simple command line multi client tcp time server, and a simple command l
And then, implement the Merkle-patricia stuff, only with queries over the network between one keyed identity and another.</p><p>
And then, implement the same thing, only with key chains over the network, and the capacity to look up rapidly changing keys from an unchanging authority.</p><p>
Then, a client that runs a unit test when executed with the command line argument <code>test</code>, and displays all test results <code>test v</code>, only fail results <code>test a</code>, or halts awaiting user input at each fail <code>test</code>. The unit test, if run with an output file argument, outputs a file containing all failed test results.</p><p>
Then, finally, transactions in a block chain, where each transaction threads to the previous transactions. And then we have a cash and name system but not yet a decentralized distributed name system.</p><p>

View File

@ -1,5 +1,4 @@
---
lang: en
title: Install Dovecot on Debian 10
---
# Purpose
@ -129,11 +128,11 @@ Delete the old `service auth` definition, and replace it with:
```bash
# Postfix smtp-auth
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
```
@ -160,28 +159,28 @@ Add the line `auto = subscribe` to the special folders entries:
```default
mailbox Trash {
`auto = subscribe
special_use = \Trash
`auto = subscribe
special_use = \Trash
}
mailbox Junk {
`auto = subscribe
special_use = \Junk
`auto = subscribe
special_use = \Junk
}
mailbox Drafts {
`auto = subscribe
special_use = \Drafts
special_use = \Drafts
}
mailbox Trash {
`auto = subscribe
special_use = \Trash
`auto = subscribe
special_use = \Trash
}
mailbox Sent {
`auto = subscribe
special_use = \Sent
`auto = subscribe
special_use = \Sent
}
```
@ -227,7 +226,7 @@ You did set ufw to default deny incoming, so that IMAP and POP3 are blocked.
As before:
```bash
cat /var/log/mail.log | grep -E '(warning|error|fatal|panic)'
cat /var/log/mail.log | grep -E '(warning|error|fatal|panic)'
```
# Next steps

View File

@ -94,4 +94,3 @@ the precise type is irrelevant noise and a useless distraction.  You
generally want to know what is being done, not how it is being done.
Further, if you explicitly specify how it is being done, you are likely to
get it wrong, resulting in mysterious and disastrous type conversions.

View File

@ -77,6 +77,6 @@ proposed transaction, and then you speak the magic passphrase for that
transaction.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -12,16 +12,16 @@
<link rel="shortcut icon" href="../rho.ico">
<title>Furtive Fork Attack</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Furtive Fork Attack</h1>
<p style="background-color: #ccffcc; font-size: 80%;">These documents are
licensed under the <a href="http://creativecommons.org/licenses/by-sa/3.0/" rel="license">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>
</html>

View File

@ -24,7 +24,7 @@ can't see.
NTP is really great, I love it, so cool. It can set my computer's clock with a
precision measured in milliseconds. Very impressive it can do this just by
applying algorithms to hardware that is already present.
applying algorithms to hardware that is already present.
If one wants better, the next option is to get time from GPS. According to
gps.gov a specialized receiver, at a fixed location, can know the time
@ -69,7 +69,7 @@ changed.
Is there "entropy" in that system clock? Some, but only some. The PLL will
have some jitter, the precision of the lower frequency input clock will have
iffy precision and be subject to drift.
iffy precision and be subject to drift.
Is there "unguessability" in that system clock? Plenty! At least to any
observer at any distance (i.e., outside the computer).

View File

@ -335,5 +335,5 @@ client threads and no prospect of getting new ones. 
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -337,6 +337,6 @@ client threads and no prospect of getting new ones.&nbsp;
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -12,45 +12,45 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Hello World</title>
<title>Hello World</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Hello World</h1>
<p>In this day and age, a program that lives only on one machine, and a program without a gui (possibly a gui on another machine a thousand kilometers away) really is not much of a program</p>
<p>So, the minimum hello world program has to request the identifier of another machine, create a connection to that machine, and then display the response from that machine.</p>
<p>And the minimum hello world program should preserve state from one invocation to the next, so should have an SQLite database.</p>
<p>And we also have to support unit test, which requires a conventional procedural single thread with conventional unix style output stream.</p>
<p>So the minimum hello world program is going to have a boost.asio multi thread event handler for IO events, io_service and boost::thread_group, and an event oriented GUI thread, which may well be running under boost::thread group. To communicate with the GUI, we use <code>std::packaged_task, std::promise,</code> and <code>std::future.</code> The GUI OnIdle event checks if the packaged task is ready for it to do, and executes it. Any thread that wants to talk to the GUI waits for any existing <code>std::packaged_task</code> waiting on OnIdle to be completed, sets a new packaged task, and awaits the future to be returned, the future typically being a unique smart pointer containing a return struct, but may be merely an error code.</p>
<p>Create an io_service:<pre>boost::asio::io_service io_service;
// This object must outlive the threads that are
// attached to it by the run command, thus the threads
// This object must outlive the threads that are
// attached to it by the run command, thus the threads
// must be created in an inner scope</pre>
Create a work object work to stop its run() function from exiting if it has nothing else to do:<pre>
{ boost::asio::io_service::work work(io_service);
// The work object prevents io_context from
// returning from the run command when it has
Create a work object work to stop its run() function from exiting if it has nothing else to do:<pre>
{ boost::asio::io_service::work work(io_service);
// The work object prevents io_context from
// returning from the run command when it has
// consumed all available tasks.
// Start some worker threads:
// Probably should use C++14 threads,
// Start some worker threads:
// Probably should use C++14 threads,
// but the example code uses boost threads.
boost::thread_group threads;
for (std::size_t i = 0; i &lt; my_thread_count; ++i)
threads.create_thread(boost::bind(&amp;asio::io_service::run, &amp;io_service));
// This executes the run() command in each thread.
// Boost::bind is a currying function, that converts a function
// Boost::bind is a currying function, that converts a function
// with arguments into a function with no arguments.
// A half assed substitute for lambda
// Post the tasks to the io_service so they can be
// Post the tasks to the io_service so they can be
// performed by the worker threads:
io_service.post(boost::bind(an_expensive_calculation, 42));
@ -62,66 +62,66 @@
// );
io_service.post(boost::bind(a_long_running_task, 123));
// Finally, before the program exits shut down the
// Finally, before the program exits shut down the
// io_service and wait for all threads to exit:
... Do unit test, wait for the ui thread to join
io_service.stop();
io_service.stop();
// Stop will prevent new tasks from being run, even if they have been queued.
// If, on the other hand, we just let the work object go out of scope,
// If, on the other hand, we just let the work object go out of scope,
// and stop new tasks from being queued,
// the threads will return
// from the io_service object when it has run out of work
// the threads will return
// from the io_service object when it has run out of work
}
threads.join_all();
//Thread group is just a vector of threads Maybe should just use a vector,
//Thread group is just a vector of threads Maybe should just use a vector,
//so that I can address the gui thread separately.
// Now we can allow io_service to go out of scope.</pre>
So the starting thread of the program will run GUI unit tests through interthread communication to the OnIdle event of the GUI.
<p>Alternatively we could have a separate daemon process that communicates with the GUI through SQLite, which has the advantage that someone else has already written a wxSQLite wrapper.</p>
<p>So we are going to need a program with a UI, and a program that is a <a href="https://stackoverflow.com/questions/12810717/boost-asio-as-a-template-for-a-daemon#12825992">true daemon</a>. The daemon program will be network asynch, and the UI program will be UI asynch, but the UI program will get and send network jobs that are complete in very short time with the daemon, its networking operations will be procedural. This is acceptable because interprocess communication is high bandwidth and highly reliable</p>
<p>We are going to need to support chat capability, to combat seizure of names by the state, and to enable transactions to be firmly linked to agreements, and to enable people to transfer money without having to deal directly with cryptographic identities. The control and health interface to the daemon should be a chat capability on IPC. Which can be configured to only accept control from a certain wallet. Ideally one would like a general chatbot capability running python, but that is a project to be done after the minimum viable product release.</p>
<p>We will eventually need a chat capability that supports forms and automation, using the existing html facilities, but under chat protocol, not under https protocol. But if we allow generic web interactions, generic web interactions will block. One solution is a browser to chat interface. Your browser can launch a chat message in your wallet, and a received chat message may contain a link. The link launches a browser window, which contains a form, which generates a chat message. End user has to click to launch the form, fill it out, click on the browser to submit, then click again to send the chat message to submit from username1 to username2. End user may configure certain links to autofire, so that a local server and local browser can deal with messages encapsulated by the chat system.</p>
<p>We dont want the SSL problem, where the same client continually negotiates new connections with the same host, resulting in many unnecessary round trips and gaping security holes. We want every connection between one identity on one machine, and another identity on another machine, to use the same shared secret for as long as both machines are up and talk to each other from time to time, so every identity on one machine talks to other identities through a single daemon on its own machine, and a single daemon on the other identitys machine. If two identities on the same machine, talk though the one daemon. Each client machine talks IPC to the daemon.</p>
<p>There might be many processes, each engaging in procedural IPC with one daemon, but the daemon is asynch, and talks internet protocol with other daemons on other systems in the outside world. Processes need to talk through the daemon, because we only want to have one system global body of data concerning network congestion, bandwidth, transient network addresses, and shared secrets. System Ann in Paris might have many conversations running with System Bob in Shanghai, but we want one body of data concerning network address, shared secrets, cryptographic identity, flow control, and bandwidth, shared by each of these many conversations.</p>
<p>We assume conversations with the local daemon are IPC, hence fast, reliable, and high bandwidth. Failure of a conversation with the local daemon is a crash and debug. Failure of conversations across the internet is normal and frequent &mdash; there is no reliability layer outside your local system. Sending a request and not getting an answer is entirely normal and on the main path.</p>
<p>The one asynch daemon has the job of accumulating block chains which implement mutable Merkle-patricia dacs out of a series of immutable Merkle-patricia dacs. Sometimes part of the data from transient mutable Merkle-patricia trees is extracted and recorded in a more durable Merkle patricial trie. Every connection is a mutable Merkle-patricia dac, which is deleted (and some of the information preserved in another mutable Merkle-patricia tree) when the connection is shut down.</p>
<p>Every connection is itself a tree of connections, with flow control, bandwidth, crypto key, and network address information at the root connection, and every connection is the root of a mutable Merkle-patricia dac.</p>
<p>Each daemon is a host, so everyone runs a host on his own machine. That host might be behind a NAT, so to rendevous, would need a rendevous mediated by another host.</p>
<p>See also <a href="./generic_client_server_program.html">Generic Client Server</a> Program, and <a href="./generic_test.html">Generic Test</a>.</p>
<p>And in this day and age, we have a problem with the name system, in that the name system is subject to centralized state control, and the tcp-ssl system is screwed by the state, which is currently seizing crimethink domain names, and will eventually seize untraceable currency domain names.</p>
<p>So we not only need a decentralized system capable of generating consensus on who owns what cash, <a href="./name_system.html">we need a system capable of generating consensus on who owns which human readable globally unique names, and the mapping between human readable names, Zooko triangle names (which correspond to encryption public keys), and network addresses</a>.</p>
<p>Both a gui and inter machine communication daemon imply asynch, an event oriented architecture, which we should probably base on boost asynch. <a href="https://www.boost.org/doc/libs/1_67_0/doc/html/boost_asio/reference/signal_set.html">Have to implement the boost signal handling for shutdown signals</a>.</p>
<p>If, however, you are communicating between different machines, then the type has to be known at run time on both machines both machines have to be working of the identical Cap'n Proto type declaration but might, however, not know until run time which particular type declared in their identical Cap'n Proto declarations is arriving down the wire.</p>
<p>When setting up a communication channel at run time, have to verify the hash of the type declaration of the objects coming down the wire at run time, so that a connection cannot be set up, except both bodies of source code have the identical source for values on the wire on this particular connection.
<p>Obviously in this day and age, a program isolated on one solitary machine is kind of useless, so, what is needed is a machine that has a host of connections, each connection being an object, and the one running thread activates each connection object when data arrives for it, and then the object requests more data, and the thread abandons the object for a while, to deal with the next object for which data has arrived. When an object issues a request that may take a while to fulfill, it provides a block of code, a function literal for that event.</p>
<p>A language that has to handle Gui and a language that has to handle communication is event oriented. You continually declare handlers for events. Each handler is a particular method of a particular object, and new events of the same kind cannot be processed until the method returns. If handling the event might take a long time (disk access, user interface) the method declares new event handlers, and fires actions which will immediately or eventually lead to new events, and then immediately returns.</p>
<p>A language that has to handle Gui and a language that has to handle communication is event oriented. You continually declare handlers for events. Each handler is a particular method of a particular object, and new events of the same kind cannot be processed until the method returns. If handling the event might take a long time (disk access, user interface) the method declares new event handlers, and fires actions which will immediately or eventually lead to new events, and then immediately returns.</p>
<p>And when I say "immediately lead to new events", I mean "immediately after the method returns" the method may add a new event handler and possibly also a new event to the event queue. Steal Boost asynch, if Cap'n Proto has not already stolen something like Boost Asynch.</p>
<p>Each event is a message, and each message is a Cap'n Proto buffer, whose precise type may not necessarily be known until it actually arrives, but which has to be handled by code capable of handling that message type.</p>
<p style="background-color : #ccffcc; font-size:80%">This document is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">CreativeCommons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -215,6 +215,6 @@ can result in changes to user records on the server.&nbsp;
</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -194,5 +194,5 @@ needed.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -1,6 +1,6 @@
---
title:
Identity
Identity
---
# Syntax and semantics of identity
@ -606,19 +606,19 @@ up to entity to the immediate left of the slash to interpret, and if it
contains spaces and suchlike, use windows command line string
representation rules, quote marks and escape codes.
rho:#4397439879483774378943798
rho:Bob#4397439879483774378943798
Bob@#4397439879483774378943798
Receivables.#4397439879483774378943798
rho:#4397439879483774378943798
rho:Bob#4397439879483774378943798
Bob@#4397439879483774378943798
Receivables.#4397439879483774378943798
fit into the Uniform Resource Identifier scheme, poorly.
#4397439879483774378943798/foo
#4397439879483774378943798/foo
fits into the catchall leftover part of the Uniform Resource Identifier
scheme.
rho:Bob@Carol.Dave#4397439879483774378943798/foo
rho:Bob@Carol.Dave#4397439879483774378943798/foo
Does not fit into it in the slightest, and I think the idea of
compatibility with the URN system is a lost cause.
@ -742,11 +742,11 @@ between his network address and his public key.
signed
: anyone can check that some data is signed by key, and such data can
be passed around in a pool, usenet style.
be passed around in a pool, usenet style.
authenticated
: You got the data directly from an entity that has the key. You know
it came from that key, but cannot prove it to anyone else.
it came from that key, but cannot prove it to anyone else.
access
: A key with authorization from another key does something.
@ -756,10 +756,10 @@ authorization
authority
: A key with authority can give other keys authorization. Every key
has unlimited authority to do whatever it wants on its own
computers, and with its own reputation. It may grant other keys
authorization to access certain services on its computers and to
perform certain acts in the name of its reputation.
has unlimited authority to do whatever it wants on its own
computers, and with its own reputation. It may grant other keys
authorization to access certain services on its computers and to
perform certain acts in the name of its reputation.
We do not want the key on the server to be the master key that owns the
server name, because keys on servers are too easily stolen. So we want
@ -793,15 +793,15 @@ So, we need a collection of data akin to
`/etc/hosts`
: public data, the broad consensus, agreed data known to the wider
community.
community.
`~/.ssh/known_hosts`
: privately known data about the community that cannot be widely
shared because others might not trust it, and you might not trust
others. You may want to share this with those you trust, and get it
from those you trust, but your set of people that you trust is
unlikely to agree with someone elses and needs to be curated by a
human.
shared because others might not trust it, and you might not trust
others. You may want to share this with those you trust, and get it
from those you trust, but your set of people that you trust is
unlikely to agree with someone elses and needs to be curated by a
human.
`~/.ssh/config`
: And there is data you want to keep secret.
@ -1063,7 +1063,7 @@ behalf of this master key, sign as the entity at the root of the chain.
One typically signs data that will be delivered to the recipient
through an untrusted intermediary, as for example, downloading a
rhocoin wallet, or a peer making an assertion about the most recent
root or block of the blockchain.
root or block of the blockchain.
0. Bit indicates that this key may be used to make an offer in the
identity of the entity at the root of the chain.

View File

@ -14,9 +14,9 @@ p.center {text-align:center;}
<h1>IFF</h1>
<h2>Identification Friend or Foe</h2>
<p><a href="./index.html"> To Home page</a> </p><p>
The objective is to provide Identity Friend or Foe information, while leaking as little individual identity information as possible to identify oneself as a member of a group, for example employees entitled to access certain information, or guests entitled to access certain services, without flagging their specific individual identity far and wide.</p><p>
This outlines a scheme capable of being used for world domination and replacing everyones car keys, house keys, employee identification badge, and military dog tag, but before attempting that, we are going to start small by moving the corporate form to the blockchain, and putting your employee id in your crypto currency wallet, using it to escape dominion, rather than enforce it, a multiplicity of separate groups, rather than one enormous group. Then we conquer the world and enslave all the women, but our more modest initial target market is to support freedom of association, to put the corporate form on the blockchain.</p><p>
Any id scheme has potentially totalitarian uses, and I can easily imagine a whole lot of oppressive ways for using this scheme, but a scheme that links every interaction to one unique identity rooted in state documents, which is what we have now, is a lot more totalitarian than this proposal, because this proposal only shows you are fellow member of the group, or a person authorized to use a certain facility. Unlike the current system, it does not show which particular member of the group you are to all.
@ -36,11 +36,11 @@ If a member of the larger group misbehaves to another member of the larger group
This scheme inherently identifies you as a fellow member of a subgroup to another member of the same subgroup, but only identifies you as a member of the larger group to a member of the larger group who is not a member of your subgroup. We are going to start small (Hail fellow thought crime thinker, hail fellow black marketeer, hail fellow mafia member, hail fellow plotter, hail fellow employee of grey market block chain based business) but when we scale to embracing all of society someone is likely to be a member of multiple groups (reservist, employee, club member, family member) and will respond with the group of the pinger, if he is a member of that group as well. Your employee iff will unlock the door of your employers office building, and your family iff will unlock the door of your dads house.</p>
<h3>Fine grained control</h3><p>
The signature on the individual members frequently changing public key may contain additional information, above and beyond "friend", and he may receive several such signatures, each of which contains a different item of information. However, a basic ping should only reveal the information necessary.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 302 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 125 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@ -129,7 +129,7 @@
the server needs to model client guesses about client state, an
estimate about an estimate.</p>
<p>Server ids contain a link to a list of protocols they accept, signed by an authority responsible for those protocols. Protocols are identified by a stream of bytes, in which zero or more bytes have their high bit set, the stream being terminated by a byte with the high bit cleared. Lists of protocols are maintained by the system responsible for the list of server ids, with new lists only being added by a human decision at the distinguished proposer.</p>
<p>These lists only identify protocols capable of setting up a connection. When a connection is made, the client offers a list of subprotocols that it wants, and the server accepts the first one in the list that it recognizes and wants to handle. We will have no central authority for such subprotocol lists. Anyone can roll their own, and in the event that there come to be a lot of them, the implementer just chooses an identifier long enough and random enough that the risk of collision is small.</p>
<p style="background-color : #ccffcc;
font-size:80%">These documents are licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative

View File

@ -25,41 +25,41 @@ used to write any Interlockedxxx operation.  </p>
<pre>
long InterlockedXxx(
__inout long volatile *Target,
, whatever parameters we need for Xxx
)
{
long prevValue, prevCopy;
__inout long volatile *Target,
, whatever parameters we need for Xxx
)
{
long prevValue, prevCopy;
prevValue = *Target;
prevValue = *Target;
do {
if Xxx operations is illegal on prevValue, return with error code
do {
if Xxx operations is illegal on prevValue, return with error code
prevCopy = prevValue;
prevCopy = prevValue;
//
// prevValue will be the value that used to be Target if the exchange was made
// or its current value if the exchange was not made.
//
prevValue = InterlockedCompareExchange(Target, Xxx operation on prevCopy, prevValue);
//
// prevValue will be the value that used to be Target if the exchange was made
// or its current value if the exchange was not made.
//
prevValue = InterlockedCompareExchange(Target, Xxx operation on prevCopy, prevValue);
//
// If prevCopy == prevValue, then no one updated Target in between the deref at the top
// and the InterlockecCompareExchange afterward and we are done
//
} while (prevCopy != prevValue);
//
// If prevCopy == prevValue, then no one updated Target in between the deref at the top
// and the InterlockecCompareExchange afterward and we are done
//
} while (prevCopy != prevValue);
//
// [value] can be anything you want, but it is typically either
// a) The new value stored in Target. This is the type of return value that
// InterlockedIncrement returns
// or
// b) The new value is the previous value that was in Target. This si the
// type of return value that InterlockedOr or InterlockedExchange return
//
return [value];
}</pre><p>
//
// [value] can be anything you want, but it is typically either
// a) The new value stored in Target. This is the type of return value that
// InterlockedIncrement returns
// or
// b) The new value is the previous value that was in Target. This si the
// type of return value that InterlockedOr or InterlockedExchange return
//
return [value];
}</pre><p>
Structures larger than a long can be handled by
using InterlockedCompareExchange to add to a
@ -70,6 +70,5 @@ it for you.  </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -280,11 +280,11 @@ than polling it. And that second thread cannot use wxSockets.
[recently updated version on github]:https://github.com/wxWidgets/wxWidgets/tree/master/samples/sockets
Programming sockets and networking in C is a mess. The [much praised guide
to sockets](https://beej.us/guide/bgnet/html/single/bgnet.html) goes on for
pages and pages describing a “simple” example client server. Trouble is that
C, and old type Cish C++ exposes all the dangly bits. The [QT client server
example](https://stackoverflow.com/questions/5773390/c-network-programming),
Programming sockets and networking in C is a mess. The [much praised guide
to sockets](https://beej.us/guide/bgnet/html/single/bgnet.html) goes on for
pages and pages describing a “simple” example client server. Trouble is that
C, and old type Cish C++ exposes all the dangly bits. The [QT client server
example](https://stackoverflow.com/questions/5773390/c-network-programming),
on the other hand, is elegant, short, and self explanatory.
The code project has [example code written in C++](https://www.codeproject.com/Articles/13071/Programming-Windows-TCP-Sockets-in-C-for-the-Begin), but it is still mighty intimidating compared to the QT client server example. I have yet to look at the wxWidgets client server examples but looking for wxWidgets networking code has me worried that it is a casual afterthought, not adequately supported or adequately used.
@ -398,15 +398,15 @@ Which leads me to digress how we are going to handle protocol updates:
1. Distribute software capable of handling the update.
1. A proposed protocol update transaction is placed on the blockchain.
1. Peers indicate capability to handle the protocol update. Or ignore it,
or indicate that they cannot. If a significant number of peers
indicate capability, peers that lack capability push their owners for
an update.
or indicate that they cannot. If a significant number of peers
indicate capability, peers that lack capability push their owners for
an update.
1. A proposal to start emitting data that can only handled by more
recent peers is placed on the blockchain.
recent peers is placed on the blockchain.
1. If a significant number of peers vote yes, older peers push more
vigorously for an update.
vigorously for an update.
1. If a substantial supermajority votes yes by a date specified in the
proposal, then they start emitting data in the new format on a date
proposal, then they start emitting data in the new format on a date
shortly afterwards. If no supermajority by the due date, the
proposal is dead.
@ -546,12 +546,12 @@ nothing message passing between threads.
------------------------------------------------------------------------
[Facebook Folly library]provides many tools, with such documentation as
exists amounting to “read the f\*\*\*\*\*g header files”. They are reputed
to have the highest efficiency queuing for interthread communication, and it
is plausible that they do, because facebook views efficiency as critical.
[Facebook Folly library]provides many tools, with such documentation as
exists amounting to “read the f\*\*\*\*\*g header files”. They are reputed
to have the highest efficiency queuing for interthread communication, and it
is plausible that they do, because facebook views efficiency as critical.
Their [queuing header file]
(https://github.com/facebook/folly/blob/master/folly/MPMCQueue.h) gives us
(https://github.com/facebook/folly/blob/master/folly/MPMCQueue.h) gives us
`MPMCQueue`.
[Facebook Folly library]:https://github.com/facebook/folly/blob/master/folly/
@ -712,11 +712,11 @@ wxWidget wraps WSASelect, which is the behavior we need.
Microsoft has written the asynch sockets you need, and wxWidgets has wrapped
them in an OS independent fashion.
WSAAsyncSelect
WSAAsyncSelect
WSAEventSelect
WSAEventSelect
select
select
Using wxSockets commits us to having a single thread managing everything. To
get around the power limit inherent in that, have multiple peers under

View File

@ -18,4 +18,3 @@ bool app::OnInit()
}
wxIMPLEMENT_APP(app);

View File

@ -1,6 +1,6 @@
---
title:
C++ Automatic Memory Management
C++ Automatic Memory Management
---
# Memory Safety
Modern, mostly memory safe C++, is enforced by:\
@ -14,7 +14,7 @@ Modern, mostly memory safe C++, is enforced by:\
complains about, in practice usually all of them, though I suppose that as
the project gets bigger, some will slip through.
static_assert(__cplusplus >= 201703, "C version of out of date");
static_assert(__cplusplus >= 201703, "C version of out of date");
The gsl adds span for pointer arithmetic, where the
size of the array pointed to is kept with the pointer for safe iteration and
@ -29,75 +29,75 @@ std::make_unique, std::make_shared create pointers to memory managed
objects. (But single objects, not an array, use spans for pointer
arithmetic)
auto sp = std::make_shared<int>(42);
std::weak_ptr<T> wp{sp};
auto sp = std::make_shared<int>(42);
std::weak_ptr<T> wp{sp};
# Array sizing and allocation
/* This code creates a bunch of "brown dog" strings on the heap to test automatic memory management. */
char ca[]{ "red dog" }; //Automatic array sizing
std::array<char,8> arr{"red dog"}; //Requires #include <array>
/* No automatic array sizing, going to have to count your initializer list. */
/* The pointer of the underlying array is referenced by &arr[0] but arr is not the underlying array, nor a pointer to it. */
/* [0] invokes operator[], and operator[] is the member function that accesses the underlying array.*/
/* The size of the underlying array is referenced by arr.size();*/
/* size known at compile time, array can be returned from a function getting the benefits of stack allocation.*/
// can be passed around like POD
char *p = new char[10]{ "brown dog" }; //No automatic array
// sizing for new
std::unique_ptr<char[]>puc{ p }; // Now you do not have
// to remember to delete p
auto puc2 = std::move(puc); /* No copy constructor. Pass by reference, or pass a view, such as a span.*/
std::unique_ptr<char> puc3{ new char[10]{ "brown dog" } };
/* Array size unknown at compile or run time, needs a span, and you have to manually count the initialization list. */
/* Compiler guards against overflow, but does not default to the correct size.*/
/* You can just guess a way too small size, and the compiler in its error message will tell you what the size should be. */
auto pu = std::make_unique<char[]>(10); // uninitialized,
// needs procedural initialization.
/* This code creates a bunch of "brown dog" strings on the heap to test automatic memory management. */
char ca[]{ "red dog" }; //Automatic array sizing
std::array<char,8> arr{"red dog"}; //Requires #include <array>
/* No automatic array sizing, going to have to count your initializer list. */
/* The pointer of the underlying array is referenced by &arr[0] but arr is not the underlying array, nor a pointer to it. */
/* [0] invokes operator[], and operator[] is the member function that accesses the underlying array.*/
/* The size of the underlying array is referenced by arr.size();*/
/* size known at compile time, array can be returned from a function getting the benefits of stack allocation.*/
// can be passed around like POD
char *p = new char[10]{ "brown dog" }; //No automatic array
// sizing for new
std::unique_ptr<char[]>puc{ p }; // Now you do not have
// to remember to delete p
auto puc2 = std::move(puc); /* No copy constructor. Pass by reference, or pass a view, such as a span.*/
std::unique_ptr<char> puc3{ new char[10]{ "brown dog" } };
/* Array size unknown at compile or run time, needs a span, and you have to manually count the initialization list. */
/* Compiler guards against overflow, but does not default to the correct size.*/
/* You can just guess a way too small size, and the compiler in its error message will tell you what the size should be. */
auto pu = std::make_unique<char[]>(10); // uninitialized,
// needs procedural initialization.
/* span can be trivially created from a compile time declared array, an std:array or from a run time std:: vector, but then these things already have the characteristics of a span, and they own their own storage. */
/* You would use a span to point into an array, for example a large blob containing smaller blobs.*/
/* span can be trivially created from a compile time declared array, an std:array or from a run time std:: vector, but then these things already have the characteristics of a span, and they own their own storage. */
/* You would use a span to point into an array, for example a large blob containing smaller blobs.*/
// Placement New:
char *buf = new char[1000]; //pre-allocated buffer
char *p = buf;
MyObject *pMyObject = new (p) MyObject();
p += (sizeof(MyObject+7)/8)*8
/* Problem is that you will have to explictly call the destructor on each object before freeing your buffer. */
/* If your objects are POD plus code for operating on POD, you dont have to worry about destructors.*/
// A POD object cannot do run time polymorphism.
/* The pointer referencing it has to be of the correct compile time type, and it has to explicitly have the default constructor when constructed with no arguments.*/
/* If, however, you are building a tree in the pre-allocated buffer, no sweat. */
/* You just destruct the root of the tree, and it recursively destructs all its children. */
/* If you want an arbitrary graph, just make sure you have owning and non owning pointers, and the owning pointers form a tree. */
/* Anything you can do with run time polymorphism, you can likely do with a type flag.*/
// Placement New:
char *buf = new char[1000]; //pre-allocated buffer
char *p = buf;
MyObject *pMyObject = new (p) MyObject();
p += (sizeof(MyObject+7)/8)*8
/* Problem is that you will have to explictly call the destructor on each object before freeing your buffer. */
/* If your objects are POD plus code for operating on POD, you dont have to worry about destructors.*/
// A POD object cannot do run time polymorphism.
/* The pointer referencing it has to be of the correct compile time type, and it has to explicitly have the default constructor when constructed with no arguments.*/
/* If, however, you are building a tree in the pre-allocated buffer, no sweat. */
/* You just destruct the root of the tree, and it recursively destructs all its children. */
/* If you want an arbitrary graph, just make sure you have owning and non owning pointers, and the owning pointers form a tree. */
/* Anything you can do with run time polymorphism, you can likely do with a type flag.*/
static_assert ( std::is_pod<MyType>() , "MyType for some reason is not POD" );
class MyClass
{
public:
MyClass()=default; // Otherwise unlikely to be POD
MyClass& operator=(const MyClass&) = default; // default assignment Not actually needed, but just a reminder.
};
static_assert ( std::is_pod<MyType>() , "MyType for some reason is not POD" );
class MyClass
{
public:
MyClass()=default; // Otherwise unlikely to be POD
MyClass& operator=(const MyClass&) = default; // default assignment Not actually needed, but just a reminder.
};
### alignment
### alignment
```c++
// every object of type struct_float will be aligned to alignof(float) boundary
```c++
// every object of type struct_float will be aligned to alignof(float) boundary
// (usually 4)
struct alignas(float) struct_float {
// your definition here
// your definition here
};
// every object of type sse_t will be aligned to 256-byte boundary
struct alignas(256) sse_t
{
float sse_data[4];
float sse_data[4];
};
// the array "cacheline" will be aligned to 128-byte boundary
alignas(128) char cacheline[128];
```
```
# Construction, assignment, and destruction
@ -119,25 +119,25 @@ deleted.
Copy constructors
A(const A& a)
A(const A& a)
Copy assignment
A& operator=(const A other)
A& operator=(const A other)
Move constructors
class_name ( class_name && other)
A(A&& o)
D(D&&) = default;
class_name ( class_name && other)
A(A&& o)
D(D&&) = default;
Move assignment operator
V& operator=(V&& other)
V& operator=(V&& other)
Move constructors
class_name ( class_name && )
class_name ( class_name && )
## rvalue references
@ -161,21 +161,21 @@ forwarding the resources.
where `std::forward` is defined as follows:
template< class T > struct remove_reference {
typedef T type;
};
template< class T > struct remove_reference<T&> {
typedef T type;
};
template< class T > struct remove_reference<T&&> {
typedef T type;
};
template< class T > struct remove_reference {
typedef T type;
};
template< class T > struct remove_reference<T&> {
typedef T type;
};
template< class T > struct remove_reference<T&&> {
typedef T type;
};
template<class S>
S&& forward(typename std::remove_reference<S>::type& a) noexcept
{
return static_cast<S&&>(a);
}
template<class S>
S&& forward(typename std::remove_reference<S>::type& a) noexcept
{
return static_cast<S&&>(a);
}
`std::move(t)` and `std::forward(t)` don't actually perform any action
in themselves, rather they cause the code referencing `t` to use the intended
@ -192,12 +192,12 @@ anyway.
When you declare your own constructors, copiers, movers, and deleters,
you should generally mark them noexcept.
struct foo {
foo() noexcept {}
foo( const foo & ) noexcept { }
foo( foo && ) noexcept { }
~foo() {}
};
struct foo {
foo() noexcept {}
foo( const foo & ) noexcept { }
foo( foo && ) noexcept { }
~foo() {}
};
Destructors are noexcept by default. If a destructor throws an exception as
a result of a destruction caused by an exception, the result is undefined,
@ -207,8 +207,8 @@ ways that are unlikely to be satisfactory.
If you need to define a copy constructor, probably also need to define
an assignment operator.
t2 = t1; /* calls assignment operator, same as "t2.operator=(t1);" */
Test t3 = t1; /* calls copy constructor, same as "Test t3(t1);" */
t2 = t1; /* calls assignment operator, same as "t2.operator=(t1);" */
Test t3 = t1; /* calls copy constructor, same as "Test t3(t1);" */
## casts
@ -219,12 +219,12 @@ in the source class instead of the destination class, hence most useful
when you are converting to a generic C type, or to the type of an
external library that you do not want to change.
struct X {
int y;
operator int(){ return y; }
operator const int&(){ return y; } /* C habits would lead you to incorrectly expect "return &y;", which is what is implied under the hood. */
operator int*(){ return &y; } // Hood is opened.
};
struct X {
int y;
operator int(){ return y; }
operator const int&(){ return y; } /* C habits would lead you to incorrectly expect "return &y;", which is what is implied under the hood. */
operator int*(){ return &y; } // Hood is opened.
};
Mpir, the Visual Studio skew of GMP infinite precision library, has some
useful and ingenious template code for converting C type functions of
@ -257,20 +257,20 @@ allocation and redundant copy.
# Template specialization
namespace N {
template<class T> class Y { /*...*/ }; // primary template
template<> class Y<double> ; // forward declare specialization for double
}
template<>
class N::Y<double> { /*...*/ }; // OK: specialization in same namespace
namespace N {
template<class T> class Y { /*...*/ }; // primary template
template<> class Y<double> ; // forward declare specialization for double
}
template<>
class N::Y<double> { /*...*/ }; // OK: specialization in same namespace
is used when you have sophisticated template code, because you have to
use recursion for looping as the Mpir system uses it to evaluate an
arbitrarily complex recursive expression but I think my rather crude
implementation will not be nearly so clever.
extern template int fun(int);
/*prevents redundant instantiation of fun in this compilation unit and thus renders the code for fun unnecessary in this compilation unit.*/
extern template int fun(int);
/*prevents redundant instantiation of fun in this compilation unit and thus renders the code for fun unnecessary in this compilation unit.*/
# Template traits, introspection
@ -308,34 +308,34 @@ implements that functionality entirely up to the derived class.
Interface classes are often named beginning with an I. Heres a sample
interface class:.
class IErrorLog
{
public:
virtual bool openLog(const char *filename) = 0;
virtual bool closeLog() = 0;
class IErrorLog
{
public:
virtual bool openLog(const char *filename) = 0;
virtual bool closeLog() = 0;
virtual bool writeError(const char *errorMessage) = 0;
virtual bool writeError(const char *errorMessage) = 0;
virtual ~IErrorLog() {} // make a virtual destructor in case we delete an IErrorLog pointer, so the proper derived destructor is called
// Notice that the virtual destructor is declared to be trivial, but not declared =0;
};
virtual ~IErrorLog() {} // make a virtual destructor in case we delete an IErrorLog pointer, so the proper derived destructor is called
// Notice that the virtual destructor is declared to be trivial, but not declared =0;
};
[Override
specifier](https://en.cppreference.com/w/cpp/language/override)
struct A
{
virtual void foo();
void bar();
};
struct A
{
virtual void foo();
void bar();
};
struct B : A
{
void foo() const override; // Error: B::foo does not override A::foo
// (signature mismatch)
void foo() override; // OK: B::foo overrides A::foo
void bar() override; // Error: A::bar is not virtual
};
struct B : A
{
void foo() const override; // Error: B::foo does not override A::foo
// (signature mismatch)
void foo() override; // OK: B::foo overrides A::foo
void bar() override; // Error: A::bar is not virtual
};
Similarly [Final
specifier](https://en.cppreference.com/w/cpp/language/final)
@ -344,11 +344,11 @@ specifier](https://en.cppreference.com/w/cpp/language/final)
storage](http://www.cplusplus.com/reference/type_traits/aligned_storage/)for
use with placement new
void* p = aligned_alloc(sizeof(NotMyClass));
MyClass* pmc = new (p) MyClass; //Placement new.
// ...
pmc->~MyClass(); //Explicit call to destructor.
aligned_free(p);.
void* p = aligned_alloc(sizeof(NotMyClass));
MyClass* pmc = new (p) MyClass; //Placement new.
// ...
pmc->~MyClass(); //Explicit call to destructor.
aligned_free(p);.
# GSL: Guideline Support Library
@ -357,10 +357,10 @@ are suggested for use by the C++ Core Guidelines maintained by the
Standard C++ Foundation. This repo contains [Microsofts implementation
of GSL](https://github.com/Microsoft/GSL).
git clone https://github.com/Microsoft/GSL.git
cd gsl
git tag
git checkout tags/v2.0.0
git clone https://github.com/Microsoft/GSL.git
cd gsl
git tag
git checkout tags/v2.0.0
Which implementation mostly works on gcc/Linux, but is canonical on
Visual Studio.
@ -393,37 +393,37 @@ makes the relationship between the templated base class or classes and
the derived class cyclic, so that the derived class tends to function as
real base class. Useful for mixin classes.
template <typename T> class Mixin1{
public:
// ...
void doSomething() //using the other mixin classes and the derived class T
{
T& derived = static_cast<T&>(*this);
// use derived...
}
private:
mixin1(){}; // prevents the class from being used outside the mix)
friend T;
};
template <typename T> class Mixin1{
public:
// ...
void doSomething() //using the other mixin classes and the derived class T
{
T& derived = static_cast<T&>(*this);
// use derived...
}
private:
mixin1(){}; // prevents the class from being used outside the mix)
friend T;
};
template <typename T> class Mixin2{
{
public:
// ...
void doSomethingElse()
{
T& derived = static_cast<T&>(*this);
// use derived...
}
private:
Mixin2(){};
friend T;
};
template <typename T> class Mixin2{
{
public:
// ...
void doSomethingElse()
{
T& derived = static_cast<T&>(*this);
// use derived...
}
private:
Mixin2(){};
friend T;
};
class composite: public mixin1<composite>, public mixin2<composite>{
composite( int x, char * y): mixin1(x), mixin2(y[0]) { ...}
composite():composite(7,"a" ){ ...}
}
class composite: public mixin1<composite>, public mixin2<composite>{
composite( int x, char * y): mixin1(x), mixin2(y[0]) { ...}
composite():composite(7,"a" ){ ...}
}
# Aggregate initialization
@ -432,12 +432,12 @@ constructor is implied default.
A class can be explicitly defined to take aggregate initialization
Class T{
T(std::initializer_list<const unsigned char> in){
for (auto i{in.begin); i<in.end(); i++){
do stuff with i
}
}
Class T{
T(std::initializer_list<const unsigned char> in){
for (auto i{in.begin); i<in.end(); i++){
do stuff with i
}
}
but that does not make it of aggregate type. Aggregate type has *no*
constructors except default and deleted constructors
@ -446,7 +446,7 @@ constructors except default and deleted constructors
To construct a lambda in the heap:
auto p = new auto([a,b,c](){})
auto p = new auto([a,b,c](){})
Objects inside the lambda are constructed in the heap.
@ -454,22 +454,22 @@ similarly placement `new`, and `unique_ptr`.
To template a function that takes a lambda argument:
template <typename F>
void myFunction(F&& lambda){
//some things
template <typename F>
void myFunction(F&& lambda){
//some things
You can put a lambda in a class using decltype,and pass it around for
continuations, though you would probably need to template the class:
template<class T>class foo {
public:
T func;
foo(T in) :func{ in } {}
auto test(int x) { return func(x); }
};
....
auto bar = [](int x)->int {return x + 1; };
foo<(bar)>foobar(bar);
template<class T>class foo {
public:
T func;
foo(T in) :func{ in } {}
auto test(int x) { return func(x); }
};
....
auto bar = [](int x)->int {return x + 1; };
foo<(bar)>foobar(bar);
But we had to introduce a name, bar, so that decltype would have
something to work with, which lambdas are intended to avoid. If we are
@ -480,11 +480,11 @@ is very possibly pod.
If we are sticking a lambda around to be called later, might copy it by
value into a templated class, or might put it on the heap.
auto bar = []() {return 5;};
auto bar = []() {return 5;};
You can give it to a std::function:
auto func_bar = std::function<int()>(bar);
auto func_bar = std::function<int()>(bar);
In this case, it will get a copy of the value of bar. If bar had
captured anything by value, there would be two copies of those values on
@ -495,9 +495,9 @@ bar, as per the rules of cleaning up stack variables.
You could just as easily allocate one on the heap:
auto bar_ptr = std::make_unique(bar);
auto bar_ptr = std::make_unique(bar);
std::function <int(int)> increm{[](int arg{return arg+1;}}
std::function <int(int)> increm{[](int arg{return arg+1;}}
presumably does this behind the scenes
@ -549,43 +549,43 @@ which can result in messy reallocations.
One way is to drop back into old style C, and tell C++ not to fuck
around.
struct Packet
{
unsigned int bytelength;
unsigned int data[];
struct Packet
{
unsigned int bytelength;
unsigned int data[];
private:
// Will cause compiler error if you misuse this struct
void Packet(const Packet&);
void operator=(const Packet&);
};
Packet* CreatePacket(unsigned int length)
{
Packet *output = (Packet*) malloc((length+1)*sizeof(Packet));
output->bytelength = length;
return output;
}
private:
// Will cause compiler error if you misuse this struct
void Packet(const Packet&);
void operator=(const Packet&);
};
Packet* CreatePacket(unsigned int length)
{
Packet *output = (Packet*) malloc((length+1)*sizeof(Packet));
output->bytelength = length;
return output;
}
Another solution is to work around C++s inability to handle variable
sized objects by fixing your hash function to handle disconnected data.
# for_each
template<class InputIterator, class Function>
Function for_each(InputIterator first, InputIterator last, Function fn){
while (first!=last) {
fn (*first);
++first;
}
return move(fn);
}
template<class InputIterator, class Function>
Function for_each(InputIterator first, InputIterator last, Function fn){
while (first!=last) {
fn (*first);
++first;
}
return move(fn);
}
# Range-based for loop
for(auto x: temporary_with_begin_and_end_members{ code;}
for(auto& x: temporary_with_begin_and_end_members{ code;}
for(auto&& x: temporary_with_begin_and_end_members{ code;}
for (T thing = foo(); auto& x : thing.items()) { code; }
for(auto x: temporary_with_begin_and_end_members{ code;}
for(auto& x: temporary_with_begin_and_end_members{ code;}
for(auto&& x: temporary_with_begin_and_end_members{ code;}
for (T thing = foo(); auto& x : thing.items()) { code; }
The types of the begin_expr and the end_expr do not have to be the same,
and in fact the type of the end_expr does not have to be an iterator: it
@ -598,14 +598,14 @@ member named begin and a member named end (regardless of the type or
accessibility of such member), then begin_expr is \_\_range.begin() and
end_expr is \_\_range.end();
for (T thing = foo(); auto x : thing.items()) { code; }
for (T thing = foo(); auto x : thing.items()) { code; }
Produces code equivalent to:
T thing = foo();
auto bar = thing.items();
auto enditer = bar.end;
for (auto iter = bar.begin(); iter != enditer; ++iter) {
x = *iter;
code;
}
T thing = foo();
auto bar = thing.items();
auto enditer = bar.end;
for (auto iter = bar.begin(); iter != enditer; ++iter) {
x = *iter;
code;
}

View File

@ -201,7 +201,7 @@ and whose extension to atomic locks is not obvious.
Suppose you are doing atomic operations, but some operations might be
expensive and lengthy. You really only want to spin lock on amending data
that is small and all in close together in memory, so on your second spin,
the lock has likely been released.
the lock has likely been released.
Well, if you might need to sleep a thread, you need a regular mutex, but
how are you going to interface spin locks and regular mutexes?
@ -258,7 +258,7 @@ Thus one winds up with what suspect it the Tokio solution, a stack that
is a tree, rather than a stack.
Hence the equivalence between node.js and nginx event oriented
programming, and Go concurrent programming.
programming, and Go concurrent programming.
# costs
@ -287,21 +287,21 @@ is not in fact terribly useful for anything you are interested in doing.
```C++
typedef enum memory_order {
memory_order_relaxed, // relaxed
memory_order_consume, // consume
/* No one, least of all compiler writers, understands what
"consume" does.
It has consequences which are difficult to understand or predict,
and which are apt to be inconsistent between architectures,
libraries, and compilers. */
memory_order_acquire, // acquire
memory_order_release, // release
memory_order_acq_rel, // acquire/release
memory_order_seq_cst // sequentially consistent
/* "sequentially consistent" interacts with the more commonly\
used acquire and release in ways difficult to understand or
predict, and in ways that compiler and library writers
disagree on. */
memory_order_relaxed, // relaxed
memory_order_consume, // consume
/* No one, least of all compiler writers, understands what
"consume" does.
It has consequences which are difficult to understand or predict,
and which are apt to be inconsistent between architectures,
libraries, and compilers. */
memory_order_acquire, // acquire
memory_order_release, // release
memory_order_acq_rel, // acquire/release
memory_order_seq_cst // sequentially consistent
/* "sequentially consistent" interacts with the more commonly\
used acquire and release in ways difficult to understand or
predict, and in ways that compiler and library writers
disagree on. */
} memory_order;
```
@ -337,8 +337,8 @@ between the two coprocesses, without any need for locks or atomics, but
with a need for stack fixups. But Node.js seems to get by fine with code
continuations instead of Gos stack fixups.
A buffered channel is just a fixed size block of memory with alignment,
size, and atomic wrapping read and write pointers.
A buffered channel is just a fixed size block of memory with alignment,
size, and atomic wrapping read and write pointers.
Why do they need to be atomic?
@ -384,7 +384,7 @@ immediately after firing the notify.
But it could happen that if we try to avoid unnecessarily grabbing the
mutex, one thread sees the other thread awake, just when it is going to
sleep, so I fear I have missed a spin lock somewhere in this story.
sleep, so I fear I have missed a spin lock somewhere in this story.
If we want to avoid unnecessary resort to mutex, we have to spin lock on a
state machine that governs entry into mutex resolution. Each thread makes
@ -440,54 +440,54 @@ static_assert(__STDCPP_THREADS__==1, "Needs threads");
// As thread resources have to be managed, need to be wrapped in
// RAII
class ThreadRAII {
std::thread & m_thread;
std::thread & m_thread;
public:
// As a thread object is moveable but not copyable, the thread obj
// needs to be constructed inside the invocation of the ThreadRAII
// constructor. */
ThreadRAII(std::thread & threadObj) : m_thread(threadObj){}
~ThreadRAII(){
// Check if thread is joinable then detach the thread
if(m_thread.joinable()){
m_thread.detach();
}
}
};
ThreadRAII(std::thread & threadObj) : m_thread(threadObj){}
~ThreadRAII(){
// Check if thread is joinable then detach the thread
if(m_thread.joinable()){
m_thread.detach();
}
}
};
```
Examples of thread construction
```C++
void foo(char *){
}
void foo(char *){
}
class foo_functor
{
public:
void operator()(char *){
}
};
class foo_functor
{
public:
void operator()(char *){
}
};
int main(){
ThreadRAII thread_one(std::thread (foo, "one"));
ThreadRAII thread_two(
std::thread (
(foo_functor()),
"two"
)
);
const char three[]{"three"};
ThreadRAII thread_lambda(
std::thread(
[three](){
}
)
);
}
int main(){
ThreadRAII thread_one(std::thread (foo, "one"));
ThreadRAII thread_two(
std::thread (
(foo_functor()),
"two"
)
);
const char three[]{"three"};
ThreadRAII thread_lambda(
std::thread(
[three](){
}
)
);
}
```
C++ has a bunch of threading facilities that are designed for the case that

View File

@ -9,7 +9,7 @@ other processes that do the actual work. While git-bash.exe is undocumented, `m
Example Windows shortcut to bash script: `/x/src/wallet/docs/mkdocs.sh`
"C:\Program Files\Git\git-bash.exe" --cd=X:\src\wallet --needs-console --no-hide --command=usr\bin\bash.exe --login -i docs/mkdocs.sh
"C:\Program Files\Git\git-bash.exe" --cd=X:\src\wallet --needs-console --no-hide --command=usr\bin\bash.exe --login -i docs/mkdocs.sh
Notice that the paths to the left of the invocation of `bash` are in Windows
format, and the paths to the right of the invocation of bash are in gnu
@ -46,7 +46,7 @@ Fails to ensure that there is a Win32 console
`--hide`
Hides the console window. This makes sense if you are launching a script and
not expecting any feedback. But it means that the script has no means to
give you an error message.
give you an error message.
`--no-hide`
Does not hide the console window.

View File

@ -4,7 +4,7 @@
margin-left: 1em;
}
p.center {text-align:center;}
table {
border-collapse: collapse;
}
@ -18,4 +18,3 @@
}
</style>
<link rel="shortcut icon" href="../../rho.ico">

View File

@ -1,45 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Scripting</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Scripting</h1><p>
Initially we intend to implement human to human secret messaging, with money that can be transferred in the message, and the capability to make messages public and provably linked with an identity</p><p>
But obviously we are eventually going to need bot responses, and bot scripts that can interact with the recipient within a sandbox. Not wanting to repeat the mistakes of the internet, we will want the same bot language generating responses, and interacting with the recipient.</p><p>
There is a <a href="https://github.com/dbohdan/embedded-scripting-languages">list</a> of embeddable scripting languages.</p><p>
Lua and python are readily embeddable, but <a href="https://benchmarksgame-team.pages.debian.net/benchmarksgame/">the language shootout</a> tells us they are terribly slow.</p><p>
Lisp is sort of embeddable, startlingly fast, and is enormously capable, but it is huge, and not all that portable.</p><p>
ES (javascript) is impressively fast in its node.js implementation, which does not necessarily imply the embeddable versions are fast.</p><p>
Very few of the scripting languages make promises about sandbox capability, and I know there is enormous grief over sandboxing Javascript. It can be done, but it is a big project.</p><p>
Angelscript <em>does</em> make promises about sandbox capability, but I have absolutely no information its capability and performance.</p><p>
Tcl is event loop oriented.</p><p>
But hell, I have an event loop. I want my events to put data in memory, then launch a script for the event, the script does something with the data, generates some new data, fires some events that will make use of the data, and finishes.</p><p>
Given that I want programs to be short and quickly terminate, maybe we do not need dynamic memory management and garbage collection. Maybe arkscript would handle it.</p>
<p style="background-color : #ccffcc; font-size:80%">This document is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">CreativeCommons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -0,0 +1,52 @@
---
title: Scripting
---
Initially we intend to implement human to human secret messaging, with
money that can be transferred in the message, and the capability to make
messages public and provably linked with an identity
But obviously we are eventually going to need bot responses, and bot
scripts that can interact with the recipient within a sandbox. Not wanting
to repeat the mistakes of the internet, we will want the same bot language
generating responses, and interacting with the recipient.
There is a [list](https://github.com/dbohdan/embedded-scripting-languages) of embeddable scripting languages.
Lua and python are readily embeddable, but [the language shootout](https://benchmarksgame-team.pages.debian.net/benchmarksgame/) tells us
they are terribly slow.
Lua, however, has `LuaJIT`, which is about ten times faster than `Lua`, which
makes it only about four or five times slower than JavaScript under
`node.js`. It is highly portable, though I get the feeling that porting it to
windows is going to be a pain, but then it is never going to be expected to
call the windows file and gui operations.
Lisp is sort of embeddable, startlingly fast, and is enormously capable, but
it is huge, and not all that portable.
ES (JavaScript) is impressively fast in its node.js implementation, which does
not necessarily imply the embeddable versions are fast.
Very few of the scripting languages make promises about sandbox
capability, and I know there is enormous grief over sandboxing JavaScript.
It can be done, but it is a big project.
Angelscript *does* make promises about sandbox capability, but I have
absolutely no information its capability and performance.
Tcl is event loop oriented.
But hell, I have an event loop. I want my events to put data in memory,
then launch a script for the event, the script does something with the data,
generates some new data, fires some events that will make use of the data, and
finishes.
Given that I want programs to be short and quickly terminate, maybe we
do not need dynamic memory management and garbage collection.
Lua is slowed by dynamic memory management. But with event
orientation, dynamic memory management is complete waste, since your
only memory management is allocating continuation objects to be fired on
the next event - which is to say, all memory management is explicit, when
an event handler detaches.

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -14,15 +14,15 @@
td, th {
padding: 6px;
border: solid 1px black;
}
}
</style>
<link rel="shortcut icon" href="../../rho.ico">
<title>Serialization and Canonical form</title>
<title>Serialization and Canonical form</title>
</head>
<body>
<p><a href="../libraries.html"> To Home page</a> </p>
<h1>Serialization and Canonical form</h1><p>
<p><a href="../libraries.html"> To Home page</a> </p>
<h1>Serialization and Canonical form</h1><p>
On reflection, using a serialization library is massive overkill, since we are serializing records that always have a record type identifier, and we are serializing hashes, signatures, and utf8 strings, which should already be in network order, so the only thing we have to serialize is ints, for which we might as well write our own serialization code, in an object of type serialization buffer<pre>
namespace ro {
template&lt;class T&gt; class iserial : public gsl::span&lt;uint8_t&gt; {
@ -67,10 +67,10 @@ namespace ro {
But all our money amounts will typically be around 32 bits or longer, maximum 64 bits, hence untranslatable to valid utf8. Might represent them as a decimal exponent and small integer.</p><p>
We might need floating point for graph analysis, but that is a long way in the future. Sqlite3 uses big-endian IEEE 754-2008 64-bit floating point numbers as its canonical interchange format, (no longer widely used by modern computers) but even if we do the analysis in floating point, we may well find it more convenient to interchange the data as integers, since the floating point values of groups of interest are all likely to be in a narrow range, and we may not care to interchange the graph analysis data at all, only the groupings and rankings. Really you only care about one group, the cooperating group.
We also need to serialize signatures to human readable format, for embedding in human readable messages base 58, because we are suppressing O, o, I, and lower case l, or base 64, which we get by including -+_$!* (permitted in urls) For error check, prefigure with enough hash bits to round up to a multiple of six bits. and make the output fixed size. Human readable messages that include sensitive records will always end with a hash of the entire human readable message, truncated to a multiple of three bytes, hence a multiple of six bits. Perhaps thirty bytes, two hundred and forty bits, forty u characters of u encoding, and the signed transaction output inside the message will always have the full signature of the full record. A full signature of a full record will thirty three bytes, 32 bytes of signature and one byte of hash of the signature, to make human transmission of signatures possible, though very difficult. </p><p>
<a href="https://github.com/thekvs/cpp-serializers">Review of serializers</a>.</p><p>
We dont want the schema agility of protobuf and Avro. We want header only, and do not come with half a dozen tools that do half a dozen complicated things. We just want to serialize stuff to canonical form so that it can transported between different architectures and code generated by different compilers, and so that the same object always generates the same hash.</p>
@ -84,12 +84,12 @@ We dont want the schema agility of protobuf and Avro. We want header only, a
A typical high speed connection is 1Gbps one Gigabit per second. (GBps is gigabyte persecond, Gbps is gigabit per second.</p><p>
Yas compact can handle 4Gbps, so storage and bandwidth are likely to be bottlenecks and we can probably throw more cpus at the system easier than more bandwidth and storage. So we make canonical yas-compact, or a variant thereof, with customization on index fields.</p>
<h2>libnop: C++ Native Object Protocols</h2><p> only seems to support Clang compiler. Visual Studio throws up.</p>
<h2><a href="./capnproto.html">Cap'n Proto</a></h2><p>Overkill. Too much stuff. But their zero compression is cool.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution-Share Alike 3.0 License</a></p>
<h2>libnop: C++ Native Object Protocols</h2><p> only seems to support Clang compiler. Visual Studio throws up.</p>
<h2><a href="./capnproto.html">Cap'n Proto</a></h2><p>Overkill. Too much stuff. But their zero compression is cool.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,6 +1,6 @@
---
title:
Lightning Layer
Lightning Layer
---
# This discussion of the lightning layer may well be obsoleted
@ -15,7 +15,7 @@ Contingent payments can reveal a key to an alien blockchain on the bitcoin block
[Anonymous Multi-Hop Locks]: anonymous_multihop_locks_lightning_network.pdf
"Anonymous Multi-Hop Locks for Blockchain Scalability and Interoperability"
[zero knowledge contingent payments on the bitcoin chain]:https://bitcoincore.org/en/2016/02/26/zero-knowledge-contingent-payments-announcement/
[zero knowledge contingent payments on the bitcoin chain]:https://bitcoincore.org/en/2016/02/26/zero-knowledge-contingent-payments-announcement/
"The first successful Zero-Knowledge Contingent Bitcoin Payment"
I need to understand the [Anonymous Multi-Hop Locks] primitive, and

View File

@ -10,20 +10,20 @@
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Logon protocol</title> </head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Logon Protocol</h1>
<h2>User functionality</h2><p>
Our immediate objective, in the minimum viable product, is that peers should be able to talk to each other to reach a consensus on who owns what coin, and clients should be able to connect to peers, should be hosted by peers, to get information about the consensus, to pay each other, and to communicate bills and contracts to each other.</p><p>
Our ultimate objective is to replace the existing centralized internet name system, but for starters, just have a name system so that clients can make payments to entities identified by name.</p><p>
We want our minimum viable system to be compatible with a future system where you can sign up with a host by giving it a name analogous to an email address, a name at which you can be contacted, and then the host contacts your client program on that machine on that machine, the client says "do you want to sign up", and thereafter you have automatic sign in with your client and a bookmark for that host on your client. Also the host can send you messages, and these messages can contain links that have the same effect as that bookmark link, except that the host knows what message you are responding to. But that is future compatibility that the minimum viable system needs to be compatible with, not what the minimum viable product will implement.</p><p>
The minimum viable product will implement a subset of that as necessary for the payment system, capable of being compatibly extended to that in future.</p>
@ -80,4 +80,4 @@ But what does the host know about the client.
licensed under the <a href="http://creativecommons.org/licenses/by-sa/3.0/" rel="license">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>
</html>

View File

@ -1,6 +1,6 @@
---
title:
Merkle-patricia Dac
Merkle-patricia Dac
# katex
---
@ -864,7 +864,7 @@ solve the problem of the number of items not being a power of two?
d="
M71.36 234.686s2.145-.873 3.102 0c1.426 1.303 14.645 21.829 16.933 23.136 1.302.745 4.496.45 5-2.3
M145.916 220c0-.93.124-.992.992-1.364.869-.373 2.42-.373 3.04.558.62.93-2.852-4.94 18.607 38.394.715 1.443 2.348 1.186 4-2
M147.218 218.5c1.303-.124 1.675.062 2.11.93.434.868.558 3.846.558 3.846-.25 2.496.31 3.597-1.365 19.166-1.675 15.568-1.54 21.825-.744 24.872.744 3.853 3.0 2.853 5.2 .295
M147.218 218.5c1.303-.124 1.675.062 2.11.93.434.868.558 3.846.558 3.846-.25 2.496.31 3.597-1.365 19.166-1.675 15.568-1.54 21.825-.744 24.872.744 3.853 3.0 2.853 5.2 .295
M71.36 234.686c2.42-.434 2.916-.93 6.079-.186 3.163.745 4.466 1.551 12.715 5.52 8.25 3.97 37.774 3.66 41.31 2.606C134.999 241.57 136 240 137 239
M71.36 234.686s1.551-.558 2.171.186c.62.745 2.481 4.528 1.8 10.545-.683 6.016-2.854 20.719-2.854 22.577 0 2.171 1.116 2.482 2.543 1.8C76.447 269.11 76 268 77 264"/>
<path stroke="#80e080"
@ -913,7 +913,7 @@ solve the problem of the number of items not being a power of two?
<use transform="translate(136 -44)" xlink:href="#merkle_vertex"/>
<use transform="translate(144)" xlink:href="#height_1_tree"/>
</g>
</g>
</g>
<g id="blockchain_id" >
<ellipse cx="14" cy="249" fill="#80e080" rx="8" ry="5"/>
<text>

View File

@ -3,39 +3,45 @@ set -e
cd `dirname $0`
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
osoptions=""
osoptions=""
elif [[ "$OSTYPE" == "darwin"* ]]; then
osoptions=""
osoptions=""
elif [[ "$OSTYPE" == "cygwin" ]]; then
osoptions="--fail-if-warnings --eol=lf "
osoptions="--fail-if-warnings --eol=lf "
elif [[ "$OSTYPE" == "msys" ]]; then
osoptions="--fail-if-warnings --eol=lf "
osoptions="--fail-if-warnings --eol=lf "
fi
templates="./pandoc_templates/"
options=$osoptions"--toc -N --toc-depth=5 --wrap=preserve --metadata=lang:en --include-in-header=$templates/header.pandoc --include-before-body=$templates/before.pandoc --include-after-body=$templates/after.pandoc --css=$templates/style.css -o"
for f in *.md
options=$osoptions"--toc -N --toc-depth=5 --wrap=preserve --metadata=lang:en --include-in-header=$templates/header.pandoc --include-before-body=$templates/before.pandoc --css=$templates/style.css -o"
for f in *.md
do
len=${#f}
base=${f:0:($len-3)}
if [ $f -nt $base.html ];
then
katex=""
for i in 1 2 3 4
mine="--include-after-body=$templates/after.pandoc "
for i in 1 2 3 4 5 6
do
read line
read line
if [[ $line =~ katex$ ]];
then
katex=" --katex=./"
fi
then
katex=" --katex=./"
fi
if [[ $line =~ notmine$ ]];
then
mine=" "
fi
done <$f
pandoc $katex $options $base.html $base.md
pandoc $katex $mine $options $base.html $base.md
echo "$base.html from $f"
#else
# echo " $base.html up to date"
fi
done
cd libraries
for f in *.md
for f in *.md
do
len=${#f}
base=${f:0:($len-3)}
@ -44,13 +50,13 @@ do
katex=""
for i in 1 2 3 4
do
read line
read line
if [[ $line =~ katex ]];
then
katex=" --katex=./"
fi
then
katex=" --katex=./"
fi
done <$f
pandoc $katex $options $base.html $base.md
pandoc $katex $mine $options $base.html $base.md
echo "$base.html from $f"
#else
# echo " $base.html up to date"
@ -58,7 +64,7 @@ do
done
cd ../..
templates=docs/pandoc_templates/
for f in *.md
for f in *.md
do
len=${#f}
base=${f:0:($len-3)}

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -11,64 +11,64 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Multicore</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Multicore</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Multicore</h1>
<p> At the same time as the security crisis has hit, the multicore crisis
has hit.&nbsp; A short while ago we were dealing with isolated serial
machines, now our programs must utilize a sea of machines.&nbsp; </p>
<p> Multithreading is hard.&nbsp; Doing it routinely, doing it with large
programs, invariably fails.&nbsp; </p>
<p> Intel, NVIDIA, and Google however have this crisis well in hand.&nbsp; </p>
<p> Big Businesses are attacking the problem, with competence and success,
and we can leave them to it and not worry too much. Google is pioneering
the way, and Intel and NVIDIA are making similar tools available to the
masses.&nbsp; </p>
<p> Since massive parallelism is a hard problem, requiring good people, much
thought, and much care, the meta solution is to solve that problem as few
times as possible, and re-use the resulting solutions as much as
possible.&nbsp; If, for example, one uses the hash table provided by
Intels threaded building blocks library, the Intel library and Intel
compiler takes care of hash table related coordination issues that
otherwise the programmer would have to take care of, and would probably
foul up.&nbsp; </p>
<p> Intel has provided a bunch of utilities that make it a good deal easier,
Vtune, thread checker, OpenMP, compiler auto parallelism, Intel Thread
Checker, Intel VTune Performance Analyzer, and most importantly, Threaded
Building Blocks.&nbsp; it is still hard but no longer damn near
impossible.&nbsp; </p>
<p> Back in the days when there was one hardware thread of execution driving
multiple software threads, locking worked well. These day, not so
well.&nbsp; Rather, it is often more desirable to use a lockless
transactional approach to handle any shared state.&nbsp; Shared state is
hard, better to share nothing or to leave any sharing to those utilities
that someone else has already written and debugged.&nbsp; If rolling your
own, better to use InterlockedXxx than Lock.&nbsp; Note that you construct
your <a href="interlockedxxx.html">own InterlockedXxx</a> operation for
any Xxx using InterlockedCompareExchange. </p>
<p> The big solution, however is that pioneered by Google. Rather than each
programmer designing his own multithreading and multicore design, one has
a small number of very general massively parallel algorithms embodied in
useful software for massaging masses of data.&nbsp; The programmer then
calls that software and lets it handle the parallelism.&nbsp; Googles Map
Reduce is the classic example of this, but every database servicing a web
application is also an example of this, since one typically has many web
servers running many processes all of which might potentially update the
same data at the same time, and the database is supposed to sort out any
resulting problems, while the developers write in single threaded python
or ruby on rails, and let the database handle any problems related to
massive parallelism.&nbsp; </p>
<p> Googles “app engine” allows programmers to write straightforward single
threaded python code in the easy to use Django framework that can be
executed in a massively parallel manner with coordination between many
parallel processes being performed by Googles datastore.&nbsp; </p>
<p> In short, the multicore crisis, unlike the other crises I describe in
this group of web pages, is well in hand.&nbsp; </p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Multicore</h1>
<p> At the same time as the security crisis has hit, the multicore crisis
has hit.&nbsp; A short while ago we were dealing with isolated serial
machines, now our programs must utilize a sea of machines.&nbsp; </p>
<p> Multithreading is hard.&nbsp; Doing it routinely, doing it with large
programs, invariably fails.&nbsp; </p>
<p> Intel, NVIDIA, and Google however have this crisis well in hand.&nbsp; </p>
<p> Big Businesses are attacking the problem, with competence and success,
and we can leave them to it and not worry too much. Google is pioneering
the way, and Intel and NVIDIA are making similar tools available to the
masses.&nbsp; </p>
<p> Since massive parallelism is a hard problem, requiring good people, much
thought, and much care, the meta solution is to solve that problem as few
times as possible, and re-use the resulting solutions as much as
possible.&nbsp; If, for example, one uses the hash table provided by
Intels threaded building blocks library, the Intel library and Intel
compiler takes care of hash table related coordination issues that
otherwise the programmer would have to take care of, and would probably
foul up.&nbsp; </p>
<p> Intel has provided a bunch of utilities that make it a good deal easier,
Vtune, thread checker, OpenMP, compiler auto parallelism, Intel Thread
Checker, Intel VTune Performance Analyzer, and most importantly, Threaded
Building Blocks.&nbsp; it is still hard but no longer damn near
impossible.&nbsp; </p>
<p> Back in the days when there was one hardware thread of execution driving
multiple software threads, locking worked well. These day, not so
well.&nbsp; Rather, it is often more desirable to use a lockless
transactional approach to handle any shared state.&nbsp; Shared state is
hard, better to share nothing or to leave any sharing to those utilities
that someone else has already written and debugged.&nbsp; If rolling your
own, better to use InterlockedXxx than Lock.&nbsp; Note that you construct
your <a href="interlockedxxx.html">own InterlockedXxx</a> operation for
any Xxx using InterlockedCompareExchange. </p>
<p> The big solution, however is that pioneered by Google. Rather than each
programmer designing his own multithreading and multicore design, one has
a small number of very general massively parallel algorithms embodied in
useful software for massaging masses of data.&nbsp; The programmer then
calls that software and lets it handle the parallelism.&nbsp; Googles Map
Reduce is the classic example of this, but every database servicing a web
application is also an example of this, since one typically has many web
servers running many processes all of which might potentially update the
same data at the same time, and the database is supposed to sort out any
resulting problems, while the developers write in single threaded python
or ruby on rails, and let the database handle any problems related to
massive parallelism.&nbsp; </p>
<p> Googles “app engine” allows programmers to write straightforward single
threaded python code in the easy to use Django framework that can be
executed in a massively parallel manner with coordination between many
parallel processes being performed by Googles datastore.&nbsp; </p>
<p> In short, the multicore crisis, unlike the other crises I describe in
this group of web pages, is well in hand.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -169,6 +169,6 @@ quite a bit of work to design these things, and to get them
deployed in a form that ordinary mortals can use. </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -8,7 +8,7 @@
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Networking Protocol</title> </head><body>
<p><a href="./index.html"> To Home page</a> </p>
@ -26,9 +26,9 @@ Trouble with bitcoin is that it is not centerless proof of work winds up bei
Thus we need a system with proof of stake, and not only proof of stake, but proof of client stake the power over the system needs to reside with peers that have a lot of wealthy clients and it needs to be hard to find who the clients are, and where they are keeping their secrets, so that even if Mueller seizes important peers on charges of tax evasion and money laundering, does not thereby gain control. </p><p>
If the system handles an enormous number of transactions, peers are going to be big and expensive, thus vulnerable to people like Mueller armed with vague and open ended charges of tax evasion and money laundering. Hence the power of peer over the currency needs to be proportional to the wealth controlled by the secrets held by that peers clients. And that peers clients need to be free to move from one peer to the next, and apt to move to peers that make it difficult for Mueller to find their clients. </p><p>
If the system handles an enormous number of transactions, peers are going to be big and expensive, thus vulnerable to people like Mueller armed with vague and open ended charges of tax evasion and money laundering. Hence the power of peer over the currency needs to be proportional to the wealth controlled by the secrets held by that peers clients. And that peers clients need to be free to move from one peer to the next, and apt to move to peers that make it difficult for Mueller to find their clients. </p><p>
Need a crypto currency where Bob can prove to the whole world that he paid Ann such and such amount, in accord with such and such a bill, but no one else can prove he paid Ann, nor that there ever was such a bill, except he shows them. Bitcoin is far too traceable. We need controlled traceability, where the parrticipants can prove a transaction to third parties and the world, but the world cannot. And Bob needs to be able to prove what the payment was about, that it was part of a conversation, a meeting of minds. </p><p>
Need a crypto currency where Bob can prove to the whole world that he paid Ann such and such amount, in accord with such and such a bill, but no one else can prove he paid Ann, nor that there ever was such a bill, except he shows them. Bitcoin is far too traceable. We need controlled traceability, where the participants can prove a transaction to third parties and the world, but the world cannot. And Bob needs to be able to prove what the payment was about, that it was part of a conversation, a meeting of minds. </p><p>
The reason we have end user demand for crypto currency is the same as the reason we have end user demand for gold. </p><p>
@ -62,7 +62,6 @@ So, to accomplish the goal of shutting down crypto currency requires world wide
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -11,23 +11,23 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Normalizing unicode strings</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Normalizing unicode strings</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Normalizing unicode strings</h1><p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Normalizing unicode strings</h1><p>
I would like strings that look similar to humans to map to the same item. Obviously trailing and leading whitespace needs to go, and whitespace map a single space.</p><p>
The hard part, however is that unicode has an enormous number of near duplicate symbols.</p><p>
Have you already read<br/>
Have you already read<br/>
<a href="https://www.unicode.org/reports/tr15/tr15-45.html">https://www.unicode.org/reports/tr15/tr15-45.html</a> ?</p><p>
Our normalization code is in<br/>
Our normalization code is in<br/>
<a href="http://www.openldap.org/devel/gitweb.cgi?p=openldap.git;a=tree;f=libraries/liblunicode;h=4896a6dc9ee5d3e78c15ed6c2e2ed2f21be70247;hb=HEAD">http://www.openldap.org/devel/gitweb.cgi?p=openldap.git;a=tree;f=libraries/liblunicode;h=4896a6dc9ee5d3e78c15ed6c2e2ed2f21be70247;hb=HEAD</a></p><p>
I am going to have to use NFKC canonical form for the key, and NFC canonical form for the display of the key.</p><p>
Which once in a blue moon will drive someone crazy. "Its broken" he will say </p>

View File

@ -9,14 +9,14 @@
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Openvpn</title> </head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Openvpn on the cloud</h1><p>
Figure out what version of debian you are running:<pre>
cat /etc/*-release && cat /proc/version
</pre><p>

View File

@ -1,10 +1,10 @@
<style>
body {
max-width: 30em;
margin-left: 1em;
max-width: 30em;
margin-left: 1em;
}
p.center {text-align:center;}
div.center {text-align:center;}
div.centre {text-align:center;}
table {
border-collapse: collapse;
}
@ -18,4 +18,3 @@
}
</style>
<link rel="shortcut icon" href="../rho.ico">

View File

@ -1,35 +1,35 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Passphrases</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Passphrases</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Passphrases</h1>
<p>Passphrases are of course stronger and easier to
<p><a href="./index.html"> To Home page</a> </p>
<h1>Passphrases</h1>
<p>Passphrases are of course stronger and easier to
remember than passwords, but whitespace causes endless
problems, which in turn cause endless support calls</p>
<p>Whitespace in a passphrase should therefore be
canonicalized before hashing. Multiple spaces, tabs, and
carriage returns should become a single space. Leading and
trailing whitespace should be stripped.</p>
trailing whitespace should be stripped.</p>
<p style="background-color :
#ccffcc; font-size:80%">These documents are licensed under
the <a rel="license"
href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</body>
</html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -11,36 +11,36 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Payments, Contracts, Invoices, and Reputational data</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Payments, Contracts, Invoices, and Reputational data</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Payments, Contracts, Invoices, and Reputational data</h1>
<h2>The problem to be solved</h2>
<h2>*****</h2>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<h2>*****</h2>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<h2>Value and Exchange</h2>
<p>An exchange of value consists of a contract to exchange ("trade"), two
transfers of value (value for value), coordination problems
("settlement"), and dispute resolution services ("failure").</p>
<p> Hence, reliability of exchange is dominated by reliability of transfers.
And, transfers of value are dominated by reliability of basic issues of
value, including storage.</p>
<p> What might be seen as sort of semantic short-cut is that a value system
may be considered reliable if and only if it can participate in an
exchange. </p>
<p>&nbsp;</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
<p><a href="./index.html"> To Home page</a> </p>
<h1>Payments, Contracts, Invoices, and Reputational data</h1>
<h2>The problem to be solved</h2>
<h2>*****</h2>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<h2>*****</h2>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<p>.&nbsp;</p>
<h2>Value and Exchange</h2>
<p>An exchange of value consists of a contract to exchange ("trade"), two
transfers of value (value for value), coordination problems
("settlement"), and dispute resolution services ("failure").</p>
<p> Hence, reliability of exchange is dominated by reliability of transfers.
And, transfers of value are dominated by reliability of basic issues of
value, including storage.</p>
<p> What might be seen as sort of semantic short-cut is that a value system
may be considered reliable if and only if it can participate in an
exchange. </p>
<p>&nbsp;</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons
Attribution-Share Alike 3.0 License</a></p>
<p>.&nbsp;</p>
Attribution-Share Alike 3.0 License</a></p>
<p>.&nbsp;</p>
</body>
</html>

View File

@ -1,6 +1,6 @@
---
title:
Proof of Stake
Proof of Stake
---
::: {style="background-color : #ffdddd; font-size:120%"}
![run!](tealdeer.gif)[TL;DR Map a blockdag algorithm equivalent to the
@ -681,4 +681,3 @@ for](trust_and_privacy_on_the_blockchain.html).
Peers may have human readable names, and wallets may have names of the
form `LoginName@PeerName`.

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -11,110 +11,110 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Protocol Negotiation</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Protocol Negotiation</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Protocol Negotiation</h1>
<p> Once a protocol is in use, it becomes very hard to change. If one person
updates the server, and the client is not updated, everything breaks. </p>
<p> And so, we are stuck with a lot of frozen protocols, many of which are
in urgent need of change, but to change, requires wide consensus, which
requires a big bunch of people showing up at a meeting, but at such
meetings very little gets done, and what gets done is stupid.</p>
<p> If a standard is successful, more and more people want to be in the
committee, many of whom represent business profit centers and government
special interests, and who really do not understand much about the
technology, except that any change might be adverse to the very important
people who sent them there.</p>
<p> As the committee gets larger, it gets more unworkable, and as it
represents more and more special interests, it gets more unworkable</p>
<p> In order to have a system where the internet's protocols can be
upgraded, and new protocols introduced, without central coordination,
protocol negotiation, where client and server first discuss what protocol
version they will be using, has to be part of every protocol, all the way
down to the level of TCP and UDP.</p>
<p>These days everyone builds in protocol negotiation, often on top of SSL, which is on top of TCP, resulting in three additional round trips.</p>
<p>And then a widely distributed client or server breaks the protocol negotiation, which no one notices because it interoperates with all existing implementations, until someone tries to introduce a new protocol, whereupon the new code implementing the new protocol is blamed for its failure to interoperate with the existing clients and/or servers, and then we get another layer of protocol negotiation on top of all the existing layers of protocol negotiation.</p>
<p>TCP has built in protocol negotiation, eight bits worth, which turned
out, unsurprisingly, to be inadequate.</p>
<p> For the content of the internet to be free from central control, we need
to ensure that the address spaces and protocols are free from central
control.</p>
<p> When an old protocol is broken, clients and servers that have not
upgraded to a new improved protocol will remain forever, so the old
defective protocol has to be supported forever without, however,
allowing an attacker a downgrade attack. </p>
<p>To prevent a downgrade attack, there has to be some way of disabling
protocols in the field, where the signed ban on certain protocols flood
fills from one program to the next.</p>
<p> Often, it is impossible to support the old clients, because protocol
negotiation was never adequately designed in, or because it was designed
in but was designed vulnerable to a downgrade attack.</p>
<p>But let us suppose the protocol negotiation was well designed:&nbsp; The
committee has to assign a code.&nbsp; And of course, they will only assign
this code to a protocol that they agree is right and nothing gets done,
for there is always some vested interest that for some strange and obscure
reason does not want this protocol to exist.</p>
<p>One solution is to have quite large protocol identifiers, or arbitrarily
large variable length protocol identifiers, so that anyone can whip up a
protocol and assign it an identifier, and hack a client and server to use
it, without having to walk it past three dozen members of the committee. </p>
<p>But then, of course, we would probably wind up with a lot of protocols.
&nbsp;This could potentially lead to a lot of protocol negotiation round
trips </p>
<blockquote>
<p>Do you speak protocol A? No.</p>
<p>Do you speak protocol B? No.</p>
<p>Do you speak protocol C? No.</p>
<p>Do you speak protocol D? No.</p>
<p>Do you speak protocol E? Yes. </p>
</blockquote>
<p>One solution to this problem is to have complete lists of protocols, call
it a protocol dictionary, which dictionary maps the long probabilistically
globally unique protocol names to short deterministically unique local
protocol names, and gives an order of preference.&nbsp; If the client
names a dictionary that it supports, and/or the server names a dictionary
that it supports, then they can usually come to immediate agreement. <br/>
</p>
<p>If, for example, the client wants to talk protocol X, it proposes one or
more dictionaries of updates to protocol X, implying that it can talk all
the updates listed in each dictionary, and an order of preference among
dictionaries</p>
<p>If the server recognizes one or more of the dictionaries, it then
responds with one of the protocols listed in the first dictionary that it
recognizes, by its short dictionary name, and the conversation proceeds.</p>
<p>An ordered list of dictionaries is identified by a public key and a short
human readable type name.&nbsp; The typename is only unique with respect
to the dictionaries signed by this public key, thus ftp version 1, ftp
version 2, ftp version 4 ... </p>
<p>The globally unique identifier of a dictionary is the hash of the rule
identifying its public key, plus its typename and version number.</p>
<p>If the server recognizes the hash of the rule identifying the dictionary
public key, but not the version number, it responds with the highest
version number that it does recognize, and the most favored protocol in
that dictionary.&nbsp; Thus if the client requests a protocol of
dictionary version n, it has to know dictionaries versions 1 to n, and be
able to deal with all protocols in versions 1 to n, if only to the extent
that it is able to fail the protocol gracefully. </p>
<h3>The one true ciphersuite</h3>
<p>Why would you want multiple ciphers?</p>
<p>In case one turns out to be weak. </p>
<p>OK, suppose one turns out to be weak.&nbsp; Oops, Malloc can now launch a
downgrade attack.</p>
<p>So, if supporting multiple ciphers, you need a floodfill mechanism where
you can disable the bad ciphersuite in the field.</p>
<p>Each program supporting a set of ciphersuites has a set of signatures it
recognizes as authoritative.&nbsp; If another program that it talks to has
a revocation of ciphersuite, and it recognizes one of the signatures on the
revocation, the revocation floodfills.</p>
<p>So, ideally you should support multiple ciphersuites but if you do,
have a mechanism for field revocation.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
<p><a href="./index.html"> To Home page</a> </p>
<h1>Protocol Negotiation</h1>
<p> Once a protocol is in use, it becomes very hard to change. If one person
updates the server, and the client is not updated, everything breaks. </p>
<p> And so, we are stuck with a lot of frozen protocols, many of which are
in urgent need of change, but to change, requires wide consensus, which
requires a big bunch of people showing up at a meeting, but at such
meetings very little gets done, and what gets done is stupid.</p>
<p> If a standard is successful, more and more people want to be in the
committee, many of whom represent business profit centers and government
special interests, and who really do not understand much about the
technology, except that any change might be adverse to the very important
people who sent them there.</p>
<p> As the committee gets larger, it gets more unworkable, and as it
represents more and more special interests, it gets more unworkable</p>
<p> In order to have a system where the internet's protocols can be
upgraded, and new protocols introduced, without central coordination,
protocol negotiation, where client and server first discuss what protocol
version they will be using, has to be part of every protocol, all the way
down to the level of TCP and UDP.</p>
<p>These days everyone builds in protocol negotiation, often on top of SSL, which is on top of TCP, resulting in three additional round trips.</p>
<p>And then a widely distributed client or server breaks the protocol negotiation, which no one notices because it interoperates with all existing implementations, until someone tries to introduce a new protocol, whereupon the new code implementing the new protocol is blamed for its failure to interoperate with the existing clients and/or servers, and then we get another layer of protocol negotiation on top of all the existing layers of protocol negotiation.</p>
<p>TCP has built in protocol negotiation, eight bits worth, which turned
out, unsurprisingly, to be inadequate.</p>
<p> For the content of the internet to be free from central control, we need
to ensure that the address spaces and protocols are free from central
control.</p>
<p> When an old protocol is broken, clients and servers that have not
upgraded to a new improved protocol will remain forever, so the old
defective protocol has to be supported forever without, however,
allowing an attacker a downgrade attack. </p>
<p>To prevent a downgrade attack, there has to be some way of disabling
protocols in the field, where the signed ban on certain protocols flood
fills from one program to the next.</p>
<p> Often, it is impossible to support the old clients, because protocol
negotiation was never adequately designed in, or because it was designed
in but was designed vulnerable to a downgrade attack.</p>
<p>But let us suppose the protocol negotiation was well designed:&nbsp; The
committee has to assign a code.&nbsp; And of course, they will only assign
this code to a protocol that they agree is right and nothing gets done,
for there is always some vested interest that for some strange and obscure
reason does not want this protocol to exist.</p>
<p>One solution is to have quite large protocol identifiers, or arbitrarily
large variable length protocol identifiers, so that anyone can whip up a
protocol and assign it an identifier, and hack a client and server to use
it, without having to walk it past three dozen members of the committee. </p>
<p>But then, of course, we would probably wind up with a lot of protocols.
&nbsp;This could potentially lead to a lot of protocol negotiation round
trips </p>
<blockquote>
<p>Do you speak protocol A? No.</p>
<p>Do you speak protocol B? No.</p>
<p>Do you speak protocol C? No.</p>
<p>Do you speak protocol D? No.</p>
<p>Do you speak protocol E? Yes. </p>
</blockquote>
<p>One solution to this problem is to have complete lists of protocols, call
it a protocol dictionary, which dictionary maps the long probabilistically
globally unique protocol names to short deterministically unique local
protocol names, and gives an order of preference.&nbsp; If the client
names a dictionary that it supports, and/or the server names a dictionary
that it supports, then they can usually come to immediate agreement. <br/>
</p>
<p>If, for example, the client wants to talk protocol X, it proposes one or
more dictionaries of updates to protocol X, implying that it can talk all
the updates listed in each dictionary, and an order of preference among
dictionaries</p>
<p>If the server recognizes one or more of the dictionaries, it then
responds with one of the protocols listed in the first dictionary that it
recognizes, by its short dictionary name, and the conversation proceeds.</p>
<p>An ordered list of dictionaries is identified by a public key and a short
human readable type name.&nbsp; The typename is only unique with respect
to the dictionaries signed by this public key, thus ftp version 1, ftp
version 2, ftp version 4 ... </p>
<p>The globally unique identifier of a dictionary is the hash of the rule
identifying its public key, plus its typename and version number.</p>
<p>If the server recognizes the hash of the rule identifying the dictionary
public key, but not the version number, it responds with the highest
version number that it does recognize, and the most favored protocol in
that dictionary.&nbsp; Thus if the client requests a protocol of
dictionary version n, it has to know dictionaries versions 1 to n, and be
able to deal with all protocols in versions 1 to n, if only to the extent
that it is able to fail the protocol gracefully. </p>
<h3>The one true ciphersuite</h3>
<p>Why would you want multiple ciphers?</p>
<p>In case one turns out to be weak. </p>
<p>OK, suppose one turns out to be weak.&nbsp; Oops, Malloc can now launch a
downgrade attack.</p>
<p>So, if supporting multiple ciphers, you need a floodfill mechanism where
you can disable the bad ciphersuite in the field.</p>
<p>Each program supporting a set of ciphersuites has a set of signatures it
recognizes as authoritative.&nbsp; If another program that it talks to has
a revocation of ciphersuite, and it recognizes one of the signatures on the
revocation, the revocation floodfills.</p>
<p>So, ideally you should support multiple ciphersuites but if you do,
have a mechanism for field revocation.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons
Attribution-Share Alike 3.0 License</a></p>
Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -11,68 +11,68 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Protocol Specification</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Protocol Specification</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Protocol Specification</h1><p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Protocol Specification</h1><p>
In general, supposing your server, written in Javascript in the node.js environment, is assembling a response to an end user from services provided by other servers, is there any reasonable alternative to having them all talk JSON?</p><p>
RPC calls usually use XDR, which is binary, or JSON, which is ascii. There are handy libraries to take care of all the boring sockets stuff for you for these cases.</p><p>
Node.js tools to use RPC are available for RPC in JSON, but is there any easy way to access RPC calls in binary formats such as XDR, ASN.1 PER, or Avro Apache from node.js. Or to do something similar?</p><p>
I am not asking what is the best way, or what can be done, because anything can be done with enough hard work, but rather what ways have standards to avoid reinventing the wheel, and libraries to do as much of the work as possible.</p><p>
<a href="https://stackoverflow.com/questions/47298905/binary-rpc-in-node-js/47368469#47368469">asked Nov 15 at 3:33</a></p><p>
<a href="https://github.com/mtth/avsc">avsc</a> implements Avros RPC specification, you can use it to expose "type-safe" services over any transport (e.g. HTTP, TCP, WebSockets). The <a href="https://github.com/mtth/avsc/wiki/Quickstart#services">quickstart guide</a> goes through a simple example that should help you get started.</p><p>
Your avro is in javascript, and is, I assume, expected to run under node.js. Apache provides avro to run as compiled C++, Java, Python, and C#. So it looks like an Avro service communicating in binary can communicate between any two environments; also avro can serialize to memory, which should make possible operating system and compiler independent hashing of data. Looks like a standardized solution. So this would seem to imply that Avro services adequately cover anyone who wants to talk to anyone in any environment. </p><p>
This question turned out to be problematic and insoluble, because I want hashes defined independently of a particular computer and compiler. Which turns out to be hard. ASN.1 theoretically solves this problem, but in practice sucks horribly.</p><p>
I wish I had a MsgPack that was not dynamic, but would guarantee predefined MsgFormats. Human generation of on the wire message formats case by case should also be considered. Let us see how cryptonote deals with this problem. </p><p>
Looks like Avro gives me what I want: Predefined message formats.</p><p>
To render them human readable output them as YAML, but reading YAML is problematic, because YAML could represent anything, including something hostile. So any message from outside has to be presented in Avro.</p><p>
Cryptonote: https://github.com/cryptonotefoundation/cryptonote</p><p>
https://cryptonotestarter.org/create-wallet.html</p><p>
What is a protocol? Protocols wind up being defined by implementations,
which people attempt, not very successfully, to reverse engineer. By trial
and error they get their client to work with the existing server, and
their server to work with the existing client, and so an ill defined
protocol becomes over time even worse defined.</p><p>
To address this problem, we have ASN.1, ASN.1 PER, and ASN.1 DER</p><p>
ASN.1 is a language for describing data.</p><p>
It is also a compiler for generating C code to process the data described. Some people complain that DER is too complex for anyone to get right.</p><p>
The library https://github.com/vlm/asn1c supports canonical per, but does not entirely support Visual Studio.</p><p>
Your avro is in javascript, and is, I assume, expected to run under node.js. Apache provides avro to run as compiled C++, Java, Python, and C#. So it looks like an Avro service communicating in binary can communicate between any two environments; also avro can serialize to memory, which should make possible operating system and compiler independent hashing of data. Looks like a standardized solution. So this would seem to imply that Avro services adequately cover anyone who wants to talk to anyone in any environment. </p><p>
This question turned out to be problematic and insoluble, because I want hashes defined independently of a particular computer and compiler. Which turns out to be hard. ASN.1 theoretically solves this problem, but in practice sucks horribly.</p><p>
I wish I had a MsgPack that was not dynamic, but would guarantee predefined MsgFormats. Human generation of on the wire message formats case by case should also be considered. Let us see how cryptonote deals with this problem. </p><p>
Looks like Avro gives me what I want: Predefined message formats.</p><p>
To render them human readable output them as YAML, but reading YAML is problematic, because YAML could represent anything, including something hostile. So any message from outside has to be presented in Avro.</p><p>
Cryptonote: https://github.com/cryptonotefoundation/cryptonote</p><p>
https://cryptonotestarter.org/create-wallet.html</p><p>
What is a protocol? Protocols wind up being defined by implementations,
which people attempt, not very successfully, to reverse engineer. By trial
and error they get their client to work with the existing server, and
their server to work with the existing client, and so an ill defined
protocol becomes over time even worse defined.</p><p>
To address this problem, we have ASN.1, ASN.1 PER, and ASN.1 DER</p><p>
ASN.1 is a language for describing data.</p><p>
It is also a compiler for generating C code to process the data described. Some people complain that DER is too complex for anyone to get right.</p><p>
The library https://github.com/vlm/asn1c supports canonical per, but does not entirely support Visual Studio.</p><p>
On the other hand Avro apache seems to be a more complete solution, addressing the problem of protocol negotiation, RPC calls, and map reduce calls. I dont think the ASN.1 crowd have even thought of map reduce.</p><p>
And there is the oldie but goodie, XDR, which is just the simplest possible way of avoiding compiler and endian dependency. <a href ="https://www.cprogramming.com/tutorial/rpc/remote_procedure_call_start.html">There is a nice XDR RPC library.</a> ONC RPC is standardized by the Internet Engineering Task Force (IETF) as RFC 1831. It is based on the External Data Representation standard (XDR), known as RFC 4506 (see References). But, on the other hand, everyone using node.js seems to be doing RPC in JSON.</p><p>
Maybe this whole canonicalization question is just too hard, and need ad hoc solutions. Surely crypto libraries have serialization formats? Let me see what cryptonote does. No point in reinventing the wheel.</p><p>
A type that is becoming increasingly common is unordered name value mapping, where a lot of names have defaults, and people keep adding new names.</p><p>
Now if you send the names with the message every single time, it is a huge overhead. Which is what you wind up with with JSON based rpc calls.</p><p>
So you want compiled message types, which wind up as a C++ object and when someone wants more fields, he has a derived type of the C++ object, with the new fields having default values, allowing you to cast the object to have additional names and derived types as needed.</p><p>
MsgPack is binary, and has modules for every language and environment you have ever heard of, and lots of languages you have never heard of, and library code to do anything you want, but it is all dynamically typed, which means it is not as efficient as PER.</p><p>
It is basically YAML in binary, in that MsgPack messages correspond closely to YAML documents. Because it supports every language, you can use MsgPack to talk C++ to node.js, and YAML to talk to humans.</p><p>
@ -80,66 +80,66 @@ Your avro is in javascript, and is, I assume, expected to run under node.js Apac
Dynamic typing means that a hostile evil program could send no end of messages in MsgPack that your program is not equipped to deal with. The great thing about PER is that anything that you are not expecting just gets magically rejected.</p><p>
Both of them, Per and MsgPack totally punt on the issue of protocol negotiation, whereas Avro apache has support for protocol negotiation. It combines static and dynamic typing, but dynamic requires both sides to play nice. Evil program with surprise data structures will just have the connection shut down on it.</p><p>
ASN.1 Per punts by requiring both sides to be in total agreement about the ASN.1 schema at compile time, MsgPack punts in the opposite way by being able to send any schema, including hostile schemas that are totally surprising, unexpected, and impossible for the recipient to deal with.</p><p>
Maybe the way to go would be to agree on a dictionary mapping numbers to ASN.1 schemas at connection establishment, and if agreement, send schema number and binary blob in a MsgPack message, if no agreement, send dynamic message with a schema GUI, and the end user has to deal with it as best he can. If receiving a message with a schema guid, then precompiled code to generate the corresponding C++ object from the MsgPack message, and to generate a MsgPack message from the C++ object. On reception, execute the generic object->process_on_receive(&amp;connection) job on the received object. Which typically will look up the table for the object to which this is a response. Everything in the table is a pointer to a common base type, but, on getting the object, executes a derived operation.
For an expected message, which is anticipated to be a response to a previous message, the response should contain an opaque callback which was sent with the previous message, which is an index into a table of local objects whose base type can run time check that the message is of the type that they expected. (All schemas corresponding to data that can be received, or loaded from files, derive from a base type that reports their schema, so any time you deal with a message, it is mapped into a C++ object that will only be dealt with by code that at compile time expects that object.)
Thus every transmissable object has Yaml serialization, MsgPack serialization, Asn.1 Per serialization, Yaml serialization, Yaml serialization inside MsgPack, and Asn.1 Per serialization inside MsgPack. If MsgPack or Asn.1 throws on receiving a message, terminate the connection. If it throws on sending a message, it is a bug.
But I dont see that Avro Apache can talk to node.js, in part because it wisely refuses to be fully dynamic.</p><p>
There is a review of more serializers than you can shake a stick at <a href="https://github.com/thekvs/cpp-serializers">https://github.com/thekvs/cpp-serializers</a>
If you attempt to hand generate code for processing packets described by
ASN.1, you will probably get it wrong and your head will explode.&nbsp;
Hence ASN.1 is much cursed and condemned.&nbsp; </p><p>
Dont do that.&nbsp; Dont hand write code to generate or interpret ASN.1
data packets.&nbsp; You are unlikely to succeed, and your code will have
mystery bugs.</p>
<p>ASN.1 PER is ASN.1 data description compiled to produce efficiently
compressed data packets that conform to a description in ASN.1, and
efficiently decompresses them.</p>
<p>ASN.1 DER&nbsp; that data description that generates data packets with a
description of what the data packet means, so that if two programs sign
the same ASN.1 DER data, they agree not only on the data, but on the
meaning of that data, and if one program means the same thing as the other
program, the signatures will come out the same.</p>
<p>Use it.&nbsp; ASN.1, used right, is what is needed to rigorously define a
protocol so that a client written by one person will work with a server
written by another.</p>
<p>There is much loud cursing about the fact that the data on the wire is
humanly incomprehensible, and that the code that converts it into program
data structures is humanly incomprehensible.&nbsp; No one should be
looking at machine generated code, because machine generated code is
notoriously incomprehensible.&nbsp; The question then is, does the
compiler work, and is the compiler usable.</p><p>
ASN.1, you will probably get it wrong and your head will explode.&nbsp;
Hence ASN.1 is much cursed and condemned.&nbsp; </p><p>
Dont do that.&nbsp; Dont hand write code to generate or interpret ASN.1
data packets.&nbsp; You are unlikely to succeed, and your code will have
mystery bugs.</p>
<p>ASN.1 PER is ASN.1 data description compiled to produce efficiently
compressed data packets that conform to a description in ASN.1, and
efficiently decompresses them.</p>
<p>ASN.1 DER&nbsp; that data description that generates data packets with a
description of what the data packet means, so that if two programs sign
the same ASN.1 DER data, they agree not only on the data, but on the
meaning of that data, and if one program means the same thing as the other
program, the signatures will come out the same.</p>
<p>Use it.&nbsp; ASN.1, used right, is what is needed to rigorously define a
protocol so that a client written by one person will work with a server
written by another.</p>
<p>There is much loud cursing about the fact that the data on the wire is
humanly incomprehensible, and that the code that converts it into program
data structures is humanly incomprehensible.&nbsp; No one should be
looking at machine generated code, because machine generated code is
notoriously incomprehensible.&nbsp; The question then is, does the
compiler work, and is the compiler usable.</p><p>
There is an <a href="http://lionet.info/asn1c/faq.html">internet tool for compiling asn.1 specifications into C code</a>.</p><p>
If a program reads DER or BER data, the result is apt to be disastrous.&nbsp; BER and DER can express an arbitrary data structure and thus can crash the program receiving the data, probably causing it to execute transmitted data as code.</p>
<p>You cant depend on a DER or BER bit string being able to map back into any well-defined ASN.1 object that the program was designed to deal with.&nbsp;&nbsp;</p>
<p> Incoming data should be parsed as the expected and bounded size data
structure, thus we need something that can generate parsing code from a
description of the data at compile time.&nbsp; We need compile time
descriptions of the data, not run time descriptions, because the program
that uses the incoming data will unavoidably rely on compile time
description of the data.
<br/>
<br/>
PER, however cannot receive unexpected data structures, because the
expected data structure is specified at compile time, not run time.&nbsp;
Malicious or faulty data will generate an error, not a crash.<br/>
<br/>
Thus all data should be received as PER or by a format with the properties
of PER.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
<p>You cant depend on a DER or BER bit string being able to map back into any well-defined ASN.1 object that the program was designed to deal with.&nbsp;&nbsp;</p>
<p> Incoming data should be parsed as the expected and bounded size data
structure, thus we need something that can generate parsing code from a
description of the data at compile time.&nbsp; We need compile time
descriptions of the data, not run time descriptions, because the program
that uses the incoming data will unavoidably rely on compile time
description of the data.
<br/>
<br/>
PER, however cannot receive unexpected data structures, because the
expected data structure is specified at compile time, not run time.&nbsp;
Malicious or faulty data will generate an error, not a crash.<br/>
<br/>
Thus all data should be received as PER or by a format with the properties
of PER.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons
Attribution-Share Alike 3.0 License</a></p>
Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -121,7 +121,7 @@ that message.
In the api, the application and api know the message type, because
otherwise the api just would not work. But on the rare occasions when the
message is represented globally, outside the api, *then* it needs a message type header.
message is represented globally, outside the api, *then* it needs a message type header.
# TCP is broken
@ -357,7 +357,7 @@ The the TCP replacement handshake needs to be a four phase handshake.
1. Server checks the keyed hash to ensure that this is a real client
reply to a real and recent server reply. Then it checks the proof of
work.
work.
If the proof of work passes, Server allocates memory

View File

@ -190,6 +190,6 @@ the anonymity that comes from the faceless crowd of a
power law network.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -1,16 +1,16 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>RPC to the blockchain</title>
<link rel="shortcut icon" href="../rho.ico">
<title>RPC to the blockchain</title>
</head>
<body><p><a href="./index.html"> To Home page</a></p>
<h1>RPC to the blockchain</h1><p>

View File

@ -11,7 +11,7 @@
p.center {text-align:center;}
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Safe Operating System</title>
</head><body>
@ -32,16 +32,16 @@ system that is largely immune to viruses, Trojans and
spyware.&nbsp; “Capdesk” was a demo of the design
principles for a secure desktop operating system.&nbsp; It
has led to similar, though more realistic versions
retrofitted to windows and Linux, and
retrofitted to windows and Linux, and
<a href="http://www.cl.cam.ac.uk/research/security/capsicum/">
sandboxes</a> based on the same concept. </p>
<ul>
<li>Resources, such as files and directories, should be
owned by packages, or package*user, not merely users,
so that one package cannot interfere with another package,
so that packages cannot run with the full authority
of the user, and so that a package cannot be furtively installed
<li>Resources, such as files and directories, should be
owned by packages, or package*user, not merely users,
so that one package cannot interfere with another package,
so that packages cannot run with the full authority
of the user, and so that a package cannot be furtively installed
(for to be installed, has to be allocated resources, such as
a directory for its executable files to live in)</li>
@ -352,6 +352,6 @@ particular processes, and for any particular installed
package.&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -21,7 +21,7 @@
bottom: 0pt;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Configuration Data in a Safe Operating System</title></head>
<body>
@ -95,6 +95,5 @@ message.  </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -54,12 +54,11 @@ Known link farms get colors from the bad palette.</p><p>
So, the main network of legitimate actors will tend to get all the same color mix, because every legitimate customer buys from lots of legitimate sellers, and every legitimate seller sells to lots of legitimate buyers.</p><p>
Networks of fakes will get their own distinct color, because the reputation circulates inside that network. The big giveaway will not so much be bad colors versus good colors, but the failure of colors to homogenize. All the good actors will tend to develop rather similar colors, each link farm will have its own distinct color. Every transaction inside your own little group will tend to result in more of your group color, and less of the general mix.</p><p>
Networks of fakes will get their own distinct color, because the reputation circulates inside that network. The big giveaway will not so much be bad colors versus good colors, but the failure of colors to homogenize. All the good actors will tend to develop rather similar colors, each link farm will have its own distinct color. Every transaction inside your own little group will tend to result in more of your group color, and less of the general mix.</p><p>
With forty colors, we have a trillion different composite colors, so we randomly assign each seller entity that collects reviews an initial pool of distinct color, and they get additional color from feedback on each transaction of the entity transacted with. If feedback comes from a wallet never seen before, it has no color, so they get more of their own color, and it gets a pool of the color of the entity they gave feedback to proportional to the amount paid. Every completely unknown seller entity gets one hundred units of various randomly chosen colors. External reputational indications result in additions of color reflecting that external information, which will get mixed in throughout the network of real actors and real feedback.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -196,7 +196,7 @@ Both problems are being actively worked on. Both problems need a good deal
more work, last time I checked. For end user trust in client wallets
relying on zk-snark verification to be valid, at least some of the end
users of client wallets will need to themselves generate the verifiers from
the script.
the script.
For trust based on zk-snarks to be valid, a very large number of people
must themselves have the source code to a large program that was

View File

@ -1,8 +1,8 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
body {
max-width: 30em;
margin-left: 2em;
@ -14,50 +14,50 @@
td, th {
padding: 6px;
border: solid 1px black;
}
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Name System</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Name System</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Seed phrase wallets</h1>
<p>Bitcoin has had big problems with wallet security, and eventually converged on the <a href="https://en.bitcoin.it/wiki/Seed_phrase">seed phrase system</a>. A seed phrase generates a sequence of key pairs.</p>
<p>This is implemented as the BIP-039 standard.</p>
<p>On the other hand, <a href="https://keybase.io/warp/warp_1.0.9_SHA256_a2067491ab582bde779f4505055807c2479354633a2216b22cf1e92d1a6e4a87.html">Warpwallet</a> indicates that with password strengthening, forty eight bits of passphrase security suffices, which if using the BIP-039 system is five words.</p>
<p>For durable storage, write the words on art low acid paper in pencil, and spray with art fixative to prevent the pencil from smudging.</p>
<p>We want to be able to create a receive only wallet, that can sign on with a name controlled by its master wallet, and can receive money that only its master wallet can spend.</p>
<p>In order for it to receive money, it has to be able to generate an indefinitely large number of public keys, that it cannot generate private keys for. It generates a pseudo random sequence, integer hashed with a 128 bit value, and multiplies the public key of the master by that pseudo random number. To spend the money, the master multiplies the private key by that pseudo random number.</p>
<p><a href="https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki">Example code</a> for maintaining a tree of deterministically generated keys, which is not exactly what we want, but which is similar.</p>
<p>One evident defect of this system is that leaking a single private key corresponding to the public keys generated by the receive only wallet, plus the information that enables the receive only wallet to generate an unlimited number of receive keys, leaks them all. It might be safer just to give the receive only wallet a great big bundle of receive keys.
<p>One evident defect of this system is that leaking a single private key corresponding to the public keys generated by the receive only wallet, plus the information that enables the receive only wallet to generate an unlimited number of receive keys, leaks them all. It might be safer just to give the receive only wallet a great big bundle of receive keys.
<p>A key that owns a name, can sign a statement allowing another key to use that name for some defined time, so the always logged on wallet can operate. If the always logged on wallet gets stolen, it will receive payments for some other key sequence, </p>
<p>so, given a strong secret, we generate two wallets one of which can receive money, can chat, can assert it rightly has a certain zooko or ro identity, and one of which can spend that money and assert the same identity. Chat includes making conversations and files available, either generally, or two identities that have a key . This implements sale of books by the receive only wallet.</p>
<p>Everyone is converging to BIP0032 BIP0039 wallets</p>
<p>But electrum has some powerful and convincing objections to BIP0039 wallets</p>
<p>You want the wallet to depend only on the secret list of words, not on the wordlist, because the wordlist is likely to change. But you dont want to let the user construct his own secret capriciously, because he will fuck up, so you need a checksum so that his own custom secret will not work.</p>
<p>So here is my solution: You hash his word list through secret strengthening, but the strengthened secret has to have some zeroes in the right places. Our code for generating the word list generates a hundred or so wordlist till it finds one that works. If the user tries his own master passphrase, probably will not work. </p>
<p>We need to have an explicit standard for generating the wallet from the passphrase, because with bitcoin a seed phrase only works with the same wallet software that created it. If storing for a long period of time, the seed phrase will stop working.</p>
<p>Therefore, need a standard test vector: A wallet <em>must</em> generate this (empty) test wallet with this phrase, which test wallet is authorized to use the name "anon 1". We need to make the construction of a wallet from a test phrase part of the standard, so that future wallet software will give you access to the same account for the same passphrase as past wallet software.</p>
<p>It has to be a standard for wallet software that for a given pass phrase that is valid for the standard wallet, any new wallet software shall give you access to the same accounts, and a test vector for this has to be part of the system from the beginning, giving a sequence of wallets authorized to use the names anonymous_1 anonymous_2, and anonymous_3</p>
<p><a href="coinb.in">coinb.in</a> is the client wallet for bitcoin. Before I roll my own, I need to understand what they are doing and why they are doing it.</p>
<p style="background-color : #ccffcc; font-size:80%">This document is licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">CreativeCommons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -151,7 +151,7 @@ ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key
Note that visual studio remote compile requires an `ecdsa-sha2-nistp256` key on the host machine that it is remote compiling for. If it is nist, it is
backdoored
If the host has a domain name, the default in `/etc/bash.bashrc` will not display it in full at the prompt, which can lead to you being confused about which host on the internet you are commanding.
If the host has a domain name, the default in `/etc/bash.bashrc` will not display it in full at the prompt, which can lead to you being confused about which host on the internet you are commanding.
```bash
nano /etc/bash.bashrc
@ -184,7 +184,7 @@ inadequate swap file, so that they will fail gracefully under overload,
rather than locking up, needing to be powered down, and then needing to
be recreated from scratch because of data corruption.
This looks to me like a kernel defect. The kernel should detect when it is
This looks to me like a kernel defect. The kernel should detect when it is
thrashing the swap file, and respond by sleeping entire processes for
lengthy and growing periods, and logging these abnormally long sleeps
on wake. Swapping should never escalate to lockup, and if it does, bad
@ -463,7 +463,7 @@ Now that putty can do a non interactive login, you can use `plink` to have a
script in a client window execute a program on the server, and echo the
output to the client, and psftp to transfer files, though `scp` in the Git Bash
window is better, and `rsync` (Unix to Unix only, requires `rsync` running on
both computers) is the best. `scp` and `rsync`, like `git`, get their keys from
both computers) is the best. `scp` and `rsync`, like `git`, get their keys from
`~/.ssh/config`
On windows, FileZilla uses putty private keys to do scp. This is a much
@ -743,7 +743,7 @@ and configuration
```bash
apt-get -qy install certbot python-certbot-apache
certbot register --register-unsafely-without-email --agree-tos
certbot register --register-unsafely-without-email --agree-tos
certbot --apache
```
@ -784,7 +784,7 @@ Thus, after certbot has worked its magic, your conf file looks like
## Lemp stack on Debian
```bash
apt-get -qy update && apt-get -qy install nginx mariadb-server php php-cli php-xml php-mbstring php-mysql php7.3-fpm
apt-get -qy update && apt-get -qy install nginx mariadb-server php php-cli php-xml php-mbstring php-mysql php7.3-fpm
nginx -t
ufw status verbose
```
@ -890,7 +890,7 @@ php fpm service for the fpm service.
nginx -t
# find the name of your php fpm service
systemctl status php* | grep fpm.service
# substitute the actual php fpm service for
# substitute the actual php fpm service for
# php7.3-fpm.sock in the configuration file.
systemctl stop nginx
rm -v /etc/nginx/sites-enabled/*
@ -978,21 +978,21 @@ If that works, then create the file `/var/www/reaction.la/index.php` containing:
```php
<?php
$user = "example_user";
$user = "example_user";
$password = "mypassword";
$database = "example_database";
$table = "todo_list";
try {
try {
$db = new PDO("mysql:host=localhost;dbname=$database", $user, $password);
echo "<h2>TODO</h2><ol>";
foreach($db->query("SELECT content FROM $table") as $row) {
echo "<li>" . $row['content'] . "</li>";
echo "<li>" . $row['content'] . "</li>";
}
echo "</ol>";
echo "</ol>";
}
catch (PDOException $e) {
print "Error!: " . $e->getMessage() . "<br/>";
die();
die();
}
?>
```
@ -1065,7 +1065,7 @@ great.
# certbots many mysterious, confusing, and frequently
# changing behaviors expect a working environment.
apt-get -qy install certbot python-certbot-nginx
certbot register --register-unsafely-without-email --agree-tos
certbot register --register-unsafely-without-email --agree-tos
certbot --nginx
# This also, by default, sets up automatic renewal,
# and reconfigures everything to redirect to https
@ -1083,7 +1083,7 @@ map to the old server, until the new server works.)
```bash
apt-get -qy install certbot python-certbot-nginx
certbot register --register-unsafely-without-email --agree-tos
certbot register --register-unsafely-without-email --agree-tos
certbot run -a manual --preferred-challenges dns -i nginx -d reaction.la -d blog.reaction.la
nginx -t
```
@ -1101,7 +1101,7 @@ But if you are doing this, not on your test server, but on your live server, the
If instead you already have a certificate, because you copied over your `/etc/letsencrypt` directory
```bash
apt-get -qy install certbot python-certbot-nginx
apt-get -qy install certbot python-certbot-nginx
certbot install -i nginx
nginx -t
```
@ -1125,7 +1125,7 @@ automation, which mean you have to have given it the information\
To backup and restore letsencrypt, to move your certificates from one
server to another, `rsync -HAvaX reaction.la:/etc/letsencrypt /etc`, as root
on the computer which will receive the backup. The letsencrypt directory
gets mangled by `tar`, `scp` and `sftp`.
gets mangled by `tar`, `scp` and `sftp`.
Again, browse to your server. You should get redirected to https, and https should work.
@ -1246,7 +1246,7 @@ Now you should delete the example user and the example database:
```sql
mariadb
REVOKE ALL PRIVILEGES, GRANT OPTION FROM
REVOKE ALL PRIVILEGES, GRANT OPTION FROM
'example_user'@'localhost';
DROP USER 'example_user'@'localhost';
DROP DATABASE example_database;
@ -1784,7 +1784,7 @@ Set `mydestination` to all dns names that map to your server (it probably alrea
```bash
postconf -e mailbox_size_limit=268435456
postconf -e message_size_limit=67108864
postconf
postconf
postconf myhostname
postconf mydestination
postconf smtpd_banner
@ -2077,7 +2077,7 @@ dan@blog.reaction.la dan
# eventually automatically deleted.
#
# The addresses without username catch all emails that do not
# have an entry.
# have an entry.
# You don't want an error message response for invalid email
# addresses, as this may reveal too much to your enemies.
```
@ -2800,7 +2800,7 @@ when your subkey expires.
```bash
save
gpg --list-keys --with-subkey-fingerprints --with-keygrip «master key»
gpg -a --export-keys «master key»
gpg -a --export-keys «master key»
gpg -a --export-secret-keys «master key»
```

View File

@ -590,7 +590,7 @@ them, and receive money from them.
If the wallet integrates an identity and messaging system, then making
payments and receiving payments over the wallet can be made easier than
with any existing system.
with any existing system.
We have to put the medium for communication about money, for
communicating metadata about transactions, inside the wallet, as in the old
@ -637,7 +637,7 @@ justice. Debian broke Gnome3 and cannot fix it because of social justice.
Business needs a currency and [book] keeping system that enables them to
operate a business instead of a social justice crusade.
A blockchain is just a public ledger with an immense number of columns.
A blockchain is just a public ledger with an immense number of columns.
Triple entry [book] keeping with immutable journal entries is a narrowly
shared ledger with a considerably smaller number of columns. Every
business needs its books on its own blockchain, to escape government
@ -856,7 +856,7 @@ This does not occupy mainchain space, because a single two fifty six bit
hash on the mainchain can represent to the total state and total order of
many very large sidechains, with the very large preimage of the hash being
known to peers on the sidechain, but not to peers on the mainchain unless
they are also peers on the sidechain.
they are also peers on the sidechain.
A two fifty six bit hash gives unlimited compression, which is lossy in the
sense that it is one way compression, but lossless in that if you know the

View File

@ -12,7 +12,7 @@
text-align:center;
}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>How to Save the World</title>
</head>
<body>
@ -31,6 +31,6 @@ test</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -9,7 +9,7 @@
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<link rel="shortcut icon" href="../rho.ico">
<title>Spam filtering</title>
</head>
<body>

View File

@ -1,112 +1,112 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
<meta content="text/html; charset=UTF-8" http-equiv="content-type">
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Squaring Zooko's triangle</title>
<link rel="shortcut icon" href="../rho.ico">
<title>Squaring Zooko's triangle</title>
</head>
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Squaring Zooko's triangle</h1>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Squaring Zooko's triangle</h1>
<p>
Need a system for handing one's keys around that protects end users from the horrifying sight of actual keys or actual strong hashes of keys.</p>
<p>
But at the same time the system has to not say, "I can't deliver your message to that person because an invisible gnotzaframmit that I won't describe to you seems to be unavailable to me in the flabogrommit."</p>
<p>It seems like the clever bit of CT is the insight that some actions, like
a CA signing a cert, are intended to be public, and so should be forced
(via clever crypto) to take place in public. This makes me wonder what
other crypto actions should also take place in public, in a way that
doesn't permit hiding them from the world.&nbsp; </p>
<p>Revocation&nbsp; </p>
<p>Software releases&nbsp; </p>
<p>Mapping of email address to public key&nbsp; </p>
<p>Delegation of DNSSEC keys&nbsp; </p>
<p>&nbsp; </p>
<p>Of course, globally visible events need to take place at a globally
visible time. The most widely available time is GPS time (which is 19
seconds off the commonly used time), and which is available from the
seldom connected pps line.</p>
<p>At present, unfortunately, anyone who wants gps time has to do his own
soldering and hack his own software. There is a pre soldered device
available, but it is hard to get.&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>
Imagine skype as originally designed, (central authority maps public and
private keys to user names) plus a key continuity feature, plus the seldom
used option of doing a zero knowledge shared passphrase to detect man in
the middle.&nbsp; </p>
<p>
The possibility that the zero knowledge check could be used would deter
powerful adversaries, even if seldom used in practice. The more powerful,
the greater the deterrent effect.&nbsp; </p>
<p>
It is not totally end to end, central authority can listen in, but the
check would limit the amount of listening.&nbsp; </p>
<p>
It can be made completely end to end for strong passwords. Assume login is
by zero knowledge password protocol, which means that the central
authority does not know the end user's password, for strong
passwords.&nbsp; </p>
<p>
The secret key is generated from the strong secret supplied by central
authority, plus the password.&nbsp; </p>
<p>
When you change your password, you generate a certificate mapping your new
public key to your old public key, which certificate makes other people's
key continuity check happy.&nbsp; </p>
<p>
If key continuity fails, people get a warning, but they don't have to
click it away, for that just trains people to click it away. They can just
continue right on and not pay attention to it.&nbsp; </p>
<p>
Or they could use the zero knowledge shared passphrase procedure to detect
man in the middle.&nbsp; </p>
<p>
So, if non paranoid, and using easy passwords, works like skype used to
work. No interception except by central authority, and central authority
cannot intercept everyone, or even large numbers of people.&nbsp; </p>
<p>
If paranoid and using strong passwords, provides OTR like end to end
capability.&nbsp; </p>
<p><br/>
</p>
<p><br/>
</p>
<p>Key management is an unsolved problem.&nbsp; In my biased opinion the
best<br/>
solution was my Crypto Kong, which received limited takeup.<br/>
<br/>
So, in conclusion, don't make people manage keys, though that should be an
option for the seriously paranoid.<br/>
<br/>
Instead, autogenerate the keys with zero knowledge passphrase logon.<br/>
<br/>
If he uses a password weak enough to fall to an offline dictionary attack,
this is equivalent to the old skype system, where central authority
manages his keys and he has password logon.&nbsp; If he uses a stronger
password, equivalent to a salted strong passphrase system.</p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
<p>It seems like the clever bit of CT is the insight that some actions, like
a CA signing a cert, are intended to be public, and so should be forced
(via clever crypto) to take place in public. This makes me wonder what
other crypto actions should also take place in public, in a way that
doesn't permit hiding them from the world.&nbsp; </p>
<p>Revocation&nbsp; </p>
<p>Software releases&nbsp; </p>
<p>Mapping of email address to public key&nbsp; </p>
<p>Delegation of DNSSEC keys&nbsp; </p>
<p>&nbsp; </p>
<p>Of course, globally visible events need to take place at a globally
visible time. The most widely available time is GPS time (which is 19
seconds off the commonly used time), and which is available from the
seldom connected pps line.</p>
<p>At present, unfortunately, anyone who wants gps time has to do his own
soldering and hack his own software. There is a pre soldered device
available, but it is hard to get.&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>
Imagine skype as originally designed, (central authority maps public and
private keys to user names) plus a key continuity feature, plus the seldom
used option of doing a zero knowledge shared passphrase to detect man in
the middle.&nbsp; </p>
<p>
The possibility that the zero knowledge check could be used would deter
powerful adversaries, even if seldom used in practice. The more powerful,
the greater the deterrent effect.&nbsp; </p>
<p>
It is not totally end to end, central authority can listen in, but the
check would limit the amount of listening.&nbsp; </p>
<p>
It can be made completely end to end for strong passwords. Assume login is
by zero knowledge password protocol, which means that the central
authority does not know the end user's password, for strong
passwords.&nbsp; </p>
<p>
The secret key is generated from the strong secret supplied by central
authority, plus the password.&nbsp; </p>
<p>
When you change your password, you generate a certificate mapping your new
public key to your old public key, which certificate makes other people's
key continuity check happy.&nbsp; </p>
<p>
If key continuity fails, people get a warning, but they don't have to
click it away, for that just trains people to click it away. They can just
continue right on and not pay attention to it.&nbsp; </p>
<p>
Or they could use the zero knowledge shared passphrase procedure to detect
man in the middle.&nbsp; </p>
<p>
So, if non paranoid, and using easy passwords, works like skype used to
work. No interception except by central authority, and central authority
cannot intercept everyone, or even large numbers of people.&nbsp; </p>
<p>
If paranoid and using strong passwords, provides OTR like end to end
capability.&nbsp; </p>
<p><br/>
</p>
<p><br/>
</p>
<p>Key management is an unsolved problem.&nbsp; In my biased opinion the
best<br/>
solution was my Crypto Kong, which received limited takeup.<br/>
<br/>
So, in conclusion, don't make people manage keys, though that should be an
option for the seriously paranoid.<br/>
<br/>
Instead, autogenerate the keys with zero knowledge passphrase logon.<br/>
<br/>
If he uses a password weak enough to fall to an offline dictionary attack,
this is equivalent to the old skype system, where central authority
manages his keys and he has password logon.&nbsp; If he uses a stronger
password, equivalent to a salted strong passphrase system.</p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p>&nbsp; </p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons
Attribution-Share Alike 3.0 License</a></p>
Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -180,5 +180,5 @@ owed, and others less well placed. </p>
font-size:80%">These documents are licensed under the <a
rel="license"
href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -159,5 +159,5 @@ mindlessly do a one click add.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -138,7 +138,7 @@
<li>Be interested in liberty and the freedom to transact and speak to get back to the original motivations. Don&#8217;t spend time trying to make government-friendly financial alternatives.</li>
<li>Remember, there are a lot tyrants out there.</li>
</ul>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative Commons Attribution-Share Alike 3.0 License</a></p>
</body>

View File

@ -253,7 +253,7 @@ by the Venetian Friar
<a href=3D"http://www-groups.dcs.st-andrews.ac.uk/~history/Mathematicians/P=
acioli.html">
Luca Pacioli</a>
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_LP" na=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_LP" na=
me=3D"back_LP">LP</a>]</small>.
In his treatise, Pacioli documented many standard
techniques, including a chapter on accounting.
@ -284,7 +284,7 @@ was never capable of being fielded. The replacement
double entry system was fielded in early 1996 and
has never lost a transaction
(although there have been some close shaves
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_IG1" n=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_IG1" n=
ame=3D"back_IG1">IG1</a>]</small>).
</p>
@ -417,7 +417,7 @@ effective form of signature, and
<i>public key cryptosystems</i> provide
another form where signers hold a private
key and verifiers hold a public key
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_MB" na=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_MB" na=
me=3D"back_MB">MB</a>]</small>.
There are also many ways to attack the
@ -437,14 +437,14 @@ At first it was suggested that a
variant known as the
<i>blinded signature</i>
would enable digital cash
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_DC" na=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_DC" na=
me=3D"back_DC">DC</a>]</small>.
Then, <i>certificates</i> would
circulate as rights or contracts, in much
the same way as the share certificates
of old and thus replace centralised accounting
systems
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_RAH" n=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_RAH" n=
ame=3D"back_RAH">RAH</a>]</small>.
These ideas took financial cryptography part of
@ -469,31 +469,31 @@ eipt">The Initial Role of a Receipt</a></h3>
<center>
<table bgcolor=3D"#99FFFF" border=3D"1">
<tbody><tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.12.25</td>
</tr>
</tbody></table>
<table cellspacing=3D"5">
<tbody><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.12.25</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>digital signature</i></td>
</tr>
</tbody></table>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>digital signature</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table></center>
@ -508,7 +508,7 @@ the Internet, the capabilities of cryptography
and the needs of governance
led to the development of the
<i>signed receipt</i>
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_GH" na=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_GH" na=
me=3D"back_GH">GH</a>]</small>.
In order to develop this concept, let us assume
a simple three party payment system,
@ -571,66 +571,66 @@ of risks that we decided to address.
<p align=3D"center"><i>2: A Signed Receipt</i></p>
<center><table bgcolor=3D"#99FFFF" border=3D"1">
<tbody><tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td>User's Cheque</td>
<td>
<table bgcolor=3D"#FFBBFF" border=3D"1">
<tbody><tr><td>
<table>
<tbody><tr><td>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Qty</td>
<td>100</td>
</tr><tr>
<td>Com</td>
<td>Pens</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>Alice's sig</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.04.10</td>
</tr>
</tbody></table>
<table cellspacing=3D"5">
<tbody><tr>
<td>User's Cheque</td>
<td>
<table bgcolor=3D"#FFBBFF" border=3D"1">
<tbody><tr><td>
<table>
<tbody><tr><td>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Qty</td>
<td>100</td>
</tr><tr>
<td>Com</td>
<td>Pens</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>Alice's sig</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table>
</td>
</tr><tr>
<td>From</td>
<td>Alice</td>
</tr><tr>
<td>To</td>
<td>Bob</td>
</tr><tr>
<td>Unit</td>
<td>Euro</td>
</tr><tr>
<td>Quantity</td>
<td>100</td>
</tr><tr>
<td>Date</td>
<td>2005.04.10</td>
</tr>
</tbody></table>
</td></tr>
<tr><td>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>Ivan's signature</i></td>
</tr>
</tbody></table>
<table cellspacing=3D"5">
<tbody><tr>
<td><i>Ivan's signature</i></td>
</tr>
</tbody></table>
</td></tr>
</tbody></table></center>
@ -780,7 +780,7 @@ directs that we store the primary records,
in this case the set of receipts, and we
construct derivative records, the accounting
books, on the fly
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_4NF" n=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_4NF" n=
ame=3D"back_4NF">4NF</a>]</small>.
</p>
@ -982,21 +982,21 @@ Todd Boyle looked at a similar problem from the point
of view of small business needs in an Internet age,
and reached the same conclusion - triple entry
accounting
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_1" nam=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_1" nam=
e=3D"back_1">1</a>]</small>.
His starting premises were that:
</p>
<ol><li><p>
The major need is not accounting or payments, per se,
but patterns of exchange - complex patterns of trade;
The major need is not accounting or payments, per se,
but patterns of exchange - complex patterns of trade;
</p></li><li><p>
Small businesses could not afford large complex
systems that understood these patterns;
Small businesses could not afford large complex
systems that understood these patterns;
</p></li><li><p>
They would not lock themselves into proprietary
frameworks;
They would not lock themselves into proprietary
frameworks;
</p></li></ol>
<p>
@ -1137,9 +1137,9 @@ that it imposes well recognised.
<p>
Below are the list of requirements that we
believed to be important
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_2" nam=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_2" nam=
e=3D"back_2">2</a>]</small>
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_3" nam=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_3" nam=
e=3D"back_3">3</a>]</small>.
</p>
@ -1310,7 +1310,7 @@ would have shown a clear audit trail of transactions
and thus late timing and otherwise perverted or
dropped transactions would have been clearly
identified or eliminated completely
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_NG" na=
<small>[<a href=3D"https://iang.org/papers/triple_entry.html#ref_NG" na=
me=3D"back_NG">NG</a>]</small>.
The emerging scandal in the USA known as
<i>Stockgate</i> would have been impossible
@ -1435,30 +1435,30 @@ A Relational Model of Data for Large Shared Data Banks
<p>
<b><a name=3D"ref_1">[1]</a></b>
Todd Boyle,
"<a href=3D"http://ledgerism.net/GLT-GLR.htm">
GLT and GLR: conceptual architecture for general ledgers</a>,"
Ledgerism.net, 1997-2005.
Todd Boyle,
"<a href=3D"http://ledgerism.net/GLT-GLR.htm">
GLT and GLR: conceptual architecture for general ledgers</a>,"
Ledgerism.net, 1997-2005.
</p>
<p>
<b><a name=3D"ref_2">[2]</a></b>
Todd Boyle,
"<a href=3D"http://www.ledgerism.net/STR.htm">
STR software specification</a>,"
Goals, 1-5.
This section adopts that numbering convention.
Todd Boyle,
"<a href=3D"http://www.ledgerism.net/STR.htm">
STR software specification</a>,"
Goals, 1-5.
This section adopts that numbering convention.
</p>
<p>
<b><a name=3D"ref_3">[3]</a></b>
Ian Grigg,
various design and requirements documents,
Systemics, unpublished.
Ian Grigg,
various design and requirements documents,
Systemics, unpublished.
</p>

View File

@ -1,6 +1,6 @@
---
title: >-
Triple Entry Accounting
Triple Entry Accounting
---
See [Sox accounting], for why we need to replace Sox accounting with triple entry accounting.
@ -28,7 +28,7 @@ forms of cheating, and this design is profoundly difficult, complex, and
notoriously subtle and difficult to correctly implement.
The way of the future will be to move bookkeeping, accounting, and
various other measures against cheating to the blockchain.
various other measures against cheating to the blockchain.
The fundamental force moving us to a blockchain based world is an
untrusted and untrustworthy elite.

View File

@ -2,19 +2,19 @@
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style>
<style>
body {
max-width: 30em;
margin-left: 2em;
}
p.center {
p.center {
text-align:center;
}
</style>
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>
<title>
True Names and TCP
</title>
</title>
</head><body>
<h1>True Names and TCP
</h1><p>
@ -30,7 +30,7 @@ transaction. </p><p>
Recently there have been moves to make your cell phone
into a wallet. A big problem with this is that cell
phone cryptography is broken. Another problem is that
phone cryptography is broken. Another problem is that
cell phones are not necessarily associated with true names, and as soon as the government hears that they might control money, it starts insisting that cell phones <em>are</em> associated with true names. The phone companies don't like this, for if money is transferred from true name to true name, rather than cell phone to cell phone, it will make them a servant of the banking cartel, and the bankers will suck up all the gravy, but once people start stealing money through flaws in the encryption, they will be depressingly grateful that the government can track account holders down and punish them — except, of course, the government probably will not be much good at doing so. </p><p>
TCP is all about creating connections. It creates connections between network addresses, but network addresses correspond to the way networks are organized, not the way people are organized, so on top of networks we have domain names. </p><p>
@ -71,6 +71,5 @@ Often the server wants to make sure that the client at one end of a connection i
</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -26,7 +26,6 @@ Fred is in the business of buying and selling cryptocurrency for Filipino pesos,
This plan reflects our intent to replace government mediated trust and cooperation, with blockchain mediated trust and cooperation, because government mediated trust and cooperation is conspicuously failing.&nbsp; </p><p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

View File

@ -19,15 +19,15 @@ p.center {text-align:center;}
upwards compatibility, we want to have variable precision
numbers. This class of problem is that of a <a href="https://infogalactic.com/info/Universal_code_%28data_compression%29">
universal code for integers</a> </p>
<p>But all this stuff is too clever by half.</p>
<p>A way simpler solution in context is that all numbers are potentially sixty four bit numbers. Communication is always preceded by protocol negotiation, in which both sides agree on a protocol that guarantees they both know the schemas that will be used to represent records and what is to be done with those records. Each record will be preceded by a schema number, which tells the recipient how to interpret the ensuing record. A potentially sixty four bit number is represented by a group of up to nine bytes in little endian order, each byte containing seven bits, with its high order bit indicating whether the next byte is part of the group, except for the ninth byte, if there is a ninth byte, in which case we shift left by eight instead of seven, and use the entire eight bits of the ninth byte, thus making overflow integers unrepresentable. This schema is good for schema identifiers, protocol identifiers, block numbers, length counts, database record numbers, and times dated from after the start of the information epoch. Blobs of known size will be stored directly in the record. In the unlikely event that a blob is of variable size, its size will be a length count in the schema, usually followed directly by the blob. If a record contains a variable number of records, again, a length count of the number of records.</p>
<p>Having spent an unreasonable amount of time and energy on figuring out optimal ways of representing variable precision numbers, and coming up with the extremely ingenious idea of representing the numbers from 2 to any positive number with an implied probability of
n*(n+1)/(4^n)/8, where n is the number of bits following the first non zero bit, I decided to throw all that stuff away.</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -17,11 +17,11 @@
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Verifiable log backed map</h1>
<p>The point and purpose of the Verifiable Log Backed Map is to provide a mapping from identifiers, such as phone numbers, email addresses, etc, to public keys, which mapping has to be the same for everyone so that if you see your public key is such and such, you know that those you converse with also see your public key is such and such.</p>
<p>For this to be actually useful, needs to drive a blogging and messaging system with capabilities analogous to facebook, wordpress, or twitter, and each entity in the system needs to have a wallet analogous to a blockchain wallet. Not all wallets should be associated with entities that can be easily found on the network, but entities that can be easily found on the network should have wallets</p>
<p>For this to be actually useful, needs to drive a blogging and messaging system with capabilities analogous to facebook, wordpress, or twitter, and each entity in the system needs to have a wallet analogous to a blockchain wallet. Not all wallets should be associated with entities that can be easily found on the network, but entities that can be easily found on the network should have wallets</p>
<p>A Verifiable Log Backed map is a Verifiable Map backed by a Verifiable Log that describes an ordered set of operations that result in predictable mutations to the map.</p>
<h2>Clients of a verifiable log can:</h2><ol>

View File

@ -14,12 +14,12 @@
<body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Vision Statement</h1>
<p>Not yet written</p>
<p> A vision statement is a one page version of the functional spec, saying what your software will do for users
</p>
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body>
</html>

View File

@ -12,7 +12,7 @@ p.center {text-align:center;}
</style>
<link rel="shortcut icon" href="../rho.ico">
<title>Wallet Design</title> </head><body>
<p><a href="./index.html"> To Home page</a> </p>
<h1>Wallet Design</h1><p>
@ -25,7 +25,6 @@ It seems like a lot of work to implement, but that is only because you have not
<p style="background-color : #ccffcc; font-size:80%">These documents are
licensed under the <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/">Creative
Commons Attribution-Share Alike 3.0 License</a></p>
Commons Attribution-Share Alike 3.0 License</a></p>
</body></html>

Some files were not shown because too many files have changed in this diff Show More