From a9fb1fd15d404dbee0e50b25d207ea9d5fee7347 Mon Sep 17 00:00:00 2001
From: yuriy0803 <68668177+yuriy0803@users.noreply.github.com>
Date: Mon, 22 Feb 2021 20:36:41 +0100
Subject: [PATCH] New ETC POOL
---
.gitignore | 6 +
.travis.yml | 8 +
LICENSE | 621 +
Makefile | 16 +
README.md | 446 +
api/server.go | 374 +
build/env.sh | 38 +
configs/api.json | 112 +
configs/nginx.default.example | 18 +
configs/payout.json | 112 +
configs/stratum2b.json | 108 +
configs/stratum4b.json | 113 +
configs/stratum8b.json | 113 +
configs/stratum9b.json | 113 +
configs/unlocker.json | 112 +
docs/PAYOUTS.md | 144 +
docs/POLICIES.md | 19 +
docs/STRATUM.md | 143 +
main.go | 107 +
payouts/payer.go | 335 +
payouts/unlocker.go | 545 +
payouts/unlocker_test.go | 113 +
policy/policy.go | 317 +
proxy/blocks.go | 117 +
proxy/config.go | 63 +
proxy/handlers.go | 127 +
proxy/miner.go | 97 +
proxy/proto.go | 38 +
proxy/proxy.go | 311 +
proxy/stratum.go | 221 +
rpc/rpc.go | 300 +
scripts/start_2_bil.sh | 2 +
service_installer.sh | 108 +
storage/redis.go | 1329 ++
storage/redis_test.go | 329 +
util/util.go | 81 +
www/.bowerrc | 4 +
www/.editorconfig | 34 +
www/.ember-cli | 9 +
www/.gitignore | 17 +
www/.jshintrc | 33 +
www/.travis.yml | 23 +
www/.watchmanconfig | 3 +
www/README.md | 53 +
www/app/app.js | 18 +
www/app/components/.gitkeep | 0
www/app/components/active-li.js | 17 +
www/app/controllers/.gitkeep | 0
www/app/controllers/account.js | 155 +
www/app/controllers/account/index.js | 151 +
www/app/controllers/account/payouts.js | 111 +
www/app/controllers/application.js | 105 +
www/app/controllers/help.js | 6 +
www/app/controllers/index.js | 116 +
www/app/formats.js | 18 +
www/app/helpers/.gitkeep | 0
www/app/helpers/format-balance.js | 8 +
www/app/helpers/format-date-locale.js | 8 +
www/app/helpers/format-difficulty.js | 8 +
www/app/helpers/format-hashrate.js | 14 +
www/app/helpers/format-tx.js | 7 +
www/app/helpers/seconds-to-ms.js | 7 +
www/app/helpers/string-to-int.js | 7 +
www/app/helpers/with-metric-prefix.js | 19 +
www/app/helpers/worker-colorizer.js | 30 +
www/app/helpers/worker-earnperday.js | 9 +
www/app/index.html | 25 +
www/app/models/.gitkeep | 0
www/app/models/block.js | 30 +
www/app/models/payment.js | 10 +
www/app/resolver.js | 3 +
www/app/router.js | 28 +
www/app/routes/.gitkeep | 0
www/app/routes/account.js | 25 +
www/app/routes/application.js | 88 +
www/app/routes/blocks.js | 32 +
www/app/routes/index.js | 11 +
www/app/routes/miners.js | 34 +
www/app/routes/payments.js | 22 +
www/app/styles/app.css | 364 +
www/app/templates/about.hbs | 20 +
www/app/templates/account.hbs | 77 +
www/app/templates/account/index.hbs | 41 +
www/app/templates/account/payouts.hbs | 30 +
www/app/templates/account/rewards.hbs | 40 +
www/app/templates/application-error.hbs | 6 +
www/app/templates/application.hbs | 95 +
www/app/templates/blocks.hbs | 25 +
www/app/templates/blocks/block.hbs | 33 +
www/app/templates/blocks/immature.hbs | 24 +
www/app/templates/blocks/index.hbs | 23 +
www/app/templates/blocks/pending.hbs | 31 +
www/app/templates/components/.gitkeep | 0
www/app/templates/components/active-li.hbs | 1 +
www/app/templates/help.hbs | 95 +
www/app/templates/index.hbs | 172 +
www/app/templates/luck.hbs | 22 +
www/app/templates/miners.hbs | 33 +
www/app/templates/not-found.hbs | 6 +
www/app/templates/payments.hbs | 39 +
www/bower.json | 14 +
www/build.sh | 4 +
www/config/ember-intl.js | 53 +
www/config/environment.js | 70 +
www/ember-cli-build.js | 36 +
www/package-lock.json | 13305 ++++++++++++++++
www/package.json | 47 +
www/public/bg.png | Bin 0 -> 191 bytes
www/public/crossdomain.xml | 15 +
www/public/favicon.ico | Bin 0 -> 370070 bytes
www/public/robots.txt | 3 +
www/testem.json | 12 +
www/tests/.jshintrc | 52 +
www/tests/helpers/destroy-app.js | 5 +
www/tests/helpers/module-for-acceptance.js | 23 +
www/tests/helpers/resolver.js | 11 +
www/tests/helpers/start-app.js | 18 +
www/tests/index.html | 33 +
www/tests/integration/.gitkeep | 0
www/tests/test-helper.js | 6 +
www/tests/unit/.gitkeep | 0
.../unit/helpers/format-difficulty-test.js | 12 +
.../unit/helpers/worker-colorizer-test.js | 12 +
.../unit/helpers/worker-earnperday-test.js | 12 +
www/translations/ar-sa.yaml | 159 +
www/translations/en-us.yaml | 158 +
www/vendor/.gitkeep | 0
127 files changed, 23691 insertions(+)
create mode 100644 .gitignore
create mode 100644 .travis.yml
create mode 100644 LICENSE
create mode 100644 Makefile
create mode 100644 README.md
create mode 100644 api/server.go
create mode 100644 build/env.sh
create mode 100644 configs/api.json
create mode 100644 configs/nginx.default.example
create mode 100644 configs/payout.json
create mode 100644 configs/stratum2b.json
create mode 100644 configs/stratum4b.json
create mode 100644 configs/stratum8b.json
create mode 100644 configs/stratum9b.json
create mode 100644 configs/unlocker.json
create mode 100644 docs/PAYOUTS.md
create mode 100644 docs/POLICIES.md
create mode 100644 docs/STRATUM.md
create mode 100644 main.go
create mode 100644 payouts/payer.go
create mode 100644 payouts/unlocker.go
create mode 100644 payouts/unlocker_test.go
create mode 100644 policy/policy.go
create mode 100644 proxy/blocks.go
create mode 100644 proxy/config.go
create mode 100644 proxy/handlers.go
create mode 100644 proxy/miner.go
create mode 100644 proxy/proto.go
create mode 100644 proxy/proxy.go
create mode 100644 proxy/stratum.go
create mode 100644 rpc/rpc.go
create mode 100644 scripts/start_2_bil.sh
create mode 100644 service_installer.sh
create mode 100644 storage/redis.go
create mode 100644 storage/redis_test.go
create mode 100644 util/util.go
create mode 100644 www/.bowerrc
create mode 100644 www/.editorconfig
create mode 100644 www/.ember-cli
create mode 100644 www/.gitignore
create mode 100644 www/.jshintrc
create mode 100644 www/.travis.yml
create mode 100644 www/.watchmanconfig
create mode 100644 www/README.md
create mode 100644 www/app/app.js
create mode 100644 www/app/components/.gitkeep
create mode 100644 www/app/components/active-li.js
create mode 100644 www/app/controllers/.gitkeep
create mode 100644 www/app/controllers/account.js
create mode 100644 www/app/controllers/account/index.js
create mode 100644 www/app/controllers/account/payouts.js
create mode 100644 www/app/controllers/application.js
create mode 100644 www/app/controllers/help.js
create mode 100644 www/app/controllers/index.js
create mode 100644 www/app/formats.js
create mode 100644 www/app/helpers/.gitkeep
create mode 100644 www/app/helpers/format-balance.js
create mode 100644 www/app/helpers/format-date-locale.js
create mode 100644 www/app/helpers/format-difficulty.js
create mode 100644 www/app/helpers/format-hashrate.js
create mode 100644 www/app/helpers/format-tx.js
create mode 100644 www/app/helpers/seconds-to-ms.js
create mode 100644 www/app/helpers/string-to-int.js
create mode 100644 www/app/helpers/with-metric-prefix.js
create mode 100644 www/app/helpers/worker-colorizer.js
create mode 100644 www/app/helpers/worker-earnperday.js
create mode 100644 www/app/index.html
create mode 100644 www/app/models/.gitkeep
create mode 100644 www/app/models/block.js
create mode 100644 www/app/models/payment.js
create mode 100644 www/app/resolver.js
create mode 100644 www/app/router.js
create mode 100644 www/app/routes/.gitkeep
create mode 100644 www/app/routes/account.js
create mode 100644 www/app/routes/application.js
create mode 100644 www/app/routes/blocks.js
create mode 100644 www/app/routes/index.js
create mode 100644 www/app/routes/miners.js
create mode 100644 www/app/routes/payments.js
create mode 100644 www/app/styles/app.css
create mode 100644 www/app/templates/about.hbs
create mode 100644 www/app/templates/account.hbs
create mode 100644 www/app/templates/account/index.hbs
create mode 100644 www/app/templates/account/payouts.hbs
create mode 100644 www/app/templates/account/rewards.hbs
create mode 100644 www/app/templates/application-error.hbs
create mode 100644 www/app/templates/application.hbs
create mode 100644 www/app/templates/blocks.hbs
create mode 100644 www/app/templates/blocks/block.hbs
create mode 100644 www/app/templates/blocks/immature.hbs
create mode 100644 www/app/templates/blocks/index.hbs
create mode 100644 www/app/templates/blocks/pending.hbs
create mode 100644 www/app/templates/components/.gitkeep
create mode 100644 www/app/templates/components/active-li.hbs
create mode 100644 www/app/templates/help.hbs
create mode 100644 www/app/templates/index.hbs
create mode 100644 www/app/templates/luck.hbs
create mode 100644 www/app/templates/miners.hbs
create mode 100644 www/app/templates/not-found.hbs
create mode 100644 www/app/templates/payments.hbs
create mode 100644 www/bower.json
create mode 100644 www/build.sh
create mode 100644 www/config/ember-intl.js
create mode 100644 www/config/environment.js
create mode 100644 www/ember-cli-build.js
create mode 100644 www/package-lock.json
create mode 100644 www/package.json
create mode 100644 www/public/bg.png
create mode 100644 www/public/crossdomain.xml
create mode 100644 www/public/favicon.ico
create mode 100644 www/public/robots.txt
create mode 100644 www/testem.json
create mode 100644 www/tests/.jshintrc
create mode 100644 www/tests/helpers/destroy-app.js
create mode 100644 www/tests/helpers/module-for-acceptance.js
create mode 100644 www/tests/helpers/resolver.js
create mode 100644 www/tests/helpers/start-app.js
create mode 100644 www/tests/index.html
create mode 100644 www/tests/integration/.gitkeep
create mode 100644 www/tests/test-helper.js
create mode 100644 www/tests/unit/.gitkeep
create mode 100644 www/tests/unit/helpers/format-difficulty-test.js
create mode 100644 www/tests/unit/helpers/worker-colorizer-test.js
create mode 100644 www/tests/unit/helpers/worker-earnperday-test.js
create mode 100644 www/translations/ar-sa.yaml
create mode 100644 www/translations/en-us.yaml
create mode 100644 www/vendor/.gitkeep
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d6d6ece
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+.DS_Store
+config.json
+testnet.json
+
+/build/_workspace/
+/build/bin/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..cd665b3
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+ - "1.10"
+ - tip
+
+services:
+ - redis-server
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..94a0453
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,621 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..8062f50
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,16 @@
+# This Makefile is meant to be used by people that do not usually work
+# with Go source code. If you know what GOPATH is then you probably
+# don't need to bother with make.
+
+.PHONY: all test clean
+
+GOBIN = build/bin
+
+all:
+ build/env.sh go get -v ./...
+
+test: all
+ build/env.sh go test -v ./...
+
+clean:
+ rm -fr build/_workspace/pkg/ $(GOBIN)/*
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8af21c0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,446 @@
+## Open Source Perkle (ETC 2021) Mining Pool
+
+
+### Features
+
+**This pool is being further developed to provide an easy to use pool for Perkle miners. Testing and bug submissions are welcome!**
+
+* Updated to work with Perkle 0.2.1
+* Support for HTTP and Stratum mining
+* Detailed block stats with luck percentage and full reward
+* Failover geth instances: geth high availability built in
+* Modern beautiful Ember.js frontend
+* Separate stats for workers: can highlight timed-out workers so miners can perform maintenance of rigs
+* JSON-API for stats
+* PPLNS block reward
+* Multi-tx payout at once
+* Beautiful front-end highcharts embedded
+
+#### Proxies
+
+* [Ether-Proxy](https://github.com/sammy007/ether-proxy) HTTP proxy with web interface
+* [Stratum Proxy](https://github.com/Atrides/eth-proxy) for Ethereum
+
+## Guide to make your very own Perkle mining pool
+
+### Building on Linux
+
+Dependencies:
+
+ * go >= 1.10
+ * redis-server >= 2.8.0
+ * nodejs >= 4 LTS
+ * nginx
+ * geth (multi-geth)
+
+**I highly recommend to use Ubuntu 16.04 LTS.**
+
+### Install go lang
+
+ $ sudo apt-get install -y build-essential golang-1.10-go unzip
+ $ sudo ln -s /usr/lib/go-1.10/bin/go /usr/local/bin/go
+
+### Install redis-server
+
+ $ sudo apt-get install redis-server
+
+It is recommended to bind your DB address on 127.0.0.1 or on internal ip. Also, please set up the password for advanced security!!!
+
+### Install nginx
+
+ $ sudo apt-get install nginx
+
+sample config located at configs/nginx.default.example (HINT, edit and move to /etc/nginx/sites-available/default)
+
+### Install NODE
+
+This will install the latest nodejs
+
+ $ curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
+ $ sudo apt-get install -y nodejs
+
+### Install Perkle Node
+See https://github.com/esprezzo/perkle
+
+### Install Perkle Pool
+
+ $ git clone https://github.com/yuriy0803/open-etc-pool-friends
+ $ cd open-etc-pool-friends
+ $ make all
+
+If you see open-etc-pool-friends after ls ~/open-etc-pool-friends/build/bin/, the installation has completed.
+
+ $ ls ~/open-etc-pool-friends/build/bin/
+
+### Set up Perkle pool
+
+ $ mv config.example.json config.json
+ $ nano config.json
+
+Set up based on commands below.
+
+```javascript
+{
+ // The number of cores of CPU.
+ "threads": 2,
+ // Prefix for keys in redis store
+ "coin": "prkl",
+ // Give unique name to each instance
+ "name": "main",
+ // PPLNS rounds
+ "pplns": 9000,
+
+ "proxy": {
+ "enabled": true,
+
+ // Bind HTTP mining endpoint to this IP:PORT
+ "listen": "0.0.0.0:8888",
+
+ // Allow only this header and body size of HTTP request from miners
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+
+ /* Set to true if you are behind CloudFlare (not recommended) or behind http-reverse
+ proxy to enable IP detection from X-Forwarded-For header.
+ Advanced users only. It's tricky to make it right and secure.
+ */
+ "behindReverseProxy": false,
+
+ // Stratum mining endpoint
+ "stratum": {
+ "enabled": true,
+ // Bind stratum mining socket to this IP:PORT
+ "listen": "0.0.0.0:8008",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ // Try to get new job from geth in this interval
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ // If there are many rejects because of heavy hash, difficulty should be increased properly.
+ "difficulty": 2000000000,
+
+ /* Reply error to miner instead of job if redis is unavailable.
+ Should save electricity to miners if pool is sick and they didn't set up failovers.
+ */
+ "healthCheck": true,
+ // Mark pool sick after this number of redis failures.
+ "maxFails": 100,
+ // TTL for workers stats, usually should be equal to large hashrate window from API section
+ "hashrateExpiration": "3h",
+
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+
+ "banning": {
+ "enabled": false,
+ /* Name of ipset for banning.
+ Check http://ipset.netfilter.org/ documentation.
+ */
+ "ipset": "blacklist",
+ // Remove ban after this amount of time
+ "timeout": 1800,
+ // Percent of invalid shares from all shares to ban miner
+ "invalidPercent": 30,
+      // Check only after a miner has submitted this number of shares
+ "checkThreshold": 30,
+ // Bad miner after this number of malformed requests
+ "malformedLimit": 5
+ },
+ // Connection rate limit
+ "limits": {
+ "enabled": false,
+ // Number of initial connections
+ "limit": 30,
+ "grace": "5m",
+ // Increase allowed number of connections on each valid share
+ "limitJump": 10
+ }
+ }
+ },
+
+ // Provides JSON data for frontend which is static website
+ "api": {
+ "enabled": true,
+ "listen": "0.0.0.0:8080",
+ // Collect miners stats (hashrate, ...) in this interval
+ "statsCollectInterval": "5s",
+ // Purge stale stats interval
+ "purgeInterval": "10m",
+    // Fast hashrate estimation window for each miner from its shares
+ "hashrateWindow": "30m",
+ // Long and precise hashrate from shares, 3h is cool, keep it
+ "hashrateLargeWindow": "3h",
+ // Collect stats for shares/diff ratio for this number of blocks
+ "luckWindow": [64, 128, 256],
+ // Max number of payments to display in frontend
+ "payments": 50,
+ // Max numbers of blocks to display in frontend
+ "blocks": 50,
+ // Frontend Chart related settings
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74
+
+ /* If you are running API node on a different server where this module
+ is reading data from redis writeable slave, you must run an api instance with this option enabled in order to purge hashrate stats from main redis node.
+ Only redis writeable slave will work properly if you are distributing using redis slaves.
+ Very advanced. Usually all modules should share same redis instance.
+ */
+ "purgeOnly": false
+ },
+
+ // Check health of each geth node in this interval
+ "upstreamCheckInterval": "5s",
+
+ /* List of geth nodes to poll for new jobs. Pool will try to get work from
+ first alive one and check in background for failed to back up.
+ Current block template of the pool is always cached in RAM indeed.
+ */
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.2:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ // This is standard redis connection options
+ "redis": {
+ // Where your redis instance is listening for commands
+ // NOTE THAT THE POOL IS CONFIGURED FOR Redis database "1"
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ // This module periodically remits ether to miners
+ "unlocker": {
+ "enabled": false,
+ // Pool fee percentage
+ "poolFee": 1.0,
+ // the address is for pool fee. Personal wallet is recommended to prevent from server hacking.
+ "poolFeeAddress": "",
+ // Amount of donation to a pool maker. 10 percent of pool fee is donated to a pool maker now. If pool fee is 1 percent, 0.1 percent which is 10 percent of pool fee should be donated to a pool maker.
+ "donate": true,
+ // Unlock only if this number of blocks mined back
+ "depth": 120,
+ // Simply don't touch this option
+ "immatureDepth": 20,
+ // Keep mined transaction fees as pool fees
+ "keepTxFees": false,
+ // Run unlocker in this interval
+ "interval": "10m",
+ // Geth instance node rpc endpoint for unlocking blocks
+ "daemon": "http://127.0.0.1:8501",
+ // Rise error if can't reach geth in this amount of time
+ "timeout": "10s"
+ },
+
+ // Pay out miners using this module
+ "payouts": {
+ "enabled": true,
+ // Require minimum number of peers on node
+ "requirePeers": 5,
+ // Run payouts in this interval
+ "interval": "12h",
+ // Geth instance node rpc endpoint for payouts processing
+ "daemon": "http://127.0.0.1:8501",
+ // Rise error if can't reach geth in this amount of time
+ "timeout": "10s",
+ // Address with pool coinbase wallet address.
+ "address": "0x0",
+ // Let geth to determine gas and gasPrice
+ "autoGas": true,
+ // Gas amount and price for payout tx (advanced users only)
+ "gas": "21000",
+ "gasPrice": "50000000000",
+    // The minimum payout threshold, in shannon (1000000000 shannon = 1 coin).
+ "threshold": 1000000000,
+ // Perform BGSAVE on Redis after successful payouts session
+ "bgsave": false
+ "concurrentTx": 10
+ }
+}
+```
+
+If you are distributing your pool deployment to several servers or processes,
+create several configs and disable unneeded modules on each server. (Advanced users)
+
+I recommend this deployment strategy:
+
+* Mining instance - 1x (it depends, you can run one node for EU, one for US, one for Asia)
+* Unlocker and payouts instance - 1x each (strict!)
+* API instance - 1x
+
+
+### Run Pool
+It is recommended to run the pool as a systemd service; otherwise the pool stops as soon as the terminal session ends.
+
+ $ sudo nano /etc/systemd/system/etherpool.service
+
+Copy the following example
+
+```
+[Unit]
+Description=Etherpool
+After=perkle.target
+
+[Service]
+Type=simple
+ExecStart=/home/<username>/open-etc-pool-friends/build/bin/open-etc-pool-friends /home/<username>/open-etc-pool-friends/config.json
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Then run pool by the following commands
+
+ $ sudo systemctl enable etherpool
+ $ sudo systemctl start etherpool
+
+If you want to debug the node command
+
+ $ sudo systemctl status etherpool
+
+Backend operation has completed so far.
+
+### Open Firewall
+
+Firewall should be opened to operate this service. Whether Ubuntu firewall is basically opened or not, the firewall should be opened based on your situation.
+You can open firewall by opening 80,443,8080,8888,8008.
+
+## Install Frontend
+
+### Modify configuration file
+
+ $ nano ~/open-etc-pool-friends/www/config/environment.js
+
+Make some modifications in these settings.
+
+ BrowserTitle: 'Perkle Mining Pool',
+ ApiUrl: '//your-pool-domain/',
+ HttpHost: 'http://your-pool-domain',
+ StratumHost: 'your-pool-domain',
+ PoolFee: '1%',
+
+The frontend is a single-page Ember.js application that polls the pool API to render miner stats.
+
+ $ cd ~/open-etc-pool-friends/www
+ $ sudo npm install -g ember-cli@2.9.1
+ $ sudo npm install -g bower
+ $ sudo chown -R $USER:$GROUP ~/.npm
+ $ sudo chown -R $USER:$GROUP ~/.config
+ $ npm install
+ $ bower install
+ $ ./build.sh
+ $ cp -R ~/open-etc-pool-friends/www/dist ~/www
+
+As you can see above, the frontend of the pool homepage is created. Then, move to the directory, www, which services the file.
+
+Set up nginx.
+
+ $ sudo nano /etc/nginx/sites-available/default
+
+Modify based on configuration file.
+
+ # Default server configuration
+ # nginx example
+
+ upstream api {
+ server 127.0.0.1:8080;
+ }
+
+ server {
+ listen 80 default_server;
+ listen [::]:80 default_server;
+        root /home/<username>/www;
+
+ # Add index.php to the list if you are using PHP
+ index index.html index.htm index.nginx-debian.html;
+
+ server_name _;
+
+ location / {
+ # First attempt to serve request as file, then
+ # as directory, then fall back to displaying a 404.
+ try_files $uri $uri/ =404;
+ }
+
+ location /api {
+ proxy_pass http://api;
+ }
+
+ }
+
+After setting nginx is completed, run the command below.
+
+ $ sudo service nginx restart
+
+Type your homepage address or IP address on the web.
+If you face screen without any issues, pool installation has completed.
+
+### Extra) How To Secure the pool frontend with Let's Encrypt (https)
+
+This guide was originally referred from [digitalocean - How To Secure Nginx with Let's Encrypt on Ubuntu 16.04](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-16-04)
+
+First, install the Certbot's Nginx package with apt-get
+
+```
+$ sudo add-apt-repository ppa:certbot/certbot
+$ sudo apt-get update
+$ sudo apt-get install python-certbot-nginx
+```
+
+And then open your nginx setting file, make sure the server name is configured!
+
+```
+$ sudo nano /etc/nginx/sites-available/default
+. . .
+server_name ;
+. . .
+```
+
+Change the _ to your pool domain, and now you can obtain your auto-renewed SSL certificate for free!
+
+```
+$ sudo certbot --nginx -d your-pool-domain
+```
+
+Now you can access your pool's frontend via https! Share your pool link!
+
+### Notes
+
+* Unlocking and payouts are sequential, 1st tx go, 2nd waiting for 1st to confirm and so on. You can disable that in code. Carefully read `docs/PAYOUTS.md`.
+* Also, keep in mind that **unlocking and payouts will halt in case of backend or node RPC errors**. In that case check everything and restart.
+* You must restart module if you see errors with the word *suspended*.
+* Don't run payouts and unlocker modules as part of mining node. Create separate configs for both, launch independently and make sure you have a single instance of each module running.
+* If `poolFeeAddress` is not specified all pool profit will remain on coinbase address. If it specified, make sure to periodically send some dust back required for payments.
+* DO NOT OPEN YOUR RPC OR REDIS ON 0.0.0.0!!! It will eventually cause coin theft.
+
+### Credits
+
+Made by sammy007. Licensed under GPLv3.
+Modified by Akira Takizawa & The Ellaism Project & The Esprezzo Team.
+
+#### Contributors
+
+[Alex Leverington](https://github.com/subtly)
+
+### Donations
+
+ETH/ETC/ETSC/CLO: 0xd92fa5a9732a0aec36dc8d5a6a1305dc2d3e09e6
+
+
+
+Highly appreciated.
diff --git a/api/server.go b/api/server.go
new file mode 100644
index 0000000..aaca1d2
--- /dev/null
+++ b/api/server.go
@@ -0,0 +1,374 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/gorilla/mux"
+ "github.com/robfig/cron"
+
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+type ApiConfig struct {
+ Enabled bool `json:"enabled"`
+ Listen string `json:"listen"`
+ PoolCharts string `json:"poolCharts"`
+ PoolChartsNum int64 `json:"poolChartsNum"`
+ MinerChartsNum int64 `json:"minerChartsNum"`
+ MinerCharts string `json:"minerCharts"`
+ StatsCollectInterval string `json:"statsCollectInterval"`
+ HashrateWindow string `json:"hashrateWindow"`
+ HashrateLargeWindow string `json:"hashrateLargeWindow"`
+ LuckWindow []int `json:"luckWindow"`
+ Payments int64 `json:"payments"`
+ Blocks int64 `json:"blocks"`
+ PurgeOnly bool `json:"purgeOnly"`
+ PurgeInterval string `json:"purgeInterval"`
+}
+
+type ApiServer struct {
+ config *ApiConfig
+ backend *storage.RedisClient
+ hashrateWindow time.Duration
+ hashrateLargeWindow time.Duration
+ stats atomic.Value
+ miners map[string]*Entry
+ minersMu sync.RWMutex
+ statsIntv time.Duration
+}
+
+type Entry struct {
+ stats map[string]interface{}
+ updatedAt int64
+}
+
+func NewApiServer(cfg *ApiConfig, backend *storage.RedisClient) *ApiServer {
+ hashrateWindow := util.MustParseDuration(cfg.HashrateWindow)
+ hashrateLargeWindow := util.MustParseDuration(cfg.HashrateLargeWindow)
+ return &ApiServer{
+ config: cfg,
+ backend: backend,
+ hashrateWindow: hashrateWindow,
+ hashrateLargeWindow: hashrateLargeWindow,
+ miners: make(map[string]*Entry),
+ }
+}
+
+func (s *ApiServer) Start() {
+ if s.config.PurgeOnly {
+ log.Printf("Starting API in purge-only mode")
+ } else {
+ log.Printf("Starting API on %v", s.config.Listen)
+ }
+
+ s.statsIntv = util.MustParseDuration(s.config.StatsCollectInterval)
+ statsTimer := time.NewTimer(s.statsIntv)
+ log.Printf("Set stats collect interval to %v", s.statsIntv)
+
+ purgeIntv := util.MustParseDuration(s.config.PurgeInterval)
+ purgeTimer := time.NewTimer(purgeIntv)
+ log.Printf("Set purge interval to %v", purgeIntv)
+
+ sort.Ints(s.config.LuckWindow)
+
+ if s.config.PurgeOnly {
+ s.purgeStale()
+ } else {
+ s.purgeStale()
+ s.collectStats()
+ }
+
+ go func() {
+ for {
+ select {
+ case <-statsTimer.C:
+ if !s.config.PurgeOnly {
+ s.collectStats()
+ }
+ statsTimer.Reset(s.statsIntv)
+ case <-purgeTimer.C:
+ s.purgeStale()
+ purgeTimer.Reset(purgeIntv)
+ }
+ }
+ }()
+
+ go func() {
+ c := cron.New()
+
+ poolCharts := s.config.PoolCharts
+ log.Printf("pool charts config is :%v", poolCharts)
+ c.AddFunc(poolCharts, func() {
+ s.collectPoolCharts()
+ })
+
+ minerCharts := s.config.MinerCharts
+ log.Printf("miner charts config is :%v", minerCharts)
+ c.AddFunc(minerCharts, func() {
+
+ miners, err := s.backend.GetAllMinerAccount()
+ if err != nil {
+ log.Println("Get all miners account error: ", err)
+ }
+ for _, login := range miners {
+ miner, _ := s.backend.CollectWorkersStats(s.hashrateWindow, s.hashrateLargeWindow, login)
+ s.collectMinerCharts(login, miner["currentHashrate"].(int64), miner["hashrate"].(int64), miner["workersOnline"].(int64))
+ }
+ })
+
+ c.Start()
+ }()
+
+ if !s.config.PurgeOnly {
+ s.listen()
+ }
+}
+
+func (s *ApiServer) collectPoolCharts() {
+ ts := util.MakeTimestamp() / 1000
+ now := time.Now()
+ year, month, day := now.Date()
+ hour, min, _ := now.Clock()
+ t2 := fmt.Sprintf("%d-%02d-%02d %02d_%02d", year, month, day, hour, min)
+ stats := s.getStats()
+ hash := fmt.Sprint(stats["hashrate"])
+ log.Println("Pool Hash is ", ts, t2, hash)
+ err := s.backend.WritePoolCharts(ts, t2, hash)
+ if err != nil {
+ log.Printf("Failed to fetch pool charts from backend: %v", err)
+ return
+ }
+}
+
+func (s *ApiServer) collectMinerCharts(login string, hash int64, largeHash int64, workerOnline int64) {
+ ts := util.MakeTimestamp() / 1000
+ now := time.Now()
+ year, month, day := now.Date()
+ hour, min, _ := now.Clock()
+ t2 := fmt.Sprintf("%d-%02d-%02d %02d_%02d", year, month, day, hour, min)
+
+ log.Println("Miner "+login+" Hash is", ts, t2, hash, largeHash)
+ err := s.backend.WriteMinerCharts(ts, t2, login, hash, largeHash, workerOnline)
+ if err != nil {
+ log.Printf("Failed to fetch miner %v charts from backend: %v", login, err)
+ }
+}
+
+func (s *ApiServer) listen() {
+ r := mux.NewRouter()
+ r.HandleFunc("/api/stats", s.StatsIndex)
+ r.HandleFunc("/api/miners", s.MinersIndex)
+ r.HandleFunc("/api/blocks", s.BlocksIndex)
+ r.HandleFunc("/api/payments", s.PaymentsIndex)
+ r.HandleFunc("/api/accounts/{login:0x[0-9a-fA-F]{40}}", s.AccountIndex)
+ r.NotFoundHandler = http.HandlerFunc(notFound)
+ err := http.ListenAndServe(s.config.Listen, r)
+ if err != nil {
+ log.Fatalf("Failed to start API: %v", err)
+ }
+}
+
+func notFound(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Cache-Control", "no-cache")
+ w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *ApiServer) purgeStale() {
+ start := time.Now()
+ total, err := s.backend.FlushStaleStats(s.hashrateWindow, s.hashrateLargeWindow)
+ if err != nil {
+ log.Println("Failed to purge stale data from backend:", err)
+ } else {
+ log.Printf("Purged stale stats from backend, %v shares affected, elapsed time %v", total, time.Since(start))
+ }
+}
+
+func (s *ApiServer) collectStats() {
+ start := time.Now()
+ stats, err := s.backend.CollectStats(s.hashrateWindow, s.config.Blocks, s.config.Payments)
+ if err != nil {
+ log.Printf("Failed to fetch stats from backend: %v", err)
+ return
+ }
+ if len(s.config.LuckWindow) > 0 {
+ stats["luck"], err = s.backend.CollectLuckStats(s.config.LuckWindow)
+ if err != nil {
+ log.Printf("Failed to fetch luck stats from backend: %v", err)
+ return
+ }
+ }
+ stats["poolCharts"], err = s.backend.GetPoolCharts(s.config.PoolChartsNum)
+ s.stats.Store(stats)
+ log.Printf("Stats collection finished %s", time.Since(start))
+}
+
+// StatsIndex serves pool-wide stats plus upstream node states as JSON.
+func (s *ApiServer) StatsIndex(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Cache-Control", "no-cache")
+	w.WriteHeader(http.StatusOK)
+
+	reply := make(map[string]interface{})
+
+	// Node states are fetched live; a failure is logged but still yields
+	// a response (with a nil "nodes" entry), matching prior behavior.
+	nodes, err := s.backend.GetNodeStates()
+	if err != nil {
+		log.Printf("Failed to get nodes stats from backend: %v", err)
+	}
+	reply["nodes"] = nodes
+
+	if cached := s.getStats(); cached != nil {
+		reply["now"] = util.MakeTimestamp()
+		for _, key := range []string{"stats", "poolCharts", "hashrate", "minersTotal", "maturedTotal", "immatureTotal", "candidatesTotal"} {
+			reply[key] = cached[key]
+		}
+	}
+
+	if err = json.NewEncoder(w).Encode(reply); err != nil {
+		log.Println("Error serializing API response: ", err)
+	}
+}
+
+// MinersIndex serves the cached miner list and aggregate hashrate as JSON.
+func (s *ApiServer) MinersIndex(w http.ResponseWriter, r *http.Request) {
+
+	// TODO: Want to get the most used server from workers, so it can be displayed in miners page
+
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Cache-Control", "no-cache")
+	w.WriteHeader(http.StatusOK)
+
+	reply := make(map[string]interface{})
+	if cached := s.getStats(); cached != nil {
+		reply["now"] = util.MakeTimestamp()
+		for _, key := range []string{"miners", "hashrate", "minersTotal"} {
+			reply[key] = cached[key]
+		}
+	}
+
+	if err := json.NewEncoder(w).Encode(reply); err != nil {
+		log.Println("Error serializing API response: ", err)
+	}
+}
+
+// BlocksIndex serves matured/immature/candidate block lists and luck
+// statistics from the cached snapshot as JSON.
+func (s *ApiServer) BlocksIndex(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Cache-Control", "no-cache")
+	w.WriteHeader(http.StatusOK)
+
+	reply := make(map[string]interface{})
+	if cached := s.getStats(); cached != nil {
+		for _, key := range []string{"matured", "maturedTotal", "immature", "immatureTotal", "candidates", "candidatesTotal", "luck"} {
+			reply[key] = cached[key]
+		}
+	}
+
+	if err := json.NewEncoder(w).Encode(reply); err != nil {
+		log.Println("Error serializing API response: ", err)
+	}
+}
+
+// PaymentsIndex serves the cached payment history as JSON.
+func (s *ApiServer) PaymentsIndex(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Cache-Control", "no-cache")
+	w.WriteHeader(http.StatusOK)
+
+	reply := make(map[string]interface{})
+	if cached := s.getStats(); cached != nil {
+		for _, key := range []string{"payments", "paymentsTotal"} {
+			reply[key] = cached[key]
+		}
+	}
+
+	if err := json.NewEncoder(w).Encode(reply); err != nil {
+		log.Println("Error serializing API response: ", err)
+	}
+}
+
+// AccountIndex serves per-miner stats, using a short-lived in-memory cache
+// (keyed by lowercased login) so repeated requests within statsIntv do not
+// hammer the backend. Returns 404 for unknown miners and 500 on backend errors.
+func (s *ApiServer) AccountIndex(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Cache-Control", "no-cache")
+
+	login := strings.ToLower(mux.Vars(r)["login"])
+	s.minersMu.Lock()
+	defer s.minersMu.Unlock()
+
+	reply, ok := s.miners[login]
+	now := util.MakeTimestamp()
+	cacheIntv := int64(s.statsIntv / time.Millisecond)
+	// Refresh stats if stale
+	if !ok || reply.updatedAt < now-cacheIntv {
+		exist, err := s.backend.IsMinerExists(login)
+		// BUG FIX: inspect the error before interpreting `exist`; previously a
+		// backend failure (exist=false, err!=nil) was misreported as 404.
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			log.Printf("Failed to fetch stats from backend: %v", err)
+			return
+		}
+		if !exist {
+			w.WriteHeader(http.StatusNotFound)
+			return
+		}
+
+		stats, err := s.backend.GetMinerStats(login, s.config.Payments)
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			log.Printf("Failed to fetch stats from backend: %v", err)
+			return
+		}
+		workers, err := s.backend.CollectWorkersStats(s.hashrateWindow, s.hashrateLargeWindow, login)
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			log.Printf("Failed to fetch stats from backend: %v", err)
+			return
+		}
+		for key, value := range workers {
+			stats[key] = value
+		}
+		stats["pageSize"] = s.config.Payments
+		// BUG FIX: chart-fetch errors were previously discarded outright.
+		stats["minerCharts"], err = s.backend.GetMinerCharts(s.config.MinerChartsNum, login)
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			log.Printf("Failed to fetch stats from backend: %v", err)
+			return
+		}
+		stats["paymentCharts"], err = s.backend.GetPaymentCharts(login)
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			log.Printf("Failed to fetch stats from backend: %v", err)
+			return
+		}
+		reply = &Entry{stats: stats, updatedAt: now}
+		s.miners[login] = reply
+	}
+
+	w.WriteHeader(http.StatusOK)
+	err := json.NewEncoder(w).Encode(reply.stats)
+	if err != nil {
+		log.Println("Error serializing API response: ", err)
+	}
+}
+
+// getStats returns the most recently cached stats snapshot, or nil if the
+// collector has not stored one yet.
+func (s *ApiServer) getStats() map[string]interface{} {
+	if v := s.stats.Load(); v != nil {
+		return v.(map[string]interface{})
+	}
+	return nil
+}
diff --git a/build/env.sh b/build/env.sh
new file mode 100644
index 0000000..716bbec
--- /dev/null
+++ b/build/env.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -f "build/env.sh" ]; then
+    echo "$0 must be run from the root of the repository."
+    exit 2
+fi
+
+# Create fake Go workspace if it doesn't exist yet.
+workspace="$PWD/build/_workspace"
+root="$PWD"
+ethdir="$workspace/src/github.com/esprezzo"
+mkdir -p "$ethdir"
+
+# Refresh the repo symlink atomically.
+# BUG FIX: the previous `mv open-etc-pool-friends open-etc-pool-friends.old`
+# followed by `ln -s` would, on a second run, move the link INTO the stale
+# ".old" symlink's target directory (mv onto a symlink-to-directory descends
+# into it) and leave ".old" clutter behind. `ln -sfn` replaces the link
+# in place without that hazard.
+cd "$ethdir"
+ln -sfn ../../../../../. open-etc-pool-friends
+cd "$root"
+
+# Set up the environment to use the workspace.
+# Also add Godeps workspace so we build using canned dependencies.
+GOPATH="$workspace"
+GOBIN="$PWD/build/bin"
+export GOPATH GOBIN
+
+# Run the command inside the workspace.
+cd "$ethdir/open-etc-pool-friends"
+PWD="$ethdir/open-etc-pool-friends"
+
+# Launch the arguments with the configured environment.
+exec "$@"
diff --git a/configs/api.json b/configs/api.json
new file mode 100644
index 0000000..6563585
--- /dev/null
+++ b/configs/api.json
@@ -0,0 +1,112 @@
+{
+ "threads": 2,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": false,
+ "listen": "0.0.0.0:8888",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8008",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 2000000000,
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": true,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.2:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/nginx.default.example b/configs/nginx.default.example
new file mode 100644
index 0000000..a986720
--- /dev/null
+++ b/configs/nginx.default.example
@@ -0,0 +1,18 @@
+server {
+ listen 80;
+ listen [::]:80;
+
+ root /var/www/etcpool;
+ index index.html index.htm index.nginx-debian.html;
+
+ server_name etc.yourdomain.name;
+
+ location / {
+ try_files $uri $uri/ =404;
+ }
+
+ location /api {
+ proxy_pass http://127.0.0.1:8080/api;
+ }
+
+}
diff --git a/configs/payout.json b/configs/payout.json
new file mode 100644
index 0000000..7183497
--- /dev/null
+++ b/configs/payout.json
@@ -0,0 +1,112 @@
+{
+ "threads": 2,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": false,
+ "listen": "0.0.0.0:8888",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8008",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 2000000000,
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "0xd92fa5a9732a0aec36dc8d5a6a1305dc2d3e09e6",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8591",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": true,
+ "requirePeers": 4,
+ "interval": "1h",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "120s",
+ "address": "0x3f156afdb248618892cb5089ba5a5fcac8ee0b01",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/stratum2b.json b/configs/stratum2b.json
new file mode 100644
index 0000000..1a477c8
--- /dev/null
+++ b/configs/stratum2b.json
@@ -0,0 +1,108 @@
+{
+ "threads": 2,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": true,
+ "listen": "0.0.0.0:8882",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8002",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 2000000000,
+ "stratumHostname": "perkle-pool.esprezzo.io",
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "backup",
+ "url": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/stratum4b.json b/configs/stratum4b.json
new file mode 100644
index 0000000..b2f2f0b
--- /dev/null
+++ b/configs/stratum4b.json
@@ -0,0 +1,113 @@
+{
+ "threads": 4,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": true,
+ "listen": "0.0.0.0:8884",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8004",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 4000000000,
+ "stratumHostname": "perkle-pool.esprezzo.io",
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.2:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/stratum8b.json b/configs/stratum8b.json
new file mode 100644
index 0000000..b9d9eba
--- /dev/null
+++ b/configs/stratum8b.json
@@ -0,0 +1,113 @@
+{
+ "threads": 8,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": true,
+ "listen": "0.0.0.0:8888",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8008",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 8000000000,
+ "stratumHostname": "perkle-pool.esprezzo.io",
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.2:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 15,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "0x0f31986d7a0d4f160acd97583e3c3b591dcb5dde",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/stratum9b.json b/configs/stratum9b.json
new file mode 100644
index 0000000..fba4ccf
--- /dev/null
+++ b/configs/stratum9b.json
@@ -0,0 +1,113 @@
+{
+ "threads": 8,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": true,
+ "listen": "0.0.0.0:8889",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8009",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 9000000000,
+ "stratumHostname": "perkle-pool.esprezzo.io",
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.2:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 15,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": false,
+ "poolFee": 0.5,
+ "poolFeeAddress": "0x0f31986d7a0d4f160acd97583e3c3b591dcb5dde",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": true,
+ "concurrentTx": 5
+ }
+}
diff --git a/configs/unlocker.json b/configs/unlocker.json
new file mode 100644
index 0000000..b76f5be
--- /dev/null
+++ b/configs/unlocker.json
@@ -0,0 +1,112 @@
+{
+ "threads": 2,
+ "coin": "prkl",
+ "name": "main",
+ "pplns": 9000,
+ "proxy": {
+ "enabled": false,
+ "listen": "0.0.0.0:8888",
+ "limitHeadersSize": 1024,
+ "limitBodySize": 256,
+ "behindReverseProxy": false,
+ "stratum": {
+ "enabled": true,
+ "listen": "0.0.0.0:8008",
+ "timeout": "120s",
+ "maxConn": 8192
+ },
+
+ "blockRefreshInterval": "120ms",
+ "stateUpdateInterval": "3s",
+ "difficulty": 2000000000,
+ "healthCheck": true,
+ "maxFails": 100,
+ "hashrateExpiration": "3h",
+ "policy": {
+ "workers": 8,
+ "resetInterval": "60m",
+ "refreshInterval": "1m",
+ "banning": {
+ "enabled": false,
+ "ipset": "blacklist",
+ "timeout": 1800,
+ "invalidPercent": 30,
+ "checkThreshold": 30,
+ "malformedLimit": 5
+ },
+ "limits": {
+ "enabled": false,
+ "limit": 30,
+ "grace": "5m",
+ "limitJump": 10
+ }
+ }
+ },
+
+ "api": {
+ "enabled": false,
+ "listen": "0.0.0.0:8080",
+ "statsCollectInterval": "5s",
+ "purgeInterval": "10m",
+ "hashrateWindow": "30m",
+ "hashrateLargeWindow": "3h",
+ "luckWindow": [64, 128, 256],
+ "payments": 50,
+ "blocks": 50,
+ "poolCharts":"0 */20 * * * *",
+ "poolChartsNum":74,
+ "minerCharts":"0 */20 * * * *",
+ "minerChartsNum":74,
+ "purgeOnly": false
+ },
+
+ "upstreamCheckInterval": "5s",
+
+ "upstream": [
+ {
+ "name": "main",
+ "url": "http://40.121.105.44:8501",
+ "timeout": "10s"
+ },
+ {
+ "name": "backup",
+ "url": "http://127.0.0.1:8501",
+ "timeout": "10s"
+ }
+ ],
+
+ "redis": {
+ "endpoint": "127.0.0.1:6379",
+ "poolSize": 10,
+ "database": 1,
+ "password": ""
+ },
+
+ "unlocker": {
+ "enabled": true,
+ "poolFee": 0.5,
+ "poolFeeAddress": "0x0f31986d7a0d4f160acd97583e3c3b591dcb5dde",
+ "donate": true,
+ "depth": 120,
+ "immatureDepth": 20,
+ "keepTxFees": false,
+ "interval": "10m",
+ "daemon": "http://127.0.0.1:8501",
+ "timeout": "120s"
+ },
+
+ "payouts": {
+ "enabled": false,
+ "requirePeers": 5,
+ "interval": "3h",
+ "daemon": "http://127.0.0.1:8545",
+ "timeout": "10s",
+ "address": "0x",
+ "autoGas": true,
+ "gas": "21000",
+ "gasPrice": "50000000000",
+ "threshold": 1000000000,
+ "bgsave": false,
+ "concurrentTx": 5
+ }
+}
diff --git a/docs/PAYOUTS.md b/docs/PAYOUTS.md
new file mode 100644
index 0000000..c1c2329
--- /dev/null
+++ b/docs/PAYOUTS.md
@@ -0,0 +1,144 @@
+**First of all make sure your Redis instance and backups are configured properly http://redis.io/topics/persistence.**
+
+Keep in mind that pool maintains all balances in **Shannon**.
+
+# Processing and Resolving Payouts
+
+**You MUST run payouts module in a separate process**, ideally don't run it as daemon and process payouts 2-3 times per day and watch how it goes. **You must configure logging**, otherwise it can lead to big problems.
+
+Module will fetch accounts and sequentially process payouts.
+
+For every account who reached minimal threshold:
+
+* Check if we have enough peers on a node
+* Check that account is unlocked
+
+If any of checks fails, module will not even try to continue.
+
+* Check if we have enough money for payout (should not happen under normal circumstances)
+* Lock payments
+
+If payments can't be locked (another lock exist, usually after a failure) module will halt payouts.
+
+* Deduct balance of a miner and log pending payment
+* Submit a transaction to a node via `eth_sendTransaction`
+
+**If transaction submission fails, payouts will remain locked and halted in erroneous state.**
+
+If transaction submission was successful, we have a TX hash:
+
+* Write this TX hash to a database
+* Unlock payouts
+
+And so on. Repeat for every account.
+
+After payout session, payment module will perform `BGSAVE` (background saving) on Redis if you have enabled `bgsave` option.
+
+## Resolving Failed Payments (automatic)
+
+If your payout is not logged and not confirmed by Ethereum network you can resolve it automatically. You need to run payouts in maintenance mode by setting up `RESOLVE_PAYOUT=1` or `RESOLVE_PAYOUT=True` environment variable:
+
+`RESOLVE_PAYOUT=1 ./build/bin/open-etc-pool-friends payouts.json`.
+
+Payout module will fetch all rows from Redis with key `eth:payments:pending` and credit balance back to miners. Usually you will have only single entry there.
+
+If you see `No pending payments to resolve` we have no data about failed debits.
+
+If there was a debit operation performed which is not followed by actual money transfer (after `eth_sendTransaction` returned an error), you will likely see:
+
+```
+Will credit back following balances:
+Address: 0x34AE12692BD4567A27e3E86411b58Ea6954BA773, Amount: 166798415 Shannon, 2016-05-11 08:14:34
+```
+
+followed by
+
+```
+Credited 166798415 Shannon back to 0x34AE12692BD4567A27e3E86411b58Ea6954BA773
+```
+
+Usually every maintenance run ends with following message and halt:
+
+```
+Payouts unlocked
+Now you have to restart payouts module with RESOLVE_PAYOUT=0 for normal run
+```
+
+Unset `RESOLVE_PAYOUT=1` or run payouts with `RESOLVE_PAYOUT=0`.
+
+## Resolving Failed Payment (manual)
+
+You can perform manual maintenance using `geth` and `redis-cli` utilities.
+
+### Check For Failed Transactions:
+
+Perform the following command in a `redis-cli`:
+
+```
+ZREVRANGE "eth:payments:pending" 0 -1 WITHSCORES
+```
+
+Result will be like this:
+
+> 1) "0x34AE12692BD4567A27e3E86411b58Ea6954BA773:25000000"
+
+It's a pair of `LOGIN:AMOUNT`.
+
+>2) "1462920526"
+
+It's a `UNIXTIME`
+
+### Manual Payment Submission
+
+**Make sure there is no TX sent using block explorer. Skip this step if payment actually exist in a blockchain.**
+
+```javascript
+eth.sendTransaction({
+ from: eth.coinbase,
+ to: '0x34AE12692BD4567A27e3E86411b58Ea6954BA773',
+ value: web3.toWei(25000000, 'shannon')
+})
+
+// => 0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331
+```
+
+**Write down tx hash**.
+
+### Store Payment in Redis
+
+Also usable for fixing missing payment entries.
+
+```
+ZADD "eth:payments:all" 1462920526 0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331:0x34AE12692BD4567A27e3E86411b58Ea6954BA773:25000000
+```
+
+```
+ZADD "eth:payments:0x34AE12692BD4567A27e3E86411b58Ea6954BA773" 1462920526 0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331:25000000
+```
+
+### Delete Erroneous Payment Entry
+
+```
+ZREM "eth:payments:pending" "0x34AE12692BD4567A27e3E86411b58Ea6954BA773:25000000"
+```
+
+### Update Internal Stats
+
+```
+HINCRBY "eth:finances" pending -25000000
+HINCRBY "eth:finances" paid 25000000
+```
+
+### Unlock Payouts
+
+```
+DEL "eth:payments:lock"
+```
+
+## Resolving Missing Payment Entries
+
+If pool actually paid but didn't log transaction, scroll up to `Store Payment in Redis` section. You should have a transaction hash from block explorer.
+
+## Transaction Didn't Confirm
+
+If you are sure, just repeat it manually, you should have all the logs.
diff --git a/docs/POLICIES.md b/docs/POLICIES.md
new file mode 100644
index 0000000..7ba3f76
--- /dev/null
+++ b/docs/POLICIES.md
@@ -0,0 +1,19 @@
+# Enforcing Policies
+
+The pool policy server collects several stats on a per-IP basis. There are two options: `iptables+ipset` or simple application-level bans. Banning is disabled by default.
+
+## Firewall Banning
+
+First you need to configure your firewall to use `ipset`, read [this article](https://wiki.archlinux.org/index.php/Ipset).
+
+Specify `ipset` name for banning in `policy` section. Timeout argument (in seconds) will be passed to this `ipset`. Stratum will use `os/exec` command like `sudo ipset add banlist x.x.x.x 1800` for banning, so you have to configure `sudo` properly and make sure that your system will never ask for password:
+
+Example `/etc/sudoers.d/pool` where `pool` is a username under which pool runs:
+
+ pool ALL=NOPASSWD: /sbin/ipset
+
+If you need something simple, just set `ipset` name to blank string and simple application level banning will be used instead.
+
+## Limiting
+
+Under some weird circumstances you can enforce limits to prevent connection flood to stratum, there are initial settings: `limit` and `limitJump`. Policy server will increase number of allowed connections per IP address on each valid share submission. Stratum will not enforce this policy for a `grace` period specified after stratum start.
diff --git a/docs/STRATUM.md b/docs/STRATUM.md
new file mode 100644
index 0000000..62c0195
--- /dev/null
+++ b/docs/STRATUM.md
@@ -0,0 +1,143 @@
+# Stratum Mining Protocol
+
+This is the description of stratum protocol used in this pool.
+
+Stratum defines simple exception handling. Example of rejected share looks like:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: 23, message: "Invalid share" } }
+```
+
+Each response with exception is followed by disconnect.
+
+## Authentication
+
+Request looks like:
+
+```javascript
+{
+ "id": 1,
+ "jsonrpc": "2.0",
+ "method": "eth_submitLogin",
+ "params": ["0x34AE12692BD4567A27e3E86411b58Ea6954BA773"]
+}
+```
+
+Request can include additional 2nd param (email for example):
+
+```javascript
+{
+ "id": 1,
+ "jsonrpc": "2.0",
+ "method": "eth_submitLogin",
+ "params": ["0x34AE12692BD4567A27e3E86411b58Ea6954BA773", "admin@example.net"]
+}
+```
+
+Successful response:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": true }
+```
+
+Exceptions:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: -1, message: "Invalid login" } }
+```
+
+## Request For Job
+
+Request looks like:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "method": "eth_getWork" }
+```
+
+Successful response:
+
+```javascript
+{
+ "id": 1,
+ "jsonrpc": "2.0",
+ "result": [
+ "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "0x5eed00000000000000000000000000005eed0000000000000000000000000000",
+ "0xd1ff1c01710000000000000000000000d1ff1c01710000000000000000000000"
+ ]
+}
+```
+
+Exceptions:
+
+```javascript
+{ "id": 10, "result": null, "error": { code: 0, message: "Work not ready" } }
+```
+
+## New Job Notification
+
+Server sends job to peers if new job is available:
+
+```javascript
+{
+ "jsonrpc": "2.0",
+ "result": [
+ "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "0x5eed00000000000000000000000000005eed0000000000000000000000000000",
+ "0xd1ff1c01710000000000000000000000d1ff1c01710000000000000000000000"
+ ]
+}
+```
+
+## Share Submission
+
+Request looks like:
+
+```javascript
+{
+ "id": 1,
+ "jsonrpc": "2.0",
+ "method": "eth_submitWork",
+ "params": [
+ "0xe05d1fd4002d962f",
+ "0x6c872e2304cd1e64b553a65387d7383470f22331aff288cbce5748dc430f016a",
+ "0x2b20a6c641ed155b893ee750ef90ec3be5d24736d16838b84759385b6724220d"
+ ]
+}
+```
+
+Request can include optional `worker` param:
+
+```javascript
+{ "id": 1, "worker": "rig-1" /* ... */ }
+```
+
+Response:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": true }
+{ "id": 1, "jsonrpc": "2.0", "result": false }
+```
+
+Exceptions:
+
+Pool MAY return an exception on invalid share submission, usually followed by a temporary ban.
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: 23, message: "Invalid share" } }
+```
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: 22, message: "Duplicate share" } }
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: -1, message: "High rate of invalid shares" } }
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: 25, message: "Not subscribed" } }
+{ "id": 1, "jsonrpc": "2.0", "result": null, "error": { code: -1, message: "Malformed PoW result" } }
+```
+
+## Submit Hashrate
+
+`eth_submitHashrate` is a nonsense method. Pool ignores it and the reply is always:
+
+```javascript
+{ "id": 1, "jsonrpc": "2.0", "result": true }
+```
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..e8921bc
--- /dev/null
+++ b/main.go
@@ -0,0 +1,107 @@
+// +build go1.9
+
+package main
+
+import (
+ "encoding/json"
+ "log"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "runtime"
+ "time"
+
+ "github.com/yvasiyarov/gorelic"
+
+ "github.com/yuriy0803/open-etc-pool-friends/api"
+ "github.com/yuriy0803/open-etc-pool-friends/payouts"
+ "github.com/yuriy0803/open-etc-pool-friends/proxy"
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+)
+
+var cfg proxy.Config
+var backend *storage.RedisClient
+
+func startProxy() {
+ s := proxy.NewProxy(&cfg, backend)
+ s.Start()
+}
+
+func startApi() {
+ s := api.NewApiServer(&cfg.Api, backend)
+ s.Start()
+}
+
+func startBlockUnlocker() {
+ u := payouts.NewBlockUnlocker(&cfg.BlockUnlocker, backend)
+ u.Start()
+}
+
+func startPayoutsProcessor() {
+ u := payouts.NewPayoutsProcessor(&cfg.Payouts, backend)
+ u.Start()
+}
+
+func startNewrelic() {
+ if cfg.NewrelicEnabled {
+ nr := gorelic.NewAgent()
+ nr.Verbose = cfg.NewrelicVerbose
+ nr.NewrelicLicense = cfg.NewrelicKey
+ nr.NewrelicName = cfg.NewrelicName
+ nr.Run()
+ }
+}
+
+func readConfig(cfg *proxy.Config) {
+ configFileName := "config.json"
+ if len(os.Args) > 1 {
+ configFileName = os.Args[1]
+ }
+ configFileName, _ = filepath.Abs(configFileName)
+ log.Printf("Loading config: %v", configFileName)
+
+ configFile, err := os.Open(configFileName)
+ if err != nil {
+ log.Fatal("File error: ", err.Error())
+ }
+ defer configFile.Close()
+ jsonParser := json.NewDecoder(configFile)
+ if err := jsonParser.Decode(&cfg); err != nil {
+ log.Fatal("Config error: ", err.Error())
+ }
+}
+
+func main() {
+ readConfig(&cfg)
+ rand.Seed(time.Now().UnixNano())
+
+ if cfg.Threads > 0 {
+ runtime.GOMAXPROCS(cfg.Threads)
+ log.Printf("Running with %v threads", cfg.Threads)
+ }
+
+ startNewrelic()
+
+ backend = storage.NewRedisClient(&cfg.Redis, cfg.Coin, cfg.Pplns)
+ pong, err := backend.Check()
+ if err != nil {
+ log.Printf("Can't establish connection to backend: %v", err)
+ } else {
+ log.Printf("Backend check reply: %v", pong)
+ }
+
+ if cfg.Proxy.Enabled {
+ go startProxy()
+ }
+ if cfg.Api.Enabled {
+ go startApi()
+ }
+ if cfg.BlockUnlocker.Enabled {
+ go startBlockUnlocker()
+ }
+ if cfg.Payouts.Enabled {
+ go startPayoutsProcessor()
+ }
+ quit := make(chan bool)
+ <-quit
+}
diff --git a/payouts/payer.go b/payouts/payer.go
new file mode 100644
index 0000000..668ae8e
--- /dev/null
+++ b/payouts/payer.go
@@ -0,0 +1,335 @@
+package payouts
+
import (
	"fmt"
	"log"
	"math/big"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"

	"github.com/yuriy0803/open-etc-pool-friends/rpc"
	"github.com/yuriy0803/open-etc-pool-friends/storage"
	"github.com/yuriy0803/open-etc-pool-friends/util"
)
+
// txCheckInterval is how often a pending payout tx is polled for its receipt.
const txCheckInterval = 5 * time.Second

// PayoutsConfig holds settings for the automatic payout module.
type PayoutsConfig struct {
	Enabled      bool   `json:"enabled"`
	RequirePeers int64  `json:"requirePeers"` // minimum node peer count before paying
	Interval     string `json:"interval"`     // payout loop period, e.g. "120m"
	Daemon       string `json:"daemon"`       // node RPC endpoint
	Timeout      string `json:"timeout"`      // RPC timeout
	Address      string `json:"address"`      // pool wallet the payments are sent from
	Gas          string `json:"gas"`          // gas limit as a decimal string
	GasPrice     string `json:"gasPrice"`     // gas price in Wei as a decimal string
	AutoGas      bool   `json:"autoGas"`      // let the node estimate gas instead
	// In Shannon
	Threshold int64 `json:"threshold"` // minimum balance that triggers a payout
	BgSave    bool  `json:"bgsave"`    // run Redis BGSAVE after a payout session
	ConcurrentTx int `json:"concurrentTx"` // bound on in-flight payout txs before waiting
}
+
+func (self PayoutsConfig) GasHex() string {
+ x := util.String2Big(self.Gas)
+ return hexutil.EncodeBig(x)
+}
+
+func (self PayoutsConfig) GasPriceHex() string {
+ x := util.String2Big(self.GasPrice)
+ return hexutil.EncodeBig(x)
+}
+
// PayoutsProcessor periodically pays accrued balances out to miners.
// halt/lastFail latch the first critical error; once halt is set, the
// next processing pass terminates the program so an operator must
// resolve the stuck state (see RESOLVE_PAYOUT).
type PayoutsProcessor struct {
	config  *PayoutsConfig
	backend *storage.RedisClient // balances, payout locks and payment log
	rpc     *rpc.RPCClient       // node RPC used to send payment txs
	halt    bool                 // set on critical failure; stops further payouts
	lastFail error               // last critical error, for logging
}
+
+func NewPayoutsProcessor(cfg *PayoutsConfig, backend *storage.RedisClient) *PayoutsProcessor {
+ u := &PayoutsProcessor{config: cfg, backend: backend}
+ u.rpc = rpc.NewRPCClient("PayoutsProcessor", cfg.Daemon, cfg.Timeout)
+ return u
+}
+
+func (u *PayoutsProcessor) Start() {
+ log.Println("Starting payouts")
+
+ if u.mustResolvePayout() {
+ log.Println("Running with env RESOLVE_PAYOUT=1, now trying to resolve locked payouts")
+ u.resolvePayouts()
+ log.Println("Now you have to restart payouts module with RESOLVE_PAYOUT=0 for normal run")
+ return
+ }
+
+ intv := util.MustParseDuration(u.config.Interval)
+ timer := time.NewTimer(intv)
+ log.Printf("Set payouts interval to %v", intv)
+
+ payments := u.backend.GetPendingPayments()
+ if len(payments) > 0 {
+ log.Printf("Previous payout failed, you have to resolve it. List of failed payments:\n %v",
+ formatPendingPayments(payments))
+ return
+ }
+
+ locked, err := u.backend.IsPayoutsLocked()
+ if err != nil {
+ log.Println("Unable to start payouts:", err)
+ return
+ }
+ if locked {
+ log.Println("Unable to start payouts because they are locked")
+ return
+ }
+
+ // Immediately process payouts after start
+ u.process()
+ timer.Reset(intv)
+
+ go func() {
+ for {
+ select {
+ case <-timer.C:
+ u.process()
+ timer.Reset(intv)
+ }
+ }
+ }()
+}
+
// process runs one payout pass: for every payee whose balance is above
// the threshold it locks the payout, debits the balance, sends the tx,
// records the tx hash, and waits for confirmations in bounded batches.
// Any critical failure sets halt/lastFail and aborts the pass; the NEXT
// pass then terminates the process so an operator can inspect state.
func (u *PayoutsProcessor) process() {
	if u.halt {
		log.Println("Payments suspended due to last critical error:", u.lastFail)
		os.Exit(1)
		return // NOTE(review): unreachable after os.Exit(1)
	}
	mustPay := 0
	minersPaid := 0
	totalAmount := big.NewInt(0)
	payees, err := u.backend.GetPayees()
	if err != nil {
		log.Println("Error while retrieving payees from backend:", err)
		return
	}

	waitingCount := 0
	var wg sync.WaitGroup

	for _, login := range payees {
		amount, _ := u.backend.GetBalance(login)
		amountInShannon := big.NewInt(amount)

		// Shannon^2 = Wei
		amountInWei := new(big.Int).Mul(amountInShannon, util.Shannon)

		if !u.reachedThreshold(amountInShannon) {
			continue
		}
		mustPay++

		// Require active peers before processing
		if !u.checkPeers() {
			break
		}
		// Require unlocked account
		if !u.isUnlockedAccount() {
			break
		}

		// Check if we have enough funds
		poolBalance, err := u.rpc.GetBalance(u.config.Address)
		if err != nil {
			u.halt = true
			u.lastFail = err
			break
		}
		if poolBalance.Cmp(amountInWei) < 0 {
			err := fmt.Errorf("Not enough balance for payment, need %s Wei, pool has %s Wei",
				amountInWei.String(), poolBalance.String())
			u.halt = true
			u.lastFail = err
			break
		}

		// Lock payments for current payout. The lock is taken BEFORE the
		// debit and the tx send, so a crash mid-payout leaves a pending
		// payment that RESOLVE_PAYOUT=1 can roll back.
		err = u.backend.LockPayouts(login, amount)
		if err != nil {
			log.Printf("Failed to lock payment for %s: %v", login, err)
			u.halt = true
			u.lastFail = err
			break
		}
		log.Printf("Locked payment for %s, %v Shannon", login, amount)

		// Debit miner's balance and update stats
		err = u.backend.UpdateBalance(login, amount)
		if err != nil {
			log.Printf("Failed to update balance for %s, %v Shannon: %v", login, amount, err)
			u.halt = true
			u.lastFail = err
			break
		}

		value := hexutil.EncodeBig(amountInWei)
		txHash, err := u.rpc.SendTransaction(u.config.Address, login, u.config.GasHex(), u.config.GasPriceHex(), value, u.config.AutoGas)
		if err != nil {
			log.Printf("Failed to send payment to %s, %v Shannon: %v. Check outgoing tx for %s in block explorer and docs/PAYOUTS.md",
				login, amount, err, login)
			u.halt = true
			u.lastFail = err
			break
		}

		// Optional operator hook: run POST_PAYOUT_HOOK with the payee
		// address and the hex Wei value; failures are logged, not fatal.
		if postCommand, present := os.LookupEnv("POST_PAYOUT_HOOK"); present {
			go func(postCommand string, login string, value string) {
				out, err := exec.Command(postCommand, login, value).CombinedOutput()
				if err != nil {
					log.Printf("WARNING: Error running post payout hook: %s", err.Error())
				}
				log.Printf("Running post payout hook with result: %s", out)
			}(postCommand, login, value)
		}

		// Log transaction hash
		err = u.backend.WritePayment(login, txHash, amount)
		if err != nil {
			log.Printf("Failed to log payment data for %s, %v Shannon, tx: %s: %v", login, amount, txHash, err)
			u.halt = true
			u.lastFail = err
			break
		}

		minersPaid++
		totalAmount.Add(totalAmount, big.NewInt(amount))
		log.Printf("Paid %v Shannon to %v, TxHash: %v", amount, login, txHash)

		wg.Add(1)
		waitingCount++
		go func(txHash string, login string, wg *sync.WaitGroup) {
			// Wait for TX confirmation before further payouts.
			// Polls forever; an endlessly-pending tx keeps this goroutine
			// (and eventually wg.Wait below) blocked.
			for {
				log.Printf("Waiting for tx confirmation: %v", txHash)
				time.Sleep(txCheckInterval)
				receipt, err := u.rpc.GetTxReceipt(txHash)
				if err != nil {
					log.Printf("Failed to get tx receipt for %v: %v", txHash, err)
					continue
				}
				// Tx has been mined
				if receipt != nil && receipt.Confirmed() {
					if receipt.Successful() {
						log.Printf("Payout tx successful for %s: %s", login, txHash)
					} else {
						log.Printf("Payout tx failed for %s: %s. Address contract throws on incoming tx.", login, txHash)
					}
					break
				}
			}
			wg.Done()
		}(txHash, login, &wg)

		// NOTE(review): strict '>' lets ConcurrentTx+1 txs be in flight
		// before blocking — confirm whether '>=' was intended.
		if waitingCount > u.config.ConcurrentTx {
			wg.Wait()
			waitingCount = 0
		}
	}

	// Drain all remaining confirmation waiters before reporting.
	wg.Wait()
	waitingCount = 0

	if mustPay > 0 {
		log.Printf("Paid total %v Shannon to %v of %v payees", totalAmount, minersPaid, mustPay)
	} else {
		log.Println("No payees that have reached payout threshold")
	}

	// Save redis state to disk
	if minersPaid > 0 && u.config.BgSave {
		u.bgSave()
	}
}
+
+func (self PayoutsProcessor) isUnlockedAccount() bool {
+ _, err := self.rpc.Sign(self.config.Address, "0x0")
+ if err != nil {
+ log.Println("Unable to process payouts:", err)
+ return false
+ }
+ return true
+}
+
+func (self PayoutsProcessor) checkPeers() bool {
+ n, err := self.rpc.GetPeerCount()
+ if err != nil {
+ log.Println("Unable to start payouts, failed to retrieve number of peers from node:", err)
+ return false
+ }
+ if n < self.config.RequirePeers {
+ log.Println("Unable to start payouts, number of peers on a node is less than required", self.config.RequirePeers)
+ return false
+ }
+ return true
+}
+
+func (self PayoutsProcessor) reachedThreshold(amount *big.Int) bool {
+ return big.NewInt(self.config.Threshold).Cmp(amount) < 0
+}
+
+func formatPendingPayments(list []*storage.PendingPayment) string {
+ var s string
+ for _, v := range list {
+ s += fmt.Sprintf("\tAddress: %s, Amount: %v Shannon, %v\n", v.Address, v.Amount, time.Unix(v.Timestamp, 0))
+ }
+ return s
+}
+
+func (self PayoutsProcessor) bgSave() {
+ result, err := self.backend.BgSave()
+ if err != nil {
+ log.Println("Failed to perform BGSAVE on backend:", err)
+ return
+ }
+ log.Println("Saving backend state to disk:", result)
+}
+
// resolvePayouts credits every pending (locked) payment back to its
// miner's balance and then releases the payout lock. It is the recovery
// path run with RESOLVE_PAYOUT=1 after a failed payout session. The
// loop aborts on the first rollback error so the remaining payments
// stay visible for a retry.
func (self PayoutsProcessor) resolvePayouts() {
	payments := self.backend.GetPendingPayments()

	if len(payments) > 0 {
		log.Printf("Will credit back following balances:\n%s", formatPendingPayments(payments))

		for _, v := range payments {
			err := self.backend.RollbackBalance(v.Address, v.Amount)
			if err != nil {
				log.Printf("Failed to credit %v Shannon back to %s, error is: %v", v.Amount, v.Address, err)
				return
			}
			log.Printf("Credited %v Shannon back to %s", v.Amount, v.Address)
		}
		err := self.backend.UnlockPayouts()
		if err != nil {
			log.Println("Failed to unlock payouts:", err)
			return
		}
	} else {
		log.Println("No pending payments to resolve")
	}

	// Persist the rolled-back state if configured.
	if self.config.BgSave {
		self.bgSave()
	}
	log.Println("Payouts unlocked")
}
+
+func (self PayoutsProcessor) mustResolvePayout() bool {
+ v, _ := strconv.ParseBool(os.Getenv("RESOLVE_PAYOUT"))
+ return v
+}
diff --git a/payouts/unlocker.go b/payouts/unlocker.go
new file mode 100644
index 0000000..07a4576
--- /dev/null
+++ b/payouts/unlocker.go
@@ -0,0 +1,545 @@
+package payouts
+
+import (
+ "fmt"
+ "log"
+ "math/big"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/math"
+
+ "github.com/yuriy0803/open-etc-pool-friends/rpc"
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
// UnlockerConfig holds settings for the block unlocker module: maturity
// depths, pool fee destination, and node connectivity.
type UnlockerConfig struct {
	Enabled        bool    `json:"enabled"`
	PoolFee        float64 `json:"poolFee"`        // pool fee, percent of block reward
	PoolFeeAddress string  `json:"poolFeeAddress"` // account credited with pool fees (optional)
	Donate         bool    `json:"donate"`         // donate a cut of fees to developers
	Depth          int64   `json:"depth"`          // confirmations before a block is matured
	ImmatureDepth  int64   `json:"immatureDepth"`  // confirmations before immature crediting
	KeepTxFees     bool    `json:"keepTxFees"`     // pool keeps tx fees instead of sharing them
	Interval       string  `json:"interval"`       // unlock loop period
	Daemon         string  `json:"daemon"`         // node RPC endpoint
	Timeout        string  `json:"timeout"`        // RPC timeout
}

// minDepth bounds the back/forward chain scan when relocating a
// candidate block, and is the floor for the configured depths.
const minDepth = 16

// constReward is the static per-block reward: 3.2 ETC in Wei.
// NOTE(review): fixed value — ECIP-1017 era reductions are not computed
// here; confirm this matches the targeted reward era.
var constReward = math.MustParseBig256("3200000000000000000")

// uncleReward is the inclusion bonus per referenced uncle: constReward/32.
var uncleReward = new(big.Int).Div(constReward, new(big.Int).SetInt64(32))

// Donate 5% from pool fees to developers
const donationFee = 5.0
const donationAccount = "0xd92fa5a9732a0aec36dc8d5a6a1305dc2d3e09e6"
+
// BlockUnlocker matches stored block candidates against the chain and
// credits miner rewards. halt/lastFail latch the first critical error;
// once set, the next immature-unlock pass terminates the process.
type BlockUnlocker struct {
	config  *UnlockerConfig
	backend *storage.RedisClient // candidates, immature/matured blocks, rewards
	rpc     *rpc.RPCClient       // node RPC for blocks, uncles and receipts
	halt    bool                 // set on critical failure; suspends unlocking
	lastFail error               // last critical error, for logging
}
+
+func NewBlockUnlocker(cfg *UnlockerConfig, backend *storage.RedisClient) *BlockUnlocker {
+ if len(cfg.PoolFeeAddress) != 0 && !util.IsValidHexAddress(cfg.PoolFeeAddress) {
+ log.Fatalln("Invalid poolFeeAddress", cfg.PoolFeeAddress)
+ }
+ if cfg.Depth < minDepth*2 {
+ log.Fatalf("Block maturity depth can't be < %v, your depth is %v", minDepth*2, cfg.Depth)
+ }
+ if cfg.ImmatureDepth < minDepth {
+ log.Fatalf("Immature depth can't be < %v, your depth is %v", minDepth, cfg.ImmatureDepth)
+ }
+ u := &BlockUnlocker{config: cfg, backend: backend}
+ u.rpc = rpc.NewRPCClient("BlockUnlocker", cfg.Daemon, cfg.Timeout)
+ return u
+}
+
+func (u *BlockUnlocker) Start() {
+ log.Println("Starting block unlocker")
+ intv := util.MustParseDuration(u.config.Interval)
+ timer := time.NewTimer(intv)
+ log.Printf("Set block unlock interval to %v", intv)
+
+ // Immediately unlock after start
+ u.unlockPendingBlocks()
+ u.unlockAndCreditMiners()
+ timer.Reset(intv)
+
+ go func() {
+ for {
+ select {
+ case <-timer.C:
+ u.unlockPendingBlocks()
+ u.unlockAndCreditMiners()
+ timer.Reset(intv)
+ }
+ }
+ }()
+}
+
// UnlockResult aggregates the outcome of one unlock pass over a set of
// block candidates.
type UnlockResult struct {
	maturedBlocks  []*storage.BlockData // candidates matched as a block or uncle
	orphanedBlocks []*storage.BlockData // candidates not found on-chain
	orphans        int                  // count of orphanedBlocks
	uncles         int                  // candidates matched as uncles
	blocks         int                  // candidates matched as normal blocks
}
+
+/* Geth does not provide consistent state when you need both new height and new job,
+ * so in redis I am logging just what I have in a pool state on the moment when block found.
+ * Having very likely incorrect height in database results in a weird block unlocking scheme,
+ * when I have to check what the hell we actually found and traversing all the blocks with height-N and height+N
+ * to make sure we will find it. We can't rely on round height here, it's just a reference point.
+ * ISSUE: https://github.com/ethereum/go-ethereum/issues/2333
+ */
// unlockCandidates classifies each stored candidate as a normal block,
// an uncle, or an orphan by scanning the chain minDepth blocks around
// the recorded height (the stored height is only a reference point; see
// the note above). Matched candidates get their final reward filled in
// via handleBlock/handleUncle; anything unmatched is marked orphan.
func (u *BlockUnlocker) unlockCandidates(candidates []*storage.BlockData) (*UnlockResult, error) {
	result := &UnlockResult{}

	// Data row is: "height:nonce:powHash:mixDigest:timestamp:diff:totalShares"
	for _, candidate := range candidates {
		orphan := true

		/* Search for a normal block with wrong height here by traversing 16 blocks back and forward.
		 * Also we are searching for a block that can include this one as uncle.
		 */
		for i := int64(minDepth * -1); i < minDepth; i++ {
			height := candidate.Height + i

			if height < 0 {
				continue
			}

			block, err := u.rpc.GetBlockByHeight(height)
			if err != nil {
				log.Printf("Error while retrieving block %v from node: %v", height, err)
				return nil, err
			}
			if block == nil {
				// The node is behind our reference height; abort the pass.
				return nil, fmt.Errorf("Error while retrieving block %v from node, wrong node height", height)
			}

			if matchCandidate(block, candidate) {
				orphan = false
				result.blocks++

				err = u.handleBlock(block, candidate)
				if err != nil {
					u.halt = true
					u.lastFail = err
					return nil, err
				}
				result.maturedBlocks = append(result.maturedBlocks, candidate)
				log.Printf("Mature block %v with %v tx, hash: %v", candidate.Height, len(block.Transactions), candidate.Hash[0:10])
				break
			}

			if len(block.Uncles) == 0 {
				continue
			}

			// Trying to find uncle in current block during our forward check
			for uncleIndex, uncleHash := range block.Uncles {
				uncle, err := u.rpc.GetUncleByBlockNumberAndIndex(height, uncleIndex)
				if err != nil {
					return nil, fmt.Errorf("Error while retrieving uncle of block %v from node: %v", uncleHash, err)
				}
				if uncle == nil {
					return nil, fmt.Errorf("Error while retrieving uncle of block %v from node", height)
				}

				// Found uncle
				if matchCandidate(uncle, candidate) {
					orphan = false
					result.uncles++

					err := handleUncle(height, uncle, candidate)
					if err != nil {
						u.halt = true
						u.lastFail = err
						return nil, err
					}
					result.maturedBlocks = append(result.maturedBlocks, candidate)
					log.Printf("Mature uncle %v/%v of reward %v with hash: %v", candidate.Height, candidate.UncleHeight,
						util.FormatReward(candidate.Reward), uncle.Hash[0:10])
					break
				}
			}
			// Found block or uncle
			if !orphan {
				break
			}
		}
		// Block is lost, we didn't find any valid block or uncle matching our data in a blockchain
		if orphan {
			result.orphans++
			candidate.Orphan = true
			result.orphanedBlocks = append(result.orphanedBlocks, candidate)
			log.Printf("Orphaned block %v:%v", candidate.RoundHeight, candidate.Nonce)
		}
	}
	return result, nil
}
+
+func matchCandidate(block *rpc.GetBlockReply, candidate *storage.BlockData) bool {
+ // Just compare hash if block is unlocked as immature
+ if len(candidate.Hash) > 0 && strings.EqualFold(candidate.Hash, block.Hash) {
+ return true
+ }
+ // Geth-style candidate matching
+ if len(block.Nonce) > 0 {
+ return strings.EqualFold(block.Nonce, candidate.Nonce)
+ }
+ // Parity's EIP: https://github.com/ethereum/EIPs/issues/95
+ if len(block.SealFields) == 2 {
+ return strings.EqualFold(candidate.Nonce, block.SealFields[1])
+ }
+ return false
+}
+
// handleBlock finalizes a candidate matched to a normal block: it fixes
// the candidate height from the node's canonical block number and sets
// the total reward = static base reward + tx fees (unless the pool
// keeps them) + an inclusion bonus per referenced uncle.
func (u *BlockUnlocker) handleBlock(block *rpc.GetBlockReply, candidate *storage.BlockData) error {
	// Static base reward (constReward, 3.2 ETC here — the old "5 Ether"
	// comment this code descended from was stale).
	reward := new(big.Int).Set(constReward)

	correctHeight, err := strconv.ParseInt(strings.Replace(block.Number, "0x", "", -1), 16, 64)
	if err != nil {
		return err
	}
	candidate.Height = correctHeight

	// Add TX fees
	extraTxReward, err := u.getExtraRewardForTx(block)
	if err != nil {
		return fmt.Errorf("Error while fetching TX receipt: %v", err)
	}
	if u.config.KeepTxFees {
		// Pool keeps the fees: tracked separately, not shared with miners.
		candidate.ExtraReward = extraTxReward
	} else {
		reward.Add(reward, extraTxReward)
	}

	// Add reward for including uncles
	rewardForUncles := big.NewInt(0).Mul(uncleReward, big.NewInt(int64(len(block.Uncles))))
	reward.Add(reward, rewardForUncles)

	candidate.Orphan = false
	candidate.Hash = block.Hash
	candidate.Reward = reward
	return nil
}
+
+func handleUncle(height int64, uncle *rpc.GetBlockReply, candidate *storage.BlockData) error {
+ uncleHeight, err := strconv.ParseInt(strings.Replace(uncle.Number, "0x", "", -1), 16, 64)
+ if err != nil {
+ return err
+ }
+ reward := getUncleReward(uncleHeight, height)
+ candidate.Height = height
+ candidate.UncleHeight = uncleHeight
+ candidate.Orphan = false
+ candidate.Hash = uncle.Hash
+ candidate.Reward = reward
+ return nil
+}
+
// unlockPendingBlocks is stage one of unlocking: candidates at least
// ImmatureDepth confirmations deep are matched on-chain, orphans are
// recorded, and matched blocks are credited to miners as IMMATURE
// rewards. A previously latched critical error terminates the process.
func (u *BlockUnlocker) unlockPendingBlocks() {
	if u.halt {
		log.Println("Unlocking suspended due to last critical error:", u.lastFail)
		os.Exit(1)
		// NOTE(review): unreachable after os.Exit; unlockAndCreditMiners
		// merely returns in the same situation — confirm which is intended.
		return
	}

	current, err := u.rpc.GetPendingBlock()
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Unable to get current blockchain height from node: %v", err)
		return
	}
	currentHeight, err := strconv.ParseInt(strings.Replace(current.Number, "0x", "", -1), 16, 64)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Can't parse pending block number: %v", err)
		return
	}

	// Only candidates buried deeper than ImmatureDepth are considered.
	candidates, err := u.backend.GetCandidates(currentHeight - u.config.ImmatureDepth)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Failed to get block candidates from backend: %v", err)
		return
	}

	if len(candidates) == 0 {
		log.Println("No block candidates to unlock")
		return
	}

	result, err := u.unlockCandidates(candidates)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Failed to unlock blocks: %v", err)
		return
	}
	log.Printf("Immature %v blocks, %v uncles, %v orphans", result.blocks, result.uncles, result.orphans)

	err = u.backend.WritePendingOrphans(result.orphanedBlocks)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Failed to insert orphaned blocks into backend: %v", err)
		return
	} else {
		log.Printf("Inserted %v orphaned blocks to backend", result.orphans)
	}

	// Session totals, for the summary log line below.
	totalRevenue := new(big.Rat)
	totalMinersProfit := new(big.Rat)
	totalPoolProfit := new(big.Rat)

	for _, block := range result.maturedBlocks {
		revenue, minersProfit, poolProfit, roundRewards, percents, err := u.calculateRewards(block)
		if err != nil {
			u.halt = true
			u.lastFail = err
			log.Printf("Failed to calculate rewards for round %v: %v", block.RoundKey(), err)
			return
		}
		err = u.backend.WriteImmatureBlock(block, roundRewards)
		if err != nil {
			u.halt = true
			u.lastFail = err
			log.Printf("Failed to credit rewards for round %v: %v", block.RoundKey(), err)
			return
		}
		totalRevenue.Add(totalRevenue, revenue)
		totalMinersProfit.Add(totalMinersProfit, minersProfit)
		totalPoolProfit.Add(totalPoolProfit, poolProfit)

		logEntry := fmt.Sprintf(
			"IMMATURE %v: revenue %v, miners profit %v, pool profit: %v",
			block.RoundKey(),
			util.FormatRatReward(revenue),
			util.FormatRatReward(minersProfit),
			util.FormatRatReward(poolProfit),
		)
		entries := []string{logEntry}
		for login, reward := range roundRewards {
			entries = append(entries, fmt.Sprintf("\tREWARD %v: %v: %v Shannon", block.RoundKey(), login, reward))
			per := new(big.Rat)
			if val, ok := percents[login]; ok {
				per = val
			}
			// true => record the reward as immature.
			u.backend.WriteReward(login, reward, per, true, block)
		}
		log.Println(strings.Join(entries, "\n"))
	}

	log.Printf(
		"IMMATURE SESSION: revenue %v, miners profit %v, pool profit: %v",
		util.FormatRatReward(totalRevenue),
		util.FormatRatReward(totalMinersProfit),
		util.FormatRatReward(totalPoolProfit),
	)
}
+
// unlockAndCreditMiners is stage two of unlocking: immature blocks at
// least Depth confirmations deep are re-matched on-chain (they may have
// been reorganized since stage one), orphans are written back, and the
// rest are credited to miners as final MATURED rewards.
func (u *BlockUnlocker) unlockAndCreditMiners() {
	if u.halt {
		log.Println("Unlocking suspended due to last critical error:", u.lastFail)
		return
	}

	current, err := u.rpc.GetPendingBlock()
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Unable to get current blockchain height from node: %v", err)
		return
	}
	currentHeight, err := strconv.ParseInt(strings.Replace(current.Number, "0x", "", -1), 16, 64)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Can't parse pending block number: %v", err)
		return
	}

	// Only immature blocks buried deeper than Depth are matured.
	immature, err := u.backend.GetImmatureBlocks(currentHeight - u.config.Depth)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Failed to get block candidates from backend: %v", err)
		return
	}

	if len(immature) == 0 {
		log.Println("No immature blocks to credit miners")
		return
	}

	result, err := u.unlockCandidates(immature)
	if err != nil {
		u.halt = true
		u.lastFail = err
		log.Printf("Failed to unlock blocks: %v", err)
		return
	}
	log.Printf("Unlocked %v blocks, %v uncles, %v orphans", result.blocks, result.uncles, result.orphans)

	for _, block := range result.orphanedBlocks {
		err = u.backend.WriteOrphan(block)
		if err != nil {
			u.halt = true
			u.lastFail = err
			log.Printf("Failed to insert orphaned block into backend: %v", err)
			return
		}
	}
	log.Printf("Inserted %v orphaned blocks to backend", result.orphans)

	// Session totals, for the summary log line below.
	totalRevenue := new(big.Rat)
	totalMinersProfit := new(big.Rat)
	totalPoolProfit := new(big.Rat)

	for _, block := range result.maturedBlocks {
		revenue, minersProfit, poolProfit, roundRewards, percents, err := u.calculateRewards(block)
		if err != nil {
			u.halt = true
			u.lastFail = err
			log.Printf("Failed to calculate rewards for round %v: %v", block.RoundKey(), err)
			return
		}
		err = u.backend.WriteMaturedBlock(block, roundRewards)
		if err != nil {
			u.halt = true
			u.lastFail = err
			log.Printf("Failed to credit rewards for round %v: %v", block.RoundKey(), err)
			return
		}
		totalRevenue.Add(totalRevenue, revenue)
		totalMinersProfit.Add(totalMinersProfit, minersProfit)
		totalPoolProfit.Add(totalPoolProfit, poolProfit)

		logEntry := fmt.Sprintf(
			"MATURED %v: revenue %v, miners profit %v, pool profit: %v",
			block.RoundKey(),
			util.FormatRatReward(revenue),
			util.FormatRatReward(minersProfit),
			util.FormatRatReward(poolProfit),
		)
		entries := []string{logEntry}
		for login, reward := range roundRewards {
			entries = append(entries, fmt.Sprintf("\tREWARD %v: %v: %v Shannon", block.RoundKey(), login, reward))
			per := new(big.Rat)
			if val, ok := percents[login]; ok {
				per = val
			}
			// false => record the reward as matured (final).
			u.backend.WriteReward(login, reward, per, false, block)
		}
		log.Println(strings.Join(entries, "\n"))
	}

	log.Printf(
		"MATURE SESSION: revenue %v, miners profit %v, pool profit: %v",
		util.FormatRatReward(totalRevenue),
		util.FormatRatReward(totalMinersProfit),
		util.FormatRatReward(totalPoolProfit),
	)
}
+
// calculateRewards splits a matured block's reward between miners and
// the pool, proportionally to round shares. It returns (total revenue,
// miners' profit, pool profit, per-login rewards in Shannon, per-login
// share percentages, error). Extra tx-fee reward (when KeepTxFees is
// set) is added to the pool profit; the optional developer donation is
// carved out of pool profit before the fee address is credited.
func (u *BlockUnlocker) calculateRewards(block *storage.BlockData) (*big.Rat, *big.Rat, *big.Rat, map[string]int64, map[string]*big.Rat, error) {
	revenue := new(big.Rat).SetInt(block.Reward)
	// chargeFee returns new values; revenue itself is not reduced here.
	minersProfit, poolProfit := chargeFee(revenue, u.config.PoolFee)

	shares, err := u.backend.GetRoundShares(block.RoundHeight, block.Nonce)
	if err != nil {
		return nil, nil, nil, nil, nil, err
	}

	totalShares := int64(0)
	for _, val := range shares {
		totalShares += val
	}

	rewards, percents := calculateRewardsForShares(shares, totalShares, minersProfit)

	if block.ExtraReward != nil {
		// Tx fees kept by the pool (KeepTxFees) go to pool profit only.
		extraReward := new(big.Rat).SetInt(block.ExtraReward)
		poolProfit.Add(poolProfit, extraReward)
		revenue.Add(revenue, extraReward)
	}

	if u.config.Donate {
		var donation = new(big.Rat)
		poolProfit, donation = chargeFee(poolProfit, donationFee)
		login := strings.ToLower(donationAccount)
		rewards[login] += weiToShannonInt64(donation)
	}

	if len(u.config.PoolFeeAddress) != 0 {
		address := strings.ToLower(u.config.PoolFeeAddress)
		rewards[address] += weiToShannonInt64(poolProfit)
	}

	return revenue, minersProfit, poolProfit, rewards, percents, nil
}
+
+func calculateRewardsForShares(shares map[string]int64, total int64, reward *big.Rat) (map[string]int64, map[string]*big.Rat) {
+ rewards := make(map[string]int64)
+ percents := make(map[string]*big.Rat)
+
+ for login, n := range shares {
+ percents[login] = big.NewRat(n, total)
+ workerReward := new(big.Rat).Mul(reward, percents[login])
+ rewards[login] += weiToShannonInt64(workerReward)
+ }
+ return rewards, percents
+}
+
+// Returns new value after fee deduction and fee value.
+func chargeFee(value *big.Rat, fee float64) (*big.Rat, *big.Rat) {
+ feePercent := new(big.Rat).SetFloat64(fee / 100)
+ feeValue := new(big.Rat).Mul(value, feePercent)
+ return new(big.Rat).Sub(value, feeValue), feeValue
+}
+
+func weiToShannonInt64(wei *big.Rat) int64 {
+ shannon := new(big.Rat).SetInt(util.Shannon)
+ inShannon := new(big.Rat).Quo(wei, shannon)
+ value, _ := strconv.ParseInt(inShannon.FloatString(0), 10, 64)
+ return value
+}
+
+func getUncleReward(uHeight, height int64) *big.Int {
+ reward := new(big.Int).Set(constReward)
+ reward.Mul(big.NewInt(uHeight+8-height), reward)
+ reward.Div(reward, big.NewInt(8))
+ return reward
+}
+
+func (u *BlockUnlocker) getExtraRewardForTx(block *rpc.GetBlockReply) (*big.Int, error) {
+ amount := new(big.Int)
+
+ for _, tx := range block.Transactions {
+ receipt, err := u.rpc.GetTxReceipt(tx.Hash)
+ if err != nil {
+ return nil, err
+ }
+ if receipt != nil {
+ gasUsed := util.String2Big(receipt.GasUsed)
+ gasPrice := util.String2Big(tx.GasPrice)
+ fee := new(big.Int).Mul(gasUsed, gasPrice)
+ amount.Add(amount, fee)
+ }
+ }
+ return amount, nil
+}
diff --git a/payouts/unlocker_test.go b/payouts/unlocker_test.go
new file mode 100644
index 0000000..bd57fcf
--- /dev/null
+++ b/payouts/unlocker_test.go
@@ -0,0 +1,113 @@
+package payouts
+
+import (
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/yuriy0803/open-etc-pool-friends/rpc"
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+)
+
// TestMain is the test entry point; no shared fixtures are needed, so
// it simply runs the suite and propagates its exit code.
func TestMain(m *testing.M) {
	os.Exit(m.Run())
}
+
+func TestCalculateRewards(t *testing.T) {
+ blockReward, _ := new(big.Rat).SetString("5000000000000000000")
+ shares := map[string]int64{"0x0": 1000000, "0x1": 20000, "0x2": 5000, "0x3": 10, "0x4": 1}
+ expectedRewards := map[string]int64{"0x0": 4877996431, "0x1": 97559929, "0x2": 24389982, "0x3": 48780, "0x4": 4878}
+ totalShares := int64(1025011)
+
+ rewards := calculateRewardsForShares(shares, totalShares, blockReward)
+ expectedTotalAmount := int64(5000000000)
+
+ totalAmount := int64(0)
+ for login, amount := range rewards {
+ totalAmount += amount
+
+ if expectedRewards[login] != amount {
+ t.Errorf("Amount for %v must be equal to %v vs %v", login, expectedRewards[login], amount)
+ }
+ }
+ if totalAmount != expectedTotalAmount {
+ t.Errorf("Total reward must be equal to block reward in Shannon: %v vs %v", expectedTotalAmount, totalAmount)
+ }
+}
+
+func TestChargeFee(t *testing.T) {
+ orig, _ := new(big.Rat).SetString("5000000000000000000")
+ value, _ := new(big.Rat).SetString("5000000000000000000")
+ expectedNewValue, _ := new(big.Rat).SetString("3750000000000000000")
+ expectedFee, _ := new(big.Rat).SetString("1250000000000000000")
+ newValue, fee := chargeFee(orig, 25.0)
+
+ if orig.Cmp(value) != 0 {
+ t.Error("Must not change original value")
+ }
+ if newValue.Cmp(expectedNewValue) != 0 {
+ t.Error("Must charge and deduct correct fee")
+ }
+ if fee.Cmp(expectedFee) != 0 {
+ t.Error("Must charge fee")
+ }
+}
+
+func TestWeiToShannonInt64(t *testing.T) {
+ wei, _ := new(big.Rat).SetString("1000000000000000000")
+ origWei, _ := new(big.Rat).SetString("1000000000000000000")
+ shannon := int64(1000000000)
+
+ if weiToShannonInt64(wei) != shannon {
+ t.Error("Must convert to Shannon")
+ }
+ if wei.Cmp(origWei) != 0 {
+ t.Error("Must charge original value")
+ }
+}
+
+func TestGetUncleReward(t *testing.T) {
+ rewards := make(map[int64]string)
+ expectedRewards := map[int64]string{
+ 1: "4375000000000000000",
+ 2: "3750000000000000000",
+ 3: "3125000000000000000",
+ 4: "2500000000000000000",
+ 5: "1875000000000000000",
+ 6: "1250000000000000000",
+ }
+ for i := int64(1); i < 7; i++ {
+ rewards[i] = getUncleReward(1, i+1).String()
+ }
+ for i, reward := range rewards {
+ if expectedRewards[i] != rewards[i] {
+ t.Errorf("Incorrect uncle reward for %v, expected %v vs %v", i, expectedRewards[i], reward)
+ }
+ }
+}
+
+func TestMatchCandidate(t *testing.T) {
+ gethBlock := &rpc.GetBlockReply{Hash: "0x12345A", Nonce: "0x1A"}
+ parityBlock := &rpc.GetBlockReply{Hash: "0x12345A", SealFields: []string{"0x0A", "0x1A"}}
+ candidate := &storage.BlockData{Nonce: "0x1a"}
+ orphan := &storage.BlockData{Nonce: "0x1abc"}
+
+ if !matchCandidate(gethBlock, candidate) {
+ t.Error("Must match with nonce")
+ }
+ if !matchCandidate(parityBlock, candidate) {
+ t.Error("Must match with seal fields")
+ }
+ if matchCandidate(gethBlock, orphan) {
+ t.Error("Must not match with orphan with nonce")
+ }
+ if matchCandidate(parityBlock, orphan) {
+ t.Error("Must not match orphan with seal fields")
+ }
+
+ block := &rpc.GetBlockReply{Hash: "0x12345A"}
+ immature := &storage.BlockData{Hash: "0x12345a", Nonce: "0x0"}
+ if !matchCandidate(block, immature) {
+ t.Error("Must match with hash")
+ }
+}
diff --git a/policy/policy.go b/policy/policy.go
new file mode 100644
index 0000000..0f5e199
--- /dev/null
+++ b/policy/policy.go
@@ -0,0 +1,317 @@
+package policy
+
+import (
+ "fmt"
+ "log"
+ "os/exec"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+// Config is the "policy" section of the pool configuration.
+type Config struct {
+	Workers         int     `json:"workers"`         // number of ban-queue workers
+	Banning         Banning `json:"banning"`
+	Limits          Limits  `json:"limits"`
+	ResetInterval   string  `json:"resetInterval"`   // how often per-IP stats are flushed
+	RefreshInterval string  `json:"refreshInterval"` // how often black/white lists are reloaded
+}
+
+// Limits throttles new connections per IP after a grace period.
+type Limits struct {
+	Enabled   bool   `json:"enabled"`
+	Limit     int32  `json:"limit"`     // initial connection budget per IP
+	Grace     string `json:"grace"`     // startup window with no limiting
+	LimitJump int32  `json:"limitJump"` // budget refund per valid share
+}
+
+// Banning configures when and how misbehaving IPs are banned.
+type Banning struct {
+	Enabled        bool    `json:"enabled"`
+	IPSet          string  `json:"ipset"`          // ipset name; empty means log-only bans
+	Timeout        int64   `json:"timeout"`        // ban duration (seconds, passed to ipset)
+	InvalidPercent float32 `json:"invalidPercent"` // invalid/valid ratio threshold (percent)
+	CheckThreshold int32   `json:"checkThreshold"` // min shares before the ratio is evaluated
+	MalformedLimit int32   `json:"malformedLimit"` // malformed requests before a ban
+}
+
+// Stats holds per-IP counters. Int fields are accessed with sync/atomic;
+// the embedded mutex guards the share counters in ApplySharePolicy.
+type Stats struct {
+	sync.Mutex
+	// We are using atomic with LastBeat,
+	// so moving it before the rest in order to avoid alignment issue
+	LastBeat      int64
+	BannedAt      int64
+	ValidShares   int32
+	InvalidShares int32
+	Malformed     int32
+	ConnLimit     int32
+	Banned        int32
+}
+
+// PolicyServer tracks per-IP behavior and enforces ban/limit policies.
+// The embedded RWMutex guards blacklist/whitelist; statsMu guards stats.
+type PolicyServer struct {
+	sync.RWMutex
+	statsMu    sync.Mutex
+	config     *Config
+	stats      map[string]*Stats
+	banChannel chan string
+	startedAt  int64
+	grace      int64
+	timeout    int64
+	blacklist  []string
+	whitelist  []string
+	storage    *storage.RedisClient
+}
+
+// Start builds a PolicyServer, loads black/white lists, launches the
+// periodic reset/refresh loop and the configured number of ban workers.
+func Start(cfg *Config, storage *storage.RedisClient) *PolicyServer {
+	s := &PolicyServer{config: cfg, startedAt: util.MakeTimestamp()}
+	grace := util.MustParseDuration(cfg.Limits.Grace)
+	s.grace = int64(grace / time.Millisecond)
+	s.banChannel = make(chan string, 64)
+	s.stats = make(map[string]*Stats)
+	s.storage = storage
+	s.refreshState()
+
+	// Stats idle longer than the reset interval are flushed. Parse the
+	// interval once and reuse it for both the timeout and the timer
+	// (it was previously parsed twice).
+	resetIntv := util.MustParseDuration(s.config.ResetInterval)
+	s.timeout = int64(resetIntv / time.Millisecond)
+	resetTimer := time.NewTimer(resetIntv)
+	log.Printf("Set policy stats reset every %v", resetIntv)
+
+	refreshIntv := util.MustParseDuration(s.config.RefreshInterval)
+	refreshTimer := time.NewTimer(refreshIntv)
+	log.Printf("Set policy state refresh every %v", refreshIntv)
+
+	go func() {
+		for {
+			select {
+			case <-resetTimer.C:
+				s.resetStats()
+				resetTimer.Reset(resetIntv)
+			case <-refreshTimer.C:
+				s.refreshState()
+				refreshTimer.Reset(refreshIntv)
+			}
+		}
+	}()
+
+	for i := 0; i < s.config.Workers; i++ {
+		s.startPolicyWorker()
+	}
+	log.Printf("Running with %v policy workers", s.config.Workers)
+	return s
+}
+
+// startPolicyWorker launches one goroutine that drains ban requests from
+// the ban channel for the lifetime of the process.
+func (s *PolicyServer) startPolicyWorker() {
+	go func() {
+		// Receiving directly from the channel is equivalent to the
+		// single-case select loop it replaces; the channel is never closed.
+		for ip := range s.banChannel {
+			s.doBan(ip)
+		}
+	}()
+}
+
+// resetStats drops expired bans and flushes stats for IPs that have been
+// idle longer than the reset interval.
+func (s *PolicyServer) resetStats() {
+	now := util.MakeTimestamp()
+	// Banning.Timeout is in seconds; timestamps are milliseconds.
+	banningTimeout := s.config.Banning.Timeout * 1000
+	total := 0
+	s.statsMu.Lock()
+	defer s.statsMu.Unlock()
+
+	for key, m := range s.stats {
+		lastBeat := atomic.LoadInt64(&m.LastBeat)
+		bannedAt := atomic.LoadInt64(&m.BannedAt)
+
+		if now-bannedAt >= banningTimeout {
+			atomic.StoreInt64(&m.BannedAt, 0)
+			if atomic.CompareAndSwapInt32(&m.Banned, 1, 0) {
+				log.Printf("Ban dropped for %v", key)
+				delete(s.stats, key)
+				total++
+				// BUGFIX: the entry is already gone; don't fall through to
+				// the idle check below and count the same key twice.
+				continue
+			}
+		}
+		if now-lastBeat >= s.timeout {
+			delete(s.stats, key)
+			total++
+		}
+	}
+	log.Printf("Flushed stats for %v IP addresses", total)
+}
+
+// refreshState reloads the black/white lists from Redis under the write
+// lock. On backend error the previous lists are kept.
+func (s *PolicyServer) refreshState() {
+	s.Lock()
+	defer s.Unlock()
+	var err error
+
+	s.blacklist, err = s.storage.GetBlacklist()
+	if err != nil {
+		log.Printf("Failed to get blacklist from backend: %v", err)
+	}
+	s.whitelist, err = s.storage.GetWhitelist()
+	if err != nil {
+		log.Printf("Failed to get whitelist from backend: %v", err)
+	}
+	log.Println("Policy state refresh complete")
+}
+
+// NewStats creates a fresh per-IP stats entry seeded with the configured
+// connection budget and an initial heartbeat.
+func (s *PolicyServer) NewStats() *Stats {
+	x := &Stats{
+		ConnLimit: s.config.Limits.Limit,
+	}
+	x.heartbeat()
+	return x
+}
+
+// Get returns the stats entry for ip, creating one on first sight and
+// refreshing the heartbeat on every subsequent lookup.
+func (s *PolicyServer) Get(ip string) *Stats {
+	s.statsMu.Lock()
+	defer s.statsMu.Unlock()
+
+	if x, ok := s.stats[ip]; !ok {
+		x = s.NewStats()
+		s.stats[ip] = x
+		return x
+	} else {
+		x.heartbeat()
+		return x
+	}
+}
+
+// BanClient bans the given IP immediately (subject to whitelist).
+func (s *PolicyServer) BanClient(ip string) {
+	x := s.Get(ip)
+	s.forceBan(x, ip)
+}
+
+// IsBanned reports whether the IP currently has an active ban.
+func (s *PolicyServer) IsBanned(ip string) bool {
+	x := s.Get(ip)
+	return atomic.LoadInt32(&x.Banned) > 0
+}
+
+// ApplyLimitPolicy decrements the IP's connection budget once the startup
+// grace period has elapsed; returns false when the budget is exhausted.
+func (s *PolicyServer) ApplyLimitPolicy(ip string) bool {
+	if !s.config.Limits.Enabled {
+		return true
+	}
+	now := util.MakeTimestamp()
+	if now-s.startedAt > s.grace {
+		return s.Get(ip).decrLimit() > 0
+	}
+	return true
+}
+
+// ApplyLoginPolicy bans the IP and rejects the login when the wallet
+// address is blacklisted.
+func (s *PolicyServer) ApplyLoginPolicy(addy, ip string) bool {
+	if s.InBlackList(addy) {
+		x := s.Get(ip)
+		s.forceBan(x, ip)
+		return false
+	}
+	return true
+}
+
+// ApplyMalformedPolicy counts a malformed request and bans the IP once the
+// configured limit is reached; returns false if the ban fired.
+func (s *PolicyServer) ApplyMalformedPolicy(ip string) bool {
+	x := s.Get(ip)
+	n := x.incrMalformed()
+	if n >= s.config.Banning.MalformedLimit {
+		s.forceBan(x, ip)
+		return false
+	}
+	return true
+}
+
+// ApplySharePolicy records a share result for the IP and, once enough
+// shares accumulated, bans the IP if the invalid/valid ratio is too high.
+// Returns false when the IP got banned by this call.
+func (s *PolicyServer) ApplySharePolicy(ip string, validShare bool) bool {
+	x := s.Get(ip)
+	x.Lock()
+
+	if validShare {
+		x.ValidShares++
+		if s.config.Limits.Enabled {
+			// Refund connection budget as a reward for valid work.
+			x.incrLimit(s.config.Limits.LimitJump)
+		}
+	} else {
+		x.InvalidShares++
+	}
+
+	totalShares := x.ValidShares + x.InvalidShares
+	if totalShares < s.config.Banning.CheckThreshold {
+		x.Unlock()
+		return true
+	}
+	validShares := float32(x.ValidShares)
+	invalidShares := float32(x.InvalidShares)
+	x.resetShares()
+	x.Unlock()
+
+	// NOTE(review): if validShares is 0 the ratio is +Inf, which always
+	// exceeds the threshold and bans — presumably intentional.
+	ratio := invalidShares / validShares
+
+	if ratio >= s.config.Banning.InvalidPercent/100.0 {
+		s.forceBan(x, ip)
+		return false
+	}
+	return true
+}
+
+// resetShares zeroes both share counters; caller must hold x's lock.
+func (x *Stats) resetShares() {
+	x.ValidShares = 0
+	x.InvalidShares = 0
+}
+
+// forceBan marks the stats entry banned and queues an ipset ban when one
+// is configured. Whitelisted IPs and disabled banning are no-ops.
+func (s *PolicyServer) forceBan(x *Stats, ip string) {
+	if !s.config.Banning.Enabled || s.InWhiteList(ip) {
+		return
+	}
+	atomic.StoreInt64(&x.BannedAt, util.MakeTimestamp())
+
+	// CAS ensures only the first caller enqueues/logs the ban.
+	if atomic.CompareAndSwapInt32(&x.Banned, 0, 1) {
+		if len(s.config.Banning.IPSet) > 0 {
+			s.banChannel <- ip
+		} else {
+			log.Println("Banned peer", ip)
+		}
+	}
+}
+
+// incrLimit adds n to the IP's connection budget.
+func (x *Stats) incrLimit(n int32) {
+	atomic.AddInt32(&x.ConnLimit, n)
+}
+
+// incrMalformed bumps the malformed-request counter and returns it.
+func (x *Stats) incrMalformed() int32 {
+	return atomic.AddInt32(&x.Malformed, 1)
+}
+
+// decrLimit spends one unit of connection budget and returns the remainder.
+func (x *Stats) decrLimit() int32 {
+	return atomic.AddInt32(&x.ConnLimit, -1)
+}
+
+// InBlackList reports whether the wallet address is blacklisted.
+func (s *PolicyServer) InBlackList(addy string) bool {
+	s.RLock()
+	defer s.RUnlock()
+	return util.StringInSlice(addy, s.blacklist)
+}
+
+// InWhiteList reports whether the IP is exempt from banning.
+func (s *PolicyServer) InWhiteList(ip string) bool {
+	s.RLock()
+	defer s.RUnlock()
+	return util.StringInSlice(ip, s.whitelist)
+}
+
+// doBan shells out to "sudo ipset add" to ban the IP for the configured
+// timeout. NOTE(review): ip/set are interpolated into a command line; ip
+// comes from socket peer addresses (not free-form user input), but the
+// set name is config-controlled — keep it trusted.
+func (s *PolicyServer) doBan(ip string) {
+	set, timeout := s.config.Banning.IPSet, s.config.Banning.Timeout
+	cmd := fmt.Sprintf("sudo ipset add %s %s timeout %v -!", set, ip, timeout)
+	args := strings.Fields(cmd)
+	head := args[0]
+	args = args[1:]
+
+	log.Printf("Banned %v with timeout %v on ipset %s", ip, timeout, set)
+
+	_, err := exec.Command(head, args...).Output()
+	if err != nil {
+		log.Printf("CMD Error: %s", err)
+	}
+}
+
+// heartbeat records the current time as the entry's last activity.
+func (x *Stats) heartbeat() {
+	now := util.MakeTimestamp()
+	atomic.StoreInt64(&x.LastBeat, now)
+}
diff --git a/proxy/blocks.go b/proxy/blocks.go
new file mode 100644
index 0000000..941e38d
--- /dev/null
+++ b/proxy/blocks.go
@@ -0,0 +1,117 @@
+package proxy
+
+import (
+ "log"
+ "math/big"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/yuriy0803/open-etc-pool-friends/rpc"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+// maxBacklog is how many recent block heights of jobs remain acceptable.
+const maxBacklog = 3
+
+// heightDiffPair ties a job header to its block height and difficulty.
+type heightDiffPair struct {
+	diff   *big.Int
+	height uint64
+}
+
+// BlockTemplate is the current work package plus a backlog of recent
+// headers so slightly stale submissions can still be validated.
+type BlockTemplate struct {
+	sync.RWMutex
+	Header               string
+	Seed                 string
+	Target               string
+	Difficulty           *big.Int
+	Height               uint64
+	GetPendingBlockCache *rpc.GetBlockReplyPart
+	nonces               map[string]bool
+	headers              map[string]heightDiffPair
+}
+
+// Block adapts a submitted share to the interface expected by the hasher.
+type Block struct {
+	difficulty  *big.Int
+	hashNoNonce common.Hash
+	nonce       uint64
+	mixDigest   common.Hash
+	number      uint64
+}
+
+func (b Block) Difficulty() *big.Int     { return b.difficulty }
+func (b Block) HashNoNonce() common.Hash { return b.hashNoNonce }
+func (b Block) Nonce() uint64            { return b.nonce }
+func (b Block) MixDigest() common.Hash   { return b.mixDigest }
+func (b Block) NumberU64() uint64        { return b.number }
+
+// fetchBlockTemplate pulls fresh work from the upstream node, carries over
+// the recent header backlog, and publishes the new template atomically.
+func (s *ProxyServer) fetchBlockTemplate() {
+	rpc := s.rpc()
+	t := s.currentBlockTemplate()
+	pendingReply, height, diff, err := s.fetchPendingBlock()
+	if err != nil {
+		log.Printf("Error while refreshing pending block on %s: %s", rpc.Name, err)
+		return
+	}
+	reply, err := rpc.GetWork()
+	if err != nil {
+		log.Printf("Error while refreshing block template on %s: %s", rpc.Name, err)
+		return
+	}
+	// No need to update, we have fresh job
+	if t != nil && t.Header == reply[0] {
+		return
+	}
+
+	// Advertise the pool's share difficulty, not the network difficulty.
+	pendingReply.Difficulty = util.ToHex(s.config.Proxy.Difficulty)
+
+	newTemplate := BlockTemplate{
+		Header:               reply[0],
+		Seed:                 reply[1],
+		Target:               reply[2],
+		Height:               height,
+		Difficulty:           big.NewInt(diff),
+		GetPendingBlockCache: pendingReply,
+		headers:              make(map[string]heightDiffPair),
+	}
+	// Copy job backlog and add current one
+	newTemplate.headers[reply[0]] = heightDiffPair{
+		diff:   util.TargetHexToDiff(reply[2]),
+		height: height,
+	}
+	if t != nil {
+		for k, v := range t.headers {
+			// Keep only headers within maxBacklog heights of the tip.
+			if v.height > height-maxBacklog {
+				newTemplate.headers[k] = v
+			}
+		}
+	}
+	s.blockTemplate.Store(&newTemplate)
+	log.Printf("New block to mine on %s at height %d / %s", rpc.Name, height, reply[0][0:10])
+
+	// Stratum
+	if s.config.Proxy.Stratum.Enabled {
+		go s.broadcastNewJobs()
+	}
+}
+
+// fetchPendingBlock asks the upstream for its pending block and parses the
+// hex number/difficulty fields into native integers.
+func (s *ProxyServer) fetchPendingBlock() (*rpc.GetBlockReplyPart, uint64, int64, error) {
+	rpc := s.rpc()
+	reply, err := rpc.GetPendingBlock()
+	if err != nil {
+		log.Printf("Error while refreshing pending block on %s: %s", rpc.Name, err)
+		return nil, 0, 0, err
+	}
+	blockNumber, err := strconv.ParseUint(strings.Replace(reply.Number, "0x", "", -1), 16, 64)
+	if err != nil {
+		log.Println("Can't parse pending block number")
+		return nil, 0, 0, err
+	}
+	blockDiff, err := strconv.ParseInt(strings.Replace(reply.Difficulty, "0x", "", -1), 16, 64)
+	if err != nil {
+		log.Println("Can't parse pending block difficulty")
+		return nil, 0, 0, err
+	}
+	return reply, blockNumber, blockDiff, nil
+}
diff --git a/proxy/config.go b/proxy/config.go
new file mode 100644
index 0000000..bc2c502
--- /dev/null
+++ b/proxy/config.go
@@ -0,0 +1,63 @@
+package proxy
+
+import (
+ "github.com/yuriy0803/open-etc-pool-friends/api"
+ "github.com/yuriy0803/open-etc-pool-friends/payouts"
+ "github.com/yuriy0803/open-etc-pool-friends/policy"
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+)
+
+// Config is the top-level pool configuration shared by all modules.
+type Config struct {
+	Name                  string     `json:"name"` // instance name, required
+	Proxy                 Proxy      `json:"proxy"`
+	Api                   api.ApiConfig `json:"api"`
+	Upstream              []Upstream `json:"upstream"`
+	UpstreamCheckInterval string     `json:"upstreamCheckInterval"`
+
+	Threads int `json:"threads"`
+
+	Coin  string         `json:"coin"`
+	Pplns int64          `json:"pplns"`
+	Redis storage.Config `json:"redis"`
+
+	BlockUnlocker payouts.UnlockerConfig `json:"unlocker"`
+	Payouts       payouts.PayoutsConfig  `json:"payouts"`
+
+	NewrelicName    string `json:"newrelicName"`
+	NewrelicKey     string `json:"newrelicKey"`
+	NewrelicVerbose bool   `json:"newrelicVerbose"`
+	NewrelicEnabled bool   `json:"newrelicEnabled"`
+}
+
+// Proxy configures the HTTP getwork endpoint and shared mining settings.
+type Proxy struct {
+	Enabled              bool   `json:"enabled"`
+	Listen               string `json:"listen"`
+	LimitHeadersSize     int    `json:"limitHeadersSize"`
+	LimitBodySize        int64  `json:"limitBodySize"`
+	BehindReverseProxy   bool   `json:"behindReverseProxy"` // trust X-Forwarded-For
+	BlockRefreshInterval string `json:"blockRefreshInterval"`
+	Difficulty           int64  `json:"difficulty"` // static share difficulty
+	StateUpdateInterval  string `json:"stateUpdateInterval"`
+	HashrateExpiration   string `json:"hashrateExpiration"`
+	StratumHostname      string `json:"stratumHostname"`
+
+	Policy policy.Config `json:"policy"`
+
+	MaxFails    int64 `json:"maxFails"`    // failures before the node is "sick"
+	HealthCheck bool  `json:"healthCheck"`
+
+	Stratum Stratum `json:"stratum"`
+}
+
+// Stratum configures the TCP stratum endpoint.
+type Stratum struct {
+	Enabled bool   `json:"enabled"`
+	Listen  string `json:"listen"`
+	Timeout string `json:"timeout"` // idle connection deadline
+	MaxConn int    `json:"maxConn"`
+}
+
+// Upstream describes one upstream node endpoint.
+type Upstream struct {
+	Name    string `json:"name"`
+	Url     string `json:"url"`
+	Timeout string `json:"timeout"`
+}
diff --git a/proxy/handlers.go b/proxy/handlers.go
new file mode 100644
index 0000000..bab64a4
--- /dev/null
+++ b/proxy/handlers.go
@@ -0,0 +1,127 @@
+package proxy
+
+import (
+ "log"
+ "regexp"
+ "strings"
+ "errors"
+
+ "github.com/yuriy0803/open-etc-pool-friends/rpc"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+// Allow only lowercase hexadecimal with 0x prefix
+var noncePattern = regexp.MustCompile("^0x[0-9a-f]{16}$")
+var hashPattern = regexp.MustCompile("^0x[0-9a-f]{64}$")
+// Worker names: 1-8 chars of alphanumerics, dash, underscore.
+var workerPattern = regexp.MustCompile("^[0-9a-zA-Z-_]{1,8}$")
+
+// Stratum
+func (s *ProxyServer) handleLoginRPC(cs *Session, params []string, id string) (bool, *ErrorReply) {
+ if len(params) == 0 {
+ return false, &ErrorReply{Code: -1, Message: "Invalid params"}
+ }
+
+ login := strings.ToLower(params[0])
+ if !util.IsValidHexAddress(login) {
+ return false, &ErrorReply{Code: -1, Message: "Invalid login"}
+ }
+ if !s.policy.ApplyLoginPolicy(login, cs.ip) {
+ return false, &ErrorReply{Code: -1, Message: "You are blacklisted"}
+ }
+ cs.login = login
+ s.registerSession(cs)
+ log.Printf("Stratum miner connected %v@%v", login, cs.ip)
+ return true, nil
+}
+
+// handleGetWorkRPC returns the current [header, seed, target] work package.
+func (s *ProxyServer) handleGetWorkRPC(cs *Session) ([]string, *ErrorReply) {
+	t := s.currentBlockTemplate()
+	if t == nil || len(t.Header) == 0 || s.isSick() {
+		return nil, &ErrorReply{Code: 0, Message: "Work not ready"}
+	}
+	return []string{t.Header, t.Seed, s.diff}, nil
+}
+
+// Stratum
+// handleTCPSubmitRPC rejects submissions from unregistered sessions, then
+// delegates to the shared submit handler using the session's login.
+func (s *ProxyServer) handleTCPSubmitRPC(cs *Session, id string, params []string) (bool, *ErrorReply) {
+	s.sessionsMu.RLock()
+	_, ok := s.sessions[cs]
+	s.sessionsMu.RUnlock()
+
+	if !ok {
+		return false, &ErrorReply{Code: 25, Message: "Not subscribed"}
+	}
+	return s.handleSubmitRPC(cs, cs.login, id, params)
+}
+
+// handleSubmitRPC validates a submitted PoW solution's shape, then verifies
+// it asynchronously; outcomes are surfaced via cs.lastErr, which closes the
+// connection on the next read.
+func (s *ProxyServer) handleSubmitRPC(cs *Session, login, id string, params []string) (bool, *ErrorReply) {
+	// Fall back to a default worker id when the supplied one is invalid.
+	if !workerPattern.MatchString(id) {
+		id = "0"
+	}
+	if len(params) != 3 {
+		s.policy.ApplyMalformedPolicy(cs.ip)
+		log.Printf("Malformed params from %s@%s %v", login, cs.ip, params)
+		return false, &ErrorReply{Code: -1, Message: "Invalid params"}
+	}
+
+	if !noncePattern.MatchString(params[0]) || !hashPattern.MatchString(params[1]) || !hashPattern.MatchString(params[2]) {
+		s.policy.ApplyMalformedPolicy(cs.ip)
+		log.Printf("Malformed PoW result from %s@%s %v", login, cs.ip, params)
+		return false, &ErrorReply{Code: -1, Message: "Malformed PoW result"}
+	}
+
+	go func(s *ProxyServer, cs *Session, login, id string, params []string) {
+		t := s.currentBlockTemplate()
+
+		// processShare returns (exist, validShare):
+		//   true,  *     - duplicate share
+		//   false, false - stale/invalid share or rejected block
+		//   false, true  - accepted share or block, already recorded in DB
+		exist, validShare := s.processShare(login, id, cs.ip, t, params)
+		ok := s.policy.ApplySharePolicy(cs.ip, !exist && validShare)
+
+		switch {
+		case exist:
+			log.Printf("Duplicate share from %s@%s %v", login, cs.ip, params)
+			cs.lastErr = errors.New("Duplicate share")
+		case !validShare:
+			log.Printf("Invalid share from %s@%s", login, cs.ip)
+			// Bad shares limit reached, return error and close
+			if !ok {
+				cs.lastErr = errors.New("Invalid share")
+			}
+		default:
+			// BUGFIX: "Valid share" was previously logged unconditionally,
+			// even for duplicate and invalid submissions.
+			log.Printf("Valid share from %s@%s", login, cs.ip)
+		}
+
+		if !ok {
+			cs.lastErr = errors.New("High rate of invalid shares")
+		}
+	}(s, cs, login, id, params)
+
+	return true, nil
+}
+
+// handleGetBlockByNumberRPC serves the cached pending-block reply; nil when
+// no template has been fetched yet.
+func (s *ProxyServer) handleGetBlockByNumberRPC() *rpc.GetBlockReplyPart {
+	t := s.currentBlockTemplate()
+	var reply *rpc.GetBlockReplyPart
+	if t != nil {
+		reply = t.GetPendingBlockCache
+	}
+	return reply
+}
+
+// handleUnknownRPC counts the request as malformed and returns a JSON-RPC
+// "method not found" error.
+func (s *ProxyServer) handleUnknownRPC(cs *Session, m string) *ErrorReply {
+	log.Printf("Unknown request method %s from %s", m, cs.ip)
+	s.policy.ApplyMalformedPolicy(cs.ip)
+	return &ErrorReply{Code: -3, Message: "Method not found"}
+}
diff --git a/proxy/miner.go b/proxy/miner.go
new file mode 100644
index 0000000..68209f9
--- /dev/null
+++ b/proxy/miner.go
@@ -0,0 +1,97 @@
+package proxy
+
+import (
+ "log"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/etclabscore/go-etchash"
+)
+
+// ECIP-1099 epoch-calibration fork blocks for etchash.
+var ecip1099FBlockClassic uint64 = 11700000 // classic mainnet
+var ecip1099FBlockMordor uint64 = 2520000   // mordor testnet (not wired up here)
+
+// hasher verifies shares/blocks against classic-mainnet etchash.
+var hasher = etchash.New(&ecip1099FBlockClassic)
+
+// processShare verifies a submitted solution against the share difficulty
+// and, if it also meets the block difficulty, submits it upstream.
+// Returns (exist, valid): see the caller's table in handleSubmitRPC.
+func (s *ProxyServer) processShare(login, id, ip string, t *BlockTemplate, params []string) (bool, bool) {
+	// Now, the function received some work with login id and worker name and all information, ready to be processed
+	// and checked if it is a valid work or not, and if it is a block or not and write to db accordingly
+	nonceHex := params[0]
+	hashNoNonce := params[1]
+	mixDigest := params[2]
+	nonce, _ := strconv.ParseUint(strings.Replace(nonceHex, "0x", "", -1), 16, 64)
+	shareDiff := s.config.Proxy.Difficulty
+	stratumHostname := s.config.Proxy.StratumHostname
+
+	// Header not in the backlog -> miner is working on an outdated job.
+	h, ok := t.headers[hashNoNonce]
+	if !ok {
+		log.Printf("Stale share from %v@%v", login, ip)
+		// Here we have a stale share, we need to create a redis function as follows
+		// CASE1: stale Share
+		// s.backend.WriteWorkerShareStatus(login, id, valid bool, stale bool, invalid bool)
+		return false, false
+	}
+
+	// Same solution checked twice: once at share difficulty, once at the
+	// full network difficulty of the job's height.
+	share := Block{
+		number:      h.height,
+		hashNoNonce: common.HexToHash(hashNoNonce),
+		difficulty:  big.NewInt(shareDiff),
+		nonce:       nonce,
+		mixDigest:   common.HexToHash(mixDigest),
+	}
+
+	block := Block{
+		number:      h.height,
+		hashNoNonce: common.HexToHash(hashNoNonce),
+		difficulty:  h.diff,
+		nonce:       nonce,
+		mixDigest:   common.HexToHash(mixDigest),
+	}
+
+	if !hasher.Verify(share) {
+		// THis is an invalid block, record it
+		// CASE2: invalid Share
+		// s.backend.WriteWorkerShareStatus(login, id, valid bool, stale bool, invalid bool)
+		return false, false
+	}
+
+	if hasher.Verify(block) {
+		ok, err := s.rpc().SubmitBlock(params)
+		if err != nil {
+			log.Printf("Block submission failure at height %v for %v: %v", h.height, t.Header, err)
+		} else if !ok {
+			log.Printf("Block rejected at height %v for %v", h.height, t.Header)
+			return false, false
+		} else {
+			s.fetchBlockTemplate()
+			exist, err := s.backend.WriteBlock(login, id, params, shareDiff, h.diff.Int64(), h.height, s.hashrateExpiration, stratumHostname)
+			if exist {
+				return true, false
+			}
+			if err != nil {
+				log.Println("Failed to insert block candidate into backend:", err)
+			} else {
+				log.Printf("Inserted block %v to backend", h.height)
+			}
+			// Here we have a valid share, which is in-fact a block and it is written to db
+			log.Printf("Block found by miner %v@%v at height %d", login, ip, h.height)
+		}
+	} else {
+		exist, err := s.backend.WriteShare(login, id, params, shareDiff, h.height, s.hashrateExpiration, stratumHostname)
+		if exist {
+			return true, false
+		}
+		if err != nil {
+			log.Println("Failed to insert share data into backend:", err)
+		}
+
+		// Here we have a valid share, which is only a share and it is written to db
+	}
+	// This means success, either a valid share or a valid block, in this case, record a valid share for the worker
+	// CASE3: Valid Share
+	// s.backend.WriteWorkerShareStatus(login, id, valid bool, stale bool, invalid bool)
+
+	return false, true
+}
diff --git a/proxy/proto.go b/proxy/proto.go
new file mode 100644
index 0000000..16de943
--- /dev/null
+++ b/proxy/proto.go
@@ -0,0 +1,38 @@
+package proxy
+
+import "encoding/json"
+
+// JSONRpcReq is an incoming JSON-RPC request envelope; Params is kept raw
+// and decoded per-method.
+type JSONRpcReq struct {
+	Id     json.RawMessage `json:"id"`
+	Method string          `json:"method"`
+	Params json.RawMessage `json:"params"`
+}
+
+// StratumReq extends the RPC envelope with the stratum worker name.
+type StratumReq struct {
+	JSONRpcReq
+	Worker string `json:"worker"`
+}
+
+// Stratum
+// JSONPushMessage is a server-initiated job notification.
+type JSONPushMessage struct {
+	// FIXME: Temporarily add ID for Claymore compliance
+	Id      int64       `json:"id"`
+	Version string      `json:"jsonrpc"`
+	Result  interface{} `json:"result"`
+}
+
+// JSONRpcResp is an outgoing JSON-RPC response envelope.
+type JSONRpcResp struct {
+	Id      json.RawMessage `json:"id"`
+	Version string          `json:"jsonrpc"`
+	Result  interface{}     `json:"result"`
+	Error   interface{}     `json:"error,omitempty"`
+}
+
+// SubmitReply reports a submission status string.
+type SubmitReply struct {
+	Status string `json:"status"`
+}
+
+// ErrorReply is the JSON-RPC error object used across handlers.
+type ErrorReply struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}
diff --git a/proxy/proxy.go b/proxy/proxy.go
new file mode 100644
index 0000000..0f8d8e3
--- /dev/null
+++ b/proxy/proxy.go
@@ -0,0 +1,311 @@
+package proxy
+
+import (
+ "encoding/json"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/gorilla/mux"
+
+ "github.com/yuriy0803/open-etc-pool-friends/policy"
+ "github.com/yuriy0803/open-etc-pool-friends/rpc"
+ "github.com/yuriy0803/open-etc-pool-friends/storage"
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+// ProxyServer serves both the HTTP getwork endpoint and the TCP stratum
+// endpoint, sharing one block template and policy server.
+type ProxyServer struct {
+	config             *Config
+	blockTemplate      atomic.Value // holds *BlockTemplate
+	upstream           int32        // index into upstreams; accessed atomically
+	upstreams          []*rpc.RPCClient
+	backend            *storage.RedisClient
+	diff               string // pool share target as hex
+	policy             *policy.PolicyServer
+	hashrateExpiration time.Duration
+	failsCount         int64 // consecutive node-state write failures
+
+	// Stratum
+	sessionsMu sync.RWMutex
+	sessions   map[*Session]struct{}
+	timeout    time.Duration
+}
+
+// Session is one miner connection (HTTP request stream or TCP socket).
+type Session struct {
+	ip  string
+	enc *json.Encoder
+
+	// Stratum
+	sync.Mutex // serializes writes to enc on the shared TCP socket
+	conn       *net.TCPConn
+	login      string
+	lastErr    error // set async by share processing; closes the conn on next read
+}
+
+// NewProxy wires up upstream clients, the policy server, the optional
+// stratum listener, and the background timers for template refresh,
+// upstream health checks, and node-state reporting.
+func NewProxy(cfg *Config, backend *storage.RedisClient) *ProxyServer {
+	if len(cfg.Name) == 0 {
+		log.Fatal("You must set instance name")
+	}
+	policy := policy.Start(&cfg.Proxy.Policy, backend)
+
+	proxy := &ProxyServer{config: cfg, backend: backend, policy: policy}
+	proxy.diff = util.GetTargetHex(cfg.Proxy.Difficulty)
+
+	proxy.upstreams = make([]*rpc.RPCClient, len(cfg.Upstream))
+	for i, v := range cfg.Upstream {
+		proxy.upstreams[i] = rpc.NewRPCClient(v.Name, v.Url, v.Timeout)
+		log.Printf("Upstream: %s => %s", v.Name, v.Url)
+	}
+	log.Printf("Default upstream: %s => %s", proxy.rpc().Name, proxy.rpc().Url)
+
+	if cfg.Proxy.Stratum.Enabled {
+		proxy.sessions = make(map[*Session]struct{})
+		go proxy.ListenTCP()
+	}
+
+	// Fetch an initial template synchronously so work is ready at startup.
+	proxy.fetchBlockTemplate()
+
+	proxy.hashrateExpiration = util.MustParseDuration(cfg.Proxy.HashrateExpiration)
+
+	refreshIntv := util.MustParseDuration(cfg.Proxy.BlockRefreshInterval)
+	refreshTimer := time.NewTimer(refreshIntv)
+	log.Printf("Set block refresh every %v", refreshIntv)
+
+	checkIntv := util.MustParseDuration(cfg.UpstreamCheckInterval)
+	checkTimer := time.NewTimer(checkIntv)
+
+	stateUpdateIntv := util.MustParseDuration(cfg.Proxy.StateUpdateInterval)
+	stateUpdateTimer := time.NewTimer(stateUpdateIntv)
+
+	go func() {
+		for {
+			select {
+			case <-refreshTimer.C:
+				proxy.fetchBlockTemplate()
+				refreshTimer.Reset(refreshIntv)
+			}
+		}
+	}()
+
+	go func() {
+		for {
+			select {
+			case <-checkTimer.C:
+				proxy.checkUpstreams()
+				checkTimer.Reset(checkIntv)
+			}
+		}
+	}()
+
+	go func() {
+		for {
+			select {
+			case <-stateUpdateTimer.C:
+				t := proxy.currentBlockTemplate()
+				if t != nil {
+					// Node-state write doubles as the health probe.
+					err := backend.WriteNodeState(cfg.Name, t.Height, t.Difficulty)
+					if err != nil {
+						log.Printf("Failed to write node state to backend: %v", err)
+						proxy.markSick()
+					} else {
+						proxy.markOk()
+					}
+				}
+				stateUpdateTimer.Reset(stateUpdateIntv)
+			}
+		}
+	}()
+
+	return proxy
+}
+
+// Start runs the HTTP getwork server; blocks until the listener fails.
+func (s *ProxyServer) Start() {
+	log.Printf("Starting proxy on %v", s.config.Proxy.Listen)
+	r := mux.NewRouter()
+	// Routes: /<wallet>/<worker> and bare /<wallet>.
+	r.Handle("/{login:0x[0-9a-fA-F]{40}}/{id:[0-9a-zA-Z-_]{1,8}}", s)
+	r.Handle("/{login:0x[0-9a-fA-F]{40}}", s)
+	srv := &http.Server{
+		Addr:           s.config.Proxy.Listen,
+		Handler:        r,
+		MaxHeaderBytes: s.config.Proxy.LimitHeadersSize,
+	}
+	err := srv.ListenAndServe()
+	if err != nil {
+		log.Fatalf("Failed to start proxy: %v", err)
+	}
+}
+
+// rpc returns the currently selected upstream client.
+func (s *ProxyServer) rpc() *rpc.RPCClient {
+	i := atomic.LoadInt32(&s.upstream)
+	return s.upstreams[i]
+}
+
+// checkUpstreams selects the first healthy upstream (index 0 if none pass
+// the check) and switches to it if it differs from the current one.
+func (s *ProxyServer) checkUpstreams() {
+	candidate := int32(0)
+	backup := false
+
+	for i, v := range s.upstreams {
+		if v.Check() && !backup {
+			candidate = int32(i)
+			backup = true
+		}
+	}
+
+	// BUGFIX: s.upstream is read with atomic.LoadInt32 in rpc() and written
+	// with atomic.StoreInt32 below; the comparison previously used a plain
+	// (racy) read.
+	if atomic.LoadInt32(&s.upstream) != candidate {
+		log.Printf("Switching to %v upstream", s.upstreams[candidate].Name)
+		atomic.StoreInt32(&s.upstream, candidate)
+	}
+}
+
+// ServeHTTP accepts POSTed JSON-RPC from miners; banned IPs are silently
+// dropped (no error response).
+func (s *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		s.writeError(w, 405, "rpc: POST method required, received "+r.Method)
+		return
+	}
+	ip := s.remoteAddr(r)
+	if !s.policy.IsBanned(ip) {
+		s.handleClient(w, r, ip)
+	}
+}
+
+// remoteAddr resolves the client IP, honoring X-Forwarded-For only when
+// configured to run behind a reverse proxy (and only if it parses as an IP).
+func (s *ProxyServer) remoteAddr(r *http.Request) string {
+	if s.config.Proxy.BehindReverseProxy {
+		ip := r.Header.Get("X-Forwarded-For")
+		if len(ip) > 0 && net.ParseIP(ip) != nil {
+			return ip
+		}
+	}
+	ip, _, _ := net.SplitHostPort(r.RemoteAddr)
+	return ip
+}
+
+// handleClient streams JSON-RPC requests from one HTTP body, enforcing the
+// configured body-size limit; each decoded request is dispatched in turn.
+func (s *ProxyServer) handleClient(w http.ResponseWriter, r *http.Request, ip string) {
+	if r.ContentLength > s.config.Proxy.LimitBodySize {
+		log.Printf("Socket flood from %s", ip)
+		s.policy.ApplyMalformedPolicy(ip)
+		http.Error(w, "Request too large", http.StatusExpectationFailed)
+		return
+	}
+	// Hard cap in case Content-Length lied or was absent.
+	r.Body = http.MaxBytesReader(w, r.Body, s.config.Proxy.LimitBodySize)
+	defer r.Body.Close()
+
+	cs := &Session{ip: ip, enc: json.NewEncoder(w)}
+	dec := json.NewDecoder(r.Body)
+	for {
+		var req JSONRpcReq
+		if err := dec.Decode(&req); err == io.EOF {
+			break
+		} else if err != nil {
+			log.Printf("Malformed request from %v: %v", ip, err)
+			s.policy.ApplyMalformedPolicy(ip)
+			return
+		}
+		cs.handleMessage(s, r, &req)
+	}
+}
+
+// handleMessage validates the login from the URL, applies the blacklist
+// policy, and dispatches one JSON-RPC request to the matching handler.
+func (cs *Session) handleMessage(s *ProxyServer, r *http.Request, req *JSONRpcReq) {
+	if req.Id == nil {
+		log.Printf("Missing RPC id from %s", cs.ip)
+		s.policy.ApplyMalformedPolicy(cs.ip)
+		return
+	}
+
+	vars := mux.Vars(r)
+	login := strings.ToLower(vars["login"])
+
+	if !util.IsValidHexAddress(login) {
+		errReply := &ErrorReply{Code: -1, Message: "Invalid login"}
+		cs.sendError(req.Id, errReply)
+		return
+	}
+	if !s.policy.ApplyLoginPolicy(login, cs.ip) {
+		errReply := &ErrorReply{Code: -1, Message: "You are blacklisted"}
+		cs.sendError(req.Id, errReply)
+		return
+	}
+
+	// Handle RPC methods
+	switch req.Method {
+	case "eth_getWork":
+		reply, errReply := s.handleGetWorkRPC(cs)
+		if errReply != nil {
+			cs.sendError(req.Id, errReply)
+			break
+		}
+		cs.sendResult(req.Id, &reply)
+	case "eth_submitWork":
+		if req.Params != nil {
+			var params []string
+			// BUGFIX: "&params" had been mangled into "¶ms" (HTML-entity
+			// corruption of "&para"); restored the address-of expression.
+			err := json.Unmarshal(req.Params, &params)
+			if err != nil {
+				log.Printf("Unable to parse params from %v", cs.ip)
+				s.policy.ApplyMalformedPolicy(cs.ip)
+				break
+			}
+			reply, errReply := s.handleSubmitRPC(cs, login, vars["id"], params)
+			if errReply != nil {
+				cs.sendError(req.Id, errReply)
+				break
+			}
+			cs.sendResult(req.Id, &reply)
+		} else {
+			s.policy.ApplyMalformedPolicy(cs.ip)
+			errReply := &ErrorReply{Code: -1, Message: "Malformed request"}
+			cs.sendError(req.Id, errReply)
+		}
+	case "eth_getBlockByNumber":
+		reply := s.handleGetBlockByNumberRPC()
+		cs.sendResult(req.Id, reply)
+	case "eth_submitHashrate":
+		cs.sendResult(req.Id, true)
+	default:
+		errReply := s.handleUnknownRPC(cs, req.Method)
+		cs.sendError(req.Id, errReply)
+	}
+}
+
+// sendResult encodes a JSON-RPC 2.0 success response onto the session.
+func (cs *Session) sendResult(id json.RawMessage, result interface{}) error {
+	message := JSONRpcResp{Id: id, Version: "2.0", Error: nil, Result: result}
+	return cs.enc.Encode(&message)
+}
+
+// sendError encodes a JSON-RPC 2.0 error response onto the session.
+func (cs *Session) sendError(id json.RawMessage, reply *ErrorReply) error {
+	message := JSONRpcResp{Id: id, Version: "2.0", Error: reply}
+	return cs.enc.Encode(&message)
+}
+
+// writeError answers a non-RPC request with a plain-text error.
+func (s *ProxyServer) writeError(w http.ResponseWriter, status int, msg string) {
+	// BUGFIX: headers must be set before WriteHeader (setting Content-Type
+	// afterwards had no effect), and msg was previously dropped entirely.
+	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	w.WriteHeader(status)
+	io.WriteString(w, msg)
+}
+
+// currentBlockTemplate returns the last published template, or nil before
+// the first successful fetch.
+func (s *ProxyServer) currentBlockTemplate() *BlockTemplate {
+	t := s.blockTemplate.Load()
+	if t != nil {
+		return t.(*BlockTemplate)
+	} else {
+		return nil
+	}
+}
+
+// markSick counts one consecutive backend failure.
+func (s *ProxyServer) markSick() {
+	atomic.AddInt64(&s.failsCount, 1)
+}
+
+// isSick reports whether the failure count crossed MaxFails (only when
+// health checking is enabled).
+func (s *ProxyServer) isSick() bool {
+	x := atomic.LoadInt64(&s.failsCount)
+	if s.config.Proxy.HealthCheck && x >= s.config.Proxy.MaxFails {
+		return true
+	}
+	return false
+}
+
+// markOk resets the failure count after a successful backend write.
+func (s *ProxyServer) markOk() {
+	atomic.StoreInt64(&s.failsCount, 0)
+}
diff --git a/proxy/stratum.go b/proxy/stratum.go
new file mode 100644
index 0000000..31daef2
--- /dev/null
+++ b/proxy/stratum.go
@@ -0,0 +1,221 @@
+package proxy
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "io"
+ "log"
+ "net"
+ "time"
+
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
+const (
+	// MaxReqSize caps a single stratum request line in bytes.
+	MaxReqSize = 1024
+)
+
+// ListenTCP runs the stratum accept loop. Concurrency is bounded by the
+// MaxConn-sized accept channel; banned and rate-limited IPs are dropped
+// before a session is created.
+func (s *ProxyServer) ListenTCP() {
+	timeout := util.MustParseDuration(s.config.Proxy.Stratum.Timeout)
+	s.timeout = timeout
+
+	addr, err := net.ResolveTCPAddr("tcp", s.config.Proxy.Stratum.Listen)
+	if err != nil {
+		log.Fatalf("Error: %v", err)
+	}
+	server, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		log.Fatalf("Error: %v", err)
+	}
+	defer server.Close()
+
+	log.Printf("Stratum listening on %s", s.config.Proxy.Stratum.Listen)
+	var accept = make(chan int, s.config.Proxy.Stratum.MaxConn)
+	n := 0
+
+	for {
+		conn, err := server.AcceptTCP()
+		if err != nil {
+			continue
+		}
+		conn.SetKeepAlive(true)
+
+		ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
+
+		if s.policy.IsBanned(ip) || !s.policy.ApplyLimitPolicy(ip) {
+			conn.Close()
+			continue
+		}
+		n += 1
+		cs := &Session{conn: conn, ip: ip}
+
+		// Blocks when MaxConn sessions are already being handled.
+		accept <- n
+		go func(cs *Session) {
+			err = s.handleTCPClient(cs)
+			if err != nil || cs.lastErr != nil {
+				s.removeSession(cs)
+				conn.Close()
+			}
+			<-accept
+		}(cs)
+	}
+}
+
+// handleTCPClient reads newline-delimited JSON requests from one stratum
+// connection until EOF, error, or an over-length line (treated as flood).
+func (s *ProxyServer) handleTCPClient(cs *Session) error {
+	cs.enc = json.NewEncoder(cs.conn)
+	connbuff := bufio.NewReaderSize(cs.conn, MaxReqSize)
+	s.setDeadline(cs.conn)
+
+	for {
+		data, isPrefix, err := connbuff.ReadLine()
+		// isPrefix means the line exceeded MaxReqSize: ban as a flooder.
+		if isPrefix {
+			log.Printf("Socket flood detected from %s", cs.ip)
+			s.policy.BanClient(cs.ip)
+			return err
+		} else if err == io.EOF {
+			log.Printf("Client %s disconnected", cs.ip)
+			s.removeSession(cs)
+			break
+		} else if err != nil {
+			log.Printf("Error reading from socket: %v", err)
+			return err
+		}
+
+		if len(data) > 1 {
+			var req StratumReq
+			err = json.Unmarshal(data, &req)
+			if err != nil {
+				s.policy.ApplyMalformedPolicy(cs.ip)
+				log.Printf("Malformed stratum request from %s: %v", cs.ip, err)
+				return err
+			}
+			// Each well-formed request extends the idle deadline.
+			s.setDeadline(cs.conn)
+			err = cs.handleTCPMessage(s, &req)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// handleTCPMessage dispatches one stratum request to the matching handler.
+// A non-nil return closes the connection.
+func (cs *Session) handleTCPMessage(s *ProxyServer, req *StratumReq) error {
+	// Handle RPC methods
+	switch req.Method {
+	case "eth_submitLogin":
+		var params []string
+		// BUGFIX: "&params" had been mangled into "¶ms" (HTML-entity
+		// corruption); restored the address-of expression here and below.
+		err := json.Unmarshal(req.Params, &params)
+		if err != nil {
+			log.Println("Malformed stratum request params from", cs.ip)
+			return err
+		}
+		reply, errReply := s.handleLoginRPC(cs, params, req.Worker)
+		if errReply != nil {
+			return cs.sendTCPError(req.Id, errReply)
+		}
+		return cs.sendTCPResult(req.Id, reply)
+	case "eth_getWork":
+		reply, errReply := s.handleGetWorkRPC(cs)
+		if errReply != nil {
+			return cs.sendTCPError(req.Id, errReply)
+		}
+		return cs.sendTCPResult(req.Id, &reply)
+	case "eth_submitWork":
+		var params []string
+		err := json.Unmarshal(req.Params, &params)
+		if err != nil {
+			log.Println("Malformed stratum request params from", cs.ip)
+			return err
+		}
+		reply, errReply := s.handleTCPSubmitRPC(cs, req.Worker, params)
+		if errReply != nil {
+			return cs.sendTCPError(req.Id, errReply)
+		}
+		return cs.sendTCPResult(req.Id, &reply)
+	case "eth_submitHashrate":
+		return cs.sendTCPResult(req.Id, true)
+	default:
+		errReply := s.handleUnknownRPC(cs, req.Method)
+		return cs.sendTCPError(req.Id, errReply)
+	}
+}
+
+// sendTCPResult writes a JSON-RPC success response; the session mutex
+// serializes concurrent writers on the shared socket.
+func (cs *Session) sendTCPResult(id json.RawMessage, result interface{}) error {
+	cs.Lock()
+	defer cs.Unlock()
+
+	message := JSONRpcResp{Id: id, Version: "2.0", Error: nil, Result: result}
+	return cs.enc.Encode(&message)
+}
+
+// pushNewJob sends a server-initiated job notification to the miner.
+func (cs *Session) pushNewJob(result interface{}) error {
+	cs.Lock()
+	defer cs.Unlock()
+	// FIXME: Temporarily add ID for Claymore compliance
+	message := JSONPushMessage{Version: "2.0", Result: result, Id: 0}
+	return cs.enc.Encode(&message)
+}
+
+// sendTCPError writes a JSON-RPC error response, then returns the error so
+// the caller drops the connection.
+func (cs *Session) sendTCPError(id json.RawMessage, reply *ErrorReply) error {
+	cs.Lock()
+	defer cs.Unlock()
+
+	message := JSONRpcResp{Id: id, Version: "2.0", Error: reply}
+	err := cs.enc.Encode(&message)
+	if err != nil {
+		return err
+	}
+	return errors.New(reply.Message)
+}
+
+// setDeadline pushes the connection's read/write deadline forward by the
+// configured stratum timeout.
+// Receiver renamed from non-idiomatic "self" to "s" for consistency with
+// every other ProxyServer method (Go convention forbids self/this).
+func (s *ProxyServer) setDeadline(conn *net.TCPConn) {
+	conn.SetDeadline(time.Now().Add(s.timeout))
+}
+
+// registerSession adds a stratum session to the broadcast set.
+func (s *ProxyServer) registerSession(cs *Session) {
+	s.sessionsMu.Lock()
+	defer s.sessionsMu.Unlock()
+	s.sessions[cs] = struct{}{}
+}
+
+// removeSession drops a stratum session from the broadcast set.
+func (s *ProxyServer) removeSession(cs *Session) {
+	s.sessionsMu.Lock()
+	defer s.sessionsMu.Unlock()
+	delete(s.sessions, cs)
+}
+
+// broadcastNewJobs pushes the current work package to every connected
+// stratum session, bounding concurrent pushes via the bcast channel.
+func (s *ProxyServer) broadcastNewJobs() {
+	t := s.currentBlockTemplate()
+	if t == nil || len(t.Header) == 0 || s.isSick() {
+		return
+	}
+	reply := []string{t.Header, t.Seed, s.diff}
+
+	s.sessionsMu.RLock()
+	defer s.sessionsMu.RUnlock()
+
+	count := len(s.sessions)
+	log.Printf("Broadcasting new job to %v stratum miners", count)
+
+	start := time.Now()
+	bcast := make(chan int, 1024)
+	n := 0
+
+	// gofmt/staticcheck S1005: "for m, _ := range" simplified.
+	for m := range s.sessions {
+		n++
+		bcast <- n
+
+		go func(cs *Session) {
+			err := cs.pushNewJob(&reply)
+			<-bcast
+			if err != nil {
+				log.Printf("Job transmit error to %v@%v: %v", cs.login, cs.ip, err)
+				s.removeSession(cs)
+			} else {
+				s.setDeadline(cs.conn)
+			}
+		}(m)
+	}
+	log.Printf("Jobs broadcast finished %s", time.Since(start))
+}
diff --git a/rpc/rpc.go b/rpc/rpc.go
new file mode 100644
index 0000000..fc11c6d
--- /dev/null
+++ b/rpc/rpc.go
@@ -0,0 +1,300 @@
+package rpc
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
// RPCClient is a JSON-RPC client bound to a single upstream node. The
// embedded RWMutex guards the sick/sickRate/successRate health fields,
// which are maintained by markSick/markAlive.
type RPCClient struct {
	sync.RWMutex
	Url         string
	Name        string
	sick        bool
	sickRate    int
	successRate int
	client      *http.Client
}
+
// GetBlockReply is the decoded result of eth_getBlockByNumber/ByHash,
// limited to the fields the pool consumes. Numeric fields are hex
// quantity strings as returned by the node.
type GetBlockReply struct {
	Number       string   `json:"number"`
	Hash         string   `json:"hash"`
	Nonce        string   `json:"nonce"`
	Miner        string   `json:"miner"`
	Difficulty   string   `json:"difficulty"`
	GasLimit     string   `json:"gasLimit"`
	GasUsed      string   `json:"gasUsed"`
	Transactions []Tx     `json:"transactions"`
	Uncles       []string `json:"uncles"`
	// https://github.com/ethereum/EIPs/issues/95
	SealFields []string `json:"sealFields"`
}

// GetBlockReplyPart is a minimal block view used when only the height
// and difficulty are needed (see GetPendingBlock).
type GetBlockReplyPart struct {
	Number     string `json:"number"`
	Difficulty string `json:"difficulty"`
}
+
// receiptStatusSuccessful is the post-Byzantium receipt status value
// for a successful transaction.
const receiptStatusSuccessful = "0x1"

// TxReceipt is the decoded result of eth_getTransactionReceipt, limited
// to the fields needed to confirm payout transactions.
type TxReceipt struct {
	TxHash    string `json:"transactionHash"`
	GasUsed   string `json:"gasUsed"`
	BlockHash string `json:"blockHash"`
	Status    string `json:"status"`
}
+
+func (r *TxReceipt) Confirmed() bool {
+ return len(r.BlockHash) > 0
+}
+
+// Use with previous method
+func (r *TxReceipt) Successful() bool {
+ if len(r.Status) > 0 {
+ return r.Status == receiptStatusSuccessful
+ }
+ return true
+}
+
// Tx is the subset of a block transaction used for gas-fee accounting.
type Tx struct {
	Gas      string `json:"gas"`
	GasPrice string `json:"gasPrice"`
	Hash     string `json:"hash"`
}

// JSONRpcResp is a generic JSON-RPC response envelope. Result is kept
// raw so each caller can decode its own shape; Error is non-nil when
// the node reports a failure.
type JSONRpcResp struct {
	Id     *json.RawMessage       `json:"id"`
	Result *json.RawMessage       `json:"result"`
	Error  map[string]interface{} `json:"error"`
}
+
+func NewRPCClient(name, url, timeout string) *RPCClient {
+ rpcClient := &RPCClient{Name: name, Url: url}
+ timeoutIntv := util.MustParseDuration(timeout)
+ rpcClient.client = &http.Client{
+ Timeout: timeoutIntv,
+ }
+ return rpcClient
+}
+
+func (r *RPCClient) GetWork() ([]string, error) {
+ rpcResp, err := r.doPost(r.Url, "eth_getWork", []string{})
+ if err != nil {
+ return nil, err
+ }
+ var reply []string
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ return reply, err
+}
+
+func (r *RPCClient) GetPendingBlock() (*GetBlockReplyPart, error) {
+ rpcResp, err := r.doPost(r.Url, "eth_getBlockByNumber", []interface{}{"latest", true})
+ if err != nil {
+ return nil, err
+ }
+ if rpcResp.Result != nil {
+ var reply *GetBlockReplyPart
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ return reply, err
+ }
+ return nil, nil
+}
+
+func (r *RPCClient) GetBlockByHeight(height int64) (*GetBlockReply, error) {
+ params := []interface{}{fmt.Sprintf("0x%x", height), true}
+ return r.getBlockBy("eth_getBlockByNumber", params)
+}
+
+func (r *RPCClient) GetBlockByHash(hash string) (*GetBlockReply, error) {
+ params := []interface{}{hash, true}
+ return r.getBlockBy("eth_getBlockByHash", params)
+}
+
+func (r *RPCClient) GetUncleByBlockNumberAndIndex(height int64, index int) (*GetBlockReply, error) {
+ params := []interface{}{fmt.Sprintf("0x%x", height), fmt.Sprintf("0x%x", index)}
+ return r.getBlockBy("eth_getUncleByBlockNumberAndIndex", params)
+}
+
+func (r *RPCClient) getBlockBy(method string, params []interface{}) (*GetBlockReply, error) {
+ rpcResp, err := r.doPost(r.Url, method, params)
+ if err != nil {
+ return nil, err
+ }
+ if rpcResp.Result != nil {
+ var reply *GetBlockReply
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ return reply, err
+ }
+ return nil, nil
+}
+
+func (r *RPCClient) GetTxReceipt(hash string) (*TxReceipt, error) {
+ rpcResp, err := r.doPost(r.Url, "eth_getTransactionReceipt", []string{hash})
+ if err != nil {
+ return nil, err
+ }
+ if rpcResp.Result != nil {
+ var reply *TxReceipt
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ return reply, err
+ }
+ return nil, nil
+}
+
+func (r *RPCClient) SubmitBlock(params []string) (bool, error) {
+ rpcResp, err := r.doPost(r.Url, "eth_submitWork", params)
+ if err != nil {
+ return false, err
+ }
+ var reply bool
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ return reply, err
+}
+
+func (r *RPCClient) GetBalance(address string) (*big.Int, error) {
+ rpcResp, err := r.doPost(r.Url, "eth_getBalance", []string{address, "latest"})
+ if err != nil {
+ return nil, err
+ }
+ var reply string
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ if err != nil {
+ return nil, err
+ }
+ return util.String2Big(reply), err
+}
+
+func (r *RPCClient) Sign(from string, s string) (string, error) {
+ hash := sha256.Sum256([]byte(s))
+ rpcResp, err := r.doPost(r.Url, "eth_sign", []string{from, common.ToHex(hash[:])})
+ var reply string
+ if err != nil {
+ return reply, err
+ }
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ if err != nil {
+ return reply, err
+ }
+ if util.IsZeroHash(reply) {
+ err = errors.New("Can't sign message, perhaps account is locked")
+ }
+ return reply, err
+}
+
+func (r *RPCClient) GetPeerCount() (int64, error) {
+ rpcResp, err := r.doPost(r.Url, "net_peerCount", nil)
+ if err != nil {
+ return 0, err
+ }
+ var reply string
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.Replace(reply, "0x", "", -1), 16, 64)
+}
+
+func (r *RPCClient) SendTransaction(from, to, gas, gasPrice, value string, autoGas bool) (string, error) {
+ params := map[string]string{
+ "from": from,
+ "to": to,
+ "value": value,
+ }
+ if !autoGas {
+ params["gas"] = gas
+ params["gasPrice"] = gasPrice
+ }
+ rpcResp, err := r.doPost(r.Url, "eth_sendTransaction", []interface{}{params})
+ var reply string
+ if err != nil {
+ return reply, err
+ }
+ err = json.Unmarshal(*rpcResp.Result, &reply)
+ if err != nil {
+ return reply, err
+ }
+ /* There is an inconsistence in a "standard". Geth returns error if it can't unlock signer account,
+ * but Parity returns zero hash 0x000... if it can't send tx, so we must handle this case.
+ * https://github.com/ethereum/wiki/wiki/JSON-RPC#returns-22
+ */
+ if util.IsZeroHash(reply) {
+ err = errors.New("transaction is not yet available")
+ }
+ return reply, err
+}
+
+func (r *RPCClient) doPost(url string, method string, params interface{}) (*JSONRpcResp, error) {
+ jsonReq := map[string]interface{}{"jsonrpc": "2.0", "method": method, "params": params, "id": 0}
+ data, _ := json.Marshal(jsonReq)
+
+ req, err := http.NewRequest("POST", url, bytes.NewBuffer(data))
+ req.Header.Set("Content-Length", (string)(len(data)))
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Accept", "application/json")
+
+ resp, err := r.client.Do(req)
+ if err != nil {
+ r.markSick()
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var rpcResp *JSONRpcResp
+ err = json.NewDecoder(resp.Body).Decode(&rpcResp)
+ if err != nil {
+ r.markSick()
+ return nil, err
+ }
+ if rpcResp.Error != nil {
+ r.markSick()
+ return nil, errors.New(rpcResp.Error["message"].(string))
+ }
+ return rpcResp, err
+}
+
+func (r *RPCClient) Check() bool {
+ _, err := r.GetWork()
+ if err != nil {
+ return false
+ }
+ r.markAlive()
+ return !r.Sick()
+}
+
+func (r *RPCClient) Sick() bool {
+ r.RLock()
+ defer r.RUnlock()
+ return r.sick
+}
+
+func (r *RPCClient) markSick() {
+ r.Lock()
+ r.sickRate++
+ r.successRate = 0
+ if r.sickRate >= 5 {
+ r.sick = true
+ }
+ r.Unlock()
+}
+
+func (r *RPCClient) markAlive() {
+ r.Lock()
+ r.successRate++
+ if r.successRate >= 5 {
+ r.sick = false
+ r.sickRate = 0
+ r.successRate = 0
+ }
+ r.Unlock()
+}
diff --git a/scripts/start_2_bil.sh b/scripts/start_2_bil.sh
new file mode 100644
index 0000000..d90d828
--- /dev/null
+++ b/scripts/start_2_bil.sh
@@ -0,0 +1,2 @@
#!/bin/bash
# Launch the pool with the 2-billion-difficulty stratum configuration.
# exec replaces the shell so signals reach the pool process directly;
# also fixes the missing trailing newline at end of file.
exec ./build/bin/open-etc-pool-friends ./configs/stratum2b.json
\ No newline at end of file
diff --git a/service_installer.sh b/service_installer.sh
new file mode 100644
index 0000000..9f2a1e8
--- /dev/null
+++ b/service_installer.sh
@@ -0,0 +1,108 @@
#!/bin/bash
# Installs systemd units for the pool processes, based on the pool
# binary and config directory under the pool user's home. Each unit runs
# the same binary with a different JSON config. Must run as root (writes
# to /etc/systemd/system).
user="perklepool"
coin="prkl"
config_dir="/home/$user/open-etc-pool-friends/configs"
poolbinary="/home/$user/open-etc-pool-friends/build/bin/open-etc-pool-friends"

# Quoted tests: the originals broke on paths containing spaces.
if [ ! -e "$config_dir" ] || [ ! -e "$poolbinary" ]; then
    echo missing config dir or pool binary, exiting
    exit 1
fi

# One unit per pool role; the role name doubles as the config file name.
# NOTE(review): configs/stratum8b.json ships with the repo but was never
# installed by the original script; add it to the lists if desired.
for role in api stratum2b stratum4b stratum9b unlocker payout; do
cat > "/etc/systemd/system/$coin-$role.service" <<EOF

[Unit]
Description=$coin-$role

[Service]
Type=simple
ExecStart=$poolbinary $config_dir/$role.json

[Install]
WantedBy=multi-user.target
EOF
done

systemctl daemon-reload

# unlocker and payout stay disabled by default (they move real funds);
# enable them deliberately, as in the original script.
for role in api stratum2b stratum4b stratum9b; do
    systemctl enable "$coin-$role"
done
#systemctl enable $coin-unlocker
#systemctl enable $coin-payout

for role in api stratum2b stratum4b stratum9b; do
    systemctl start "$coin-$role"
done
#systemctl start $coin-unlocker
#systemctl start $coin-payout
diff --git a/storage/redis.go b/storage/redis.go
new file mode 100644
index 0000000..7be9e6e
--- /dev/null
+++ b/storage/redis.go
@@ -0,0 +1,1329 @@
+package storage
+
+import (
+ "fmt"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/redis.v3"
+
+ "github.com/yuriy0803/open-etc-pool-friends/util"
+)
+
// Config holds the Redis connection settings from the pool JSON config.
type Config struct {
	Endpoint string `json:"endpoint"` // host:port, or a unix socket path starting with "/"
	Password string `json:"password"`
	Database int64  `json:"database"`
	PoolSize int    `json:"poolSize"`
}

// RedisClient wraps a redis connection together with the pool's key
// prefix and the PPLNS window length (max entries in "lastshares").
type RedisClient struct {
	client *redis.Client
	prefix string
	pplns  int64
}

// PoolCharts is one sample of the pool-wide hashrate chart.
type PoolCharts struct {
	Timestamp  int64  `json:"x"`
	TimeFormat string `json:"timeFormat"`
	PoolHash   int64  `json:"y"`
}

// MinerCharts is one sample of a single miner's hashrate chart.
type MinerCharts struct {
	Timestamp      int64  `json:"x"`
	TimeFormat     string `json:"timeFormat"`
	MinerHash      int64  `json:"minerHash"`
	MinerLargeHash int64  `json:"minerLargeHash"`
	WorkerOnline   string `json:"workerOnline"`
}

// PaymentCharts is one sample of a miner's payment-history chart.
type PaymentCharts struct {
	Timestamp  int64  `json:"x"`
	TimeFormat string `json:"timeFormat"`
	Amount     int64  `json:"amount"`
}

// SumRewardData aggregates a miner's rewards over a named interval.
// NOTE(review): the json tag "inverval" looks like a typo for
// "interval", but the frontend may rely on it — confirm before fixing.
type SumRewardData struct {
	Interval int64  `json:"inverval"`
	Reward   int64  `json:"reward"`
	Name     string `json:"name"`
	Offset   int64  `json:"offset"`
}

// RewardData describes one per-block reward credited to a miner.
type RewardData struct {
	Height    int64   `json:"blockheight"`
	Timestamp int64   `json:"timestamp"`
	BlockHash string  `json:"blockhash"`
	Reward    int64   `json:"reward"`
	Percent   float64 `json:"percent"`
	Immature  bool    `json:"immature"`
}

// BlockData is the canonical representation of a found block as stored
// in the candidates/immature/matured sets. Fields tagged "-" live only
// in the serialized redis member string (see key/serializeHash).
type BlockData struct {
	Height         int64    `json:"height"`
	Timestamp      int64    `json:"timestamp"`
	Difficulty     int64    `json:"difficulty"`
	TotalShares    int64    `json:"shares"`
	Uncle          bool     `json:"uncle"`
	UncleHeight    int64    `json:"uncleHeight"`
	Orphan         bool     `json:"orphan"`
	Hash           string   `json:"hash"`
	Nonce          string   `json:"-"`
	PowHash        string   `json:"-"`
	MixDigest      string   `json:"-"`
	Reward         *big.Int `json:"-"`
	ExtraReward    *big.Int `json:"-"`
	ImmatureReward string   `json:"-"`
	RewardString   string   `json:"reward"`
	RoundHeight    int64    `json:"-"`
	candidateKey   string
	immatureKey    string
}
+
+func (b *BlockData) RewardInShannon() int64 {
+ reward := new(big.Int).Div(b.Reward, util.Shannon)
+ return reward.Int64()
+}
+
+func (b *BlockData) serializeHash() string {
+ if len(b.Hash) > 0 {
+ return b.Hash
+ } else {
+ return "0x0"
+ }
+}
+
// RoundKey identifies the share round this block closes
// ("roundHeight:hash").
func (b *BlockData) RoundKey() string {
	return join(b.RoundHeight, b.Hash)
}

// key serializes the block's bookkeeping fields into the redis member
// string; the field order must match the convert* parsers.
func (b *BlockData) key() string {
	return join(b.UncleHeight, b.Orphan, b.Nonce, b.serializeHash(), b.Timestamp, b.Difficulty, b.TotalShares, b.Reward)
}
+
// Miner is a live-stats snapshot for one mining account.
type Miner struct {
	LastBeat  int64 `json:"lastBeat"`
	HR        int64 `json:"hr"`
	Offline   bool  `json:"offline"`
	startedAt int64
}

// Addition from Mohannad Otaibi to report Difficulty
// Worker extends Miner with per-rig stats: long-window hashrate,
// reported share difficulty and rig hostname.
type Worker struct {
	Miner
	TotalHR        int64  `json:"hr2"`
	WorkerDiff     int64  `json:"difficulty"`
	WorkerHostname string `json:"hostname"`
}
+
+func NewRedisClient(cfg *Config, prefix string, pplns int64) *RedisClient {
+ options := redis.Options{
+ Addr: cfg.Endpoint,
+ Password: cfg.Password,
+ DB: cfg.Database,
+ PoolSize: cfg.PoolSize,
+ }
+ if cfg.Endpoint[0:1] == "/" {
+ options.Network = "unix"
+ }
+ client := redis.NewClient(&options)
+ return &RedisClient{client: client, prefix: prefix, pplns: pplns}
+}
+
// Client exposes the underlying redis connection.
func (r *RedisClient) Client() *redis.Client {
	return r.client
}

// Check pings Redis to verify the connection is alive.
func (r *RedisClient) Check() (string, error) {
	return r.client.Ping().Result()
}

// BgSave asks Redis to snapshot the dataset in the background.
func (r *RedisClient) BgSave() (string, error) {
	return r.client.BgSave().Result()
}
+
+// Always returns list of addresses. If Redis fails it will return empty list.
+func (r *RedisClient) GetBlacklist() ([]string, error) {
+ cmd := r.client.SMembers(r.formatKey("blacklist"))
+ if cmd.Err() != nil {
+ return []string{}, cmd.Err()
+ }
+ return cmd.Val(), nil
+}
+
+// Always returns list of IPs. If Redis fails it will return empty list.
+func (r *RedisClient) GetWhitelist() ([]string, error) {
+ cmd := r.client.SMembers(r.formatKey("whitelist"))
+ if cmd.Err() != nil {
+ return []string{}, cmd.Err()
+ }
+ return cmd.Val(), nil
+}
+
+func (r *RedisClient) WritePoolCharts(time1 int64, time2 string, poolHash string) error {
+ s := join(time1, time2, poolHash)
+ cmd := r.client.ZAdd(r.formatKey("charts", "pool"), redis.Z{Score: float64(time1), Member: s})
+ return cmd.Err()
+}
+
+func (r *RedisClient) WriteMinerCharts(time1 int64, time2, k string, hash, largeHash, workerOnline int64) error {
+ s := join(time1, time2, hash, largeHash, workerOnline)
+ cmd := r.client.ZAdd(r.formatKey("charts", "miner", k), redis.Z{Score: float64(time1), Member: s})
+ return cmd.Err()
+}
+
// GetPoolCharts trims pool-hashrate samples older than 48h (172800s)
// and returns the newest poolHashLen+1 points, newest first.
func (r *RedisClient) GetPoolCharts(poolHashLen int64) (stats []*PoolCharts, err error) {

	tx := r.client.Multi()
	defer tx.Close()

	now := util.MakeTimestamp() / 1000

	cmds, err := tx.Exec(func() error {
		tx.ZRemRangeByScore(r.formatKey("charts", "pool"), "-inf", fmt.Sprint("(", now-172800))
		tx.ZRevRangeWithScores(r.formatKey("charts", "pool"), 0, poolHashLen)
		return nil
	})

	if err != nil {
		return nil, err
	}

	// cmds[0] is the trim; cmds[1] holds the ZREVRANGE reply.
	stats = convertPoolChartsResults(cmds[1].(*redis.ZSliceCmd))
	return stats, nil
}
+
+func convertPoolChartsResults(raw *redis.ZSliceCmd) []*PoolCharts {
+ var result []*PoolCharts
+ for _, v := range raw.Val() {
+ // "Timestamp:TimeFormat:Hash"
+ pc := PoolCharts{}
+ pc.Timestamp = int64(v.Score)
+ str := v.Member.(string)
+ pc.TimeFormat = str[strings.Index(str, ":")+1 : strings.LastIndex(str, ":")]
+ pc.PoolHash, _ = strconv.ParseInt(str[strings.LastIndex(str, ":")+1:], 10, 64)
+ result = append(result, &pc)
+ }
+ return result
+}
+
+func convertMinerChartsResults(raw *redis.ZSliceCmd) []*MinerCharts {
+ var result []*MinerCharts
+ for _, v := range raw.Val() {
+ // "Timestamp:TimeFormat:Hash:largeHash:workerOnline"
+ mc := MinerCharts{}
+ mc.Timestamp = int64(v.Score)
+ str := v.Member.(string)
+ mc.TimeFormat = strings.Split(str, ":")[1]
+ mc.MinerHash, _ = strconv.ParseInt(strings.Split(str, ":")[2], 10, 64)
+ mc.MinerLargeHash, _ = strconv.ParseInt(strings.Split(str, ":")[3], 10, 64)
+ mc.WorkerOnline = strings.Split(str, ":")[4]
+ result = append(result, &mc)
+ }
+ return result
+}
+
+func (r *RedisClient) GetAllMinerAccount() (account []string, err error) {
+ var c int64
+ for {
+ now := util.MakeTimestamp() / 1000
+ c, keys, err := r.client.Scan(c, r.formatKey("miners", "*"), now).Result()
+
+ if err != nil {
+ return account, err
+ }
+ for _, key := range keys {
+ m := strings.Split(key, ":")
+ //if ( len(m) >= 2 && strings.Index(strings.ToLower(m[2]), "0x") == 0) {
+ if len(m) >= 2 {
+ account = append(account, m[2])
+ }
+ }
+ if c == 0 {
+ break
+ }
+ }
+ return account, nil
+}
+
// GetMinerCharts trims a miner's chart samples older than 48h (172800s)
// and returns the newest hashNum+1 points, newest first.
func (r *RedisClient) GetMinerCharts(hashNum int64, login string) (stats []*MinerCharts, err error) {

	tx := r.client.Multi()
	defer tx.Close()
	now := util.MakeTimestamp() / 1000
	cmds, err := tx.Exec(func() error {
		tx.ZRemRangeByScore(r.formatKey("charts", "miner", login), "-inf", fmt.Sprint("(", now-172800))
		tx.ZRevRangeWithScores(r.formatKey("charts", "miner", login), 0, hashNum)
		return nil
	})
	if err != nil {
		return nil, err
	}
	// cmds[1] holds the ZREVRANGE reply (cmds[0] is the trim).
	stats = convertMinerChartsResults(cmds[1].(*redis.ZSliceCmd))
	return stats, nil
}
+
// GetPaymentCharts returns up to the 361 most recent payments for a
// miner, newest first, for the payment-history chart.
func (r *RedisClient) GetPaymentCharts(login string) (stats []*PaymentCharts, err error) {

	tx := r.client.Multi()
	defer tx.Close()
	cmds, err := tx.Exec(func() error {
		tx.ZRevRangeWithScores(r.formatKey("payments", login), 0, 360)
		return nil
	})
	if err != nil {
		return nil, err
	}
	stats = convertPaymentChartsResults(cmds[0].(*redis.ZSliceCmd))
	//fmt.Println(stats)
	return stats, nil
}
+
+func (r *RedisClient) WriteNodeState(id string, height uint64, diff *big.Int) error {
+ tx := r.client.Multi()
+ defer tx.Close()
+
+ now := util.MakeTimestamp() / 1000
+
+ _, err := tx.Exec(func() error {
+ tx.HSet(r.formatKey("nodes"), join(id, "name"), id)
+ tx.HSet(r.formatKey("nodes"), join(id, "height"), strconv.FormatUint(height, 10))
+ tx.HSet(r.formatKey("nodes"), join(id, "difficulty"), diff.String())
+ tx.HSet(r.formatKey("nodes"), join(id, "lastBeat"), strconv.FormatInt(now, 10))
+ return nil
+ })
+ return err
+}
+
+func (r *RedisClient) GetNodeStates() ([]map[string]interface{}, error) {
+ cmd := r.client.HGetAllMap(r.formatKey("nodes"))
+ if cmd.Err() != nil {
+ return nil, cmd.Err()
+ }
+ m := make(map[string]map[string]interface{})
+ for key, value := range cmd.Val() {
+ parts := strings.Split(key, ":")
+ if val, ok := m[parts[0]]; ok {
+ val[parts[1]] = value
+ } else {
+ node := make(map[string]interface{})
+ node[parts[1]] = value
+ m[parts[0]] = node
+ }
+ }
+ v := make([]map[string]interface{}, len(m), len(m))
+ i := 0
+ for _, value := range m {
+ v[i] = value
+ i++
+ }
+ return v, nil
+}
+
// checkPoWExist records the share's (nonce, powHash, mixDigest) triple
// in a height-scored zset and reports whether it was already present
// (duplicate-share detection). Entries more than 8 blocks old are swept.
func (r *RedisClient) checkPoWExist(height uint64, params []string) (bool, error) {
	// Sweep PoW backlog for previous blocks, we have 3 templates back in RAM
	r.client.ZRemRangeByScore(r.formatKey("pow"), "-inf", fmt.Sprint("(", height-8))
	// ZADD returns the number of members added; 0 means it already existed.
	val, err := r.client.ZAdd(r.formatKey("pow"), redis.Z{Score: float64(height), Member: strings.Join(params, ":")}).Result()
	return val == 0, err
}
+
// WriteShare records a valid share from login/worker id. Returns
// (true, nil) when the share is a duplicate. diff is the share
// difficulty; window bounds how long hashrate entries live.
func (r *RedisClient) WriteShare(login, id string, params []string, diff int64, height uint64, window time.Duration, hostname string) (bool, error) {
	exist, err := r.checkPoWExist(height, params)
	if err != nil {
		return false, err
	}
	// Duplicate share, (nonce, powHash, mixDigest) pair exist
	if exist {
		return true, nil
	}
	tx := r.client.Multi()
	defer tx.Close()

	ms := util.MakeTimestamp()
	ts := ms / 1000

	_, err = tx.Exec(func() error {
		r.writeShare(tx, ms, ts, login, id, diff, window, hostname)
		// Running total for the current round, shown by the frontend.
		tx.HIncrBy(r.formatKey("stats"), "roundShares", diff)
		return nil
	})
	return false, err
}
+
// WriteBlock records a block-finding share and closes the current
// round: the share is written like WriteShare, round counters are
// snapshotted into a per-round hash keyed by (height, nonce), the PPLNS
// window is tallied into the same hash, and a candidate entry is queued
// for the unlocker. Returns (true, nil) for a duplicate solution.
// Order-sensitive: result extraction below indexes cmds relative to the
// queued command order.
func (r *RedisClient) WriteBlock(login, id string, params []string, diff, roundDiff int64, height uint64, window time.Duration, hostname string) (bool, error) {
	exist, err := r.checkPoWExist(height, params)
	if err != nil {
		return false, err
	}
	// Duplicate share, (nonce, powHash, mixDigest) pair exist
	if exist {
		return true, nil
	}
	tx := r.client.Multi()
	defer tx.Close()

	ms := util.MakeTimestamp()
	ts := ms / 1000

	cmds, err := tx.Exec(func() error {
		r.writeShare(tx, ms, ts, login, id, diff, window, hostname)
		tx.HSet(r.formatKey("stats"), "lastBlockFound", strconv.FormatInt(ts, 10))
		tx.HDel(r.formatKey("stats"), "roundShares")
		tx.ZIncrBy(r.formatKey("finders"), 1, login)
		tx.HIncrBy(r.formatKey("miners", login), "blocksFound", 1)
		tx.HGetAllMap(r.formatKey("shares", "roundCurrent"))
		tx.Del(r.formatKey("shares", "roundCurrent"))
		tx.LRange(r.formatKey("lastshares"), 0, r.pplns)
		return nil
	})
	if err != nil {
		return false, err
	} else {

		// Last queued command: the PPLNS window snapshot.
		shares := cmds[len(cmds)-1].(*redis.StringSliceCmd).Val()

		tx2 := r.client.Multi()
		defer tx2.Close()

		// Count occurrences of each login inside the PPLNS window.
		totalshares := make(map[string]int64)
		for _, val := range shares {
			totalshares[val] += 1
		}

		_, err := tx2.Exec(func() error {
			for k, v := range totalshares {
				tx2.HIncrBy(r.formatRound(int64(height), params[0]), k, v)
			}
			return nil
		})
		if err != nil {
			return false, err
		}

		// Third-from-last command: the roundCurrent hash, summed to get
		// the candidate's total share count.
		sharesMap, _ := cmds[len(cmds)-3].(*redis.StringStringMapCmd).Result()
		totalShares := int64(0)
		for _, v := range sharesMap {
			n, _ := strconv.ParseInt(v, 10, 64)
			totalShares += n
		}
		hashHex := strings.Join(params, ":")
		s := join(hashHex, ts, roundDiff, totalShares)
		cmd := r.client.ZAdd(r.formatKey("blocks", "candidates"), redis.Z{Score: float64(height), Member: s})
		return false, cmd.Err()
	}
}
+
// ID is the worker name
// writeShare queues the per-share bookkeeping inside an already-open
// MULTI transaction: PPLNS queue entries (one per GH of difficulty),
// the current-round counter, and the pool/miner hashrate zsets.
func (r *RedisClient) writeShare(tx *redis.Multi, ms, ts int64, login, id string, diff int64, expire time.Duration, hostname string) {
	/* # Note To Me:
	   Will have to write to get from redis the current value for round
	   shares and increase by 1, then include the new number to be added to redis
	*/

	// One PPLNS queue entry per 1e9 (1 GH) of share difficulty.
	times := int(diff / 1000000000)

	// Moved get hostname to stratums

	for i := 0; i < times; i++ {
		tx.LPush(r.formatKey("lastshares"), login)
	}
	// Cap the PPLNS window at pplns entries.
	tx.LTrim(r.formatKey("lastshares"), 0, r.pplns)

	tx.HIncrBy(r.formatKey("shares", "roundCurrent"), login, diff)
	// For aggregation of hashrate, to store value in hashrate key
	tx.ZAdd(r.formatKey("hashrate"), redis.Z{Score: float64(ts), Member: join(diff, login, id, ms, diff, hostname)})
	// For separate miner's workers hashrate, to store under hashrate table under login key
	tx.ZAdd(r.formatKey("hashrate", login), redis.Z{Score: float64(ts), Member: join(diff, id, ms, diff, hostname)})
	// Will delete hashrates for miners that gone
	tx.Expire(r.formatKey("hashrate", login), expire)
	tx.HSet(r.formatKey("miners", login), "lastShare", strconv.FormatInt(ts, 10))
}
+
+func (r *RedisClient) formatKey(args ...interface{}) string {
+ return join(r.prefix, join(args...))
+}
+
+func (r *RedisClient) formatRound(height int64, nonce string) string {
+ return r.formatKey("shares", "round"+strconv.FormatInt(height, 10), nonce)
+}
+
// join renders each argument with redis-friendly formatting and joins
// them with ":". Booleans become "1"/"0"; nil *big.Int/*big.Rat become
// "0"; floats carry no fractional digits; rationals carry nine. Panics
// on unsupported types to surface programming errors early.
// Rewritten as a binding type switch ("switch x := v.(type)") to drop
// the repeated per-case type assertions.
func join(args ...interface{}) string {
	s := make([]string, len(args))
	for i, v := range args {
		switch x := v.(type) {
		case string:
			s[i] = x
		case int64:
			s[i] = strconv.FormatInt(x, 10)
		case uint64:
			s[i] = strconv.FormatUint(x, 10)
		case float64:
			s[i] = strconv.FormatFloat(x, 'f', 0, 64)
		case bool:
			if x {
				s[i] = "1"
			} else {
				s[i] = "0"
			}
		case *big.Int:
			if x != nil {
				s[i] = x.String()
			} else {
				s[i] = "0"
			}
		case *big.Rat:
			if x != nil {
				s[i] = x.FloatString(9)
			} else {
				s[i] = "0"
			}
		default:
			panic("Invalid type specified for conversion")
		}
	}
	return strings.Join(s, ":")
}
+
+func (r *RedisClient) GetCandidates(maxHeight int64) ([]*BlockData, error) {
+ option := redis.ZRangeByScore{Min: "0", Max: strconv.FormatInt(maxHeight, 10)}
+ cmd := r.client.ZRangeByScoreWithScores(r.formatKey("blocks", "candidates"), option)
+ if cmd.Err() != nil {
+ return nil, cmd.Err()
+ }
+ return convertCandidateResults(cmd), nil
+}
+
+func (r *RedisClient) GetImmatureBlocks(maxHeight int64) ([]*BlockData, error) {
+ option := redis.ZRangeByScore{Min: "0", Max: strconv.FormatInt(maxHeight, 10)}
+ cmd := r.client.ZRangeByScoreWithScores(r.formatKey("blocks", "immature"), option)
+ if cmd.Err() != nil {
+ return nil, cmd.Err()
+ }
+ return convertBlockResults(cmd), nil
+}
+
// GetRewards returns per-block rewards recorded for a miner.
// NOTE(review): Max is strconv.FormatInt(10, 10) == "10", i.e. only
// members scored <= 10, yet WriteReward scores members by block
// timestamp — this range looks wrong; confirm the intended score window.
func (r *RedisClient) GetRewards(login string) ([]*RewardData, error) {
	option := redis.ZRangeByScore{Min: "0", Max: strconv.FormatInt(10, 10)}
	cmd := r.client.ZRangeByScoreWithScores(r.formatKey("rewards", login), option)
	if cmd.Err() != nil {
		return nil, cmd.Err()
	}
	return convertRewardResults(cmd), nil
}
+
+func (r *RedisClient) GetRoundShares(height int64, nonce string) (map[string]int64, error) {
+ result := make(map[string]int64)
+ cmd := r.client.HGetAllMap(r.formatRound(height, nonce))
+ if cmd.Err() != nil {
+ return nil, cmd.Err()
+ }
+ sharesMap, _ := cmd.Result()
+ for login, v := range sharesMap {
+ n, _ := strconv.ParseInt(v, 10, 64)
+ result[login] = n
+ }
+ return result, nil
+}
+
+func (r *RedisClient) GetPayees() ([]string, error) {
+ payees := make(map[string]struct{})
+ var result []string
+ var c int64
+
+ for {
+ var keys []string
+ var err error
+ c, keys, err = r.client.Scan(c, r.formatKey("miners", "*"), 100).Result()
+ if err != nil {
+ return nil, err
+ }
+ for _, row := range keys {
+ login := strings.Split(row, ":")[2]
+ payees[login] = struct{}{}
+ }
+ if c == 0 {
+ break
+ }
+ }
+ for login, _ := range payees {
+ result = append(result, login)
+ }
+ return result, nil
+}
+
+func (r *RedisClient) GetTotalShares() (int64, error) {
+ cmd := r.client.LLen(r.formatKey("lastshares"))
+ if cmd.Err() == redis.Nil {
+ return 0, nil
+ } else if cmd.Err() != nil {
+ return 0, cmd.Err()
+ }
+ return cmd.Val(), nil
+}
+
+func (r *RedisClient) GetBalance(login string) (int64, error) {
+ cmd := r.client.HGet(r.formatKey("miners", login), "balance")
+ if cmd.Err() == redis.Nil {
+ return 0, nil
+ } else if cmd.Err() != nil {
+ return 0, cmd.Err()
+ }
+ return cmd.Int64()
+}
+
+func (r *RedisClient) LockPayouts(login string, amount int64) error {
+ key := r.formatKey("payments", "lock")
+ result := r.client.SetNX(key, join(login, amount), 0).Val()
+ if !result {
+ return fmt.Errorf("Unable to acquire lock '%s'", key)
+ }
+ return nil
+}
+
+func (r *RedisClient) UnlockPayouts() error {
+ key := r.formatKey("payments", "lock")
+ _, err := r.client.Del(key).Result()
+ return err
+}
+
+func (r *RedisClient) IsPayoutsLocked() (bool, error) {
+ _, err := r.client.Get(r.formatKey("payments", "lock")).Result()
+ if err == redis.Nil {
+ return false, nil
+ } else if err != nil {
+ return false, err
+ } else {
+ return true, nil
+ }
+}
+
// PendingPayment is a queued payout parsed from the "payments:pending"
// zset (score = timestamp, member = "address:amount").
type PendingPayment struct {
	Timestamp int64  `json:"timestamp"`
	Amount    int64  `json:"amount"`
	Address   string `json:"login"`
}
+
+func (r *RedisClient) GetPendingPayments() []*PendingPayment {
+ raw := r.client.ZRevRangeWithScores(r.formatKey("payments", "pending"), 0, -1)
+ var result []*PendingPayment
+ for _, v := range raw.Val() {
+ // timestamp -> "address:amount"
+ payment := PendingPayment{}
+ payment.Timestamp = int64(v.Score)
+ fields := strings.Split(v.Member.(string), ":")
+ payment.Address = fields[0]
+ payment.Amount, _ = strconv.ParseInt(fields[1], 10, 64)
+ result = append(result, &payment)
+ }
+ return result
+}
+
// Deduct miner's balance for payment
// UpdateBalance atomically moves amount from the miner's spendable
// balance to "pending" (per-miner and pool-wide) and queues the payout;
// RollbackBalance is the inverse for a failed send.
func (r *RedisClient) UpdateBalance(login string, amount int64) error {
	tx := r.client.Multi()
	defer tx.Close()

	ts := util.MakeTimestamp() / 1000

	_, err := tx.Exec(func() error {
		tx.HIncrBy(r.formatKey("miners", login), "balance", (amount * -1))
		tx.HIncrBy(r.formatKey("miners", login), "pending", amount)
		tx.HIncrBy(r.formatKey("finances"), "balance", (amount * -1))
		tx.HIncrBy(r.formatKey("finances"), "pending", amount)
		tx.ZAdd(r.formatKey("payments", "pending"), redis.Z{Score: float64(ts), Member: join(login, amount)})
		return nil
	})
	return err
}
+
// RollbackBalance undoes UpdateBalance after a failed send: moves the
// amount from "pending" back to "balance" and removes the queued payout.
func (r *RedisClient) RollbackBalance(login string, amount int64) error {
	tx := r.client.Multi()
	defer tx.Close()

	_, err := tx.Exec(func() error {
		tx.HIncrBy(r.formatKey("miners", login), "balance", amount)
		tx.HIncrBy(r.formatKey("miners", login), "pending", (amount * -1))
		tx.HIncrBy(r.formatKey("finances"), "balance", amount)
		tx.HIncrBy(r.formatKey("finances"), "pending", (amount * -1))
		tx.ZRem(r.formatKey("payments", "pending"), join(login, amount))
		return nil
	})
	return err
}
+
// WritePayment finalizes a successful payout transaction: moves amount
// from "pending" to "paid", appends to the global and per-miner payment
// logs (capped at 10000 / 100 entries via ZRemRangeByRank), removes the
// pending entry, releases the payout lock and bumps payment counters.
func (r *RedisClient) WritePayment(login, txHash string, amount int64) error {
	tx := r.client.Multi()
	defer tx.Close()

	ts := util.MakeTimestamp() / 1000

	_, err := tx.Exec(func() error {
		tx.HIncrBy(r.formatKey("miners", login), "pending", (amount * -1))
		tx.HIncrBy(r.formatKey("miners", login), "paid", amount)
		tx.HIncrBy(r.formatKey("finances"), "pending", (amount * -1))
		tx.HIncrBy(r.formatKey("finances"), "paid", amount)
		tx.ZAdd(r.formatKey("payments", "all"), redis.Z{Score: float64(ts), Member: join(txHash, login, amount)})
		tx.ZRemRangeByRank(r.formatKey("payments", "all"), 0, -10000)
		tx.ZAdd(r.formatKey("payments", login), redis.Z{Score: float64(ts), Member: join(txHash, amount)})
		tx.ZRemRangeByRank(r.formatKey("payments", login), 0, -100)
		tx.ZRem(r.formatKey("payments", "pending"), join(login, amount))
		tx.Del(r.formatKey("payments", "lock"))
		tx.HIncrBy(r.formatKey("paymentsTotal"), "all", 1)
		tx.HIncrBy(r.formatKey("paymentsTotal"), login, 1)
		return nil
	})
	return err
}
+
// WriteReward records one per-block reward entry for a miner, scored by
// block timestamp. When a reward flips maturity, the entry with the
// opposite immature flag is removed so only one state remains. Entries
// older than 40 days are trimmed. Zero/negative amounts are ignored.
func (r *RedisClient) WriteReward(login string, amount int64, percent *big.Rat, immature bool, block *BlockData) error {
	if amount <= 0 {
		return nil
	}
	tx := r.client.Multi()
	defer tx.Close()

	addStr := join(amount, percent, immature, block.Hash, block.Height, block.Timestamp)
	remStr := join(amount, percent, !immature, block.Hash, block.Height, block.Timestamp)
	remscore := block.Timestamp - 3600*24*40 // Store the last 40 Days

	_, err := tx.Exec(func() error {
		tx.ZAdd(r.formatKey("rewards", login), redis.Z{Score: float64(block.Timestamp), Member: addStr})
		tx.ZRem(r.formatKey("rewards", login), remStr)
		tx.ZRemRangeByScore(r.formatKey("rewards", login), "-inf", "("+strconv.FormatInt(remscore, 10))

		return nil
	})
	return err
}
+
// WriteImmatureBlock stores a freshly-unlocked block and credits each
// miner's immature balance, logging the per-miner amounts under a
// credits:immature key so they can be reversed on maturity or orphan
// (see WriteMaturedBlock / WriteOrphan).
func (r *RedisClient) WriteImmatureBlock(block *BlockData, roundRewards map[string]int64) error {
	tx := r.client.Multi()
	defer tx.Close()

	_, err := tx.Exec(func() error {
		r.writeImmatureBlock(tx, block)
		total := int64(0)
		for login, amount := range roundRewards {
			total += amount
			tx.HIncrBy(r.formatKey("miners", login), "immature", amount)
			tx.HSetNX(r.formatKey("credits", "immature", block.Height, block.Hash), login, strconv.FormatInt(amount, 10))
		}
		tx.HIncrBy(r.formatKey("finances"), "immature", total)
		return nil
	})
	return err
}
+
+func (r *RedisClient) WriteMaturedBlock(block *BlockData, roundRewards map[string]int64) error {
+ creditKey := r.formatKey("credits", "immature", block.RoundHeight, block.Hash)
+ tx, err := r.client.Watch(creditKey)
+ // Must decrement immatures using existing log entry
+ immatureCredits := tx.HGetAllMap(creditKey)
+ if err != nil {
+ return err
+ }
+ defer tx.Close()
+
+ ts := util.MakeTimestamp() / 1000
+ value := join(block.Hash, ts, block.Reward)
+
+ _, err = tx.Exec(func() error {
+ r.writeMaturedBlock(tx, block)
+ tx.ZAdd(r.formatKey("credits", "all"), redis.Z{Score: float64(block.Height), Member: value})
+
+ // Decrement immature balances
+ totalImmature := int64(0)
+ for login, amountString := range immatureCredits.Val() {
+ amount, _ := strconv.ParseInt(amountString, 10, 64)
+ totalImmature += amount
+ tx.HIncrBy(r.formatKey("miners", login), "immature", (amount * -1))
+ }
+
+ // Increment balances
+ total := int64(0)
+ for login, amount := range roundRewards {
+ total += amount
+ // NOTICE: Maybe expire round reward entry in 604800 (a week)?
+ tx.HIncrBy(r.formatKey("miners", login), "balance", amount)
+ tx.HSetNX(r.formatKey("credits", block.Height, block.Hash), login, strconv.FormatInt(amount, 10))
+ }
+ tx.Del(creditKey)
+ tx.HIncrBy(r.formatKey("finances"), "balance", total)
+ tx.HIncrBy(r.formatKey("finances"), "immature", (totalImmature * -1))
+ tx.HSet(r.formatKey("finances"), "lastCreditHeight", strconv.FormatInt(block.Height, 10))
+ tx.HSet(r.formatKey("finances"), "lastCreditHash", block.Hash)
+ tx.HIncrBy(r.formatKey("finances"), "totalMined", block.RewardInShannon())
+ tx.Expire(r.formatKey("credits", block.Height, block.Hash), 604800*time.Second)
+ return nil
+ })
+ return err
+}
+
+func (r *RedisClient) WriteOrphan(block *BlockData) error {
+ creditKey := r.formatKey("credits", "immature", block.RoundHeight, block.Hash)
+ tx, err := r.client.Watch(creditKey)
+ // Must decrement immatures using existing log entry
+ immatureCredits := tx.HGetAllMap(creditKey)
+ if err != nil {
+ return err
+ }
+ defer tx.Close()
+
+ _, err = tx.Exec(func() error {
+ r.writeMaturedBlock(tx, block)
+
+ // Decrement immature balances
+ totalImmature := int64(0)
+ for login, amountString := range immatureCredits.Val() {
+ amount, _ := strconv.ParseInt(amountString, 10, 64)
+ totalImmature += amount
+ tx.HIncrBy(r.formatKey("miners", login), "immature", (amount * -1))
+ }
+ tx.Del(creditKey)
+ tx.HIncrBy(r.formatKey("finances"), "immature", (totalImmature * -1))
+ return nil
+ })
+ return err
+}
+
+func (r *RedisClient) WritePendingOrphans(blocks []*BlockData) error {
+ tx := r.client.Multi()
+ defer tx.Close()
+
+ _, err := tx.Exec(func() error {
+ for _, block := range blocks {
+ r.writeImmatureBlock(tx, block)
+ }
+ return nil
+ })
+ return err
+}
+
// writeImmatureBlock moves a candidate entry into the immature set,
// renaming the round share key when the height changed (e.g. uncles).
// Must be called inside an open MULTI transaction.
func (r *RedisClient) writeImmatureBlock(tx *redis.Multi, block *BlockData) {
	// Redis 2.8.x returns "ERR source and destination objects are the same"
	if block.Height != block.RoundHeight {
		tx.Rename(r.formatRound(block.RoundHeight, block.Nonce), r.formatRound(block.Height, block.Nonce))
	}
	tx.ZRem(r.formatKey("blocks", "candidates"), block.candidateKey)
	tx.ZAdd(r.formatKey("blocks", "immature"), redis.Z{Score: float64(block.Height), Member: block.key()})
}
+
// writeMaturedBlock drops the block's round share data and moves it from
// the immature set to the matured set. Must be called inside an open MULTI
// transaction.
func (r *RedisClient) writeMaturedBlock(tx *redis.Multi, block *BlockData) {
	tx.Del(r.formatRound(block.RoundHeight, block.Nonce))
	tx.ZRem(r.formatKey("blocks", "immature"), block.immatureKey)
	tx.ZAdd(r.formatKey("blocks", "matured"), redis.Z{Score: float64(block.Height), Member: block.key()})
}
+
// IsMinerExists reports whether a miners:<login> hash exists in Redis.
func (r *RedisClient) IsMinerExists(login string) (bool, error) {
	return r.client.Exists(r.formatKey("miners", login)).Result()
}
+
+func (r *RedisClient) GetMinerStats(login string, maxPayments int64) (map[string]interface{}, error) {
+ stats := make(map[string]interface{})
+
+ tx := r.client.Multi()
+ defer tx.Close()
+
+ cmds, err := tx.Exec(func() error {
+ tx.HGetAllMap(r.formatKey("miners", login))
+ tx.ZRevRangeWithScores(r.formatKey("payments", login), 0, maxPayments-1)
+ tx.HGet(r.formatKey("paymentsTotal"), login)
+ tx.HGet(r.formatKey("shares", "currentShares"), login)
+ tx.LRange(r.formatKey("lastshares"), 0, r.pplns)
+ tx.ZRevRangeWithScores(r.formatKey("rewards", login), 0, 39)
+ tx.ZRevRangeWithScores(r.formatKey("rewards", login), 0, -1)
+
+ return nil
+ })
+
+ if err != nil && err != redis.Nil {
+ return nil, err
+ } else {
+ result, _ := cmds[0].(*redis.StringStringMapCmd).Result()
+ stats["stats"] = convertStringMap(result)
+ payments := convertPaymentsResults(cmds[1].(*redis.ZSliceCmd))
+ stats["payments"] = payments
+ stats["paymentsTotal"], _ = cmds[2].(*redis.StringCmd).Int64()
+ shares := cmds[4].(*redis.StringSliceCmd).Val()
+ csh := 0
+ for _, val := range shares {
+ if val == login {
+ csh++
+ }
+ }
+ stats["roundShares"] = csh
+ }
+
+ return stats, nil
+}
+
// convertStringMap converts a string-valued map into a generic map,
// parsing every value that looks like a base-10 integer into an int64 and
// leaving everything else as the raw string.
func convertStringMap(m map[string]string) map[string]interface{} {
	out := make(map[string]interface{}, len(m))
	for key, raw := range m {
		if n, parseErr := strconv.ParseInt(raw, 10, 64); parseErr == nil {
			out[key] = n
		} else {
			out[key] = raw
		}
	}
	return out
}
+
// WARNING: Must run it periodically to flush out of window hashrate entries
// FlushStaleStats prunes hashrate ZSET entries older than the given
// windows: the pool-wide set by `window`, each per-miner set by
// `largeWindow`. Returns the total number of entries removed.
func (r *RedisClient) FlushStaleStats(window, largeWindow time.Duration) (int64, error) {
	now := util.MakeTimestamp() / 1000
	// The "(" prefix makes the ZRemRangeByScore upper bound exclusive.
	max := fmt.Sprint("(", now-int64(window/time.Second))
	total, err := r.client.ZRemRangeByScore(r.formatKey("hashrate"), "-inf", max).Result()
	if err != nil {
		return total, err
	}

	var c int64
	miners := make(map[string]struct{})
	max = fmt.Sprint("(", now-int64(largeWindow/time.Second))

	// SCAN-based cursor walk over hashrate:<login> keys; `miners` dedupes
	// logins because SCAN may return the same key more than once.
	for {
		var keys []string
		var err error
		c, keys, err = r.client.Scan(c, r.formatKey("hashrate", "*"), 100).Result()
		if err != nil {
			return total, err
		}
		for _, row := range keys {
			// Assumes key layout <prefix>:hashrate:<login>; a prefix
			// containing ':' would break this index — TODO confirm.
			login := strings.Split(row, ":")[2]
			if _, ok := miners[login]; !ok {
				n, err := r.client.ZRemRangeByScore(r.formatKey("hashrate", login), "-inf", max).Result()
				if err != nil {
					return total, err
				}
				miners[login] = struct{}{}
				total += n
			}
		}
		// Cursor 0 means the scan is complete.
		if c == 0 {
			break
		}
	}
	return total, nil
}
+
// CollectStats gathers the pool-wide stats consumed by the API frontend:
// hashrate entries, candidate/immature/matured blocks, payments and share
// counters. smallWindow bounds the hashrate window; maxBlocks and
// maxPayments cap the returned list sizes.
func (r *RedisClient) CollectStats(smallWindow time.Duration, maxBlocks, maxPayments int64) (map[string]interface{}, error) {
	window := int64(smallWindow / time.Second)
	stats := make(map[string]interface{})

	tx := r.client.Multi()
	defer tx.Close()

	now := util.MakeTimestamp() / 1000

	cmds, err := tx.Exec(func() error {
		// Results are consumed positionally below; keep this order.
		tx.ZRemRangeByScore(r.formatKey("hashrate"), "-inf", fmt.Sprint("(", now-window)) // cmds[0]: prune stale entries first
		tx.ZRangeWithScores(r.formatKey("hashrate"), 0, -1)                               // cmds[1]
		tx.HGetAllMap(r.formatKey("stats"))                                               // cmds[2]
		tx.ZRevRangeWithScores(r.formatKey("blocks", "candidates"), 0, -1)                // cmds[3]
		tx.ZRevRangeWithScores(r.formatKey("blocks", "immature"), 0, -1)                  // cmds[4]
		tx.ZRevRangeWithScores(r.formatKey("blocks", "matured"), 0, maxBlocks-1)          // cmds[5]
		tx.ZCard(r.formatKey("blocks", "candidates"))                                     // cmds[6]
		tx.ZCard(r.formatKey("blocks", "immature"))                                       // cmds[7]
		tx.ZCard(r.formatKey("blocks", "matured"))                                        // cmds[8]
		tx.HGet(r.formatKey("paymentsTotal"), "all")                                      // cmds[9]
		tx.ZRevRangeWithScores(r.formatKey("payments", "all"), 0, maxPayments-1)          // cmds[10]
		tx.LLen(r.formatKey("lastshares"))                                                // cmds[11]
		return nil
	})

	// redis.Nil only means some keys are missing; treat it as empty data.
	if (err != nil) && (err != redis.Nil) {
		return nil, err
	}

	result, _ := cmds[2].(*redis.StringStringMapCmd).Result()
	result["nShares"] = strconv.FormatInt(cmds[11].(*redis.IntCmd).Val(), 10)
	stats["stats"] = convertStringMap(result)
	candidates := convertCandidateResults(cmds[3].(*redis.ZSliceCmd))
	stats["candidates"] = candidates
	stats["candidatesTotal"] = cmds[6].(*redis.IntCmd).Val()

	immature := convertBlockResults(cmds[4].(*redis.ZSliceCmd))
	stats["immature"] = immature
	stats["immatureTotal"] = cmds[7].(*redis.IntCmd).Val()

	matured := convertBlockResults(cmds[5].(*redis.ZSliceCmd))
	stats["matured"] = matured
	stats["maturedTotal"] = cmds[8].(*redis.IntCmd).Val()

	payments := convertPaymentsResults(cmds[10].(*redis.ZSliceCmd))
	stats["payments"] = payments
	stats["paymentsTotal"], _ = cmds[9].(*redis.StringCmd).Int64()

	totalHashrate, miners := convertMinersStats(window, cmds[1].(*redis.ZSliceCmd))
	stats["miners"] = miners
	stats["minersTotal"] = len(miners)
	stats["hashrate"] = totalHashrate
	return stats, nil
}
+
+func (r *RedisClient) CollectWorkersStats(sWindow, lWindow time.Duration, login string) (map[string]interface{}, error) {
+ smallWindow := int64(sWindow / time.Second)
+ largeWindow := int64(lWindow / time.Second)
+ stats := make(map[string]interface{})
+
+ tx := r.client.Multi()
+ defer tx.Close()
+
+ now := util.MakeTimestamp() / 1000
+
+ cmds, err := tx.Exec(func() error {
+ tx.ZRemRangeByScore(r.formatKey("hashrate", login), "-inf", fmt.Sprint("(", now-largeWindow))
+ tx.ZRangeWithScores(r.formatKey("hashrate", login), 0, -1)
+ tx.ZRevRangeWithScores(r.formatKey("rewards", login), 0, 39)
+ tx.ZRevRangeWithScores(r.formatKey("rewards", login), 0, -1)
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ totalHashrate := int64(0)
+ currentHashrate := int64(0)
+ online := int64(0)
+ offline := int64(0)
+ workers := convertWorkersStats(smallWindow, cmds[1].(*redis.ZSliceCmd))
+
+ for id, worker := range workers {
+ timeOnline := now - worker.startedAt
+ if timeOnline < 600 {
+ timeOnline = 600
+ }
+
+ boundary := timeOnline
+ if timeOnline >= smallWindow {
+ boundary = smallWindow
+ }
+ worker.HR = worker.HR / boundary
+
+ boundary = timeOnline
+ if timeOnline >= largeWindow {
+ boundary = largeWindow
+ }
+ worker.TotalHR = worker.TotalHR / boundary
+
+ if worker.LastBeat < (now - smallWindow/2) {
+ worker.Offline = true
+ offline++
+ } else {
+ online++
+ }
+
+ currentHashrate += worker.HR
+ totalHashrate += worker.TotalHR
+ workers[id] = worker
+ }
+
+ stats["workers"] = workers
+ stats["workersTotal"] = len(workers)
+ stats["workersOnline"] = online
+ stats["workersOffline"] = offline
+ stats["hashrate"] = totalHashrate
+ stats["currentHashrate"] = currentHashrate
+
+ stats["rewards"] = convertRewardResults(cmds[2].(*redis.ZSliceCmd)) // last 40
+ rewards := convertRewardResults(cmds[3].(*redis.ZSliceCmd)) // all
+
+ var dorew []*SumRewardData
+ dorew = append(dorew, &SumRewardData{Name: "Last 60 minutes", Interval: 3600, Offset: 0})
+ dorew = append(dorew, &SumRewardData{Name: "Last 12 hours", Interval: 3600 * 12, Offset: 0})
+ dorew = append(dorew, &SumRewardData{Name: "Last 24 hours", Interval: 3600 * 24, Offset: 0})
+ dorew = append(dorew, &SumRewardData{Name: "Last 7 days", Interval: 3600 * 24 * 7, Offset: 0})
+ dorew = append(dorew, &SumRewardData{Name: "Last 30 days", Interval: 3600 * 24 * 30, Offset: 0})
+
+ for _, reward := range rewards {
+
+ for _, dore := range dorew {
+ dore.Reward += 0
+ if reward.Timestamp > now-dore.Interval {
+ dore.Reward += reward.Reward
+ }
+ }
+ }
+ stats["sumrewards"] = dorew
+ stats["24hreward"] = dorew[2].Reward
+ return stats, nil
+}
+
// CollectLuckStats computes rolling luck (average shares/difficulty),
// uncle rate and orphan rate over the most recent blocks. windows is an
// ascending list of block counts; each output row is keyed by the number
// of blocks actually available for that window.
func (r *RedisClient) CollectLuckStats(windows []int) (map[string]interface{}, error) {
	stats := make(map[string]interface{})

	tx := r.client.Multi()
	defer tx.Close()

	max := int64(windows[len(windows)-1])

	cmds, err := tx.Exec(func() error {
		tx.ZRevRangeWithScores(r.formatKey("blocks", "immature"), 0, -1)
		tx.ZRevRangeWithScores(r.formatKey("blocks", "matured"), 0, max-1)
		return nil
	})
	if err != nil {
		return stats, err
	}
	// Newest blocks first: all immature entries, then matured up to max.
	blocks := convertBlockResults(cmds[0].(*redis.ZSliceCmd), cmds[1].(*redis.ZSliceCmd))

	// calcLuck averages the metrics over at most `max` of the newest blocks
	// and reports how many blocks actually contributed.
	calcLuck := func(max int) (int, float64, float64, float64) {
		var total int
		var sharesDiff, uncles, orphans float64
		for i, block := range blocks {
			if i > (max - 1) {
				break
			}
			if block.Uncle {
				uncles++
			}
			if block.Orphan {
				orphans++
			}
			sharesDiff += float64(block.TotalShares) / float64(block.Difficulty)
			total++
		}
		if total > 0 {
			sharesDiff /= float64(total)
			uncles /= float64(total)
			orphans /= float64(total)
		}
		return total, sharesDiff, uncles, orphans
	}
	for _, max := range windows {
		total, sharesDiff, uncleRate, orphanRate := calcLuck(max)
		row := map[string]float64{
			"luck": sharesDiff, "uncleRate": uncleRate, "orphanRate": orphanRate,
		}
		stats[strconv.Itoa(total)] = row
		// Stop once there are fewer blocks than the requested window size.
		if total < max {
			break
		}
	}
	return stats, nil
}
+
// convertCandidateResults parses the blocks:candidates ZSET into BlockData
// values. The score is the block height; the member encodes the round data.
func convertCandidateResults(raw *redis.ZSliceCmd) []*BlockData {
	var result []*BlockData
	for _, v := range raw.Val() {
		// "nonce:powHash:mixDigest:timestamp:diff:totalShares"
		block := BlockData{}
		block.Height = int64(v.Score)
		block.RoundHeight = block.Height
		fields := strings.Split(v.Member.(string), ":")
		block.Nonce = fields[0]
		block.PowHash = fields[1]
		block.MixDigest = fields[2]
		block.Timestamp, _ = strconv.ParseInt(fields[3], 10, 64)
		block.Difficulty, _ = strconv.ParseInt(fields[4], 10, 64)
		block.TotalShares, _ = strconv.ParseInt(fields[5], 10, 64)
		// Keep the raw member so the candidate can be ZRem'd later.
		block.candidateKey = v.Member.(string)
		result = append(result, &block)
	}
	return result
}
+
// convertRewardResults parses reward ZSET rows (score = timestamp) into
// RewardData; multiple result sets may be passed and are concatenated.
func convertRewardResults(rows ...*redis.ZSliceCmd) []*RewardData {
	var result []*RewardData
	for _, row := range rows {
		for _, v := range row.Val() {
			// "amount:percent:immature:block.Hash:block.height"
			reward := RewardData{}
			reward.Timestamp = int64(v.Score)
			fields := strings.Split(v.Member.(string), ":")
			//block.UncleHeight, _ = strconv.ParseInt(fields[0], 10, 64)
			reward.BlockHash = fields[3]
			reward.Reward, _ = strconv.ParseInt(fields[0], 10, 64)
			reward.Percent, _ = strconv.ParseFloat(fields[1], 64)
			reward.Immature, _ = strconv.ParseBool(fields[2])
			reward.Height, _ = strconv.ParseInt(fields[4], 10, 64)
			result = append(result, &reward)
		}
	}
	return result
}
+
// convertBlockResults parses immature/matured block ZSET rows (score =
// height) into BlockData; multiple result sets are concatenated in order.
func convertBlockResults(rows ...*redis.ZSliceCmd) []*BlockData {
	var result []*BlockData
	for _, row := range rows {
		for _, v := range row.Val() {
			// "uncleHeight:orphan:nonce:blockHash:timestamp:diff:totalShares:rewardInWei"
			block := BlockData{}
			block.Height = int64(v.Score)
			block.RoundHeight = block.Height
			fields := strings.Split(v.Member.(string), ":")
			block.UncleHeight, _ = strconv.ParseInt(fields[0], 10, 64)
			// A non-zero uncle height marks the block as an uncle.
			block.Uncle = block.UncleHeight > 0
			block.Orphan, _ = strconv.ParseBool(fields[1])
			block.Nonce = fields[2]
			block.Hash = fields[3]
			block.Timestamp, _ = strconv.ParseInt(fields[4], 10, 64)
			block.Difficulty, _ = strconv.ParseInt(fields[5], 10, 64)
			block.TotalShares, _ = strconv.ParseInt(fields[6], 10, 64)
			block.RewardString = fields[7]
			block.ImmatureReward = fields[7]
			// Keep the raw member so the entry can be ZRem'd on maturity.
			block.immatureKey = v.Member.(string)
			result = append(result, &block)
		}
	}
	return result
}
+
+// Build per login workers's total shares map {'rig-1': 12345, 'rig-2': 6789, ...}
+// TS => diff, id, ms
+func convertWorkersStats(window int64, raw *redis.ZSliceCmd) map[string]Worker {
+ now := util.MakeTimestamp() / 1000
+ workers := make(map[string]Worker)
+
+ for _, v := range raw.Val() {
+ parts := strings.Split(v.Member.(string), ":")
+ share, _ := strconv.ParseInt(parts[0], 10, 64)
+
+ //By Mohannad
+ var hostname string
+ if len(parts) > 3 {
+ hostname = parts[4]
+ } else {
+ hostname = "unknown"
+ }
+
+ id := parts[1]
+ score := int64(v.Score)
+ worker := workers[id]
+
+ // Add for large window
+ worker.TotalHR += share
+
+ // Addition from Mohannad Otaibi to report Difficulty
+ worker.WorkerDiff = share
+ worker.WorkerHostname = hostname
+ // End Mohannad Adjustments
+
+ // Add for small window if matches
+ if score >= now-window {
+ worker.HR += share
+ }
+
+ if worker.LastBeat < score {
+ worker.LastBeat = score
+ }
+ if worker.startedAt > score || worker.startedAt == 0 {
+ worker.startedAt = score
+ }
+ workers[id] = worker
+ }
+ return workers
+}
+
// convertMinersStats folds the pool-wide hashrate ZSET (members
// "diff:login:...", scored by share timestamp) into per-miner aggregates
// and returns the summed pool hashrate plus the per-miner map.
func convertMinersStats(window int64, raw *redis.ZSliceCmd) (int64, map[string]Miner) {
	now := util.MakeTimestamp() / 1000
	miners := make(map[string]Miner)
	totalHashrate := int64(0)

	// First pass: sum share difficulty and track first/last share times.
	for _, v := range raw.Val() {
		parts := strings.Split(v.Member.(string), ":")
		share, _ := strconv.ParseInt(parts[0], 10, 64)
		id := parts[1]
		score := int64(v.Score)
		miner := miners[id]
		miner.HR += share

		if miner.LastBeat < score {
			miner.LastBeat = score
		}
		if miner.startedAt > score || miner.startedAt == 0 {
			miner.startedAt = score
		}
		miners[id] = miner
	}

	// Second pass: divide by the effective online time (clamped to at
	// least 10 minutes, at most the window) and flag miners silent for
	// more than half the window as offline.
	for id, miner := range miners {
		timeOnline := now - miner.startedAt
		if timeOnline < 600 {
			timeOnline = 600
		}

		boundary := timeOnline
		if timeOnline >= window {
			boundary = window
		}
		miner.HR = miner.HR / boundary

		if miner.LastBeat < (now - window/2) {
			miner.Offline = true
		}
		totalHashrate += miner.HR
		miners[id] = miner
	}
	return totalHashrate, miners
}
+
// convertPaymentsResults parses payment ZSET rows (score = timestamp) into
// generic maps. Members are either "tx:amount" (per-miner rows) or
// "tx:address:amount" (pool-wide rows).
func convertPaymentsResults(raw *redis.ZSliceCmd) []map[string]interface{} {
	var result []map[string]interface{}
	for _, v := range raw.Val() {
		tx := make(map[string]interface{})
		tx["timestamp"] = int64(v.Score)
		fields := strings.Split(v.Member.(string), ":")
		tx["tx"] = fields[0]
		// Individual or whole payments row
		if len(fields) < 3 {
			tx["amount"], _ = strconv.ParseInt(fields[1], 10, 64)
		} else {
			tx["address"] = fields[1]
			tx["amount"], _ = strconv.ParseInt(fields[2], 10, 64)
		}
		result = append(result, tx)
	}
	return result
}
+
+/*
+Timestamp int64 `json:"x"`
+TimeFormat string `json:"timeFormat"`
+Amount int64 `json:"amount"`
+*/
+func convertPaymentChartsResults(raw *redis.ZSliceCmd) []*PaymentCharts {
+ var result []*PaymentCharts
+ for _, v := range raw.Val() {
+ pc := PaymentCharts{}
+ pc.Timestamp = int64(v.Score)
+ tm := time.Unix(pc.Timestamp, 0)
+ pc.TimeFormat = tm.Format("2006-01-02") + " 00_00"
+ fields := strings.Split(v.Member.(string), ":")
+ pc.Amount, _ = strconv.ParseInt(fields[1], 10, 64)
+ //fmt.Printf("%d : %s : %d \n", pc.Timestamp, pc.TimeFormat, pc.Amount)
+
+ var chkAppend bool
+ for _, pcc := range result {
+ if pcc.TimeFormat == pc.TimeFormat {
+ pcc.Amount += pc.Amount
+ chkAppend = true
+ }
+ }
+ if !chkAppend {
+ pc.Timestamp -= int64(math.Mod(float64(v.Score), float64(86400)))
+ result = append(result, &pc)
+ }
+ }
+ return result
+}
+
+func (r *RedisClient) GetCurrentHashrate(login string) (int64, error) {
+ hashrate := r.client.HGet(r.formatKey("currenthashrate", login), "hashrate")
+ if hashrate.Err() == redis.Nil {
+ return 0, nil
+ } else if hashrate.Err() != nil {
+ return 0, hashrate.Err()
+ }
+ return hashrate.Int64()
+}
diff --git a/storage/redis_test.go b/storage/redis_test.go
new file mode 100644
index 0000000..666722e
--- /dev/null
+++ b/storage/redis_test.go
@@ -0,0 +1,329 @@
+package storage
+
+import (
+ "os"
+ "reflect"
+ "strconv"
+ "testing"
+
+ "gopkg.in/redis.v3"
+)
+
+var r *RedisClient
+
+const prefix = "test"
+
// TestMain wires the package-level client to a live local Redis
// (127.0.0.1:6379) under the "test" prefix and wipes that prefix before
// and after the whole run; every test in this file requires that instance.
func TestMain(m *testing.M) {
	r = NewRedisClient(&Config{Endpoint: "127.0.0.1:6379"}, prefix, 3000)
	reset()
	c := m.Run()
	reset()
	os.Exit(c)
}

// TestWriteShareCheckExist exercises WriteShare's duplicate-PoW detection:
// resubmitting identical params close in time reports exist=true even from
// a different login, while later resubmission does not.
func TestWriteShareCheckExist(t *testing.T) {
	reset()

	exist, _ := r.WriteShare("x", "x", []string{"0x0", "0x0", "0x0"}, 10, 1008, 0)
	if exist {
		t.Error("PoW must not exist")
	}
	exist, _ = r.WriteShare("x", "x", []string{"0x0", "0x1", "0x0"}, 10, 1008, 0)
	if exist {
		t.Error("PoW must not exist")
	}
	exist, _ = r.WriteShare("x", "x", []string{"0x0", "0x0", "0x1"}, 100, 1010, 0)
	if exist {
		t.Error("PoW must not exist")
	}
	exist, _ = r.WriteShare("z", "x", []string{"0x0", "0x0", "0x1"}, 100, 1016, 0)
	if !exist {
		t.Error("PoW must exist")
	}
	exist, _ = r.WriteShare("x", "x", []string{"0x0", "0x0", "0x1"}, 100, 1025, 0)
	if exist {
		t.Error("PoW must not exist")
	}
}

// TestGetPayees verifies GetPayees returns every miner that has a balance
// field, with no duplicates.
func TestGetPayees(t *testing.T) {
	reset()

	n := 256
	for i := 0; i < n; i++ {
		r.client.HSet(r.formatKey("miners", strconv.Itoa(i)), "balance", strconv.Itoa(i))
	}

	var payees []string
	payees, _ = r.GetPayees()
	if len(payees) != n {
		t.Error("Must return all payees")
	}
	m := make(map[string]struct{})
	for _, v := range payees {
		m[v] = struct{}{}
	}
	if len(m) != n {
		t.Error("Must be unique list")
	}
}

// TestGetBalance covers both an existing balance and the zero-without-error
// behavior for an unknown account.
func TestGetBalance(t *testing.T) {
	reset()

	r.client.HSet(r.formatKey("miners:x"), "balance", "750")

	v, _ := r.GetBalance("x")
	if v != 750 {
		t.Error("Must return balance")
	}

	v, err := r.GetBalance("z")
	if v != 0 {
		t.Error("Must return 0 if account does not exist")
	}
	if err != nil {
		t.Error("Must not return error if account does not exist")
	}
}

// TestLockPayouts checks the payout lock stores "login:amount" and cannot
// be taken twice.
func TestLockPayouts(t *testing.T) {
	reset()

	r.LockPayouts("x", 1000)
	v := r.client.Get("test:payments:lock").Val()
	if v != "x:1000" {
		t.Errorf("Invalid lock amount: %v", v)
	}

	err := r.LockPayouts("x", 100)
	if err == nil {
		t.Errorf("Must not overwrite lock")
	}
}

// TestUnlockPayouts checks UnlockPayouts deletes the lock key.
func TestUnlockPayouts(t *testing.T) {
	reset()

	r.client.Set(r.formatKey("payments:lock"), "x:1000", 0)

	r.UnlockPayouts()
	err := r.client.Get(r.formatKey("payments:lock")).Err()
	if err != redis.Nil {
		t.Errorf("Must release lock")
	}
}

// TestIsPayoutsLocked checks an acquired lock is visible via IsPayoutsLocked.
func TestIsPayoutsLocked(t *testing.T) {
	reset()

	r.LockPayouts("x", 1000)
	if locked, _ := r.IsPayoutsLocked(); !locked {
		t.Errorf("Payouts must be locked")
	}
}
+
// TestUpdateBalance verifies UpdateBalance moves funds from "balance" to
// "pending" on both the miner and the pool-wide finances hashes, and queues
// a pending payment entry.
func TestUpdateBalance(t *testing.T) {
	reset()

	r.client.HMSetMap(
		r.formatKey("miners:x"),
		map[string]string{"paid": "50", "balance": "1000"},
	)
	r.client.HMSetMap(
		r.formatKey("finances"),
		map[string]string{"paid": "500", "balance": "10000"},
	)

	amount := int64(250)
	r.UpdateBalance("x", amount)
	result := r.client.HGetAllMap(r.formatKey("miners:x")).Val()
	if result["pending"] != "250" {
		t.Error("Must set pending amount")
	}
	if result["balance"] != "750" {
		t.Error("Must deduct balance")
	}
	if result["paid"] != "50" {
		t.Error("Must not touch paid")
	}

	result = r.client.HGetAllMap(r.formatKey("finances")).Val()
	if result["pending"] != "250" {
		t.Error("Must set pool pending amount")
	}
	if result["balance"] != "9750" {
		t.Error("Must deduct pool balance")
	}
	if result["paid"] != "500" {
		t.Error("Must not touch pool paid")
	}

	rank := r.client.ZRank(r.formatKey("payments:pending"), join("x", amount)).Val()
	if rank != 0 {
		t.Error("Must add pending payment")
	}
}

// TestRollbackBalance verifies RollbackBalance undoes UpdateBalance:
// pending goes back to balance and the queued payment entry is removed.
func TestRollbackBalance(t *testing.T) {
	reset()

	r.client.HMSetMap(
		r.formatKey("miners:x"),
		map[string]string{"paid": "100", "balance": "750", "pending": "250"},
	)
	r.client.HMSetMap(
		r.formatKey("finances"),
		map[string]string{"paid": "500", "balance": "10000", "pending": "250"},
	)
	r.client.ZAdd(r.formatKey("payments:pending"), redis.Z{Score: 1, Member: "xx"})

	amount := int64(250)
	r.RollbackBalance("x", amount)
	result := r.client.HGetAllMap(r.formatKey("miners:x")).Val()
	if result["paid"] != "100" {
		t.Error("Must not touch paid")
	}
	if result["balance"] != "1000" {
		t.Error("Must increase balance")
	}
	if result["pending"] != "0" {
		t.Error("Must deduct pending")
	}

	result = r.client.HGetAllMap(r.formatKey("finances")).Val()
	if result["paid"] != "500" {
		t.Error("Must not touch pool paid")
	}
	if result["balance"] != "10250" {
		t.Error("Must increase pool balance")
	}
	if result["pending"] != "0" {
		t.Error("Must deduct pool pending")
	}

	err := r.client.ZRank(r.formatKey("payments:pending"), join("x", amount)).Err()
	if err != redis.Nil {
		t.Errorf("Must remove pending payment")
	}
}

// TestWritePayment verifies a completed payment clears pending, increases
// paid on miner and pool, releases the payout lock and logs the payment in
// both the global and the per-miner payment sets.
func TestWritePayment(t *testing.T) {
	reset()

	r.client.HMSetMap(
		r.formatKey("miners:x"),
		map[string]string{"paid": "50", "balance": "1000", "pending": "250"},
	)
	r.client.HMSetMap(
		r.formatKey("finances"),
		map[string]string{"paid": "500", "balance": "10000", "pending": "250"},
	)

	amount := int64(250)
	r.WritePayment("x", "0x0", amount)
	result := r.client.HGetAllMap(r.formatKey("miners:x")).Val()
	if result["pending"] != "0" {
		t.Error("Must unset pending amount")
	}
	if result["balance"] != "1000" {
		t.Error("Must not touch balance")
	}
	if result["paid"] != "300" {
		t.Error("Must increase paid")
	}

	result = r.client.HGetAllMap(r.formatKey("finances")).Val()
	if result["pending"] != "0" {
		t.Error("Must deduct pool pending amount")
	}
	if result["balance"] != "10000" {
		t.Error("Must not touch pool balance")
	}
	if result["paid"] != "750" {
		t.Error("Must increase pool paid")
	}

	err := r.client.Get(r.formatKey("payments:lock")).Err()
	if err != redis.Nil {
		t.Errorf("Must release lock")
	}

	err = r.client.ZRank(r.formatKey("payments:pending"), join("x", amount)).Err()
	if err != redis.Nil {
		t.Error("Must remove pending payment")
	}
	err = r.client.ZRank(r.formatKey("payments:all"), join("0x0", "x", amount)).Err()
	if err == redis.Nil {
		t.Error("Must add payment to set")
	}
	err = r.client.ZRank(r.formatKey("payments:x"), join("0x0", amount)).Err()
	if err == redis.Nil {
		t.Error("Must add payment to set")
	}
}
+
+func TestGetPendingPayments(t *testing.T) {
+ reset()
+
+ r.client.HMSetMap(
+ r.formatKey("miners:x"),
+ map[string]string{"paid": "100", "balance": "750", "pending": "250"},
+ )
+
+ amount := int64(1000)
+ r.UpdateBalance("x", amount)
+ pending := r.GetPendingPayments()
+
+ if len(pending) != 1 {
+ t.Error("Must return pending payment")
+ }
+ if pending[0].Amount != amount {
+ t.Error("Must have corrent amount")
+ }
+ if pending[0].Address != "x" {
+ t.Error("Must have corrent account")
+ }
+ if pending[0].Timestamp <= 0 {
+ t.Error("Must have timestamp")
+ }
+}
+
// TestCollectLuckStats seeds one immature and three matured block rows and
// checks the cumulative luck/uncle/orphan rates for growing windows; keys
// in the result are the number of blocks actually available per window.
func TestCollectLuckStats(t *testing.T) {
	reset()

	members := []redis.Z{
		redis.Z{Score: 0, Member: "1:0:0x0:0x0:0:100:100:0"},
	}
	r.client.ZAdd(r.formatKey("blocks:immature"), members...)
	members = []redis.Z{
		redis.Z{Score: 1, Member: "1:0:0x2:0x0:0:50:100:0"},
		redis.Z{Score: 2, Member: "0:1:0x1:0x0:0:100:100:0"},
		redis.Z{Score: 3, Member: "0:0:0x3:0x0:0:200:100:0"},
	}
	r.client.ZAdd(r.formatKey("blocks:matured"), members...)

	stats, _ := r.CollectLuckStats([]int{1, 2, 5, 10})
	expectedStats := map[string]interface{}{
		"1": map[string]float64{
			"luck": 1, "uncleRate": 1, "orphanRate": 0,
		},
		"2": map[string]float64{
			"luck": 0.75, "uncleRate": 0.5, "orphanRate": 0,
		},
		"4": map[string]float64{
			"luck": 1.125, "uncleRate": 0.5, "orphanRate": 0.25,
		},
	}

	if !reflect.DeepEqual(stats, expectedStats) {
		t.Error("Stats != expected stats")
	}
}

// reset deletes every key under the test prefix.
func reset() {
	keys := r.client.Keys(r.prefix + ":*").Val()
	for _, k := range keys {
		r.client.Del(k)
	}
}
diff --git a/util/util.go b/util/util.go
new file mode 100644
index 0000000..b4219c5
--- /dev/null
+++ b/util/util.go
@@ -0,0 +1,81 @@
+package util
+
+import (
+ "math/big"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+)
+
// Ether and Shannon are the wei denominations (10^18 and 10^9) used for
// reward arithmetic.
var Ether = math.BigPow(10, 18)
var Shannon = math.BigPow(10, 9)

// pow256 is 2^256, the size of the hash space; used for diff<->target math.
var pow256 = math.BigPow(2, 256)
var addressPattern = regexp.MustCompile("^0x[0-9a-fA-F]{40}$")
var zeroHash = regexp.MustCompile("^0?x?0+$")

// IsValidHexAddress reports whether s is a 0x-prefixed 20-byte hex address
// and not the zero address.
func IsValidHexAddress(s string) bool {
	if IsZeroHash(s) || !addressPattern.MatchString(s) {
		return false
	}
	return true
}

// IsZeroHash reports whether s consists solely of zeros, optionally
// prefixed with "0"/"0x".
func IsZeroHash(s string) bool {
	return zeroHash.MatchString(s)
}

// MakeTimestamp returns the current wall-clock time in milliseconds.
func MakeTimestamp() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
+
+func GetTargetHex(diff int64) string {
+ difficulty := big.NewInt(diff)
+ diff1 := new(big.Int).Div(pow256, difficulty)
+ return string(common.ToHex(diff1.Bytes()))
+}
+
// TargetHexToDiff converts a hex-encoded target back into its difficulty
// (2^256 / target); the inverse of GetTargetHex.
func TargetHexToDiff(targetHex string) *big.Int {
	targetBytes := common.FromHex(targetHex)
	return new(big.Int).Div(pow256, new(big.Int).SetBytes(targetBytes))
}

// ToHex formats n as hex with a "0x0" prefix. NOTE(review): the extra zero
// after "0x" is intentional-looking but undocumented — confirm downstream
// consumers expect "0x0<hex>" before changing it.
func ToHex(n int64) string {
	return "0x0" + strconv.FormatInt(n, 16)
}

// FormatReward renders a wei amount as a decimal string.
func FormatReward(reward *big.Int) string {
	return reward.String()
}

// FormatRatReward converts a wei amount into ether with 8 decimal places.
// Note: Quo mutates the passed Rat in place.
func FormatRatReward(reward *big.Rat) string {
	wei := new(big.Rat).SetInt(Ether)
	reward = reward.Quo(reward, wei)
	return reward.FloatString(8)
}
+
// StringInSlice reports whether list contains the string a.
func StringInSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
+
+func MustParseDuration(s string) time.Duration {
+ value, err := time.ParseDuration(s)
+ if err != nil {
+ panic("util: Can't parse duration `" + s + "`: " + err.Error())
+ }
+ return value
+}
+
// String2Big parses num (base inferred from the prefix, e.g. "0x") into a
// big.Int. On unparseable input the returned value stays at zero, matching
// SetString's behavior of leaving the receiver untouched on failure.
func String2Big(num string) *big.Int {
	result := new(big.Int)
	result.SetString(num, 0)
	return result
}
diff --git a/www/.bowerrc b/www/.bowerrc
new file mode 100644
index 0000000..959e169
--- /dev/null
+++ b/www/.bowerrc
@@ -0,0 +1,4 @@
+{
+ "directory": "bower_components",
+ "analytics": false
+}
diff --git a/www/.editorconfig b/www/.editorconfig
new file mode 100644
index 0000000..47c5438
--- /dev/null
+++ b/www/.editorconfig
@@ -0,0 +1,34 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# editorconfig.org
+
+root = true
+
+
+[*]
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+
+[*.js]
+indent_style = space
+indent_size = 2
+
+[*.hbs]
+insert_final_newline = false
+indent_style = space
+indent_size = 2
+
+[*.css]
+indent_style = space
+indent_size = 2
+
+[*.html]
+indent_style = space
+indent_size = 2
+
+[*.{diff,md}]
+trim_trailing_whitespace = false
diff --git a/www/.ember-cli b/www/.ember-cli
new file mode 100644
index 0000000..ee64cfe
--- /dev/null
+++ b/www/.ember-cli
@@ -0,0 +1,9 @@
+{
+ /**
+ Ember CLI sends analytics information by default. The data is completely
+ anonymous, but there are times when you might want to disable this behavior.
+
+ Setting `disableAnalytics` to true will prevent any data from being sent.
+ */
+ "disableAnalytics": false
+}
diff --git a/www/.gitignore b/www/.gitignore
new file mode 100644
index 0000000..86fceae
--- /dev/null
+++ b/www/.gitignore
@@ -0,0 +1,17 @@
+# See http://help.github.com/ignore-files/ for more about ignoring files.
+
+# compiled output
+/dist
+/tmp
+
+# dependencies
+/node_modules
+/bower_components
+
+# misc
+/.sass-cache
+/connect.lock
+/coverage/*
+/libpeerconnection.log
+npm-debug.log
+testem.log
diff --git a/www/.jshintrc b/www/.jshintrc
new file mode 100644
index 0000000..e75f719
--- /dev/null
+++ b/www/.jshintrc
@@ -0,0 +1,33 @@
+{
+ "predef": [
+ "document",
+ "window",
+ "-Promise",
+ "moment"
+ ],
+ "browser": true,
+ "boss": true,
+ "curly": true,
+ "debug": false,
+ "devel": true,
+ "eqeqeq": true,
+ "evil": true,
+ "forin": false,
+ "immed": false,
+ "laxbreak": false,
+ "newcap": true,
+ "noarg": true,
+ "noempty": false,
+ "nonew": false,
+ "nomen": false,
+ "onevar": false,
+ "plusplus": false,
+ "regexp": false,
+ "undef": true,
+ "sub": true,
+ "strict": false,
+ "white": false,
+ "eqnull": true,
+ "esnext": true,
+ "unused": true
+}
diff --git a/www/.travis.yml b/www/.travis.yml
new file mode 100644
index 0000000..66dd107
--- /dev/null
+++ b/www/.travis.yml
@@ -0,0 +1,23 @@
+---
+language: node_js
+node_js:
+ - "0.12"
+
+sudo: false
+
+cache:
+ directories:
+ - node_modules
+
+before_install:
+ - export PATH=/usr/local/phantomjs-2.0.0/bin:$PATH
+ - "npm config set spin false"
+ - "npm install -g npm@^2"
+
+install:
+ - npm install -g bower
+ - npm install
+ - bower install
+
+script:
+ - npm test
diff --git a/www/.watchmanconfig b/www/.watchmanconfig
new file mode 100644
index 0000000..5e9462c
--- /dev/null
+++ b/www/.watchmanconfig
@@ -0,0 +1,3 @@
+{
+ "ignore_dirs": ["tmp"]
+}
diff --git a/www/README.md b/www/README.md
new file mode 100644
index 0000000..5a3c03f
--- /dev/null
+++ b/www/README.md
@@ -0,0 +1,53 @@
+# Pool
+
+This README outlines the details of collaborating on this Ember application.
+A short introduction of this app could easily go here.
+
+## Prerequisites
+
+You will need the following things properly installed on your computer.
+
+* [Git](http://git-scm.com/)
+* [Node.js](http://nodejs.org/) (with NPM)
+* [Bower](http://bower.io/)
+* [Ember CLI](http://www.ember-cli.com/)
+* [PhantomJS](http://phantomjs.org/)
+
+## Installation
+
* `git clone <repository-url>` this repository
+* change into the new directory
+* `npm install`
+* `bower install`
+
+## Running / Development
+
+* `ember server`
+* Visit your app at [http://localhost:4200](http://localhost:4200).
+
+### Code Generators
+
+Make use of the many generators for code, try `ember help generate` for more details
+
+### Running Tests
+
+* `ember test`
+* `ember test --server`
+
+### Building
+
+* `ember build` (development)
+* `ember build --environment production` (production)
+
+### Deploying
+
+Specify what it takes to deploy your app.
+
+## Further Reading / Useful Links
+
+* [ember.js](http://emberjs.com/)
+* [ember-cli](http://www.ember-cli.com/)
+* Development Browser Extensions
+ * [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi)
+ * [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/)
+
diff --git a/www/app/app.js b/www/app/app.js
new file mode 100644
index 0000000..831ad61
--- /dev/null
+++ b/www/app/app.js
@@ -0,0 +1,18 @@
+import Ember from 'ember';
+import Resolver from './resolver';
+import loadInitializers from 'ember-load-initializers';
+import config from './config/environment';
+
+let App;
+
+Ember.MODEL_FACTORY_INJECTIONS = true;
+
+App = Ember.Application.extend({
+ modulePrefix: config.modulePrefix,
+ podModulePrefix: config.podModulePrefix,
+ Resolver
+});
+
+loadInitializers(App, config.modulePrefix);
+
+export default App;
diff --git a/www/app/components/.gitkeep b/www/app/components/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/www/app/components/active-li.js b/www/app/components/active-li.js
new file mode 100644
index 0000000..5ec7a7a
--- /dev/null
+++ b/www/app/components/active-li.js
@@ -0,0 +1,17 @@
+import Ember from 'ember';
+
+const { getOwner } = Ember;
+
+export default Ember.Component.extend({
+ tagName: 'li',
+ classNameBindings: ['isActive:active:inactive'],
+
+ router: function(){
+ return getOwner(this).lookup('router:main');
+ }.property(),
+
+ isActive: function(){
+ var currentWhen = this.get('currentWhen');
+ return this.get('router').isActive(currentWhen);
+ }.property('router.url', 'currentWhen')
+});
diff --git a/www/app/controllers/.gitkeep b/www/app/controllers/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/www/app/controllers/account.js b/www/app/controllers/account.js
new file mode 100644
index 0000000..e906a9b
--- /dev/null
+++ b/www/app/controllers/account.js
@@ -0,0 +1,155 @@
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+ applicationController: Ember.inject.controller('application'),
+ config: Ember.computed.reads('applicationController.config'),
+ stats: Ember.computed.reads('applicationController.model.stats'),
+ hashrate: Ember.computed.reads('applicationController.hashrate'),
+ chartOptions: Ember.computed("model.hashrate", {
+ get() {
+ var e = this,
+ t = e.getWithDefault("model.minerCharts"),
+ a = {
+ chart: {
+ backgroundColor: "rgba(0, 0, 0, 0.1)",
+ type: "spline",
+ marginRight: 10,
+ height: 400,
+ events: {
+ load: function() {
+ var series = this.series[0];
+ setInterval(function() {
+ var x = (new Date()).getTime(),
+ y = e.getWithDefault("model.currentHashrate") / 1000000;
+ series.addPoint([x, y], true, true);
+ }, 109000000);
+ }
+ }
+ },
+ title: {
+ text: ""
+ },
+ xAxis: {
+ ordinal: false,
+ type: "datetime",
+ dateTimeLabelFormats: {
+ millisecond: "%H:%M:%S",
+ second: "%H:%M:%S",
+ minute: "%H:%M",
+ hour: "%H:%M",
+ day: "%e. %b",
+ week: "%e. %b",
+ month: "%b '%y",
+ year: "%Y"
+ }
+ },
+ yAxis: {
+ title: {
+ text: "HASHRATE"
+ },
+ min: 0
+ },
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: "#808080"
+ }],
+ legend: {
+ enabled: true,
+ itemStyle:
+ {
+ color:"#ffffff"
+ },
+ },
+ tooltip: {
+ formatter: function() {
+ return this.y > 1000000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000000).toFixed(2) + " TH/s" : this.y > 1000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000).toFixed(2) + " GH/s" : this.y > 1000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000).toFixed(2) + " MH/s" : "" + this.point.d + " Hashrate " + this.y.toFixed(2) + " H/s";
+
+ },
+
+ useHTML: true
+ },
+ exporting: {
+ enabled: false
+ },
+ series: [{
+ color: "#E99002",
+ name: "Average hashrate",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].minerLargeHash;
+ a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }, {
+ name: "Current hashrate",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].minerHash;
+ a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }]
+ };
+ return a;
+ }
+ }),
+ roundPercent: Ember.computed('stats', 'model', {
+ get() {
+ var percent = this.get('model.roundShares') / this.get('stats.nShares');
+ if (!percent) {
+ return 0;
+ }
+ return percent;
+ }
+ }),
+ netHashrate: Ember.computed({
+ get() {
+ return this.get('hashrate');
+ }
+ }),
+ earnPerDay: Ember.computed('model', {
+ get() {
+ return 24 * 60 * 60 / this.get('config').BlockTime * this.get('config').BlockReward *
+ this.getWithDefault('model.hashrate') / this.get('hashrate');
+ }
+ })
+});
diff --git a/www/app/controllers/account/index.js b/www/app/controllers/account/index.js
new file mode 100644
index 0000000..f16397c
--- /dev/null
+++ b/www/app/controllers/account/index.js
@@ -0,0 +1,151 @@
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+ applicationController: Ember.inject.controller('application'),
+ netstats: Ember.computed.reads('applicationController'),
+ stats: Ember.computed.reads('applicationController.model.stats'),
+ config: Ember.computed.reads('applicationController.config'),
+
+ chartOptions: Ember.computed("model.hashrate", {
+ get() {
+ var e = this,
+ t = e.getWithDefault("model.minerCharts"),
+ a = {
+ chart: {
+ backgroundColor: "rgba(0, 0, 0, 0.1)",
+
+ type: "spline",
+ marginRight: 10,
+ height: 200,
+ events: {
+ load: function() {
+ var series = this.series[0];
+ setInterval(function() {
+ var x = (new Date()).getTime(),
+ y = e.getWithDefault("model.currentHashrate") / 1000000;
+ series.addPoint([x, y], true, true);
+ }, 1090000000);
+ }
+ }
+ },
+ title: {
+ text: ""
+ },
+ xAxis: {
+ ordinal: false,
+ labels: {
+ style: {
+ color: "#ccc"
+ }
+ },
+ type: "datetime",
+ dateTimeLabelFormats: {
+ millisecond: "%H:%M:%S",
+ second: "%H:%M:%S",
+ minute: "%H:%M",
+ hour: "%H:%M",
+ day: "%e. %b",
+ week: "%e. %b",
+ month: "%b '%y",
+ year: "%Y"
+ }
+ },
+ yAxis: {
+ title: {
+ text: "Hashrate by Account",
+ style: {
+ color: "#ccc"
+ },
+ },
+ labels: {
+ style: {
+ color: "#ccc"
+ }
+ },
+ //softMin: e.getWithDefault("model.currentHashrate") / 1000000,
+ //softMax: e.getWithDefault("model.currentHashrate") / 1000000,
+ },
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: "#aaaaaa"
+ }],
+ legend: {
+ enabled: true,
+ itemStyle:
+ {
+ color:"#ccc"
+ },
+ },
+ tooltip: {
+ formatter: function() {
+ return this.y > 1000000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000000).toFixed(2) + " TH/s" : this.y > 1000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000).toFixed(2) + " GH/s" : this.y > 1000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000).toFixed(2) + " MH/s" : "" + this.point.d + " Hashrate " + this.y.toFixed(2) + " H/s";
+
+ },
+
+ useHTML: true
+ },
+ exporting: {
+ enabled: false
+ },
+ series: [{
+ color: "#E99002",
+ name: "3 hours average hashrate",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].minerLargeHash;
+ a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }, {
+ name: "30 minutes average hashrate",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].minerHash;
+ a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }]
+ };
+ return a;
+ }
+ })
+});
diff --git a/www/app/controllers/account/payouts.js b/www/app/controllers/account/payouts.js
new file mode 100644
index 0000000..28708c1
--- /dev/null
+++ b/www/app/controllers/account/payouts.js
@@ -0,0 +1,111 @@
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+ applicationController: Ember.inject.controller('application'),
+ stats: Ember.computed.reads('applicationController.model.stats'),
+ intl: Ember.inject.service(),
+
+ chartPaymentText: Ember.computed('model', {
+ get() {
+ var outText = this.get('model.paymentCharts');
+ if (!outText) {
+ return 0;
+ }
+ return outText;
+ }
+ }),
+
+ chartPayment: Ember.computed('intl', 'model.paymentCharts', {
+ get() {
+ var e = this,
+ t = e.getWithDefault("model.paymentCharts"),
+ a = {
+ chart: {
+ backgroundColor: "rgba(0, 0, 0, 0.1)",
+ type: "column",
+ marginRight: 10,
+ height: 200,
+ events: {
+ load: function() {
+ var series = this.series[0];
+ setInterval(function() {
+ var x = (new Date()).getDate(),
+ y = e.getWithDefault("model.paymentCharts");
+ series.addPoint([x, y], true, true);
+ }, 1090000000);
+ }
+ }
+ },
+ title: {
+ text: ""
+ },
+ xAxis: {
+ ordinal: false,
+ type: "datetime",
+ dateTimeLabelFormats: {
+ day: "%e. %b",
+ week: "%e. %b",
+ month: "%b '%y",
+ year: "%Y"
+ }
+ },
+ yAxis: {
+ title: {
+ text: "Payment by Account"
+ }
+ },
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: "#808080"
+ }],
+ legend: {
+ enabled: true,
+ itemStyle:
+ {
+ color:"#ccc"
+ },
+ },
+ tooltip: {
+ formatter: function() {
+ return "" + Highcharts.dateFormat('%Y-%m-%d', new Date(this.x)) + " Payment " + this.y.toFixed(4) + " CLO";
+ },
+ useHTML: true
+ },
+ exporting: {
+ enabled: false
+ },
+ series: [{
+ color: "#E99002",
+ name: "Payment Series",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].amount / 1000000000;
+ a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }]
+ };
+ return a;
+ }
+})
+});
diff --git a/www/app/controllers/application.js b/www/app/controllers/application.js
new file mode 100644
index 0000000..e52ecbf
--- /dev/null
+++ b/www/app/controllers/application.js
@@ -0,0 +1,105 @@
+import Ember from 'ember';
+import config from '../config/environment';
+
+export default Ember.Controller.extend({
+ intl: Ember.inject.service(),
+ get config() {
+ return config.APP;
+ },
+
+ height: Ember.computed('model.nodes', {
+ get() {
+ var node = this.get('bestNode');
+ if (node) {
+ return node.height;
+ }
+ return 0;
+ }
+ }),
+
+ roundShares: Ember.computed('model.stats', {
+ get() {
+ return parseInt(this.get('model.stats.roundShares'));
+ }
+ }),
+
+ difficulty: Ember.computed('model.nodes', {
+ get() {
+ var node = this.get('bestNode');
+ if (node) {
+ return node.difficulty;
+ }
+ return 0;
+ }
+ }),
+
+ hashrate: Ember.computed('difficulty', {
+ get() {
+ return this.getWithDefault('difficulty', 0) / config.APP.BlockTime;
+ }
+ }),
+
+ immatureTotal: Ember.computed('model', {
+ get() {
+ return this.getWithDefault('model.immatureTotal', 0) + this.getWithDefault('model.candidatesTotal', 0);
+ }
+ }),
+
+ bestNode: Ember.computed('model.nodes', {
+ get() {
+ var node = null;
+ this.get('model.nodes').forEach(function (n) {
+ if (!node) {
+ node = n;
+ }
+ if (node.height < n.height) {
+ node = n;
+ }
+ });
+ return node;
+ }
+ }),
+
+ lastBlockFound: Ember.computed('model', {
+ get() {
+ return parseInt(this.get('model.lastBlockFound')) || 0;
+ }
+ }),
+
+
+ languages: Ember.computed('model', {
+ get() {
+ return this.get('model.languages');
+ }
+ }),
+
+ selectedLanguage: Ember.computed({
+ get() {
+ var langs = this.get('languages');
+ var lang = Ember.$.cookie('lang');
+ for (var i = 0; i < langs.length; i++) {
+ if (langs[i].value == lang) {
+ return langs[i].name;
+ }
+ }
+ return lang;
+ }
+ }),
+
+ roundVariance: Ember.computed('model', {
+ get() {
+ var percent = this.get('model.stats.roundShares') / this.get('difficulty');
+ if (!percent) {
+ return 0;
+ }
+ return percent.toFixed(2);
+ }
+ }),
+
+ nextEpoch: Ember.computed('height', {
+ get() {
+ var epochOffset = (30000 - (this.getWithDefault('height', 1) % 30000)) * 1000 * this.get('config').BlockTime;
+ return Date.now() + epochOffset;
+ }
+ })
+});
diff --git a/www/app/controllers/help.js b/www/app/controllers/help.js
new file mode 100644
index 0000000..12b7f9a
--- /dev/null
+++ b/www/app/controllers/help.js
@@ -0,0 +1,6 @@
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+ applicationController: Ember.inject.controller('application'),
+ config: Ember.computed.reads('applicationController.config')
+});
diff --git a/www/app/controllers/index.js b/www/app/controllers/index.js
new file mode 100644
index 0000000..ace7784
--- /dev/null
+++ b/www/app/controllers/index.js
@@ -0,0 +1,116 @@
+import Ember from 'ember';
+
+export default Ember.Controller.extend({
+ applicationController: Ember.inject.controller('application'),
+ stats: Ember.computed.reads('applicationController'),
+ config: Ember.computed.reads('applicationController.config'),
+
+ cachedLogin: Ember.computed('login', {
+ get() {
+ return this.get('login') || Ember.$.cookie('login');
+ },
+ set(key, value) {
+ Ember.$.cookie('login', value);
+ this.set('model.login', value);
+ return value;
+ }
+ }),
+ chartOptions: Ember.computed("model.hashrate", {
+ get() {
+ var e = this,
+ t = e.getWithDefault("stats.model.poolCharts"),
+ a = {
+ chart: {
+ backgroundColor: "rgba(0, 0, 0, 0.1)",
+ type: "spline",
+ height: 300,
+ marginRight: 10,
+ events: {
+ load: function() {
+ var series = this.series[0];
+ setInterval(function() {
+ var x = (new Date()).getTime(), y = e.getWithDefault("model.Hashrate") / 1000000;
+ series.addPoint([x, y], true, true);
+ }, 1090000000);
+ }
+ }
+ },
+ title: {
+ text: "Our pool's hashrate",
+ style: {
+ color: "#ccc"
+ }
+ },
+ xAxis: {
+ labels: {
+ style: {
+ color: "#ccc"
+ }
+ },
+ ordinal: false,
+ type: "datetime"
+ },
+ yAxis: {
+ title: {
+ text: "HASHRATE",
+ style: {
+ color: "#ccc"
+ }
+ },
+ min: 0,
+ labels: {
+ style: {
+ color: "#ccc"
+ }
+ }
+ },
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: "#ccc"
+ }],
+ legend: {
+ enabled: false
+ },
+ tooltip: {
+ formatter: function() {
+ return this.y > 1000000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000000).toFixed(2) + " TH/s" : this.y > 1000000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000000).toFixed(2) + " GH/s" : this.y > 1000000 ? "" + this.point.d + " Hashrate " + (this.y / 1000000).toFixed(2) + " MH/s" : "" + this.point.d + " Hashrate " + this.y.toFixed(2) + " H/s";
+ },
+ useHTML: true
+ },
+ exporting: {
+ enabled: false
+ },
+ series: [{
+ color: "#15BD27",
+ name: "Hashrate",
+ data: function() {
+ var e, a = [];
+ if (null != t) {
+ for (e = 0; e <= t.length - 1; e += 1) {
+ var n = 0,
+ r = 0,
+ l = 0;
+ r = new Date(1e3 * t[e].x);
+ l = r.toLocaleString();
+ n = t[e].y; a.push({
+ x: r,
+ d: l,
+ y: n
+ });
+ }
+ } else {
+ a.push({
+ x: 0,
+ d: 0,
+ y: 0
+ });
+ }
+ return a;
+ }()
+ }]
+ };
+ return a;
+ }
+ })
+});
diff --git a/www/app/formats.js b/www/app/formats.js
new file mode 100644
index 0000000..7078388
--- /dev/null
+++ b/www/app/formats.js
@@ -0,0 +1,18 @@
+var hhmmss = {
+ hour: 'numeric',
+ minute: 'numeric',
+ second: 'numeric'
+};
+
+export default {
+ time: {
+ hhmmss: hhmmss
+ },
+ date: {
+ hhmmss: hhmmss
+ },
+ number: {
+ EUR: { style: 'currency', currency: 'EUR', minimumFractionDigits: 2, maximumFractionDigits: 2 },
+ USD: { style: 'currency', currency: 'USD', minimumFractionDigits: 2, maximumFractionDigits: 2 }
+ }
+};
diff --git a/www/app/helpers/.gitkeep b/www/app/helpers/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/www/app/helpers/format-balance.js b/www/app/helpers/format-balance.js
new file mode 100644
index 0000000..0f5d024
--- /dev/null
+++ b/www/app/helpers/format-balance.js
@@ -0,0 +1,8 @@
+import Ember from 'ember';
+
+export function formatBalance(value) {
+ value = value * 0.000000001;
+ return value.toFixed(8);
+}
+
+export default Ember.Helper.helper(formatBalance);
diff --git a/www/app/helpers/format-date-locale.js b/www/app/helpers/format-date-locale.js
new file mode 100644
index 0000000..db008b5
--- /dev/null
+++ b/www/app/helpers/format-date-locale.js
@@ -0,0 +1,8 @@
+import Ember from 'ember';
+
+export function formatDateLocale(ts) {
+ var date = new Date(ts * 1000);
+ return date.toLocaleString();
+}
+
+export default Ember.Helper.helper(formatDateLocale);
diff --git a/www/app/helpers/format-difficulty.js b/www/app/helpers/format-difficulty.js
new file mode 100644
index 0000000..bd33407
--- /dev/null
+++ b/www/app/helpers/format-difficulty.js
@@ -0,0 +1,8 @@
+import Ember from 'ember';
+
+export function formatDifficulty(value) {
+ value = value / 1000000000
+ return Ember.String.htmlSafe('' + value + 'b');
+}
+
+export default Ember.Helper.helper(formatDifficulty);
diff --git a/www/app/helpers/format-hashrate.js b/www/app/helpers/format-hashrate.js
new file mode 100644
index 0000000..11e4524
--- /dev/null
+++ b/www/app/helpers/format-hashrate.js
@@ -0,0 +1,14 @@
+import Ember from 'ember';
+
+export function formatHashrate(params/*, hash*/) {
+ var hashrate = params[0];
+ var i = 0;
+ var units = ['H', 'KH', 'MH', 'GH', 'TH', 'PH'];
+ while (hashrate > 1000) {
+ hashrate = hashrate / 1000;
+ i++;
+ }
+ return hashrate.toFixed(2) + ' ' + units[i];
+}
+
+export default Ember.Helper.helper(formatHashrate);
diff --git a/www/app/helpers/format-tx.js b/www/app/helpers/format-tx.js
new file mode 100644
index 0000000..c65a3f6
--- /dev/null
+++ b/www/app/helpers/format-tx.js
@@ -0,0 +1,7 @@
+import Ember from 'ember';
+
+export function formatTx(value) {
+ return value[0].substring(2, 26) + "..." + value[0].substring(42);
+}
+
+export default Ember.Helper.helper(formatTx);
diff --git a/www/app/helpers/seconds-to-ms.js b/www/app/helpers/seconds-to-ms.js
new file mode 100644
index 0000000..b190261
--- /dev/null
+++ b/www/app/helpers/seconds-to-ms.js
@@ -0,0 +1,7 @@
+import Ember from 'ember';
+
+export function secondsToMs(value) {
+ return value * 1000;
+}
+
+export default Ember.Helper.helper(secondsToMs);
diff --git a/www/app/helpers/string-to-int.js b/www/app/helpers/string-to-int.js
new file mode 100644
index 0000000..da8b774
--- /dev/null
+++ b/www/app/helpers/string-to-int.js
@@ -0,0 +1,7 @@
+import Ember from 'ember';
+
+export function stringToInt(value) {
+ return parseInt(value);
+}
+
+export default Ember.Helper.helper(stringToInt);
diff --git a/www/app/helpers/with-metric-prefix.js b/www/app/helpers/with-metric-prefix.js
new file mode 100644
index 0000000..2359f33
--- /dev/null
+++ b/www/app/helpers/with-metric-prefix.js
@@ -0,0 +1,19 @@
+import Ember from 'ember';
+
+export function withMetricPrefix(params/*, hash*/) {
+ var n = params[0];
+
+ if (n < 1000) {
+ return n;
+ }
+
+ var i = 0;
+ var units = ['K', 'M', 'G', 'T', 'P'];
+ while (n > 1000) {
+ n = n / 1000;
+ i++;
+ }
+ return n.toFixed(3) + ' ' + units[i - 1];
+}
+
+export default Ember.Helper.helper(withMetricPrefix);
diff --git a/www/app/helpers/worker-colorizer.js b/www/app/helpers/worker-colorizer.js
new file mode 100644
index 0000000..2346582
--- /dev/null
+++ b/www/app/helpers/worker-colorizer.js
@@ -0,0 +1,30 @@
+import Ember from 'ember';
+
+export function workerColorizer(value) {
+ let class_name;
+ let difference_seconds = (Date.now() / 1000) - value;
+
+ if (difference_seconds >= (60 * 15)) {
+ class_name = "offline-1";
+ }
+
+ if (difference_seconds >= (60 * 17)) {
+ class_name = "offline-2";
+ }
+
+ if (difference_seconds >= (60 * 20)) {
+ class_name = "offline-3";
+ }
+
+ if (difference_seconds >= (60 * 25)) {
+ class_name = "offline-4";
+ }
+
+ if (difference_seconds >= (60 * 28)) {
+ class_name = "offline-5";
+ }
+
+ return class_name;
+}
+
+export default Ember.Helper.helper(workerColorizer);
diff --git a/www/app/helpers/worker-earnperday.js b/www/app/helpers/worker-earnperday.js
new file mode 100644
index 0000000..e6b9a2d
--- /dev/null
+++ b/www/app/helpers/worker-earnperday.js
@@ -0,0 +1,9 @@
+import Ember from 'ember';
+import config from '../config/environment';
+
+
+export function workerEarnperday(hashrates) {
+ return 24 * 60 * 60 / config.APP.BlockTime * (hashrates[0] / hashrates[1]) * config.APP.BlockReward;
+}
+
+export default Ember.Helper.helper(workerEarnperday);
diff --git a/www/app/index.html b/www/app/index.html
new file mode 100644
index 0000000..abf68ea
--- /dev/null
+++ b/www/app/index.html
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+ Official etc Pool | https://t.me/poolnode
+
+
+
+ {{content-for "head"}}
+
+
+
+
+
+ {{content-for "head-footer"}}
+
+
+ {{content-for "body"}}
+
+
+ {{content-for "body-footer"}}
+
+
diff --git a/www/app/models/.gitkeep b/www/app/models/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/www/app/models/block.js b/www/app/models/block.js
new file mode 100644
index 0000000..7c7ee21
--- /dev/null
+++ b/www/app/models/block.js
@@ -0,0 +1,30 @@
+import Ember from 'ember';
+
+var Block = Ember.Object.extend({
+ variance: Ember.computed('difficulty', 'shares', function() {
+ var percent = this.get('shares') / this.get('difficulty');
+ if (!percent) {
+ return 0;
+ }
+ return percent;
+ }),
+
+ isLucky: Ember.computed('variance', function() {
+ return this.get('variance') <= 1.0;
+ }),
+
+ isOk: Ember.computed('orphan', 'uncle', function() {
+ return !this.get('orphan');
+ }),
+
+ formatReward: Ember.computed('reward', function() {
+ if (!this.get('orphan')) {
+ var value = parseInt(this.get('reward')) * 0.000000000000000001;
+ return value.toFixed(6);
+ } else {
+ return 0;
+ }
+ })
+});
+
+export default Block;
diff --git a/www/app/models/payment.js b/www/app/models/payment.js
new file mode 100644
index 0000000..f07c9a3
--- /dev/null
+++ b/www/app/models/payment.js
@@ -0,0 +1,10 @@
+import Ember from 'ember';
+
+var Payment = Ember.Object.extend({
+ formatAmount: Ember.computed('amount', function() {
+ var value = parseInt(this.get('amount')) * 0.000000001;
+ return value.toFixed(8);
+ })
+});
+
+export default Payment;
diff --git a/www/app/resolver.js b/www/app/resolver.js
new file mode 100644
index 0000000..2fb563d
--- /dev/null
+++ b/www/app/resolver.js
@@ -0,0 +1,3 @@
+import Resolver from 'ember-resolver';
+
+export default Resolver;
diff --git a/www/app/router.js b/www/app/router.js
new file mode 100644
index 0000000..afd25bb
--- /dev/null
+++ b/www/app/router.js
@@ -0,0 +1,28 @@
+import Ember from 'ember';
+import config from './config/environment';
+
+var Router = Ember.Router.extend({
+ location: config.locationType
+});
+
+Router.map(function() {
+ this.route('account', { path: '/account/:login' }, function() {
+ this.route('payouts');
+ this.route('rewards');
+ });
+ this.route('not-found');
+
+ this.route('blocks', function() {
+ this.route('immature');
+ this.route('pending');
+ });
+
+ this.route('help');
+ //this.route('help-ar');
+ this.route('payments');
+ this.route('miners');
+ this.route('about');
+ //this.route('about-ar');
+});
+
+export default Router;
diff --git a/www/app/routes/.gitkeep b/www/app/routes/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/www/app/routes/account.js b/www/app/routes/account.js
new file mode 100644
index 0000000..b5744f6
--- /dev/null
+++ b/www/app/routes/account.js
@@ -0,0 +1,25 @@
+import Ember from 'ember';
+import config from '../config/environment';
+
+export default Ember.Route.extend({
+ model: function(params) {
+ var url = config.APP.ApiUrl + 'api/accounts/' + params.login;
+ return Ember.$.getJSON(url).then(function(data) {
+ data.login = params.login;
+ return Ember.Object.create(data);
+ });
+ },
+
+ setupController: function(controller, model) {
+ this._super(controller, model);
+ Ember.run.later(this, this.refresh, 5000);
+ },
+
+ actions: {
+ error(error) {
+ if (error.status === 404) {
+ return this.transitionTo('not-found');
+ }
+ }
+ }
+});
diff --git a/www/app/routes/application.js b/www/app/routes/application.js
new file mode 100644
index 0000000..61bf196
--- /dev/null
+++ b/www/app/routes/application.js
@@ -0,0 +1,88 @@
+import Ember from 'ember';
+import config from '../config/environment';
+
+function selectLocale(selected) {
+ // FIXME: supported locales are hard-coded here; keep this list in sync with the locales actually registered with ember-intl — TODO confirm where those are configured.
+ let supported = ['en', 'ar-sa', 'en-us'];
+ const language = navigator.languages[0] || navigator.language || navigator.userLanguage;
+
+ let locale = selected;
+
+ if (locale == null) {
+ // default locale
+ locale = language;
+ if (supported.indexOf(locale) < 0) {
+ locale = locale.replace(/\-[a-zA-Z]*$/, '');
+ }
+ }
+ if (supported.indexOf(locale) >= 0) {
+ if (locale === 'en') {
+ locale = 'en-us';
+ }
+ } else {
+ locale = 'en-us';
+ }
+ return locale;
+}
+
+
+export default Ember.Route.extend({
+ intl: Ember.inject.service(),
+ selectedLanguage: null,
+ languages: null,
+ beforeModel() {
+ let locale = this.get('selectedLanguage');
+ if (!locale) {
+ // read cookie
+ locale = Ember.$.cookie('lang');
+ // pick a locale
+ locale = selectLocale(locale);
+
+ this.get('intl').setLocale(locale);
+ Ember.$.cookie('lang', locale);
+ console.log('INFO: locale selected - ' + locale);
+ this.set('selectedLanguage', locale);
+ }
+ let intl = this.get('intl');
+ this.set('languages', [
+ { name: intl.t('lang.arabic'), value: 'ar-sa'},
+ { name: intl.t('lang.english'), value: 'en-us'}
+ ]);
+ },
+
+ actions: {
+ selectLanguage: function(lang) {
+ let selected = lang;
+ if (typeof selected === 'undefined') {
+ return true;
+ }
+ let locale = selectLocale(selected);
+ this.get('intl').setLocale(locale);
+ this.set('selectedLanguage', locale);
+ Ember.$.cookie('lang', locale);
+ let languages = this.get('languages');
+ for (var i = 0; i < languages.length; i++) {
+ if (languages[i].value == locale) {
+ Ember.$('#selectedLanguage').html(languages[i].name + '');
+ break;
+ }
+ }
+
+
+ return true;
+ }
+ },
+
+ model: function() {
+ var url = config.APP.ApiUrl + 'api/stats';
+ return Ember.$.getJSON(url).then(function(data) {
+ return Ember.Object.create(data);
+ });
+ },
+
+ setupController: function(controller, model) {
+ this._super(controller, model);
+ Ember.run.later(this, this.refresh, 5000);
+ model.languages = this.get('languages');
+ }
+});
diff --git a/www/app/routes/blocks.js b/www/app/routes/blocks.js
new file mode 100644
index 0000000..975d698
--- /dev/null
+++ b/www/app/routes/blocks.js
@@ -0,0 +1,32 @@
+import Ember from 'ember';
+import Block from "../models/block";
+import config from '../config/environment';
+
+export default Ember.Route.extend({
+ model: function() {
+ var url = config.APP.ApiUrl + 'api/blocks';
+ return Ember.$.getJSON(url).then(function(data) {
+ if (data.candidates) {
+ data.candidates = data.candidates.map(function(b) {
+ return Block.create(b);
+ });
+ }
+ if (data.immature) {
+ data.immature = data.immature.map(function(b) {
+ return Block.create(b);
+ });
+ }
+ if (data.matured) {
+ data.matured = data.matured.map(function(b) {
+ return Block.create(b);
+ });
+ }
+ return data;
+ });
+ },
+
+ setupController: function(controller, model) {
+ this._super(controller, model);
+ Ember.run.later(this, this.refresh, 5000);
+ }
+});
diff --git a/www/app/routes/index.js b/www/app/routes/index.js
new file mode 100644
index 0000000..cbee966
--- /dev/null
+++ b/www/app/routes/index.js
@@ -0,0 +1,11 @@
+import Ember from 'ember';
+
+export default Ember.Route.extend({
+ actions: {
+ lookup(login) {
+ if (!Ember.isEmpty(login)) {
+ return this.transitionTo('account', login);
+ }
+ }
+ }
+});
diff --git a/www/app/routes/miners.js b/www/app/routes/miners.js
new file mode 100644
index 0000000..303c416
--- /dev/null
+++ b/www/app/routes/miners.js
@@ -0,0 +1,34 @@
+import Ember from 'ember';
+import config from '../config/environment';
+
+export default Ember.Route.extend({
+ model: function() {
+ var url = config.APP.ApiUrl + 'api/miners';
+ return Ember.$.getJSON(url).then(function(data) {
+ if (data.miners) {
+ // Convert map to array
+ data.miners = Object.keys(data.miners).map((value) => {
+ let m = data.miners[value];
+ m.login = value;
+ return m;
+ });
+ // Sort miners by hashrate
+ data.miners = data.miners.sort((a, b) => {
+ if (a.hr < b.hr) {
+ return 1;
+ }
+ if (a.hr > b.hr) {
+ return -1;
+ }
+ return 0;
+ });
+ }
+ return data;
+ });
+ },
+
+ setupController: function(controller, model) {
+ this._super(controller, model);
+ Ember.run.later(this, this.refresh, 5000);
+ }
+});
diff --git a/www/app/routes/payments.js b/www/app/routes/payments.js
new file mode 100644
index 0000000..aa52f04
--- /dev/null
+++ b/www/app/routes/payments.js
@@ -0,0 +1,22 @@
+import Ember from 'ember';
+import Payment from "../models/payment";
+import config from '../config/environment';
+
+export default Ember.Route.extend({
+ model: function() {
+ var url = config.APP.ApiUrl + 'api/payments';
+ return Ember.$.getJSON(url).then(function(data) {
+ if (data.payments) {
+ data.payments = data.payments.map(function(p) {
+ return Payment.create(p);
+ });
+ }
+ return data;
+ });
+ },
+
+ setupController: function(controller, model) {
+ this._super(controller, model);
+ Ember.run.later(this, this.refresh, 5000);
+ }
+});
diff --git a/www/app/styles/app.css b/www/app/styles/app.css
new file mode 100644
index 0000000..9b11c42
--- /dev/null
+++ b/www/app/styles/app.css
@@ -0,0 +1,364 @@
+/* Sticky footer styles
+-------------------------------------------------- */
+html {
+ position: relative;
+ min-height: 100%;
+}
+body {
+ background: url('/bg.png');
+}
+
+caption, th {
+ text-align: inherit;
+}
+
+
+/* Custom page CSS
+-------------------------------------------------- */
+/* Not required for template or sticky footer method. */
+
+body {
+ padding-top: 20px;
+ padding-bottom: 0px;
+ color: #ddd;
+}
+
+body > .container {
+ padding: 0px 15px 0;
+}
+.container .text-muted {
+ margin: 20px 0;
+}
+
+/* NOTE: has no effect — a parent page cannot style elements inside an iframe. */
+#coin_calculator iframe form{
+ border:none !important;
+ border-radius: 0px !important;
+ background-color: transparent !important;
+}
+
+/* Menu CSS Stuff */
+/* Let the collapsed mobile menu grow beyond Bootstrap's default max-height. */
+.navbar-fixed-bottom .navbar-collapse, .navbar-fixed-top .navbar-collapse {
+ max-height: unset;
+}
+
+.navbar-collapse.in{
+ overflow: visible;
+}
+
+
+
+h1 {
+ font-size: 36px;
+ line-height: 42px;
+}
+
+h2 {
+ font-size: 24px;
+ line-height: 32px;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ color: #fff;
+ margin: 0px 0px 15px 0px;
+ font-weight: 400;
+ font-family: 'Poppins', sans-serif;
+}
+
+.modal-title{
+ color:#333;
+}
+
+.space-medium {
+ padding-top: 40px;
+ padding-bottom: 40px;
+}
+
+.jumbotron {
+ margin: 0;
+ padding: 40px 0 15px 0;
+ margin-bottom: 15px;
+}
+
+
+code {
+ font-size: 80%;
+}
+/* Dark navbar theme: recolors Bootstrap's .navbar-default component. */
+.navbar-default {
+ background-color: #060820;
+ border-color: transparent;
+}
+.navbar-default .navbar-brand {
+ color: #dadada;
+}
+.navbar-default .navbar-brand:hover, .navbar-default .navbar-brand:focus {
+ color: #ffffff;
+}
+.navbar-default .navbar-text {
+ color: #dadada;
+}
+.navbar-default .navbar-nav > li > a {
+ color: #dadada;
+}
+.navbar-default .navbar-nav > li > a:hover, .navbar-default .navbar-nav > li > a:focus {
+ color: #ffffff;
+}
+.navbar-default .navbar-nav > .active > a, .navbar-default .navbar-nav > .active > a:hover, .navbar-default .navbar-nav > .active > a:focus {
+ color: #ffffff;
+ background-color: #0a6c9d;
+}
+.navbar-default .navbar-nav > .open > a, .navbar-default .navbar-nav > .open > a:hover, .navbar-default .navbar-nav > .open > a:focus {
+ color: #ffffff;
+ background-color: #69102b;
+}
+.navbar-default .navbar-toggle {
+ border-color: #0a6c9d;
+}
+.navbar-default .navbar-toggle:hover, .navbar-default .navbar-toggle:focus {
+ background-color: #0a6c9d;
+}
+.navbar-default .navbar-toggle .icon-bar {
+ background-color: #dadada;
+}
+.navbar-default .navbar-collapse,
+.navbar-default .navbar-form {
+ border-color: #00ff58;
+}
+.navbar-default .navbar-link {
+ color: #dadada;
+}
+.navbar-default .navbar-link:hover {
+ color: #ffffff;
+}
+
+@media (max-width: 767px) {
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a {
+ color: #dadada;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
+ color: #ffffff;
+ }
+ .navbar-default .navbar-nav .open .dropdown-menu > .active > a, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
+ color: #ffffff;
+ background-color: #0a6c9d;
+ }
+}
+
+span.logo-1 {
+ font-weight: 700;
+ color: #53ad2d;
+}
+
+span.logo-2 {
+ font-weight: 500;
+ font-size:smaller;
+ color: #FFF;
+}
+
+span.logo-3 {
+ color: #FFF;
+ font-weight: 100;
+}
+
+.navbar-collapse {
+ font-size: 14px;
+ font-weight: 200;
+ background-color: rgb(0, 12, 34);
+}
+
+.note {
+ margin: 0 0 20px 0;
+ padding: 15px 30px 15px 15px;
+ border-left: 5px solid #eee;
+ border-radius: 15px;
+}
+
+.note-info {
+ background-color: #E8F6FC;
+ border-color: #57b5e3;
+ color: #333333;
+}
+
+.note-danger {
+ background-color: #ff9999;
+ border-color: #ff0000;
+}
+
+h4.note {
+ margin-top: 0;
+ font-weight: 300 !important;
+}
+
+.hash {
+ font-family: 'Inconsolata', monospace;
+}
+
+/* Stats */
+
+.stats-box {
+ padding: 24px 30px;
+ background: #0e102f;
+ border-radius: 15px;
+ margin-bottom: 6px;
+}
+
+
+
+
+.stats-box > h3 > i {
+ width: 21px;
+}
+
+.stats-box > div > .fa {
+ width: 25px;
+}
+.stats-box > div > span:first-of-type{
+ font-weight: bold;
+}
+
+.bg-dark {
+ background-image: linear-gradient(to bottom, #0b0c22 0%, #0e122f 70%);
+}
+/* Worker table rows shade progressively redder per offline severity class. */
+.worker-class.warning.offline-1 > td{
+ background-color: #ffd1d1;
+}
+
+.worker-class.warning.offline-2 > td{
+ background-color: #f99393;
+}
+
+.worker-class.warning.offline-3 > td{
+ background-color: #ff5959;
+ color:#ffffff;
+}
+
+.worker-class.warning.offline-4 > td{
+ background-color: #ff3e3e;
+ color:#ffffff;
+}
+
+.worker-class.warning.offline-5 > td{
+ background-color: #ff1e1e;
+ color:#ffffff;
+}
+
+/* Extends the .jumbotron rule declared earlier in this file. */
+.jumbotron{
+ background: transparent;
+ color:#bbbbbb;
+}
+
+.jumbotron small{
+ color:#919191;
+ margin-left:30px;
+}
+
+.alert{
+ border-radius:20px;
+}
+.nav-tabs{
+ margin-bottom:20px;
+}
+.nav-tabs>li>a {
+ border-radius: 15px 15px 0 0;
+}
+
+a {
+ color: #5ea2ff;
+}
+
+/* A duplicate ".table-striped>tbody>tr:nth-of-type(odd)" rule (alpha 0.5)
+   was removed here: the identical selector below always overrode it. */
+.table {
+ margin-bottom: 0px;
+}
+.table-responsive {
+ color: #444;
+ background-color: rgba(255,255,255,0.9);
+ padding: 5px 5px 0px;
+ border-radius: 20px 20px 0px 0px;
+ margin-bottom:20px;
+}
+.table-responsive a {
+ color: #004bb1;
+}
+
+/* Striped rows: translucent dark overlay on odd rows. */
+.table-striped>tbody>tr:nth-of-type(odd) {
+ background-color: rgba(0,0,0,.2);
+}
+
+.highcharts-wrapper{
+ margin-bottom:20px;
+}
+
+/* Tabbed "getting started" command examples rendered in Bootstrap panels. */
+.command_lines{
+ margin: 15px 0;
+}
+
+.command_lines ul{
+ margin-bottom:0px;
+}
+
+.command_lines h5{
+ color: #333;
+}
+.command_lines .tab-content {
+ background-color: #E8F6FC;
+ border-left: 5px solid #eee;
+ border-color: #57b5e3;
+ color: #ddd;
+ border-radius: 0 0 15px 15px;
+}
+
+
+
+.command_lines .nav-tabs {
+ border-bottom: 0;
+}
+
+.command_lines .nav-tabs>li.active>a,
+.command_lines .nav-tabs>li.active>a:focus,
+.command_lines .nav-tabs>li.active>a:hover {
+ border-left: 5px solid #eee;
+ border-color: #57b5e3;
+ border-bottom: 0px;
+}
+
+
+.command_lines .panel-group .panel+.panel {
+ margin-top: 0px;
+}
+
+.command_lines .panel-group .panel {
+ margin-bottom: 0;
+ border-radius: 0;
+}
+
+.command_lines .panel-default>.panel-heading {
+ color: #dddddd;
+ background-color: #0e102f;
+ border: 0px;
+}
+
+.command_lines .panel-default {
+ border: 0px;
+}
+
+.join_telegram{
+ border-radius: 5px;
+ font-size: x-large;
+}
+
+
+.footer { background: #020207;}
+.footer-section { text-align: center; }
+.footer-title { margin-bottom: 40px; }
+.footer > .container {
+ padding-right: 15px;
+ padding-left: 15px;
+}
diff --git a/www/app/templates/about.hbs b/www/app/templates/about.hbs
new file mode 100644
index 0000000..f6f5979
--- /dev/null
+++ b/www/app/templates/about.hbs
@@ -0,0 +1,20 @@
+
+
+
About
+
+
Terms of Service
+
By using the pool you accept all possible risks related to the use of experimental software.
+ The pool owner cannot compensate for any irreversible losses, but will do his best to prevent the worst case.
+
+
Details
+
+
+
Written in Go, it's a rocket: a highly concurrent, low-RAM-consuming piece of code
This is your address for payouts
+ Example: 0x8b92c50e1c39466f900a578edb20a49356c4fe24.
+
+
your-worker-1 your-worker-2
+
+ ID of your PC/mining-rig to distinguish it from your other rigs. If you have just one rig, feel free to omit this param.
+ This param must be a short alphanumeric string with optional dashes and underscores.
+ Example: worker-1
+