Browse Source

storage-node-v2: Remove the old storage-node.

Shamil Gadelshin 3 years ago
parent
commit
93cdda68aa
82 changed files with 0 additions and 7654 deletions
  1. 0 35
      storage-node/.eslintrc.js
  2. 0 31
      storage-node/.gitignore
  3. 0 1
      storage-node/.prettierignore
  4. 0 675
      storage-node/LICENSE.md
  5. 0 90
      storage-node/README.md
  6. 0 54
      storage-node/docs/json-signing.md
  7. 0 55
      storage-node/package.json
  8. 0 4
      storage-node/packages/cli/.eslintignore
  9. 0 40
      storage-node/packages/cli/README.md
  10. 0 14
      storage-node/packages/cli/bin/cli.js
  11. 0 56
      storage-node/packages/cli/package.json
  12. 0 126
      storage-node/packages/cli/src/cli.ts
  13. 0 93
      storage-node/packages/cli/src/commands/base.ts
  14. 0 265
      storage-node/packages/cli/src/commands/dev.ts
  15. 0 70
      storage-node/packages/cli/src/commands/download.ts
  16. 0 48
      storage-node/packages/cli/src/commands/head.ts
  17. 0 202
      storage-node/packages/cli/src/commands/upload.ts
  18. 0 1
      storage-node/packages/cli/src/test/index.ts
  19. 0 9
      storage-node/packages/cli/tsconfig.json
  20. 0 1
      storage-node/packages/colossus/.eslintrc.js
  21. 0 81
      storage-node/packages/colossus/README.md
  22. 0 33
      storage-node/packages/colossus/api-base.yml
  23. 0 340
      storage-node/packages/colossus/bin/cli.js
  24. 0 78
      storage-node/packages/colossus/lib/app.js
  25. 0 43
      storage-node/packages/colossus/lib/middleware/file_uploads.js
  26. 0 77
      storage-node/packages/colossus/lib/middleware/ipfs_proxy.js
  27. 0 61
      storage-node/packages/colossus/lib/middleware/validate_responses.js
  28. 0 120
      storage-node/packages/colossus/lib/sync.js
  29. 0 73
      storage-node/packages/colossus/package.json
  30. 0 385
      storage-node/packages/colossus/paths/asset/v0/{id}.js
  31. 0 1
      storage-node/packages/colossus/test/index.js
  32. 0 3
      storage-node/packages/helios/.gitignore
  33. 0 9
      storage-node/packages/helios/README.md
  34. 0 128
      storage-node/packages/helios/bin/cli.js
  35. 0 21
      storage-node/packages/helios/package.json
  36. 0 1
      storage-node/packages/helios/test/index.js
  37. 0 1
      storage-node/packages/runtime-api/.eslintrc.js
  38. 0 3
      storage-node/packages/runtime-api/.gitignore
  39. 0 6
      storage-node/packages/runtime-api/README.md
  40. 0 210
      storage-node/packages/runtime-api/assets.js
  41. 0 79
      storage-node/packages/runtime-api/balances.js
  42. 0 246
      storage-node/packages/runtime-api/identities.js
  43. 0 379
      storage-node/packages/runtime-api/index.js
  44. 0 58
      storage-node/packages/runtime-api/package.json
  45. 0 33
      storage-node/packages/runtime-api/system.js
  46. 0 48
      storage-node/packages/runtime-api/test/assets.js
  47. 0 44
      storage-node/packages/runtime-api/test/balances.js
  48. 0 6
      storage-node/packages/runtime-api/test/data/edwards.json
  49. 0 6
      storage-node/packages/runtime-api/test/data/edwards_unlocked.json
  50. 0 6
      storage-node/packages/runtime-api/test/data/schnorr.json
  51. 0 98
      storage-node/packages/runtime-api/test/identities.js
  52. 0 28
      storage-node/packages/runtime-api/test/index.js
  53. 0 303
      storage-node/packages/runtime-api/workers.js
  54. 0 1
      storage-node/packages/storage/.eslintrc.js
  55. 0 20
      storage-node/packages/storage/README.md
  56. 0 143
      storage-node/packages/storage/filter.js
  57. 0 25
      storage-node/packages/storage/index.js
  58. 0 55
      storage-node/packages/storage/package.json
  59. 0 437
      storage-node/packages/storage/storage.js
  60. 0 227
      storage-node/packages/storage/test/storage.js
  61. 0 1
      storage-node/packages/util/.eslintrc.js
  62. 0 11
      storage-node/packages/util/README.md
  63. 0 22
      storage-node/packages/util/externalPromise.js
  64. 0 65
      storage-node/packages/util/fs/resolve.js
  65. 0 139
      storage-node/packages/util/fs/walk.js
  66. 0 117
      storage-node/packages/util/lru.js
  67. 0 52
      storage-node/packages/util/package.json
  68. 0 160
      storage-node/packages/util/pagination.js
  69. 0 429
      storage-node/packages/util/ranges.js
  70. 0 16
      storage-node/packages/util/sleep.js
  71. 0 9
      storage-node/packages/util/stripEndingSlash.js
  72. 0 0
      storage-node/packages/util/test/data/bar
  73. 0 0
      storage-node/packages/util/test/data/foo/baz
  74. 0 1
      storage-node/packages/util/test/data/quux
  75. 0 68
      storage-node/packages/util/test/fs/resolve.js
  76. 0 67
      storage-node/packages/util/test/fs/walk.js
  77. 0 152
      storage-node/packages/util/test/lru.js
  78. 0 113
      storage-node/packages/util/test/pagination.js
  79. 0 392
      storage-node/packages/util/test/ranges.js
  80. 0 13
      storage-node/packages/util/test/stripEndingSlash.js
  81. 0 17
      storage-node/storage-node_new.svg
  82. 0 24
      storage-node/tsconfig.json

+ 0 - 35
storage-node/.eslintrc.js

@@ -1,35 +0,0 @@
-module.exports = {
-  env: {
-    node: true,
-    es6: true,
-    mocha: true,
-  },
-  rules: {
-    'import/no-commonjs': 'off', // remove after converting to TS.
-    // Disabling Rules because of monorepo environment:
-    // https://github.com/benmosher/eslint-plugin-import/issues/1174
-    'import/no-extraneous-dependencies': 'off',
-    'import/no-nodejs-modules': 'off', // nodejs project
-    'no-console': 'off', // we use console in the project
-    '@typescript-eslint/no-var-requires': 'warn',
-    '@typescript-eslint/naming-convention': 'off',
-  },
-  overrides: [
-    {
-      files: [
-        '**/test/ranges.js',
-        '**/test/lru.js',
-        '**/test/fs/walk.js',
-        '**/test/storage.js',
-        '**/test/identities.js',
-        '**/test/balances.js',
-        '**/test/assets.js',
-      ],
-      rules: {
-        // Disabling Rules because of used chai lib:
-        // https://stackoverflow.com/questions/45079454/no-unused-expressions-in-mocha-chai-unit-test-using-standardjs
-        'no-unused-expressions': 'off',
-      },
-    },
-  ],
-}

+ 0 - 31
storage-node/.gitignore

@@ -1,31 +0,0 @@
-build/
-coverage/
-dist/
-tmp/
-.DS_Store
-
-.env.local
-.env.development.local
-.env.test.local
-.env.production.local
-
-.npmrc
-package-lock.json
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# IDEs
-.idea
-.vscode
-.*.sw*
-
-# Node modules
-node_modules/
-
-# Ignore nvm config file
-.nvmrc
-
-yarn.lock
-
-*.tsbuildinfo

+ 0 - 1
storage-node/.prettierignore

@@ -1 +0,0 @@
-packages/cli/dist

+ 0 - 675
storage-node/LICENSE.md

@@ -1,675 +0,0 @@
-### GNU GENERAL PUBLIC LICENSE
-
-Version 3, 29 June 2007
-
-Copyright (C) 2007 Free Software Foundation, Inc.
-<https://fsf.org/>
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-### Preamble
-
-The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom
-to share and change all versions of a program--to make sure it remains
-free software for all its users. We, the Free Software Foundation, use
-the GNU General Public License for most of our software; it applies
-also to any other work released this way by its authors. You can apply
-it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you
-have certain responsibilities if you distribute copies of the
-software, or if you modify it: responsibilities to respect the freedom
-of others.
-
-For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
-Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the
-manufacturer can do so. This is fundamentally incompatible with the
-aim of protecting users' freedom to change the software. The
-systematic pattern of such abuse occurs in the area of products for
-individuals to use, which is precisely where it is most unacceptable.
-Therefore, we have designed this version of the GPL to prohibit the
-practice for those products. If such problems arise substantially in
-other domains, we stand ready to extend this provision to those
-domains in future versions of the GPL, as needed to protect the
-freedom of users.
-
-Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish
-to avoid the special danger that patents applied to a free program
-could make it effectively proprietary. To prevent this, the GPL
-assures that patents cannot be used to render the program non-free.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-### TERMS AND CONDITIONS
-
-#### 0. Definitions.
-
-"This License" refers to version 3 of the GNU General Public License.
-
-"Copyright" also means copyright-like laws that apply to other kinds
-of works, such as semiconductor masks.
-
-"The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
-To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of
-an exact copy. The resulting work is called a "modified version" of
-the earlier work or a work "based on" the earlier work.
-
-A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user
-through a computer network, with no transfer of a copy, is not
-conveying.
-
-An interactive user interface displays "Appropriate Legal Notices" to
-the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-#### 1. Source Code.
-
-The "source code" for a work means the preferred form of the work for
-making modifications to it. "Object code" means any non-source form of
-a work.
-
-A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-The Corresponding Source need not include anything that users can
-regenerate automatically from other parts of the Corresponding Source.
-
-The Corresponding Source for a work in source code form is that same
-work.
-
-#### 2. Basic Permissions.
-
-All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-You may make, run and propagate covered works that you do not convey,
-without conditions so long as your license otherwise remains in force.
-You may convey covered works to others for the sole purpose of having
-them make modifications exclusively for you, or provide you with
-facilities for running those works, provided that you comply with the
-terms of this License in conveying all material for which you do not
-control copyright. Those thus making or running the covered works for
-you must do so exclusively on your behalf, under your direction and
-control, on terms that prohibit them from making any copies of your
-copyrighted material outside their relationship with you.
-
-Conveying under any other circumstances is permitted solely under the
-conditions stated below. Sublicensing is not allowed; section 10 makes
-it unnecessary.
-
-#### 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such
-circumvention is effected by exercising rights under this License with
-respect to the covered work, and you disclaim any intention to limit
-operation or modification of the work as a means of enforcing, against
-the work's users, your or third parties' legal rights to forbid
-circumvention of technological measures.
-
-#### 4. Conveying Verbatim Copies.
-
-You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-#### 5. Conveying Modified Source Versions.
-
-You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these
-conditions:
-
-- a) The work must carry prominent notices stating that you modified
-  it, and giving a relevant date.
-- b) The work must carry prominent notices stating that it is
-  released under this License and any conditions added under
-  section 7. This requirement modifies the requirement in section 4
-  to "keep intact all notices".
-- c) You must license the entire work, as a whole, under this
-  License to anyone who comes into possession of a copy. This
-  License will therefore apply, along with any applicable section 7
-  additional terms, to the whole of the work, and all its parts,
-  regardless of how they are packaged. This License gives no
-  permission to license the work in any other way, but it does not
-  invalidate such permission if you have separately received it.
-- d) If the work has interactive user interfaces, each must display
-  Appropriate Legal Notices; however, if the Program has interactive
-  interfaces that do not display Appropriate Legal Notices, your
-  work need not make them do so.
-
-A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-#### 6. Conveying Non-Source Forms.
-
-You may convey a covered work in object code form under the terms of
-sections 4 and 5, provided that you also convey the machine-readable
-Corresponding Source under the terms of this License, in one of these
-ways:
-
-- a) Convey the object code in, or embodied in, a physical product
-  (including a physical distribution medium), accompanied by the
-  Corresponding Source fixed on a durable physical medium
-  customarily used for software interchange.
-- b) Convey the object code in, or embodied in, a physical product
-  (including a physical distribution medium), accompanied by a
-  written offer, valid for at least three years and valid for as
-  long as you offer spare parts or customer support for that product
-  model, to give anyone who possesses the object code either (1) a
-  copy of the Corresponding Source for all the software in the
-  product that is covered by this License, on a durable physical
-  medium customarily used for software interchange, for a price no
-  more than your reasonable cost of physically performing this
-  conveying of source, or (2) access to copy the Corresponding
-  Source from a network server at no charge.
-- c) Convey individual copies of the object code with a copy of the
-  written offer to provide the Corresponding Source. This
-  alternative is allowed only occasionally and noncommercially, and
-  only if you received the object code with such an offer, in accord
-  with subsection 6b.
-- d) Convey the object code by offering access from a designated
-  place (gratis or for a charge), and offer equivalent access to the
-  Corresponding Source in the same way through the same place at no
-  further charge. You need not require recipients to copy the
-  Corresponding Source along with the object code. If the place to
-  copy the object code is a network server, the Corresponding Source
-  may be on a different server (operated by you or a third party)
-  that supports equivalent copying facilities, provided you maintain
-  clear directions next to the object code saying where to find the
-  Corresponding Source. Regardless of what server hosts the
-  Corresponding Source, you remain obligated to ensure that it is
-  available for as long as needed to satisfy these requirements.
-- e) Convey the object code using peer-to-peer transmission,
-  provided you inform other peers where the object code and
-  Corresponding Source of the work are being offered to the general
-  public at no charge under subsection 6d.
-
-A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal,
-family, or household purposes, or (2) anything designed or sold for
-incorporation into a dwelling. In determining whether a product is a
-consumer product, doubtful cases shall be resolved in favor of
-coverage. For a particular product received by a particular user,
-"normally used" refers to a typical or common use of that class of
-product, regardless of the status of the particular user or of the way
-in which the particular user actually uses, or expects or is expected
-to use, the product. A product is a consumer product regardless of
-whether the product has substantial commercial, industrial or
-non-consumer uses, unless such uses represent the only significant
-mode of use of the product.
-
-"Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to
-install and execute modified versions of a covered work in that User
-Product from a modified version of its Corresponding Source. The
-information must suffice to ensure that the continued functioning of
-the modified object code is in no case prevented or interfered with
-solely because modification has been made.
-
-If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or
-updates for a work that has been modified or installed by the
-recipient, or for the User Product in which it has been modified or
-installed. Access to a network may be denied when the modification
-itself materially and adversely affects the operation of the network
-or violates the rules and protocols for communication across the
-network.
-
-Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-#### 7. Additional Terms.
-
-"Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders
-of that material) supplement the terms of this License with terms:
-
-- a) Disclaiming warranty or limiting liability differently from the
-  terms of sections 15 and 16 of this License; or
-- b) Requiring preservation of specified reasonable legal notices or
-  author attributions in that material or in the Appropriate Legal
-  Notices displayed by works containing it; or
-- c) Prohibiting misrepresentation of the origin of that material,
-  or requiring that modified versions of such material be marked in
-  reasonable ways as different from the original version; or
-- d) Limiting the use for publicity purposes of names of licensors
-  or authors of the material; or
-- e) Declining to grant rights under trademark law for use of some
-  trade names, trademarks, or service marks; or
-- f) Requiring indemnification of licensors and authors of that
-  material by anyone who conveys the material (or modified versions
-  of it) with contractual assumptions of liability to the recipient,
-  for any liability that these contractual assumptions directly
-  impose on those licensors and authors.
-
-All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions; the
-above requirements apply either way.
-
-#### 8. Termination.
-
-You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-However, if you cease all violation of this License, then your license
-from a particular copyright holder is reinstated (a) provisionally,
-unless and until the copyright holder explicitly and finally
-terminates your license, and (b) permanently, if the copyright holder
-fails to notify you of the violation by some reasonable means prior to
-60 days after the cessation.
-
-Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-#### 9. Acceptance Not Required for Having Copies.
-
-You are not required to accept this License in order to receive or run
-a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-#### 10. Automatic Licensing of Downstream Recipients.
-
-Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
-An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-#### 11. Patents.
-
-A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
-A contributor's "essential patent claims" are all patent claims owned
-or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-A patent license is "discriminatory" if it does not include within the
-scope of its coverage, prohibits the exercise of, or is conditioned on
-the non-exercise of one or more of the rights that are specifically
-granted under this License. You may not convey a covered work if you
-are a party to an arrangement with a third party that is in the
-business of distributing software, under which you make payment to the
-third party based on the extent of your activity of conveying the
-work, and under which the third party grants, to any of the parties
-who would receive the covered work from you, a discriminatory patent
-license (a) in connection with copies of the covered work conveyed by
-you (or copies made from those copies), or (b) primarily for and in
-connection with specific products or compilations that contain the
-covered work, unless you entered into that arrangement, or that patent
-license was granted, prior to 28 March 2007.
-
-Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-#### 12. No Surrender of Others' Freedom.
-
-If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under
-this License and any other pertinent obligations, then as a
-consequence you may not convey it at all. For example, if you agree to
-terms that obligate you to collect a royalty for further conveying
-from those to whom you convey the Program, the only way you could
-satisfy both those terms and this License would be to refrain entirely
-from conveying the Program.
-
-#### 13. Use with the GNU Affero General Public License.
-
-Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-#### 14. Revised Versions of this License.
-
-The Free Software Foundation may publish revised and/or new versions
-of the GNU General Public License from time to time. Such new versions
-will be similar in spirit to the present version, but may differ in
-detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies that a certain numbered version of the GNU General Public
-License "or any later version" applies to it, you have the option of
-following the terms and conditions either of that numbered version or
-of any later version published by the Free Software Foundation. If the
-Program does not specify a version number of the GNU General Public
-License, you may choose any version ever published by the Free
-Software Foundation.
-
-If the Program specifies that a proxy can decide which future versions
-of the GNU General Public License can be used, that proxy's public
-statement of acceptance of a version permanently authorizes you to
-choose that version for the Program.
-
-Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-#### 15. Disclaimer of Warranty.
-
-THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT
-WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
-PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
-DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
-CORRECTION.
-
-#### 16. Limitation of Liability.
-
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR
-CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
-ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT
-NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
-LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM
-TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
-PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-#### 17. Interpretation of Sections 15 and 16.
-
-If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-END OF TERMS AND CONDITIONS
-
-### How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these
-terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively state
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-        <one line to give the program's name and a brief idea of what it does.>
-        Copyright (C) <year>  <name of author>
-
-        This program is free software: you can redistribute it and/or modify
-        it under the terms of the GNU General Public License as published by
-        the Free Software Foundation, either version 3 of the License, or
-        (at your option) any later version.
-
-        This program is distributed in the hope that it will be useful,
-        but WITHOUT ANY WARRANTY; without even the implied warranty of
-        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-        GNU General Public License for more details.
-
-        You should have received a copy of the GNU General Public License
-        along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper
-mail.
-
-If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-        <program>  Copyright (C) <year>  <name of author>
-        This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-        This is free software, and you are welcome to redistribute it
-        under certain conditions; type `show c' for details.
-
-The hypothetical commands \`show w' and \`show c' should show the
-appropriate parts of the General Public License. Of course, your
-program's commands might be different; for a GUI interface, you would
-use an "about box".
-
-You should also get your employer (if you work as a programmer) or
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. For more information on this, and how to apply and follow
-the GNU GPL, see <https://www.gnu.org/licenses/>.
-
-The GNU General Public License does not permit incorporating your
-program into proprietary programs. If your program is a subroutine
-library, you may consider it more useful to permit linking proprietary
-applications with the library. If this is what you want to do, use the
-GNU Lesser General Public License instead of this License. But first,
-please read <https://www.gnu.org/licenses/why-not-lgpl.html>.

+ 0 - 90
storage-node/README.md

@@ -1,90 +0,0 @@
-![Storage Nodes for Joystream](./storage-node_new.svg)
-
-This repository contains several Node packages, located under the `packages/`
-subdirectory. See each individual package for details:
-
-- [colossus](./packages/colossus/README.md) - the main colossus app.
-- [storage-node-backend](./packages/storage/README.md) - abstraction over the storage backend.
-- [storage-runtime-api](./packages/runtime-api/README.md) - convenience wrappers for the runtime API.
-- [storage-utils](./packages/util/README.md) - general utility functions.
-- [discovery](./packages/discovery/README.md) - service discovery using IPNS.
-- [storage-cli](./packages/cli/README.md) - cli for uploading and downloading content from the network
-- [helios](./packages/helios/README.md) - cli tool for getting status of storage network
-
-## Installation
-
-_Requirements_
-
-This project uses [yarn](https://yarnpkg.com/) as Node package manager. It also
-uses some node packages with native components, so make sure to install your
-system's basic build tools.
-
-On Debian-based systems:
-
-```bash
-$ apt install build-essential
-```
-
-On Mac OS (using [homebrew](https://brew.sh/)):
-
-```bash
-$ brew install libtool automake autoconf
-```
-
-_Building_
-
-```bash
-$ yarn install
-$ yarn build
-```
-
-The command will install dependencies, and make a `colossus` executable available:
-
-```bash
-$ yarn colossus --help
-```
-
-_Testing_
-
-Run an ipfs node and a joystream-node development chain (in separate terminals)
-
-```sh
-ipfs daemon
-```
-
-```sh
-joystream-node --dev
-```
-
-```sh
-$ yarn workspace storage-node test
-```
-
-Running a development environment, after starting the ipfs node and development chain
-
-```sh
-yarn storage-cli dev-init
-```
-
-This will configure the running chain with alice as the storage lead and with a known role key for
-the storage provider.
-
-Run colossus in development mode:
-
-```sh
-yarn colossus --dev
-```
-
-Start pioneer ui:
-
-```sh
-yarn workspace pioneer start
-
-```
-
-Browse pioneer on http://localhost:3000/
-You should find Alice account is the storage working group lead and is a storage provider.
-
-## Detailed Setup and Configuration Guide
-
-For details on how to setup a storage node on the Joystream network, follow this [step by step guide](https://github.com/Joystream/helpdesk/tree/master/roles/storage-providers).

+ 0 - 54
storage-node/docs/json-signing.md

@@ -1,54 +0,0 @@
-# JSON Data Signing
-
-As serializing and deserializing JSON is not deterministic, but may depend
-on the order in which keys are added or even the system's collation method,
-signing JSON cryptographically is fraught with issues. We circumvent them
-by wrapping any JSON to be signed in another JSON object:
-
-- `version` contains the version of the wrapper JSON, currently always `1`.
-- `serialized` contains the serialized version of the data, currently this
-  will be the base64 encoded, serialized JSON payload.
-- `signature` contains the base64 encoded signature of the `serialized` field
-  value prior to its base64 encoding.
-- `payload` [optional] contains the deserialized JSON object corresponding
-  to the `serialized` payload.
-
-For signing and verification, we'll use polkadot's _ed25519_ or _sr25519_ keys
-directly.
-
-## Signing Process
-
-Given some structured data:
-
-1. Serialize the structured data into a JSON string.
-1. Create a signature over the serialized JSON string.
-1. Create a new structured data with the appropriate `version` field.
-1. Add a base64 encoded version of the serialized JSON string as the `serialized` field.
-1. Add a base64 encoded version of the signature as the `signature` field.
-1. Optionally add the original structured data as the `payload` field.
-
-## Verification Process
-
-1. Verify data contains a `version`, `serialized` and `signature` field.
-1. Currently, verify that the `version` field's value is `1`.
-1. Try to base64 decode the `serialized` and `signature` fields.
-1. Verify that the decoded `signature` is valid for the decoded `serialized`
-   field.
-1. JSON deserialize the decoded `serialized` field.
-1. Add the resulting structured data as the `payload` field, and return the
-   modified object.
-
-# Alternatives
-
-There are alternative schemes available for signing JSON objects, but they
-have specific issues we'd like to avoid.
-
-- [JOSE](https://jose.readthedocs.io/en/latest/) has no support for the _ed25519_
-  or _sr25519_ keys used in polkadot apps, and
-  [appears to be fraught with security issues](https://paragonie.com/blog/2017/03/jwt-json-web-tokens-is-bad-standard-that-everyone-should-avoid).
-  Either makes its use hard to justify.
-- While [PASETO](https://paseto.io/) does use _ed25519_ keys and seems to have
-  a reasonably robuts JavaScript implementation, it requires its secret keys to
-  be 512 bits long, while polkadot provides 256 bit secret keys. The implication
-  is that we would have to manage 512 bit keys and their corresponding public
-  keys as linked to polkadot's keys, which is cumbersome at the very least.

+ 0 - 55
storage-node/package.json

@@ -1,55 +0,0 @@
-{
-  "private": true,
-  "name": "storage-node",
-  "version": "1.0.0",
-  "engines": {
-    "node": ">=14.0.0",
-    "yarn": "^1.22.0"
-  },
-  "homepage": "https://github.com/Joystream/joystream/",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "scripts": {
-    "test": "wsrun --serial test",
-    "lint": "eslint --ext .js,.ts --ignore-path .gitignore .",
-    "build": "yarn workspace @joystream/storage-cli run build",
-    "checks": "prettier . --check && yarn lint",
-    "format": "prettier . --write"
-  },
-  "devDependencies": {
-    "@types/chai": "^4.2.11",
-    "@types/mocha": "^7.0.2",
-    "eslint": "^7.6.0",
-    "eslint-config-esnext": "^4.1.0",
-    "eslint-config-prettier": "^6.11.0",
-    "eslint-plugin-babel": "^5.3.1",
-    "eslint-plugin-prettier": "^3.1.4",
-    "prettier": "^2.0.5",
-    "typescript": "^3.9.6",
-    "wsrun": "^3.6.5"
-  },
-  "volta": {
-    "extends": "../package.json"
-  }
-}

+ 0 - 4
storage-node/packages/cli/.eslintignore

@@ -1,4 +0,0 @@
-**/build/*
-**/dist/*
-**/coverage/*
-**/node_modules/*

+ 0 - 40
storage-node/packages/cli/README.md

@@ -1,40 +0,0 @@
-# A CLI for the Joystream Runtime & Colossus
-
-- CLI access for some functionality from other packages in the storage-node workspace
-- Colossus/storage node functionality:
-  - File uploads
-  - File downloads
-- Development
-  - Setup development environment
-
-Running the storage cli tool:
-
-```sh
-$ yarn storage-cli --help
-```
-
-```sh
-
-  Joystream tool for uploading and downloading files to the network
-
-  Usage:
-    $ storage-cli command [arguments..] [key_file] [passphrase]
-
-  Some commands require a key file as the last option holding the identity for
-  interacting with the runtime API.
-
-  Commands:
-    upload            Upload a file to a Colossus storage node. Requires a
-                      storage node URL, and a local file name to upload. As
-                      an optional third parameter, you can provide a Data
-                      Object Type ID - this defaults to "1" if not provided.
-    download          Retrieve a file. Requires a storage node URL and a content
-                      ID, as well as an output filename.
-    head              Send a HEAD request for a file, and print headers.
-                      Requires a storage node URL and a content ID.
-
-  Dev Commands:       Commands to run on a development chain.
-    dev-init          Setup chain with Alice as lead and storage provider.
-    dev-check         Check the chain is setup with Alice as lead and storage provider.
-
-```

+ 0 - 14
storage-node/packages/cli/bin/cli.js

@@ -1,14 +0,0 @@
-#!/usr/bin/env node
-
-const chalk = require('chalk')
-const { main } = require('../dist/cli')
-
-main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(`Error: ${JSON.stringify(err)}`))
-    console.error(chalk.red(`Stack: ${err.stack}`))
-    process.exit(-1)
-  })

+ 0 - 56
storage-node/packages/cli/package.json

@@ -1,56 +0,0 @@
-{
-  "name": "@joystream/storage-cli",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Joystream tool for uploading and downloading files to the network",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'dist/test/**/*.js'",
-    "lint": "eslint --ext .js,.ts . && tsc --noEmit --pretty",
-    "build": "(rm tsconfig.tsbuildinfo || :) && tsc --build"
-  },
-  "bin": {
-    "storage-cli": "./bin/cli.js"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@joystream/storage-utils": "^0.1.0",
-    "@joystream/types": "^0.16.1",
-    "axios": "^0.21.1",
-    "chalk": "^2.4.2",
-    "lodash": "^4.17.11",
-    "meow": "^5.0.0",
-    "ipfs-only-hash": "^1.0.2"
-  }
-}

+ 0 - 126
storage-node/packages/cli/src/cli.ts

@@ -1,126 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-import { RuntimeApi } from '@joystream/storage-runtime-api'
-import meow from 'meow'
-import _ from 'lodash'
-
-// Commands
-import * as dev from './commands/dev'
-import { HeadCommand } from './commands/head'
-import { DownloadCommand } from './commands/download'
-import { UploadCommand } from './commands/upload'
-
-// Parse CLI
-const FLAG_DEFINITIONS = {
-  // TODO: current version of meow doesn't support subcommands. We should consider a migration to yargs or oclif.
-}
-
-const usage = `
-  Usage:
-    $ storage-cli command [arguments..]
-
-  Commands:
-    upload            Upload a file to the Joystream Network. Requires a
-                      source file path to upload, data object ID, member ID and account key file with
-                      pass phrase to unlock it.
-    download          Retrieve a file. Requires a content and an output filename.
-    head              Send a HEAD request for a file, and print headers.
-                      Requires a storage node URL and a content ID.
-
-  Dev Commands:       Commands to run on a development chain.
-    dev-init          Setup chain with Alice as lead and storage provider.
-    dev-check         Check the chain is setup with Alice as lead and storage provider.
-    sudo-create-sp    Initialize the chain with a lead storage provider.
-    
-  Type 'storage-cli command' for the exact command usage examples.
-  `
-
-const cli = meow(usage, { flags: FLAG_DEFINITIONS })
-
-// Shows a message, CLI general usage and exits.
-function showUsageAndExit(message: string) {
-  console.log(message)
-  console.log(usage)
-  process.exit(1)
-}
-
-const commands = {
-  // add Alice well known account as storage provider
-  'dev-init': async (api) => {
-    return dev.init(api)
-  },
-  // Checks that the setup done by dev-init command was successful
-  'dev-check': async (api) => {
-    return dev.check(api)
-  },
-  'sudo-create-sp': async (api) => {
-    return dev.makeMemberInitialLeadAndStorageProvider(api)
-  },
-  // Uploads the file to the system. Registers new data object in the runtime, obtains proper colossus instance URL.
-  upload: async (
-    api: any,
-    filePath: string,
-    dataObjectTypeId: string,
-    memberId: string,
-    keyFile: string,
-    passPhrase: string
-  ) => {
-    const uploadCmd = new UploadCommand(api, filePath, dataObjectTypeId, memberId, keyFile, passPhrase)
-
-    await uploadCmd.run()
-  },
-  download: async (api: any, contentId: string, filePath: string) => {
-    const downloadCmd = new DownloadCommand(api, contentId, filePath)
-
-    await downloadCmd.run()
-  },
-  // Shows asset information derived from response headers.
-  // Accepts colossus URL and content ID.
-  head: async (api: any, storageNodeUrl: string, contentId: string) => {
-    const headCmd = new HeadCommand(api, storageNodeUrl, contentId)
-
-    await headCmd.run()
-  },
-}
-
-// Entry point.
-export async function main() {
-  const api = await RuntimeApi.create({ retries: 3 })
-
-  // Simple CLI commands
-  const command = cli.input[0]
-  if (!command) {
-    showUsageAndExit('Enter the command, please.')
-  }
-
-  if (Object.prototype.hasOwnProperty.call(commands, command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    try {
-      await commands[command](api, ...args)
-    } catch (err) {
-      console.error('Command Failed:', err)
-      process.exit(-1)
-    }
-  } else {
-    showUsageAndExit(`Command "${command}" not recognized.`)
-  }
-}

+ 0 - 93
storage-node/packages/cli/src/commands/base.ts

@@ -1,93 +0,0 @@
-import chalk from 'chalk'
-import removeEndingForwardSlash from '@joystream/storage-utils/stripEndingSlash'
-import { ContentId } from '@joystream/types/storage'
-import Debug from 'debug'
-const debug = Debug('joystream:storage-cli:base')
-
-// Commands base abstract class. Contains reusable methods.
-export abstract class BaseCommand {
-  protected readonly api: any
-
-  constructor(api: any) {
-    this.api = api
-  }
-
-  // Creates the Colossus asset URL and logs it.
-  protected createAndLogAssetUrl(url: string, contentId: string | ContentId): string {
-    let normalizedContentId: string
-
-    if (typeof contentId === 'string') {
-      normalizedContentId = contentId
-    } else {
-      normalizedContentId = contentId.encode()
-    }
-
-    const normalizedUrl = removeEndingForwardSlash(url)
-    const assetUrl = `${normalizedUrl}/asset/v0/${normalizedContentId}`
-    console.log(chalk.yellow('Generated asset URL:', assetUrl))
-
-    return assetUrl
-  }
-
-  // Abstract method to provide parameter validation.
-  protected abstract validateParameters(): boolean
-
-  // Abstract method to show command usage.
-  protected abstract showUsage()
-
-  // Checks command parameters and shows the usage if necessary.
-  protected assertParameters(): boolean {
-    // Create, validate and show parameters.
-    if (!this.validateParameters()) {
-      console.log(chalk.yellow(`Invalid parameters for the command:`))
-      this.showUsage()
-
-      return false
-    }
-
-    return true
-  }
-
-  // Shows the error message and ends the process with error code.
-  protected fail(message: string): void {
-    console.log(chalk.red(message))
-    process.exit(1)
-  }
-
-  protected maxContentSize(): number {
-    // Maximum content length for the assets (files)
-    return 2000 * 1024 * 1024
-  }
-
-  // Requests the runtime and obtains the storage node endpoint URL.
-  protected async getStorageProviderEndpoint(storageProviderId: string): Promise<string> {
-    try {
-      const endpoint = await this.api.workers.getWorkerStorageValue(storageProviderId)
-
-      debug(`Resolved endpoint: ${endpoint}`)
-
-      return endpoint
-    } catch (err) {
-      this.fail(`Could not get provider endpoint: ${err}`)
-    }
-  }
-
-  protected async getAnyProviderEndpoint(): Promise<string> {
-    try {
-      const providers = await this.api.workers.getAllProviders()
-
-      debug(`Available Providers: ${providers}`)
-      // select first provider
-      do {
-        const id = providers.ids.pop()
-        const endpoint = await this.getStorageProviderEndpoint(id)
-        if (endpoint) {
-          return endpoint
-        }
-      } while (providers.ids.length)
-      throw new Error('No Providers registered endpoint')
-    } catch (err) {
-      this.fail(`Could not get provider endpoint: ${err}`)
-    }
-  }
-}

+ 0 - 265
storage-node/packages/cli/src/commands/dev.ts

@@ -1,265 +0,0 @@
-'use strict'
-
-import dbug from 'debug'
-import { KeyringPair } from '@polkadot/keyring/types'
-import { RuntimeApi } from '@joystream/storage-runtime-api'
-import { GenericJoyStreamRoleSchema as HRTJson } from '@joystream/types/hiring/schemas/role.schema.typings'
-
-const debug = dbug('joystream:storage-cli:dev')
-
-// Derivation path appended to well known development seed used on
-// development chains
-const ALICE_URI = '//Alice'
-const ROLE_ACCOUNT_URI = '//Colossus'
-
-function aliceKeyPair(api: RuntimeApi): KeyringPair {
-  return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
-}
-
-function roleKeyPair(api: RuntimeApi): KeyringPair {
-  return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
-}
-
-function getKeyFromAddressOrSuri(api: RuntimeApi, addressOrSuri: string) {
-  // Get key from keyring if it is an address
-  try {
-    return api.identities.keyring.getPair(addressOrSuri)
-  } catch (err) {
-    debug('supplied argument was not an address')
-  }
-
-  // Assume a SURI, add to keyring and return keypair
-  return api.identities.keyring.addFromUri(addressOrSuri, null, 'sr25519')
-}
-
-function developmentPort(): number {
-  return 3001
-}
-
-// Checks the chain state for the storage provider setup we expect
-// to have if the initialization was successfully run prior.
-// Returns the provider id if found, throws otherwise.
-const check = async (api): Promise<any> => {
-  const roleAccountId = roleKeyPair(api).address
-  const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
-
-  if (providerId === null) {
-    throw new Error('Dev storage provider not found on chain.')
-  }
-
-  console.log(`
-  Chain is setup with Dev storage provider:
-    providerId = ${providerId}
-    roleAccountId = ${roleAccountId}
-    roleKey = ${ROLE_ACCOUNT_URI}
-  `)
-
-  return providerId
-}
-
-// Setup Alice account on a developement chain as
-// a member, storage lead, and a storage provider using a deterministic
-// development key for the role account
-const init = async (api: RuntimeApi): Promise<any> => {
-  debug('Ensuring we are on Development chain')
-  if (!(await api.system.isDevelopmentChain())) {
-    console.log('This command should only be run on a Development chain')
-    return
-  }
-
-  // check if the initialization was previously run, skip if so.
-  try {
-    await check(api)
-    return
-  } catch (err) {
-    // We didn't find a storage provider with expected role account
-  }
-
-  // Load alice keypair into keyring
-  const alice = aliceKeyPair(api).address
-  const roleAccount = roleKeyPair(api).address
-
-  debug(`Ensuring Alice ${alice} is sudo.`)
-
-  // make sure alice is sudo - indirectly checking this is a dev chain
-  const sudo = await api.identities.getSudoAccount()
-
-  if (!sudo.eq(alice)) {
-    throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
-  }
-
-  console.log('Running setup.')
-
-  debug('Ensuring Alice is as member.')
-  let aliceMemberId = await api.identities.firstMemberIdOf(alice)
-
-  if (aliceMemberId === undefined) {
-    debug('Registering Alice as member.')
-    aliceMemberId = await api.identities.registerMember(alice, {
-      handle: 'alice',
-    })
-  } else {
-    debug('Alice is already a member.')
-  }
-
-  debug('Transferring tokens to storage role account.')
-  // Give role account some tokens to work with
-  api.balances.transfer(alice, roleAccount, 100000)
-
-  // Make alice the storage lead
-  debug('Making Alice the storage Lead.')
-  const leadOpeningId = await api.workers.devAddStorageLeadOpening()
-  const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
-  api.workers.devBeginLeadOpeningReview(leadOpeningId)
-  await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
-
-  const leadAccount = await api.workers.getLeadRoleAccount()
-  if (!leadAccount.eq(alice)) {
-    throw new Error('Setting alice as lead failed.')
-  }
-
-  // Create a storage openinging, apply, start review, and fill opening
-  debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider.`)
-
-  const openingId = await api.workers.devAddStorageOpening()
-  debug(`Created new storage opening: ${openingId}`)
-
-  const applicationId = await api.workers.devApplyOnOpening(openingId, aliceMemberId, alice, roleAccount)
-  debug(`Applied with application id: ${applicationId}`)
-
-  api.workers.devBeginStorageOpeningReview(openingId)
-
-  debug(`Filling storage opening.`)
-  const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
-
-  debug(`Assigned storage provider id: ${providerId}`)
-
-  return check(api)
-}
-
-// Using sudo to create initial storage lead and worker with given keys taken from env variables.
-// Used to quickly setup a storage provider on a new chain before a council is ready.
-const makeMemberInitialLeadAndStorageProvider = async (api: RuntimeApi): Promise<any> => {
-  if (await api.workers.getLeadRoleAccount()) {
-    throw new Error('The Storage Lead is already set!')
-  }
-
-  if (!process.env.SUDO_URI) {
-    throw new Error('required SUDO_URI env variable was not set')
-  }
-
-  if (!process.env.MEMBER_ID) {
-    throw new Error('required MEMBER_ID env variable was not set')
-  }
-
-  if (!process.env.MEMBER_CONTROLLER_URI) {
-    throw new Error('required MEMBER_CONTROLLER_URI env variable was not set')
-  }
-
-  if (!process.env.STORAGE_WORKER_ADDRESS) {
-    throw new Error('required STORAGE_WORKER_ADDRESS env variable was not set')
-  }
-
-  const sudoKey = getKeyFromAddressOrSuri(api, process.env.SUDO_URI)
-  const memberId = parseInt(process.env.MEMBER_ID)
-  const memberController = getKeyFromAddressOrSuri(api, process.env.MEMBER_CONTROLLER_URI).address
-  const leadAccount = memberController
-  const workerAccount = process.env.STORAGE_WORKER_ADDRESS
-
-  const sudo = await api.identities.getSudoAccount()
-
-  // Ensure correct sudo key was provided
-  if (!sudo.eq(sudoKey.address)) {
-    throw new Error('Provided SUDO_URI is not the chain sudo')
-  }
-
-  // Ensure MEMBER_ID and MEMBER_CONTROLLER_URI are valid
-  const memberIds = await api.identities.memberIdsOfController(memberController)
-  if (memberIds.find((id) => id.eq(memberId)) === undefined) {
-    throw new Error(
-      'MEMBER_ID and MEMBER_CONTROLLER_URI do not correspond to a registered member and their controller account'
-    )
-  }
-
-  // Ensure STORAGE_WORKER_ADDRESS is a valid Address
-  api.identities.keyring.decodeAddress(workerAccount)
-
-  debug(`Creating Leader with role key: ${leadAccount}`)
-  debug('Creating Lead Opening')
-  const leadOpeningId = await api.workers.devAddStorageLeadOpening(JSON.stringify(getLeadOpeningInfo()))
-  debug('Applying')
-  const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, memberId, memberController, leadAccount)
-  debug('Starting Review')
-  api.workers.devBeginLeadOpeningReview(leadOpeningId)
-  debug('Filling Opening')
-  await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
-
-  const setLeadAccount = await api.workers.getLeadRoleAccount()
-  if (!setLeadAccount.eq(leadAccount)) {
-    throw new Error('Setting Lead failed!')
-  }
-
-  // Create a storage openinging, apply, start review, and fill opening
-  debug(`Making ${workerAccount} account a storage provider.`)
-
-  const openingId = await api.workers.devAddStorageOpening(JSON.stringify(getWorkerOpeningInfo()))
-  debug(`Created new storage opening: ${openingId}`)
-
-  const applicationId = await api.workers.devApplyOnOpening(openingId, memberId, memberController, workerAccount)
-  debug(`Applied with application id: ${applicationId}`)
-
-  api.workers.devBeginStorageOpeningReview(openingId)
-
-  debug(`Filling storage opening.`)
-  const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
-
-  debug(`Assigned storage provider id: ${providerId}`)
-}
-
-function getLeadOpeningInfo(): HRTJson {
-  return {
-    'version': 1,
-    'headline': 'Initial Storage Lead',
-    'job': {
-      'title': 'Bootstrap Lead',
-      'description': 'Starting opportunity to bootstrap the network',
-    },
-    'application': {
-      'sections': [],
-    },
-    'reward': 'None',
-    'creator': {
-      'membership': {
-        'handle': 'mokhtar',
-      },
-    },
-    'process': {
-      'details': ['automated'],
-    },
-  }
-}
-
-function getWorkerOpeningInfo(): HRTJson {
-  return {
-    'version': 1,
-    'headline': 'Initial Storage Worker',
-    'job': {
-      'title': 'Bootstrap Worker',
-      'description': 'Starting opportunity to bootstrap the network',
-    },
-    'application': {
-      'sections': [],
-    },
-    'reward': 'None',
-    'creator': {
-      'membership': {
-        'handle': 'mokhtar',
-      },
-    },
-    'process': {
-      'details': ['automated'],
-    },
-  }
-}
-
-export { init, check, aliceKeyPair, roleKeyPair, developmentPort, makeMemberInitialLeadAndStorageProvider }

+ 0 - 70
storage-node/packages/cli/src/commands/download.ts

@@ -1,70 +0,0 @@
-import axios from 'axios'
-import chalk from 'chalk'
-import fs from 'fs'
-import { BaseCommand } from './base'
-
-// Download command class. Validates input parameters and execute the logic for asset downloading.
-export class DownloadCommand extends BaseCommand {
-  private readonly contentId: string
-  private readonly outputFilePath: string
-
-  constructor(api: any, contentId: string, outputFilePath: string) {
-    super(api)
-
-    this.contentId = contentId
-    this.outputFilePath = outputFilePath
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return this.contentId && this.contentId !== '' && this.outputFilePath && this.outputFilePath !== ''
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:   storage-cli download contentID filePath
-        Example: storage-cli download 5Ec3PL3wbutqvDykhNxXJFEWSdw9rS4LBsGUXH9gSusFzc5X ./movie.mp4
-      `)
-    )
-  }
-
-  // Command executor.
-  async run(): Promise<void> {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const storageNodeUrl = await this.getAnyProviderEndpoint()
-
-    const assetUrl = this.createAndLogAssetUrl(storageNodeUrl, this.contentId)
-    console.log(chalk.yellow('File path:', this.outputFilePath))
-
-    // Create file write stream and set error handler.
-    const writer = fs.createWriteStream(this.outputFilePath).on('error', (err) => {
-      this.fail(`File write failed: ${err}`)
-    })
-
-    // Request file download.
-    try {
-      const response = await axios({
-        url: assetUrl,
-        method: 'GET',
-        responseType: 'stream',
-        // max length of response
-        maxContentLength: this.maxContentSize(),
-      })
-
-      response.data.pipe(writer)
-
-      return new Promise((resolve) => {
-        writer.on('finish', () => {
-          console.log('File downloaded.')
-          resolve()
-        })
-      })
-    } catch (err) {
-      this.fail(`Colossus request failed: ${err.message}`)
-    }
-  }
-}

+ 0 - 48
storage-node/packages/cli/src/commands/head.ts

@@ -1,48 +0,0 @@
-import axios from 'axios'
-import chalk from 'chalk'
-import { BaseCommand } from './base'
-
-// Head command class. Validates input parameters and obtains the asset headers.
-export class HeadCommand extends BaseCommand {
-  private readonly storageNodeUrl: string
-  private readonly contentId: string
-
-  constructor(api: any, storageNodeUrl: string, contentId: string) {
-    super(api)
-
-    this.storageNodeUrl = storageNodeUrl
-    this.contentId = contentId
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return this.storageNodeUrl && this.storageNodeUrl !== '' && this.contentId && this.contentId !== ''
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:   storage-cli head colossusURL contentID
-        Example: storage-cli head http://localhost:3001 0x7a6ba7e9157e5fba190dc146fe1baa8180e29728a5c76779ed99655500cff795
-      `)
-    )
-  }
-
-  // Command executor.
-  async run() {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const assetUrl = this.createAndLogAssetUrl(this.storageNodeUrl, this.contentId)
-
-    try {
-      const response = await axios.head(assetUrl)
-
-      console.log(chalk.green(`Content type: ${response.headers['content-type']}`))
-      console.log(chalk.green(`Content length: ${response.headers['content-length']}`))
-    } catch (err) {
-      this.fail(`Colossus request failed: ${err.message}`)
-    }
-  }
-}

+ 0 - 202
storage-node/packages/cli/src/commands/upload.ts

@@ -1,202 +0,0 @@
-import axios, { AxiosRequestConfig } from 'axios'
-import fs from 'fs'
-import ipfsHash from 'ipfs-only-hash'
-import { ContentId, DataObject } from '@joystream/types/storage'
-import BN from 'bn.js'
-import { BaseCommand } from './base'
-import Debug from 'debug'
-import chalk from 'chalk'
-import { aliceKeyPair } from './dev'
-const debug = Debug('joystream:storage-cli:upload')
-
-// Defines the necessary parameters for the AddContent runtime tx.
-interface AddContentParams {
-  accountId: string
-  ipfsCid: string
-  contentId: ContentId
-  fileSize: BN
-  dataObjectTypeId: number
-  memberId: number
-}
-
-// Upload command class. Validates input parameters and uploads the asset to the storage node and runtime.
-export class UploadCommand extends BaseCommand {
-  private readonly mediaSourceFilePath: string
-  private readonly dataObjectTypeId: string
-  private readonly keyFile: string
-  private readonly passPhrase: string
-  private readonly memberId: string
-
-  constructor(
-    api: any,
-    mediaSourceFilePath: string,
-    dataObjectTypeId: string,
-    memberId: string,
-    keyFile: string,
-    passPhrase: string
-  ) {
-    super(api)
-
-    this.mediaSourceFilePath = mediaSourceFilePath
-    this.dataObjectTypeId = dataObjectTypeId
-    this.memberId = memberId
-    this.keyFile = keyFile
-    this.passPhrase = passPhrase
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return (
-      this.mediaSourceFilePath &&
-      this.mediaSourceFilePath !== '' &&
-      this.dataObjectTypeId &&
-      this.dataObjectTypeId !== '' &&
-      this.memberId &&
-      this.memberId !== ''
-    )
-  }
-
-  // Reads the file from the filesystem and computes IPFS hash.
-  private async computeIpfsHash(): Promise<string> {
-    const file = fs.createReadStream(this.mediaSourceFilePath).on('error', (err) => {
-      this.fail(`File read failed: ${err}`)
-    })
-
-    return await ipfsHash.of(file)
-  }
-
-  // Read the file size from the file system.
-  private getFileSize(): number {
-    const stats = fs.statSync(this.mediaSourceFilePath)
-    return stats.size
-  }
-
-  // Creates parameters for the AddContent runtime tx.
-  private async getAddContentParams(): Promise<AddContentParams> {
-    const identity = await this.loadIdentity()
-    const accountId = identity.address
-
-    const dataObjectTypeId: number = parseInt(this.dataObjectTypeId)
-    if (isNaN(dataObjectTypeId)) {
-      this.fail(`Cannot parse dataObjectTypeId: ${this.dataObjectTypeId}`)
-    }
-
-    const memberId: number = parseInt(this.memberId)
-    if (isNaN(dataObjectTypeId)) {
-      this.fail(`Cannot parse memberIdString: ${this.memberId}`)
-    }
-
-    return {
-      accountId,
-      ipfsCid: await this.computeIpfsHash(),
-      contentId: ContentId.generate(this.api.api.registry),
-      fileSize: new BN(this.getFileSize()),
-      dataObjectTypeId,
-      memberId,
-    }
-  }
-
-  // Creates the DataObject in the runtime.
-  private async createContent(p: AddContentParams): Promise<DataObject> {
-    try {
-      const dataObject: DataObject = await this.api.assets.createDataObject(
-        p.accountId,
-        p.memberId,
-        p.contentId,
-        p.dataObjectTypeId,
-        p.fileSize,
-        p.ipfsCid
-      )
-
-      return dataObject
-    } catch (err) {
-      if (err.dispatchError) {
-        if (err.dispatchError.isModule) {
-          const error = err.dispatchError.asModule
-          const { name, documentation } = this.api.api.registry.findMetaError(error)
-          this.fail(`Cannot create data object: ${name} ${documentation}`)
-        } else {
-          const error = err.dispatchError.toString()
-          this.fail(`Cannot create data object: ${error}`)
-        }
-      } else {
-        this.fail(`Cannot create data object: ${err}`)
-      }
-    }
-  }
-
-  // Uploads file to given asset URL.
-  private async uploadFile(assetUrl: string) {
-    // Create file read stream and set error handler.
-    const file = fs.createReadStream(this.mediaSourceFilePath).on('error', (err) => {
-      this.fail(`File read failed: ${err}`)
-    })
-
-    // Upload file from the stream.
-    try {
-      const fileSize = this.getFileSize()
-      const config: AxiosRequestConfig = {
-        headers: {
-          'Content-Type': '', // https://github.com/Joystream/storage-node-joystream/issues/16
-          'Content-Length': fileSize.toString(),
-        },
-        // max length of body in PUT request
-        maxBodyLength: this.maxContentSize(),
-      }
-      await axios.put(assetUrl, file, config)
-
-      console.log('File uploaded.')
-    } catch (err) {
-      this.fail(err.toString())
-    }
-  }
-
-  // Loads and unlocks the runtime identity using the key file and pass phrase.
-  private async loadIdentity(): Promise<any> {
-    const noKeyFileProvided = !this.keyFile || this.keyFile === ''
-    const useAlice = noKeyFileProvided && (await this.api.system.isDevelopmentChain())
-
-    if (useAlice) {
-      debug("Discovered 'development' chain.")
-      return aliceKeyPair(this.api)
-    }
-
-    try {
-      await fs.promises.access(this.keyFile)
-    } catch (error) {
-      this.fail(`Cannot read file "${this.keyFile}".`)
-    }
-
-    return this.api.identities.loadUnlock(this.keyFile, this.passPhrase)
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:       storage-cli upload mediaSourceFilePath dataObjectTypeId memberId [keyFilePath] [passPhrase]
-        Example:     storage-cli upload ./movie.mp4 1 1 ./keyFile.json secretPhrase
-        Development: storage-cli upload ./movie.mp4 1 0
-      `)
-    )
-  }
-
-  // Command executor.
-  async run() {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const addContentParams = await this.getAddContentParams()
-    debug(`AddContent Tx params: ${JSON.stringify(addContentParams)}`)
-    debug(`Decoded CID: ${addContentParams.contentId.toString()}`)
-
-    const dataObject = await this.createContent(addContentParams)
-    debug(`Received data object: ${dataObject.toString()}`)
-
-    const colossusEndpoint = await this.getAnyProviderEndpoint()
-    debug(`Discovered storage node endpoint: ${colossusEndpoint}`)
-
-    const assetUrl = this.createAndLogAssetUrl(colossusEndpoint, addContentParams.contentId)
-    await this.uploadFile(assetUrl)
-  }
-}

+ 0 - 1
storage-node/packages/cli/src/test/index.ts

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 9
storage-node/packages/cli/tsconfig.json

@@ -1,9 +0,0 @@
-{
-  "include": ["src"],
-  "extends": "../../tsconfig.json",
-  "compilerOptions": {
-    "outDir": "dist",
-    "rootDir": "src",
-    "baseUrl": "."
-  }
-}

+ 0 - 1
storage-node/packages/colossus/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 81
storage-node/packages/colossus/README.md

@@ -1,81 +0,0 @@
-![Storage Nodes for Joystream](../../banner.svg)
-
-## Development
-
-Run a development server (an ipfs node and development chain should be running on the local machine)
-
-```bash
-$ yarn colossus --dev
-```
-
-This will expect the chain to be configured with certain development accounts.
-The setup can be done by running the dev-init command for the storage-cli:
-
-```sh
-yarn storage-cli dev-init
-```
-
-## Command-Line
-
-```sh
-$ yarn colossus --help
-```
-
-```
-  Colossus - Joystream Storage Node
-
-  Usage:
-    $ colossus [command] [arguments]
-
-  Commands:
-    server        Runs a production server instance. (discovery and storage services)
-                  This is the default command if not specified.
-    discovery     Run the discovery service only.
-
-  Arguments (required for server. Ignored if running server with --dev option):
-    --provider-id ID, -i ID     StorageProviderId assigned to you in working group.
-    --key-file FILE             JSON key export file to use as the storage provider (role account).
-    --public-url=URL, -u URL    API Public URL to announce.
-
-  Arguments (optional):
-    --dev                   Runs server with developer settings.
-    --passphrase            Optional passphrase to use to decrypt the key-file.
-    --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
-    --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
-```
-
-To run a storage server in production you will need to enroll on the network first to
-obtain your provider-id and role account.
-
-## API Packages
-
-Since it's not entirely clear yet how APIs will develop in future, the approach
-taken here is to package individual APIs up individually. That is, instead of
-providing an overall API version in `api-base.yml`, it should be part of each
-API package's path.
-
-For example, for a `foo` API in its version `v1`, its definitions should live
-in `./paths/foo/v1.js` and `./paths/foo/v1/*.js` respectively.
-
-_Note:_ until a reasonably stable API is reached, this project uses a `v0`
-version prefix.
-
-## Interface/implementation
-
-For reusability across API versions, it's best to keep files in the `paths`
-subfolder very thin, and instead inject implementations via the `dependencies`
-configuration value of `express-openapi`.
-
-These implementations line to the `./lib` subfolder. Adjust `app.js` as
-needed to make them available to API packages.
-
-## Streaming Notes
-
-For streaming content, it is required that stream metadata is located at the
-start of the stream. Most software writes metadata at the end of the stream,
-because it is when the stream is committed to disk that the entirety of the
-metadata is known.
-
-To move metadata to the start of the stream, a CLI tool such as
-[qtfaststart](https://github.com/danielgtaylor/qtfaststart) for MP4 files might
-be used.

+ 0 - 33
storage-node/packages/colossus/api-base.yml

@@ -1,33 +0,0 @@
-openapi: '3.0.0'
-info:
-  title: 'Colossus - Joystream Storage Node'
-  version: '1.1.0'
-paths: {} # Will be populated by express-openapi
-
-components:
-  # Re-usable parameter definitions
-  parameters: {}
-
-  # Re-usable (response) object definitions
-  schemas:
-    Error:
-      required:
-        - message
-      properties:
-        code:
-          type: integer
-          format: int32
-        message:
-          type: string
-
-    ContentDirectoryEntry: # TODO implement
-      required:
-        - name
-      properties:
-        name:
-          type: string
-
-    ContentDirectoryEntries:
-      type: array
-      items:
-        $ref: '#/components/schemas/ContentDirectoryEntry'

+ 0 - 340
storage-node/packages/colossus/bin/cli.js

@@ -1,340 +0,0 @@
-#!/usr/bin/env node
-/* es-lint disable  */
-
-'use strict'
-
-// Node requires
-const path = require('path')
-
-// npm requires
-const meow = require('meow')
-const chalk = require('chalk')
-const figlet = require('figlet')
-const _ = require('lodash')
-const { sleep } = require('@joystream/storage-utils/sleep')
-
-const debug = require('debug')('joystream:colossus')
-
-// Project root
-const PROJECT_ROOT = path.resolve(__dirname, '..')
-
-// Parse CLI
-const FLAG_DEFINITIONS = {
-  port: {
-    type: 'number',
-    alias: 'p',
-    default: 3000,
-  },
-  keyFile: {
-    type: 'string',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  publicUrl: {
-    type: 'string',
-    alias: 'u',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  passphrase: {
-    type: 'string',
-  },
-  wsProvider: {
-    type: 'string',
-    default: 'ws://localhost:9944',
-  },
-  providerId: {
-    type: 'number',
-    alias: 'i',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  ipfsHost: {
-    type: 'string',
-    default: 'localhost',
-  },
-  anonymous: {
-    type: 'boolean',
-    default: false,
-  },
-  maxSync: {
-    type: 'number',
-    default: 200,
-  },
-}
-
-const cli = meow(
-  `
-  Usage:
-    $ colossus [command] [arguments]
-
-  Commands:
-    server        Runs a production server instance
-
-  Arguments (required for with server command, unless --dev or --anonymous args are used):
-    --provider-id ID, -i ID     StorageProviderId assigned to you in working group.
-    --key-file FILE             JSON key export file to use as the storage provider (role account).
-    --public-url=URL, -u URL    API Public URL to announce.
-
-  Arguments (optional):
-    --dev                   Runs server with developer settings.
-    --passphrase            Optional passphrase to use to decrypt the key-file.
-    --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
-    --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
-    --ipfs-host   hostname  ipfs host to use, default to 'localhost'. Default port 5001 is always used
-    --anonymous             Runs server in anonymous mode. Replicates content without need to register
-                            on-chain, and can serve content. Cannot be used to upload content.
-    --maxSync               The max number of items to sync concurrently. Defaults to 30.
-  `,
-  { flags: FLAG_DEFINITIONS }
-)
-
-// All-important banner!
-function banner() {
-  console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
-}
-
-function startExpressApp(app, port) {
-  const http = require('http')
-  const server = http.createServer(app)
-
-  return new Promise((resolve, reject) => {
-    server.on('error', reject)
-    server.on('close', (...args) => {
-      console.log('Server closed, shutting down...')
-      resolve(...args)
-    })
-    server.on('listening', () => {
-      console.log('API server started.', server.address())
-    })
-    server.listen(port, '::')
-    console.log('Starting API server...')
-  })
-}
-
-// Start app
-function startAllServices({ store, api, port, ipfsHttpGatewayUrl, anonymous }) {
-  const app = require('../lib/app')(PROJECT_ROOT, store, api, ipfsHttpGatewayUrl, anonymous)
-  return startExpressApp(app, port)
-}
-
-// Get an initialized storage instance
-function getStorage(runtimeApi, { ipfsHost }) {
-  // TODO at some point, we can figure out what backend-specific connection
-  // options make sense. For now, just don't use any configuration.
-  const { Storage } = require('@joystream/storage-node-backend')
-
-  const options = {
-    resolve_content_id: async (contentId) => {
-      // Resolve accepted content from cache
-      const hash = runtimeApi.assets.resolveContentIdToIpfsHash(contentId)
-      if (hash) return hash
-
-      // Resolve via API
-      const obj = await runtimeApi.assets.getDataObject(contentId)
-      if (!obj) {
-        return
-      }
-      // if obj.liaison_judgement !== Accepted .. throw ?
-      return obj.ipfs_content_id.toString()
-    },
-    ipfsHost,
-  }
-
-  return Storage.create(options)
-}
-
-async function initApiProduction({ wsProvider, providerId, keyFile, passphrase, anonymous }) {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-  const api = await RuntimeApi.create({
-    account_file: keyFile,
-    passphrase,
-    provider_url: wsProvider,
-    storageProviderId: providerId,
-  })
-
-  if (!anonymous && !api.identities.key) {
-    throw new Error('Failed to unlock storage provider account')
-  }
-
-  await api.untilChainIsSynced()
-
-  // We allow the node to startup without correct provider id and account, but syncing and
-  // publishing of identity will be skipped.
-  if (!anonymous && !(await api.providerIsActiveWorker())) {
-    debug('storage provider role account and storageProviderId are not associated with a worker')
-  }
-
-  return api
-}
-
-async function initApiDevelopment({ wsProvider }) {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-  const api = await RuntimeApi.create({
-    provider_url: wsProvider,
-  })
-
-  const dev = require('../../cli/dist/commands/dev')
-
-  api.identities.useKeyPair(dev.roleKeyPair(api))
-
-  // Wait until dev provider is added to role
-  while (true) {
-    try {
-      api.storageProviderId = await dev.check(api)
-      break
-    } catch (err) {
-      debug(err)
-    }
-
-    await sleep(10000)
-  }
-
-  return api
-}
-
-// TODO: instead of recursion use while/async-await and use promise/setTimout based sleep
-// or cleaner code with generators?
-async function announcePublicUrl(api, publicUrl) {
-  // re-announce in future
-  const reannounce = function (timeoutMs) {
-    setTimeout(announcePublicUrl, timeoutMs, api, publicUrl)
-  }
-
-  const chainIsSyncing = await api.chainIsSyncing()
-  if (chainIsSyncing) {
-    debug('Chain is syncing. Postponing announcing public url.')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  // postpone if provider not active
-  if (!(await api.providerIsActiveWorker())) {
-    debug('storage provider role account and storageProviderId are not associated with a worker')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  const sufficientBalance = await api.providerHasMinimumBalance(1)
-  if (!sufficientBalance) {
-    debug('Provider role account does not have sufficient balance. Postponing announcing public url.')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  debug('announcing public url')
-
-  try {
-    await api.workers.setWorkerStorageValue(publicUrl)
-
-    debug('announcing complete.')
-  } catch (err) {
-    debug(`announcing public url failed: ${err.stack}`)
-
-    // On failure retry sooner
-    debug(`announcing failed, retrying in: 2 minutes`)
-    reannounce(120 * 1000)
-  }
-}
-
-// Simple CLI commands
-let command = cli.input[0]
-if (!command) {
-  command = 'server'
-}
-
-const commands = {
-  server: async () => {
-    banner()
-    let publicUrl, port, api
-
-    if (cli.flags.dev) {
-      const dev = require('../../cli/dist/commands/dev')
-      api = await initApiDevelopment(cli.flags)
-      port = dev.developmentPort()
-      publicUrl = `http://localhost:${port}/`
-    } else {
-      api = await initApiProduction(cli.flags)
-      publicUrl = cli.flags.publicUrl
-      port = cli.flags.port
-    }
-
-    // Get initlal data objects into cache
-    while (true) {
-      try {
-        debug('Fetching data objects')
-        await api.assets.fetchDataObjects()
-        break
-      } catch (err) {
-        debug('Failed fetching data objects', err)
-        await sleep(5000)
-      }
-    }
-
-    // Regularly update data objects
-    setInterval(async () => {
-      try {
-        debug('Fetching data objects')
-        await api.assets.fetchDataObjects()
-      } catch (err) {
-        debug('Failed updating data objects from chain', err)
-      }
-    }, 60000)
-
-    // TODO: check valid url, and valid port number
-    const store = getStorage(api, cli.flags)
-
-    const ipfsHost = cli.flags.ipfsHost
-    const ipfsHttpGatewayUrl = `http://${ipfsHost}:8080/`
-
-    const { startSyncing } = require('../lib/sync')
-    startSyncing(api, { anonymous: cli.flags.anonymous, maxSync: cli.flags.maxSync }, store)
-
-    if (!cli.flags.anonymous) {
-      announcePublicUrl(api, publicUrl)
-    }
-
-    return startAllServices({ store, api, port, ipfsHttpGatewayUrl, anonymous: cli.flags.anonymous })
-  },
-}
-
-async function main() {
-  // Simple CLI commands
-  let command = cli.input[0]
-  if (!command) {
-    command = 'server'
-  }
-
-  if (Object.prototype.hasOwnProperty.call(commands, command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    await commands[command](...args)
-  } else {
-    throw new Error(`Command '${command}' not recognized, aborting!`)
-  }
-}
-
-main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(err.stack))
-    process.exit(-1)
-  })

+ 0 - 78
storage-node/packages/colossus/lib/app.js

@@ -1,78 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-// Node requires
-const fs = require('fs')
-const path = require('path')
-
-// npm requires
-const express = require('express')
-const openapi = require('express-openapi')
-const bodyParser = require('body-parser')
-const cors = require('cors')
-const yaml = require('js-yaml')
-
-// Project requires
-const validateResponses = require('./middleware/validate_responses')
-const fileUploads = require('./middleware/file_uploads')
-const pagination = require('@joystream/storage-utils/pagination')
-
-// Configure app
-function createApp(projectRoot, storage, runtime, ipfsHttpGatewayUrl, anonymous) {
-  const app = express()
-  app.use(cors())
-  app.use(bodyParser.json())
-  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
-
-  // Load & extend/configure API docs
-  let api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
-  api['x-express-openapi-additional-middleware'] = [validateResponses]
-  api['x-express-openapi-validation-strict'] = true
-
-  api = pagination.openapi(api)
-
-  openapi.initialize({
-    apiDoc: api,
-    app,
-    paths: path.resolve(projectRoot, 'paths'),
-    docsPath: '/swagger.json',
-    consumesMiddleware: {
-      'multipart/form-data': fileUploads,
-    },
-    dependencies: {
-      storage,
-      runtime,
-      ipfsHttpGatewayUrl,
-      anonymous,
-    },
-  })
-
-  // If no other handler gets triggered (errors), respond with the
-  // error serialized to JSON.
-  // Disable lint because we need such function signature.
-  // eslint-disable-next-line no-unused-vars
-  app.use(function (err, req, res, next) {
-    res.status(err.status).json(err)
-  })
-
-  return app
-}
-
-module.exports = createApp

+ 0 - 43
storage-node/packages/colossus/lib/middleware/file_uploads.js

@@ -1,43 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const multer = require('multer')
-
-// Taken from express-openapi examples
-module.exports = function (req, res, next) {
-  multer().any()(req, res, function (err) {
-    if (err) {
-      return next(err)
-    }
-    // Handle both single and multiple files
-    const filesMap = req.files.reduce(
-      (acc, f) =>
-        Object.assign(acc, {
-          [f.fieldname]: (acc[f.fieldname] || []).concat(f),
-        }),
-      {}
-    )
-    Object.keys(filesMap).forEach((fieldname) => {
-      const files = filesMap[fieldname]
-      req.body[fieldname] = files.length > 1 ? files.map(() => '') : ''
-    })
-    return next()
-  })
-}

+ 0 - 77
storage-node/packages/colossus/lib/middleware/ipfs_proxy.js

@@ -1,77 +0,0 @@
-const { createProxyMiddleware } = require('http-proxy-middleware')
-const debug = require('debug')('joystream:ipfs-proxy')
-const mime = require('mime-types')
-
-/* 
-For this proxying to work correctly, ensure IPFS HTTP Gateway is configured as a path gateway:
-This can be done manually with the following command:
-
-  $ ipfs config --json Gateway.PublicGateways '{"localhost": null }' 
-  
-The implicit default config is below which is not what we want!
-
-  $ ipfs config --json Gateway.PublicGateways '{
-    "localhost": {
-        "Paths": ["/ipfs", "/ipns"],
-        "UseSubdomains": true
-      }
-    }'
-
-https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gateway
-*/
-
-const pathFilter = function (path, req) {
-  // we get the full path here so it needs to match the path where
-  // it is used by the openapi initializer
-  return path.match('^/asset/v0') && (req.method === 'GET' || req.method === 'HEAD')
-}
-
-const createPathRewriter = () => {
-  return async (_path, req) => {
-    const hash = req.params.ipfs_content_id
-    return `/ipfs/${hash}`
-  }
-}
-
-const createProxy = (ipfsHttpGatewayUrl) => {
-  const pathRewrite = createPathRewriter()
-
-  return createProxyMiddleware(pathFilter, {
-    // Default path to local IPFS HTTP GATEWAY
-    target: ipfsHttpGatewayUrl || 'http://localhost:8080/',
-    pathRewrite,
-    onProxyRes: function (proxRes, req, res) {
-      /*
-        Make sure the reverse proxy used infront of colosss (nginx/caddy) Does not duplicate
-        these headers to prevent some browsers getting confused especially
-        with duplicate access-control-allow-origin headers!
-        'accept-ranges': 'bytes',
-        'access-control-allow-headers': 'Content-Type, Range, User-Agent, X-Requested-With',
-        'access-control-allow-methods': 'GET',
-        'access-control-allow-origin': '*',
-        'access-control-expose-headers': 'Content-Range, X-Chunked-Output, X-Stream-Output',
-      */
-
-      if (proxRes.statusCode === 301) {
-        // capture redirect when IPFS HTTP Gateway is configured with 'UseDomains':true
-        // and treat it as an error.
-        console.error('IPFS HTTP Gateway is configured for "UseSubdomains". Killing stream')
-        res.status(500).end()
-        proxRes.destroy()
-      } else {
-        // Handle downloading as attachment /asset/v0/:id?download
-        if (req.query.download) {
-          const contentId = req.params.id
-          const contentType = proxRes.headers['content-type']
-          const ext = mime.extension(contentType) || 'bin'
-          const fileName = `${contentId}.${ext}`
-          proxRes.headers['Content-Disposition'] = `attachment; filename=${fileName}`
-        }
-      }
-    },
-  })
-}
-
-module.exports = {
-  createProxy,
-}

+ 0 - 61
storage-node/packages/colossus/lib/middleware/validate_responses.js

@@ -1,61 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:middleware:validate')
-
-// Function taken directly from https://github.com/kogosoftwarellc/open-api/tree/master/packages/express-openapi
-module.exports = function (req, res, next) {
-  const strictValidation = !!req.apiDoc['x-express-openapi-validation-strict']
-  if (typeof res.validateResponse === 'function') {
-    const send = res.send
-    res.send = function expressOpenAPISend(...args) {
-      const onlyWarn = !strictValidation
-      if (res.get('x-express-openapi-validation-error-for') !== undefined) {
-        return send.apply(res, args)
-      }
-      if (res.get('x-express-openapi-validation-for') !== undefined) {
-        return send.apply(res, args)
-      }
-
-      const body = args[0]
-      let validation = res.validateResponse(res.statusCode, body)
-      let validationMessage
-      if (validation === undefined) {
-        validation = { message: undefined, errors: undefined }
-      }
-      if (validation.errors) {
-        const errorList = Array.from(validation.errors)
-          .map((_) => _.message)
-          .join(',')
-        validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`
-        debug(validationMessage)
-        // Set to avoid a loop, and to provide the original status code
-        res.set('x-express-openapi-validation-error-for', res.statusCode.toString())
-      }
-      if ((onlyWarn || !validation.errors) && res.statusCode) {
-        res.set('x-express-openapi-validation-for', res.statusCode.toString())
-        return send.apply(res, args)
-      }
-      res.status(500)
-      return res.json({ error: validationMessage })
-    }
-  }
-  next()
-}

+ 0 - 120
storage-node/packages/colossus/lib/sync.js

@@ -1,120 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:sync')
-const _ = require('lodash')
-const { ContentId } = require('@joystream/types/storage')
-const { nextTick } = require('@joystream/storage-utils/sleep')
-
-// Time to wait between sync runs. The lower the better chance to consume all
-// available sync sessions allowed.
-const INTERVAL_BETWEEN_SYNC_RUNS_MS = 3000
-
-async function syncRun({ api, storage, contentBeingSynced, contentCompletedSync, flags }) {
-  // The number of concurrent items to attemp to fetch.
-  const MAX_CONCURRENT_SYNC_ITEMS = Math.max(1, flags.maxSync)
-
-  const contentIds = api.assets.getAcceptedIpfsHashes()
-
-  // Select ids which may need to be synced
-  const idsNotSynced = contentIds
-    .filter((id) => !contentCompletedSync.has(id))
-    .filter((id) => !contentBeingSynced.has(id))
-
-  // We are limiting how many content ids can be synced concurrently, so to ensure
-  // better distribution of content across storage nodes during a potentially long
-  // sync process we don't want all nodes to replicate items in the same order, so
-  // we simply shuffle.
-  const idsToSync = _.shuffle(idsNotSynced)
-
-  while (contentBeingSynced.size < MAX_CONCURRENT_SYNC_ITEMS && idsToSync.length) {
-    const id = idsToSync.shift()
-
-    try {
-      contentBeingSynced.set(id)
-      await storage.pin(id, (err, status) => {
-        if (err) {
-          contentBeingSynced.delete(id)
-          debug(`Error Syncing ${err}`)
-        } else if (status.synced) {
-          contentBeingSynced.delete(id)
-          contentCompletedSync.set(id)
-        }
-      })
-    } catch (err) {
-      // Most likely failed to resolve the content id
-      debug(`Failed calling synchronize ${err}`)
-      contentBeingSynced.delete(id)
-    }
-
-    // Allow callbacks to call to storage.synchronize() to be invoked during this sync run
-    // This will happen if content is found to be local and will speed overall sync process.
-    await nextTick()
-  }
-}
-
-async function syncRunner({ api, flags, storage, contentBeingSynced, contentCompletedSync }) {
-  const retry = () => {
-    setTimeout(syncRunner, INTERVAL_BETWEEN_SYNC_RUNS_MS, {
-      api,
-      flags,
-      storage,
-      contentBeingSynced,
-      contentCompletedSync,
-    })
-  }
-
-  try {
-    if (await api.chainIsSyncing()) {
-      debug('Chain is syncing. Postponing sync.')
-    } else {
-      await syncRun({
-        api,
-        storage,
-        contentBeingSynced,
-        contentCompletedSync,
-        flags,
-      })
-    }
-  } catch (err) {
-    debug(`Error during sync ${err.stack}`)
-  }
-
-  // schedule next sync run
-  retry()
-}
-
-function startSyncing(api, flags, storage) {
-  // ids of content currently being synced
-  const contentBeingSynced = new Map()
-  // ids of content that completed sync
-  const contentCompletedSync = new Map()
-
-  syncRunner({ api, flags, storage, contentBeingSynced, contentCompletedSync })
-
-  setInterval(() => {
-    debug(`objects syncing: ${contentBeingSynced.size}`)
-    debug(`objects local: ${contentCompletedSync.size}`)
-  }, 60000)
-}
-
-module.exports = {
-  startSyncing,
-}

+ 0 - 73
storage-node/packages/colossus/package.json

@@ -1,73 +0,0 @@
-{
-  "name": "@joystream/colossus",
-  "private": true,
-  "version": "0.4.0",
-  "description": "Colossus - Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'test/**/*.js'",
-    "lint": "eslint 'paths/**/*.js' 'lib/**/*.js'",
-    "dev": "nodemon --watch api-base.yml --watch bin/ --watch paths/ --watch lib/ --verbose --ext js --exec node bin/cli.js --"
-  },
-  "bin": {
-    "colossus": "bin/cli.js"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "express": "^4.16.4",
-    "mocha": "^5.2.0",
-    "node-mocks-http": "^1.7.3",
-    "nodemon": "^1.18.10",
-    "supertest": "^3.4.2",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-node-backend": "^0.1.0",
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@joystream/storage-utils": "^0.1.0",
-    "body-parser": "^1.19.0",
-    "chalk": "^2.4.2",
-    "cors": "^2.8.5",
-    "express-openapi": "^4.6.1",
-    "figlet": "^1.2.1",
-    "http-proxy-middleware": "^1.0.5",
-    "ipfs-only-hash": "^1.0.2",
-    "js-yaml": "^3.13.1",
-    "lodash": "^4.17.11",
-    "meow": "^7.0.1",
-    "mime-types": "^2.1.27",
-    "multer": "^1.4.1",
-    "si-prefix": "^0.2.0"
-  }
-}

+ 0 - 385
storage-node/packages/colossus/paths/asset/v0/{id}.js

@@ -1,385 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:colossus:api:asset')
-const filter = require('@joystream/storage-node-backend/filter')
-const ipfsProxy = require('../../../lib/middleware/ipfs_proxy')
-const assert = require('assert')
-
-function errorHandler(response, err, code) {
-  debug(err)
-  // Some err types don't have a valid http status code such as one that come from ipfs node for example
-  const statusCode = typeof err.code === 'number' ? err.code : code
-  response.status(statusCode || 500).send({ message: err.toString() })
-  response.end()
-}
-
-// The maximum total estimated balance that will be spent submitting transactions
-// by the node following processing one upload. Here we assume 3 transactions with
-// base transaction fee = 1. In future this estimate will need to be more accurate
-// and derived from weight to fee calculation.
-const PROCESS_UPLOAD_TX_COSTS = 3
-
-module.exports = function (storage, runtime, ipfsHttpGatewayUrl, anonymous) {
-  debug('created path handler')
-
-  // Creat the IPFS HTTP Gateway proxy middleware
-  const proxy = ipfsProxy.createProxy(ipfsHttpGatewayUrl)
-
-  // Cache of known content mappings and local availability info
-  const ipfsContentIdMap = new Map()
-
-  // Make sure id is valid and was 'Accepted', only then proxy if content is local
-  const proxyAcceptedContentToIpfsGateway = async (req, res, next) => {
-    const content_id = req.params.id
-
-    if (!ipfsContentIdMap.has(content_id)) {
-      const hash = runtime.assets.resolveContentIdToIpfsHash(req.params.id)
-
-      if (!hash) {
-        return res.status(404).send({ message: 'Unknown content' })
-      }
-
-      ipfsContentIdMap.set(content_id, {
-        local: false,
-        ipfs_content_id: hash,
-      })
-    }
-
-    const { ipfs_content_id, local } = ipfsContentIdMap.get(content_id)
-
-    // Pass on the ipfs hash to the middleware
-    req.params.ipfs_content_id = ipfs_content_id
-
-    // Serve it if we know we have it, or it was recently synced successfully
-    if (local || storage.syncStatus(ipfs_content_id).synced) {
-      return proxy(req, res, next)
-    }
-
-    // Not yet processed by sync run, check if we have it locally
-    try {
-      const stat = await storage.ipfsStat(ipfs_content_id, 4000)
-
-      if (stat.local) {
-        ipfsContentIdMap.set(content_id, {
-          local: true,
-          ipfs_content_id,
-        })
-
-        // We know we have the full content locally, serve it
-        return proxy(req, res, next)
-      }
-    } catch (_err) {
-      // timeout trying to stat which most likely means we do not have it
-      // debug('Failed to stat', ipfs_content_id)
-    }
-
-    // Valid content but no certainty that the node has it locally yet.
-    // We a void serving it to prevent poor performance (ipfs node will have to retrieve it on demand
-    // which might be slow and wasteful if content is not cached locally)
-    res.status(404).send({ message: 'Content not available locally' })
-  }
-
-  const doc = {
-    // parameters for all operations in this path
-    parameters: [
-      {
-        name: 'id',
-        in: 'path',
-        required: true,
-        description: 'Joystream Content ID',
-        schema: {
-          type: 'string',
-        },
-      },
-    ],
-
-    // Put for uploads
-    async put(req, res) {
-      if (anonymous) {
-        errorHandler(res, 'Uploads Not Permitted in Anonymous Mode', 400)
-        return
-      }
-
-      const id = req.params.id // content id
-
-      // Check if content exists
-      const roleAddress = runtime.identities.key.address
-      const providerId = runtime.storageProviderId
-      let dataObject
-
-      try {
-        dataObject = await runtime.assets.getDataObject(id)
-      } catch (err) {
-        errorHandler(res, err, 403)
-        return
-      }
-
-      if (!dataObject) {
-        res.status(404).send({ message: 'Content Not Found' })
-        return
-      }
-
-      // Early filtering on content_length..do not wait for fileInfo
-      // ensure its less than max allowed by node policy.
-      const filterResult = filter({}, req.headers)
-
-      if (filterResult.code !== 200) {
-        errorHandler(res, new Error(filterResult.message), filterResult.code)
-        return
-      }
-
-      // Ensure content_length from request equals size in data object.
-      if (!dataObject.size_in_bytes.eq(filterResult.content_length)) {
-        errorHandler(res, new Error('Content Length does not match expected size of content'), 403)
-        return
-      }
-
-      // Ensure we have minimum blance to successfully update state on chain after processing
-      // upload. Due to the node handling concurrent uploads this check will not always guarantee
-      // at the point when transactions are sent that the balance will still be sufficient.
-      const sufficientBalance = await runtime.providerHasMinimumBalance(PROCESS_UPLOAD_TX_COSTS)
-
-      if (!sufficientBalance) {
-        errorHandler(res, 'Server has insufficient balance to process upload.', 503)
-        return
-      }
-
-      // We'll open a write stream to the backend, but reserve the right to
-      // abort upload if the filters don't smell right.
-      let stream
-      try {
-        stream = await storage.open(id, 'w')
-
-        // Wether we are aborting early because of early file detection not passing filter
-        let aborted = false
-
-        // Early file info detection so we can abort early on.. but we do not reject
-        // content because we don't yet have ipfs computed
-        stream.on('fileInfo', async (info) => {
-          try {
-            debug('Early file detection info:', info)
-
-            const filterResult = filter({}, req.headers, info.mimeType)
-            if (filterResult.code !== 200) {
-              aborted = true
-              debug('Ending stream', filterResult.message)
-              stream.end()
-              stream.cleanup()
-              res.status(filterResult.code).send({ message: filterResult.message })
-            }
-          } catch (err) {
-            errorHandler(res, err)
-          }
-        })
-
-        stream.on('end', async () => {
-          if (!aborted) {
-            try {
-              // try to get file info and compute ipfs hash before committing the stream to ifps node.
-              await stream.info()
-            } catch (err) {
-              errorHandler(res, err)
-            }
-          }
-        })
-
-        // At end of stream we should have file info and computed ipfs hash - this event is emitted
-        // only by explicitly calling stream.info() in the stream.on('finish') event handler
-        stream.once('info', async (info, hash) => {
-          if (hash === dataObject.ipfs_content_id.toString()) {
-            const filterResult = filter({}, req.headers, info.mimeType)
-            if (filterResult.code !== 200) {
-              debug('Rejecting content')
-              stream.cleanup()
-              res.status(400).send({ message: 'Rejecting content type' })
-            } else {
-              try {
-                await stream.commit()
-              } catch (err) {
-                errorHandler(res, err)
-              }
-            }
-          } else {
-            stream.cleanup()
-            res.status(400).send({ message: 'Aborting - Not expected IPFS hash for content' })
-          }
-        })
-
-        stream.on('committed', async (hash) => {
-          // they cannot be different unless we did something stupid!
-          assert(hash === dataObject.ipfs_content_id.toString())
-
-          ipfsContentIdMap.set(id, {
-            ipfs_content_id: hash,
-            local: true,
-          })
-
-          // Send ok response early, no need for client to wait for relationships to be created.
-          debug('Sending OK response.')
-          res.status(200).send({ message: 'Asset uploaded.' })
-
-          try {
-            debug('accepting Content')
-            // Only if judegment is Pending
-            if (dataObject.liaison_judgement.type === 'Pending') {
-              await runtime.assets.acceptContent(roleAddress, providerId, id)
-            }
-          } catch (err) {
-            debug(`${err.message}`)
-          }
-        })
-
-        stream.on('error', (err) => {
-          stream.end()
-          stream.cleanup()
-          errorHandler(res, err)
-        })
-        req.pipe(stream)
-      } catch (err) {
-        errorHandler(res, err)
-      }
-    },
-
-    async get(req, res, next) {
-      proxyAcceptedContentToIpfsGateway(req, res, next)
-    },
-
-    async head(req, res, next) {
-      proxyAcceptedContentToIpfsGateway(req, res, next)
-    },
-  }
-
-  // doc.get = proxy
-  // doc.head = proxy
-  // Note: Adding the middleware this way is causing problems!
-  // We are loosing some information from the request, specifically req.query.download parameters for some reason.
-  // Does it have to do with how/when the apiDoc is being processed? binding issue?
-
-  // OpenAPI specs
-  doc.get.apiDoc = {
-    description: 'Download an asset.',
-    operationId: 'assetData',
-    tags: ['asset', 'data'],
-    parameters: [
-      {
-        name: 'download',
-        in: 'query',
-        description: 'Download instead of streaming inline.',
-        required: false,
-        allowEmptyValue: true,
-        schema: {
-          type: 'boolean',
-          default: false,
-        },
-      },
-    ],
-    responses: {
-      200: {
-        description: 'Asset download.',
-        content: {
-          default: {
-            schema: {
-              type: 'string',
-              format: 'binary',
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  doc.put.apiDoc = {
-    description: 'Asset upload.',
-    operationId: 'assetUpload',
-    tags: ['asset', 'data'],
-    requestBody: {
-      content: {
-        '*/*': {
-          schema: {
-            type: 'string',
-            format: 'binary',
-          },
-        },
-      },
-    },
-    responses: {
-      200: {
-        description: 'Asset upload.',
-        content: {
-          'application/json': {
-            schema: {
-              type: 'object',
-              required: ['message'],
-              properties: {
-                message: {
-                  type: 'string',
-                },
-              },
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  doc.head.apiDoc = {
-    description: 'Asset download information.',
-    operationId: 'assetInfo',
-    tags: ['asset', 'metadata'],
-    responses: {
-      200: {
-        description: 'Asset info.',
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  return doc
-}

+ 0 - 1
storage-node/packages/colossus/test/index.js

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 3
storage-node/packages/helios/.gitignore

@@ -1,3 +0,0 @@
-node_modules/
-lib/
-

+ 0 - 9
storage-node/packages/helios/README.md

@@ -1,9 +0,0 @@
-# Joystream Helios
-
-A basic tool to scan the joystream storage network to get a birds eye view of the health of the storage providers and content replication status.
-
-## Scanning
-
-```
-yarn helios
-```

+ 0 - 128
storage-node/packages/helios/bin/cli.js

@@ -1,128 +0,0 @@
-#!/usr/bin/env node
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-const { encodeAddress } = require('@polkadot/keyring')
-const axios = require('axios')
-const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
-
-function makeAssetUrl(contentId, source) {
-  source = stripEndingSlash(source)
-  return `${source}/asset/v0/${encodeAddress(contentId)}`
-}
-
-// HTTP HEAD with axios all known content ids on given endpoint
-async function countContentAvailability(providerId, endpoint, contentIds) {
-  let found = 0
-  let errored = 0
-  let requestsSent = 0
-  // Avoid opening too many connections, do it in chunks.. otherwise we get
-  // "Client network socket disconnected before secure TLS connection was established" errors
-  while (contentIds.length) {
-    const chunk = contentIds.splice(0, 100)
-    requestsSent += chunk.length
-    const results = await Promise.allSettled(chunk.map((id) => axios.head(makeAssetUrl(id, endpoint))))
-
-    results.forEach((result, _ix) => {
-      if (result.status === 'rejected') {
-        errored++
-      } else {
-        found++
-      }
-    })
-
-    // show some progress
-    console.error(`provider: ${providerId}:`, `total checks: ${requestsSent}`, `ok: ${found}`, `errors: ${errored}`)
-  }
-
-  return { found, errored }
-}
-
-async function testProviderHasAssets(providerId, endpoint, contentIds) {
-  const total = contentIds.length
-  const startedAt = Date.now()
-  const { found, errored } = await countContentAvailability(providerId, endpoint, contentIds)
-  const completedAt = Date.now()
-  console.log(`
-    ---------------------------------------
-    Final Result for provider ${providerId}
-    url: ${endpoint}
-    fetched: ${found}/${total}
-    failed: ${errored}
-    check took: ${(completedAt - startedAt) / 1000}s
-    ------------------------------------------
-  `)
-}
-
-async function main() {
-  const runtime = await RuntimeApi.create()
-  const { api } = runtime
-
-  // get all providers
-  const { ids: storageProviders } = await runtime.workers.getAllProviders()
-  console.log(`Found ${storageProviders.length} staked providers`)
-
-  // Resolve Endpoints of providers
-  console.log('\nResolving live provider API Endpoints...')
-  const endpoints = await Promise.all(
-    storageProviders.map(async (providerId) => {
-      try {
-        const endpoint = (await runtime.workers.getWorkerStorageValue(providerId)).toString()
-        return { providerId, endpoint }
-      } catch (err) {
-        console.log('resolve failed for id', providerId, err.message)
-        return { providerId, endpoint: null }
-      }
-    })
-  )
-
-  console.log('\nChecking API Endpoints are online')
-  await Promise.all(
-    endpoints.map(async (provider) => {
-      if (!provider.endpoint) {
-        console.log(provider.providerId, 'No url set, skipping')
-        return
-      }
-      const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
-      try {
-        const { data } = await axios.get(swaggerUrl)
-        console.log(
-          `${provider.providerId}:`,
-          `${provider.endpoint}`,
-          '- OK -',
-          `storage node version ${data.info.version}`
-        )
-      } catch (err) {
-        console.log(`${provider.providerId}`, `${provider.endpoint} - ${err.message}`)
-      }
-    })
-  )
-
-  // Load data objects
-  await runtime.assets.fetchDataObjects()
-
-  const allContentIds = await runtime.assets.getKnownContentIds()
-  const acceptedContentIds = runtime.assets.getAcceptedContentIds()
-  const ipfsHashes = runtime.assets.getAcceptedIpfsHashes()
-
-  console.log('\nData Directory objects:')
-  console.log(allContentIds.length, 'created')
-  console.log(acceptedContentIds.length, 'accepted')
-  console.log(ipfsHashes.length, 'unique accepted hashes')
-
-  // We no longer need a connection to the chain
-  api.disconnect()
-
-  console.log(`
-    Checking available assets on providers (this can take some time)
-    This is done by sending HEAD requests for all 'Accepted' assets.
-  `)
-
-  endpoints.forEach(async ({ providerId, endpoint }) => {
-    if (!endpoint) {
-      return
-    }
-    return testProviderHasAssets(providerId, endpoint, acceptedContentIds.slice())
-  })
-}
-
-main()

+ 0 - 21
storage-node/packages/helios/package.json

@@ -1,21 +0,0 @@
-{
-  "name": "@joystream/helios",
-  "private": true,
-  "version": "0.1.0",
-  "bin": {
-    "helios": "bin/cli.js"
-  },
-  "scripts": {
-    "test": "echo \"Error: no test specified\" && exit 0"
-  },
-  "license": "GPL-3.0-only",
-  "dependencies": {
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@types/bn.js": "^4.11.5",
-    "axios": "^0.19.0",
-    "bn.js": "^4.11.8"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  }
-}

+ 0 - 1
storage-node/packages/helios/test/index.js

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 1
storage-node/packages/runtime-api/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 3
storage-node/packages/runtime-api/.gitignore

@@ -1,3 +0,0 @@
-# Generated JS files
-types/*.js
-!types/index.js

+ 0 - 6
storage-node/packages/runtime-api/README.md

@@ -1,6 +0,0 @@
-# Summary
-
-This package contains convenience functions for the runtime API.
-
-The main entry point creates and initializes a `@polkadot/api` instance, and
-provides more workflow oriented functions than the underlying API exposes.

+ 0 - 210
storage-node/packages/runtime-api/assets.js

@@ -1,210 +0,0 @@
-'use strict'
-
-const debug = require('debug')('joystream:runtime:assets')
-const { decodeAddress } = require('@polkadot/keyring')
-const { StorageObjectOwner, DataObject } = require('@joystream/types/storage')
-
-function parseContentId(contentId) {
-  try {
-    return decodeAddress(contentId)
-  } catch (err) {
-    return contentId
-  }
-}
-
-/*
- * Add asset related functionality to the substrate API.
- */
-class AssetsApi {
-  static async create(base) {
-    const ret = new AssetsApi()
-    ret.base = base
-    await AssetsApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Create and return a data object.
-   */
-  async createDataObject(accountId, memberId, contentId, doTypeId, size, ipfsCid) {
-    contentId = parseContentId(contentId)
-    const owner = {
-      Member: memberId,
-    }
-    const content = [
-      {
-        content_id: contentId,
-        type_id: doTypeId,
-        size,
-        ipfs_content_id: ipfsCid,
-      },
-    ]
-    const tx = this.base.api.tx.dataDirectory.addContent(owner, content)
-    await this.base.signAndSend(accountId, tx)
-
-    // If the data object constructed properly, we should now be able to return
-    // the data object from the state.
-    return this.getDataObject(contentId)
-  }
-
-  /*
-   * Returns the Data Object for a contendId.
-   * Returns null if it doesn't exist.
-   */
-  async getDataObject(contentId) {
-    contentId = parseContentId(contentId)
-    // check if contentId key exists in map
-    const storageSize = await this.base.api.query.dataDirectory.dataByContentId.size(contentId)
-    if (storageSize.eq(0)) {
-      return null
-    }
-    return this.base.api.query.dataDirectory.dataByContentId(contentId)
-  }
-
-  /*
-   * Verify the liaison state for a DataObject:
-   * - Check the content ID has a DataObject
-   * - Check the storageProviderId is the liaison
-   * - Check the liaison state is Pending
-   *
-   * Each failure errors out, success returns the data object.
-   */
-  async checkLiaisonForDataObject(storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-
-    const obj = await this.getDataObject(contentId)
-
-    if (!obj) {
-      throw new Error(`No DataObject found for content ID: ${contentId}`)
-    }
-
-    if (!obj.liaison.eq(storageProviderId)) {
-      throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
-    }
-
-    if (obj.liaison_judgement.type !== 'Pending') {
-      throw new Error(`Content upload has already been processed.`)
-    }
-
-    return obj
-  }
-
-  /*
-   * Sets the data object liaison judgement to Accepted
-   */
-  async acceptContent(providerAccoundId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
-    return this.base.signAndSend(providerAccoundId, tx)
-  }
-
-  /*
-   * Gets storage relationship for contentId for the given provider
-   */
-  async getStorageRelationshipAndId(storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-    while (rids.length) {
-      const relationshipId = rids.shift()
-      let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
-      relationship = relationship.unwrap()
-      if (relationship.storage_provider.eq(storageProviderId)) {
-        return { relationship, relationshipId }
-      }
-    }
-
-    return {}
-  }
-
-  /*
-   * Creates storage relationship for a data object and provider and
-   * returns the relationship id
-   */
-  async createStorageRelationship(providerAccountId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
-
-    return this.base.signAndSendThenGetEventResult(providerAccountId, tx, {
-      module: 'dataObjectStorageRegistry',
-      event: 'DataObjectStorageRelationshipAdded',
-      type: 'DataObjectStorageRelationshipId',
-      index: 0,
-    })
-  }
-
-  /*
-   * Set the ready state for a data object storage relationship to the new value
-   */
-  async toggleStorageRelationshipReady(providerAccountId, storageProviderId, dosrId, ready) {
-    const tx = ready
-      ? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
-      : this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
-    return this.base.signAndSend(providerAccountId, tx)
-  }
-
-  /*
-   * Returns array of all the content ids in storage
-   */
-  async getKnownContentIds() {
-    const keys = await this.base.api.query.dataDirectory.dataByContentId.keys()
-    return keys.map(({ args: [contentId] }) => contentId)
-  }
-
-  /*
-   * Returns array of all content ids in storage where liaison judgement was Accepted
-   */
-  getAcceptedContentIds() {
-    if (!this._cachedEntries) {
-      return []
-    }
-
-    return this._cachedEntries
-      .filter(([, dataObject]) => dataObject.liaison_judgement.type === 'Accepted')
-      .map(
-        ([
-          {
-            args: [contentId],
-          },
-        ]) => contentId
-      )
-  }
-
-  /*
-   * Returns array of all ipfs hashes in storage where liaison judgement was Accepted
-   */
-  getAcceptedIpfsHashes() {
-    if (!this._cachedEntries) {
-      return []
-    }
-    const hashes = new Map()
-    this._cachedEntries
-      .filter(([, dataObject]) => dataObject.liaison_judgement.type === 'Accepted')
-      .forEach(([, dataObject]) => hashes.set(dataObject.ipfs_content_id.toString()))
-    return Array.from(hashes.keys())
-  }
-
-  /*
-   * Fetch and cache all data objects
-   */
-  async fetchDataObjects() {
-    this._cachedEntries = await this.base.api.query.dataDirectory.dataByContentId.entries()
-    this._idMappings = new Map()
-    this._cachedEntries.forEach(([{ args: [contentId] }, dataObject]) =>
-      this._idMappings.set(contentId.encode(), dataObject.ipfs_content_id.toString())
-    )
-  }
-
-  resolveContentIdToIpfsHash(contentId) {
-    if (!this._idMappings) return null
-    return this._idMappings.get(contentId)
-  }
-}
-
-module.exports = {
-  AssetsApi,
-}

+ 0 - 79
storage-node/packages/runtime-api/balances.js

@@ -1,79 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:runtime:balances')
-
-/*
- * Bundle API calls related to account balances.
- */
-class BalancesApi {
-  static async create(base) {
-    const ret = new BalancesApi()
-    ret.base = base
-    await BalancesApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Return true/false if the account has a minimum spendable balance.
-   */
-  async hasMinimumBalanceOf(accountId, min) {
-    const balance = await this.availableBalance(accountId)
-    if (typeof min === 'number') {
-      return balance.cmpn(min) >= 0
-    }
-    return balance.cmp(min) >= 0
-  }
-
-  /*
-   * Return the account's available balance which can be spent.
-   */
-  async availableBalance(accountId) {
-    const decoded = this.base.identities.keyring.decodeAddress(accountId, true)
-    return (await this.base.api.derive.balances.all(decoded)).availableBalance
-  }
-
-  /*
-   * Return the base transaction fee.
-   */
-  baseTransactionFee() {
-    return this.base.api.consts.transactionPayment.transactionBaseFee
-  }
-
-  /*
-   * Transfer amount currency from one address to another. The sending
-   * address must be an unlocked key pair!
-   */
-  async transfer(from, to, amount) {
-    const decode = require('@polkadot/keyring').decodeAddress
-    const toDecoded = decode(to, true)
-
-    const tx = this.base.api.tx.balances.transfer(toDecoded, amount)
-    return this.base.signAndSend(from, tx)
-  }
-}
-
-module.exports = {
-  BalancesApi,
-}

+ 0 - 246
storage-node/packages/runtime-api/identities.js

@@ -1,246 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const path = require('path')
-const fs = require('fs')
-const debug = require('debug')('joystream:runtime:identities')
-const { Keyring } = require('@polkadot/keyring')
-const utilCrypto = require('@polkadot/util-crypto')
-
-/*
- * Add identity management to the substrate API.
- *
- * This loosely groups: accounts, key management, and membership.
- */
-class IdentitiesApi {
-  static async create(base, { accountFile, passphrase, canPromptForPassphrase }) {
-    const ret = new IdentitiesApi()
-    ret.base = base
-    await ret.init(accountFile, passphrase, canPromptForPassphrase)
-    return ret
-  }
-
-  async init(accountFile, passphrase, canPromptForPassphrase) {
-    debug('Init')
-
-    // Creatre keyring
-    this.keyring = new Keyring()
-
-    this.canPromptForPassphrase = canPromptForPassphrase || false
-
-    // Load account file, if possible.
-    try {
-      this.key = await this.loadUnlock(accountFile, passphrase)
-    } catch (err) {
-      debug('Error loading account file:', err.message)
-    }
-  }
-
-  /*
-   * Load a key file and unlock it if necessary.
-   */
-  async loadUnlock(accountFile, passphrase) {
-    const fullname = path.resolve(accountFile)
-    debug('Initializing key from', fullname)
-    const key = this.keyring.addFromJson(require(fullname))
-    await this.tryUnlock(key, passphrase)
-    debug('Successfully initialized with address', key.address)
-    return key
-  }
-
-  /*
-   * Try to unlock a key if it isn't already unlocked.
-   * passphrase should be supplied as argument.
-   */
-  async tryUnlock(key, passphrase) {
-    if (!key.isLocked) {
-      debug('Key is not locked, not attempting to unlock')
-      return
-    }
-
-    // First try with an empty passphrase - for convenience
-    try {
-      key.decodePkcs8('')
-
-      if (passphrase) {
-        debug('Key was not encrypted, supplied passphrase was ignored')
-      }
-
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // Then with supplied passphrase
-    try {
-      debug('Decrypting with supplied passphrase')
-      key.decodePkcs8(passphrase)
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // If that didn't work, ask for a passphrase if appropriate
-    if (this.canPromptForPassphrase) {
-      passphrase = await this.askForPassphrase(key.address)
-      key.decodePkcs8(passphrase)
-      return
-    }
-
-    throw new Error('invalid passphrase supplied')
-  }
-
-  /*
-   * Ask for a passphrase
-   */
-
-  /* eslint-disable class-methods-use-this */
-  // Disable lint because the method used by a mocking library.
-  askForPassphrase(address) {
-    // Query for passphrase
-    const prompt = require('password-prompt')
-    return prompt(`Enter passphrase for ${address}: `, { required: false })
-  }
-
-  /*
-   * Return true if the account is a root account of a member
-   */
-  async isMember(accountId) {
-    const memberIds = await this.memberIdsOf(accountId) // return array of member ids
-    return memberIds.length > 0 // true if at least one member id exists for the acccount
-  }
-
-  /*
-   * Return all the member IDs of an account by the root account id
-   */
-  async memberIdsOf(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    return this.base.api.query.members.memberIdsByRootAccountId(decoded)
-  }
-
-  /*
-   * Return all the member IDs of an account by the controller account id
-   */
-  async memberIdsOfController(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    return this.base.api.query.members.memberIdsByControllerAccountId(decoded)
-  }
-
-  /*
-   * Return the first member ID of an account, or undefined if not a member root account.
-   */
-  async firstMemberIdOf(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    const ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
-    return ids[0]
-  }
-
-  /*
-   * Export a key pair to JSON. Will ask for a passphrase.
-   */
-  async exportKeyPair(accountId) {
-    const passphrase = await this.askForPassphrase(accountId)
-
-    // Produce JSON output
-    return this.keyring.toJson(accountId, passphrase)
-  }
-
-  /*
-   * Export a key pair and write it to a JSON file with the account ID as the
-   * name.
-   */
-  async writeKeyPairExport(accountId, prefix) {
-    // Generate JSON
-    const data = await this.exportKeyPair(accountId)
-
-    // Write JSON
-    let filename = `${data.address}.json`
-
-    if (prefix) {
-      const path = require('path')
-      filename = path.resolve(prefix, filename)
-    }
-
-    fs.writeFileSync(filename, JSON.stringify(data), {
-      encoding: 'utf8',
-      mode: 0o600,
-    })
-
-    return filename
-  }
-
-  /*
-   * Register account id with userInfo as a new member
-   * using default policy 0, returns new member id
-   */
-  async registerMember(accountId, userInfo) {
-    const tx = this.base.api.tx.members.buyMembership(0, userInfo.handle, userInfo.avatarUri, userInfo.about)
-
-    return this.base.signAndSendThenGetEventResult(accountId, tx, {
-      module: 'members',
-      event: 'MemberRegistered',
-      type: 'MemberId',
-      index: 0,
-    })
-  }
-
-  /*
-   * Injects a keypair and sets it as the default identity
-   */
-  useKeyPair(keyPair) {
-    this.key = this.keyring.addPair(keyPair)
-  }
-
-  /*
-   * Create a new role key. If no name is given,
-   * default to 'storage'.
-   */
-  async createNewRoleKey(name) {
-    name = name || 'storage-provider'
-
-    // Generate new key pair
-    const keyPair = utilCrypto.naclKeypairFromRandom()
-
-    // Encode to an address.
-    const addr = this.keyring.encodeAddress(keyPair.publicKey)
-    debug('Generated new key pair with address', addr)
-
-    // Add to key wring. We set the meta to identify the account as
-    // a role key.
-    const meta = {
-      name: `${name} role account`,
-    }
-
-    const createPair = require('@polkadot/keyring/pair').default
-    const pair = createPair('ed25519', keyPair, meta)
-
-    this.keyring.addPair(pair)
-
-    return pair
-  }
-
-  getSudoAccount() {
-    return this.base.api.query.sudo.key()
-  }
-}
-
-module.exports = {
-  IdentitiesApi,
-}

+ 0 - 379
storage-node/packages/runtime-api/index.js

@@ -1,379 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:runtime:base')
-const debugTx = require('debug')('joystream:runtime:base:tx')
-
-const { types } = require('@joystream/types')
-const { ApiPromise, WsProvider } = require('@polkadot/api')
-const { IdentitiesApi } = require('@joystream/storage-runtime-api/identities')
-const { BalancesApi } = require('@joystream/storage-runtime-api/balances')
-const { WorkersApi } = require('@joystream/storage-runtime-api/workers')
-const { AssetsApi } = require('@joystream/storage-runtime-api/assets')
-const { SystemApi } = require('@joystream/storage-runtime-api/system')
-const AsyncLock = require('async-lock')
-const Promise = require('bluebird')
-const { sleep } = require('@joystream/storage-utils/sleep')
-
-Promise.config({
-  cancellation: true,
-})
-
-const TX_TIMEOUT = 20 * 1000
-
-/*
- * Initialize runtime (substrate) API and keyring.
- */
-class RuntimeApi {
-  static async create(options) {
-    const runtimeApi = new RuntimeApi()
-    await runtimeApi.init(options || {})
-    return runtimeApi
-  }
-
-  async init(options) {
-    debug('Init')
-
-    options = options || {}
-
-    const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
-    let attempts = 0
-    // Create the API instrance
-    while (true) {
-      attempts++
-
-      if (options.retries && attempts > options.retries) {
-        throw new Error('Timeout trying to connect to node')
-      }
-
-      try {
-        this.api = new ApiPromise({ provider, types })
-        await this.api.isReadyOrError
-        break
-      } catch (err) {
-        debug('Connecting to node failed, will retry..')
-      }
-      await sleep(5000)
-    }
-
-    this.asyncLock = new AsyncLock()
-
-    // The storage provider id to use
-    this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
-
-    // Ok, create individual APIs
-    this.identities = await IdentitiesApi.create(this, {
-      accountFile: options.account_file,
-      passphrase: options.passphrase,
-      canPromptForPassphrase: options.canPromptForPassphrase,
-    })
-    this.balances = await BalancesApi.create(this)
-    this.workers = await WorkersApi.create(this)
-    this.assets = await AssetsApi.create(this)
-    this.system = await SystemApi.create(this)
-  }
-
-  disconnect() {
-    this.api.disconnect()
-  }
-
-  async untilChainIsSynced() {
-    debug('Waiting for chain to be synced before proceeding.')
-    while (true) {
-      const isSyncing = await this.chainIsSyncing()
-      if (isSyncing) {
-        debug('Still waiting for chain to be synced.')
-        await sleep(1 * 30 * 1000)
-      } else {
-        return
-      }
-    }
-  }
-
-  async chainIsSyncing() {
-    const { isSyncing } = await this.api.rpc.system.health()
-    return isSyncing.isTrue
-  }
-
-  async providerHasMinimumBalance(minimumBalance) {
-    const providerAccountId = this.identities.key.address
-    return this.balances.hasMinimumBalanceOf(providerAccountId, minimumBalance)
-  }
-
-  async providerIsActiveWorker() {
-    return this.workers.isRoleAccountOfStorageProvider(this.storageProviderId, this.identities.key.address)
-  }
-
-  executeWithAccountLock(func) {
-    return this.asyncLock.acquire('tx-queue', func)
-  }
-
-  static matchingEvents(subscribed = [], events = []) {
-    const filtered = events.filter((record) => {
-      const { event } = record
-
-      // Skip events we're not interested in.
-      const matching = subscribed.filter((value) => {
-        if (value[0] === '*' && value[1] === '*') {
-          return true
-        } else if (value[0] === '*') {
-          return event.method === value[1]
-        } else if (value[1] === '*') {
-          return event.section === value[0]
-        } else {
-          return event.section === value[0] && event.method === value[1]
-        }
-      })
-      return matching.length > 0
-    })
-
-    return filtered.map((record) => {
-      const { event } = record
-      const types = event.typeDef
-      const payload = new Map()
-
-      // this check may be un-necessary but doing it just incase
-      if (event.data) {
-        event.data.forEach((data, index) => {
-          const type = types[index].type
-          payload.set(index, { type, data })
-        })
-      }
-      const fullName = `${event.section}.${event.method}`
-      debugTx(`matched event: ${fullName} =>`, event.data && event.data.join(', '))
-      return [fullName, payload]
-    })
-  }
-
-  /*
-   * signAndSend() with nonce tracking, to enable concurrent sending of transacctions
-   * so that they can be included in the same block. Allows you to use the accountId instead
-   * of the key, without requiring an external Signer configured on the underlying ApiPromie
-   *
-   * If the subscribed events are given, then the matchedEvents will be returned in the resolved
-   * value.
-   * Resolves when a transaction finalizes with a successful dispatch (for both signed and root origins)
-   * Rejects in all other cases.
-   * Will also reject on timeout if the transaction doesn't finalize in time.
-   */
-  async signAndSend(accountId, tx, subscribed) {
-    // Accept both a string or AccountId as argument
-    accountId = this.identities.keyring.encodeAddress(accountId)
-
-    // Throws if keyPair is not found
-    const fromKey = this.identities.keyring.getPair(accountId)
-
-    // Key must be unlocked to use
-    if (fromKey.isLocked) {
-      throw new Error('Must unlock key before using it to sign!')
-    }
-
-    const callbacks = {
-      // Functions to be called when the submitted transaction is finalized. They are initialized
-      // after the transaction is submitted to the resolve and reject function of the final promise
-      // returned by signAndSend
-      // on extrinsic success
-      onFinalizedSuccess: null,
-      // on extrinsic failure
-      onFinalizedFailed: null,
-      // Function assigned when transaction is successfully submitted. Invoking it ubsubscribes from
-      // listening to tx status updates.
-      unsubscribe: null,
-    }
-
-    // object used to communicate back information from the tx updates handler
-    const out = {
-      lastResult: { status: {} },
-    }
-
-    // synchronize access to nonce
-    await this.executeWithAccountLock(async () => {
-      const nonce = await this.api.rpc.system.accountNextIndex(accountId)
-      const signed = tx.sign(fromKey, { nonce })
-      const txhash = signed.hash
-
-      try {
-        callbacks.unsubscribe = await signed.send(
-          RuntimeApi.createTxUpdateHandler(callbacks, { nonce, txhash, subscribed }, out)
-        )
-
-        const serialized = JSON.stringify({
-          nonce,
-          txhash,
-          tx: signed.toHex(),
-        })
-
-        // We are depending on the behaviour that at this point the Ready status
-        // Elaboration: when the tx is rejected and therefore the tx isn't added
-        // to the tx pool ready queue status is not updated and
-        // .send() throws, so we don't reach this code.
-        if (out.lastResult.status.isFuture) {
-          debugTx(`Warning: Submitted Tx with future nonce: ${serialized}`)
-        } else {
-          debugTx(`Submitted: ${serialized}`)
-        }
-      } catch (err) {
-        const errstr = err.toString()
-        debugTx(`Rejected: ${errstr} txhash: ${txhash} nonce: ${nonce}`)
-        throw err
-      }
-    })
-
-    // Here again we assume that the transaction has been accepted into the tx pool
-    // and status was updated.
-    // We cannot get tx updates for a future tx so return now to avoid blocking caller
-    if (out.lastResult.status.isFuture) {
-      return {}
-    }
-
-    // Return a promise that will resolve when the transaction finalizes.
-    // On timeout it will be rejected. Timeout is a workaround for dealing with the
-    // fact that if rpc connection is lost to node we have no way of detecting it or recovering.
-    // Timeout can also occur if a transaction that was part of batch of transactions submitted
-    // gets usurped.
-    return new Promise((resolve, reject) => {
-      callbacks.onFinalizedSuccess = resolve
-      callbacks.onFinalizedFailed = reject
-    }).timeout(TX_TIMEOUT)
-  }
-
-  /*
-   * Sign and send a transaction expect event from
-   * module and return specific(index) value from event data
-   */
-  async signAndSendThenGetEventResult(senderAccountId, tx, { module, event, index, type }) {
-    if (!module || !event || index === undefined || !type) {
-      throw new Error('MissingSubscribeEventDetails')
-    }
-
-    const subscribed = [[module, event]]
-
-    const { mappedEvents } = await this.signAndSend(senderAccountId, tx, subscribed)
-
-    if (!mappedEvents) {
-      // The tx was a future so it was not possible and will not be possible to get events
-      throw new Error('NoEventsWereCaptured')
-    }
-
-    if (!mappedEvents.length) {
-      // our expected event was not emitted
-      throw new Error('ExpectedEventNotFound')
-    }
-
-    // fix - we may not necessarily want the first event
-    // when there are multiple instances of the same event
-    const firstEvent = mappedEvents[0]
-
-    if (firstEvent[0] !== `${module}.${event}`) {
-      throw new Error('WrongEventCaptured')
-    }
-
-    const payload = firstEvent[1]
-    if (!payload.has(index)) {
-      throw new Error('DataIndexOutOfRange')
-    }
-
-    const value = payload.get(index)
-    if (value.type !== type) {
-      throw new Error('DataTypeNotExpectedType')
-    }
-
-    return value.data
-  }
-
-  static createTxUpdateHandler(callbacks, submittedTx, out = {}) {
-    const { nonce, txhash, subscribed } = submittedTx
-
-    return function handleTxUpdates(result) {
-      const { events = [], status } = result
-      const { unsubscribe, onFinalizedFailed, onFinalizedSuccess } = callbacks
-
-      if (!result || !status) {
-        return
-      }
-
-      out.lastResult = result
-
-      const txinfo = () => {
-        return JSON.stringify({
-          nonce,
-          txhash,
-        })
-      }
-
-      if (result.isError) {
-        unsubscribe()
-
-        debugTx(`Error: ${status.type}`, txinfo())
-
-        onFinalizedFailed &&
-          onFinalizedFailed({ err: status.type, result, tx: status.isUsurped ? status.asUsurped : undefined })
-      } else if (result.isCompleted) {
-        unsubscribe()
-
-        debugTx('Finalized', txinfo())
-
-        const mappedEvents = RuntimeApi.matchingEvents(subscribed, events)
-        const failed = result.findRecord('system', 'ExtrinsicFailed')
-        const success = result.findRecord('system', 'ExtrinsicSuccess')
-        const sudid = result.findRecord('sudo', 'Sudid')
-        const sudoAsDone = result.findRecord('sudo', 'SudoAsDone')
-
-        if (failed) {
-          const {
-            event: { data },
-          } = failed
-          const dispatchError = data[0]
-          onFinalizedFailed({
-            err: 'ExtrinsicFailed',
-            mappedEvents,
-            result,
-            block: status.asCompleted,
-            dispatchError, // we get module number/id and index into the Error enum
-          })
-        } else if (success) {
-          // Note: For root origin calls, the dispatch error is logged to the joystream-node
-          // console, we cannot get it in the events
-          if (sudid) {
-            const dispatchSuccess = sudid.event.data[0]
-            if (dispatchSuccess.isOk) {
-              onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-            } else {
-              onFinalizedFailed({ err: 'SudoFailed', mappedEvents, result, block: status.asCompleted })
-            }
-          } else if (sudoAsDone) {
-            const dispatchSuccess = sudoAsDone.event.data[0]
-            if (dispatchSuccess.isOk) {
-              onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-            } else {
-              onFinalizedFailed({ err: 'SudoAsFailed', mappedEvents, result, block: status.asCompleted })
-            }
-          } else {
-            onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-          }
-        }
-      }
-    }
-  }
-}
-
-module.exports = {
-  RuntimeApi,
-}

+ 0 - 58
storage-node/packages/runtime-api/package.json

@@ -1,58 +0,0 @@
-{
-  "name": "@joystream/storage-runtime-api",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Runtime API abstraction for Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org/"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node",
-    "runtime"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'test/**/*.js' --exit",
-    "lint": "eslint '**/*.js' --ignore-pattern 'test/**/*.js'"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0",
-    "sinon": "^7.3.2",
-    "sinon-chai": "^3.3.0",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-utils": "^0.1.0",
-    "@joystream/types": "^0.16.1",
-    "@polkadot/api": "4.2.1",
-    "async-lock": "^1.2.0",
-    "lodash": "^4.17.11",
-    "password-prompt": "^1.1.2"
-  }
-}

+ 0 - 33
storage-node/packages/runtime-api/system.js

@@ -1,33 +0,0 @@
-'use strict'
-
-const debug = require('debug')('joystream:runtime:system')
-
-/*
- * Add system functionality to the substrate API.
- */
-class SystemApi {
-  static async create(base) {
-    const ret = new SystemApi()
-    ret.base = base
-    await SystemApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Check the running chain for the development setup.
-   */
-  async isDevelopmentChain() {
-    const developmentChainName = 'Development'
-    const runningChainName = await this.base.api.rpc.system.chain()
-
-    return runningChainName.toString() === developmentChainName
-  }
-}
-
-module.exports = {
-  SystemApi,
-}

+ 0 - 48
storage-node/packages/runtime-api/test/assets.js

@@ -1,48 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('Assets', () => {
-  let api
-  before(async () => {
-    api = await RuntimeApi.create()
-    await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-  })
-
-  it('returns DataObjects for a content ID', async () => {
-    const obj = await api.assets.getDataObject('foo')
-    expect(obj).to.be.null
-  })
-
-  it('can check the liaison for a DataObject', async () => {
-    expect(async () => {
-      await api.assets.checkLiaisonForDataObject('foo', 'bar')
-    }).to.throw
-  })
-
-  // Needs properly staked accounts
-  it('can accept content')
-  it('can reject content')
-  it('can create a storage relationship for content')
-  it('can toggle a storage relationship to ready state')
-})

+ 0 - 44
storage-node/packages/runtime-api/test/balances.js

@@ -1,44 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('Balances', () => {
-  let api
-  let key
-  before(async () => {
-    api = await RuntimeApi.create()
-    key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-  })
-
-  it('returns free balance for an account', async () => {
-    const balance = await api.balances.availableBalance(key.address)
-    // Should be exactly zero
-    expect(balance.cmpn(0)).to.equal(0)
-  })
-
-  it('checks whether a minimum balance exists', async () => {
-    // A minimum of 0 should exist, but no more.
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false
-  })
-})

+ 0 - 6
storage-node/packages/runtime-api/test/data/edwards.json

@@ -1,6 +0,0 @@
-{
-  "address": "5HDnLpCjdbUBR6eyuz5geBJWzoZdXmWFXahEYrLg44rvToCK",
-  "encoded": "0x475f0c37c7893517f5a93c88b81208346211dfa9b0fd09e08bfd34f6e14da5468f48c6d9b0b4cbfbd7dd03a6f0730f5ee9a01b0cd30265e6b1b9fb652958889d5b174624568f49f3a671b8c330c3920814e938383749aa9046366ae6881281e0d053a9aa913a54ad53bd2f1dcf6c26e6b476495ea058832a36f122d09c18154577f951298ac72e6f471a6dca41e4d5741ed5db966001ae5ffd2b99d4c7",
-  "encoding": { "content": ["pkcs8", "ed25519"], "type": "xsalsa20-poly1305", "version": "2" },
-  "meta": { "name": "Edwards keypair for testing", "whenCreated": 1558974074691 }
-}

+ 0 - 6
storage-node/packages/runtime-api/test/data/edwards_unlocked.json

@@ -1,6 +0,0 @@
-{
-  "address": "5EZxbX2arChvhYL7cEgSybJL3kzEeuPqqNYyLqRBJxZx7Mao",
-  "encoded": "0x3053020101300506032b65700422042071f2096e5857177f03768478d0c006f60d1ee684f14feaede0f9c17e139e65586ec832e5db75112b0a4585b6a9ffe58fa056e5b1228f02663e9e64743e65c9a5a1230321006ec832e5db75112b0a4585b6a9ffe58fa056e5b1228f02663e9e64743e65c9a5",
-  "encoding": { "content": ["pkcs8", "ed25519"], "type": "none", "version": "2" },
-  "meta": { "name": "Unlocked keypair for testing", "whenCreated": 1558975434890 }
-}

+ 0 - 6
storage-node/packages/runtime-api/test/data/schnorr.json

@@ -1,6 +0,0 @@
-{
-  "address": "5GjxHjq9rtcxsfgcNswLGjYNRu8UmHAnYq7KfACE3yTjfYVk",
-  "encoded": "0x3dd5965708bbf4316c431ba8274b885a6017d82bc8bcb8c8b02e00c0c90356fb8a379f4be44bd454c76799d9d09bda7fc03c695340e23818f60cfcf00f3b48f42fb8d362e74f261354e99fff9cb2f91d899a722f0051db74d985602f3e95e49a99c73f77951022f98a99bb90981e3c1f60a5642ed583cd65b0161f8461d30f8b320bcd98cd7fb7ec71886d76825696d6fc11ac14a7391f2cdcb2b721d4",
-  "encoding": { "content": ["pkcs8", "sr25519"], "type": "xsalsa20-poly1305", "version": "2" },
-  "meta": { "name": "Schnorr keypair for testing", "whenCreated": 1558974091206 }
-}

+ 0 - 98
storage-node/packages/runtime-api/test/identities.js

@@ -1,98 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-const sinon = require('sinon')
-const temp = require('temp').track()
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('Identities', () => {
-  let api
-  before(async () => {
-    api = await RuntimeApi.create({ canPromptForPassphrase: true })
-  })
-
-  it('imports keys', async () => {
-    // Unlocked keys can be imported without asking for a passphrase
-    await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-
-    // Edwards and schnorr keys should unlock
-    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-    await api.identities.loadUnlock('test/data/edwards.json')
-    await api.identities.loadUnlock('test/data/schnorr.json')
-    passphraseStub.restore()
-
-    // Except if the wrong passphrase is given
-    const passphraseStubBad = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'bad')
-    expect(async () => {
-      await api.identities.loadUnlock('test/data/edwards.json')
-    }).to.throw
-    passphraseStubBad.restore()
-  })
-
-  it('knows about membership', async () => {
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-    const addr = key.address
-
-    // Without seeding the runtime with data, we can only verify that the API
-    // reacts well in the absence of membership
-    expect(await api.identities.isMember(addr)).to.be.false
-    const memberId = await api.identities.firstMemberIdOf(addr)
-
-    expect(memberId).to.be.undefined
-  })
-
-  it('exports keys', async () => {
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-
-    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-    const exported = await api.identities.exportKeyPair(key.address)
-    passphraseStub.restore()
-
-    expect(exported).to.have.property('address')
-    expect(exported.address).to.equal(key.address)
-
-    expect(exported).to.have.property('encoding')
-
-    expect(exported.encoding).to.have.property('version', '3')
-
-    expect(exported.encoding).to.have.property('content')
-    expect(exported.encoding.content).to.include('pkcs8')
-    expect(exported.encoding.content).to.include('ed25519')
-
-    expect(exported.encoding).to.have.property('type')
-    expect(exported.encoding.type).to.include('xsalsa20-poly1305')
-  })
-
-  it('writes key export files', async () => {
-    const prefix = temp.mkdirSync('joystream-runtime-api-test')
-
-    const key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-
-    const passphraseStub = sinon.stub(api.identities, 'askForPassphrase').callsFake(() => 'asdf')
-    const filename = await api.identities.writeKeyPairExport(key.address, prefix)
-    passphraseStub.restore()
-
-    const fs = require('fs')
-    const stat = fs.statSync(filename)
-    expect(stat.isFile()).to.be.true
-  })
-})

+ 0 - 28
storage-node/packages/runtime-api/test/index.js

@@ -1,28 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('RuntimeApi', () => {
-  it('can be created', async () => {
-    const api = await RuntimeApi.create()
-    api.disconnect()
-  })
-})

+ 0 - 303
storage-node/packages/runtime-api/workers.js

@@ -1,303 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:runtime:roles')
-const BN = require('bn.js')
-const { Text } = require('@polkadot/types')
-
-/*
- * Finds assigned worker id corresponding to the application id from the resulting
- * ApplicationIdToWorkerIdMap map in the OpeningFilled event. Expects map to
- * contain at least one entry.
- */
-function getWorkerIdFromApplicationIdToWorkerIdMap(filledMap, applicationId) {
-  if (filledMap.size === 0) {
-    throw new Error('Expected opening to be filled!')
-  }
-
-  let ourApplicationIdKey
-
-  for (const key of filledMap.keys()) {
-    if (key.eq(applicationId)) {
-      ourApplicationIdKey = key
-      break
-    }
-  }
-
-  if (!ourApplicationIdKey) {
-    throw new Error('Expected application id to have been filled!')
-  }
-
-  const workerId = filledMap.get(ourApplicationIdKey)
-
-  return workerId
-}
-
-/*
- * Add worker related functionality to the substrate API.
- */
-class WorkersApi {
-  static async create(base) {
-    const ret = new WorkersApi()
-    ret.base = base
-    await ret.init()
-    return ret
-  }
-
-  // eslint-disable-next-line class-methods-use-this, require-await
-  async init() {
-    debug('Init')
-  }
-
-  /*
-   * Check whether the given account and id represent an enrolled storage provider
-   */
-  async isRoleAccountOfStorageProvider(storageProviderId, roleAccountId) {
-    const id = new BN(storageProviderId)
-    const roleAccount = this.base.identities.keyring.decodeAddress(roleAccountId)
-    const providerAccount = await this.storageProviderRoleAccount(id)
-    return providerAccount && providerAccount.eq(roleAccount)
-  }
-
-  /*
-   * Returns true if the provider id is enrolled
-   */
-  async isStorageProvider(storageProviderId) {
-    const worker = await this.storageWorkerByProviderId(storageProviderId)
-    return worker !== null
-  }
-
-  /*
-   * Returns a provider's role account or null if provider doesn't exist
-   */
-  async storageProviderRoleAccount(storageProviderId) {
-    const worker = await this.storageWorkerByProviderId(storageProviderId)
-    return worker ? worker.role_account_id : null
-  }
-
-  /*
-   * Returns a Worker instance or null if provider does not exist
-   */
-  async storageWorkerByProviderId(storageProviderId) {
-    const id = new BN(storageProviderId)
-    const { providers } = await this.getAllProviders()
-    return providers[id.toNumber()] || null
-  }
-
-  /*
-   * Returns storage provider's general purpose storage value from chain
-   */
-  async getWorkerStorageValue(id) {
-    const value = await this.base.api.query.storageWorkingGroup.workerStorage(id)
-    return new Text(this.base.api.registry, value).toString()
-  }
-
-  /*
-   * Set storage provider's general purpose storage value on chain
-   */
-  async setWorkerStorageValue(value) {
-    const id = this.base.storageProviderId
-    const tx = this.base.api.tx.storageWorkingGroup.updateRoleStorage(id, value)
-    const senderAccount = await this.storageProviderRoleAccount(id)
-    return this.base.signAndSend(senderAccount, tx)
-  }
-
-  /*
-   * Returns the the first found provider id with a role account or null if not found
-   */
-  async findProviderIdByRoleAccount(roleAccount) {
-    const { ids, providers } = await this.getAllProviders()
-
-    for (let i = 0; i < ids.length; i++) {
-      const id = ids[i]
-      if (providers[id].role_account_id.eq(roleAccount)) {
-        return id
-      }
-    }
-
-    return null
-  }
-
-  /*
-   * Returns the set of ids and Worker instances of providers enrolled on the network
-   */
-  async getAllProviders() {
-    const ids = []
-    const providers = {}
-    const entries = await this.base.api.query.storageWorkingGroup.workerById.entries()
-    entries.forEach(([storageKey, worker]) => {
-      const id = storageKey.args[0].toNumber()
-      ids.push(id)
-      providers[id] = worker
-    })
-
-    return { ids, providers }
-  }
-
-  async getLeadRoleAccount() {
-    const currentLead = await this.base.api.query.storageWorkingGroup.currentLead()
-    if (currentLead.isSome) {
-      const leadWorkerId = currentLead.unwrap()
-      const worker = await this.base.api.query.storageWorkingGroup.workerById(leadWorkerId)
-      return worker.role_account_id
-    }
-    return null
-  }
-
-  // Helper methods below don't really belong in the colossus runtime api library.
-  // They are only used by the dev-init command in the cli to setup a development environment
-
-  /*
-   * Add a new storage group opening using the lead account. Returns the
-   * new opening id.
-   */
-  async devAddStorageOpening(info) {
-    const openTx = this.devMakeAddOpeningTx('Worker', info)
-    return this.devSubmitAddOpeningTx(openTx, await this.getLeadRoleAccount())
-  }
-
-  /*
-   * Add a new storage working group lead opening using sudo account. Returns the
-   * new opening id.
-   */
-  async devAddStorageLeadOpening(info) {
-    const openTx = this.devMakeAddOpeningTx('Leader', info)
-    const sudoTx = this.base.api.tx.sudo.sudo(openTx)
-    return this.devSubmitAddOpeningTx(sudoTx, await this.base.identities.getSudoAccount())
-  }
-
-  /*
-   * Constructs an addOpening tx of openingType
-   */
-  devMakeAddOpeningTx(openingType, info) {
-    return this.base.api.tx.storageWorkingGroup.addOpening(
-      'CurrentBlock',
-      {
-        application_rationing_policy: {
-          max_active_applicants: 1,
-        },
-        max_review_period_length: 10,
-        // default values for everything else..
-      },
-      info || 'dev-opening',
-      openingType
-    )
-  }
-
-  /*
-   * Submits a tx (expecting it to dispatch storageWorkingGroup.addOpening) and returns
-   * the OpeningId from the resulting event.
-   */
-  async devSubmitAddOpeningTx(tx, senderAccount) {
-    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-      module: 'storageWorkingGroup',
-      event: 'OpeningAdded',
-      type: 'OpeningId',
-      index: 0,
-    })
-  }
-
-  /*
-   * Apply on an opening, returns the application id.
-   */
-  async devApplyOnOpening(openingId, memberId, memberAccount, roleAccount) {
-    const applyTx = this.base.api.tx.storageWorkingGroup.applyOnOpening(
-      memberId,
-      openingId,
-      roleAccount,
-      null,
-      null,
-      `colossus-${memberId}`
-    )
-
-    return this.base.signAndSendThenGetEventResult(memberAccount, applyTx, {
-      module: 'storageWorkingGroup',
-      event: 'AppliedOnOpening',
-      type: 'ApplicationId',
-      index: 1,
-    })
-  }
-
-  /*
-   * Move lead opening to review state using sudo account
-   */
-  async devBeginLeadOpeningReview(openingId) {
-    const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
-    const sudoTx = this.base.api.tx.sudo.sudo(beginReviewTx)
-    return this.base.signAndSend(await this.base.identities.getSudoAccount(), sudoTx)
-  }
-
-  /*
-   * Move a storage opening to review state using lead account
-   */
-  async devBeginStorageOpeningReview(openingId) {
-    const beginReviewTx = this.devMakeBeginOpeningReviewTx(openingId)
-    return this.base.signAndSend(await this.getLeadRoleAccount(), beginReviewTx)
-  }
-
-  /*
-   * Constructs a beingApplicantReview tx for openingId, which puts an opening into the review state
-   */
-  devMakeBeginOpeningReviewTx(openingId) {
-    return this.base.api.tx.storageWorkingGroup.beginApplicantReview(openingId)
-  }
-
-  /*
-   * Fill a lead opening, return the assigned worker id, using the sudo account
-   */
-  async devFillLeadOpening(openingId, applicationId) {
-    const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
-    const sudoTx = this.base.api.tx.sudo.sudo(fillTx)
-    const filled = await this.devSubmitFillOpeningTx(await this.base.identities.getSudoAccount(), sudoTx)
-    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-  }
-
-  /*
-   * Fill a storage opening, return the assigned worker id, using the lead account
-   */
-  async devFillStorageOpening(openingId, applicationId) {
-    const fillTx = this.devMakeFillOpeningTx(openingId, applicationId)
-    const filled = await this.devSubmitFillOpeningTx(await this.getLeadRoleAccount(), fillTx)
-    return getWorkerIdFromApplicationIdToWorkerIdMap(filled, applicationId)
-  }
-
-  /*
-   * Constructs a FillOpening transaction
-   */
-  devMakeFillOpeningTx(openingId, applicationId) {
-    return this.base.api.tx.storageWorkingGroup.fillOpening(openingId, [applicationId], null)
-  }
-
-  /*
-   * Dispatches a fill opening tx and returns a map of the application id to their new assigned worker ids.
-   */
-  async devSubmitFillOpeningTx(senderAccount, tx) {
-    return this.base.signAndSendThenGetEventResult(senderAccount, tx, {
-      module: 'storageWorkingGroup',
-      event: 'OpeningFilled',
-      type: 'ApplicationIdToWorkerIdMap',
-      index: 1,
-    })
-  }
-}
-
-module.exports = {
-  WorkersApi,
-}

+ 0 - 1
storage-node/packages/storage/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 20
storage-node/packages/storage/README.md

@@ -1,20 +0,0 @@
-# Summary
-
-This package contains an abstraction over the storage backend of colossus.
-
-In the current version, the storage is backed by IPFS. In order to run tests,
-you have to also run an [IPFS node](https://dist.ipfs.io/#go-ipfs).
-
-## Testing
-
-Note also that tests do not finish. This is due to a design flaw in the
-[IPFS HTTP Client](https://github.com/ipfs/js-ipfs-http-client/i) npm package.
-In that package, requests can seemingly never time out - this client library
-patches over this by using [bluebird's cancellable Promises](http://bluebirdjs.com/docs/api/cancellation.html),
-so that at least this package can provide a timeout. In the client library,
-however, that still leaves some dangling requests, meaning node cannot
-exit cleanly.
-
-For this reason, we're passing the `--exit` flag to `mocha` in the `test`
-script - run `yarn run test` and you should have a well behaving test suite.
-Run `mocha` directly, without this flag, and you may be disappointed.

+ 0 - 143
storage-node/packages/storage/filter.js

@@ -1,143 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:storage:filter')
-
-const DEFAULT_MAX_FILE_SIZE = 10000 * 1024 * 1024
-const DEFAULT_ACCEPT_TYPES = ['video/*', 'audio/*', 'image/*']
-const DEFAULT_REJECT_TYPES = []
-
-// Configuration defaults
-function configDefaults(config) {
-  const filter = config.filter || {}
-
-  // We accept zero as switching this check off.
-  if (typeof filter.max_size === 'undefined') {
-    filter.max_size = DEFAULT_MAX_FILE_SIZE
-  }
-
-  // Figure out mime types
-  filter.mime = filter.mime || []
-  filter.mime.accept = filter.mime.accept || DEFAULT_ACCEPT_TYPES
-  filter.mime.reject = filter.mime.reject || DEFAULT_REJECT_TYPES
-
-  return filter
-}
-
-// Mime type matching
-function mimeMatches(acceptable, provided) {
-  if (acceptable.endsWith('*')) {
-    // Wildcard match
-    const prefix = acceptable.slice(0, acceptable.length - 1)
-    debug('wildcard matching', provided, 'against', acceptable, '/', prefix)
-    return provided.startsWith(prefix)
-  }
-  // Exact match
-  debug('exact matching', provided, 'against', acceptable)
-  return provided === acceptable
-}
-
-function mimeMatchesAny(accept, reject, provided) {
-  // Pass accept
-  let accepted = false
-  for (const item of accept) {
-    if (mimeMatches(item, provided)) {
-      debug('Content type matches', item, 'which is acceptable.')
-      accepted = true
-      break
-    }
-  }
-  if (!accepted) {
-    return false
-  }
-
-  // Don't pass reject
-  for (const item of reject) {
-    if (mimeMatches(item, provided)) {
-      debug('Content type matches', item, 'which is unacceptable.')
-      return false
-    }
-  }
-
-  return true
-}
-
-/**
- * Simple filter function deciding whether or not to accept a content
- * upload.
- *
- * This is a straightforward implementation of
- * https://github.com/Joystream/storage-node-joystream/issues/14 - but should
- * most likely be improved on in future.
- * @param {object} config - configuration
- * @param {object} headers - required headers
- * @param {string} mimeType - expected MIME type
- * @return {object} HTTP status code and error message.
- **/
-function filterFunc(config, headers, mimeType) {
-  const filter = configDefaults(config)
-
-  const size = contentLengthFromHeaders(headers)
-
-  // Enforce maximum file upload size
-  if (filter.max_size) {
-    if (!size) {
-      return {
-        code: 411,
-        message: 'A Content-Length header is required.',
-        content_length: size,
-      }
-    }
-
-    if (size > filter.max_size) {
-      return {
-        code: 413,
-        message: 'The provided Content-Length is too large.',
-        content_length: size,
-      }
-    }
-  }
-
-  // Enforce mime type based filtering
-  if (mimeType && !mimeMatchesAny(filter.mime.accept, filter.mime.reject, mimeType)) {
-    return {
-      code: 415,
-      message: 'Content has an unacceptable MIME type.',
-      content_length: size,
-    }
-  }
-
-  return {
-    code: 200,
-    content_length: size,
-  }
-}
-
-function contentLengthFromHeaders(headers) {
-  const content_length = headers['content-length']
-
-  if (!content_length) {
-    return null
-  }
-
-  return parseInt(content_length, 10)
-}
-
-module.exports = filterFunc

+ 0 - 25
storage-node/packages/storage/index.js

@@ -1,25 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const { Storage } = require('./storage')
-
-module.exports = {
-  Storage,
-}

+ 0 - 55
storage-node/packages/storage/package.json

@@ -1,55 +0,0 @@
-{
-  "name": "@joystream/storage-node-backend",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Storage management code for Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node",
-    "storage"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha --exit 'test/**/*.js'",
-    "lint": "eslint '**/*.js' --ignore-pattern 'test/**/*.js'",
-    "checks": "yarn lint && prettier ./ --check && tsc --noEmit --pretty"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "chai-as-promised": "^7.1.1",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0"
-  },
-  "dependencies": {
-    "bluebird": "^3.5.5",
-    "file-type": "^11.0.0",
-    "ipfs-http-client": "^32.0.1",
-    "temp": "^0.9.0"
-  }
-}

+ 0 - 437
storage-node/packages/storage/storage.js

@@ -1,437 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const { Transform } = require('stream')
-const fs = require('fs')
-
-const debug = require('debug')('joystream:storage:storage')
-
-const Promise = require('bluebird')
-
-const Hash = require('ipfs-only-hash')
-
-Promise.config({
-  cancellation: true,
-})
-
-const fileType = require('file-type')
-const ipfsClient = require('ipfs-http-client')
-const temp = require('temp').track()
-const _ = require('lodash')
-
-// Default request timeout; imposed on top of the IPFS client, because the
-// client doesn't seem to care.
-const DEFAULT_TIMEOUT = 30 * 1000
-
-// Default/dummy resolution implementation.
-const DEFAULT_RESOLVE_CONTENT_ID = async (original) => {
-  debug('Warning: Default resolution returns original CID', original)
-  return original
-}
-
-// Default file info if nothing could be detected.
-const DEFAULT_FILE_INFO = {
-  mimeType: 'application/octet-stream',
-  ext: 'bin',
-}
-
-/*
- * fileType is a weird name, because we're really looking at MIME types.
- * Also, the type field includes extension info, so we're going to call
- * it fileInfo { mimeType, ext } instead.
- * Nitpicking, but it also means we can add our default type if things
- * go wrong.
- */
-function fixFileInfo(info) {
-  if (!info) {
-    info = DEFAULT_FILE_INFO
-  } else {
-    info.mimeType = info.mime
-    delete info.mime
-  }
-  return info
-}
-
-function fixFileInfoOnStream(stream) {
-  const info = fixFileInfo(stream.fileType)
-  delete stream.fileType
-  stream.fileInfo = info
-  return stream
-}
-
-/*
- * Internal Transform stream for helping write to a temporary location, adding
- * MIME type detection, and a commit() function.
- */
-class StorageWriteStream extends Transform {
-  constructor(storage, options) {
-    options = _.clone(options || {})
-
-    super(options)
-
-    this.storage = storage
-
-    // Create temp target.
-    this.temp = temp.createWriteStream()
-    this.temp.on('error', (err) => this.emit('error', err))
-
-    // Small temporary buffer storing first fileType.minimumBytes of stream
-    // used for early file type detection
-    this.buf = Buffer.alloc(0)
-  }
-
-  _transform(chunk, encoding, callback) {
-    // Deal with buffers only
-    if (typeof chunk === 'string') {
-      chunk = Buffer.from(chunk)
-    }
-
-    // Try to detect file type during streaming.
-    if (!this.fileInfo && this.buf.byteLength <= fileType.minimumBytes) {
-      this.buf = Buffer.concat([this.buf, chunk])
-
-      if (this.buf.byteLength >= fileType.minimumBytes) {
-        const info = fileType(this.buf)
-        // No info? We will try again at the end of the stream.
-        if (info) {
-          this.fileInfo = fixFileInfo(info)
-          this.emit('fileInfo', this.fileInfo)
-        }
-      }
-    }
-
-    // Always waiting for write flush can be slow..
-    // this.temp.write(chunk, (err) => {
-    //   callback(err)
-    // })
-
-    // Respect backpressure and handle write error
-    if (!this.temp.write(chunk)) {
-      this.temp.once('drain', () => callback(null))
-    } else {
-      process.nextTick(() => callback(null))
-    }
-  }
-
-  _flush(callback) {
-    debug('Flushing temporary stream:', this.temp.path)
-    this.temp.end(() => {
-      debug('flushed!')
-      callback(null)
-      this.emit('end')
-    })
-  }
-
-  /*
-   * Get file info
-   */
-
-  async info() {
-    if (!this.temp) {
-      throw new Error('Cannot get info on temporary stream that does not exist. Did you call cleanup()?')
-    }
-
-    if (!this.fileInfo) {
-      const read = fs.createReadStream(this.temp.path)
-
-      const stream = await fileType.stream(read)
-
-      this.fileInfo = fixFileInfoOnStream(stream).fileInfo
-    }
-
-    if (!this.hash) {
-      const read = fs.createReadStream(this.temp.path)
-      this.hash = await Hash.of(read)
-    }
-
-    this.emit('info', this.fileInfo, this.hash)
-
-    return {
-      info: this.fileInfo,
-      hash: this.hash,
-    }
-  }
-
-  /*
-   * Commit this stream to the IPFS backend.
-   */
-  commit() {
-    if (!this.temp) {
-      throw new Error('Cannot commit a temporary stream that does not exist. Did you call cleanup()?')
-    }
-
-    debug('Committing temporary stream: ', this.temp.path)
-    this.storage.ipfs
-      .addFromFs(this.temp.path)
-      .then(async (result) => {
-        const hash = result[0].hash
-        debug('Stream committed as', hash)
-        this.emit('committed', hash)
-        await this.storage.ipfs.pin.add(hash)
-        this.cleanup()
-      })
-      .catch((err) => {
-        debug('Error committing stream', err)
-        this.emit('error', err)
-        this.cleanup()
-      })
-  }
-
-  /*
-   * Clean up temporary data.
-   */
-  cleanup() {
-    // Make it safe to call cleanup more than once
-    if (!this.temp) return
-    debug('Cleaning up temporary file: ', this.temp.path)
-    fs.unlink(this.temp.path, () => {
-      /* Ignore errors. */
-    })
-    delete this.temp
-  }
-}
-
-/*
- * Manages the storage backend interaction. This provides a Promise-based API.
- *
- * Usage:
- *
- *   const store = await Storage.create({ ... });
- *   store.open(...);
- */
-class Storage {
-  /*
-   * Create a Storage instance. Options include:
-   *
-   * - an `ipfs` property, which is itself a hash containing
-   *   - `connect_options` to be passed to the IPFS client library for
-   *     connecting to an IPFS node.
-   * - a `resolve_content_id` function, which translates Joystream
-   *   content IDs to IPFS content IDs or vice versa. The default is to
-   *   not perform any translation, which is not practical for a production
-   *   system, but serves its function during development and testing. The
-   *   function must be asynchronous.
-   * - a `timeout` parameter, defaulting to DEFAULT_TIMEOUT. After this time,
-   *   requests to the IPFS backend time out.
-   *
-   * Functions in this class accept an optional timeout parameter. If the
-   * timeout is given, it is used - otherwise, the `option.timeout` value
-   * above is used.
-   */
-  static create(options) {
-    const storage = new Storage()
-    storage._init(options)
-    return storage
-  }
-
-  _init(options) {
-    this.options = _.clone(options || {})
-    this.options.ipfs = this.options.ipfs || {}
-
-    this._timeout = this.options.timeout || DEFAULT_TIMEOUT
-    this._resolve_content_id = this.options.resolve_content_id || DEFAULT_RESOLVE_CONTENT_ID
-
-    this.ipfs = ipfsClient(this.options.ipfsHost || 'localhost', '5001', { protocol: 'http' })
-
-    this.pinned = {}
-    this.pinning = {}
-
-    this.ipfs.id((err, identity) => {
-      if (err) {
-        debug(`Warning IPFS daemon not running: ${err.message}`)
-      } else {
-        debug(`IPFS node is up with identity: ${identity.id}`)
-        // TODO: wait for IPFS daemon to be online for this to be effective..?
-        // set the IPFS HTTP Gateway config we desire.. operator might need
-        // to restart their daemon if the config was changed.
-        this.ipfs.config.set('Gateway.PublicGateways', { 'localhost': null })
-      }
-    })
-  }
-
-  /*
-   * Uses bluebird's timeout mechanism to return a Promise that times out after
-   * the given timeout interval, and tries to execute the given operation within
-   * that time.
-   */
-  async withSpecifiedTimeout(timeout, operation) {
-    // TODO: rewrite this method to async-await style
-    // eslint-disable-next-line  no-async-promise-executor
-    return new Promise(async (resolve, reject) => {
-      try {
-        resolve(await new Promise(operation))
-      } catch (err) {
-        reject(err)
-      }
-    }).timeout(timeout || this._timeout)
-  }
-
-  /*
-   * Resolve content ID with timeout.
-   */
-  async resolveContentIdWithTimeout(timeout, contentId) {
-    return await this.withSpecifiedTimeout(timeout, async (resolve, reject) => {
-      try {
-        resolve(await this._resolve_content_id(contentId))
-      } catch (err) {
-        reject(err)
-      }
-    })
-  }
-
-  /*
-   * Stat a content ID.
-   */
-  async stat(contentId, timeout) {
-    const ipfsHash = await this.resolveContentIdWithTimeout(timeout, contentId)
-
-    return this.ipfsStat(ipfsHash, timeout)
-  }
-
-  /*
-   * Stat IPFS hash
-   */
-  async ipfsStat(hash, timeout) {
-    return this.withSpecifiedTimeout(timeout, (resolve, reject) => {
-      this.ipfs.files.stat(`/ipfs/${hash}`, { withLocal: true }, (err, res) => {
-        if (err) {
-          reject(err)
-          return
-        }
-        resolve(res)
-      })
-    })
-  }
-
-  /*
-   * Return the size of a content ID.
-   */
-  async size(contentId, timeout) {
-    const stat = await this.stat(contentId, timeout)
-    return stat.size
-  }
-
-  /*
-   * Opens the specified content in read or write mode, and returns a Promise
-   * with the stream.
-   *
-   * Read streams will contain a fileInfo property, with:
-   *  - a `mimeType` field providing the file's MIME type, or a default.
-   *  - an `ext` property, providing a file extension suggestion, or a default.
-   *
-   * Write streams have a slightly different flow, in order to allow for MIME
-   * type detection and potential filtering. First off, they are written to a
-   * temporary location, and only committed to the backend once their
-   * `commit()` function is called.
-   *
-   * When the commit has finished, a `committed` event is emitted, which
-   * contains the IPFS backend's content ID.
-   *
-   * Write streams also emit a `fileInfo` event during writing. It is passed
-   * the `fileInfo` field as described above. Event listeners may now opt to
-   * abort the write or continue and eventually `commit()` the file. There is
-   * an explicit `cleanup()` function that removes temporary files as well,
-   * in case comitting is not desired.
-   */
-  async open(contentId, mode, timeout) {
-    if (mode !== 'r' && mode !== 'w') {
-      throw Error('The only supported modes are "r", "w" and "a".')
-    }
-
-    // Write stream
-    if (mode === 'w') {
-      return this.createWriteStream(contentId, timeout)
-    }
-
-    // Read stream - with file type detection
-    return await this.createReadStream(contentId, timeout)
-  }
-
-  createWriteStream() {
-    return new StorageWriteStream(this)
-  }
-
-  async createReadStream(contentId, timeout) {
-    const ipfsHash = await this.resolveContentIdWithTimeout(timeout, contentId)
-
-    let found = false
-    return await this.withSpecifiedTimeout(timeout, (resolve, reject) => {
-      const ls = this.ipfs.getReadableStream(ipfsHash)
-      ls.on('data', async (result) => {
-        if (result.path === ipfsHash) {
-          found = true
-
-          const ftStream = await fileType.stream(result.content)
-          resolve(fixFileInfoOnStream(ftStream))
-        }
-      })
-      ls.on('error', (err) => {
-        ls.end()
-        debug(err)
-        reject(err)
-      })
-      ls.on('end', () => {
-        if (!found) {
-          const err = new Error('No matching content found for', contentId)
-          debug(err)
-          reject(err)
-        }
-      })
-      ls.resume()
-    })
-  }
-
-  /*
-   * Pin the given IPFS CID
-   */
-  async pin(ipfsHash, callback) {
-    if (!this.pinning[ipfsHash] && !this.pinned[ipfsHash]) {
-      // debug(`Pinning hash: ${ipfsHash} content-id: ${contentId}`)
-      this.pinning[ipfsHash] = true
-
-      // Callback passed to add() will be called on error or when the entire file
-      // is retrieved. So on success we consider the content synced.
-      this.ipfs.pin.add(ipfsHash, { quiet: true, pin: true }, (err) => {
-        delete this.pinning[ipfsHash]
-        if (err) {
-          debug(`Error Pinning: ${ipfsHash}`)
-          callback && callback(err)
-        } else {
-          // debug(`Pinned ${ipfsHash}`)
-          this.pinned[ipfsHash] = true
-          callback && callback(null, this.syncStatus(ipfsHash))
-        }
-      })
-    } else {
-      callback && callback(null, this.syncStatus(ipfsHash))
-    }
-  }
-
-  syncStatus(ipfsHash) {
-    return {
-      syncing: this.pinning[ipfsHash] === true,
-      synced: this.pinned[ipfsHash] === true,
-    }
-  }
-}
-
-module.exports = {
-  Storage,
-}

+ 0 - 227
storage-node/packages/storage/test/storage.js

@@ -1,227 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const chai = require('chai')
-const chaiAsPromised = require('chai-as-promised')
-
-chai.use(chaiAsPromised)
-const expect = chai.expect
-
-const fs = require('fs')
-
-const { Storage } = require('@joystream/storage-node-backend')
-
-const IPFS_CID_REGEX = /^Qm[1-9A-HJ-NP-Za-km-z]{44}$/
-
-function write(store, contentId, contents, callback) {
-  store
-    .open(contentId, 'w')
-    .then((stream) => {
-      stream.on('end', () => {
-        stream.commit()
-      })
-      stream.on('committed', callback)
-
-      if (!stream.write(contents)) {
-        stream.once('drain', () => stream.end())
-      } else {
-        process.nextTick(() => stream.end())
-      }
-    })
-    .catch((err) => {
-      expect.fail(err)
-    })
-}
-
-function readAll(stream) {
-  return new Promise((resolve, reject) => {
-    const chunks = []
-    stream.on('data', (chunk) => chunks.push(chunk))
-    stream.on('end', () => resolve(Buffer.concat(chunks)))
-    stream.on('error', (err) => reject(err))
-    stream.resume()
-  })
-}
-
-function createKnownObject(contentId, contents, callback) {
-  let hash
-  const store = Storage.create({
-    resolve_content_id: () => {
-      return hash
-    },
-  })
-
-  write(store, contentId, contents, (theHash) => {
-    hash = theHash
-
-    callback(store, hash)
-  })
-}
-
-describe('storage/storage', () => {
-  let storage
-  before(async () => {
-    storage = await Storage.create({ timeout: 1900 })
-  })
-
-  describe('open()', () => {
-    it('can write a stream', (done) => {
-      write(storage, 'foobar', 'test-content', (hash) => {
-        expect(hash).to.not.be.undefined
-        expect(hash).to.match(IPFS_CID_REGEX)
-        done()
-      })
-    })
-
-    it('detects the MIME type of a write stream', (done) => {
-      const contents = fs.readFileSync('../../storage-node_new.svg')
-      storage
-        .open('mime-test', 'w')
-        .then((stream) => {
-          let fileInfo
-          stream.on('fileInfo', (info) => {
-            // Could filter & abort here now, but we're just going to set this,
-            // and expect it to be set later...
-            fileInfo = info
-          })
-
-          stream.on('end', () => {
-            stream.info()
-          })
-
-          stream.once('info', async (info) => {
-            fileInfo = info
-            stream.commit()
-          })
-
-          stream.on('committed', () => {
-            // ... if fileInfo is not set here, there's an issue.
-            expect(fileInfo).to.have.property('mimeType', 'application/xml')
-            expect(fileInfo).to.have.property('ext', 'xml')
-            done()
-          })
-
-          if (!stream.write(contents)) {
-            stream.once('drain', () => stream.end())
-          } else {
-            process.nextTick(() => stream.end())
-          }
-        })
-        .catch((err) => {
-          expect.fail(err)
-        })
-    })
-
-    it('can read a stream', (done) => {
-      const contents = 'test-for-reading'
-      createKnownObject('foobar', contents, (store) => {
-        store
-          .open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await readAll(stream)
-            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
-            done()
-          })
-          .catch((err) => {
-            expect.fail(err)
-          })
-      })
-    })
-
-    it('detects the MIME type of a read stream', (done) => {
-      const contents = fs.readFileSync('../../storage-node_new.svg')
-      createKnownObject('foobar', contents, (store) => {
-        store
-          .open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await readAll(stream)
-            expect(contents.length).to.equal(data.length)
-            expect(Buffer.compare(data, contents)).to.equal(0)
-            expect(stream).to.have.property('fileInfo')
-
-            // application/xml+svg would be better, but this is good-ish.
-            expect(stream.fileInfo).to.have.property('mimeType', 'application/xml')
-            expect(stream.fileInfo).to.have.property('ext', 'xml')
-            done()
-          })
-          .catch((err) => {
-            expect.fail(err)
-          })
-      })
-    })
-
-    it('provides default MIME type for read streams', (done) => {
-      const contents = 'test-for-reading'
-      createKnownObject('foobar', contents, (store) => {
-        store
-          .open('foobar', 'r')
-          .then(async (stream) => {
-            const data = await readAll(stream)
-            expect(Buffer.compare(data, Buffer.from(contents))).to.equal(0)
-
-            expect(stream.fileInfo).to.have.property('mimeType', 'application/octet-stream')
-            expect(stream.fileInfo).to.have.property('ext', 'bin')
-            done()
-          })
-          .catch((err) => {
-            expect.fail(err)
-          })
-      })
-    })
-  })
-
-  describe('stat()', () => {
-    it('times out for unknown content', async () => {
-      const content = Buffer.from('this-should-not-exist')
-      const x = await storage.ipfs.add(content, { onlyHash: true })
-      const hash = x[0].hash
-
-      // Try to stat this entry, it should timeout.
-      expect(storage.stat(hash)).to.eventually.be.rejectedWith('timed out')
-    })
-
-    it('returns stats for a known object', (done) => {
-      const content = 'stat-test'
-      const expectedSize = content.length
-      createKnownObject('foobar', content, (store, hash) => {
-        expect(store.stat(hash)).to.eventually.have.property('size', expectedSize)
-        done()
-      })
-    })
-  })
-
-  describe('size()', () => {
-    it('times out for unknown content', async () => {
-      const content = Buffer.from('this-should-not-exist')
-      const x = await storage.ipfs.add(content, { onlyHash: true })
-      const hash = x[0].hash
-
-      // Try to stat this entry, it should timeout.
-      expect(storage.size(hash)).to.eventually.be.rejectedWith('timed out')
-    })
-
-    it('returns the size of a known object', (done) => {
-      createKnownObject('foobar', 'stat-test', (store, hash) => {
-        expect(store.size(hash)).to.eventually.equal(15)
-        done()
-      })
-    })
-  })
-})

+ 0 - 1
storage-node/packages/util/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 11
storage-node/packages/util/README.md

@@ -1,11 +0,0 @@
-# Summary
-
-This package contains general utility functions for running the colossus
-storage node.
-
-- `lru` contains an in-memory least-recently-used cache abstraction.
-- `fs/*` contains helpers for resolving path names and walking file system
-  hierarchies.
-- `pagination` contains utility functions for paginating APIs.
-- `ranges` contains functions for dealing with `Range` headers in download
-  requests.

+ 0 - 22
storage-node/packages/util/externalPromise.js

@@ -1,22 +0,0 @@
-/**
- * Creates a new promise.
- * @return { object} Returns an object that contains a Promise and exposes its handlers, ie. resolve and reject methods
- * so it can be fulfilled 'externally'. This is a bit of a hack, but most useful application is when
- * concurrent async operations are initiated that are all waiting on the same result value.
- */
-function newExternallyControlledPromise() {
-  let resolve, reject
-
-  // Disable lint until the migration to TypeScript.
-  // eslint-disable-next-line promise/param-names
-  const promise = new Promise((res, rej) => {
-    resolve = res
-    reject = rej
-  })
-
-  return { resolve, reject, promise }
-}
-
-module.exports = {
-  newExternallyControlledPromise,
-}

+ 0 - 65
storage-node/packages/util/fs/resolve.js

@@ -1,65 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const path = require('path')
-
-const debug = require('debug')('joystream:util:fs:resolve')
-
-/*
- * Resolves name relative to base, throwing an error if the given
- * name wants to break out of the base directory.
- *
- * The problem is, we want to use node's functions so we don't add
- * platform dependent code, but node's path.resolve() function is a little
- * useless for our case because it does not care about breaking out of
- * a base directory.
- */
-function resolve(base, name) {
-  debug('Resolving', name)
-
-  // In a firs step, we strip leading slashes from the name, because they're
-  // just saying "relative to the base" in our use case.
-  let res = name.replace(/^\/+/, '')
-  debug('Stripped', res)
-
-  // At this point resolving the path should stay within the base we specify.
-  // We do specify a base other than the file system root, because the file
-  // everything is always relative to the file system root.
-  const testBase = path.join(path.sep, 'test-base')
-  debug('Test base is', testBase)
-  res = path.resolve(testBase, res)
-  debug('Resolved', res)
-
-  // Ok, we can check for violations now.
-  if (res.slice(0, testBase.length) !== testBase) {
-    throw Error(`Name "${name}" cannot be resolved to a repo relative path, aborting!`)
-  }
-
-  // If we strip the base now, we have the relative name resolved.
-  res = res.slice(testBase.length + 1)
-  debug('Relative', res)
-
-  // Finally we can join this relative name to the requested base.
-  res = path.join(base, res)
-  debug('Result', res)
-  return res
-}
-
-module.exports = resolve

+ 0 - 139
storage-node/packages/util/fs/walk.js

@@ -1,139 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const fs = require('fs')
-const path = require('path')
-
-const debug = require('debug')('joystream:util:fs:walk')
-
-class Walker {
-  constructor(archive, base, cb) {
-    this.archive = archive
-    this.base = base
-    this.slice_offset = this.base.length
-    if (this.base[this.slice_offset - 1] !== '/') {
-      this.slice_offset += 1
-    }
-    this.cb = cb
-    this.pending = 0
-  }
-
-  /*
-   * Check pending
-   */
-  checkPending(name) {
-    // Decrease pending count again.
-    this.pending -= 1
-    debug('Finishing', name, 'decreases pending to', this.pending)
-    if (!this.pending) {
-      debug('No more pending.')
-      this.cb(null)
-    }
-  }
-
-  /*
-   * Helper function for walk; split out because it's used in two places.
-   */
-  reportAndRecurse(relname, fname, lstat, linktarget) {
-    // First report the value
-    this.cb(null, relname, lstat, linktarget)
-
-    // Recurse
-    if (lstat.isDirectory()) {
-      this.walk(fname)
-    }
-
-    this.checkPending(fname)
-  }
-
-  walk(dir) {
-    // This is a little hacky - since readdir() may take a while, and we don't
-    // want the pending count to drop to zero before it's finished, we bump
-    // it up and down while readdir() does it's job.
-    // What this achieves is that when processing a parent directory finishes
-    // before walk() on a subdirectory could finish its readdir() call, the
-    // pending count still has a value.
-    // Note that in order not to hang on empty directories, we need to
-    // explicitly check the pending count in cases when there are no files.
-    this.pending += 1
-    this.archive.readdir(dir, (err, files) => {
-      if (err) {
-        this.cb(err)
-        return
-      }
-
-      // More pending data.
-      this.pending += files.length
-      debug('Reading', dir, 'bumps pending to', this.pending)
-
-      files.forEach((name) => {
-        const fname = path.resolve(dir, name)
-        this.archive.lstat(fname, (err2, lstat) => {
-          if (err2) {
-            this.cb(err2)
-            return
-          }
-
-          // The base is always prefixed, so a simple string slice should do.
-          const relname = fname.slice(this.slice_offset)
-
-          // We have a symbolic link? Resolve it.
-          if (lstat.isSymbolicLink()) {
-            this.archive.readlink(fname, (err3, linktarget) => {
-              if (err3) {
-                this.cb(err3)
-                return
-              }
-
-              this.reportAndRecurse(relname, fname, lstat, linktarget)
-            })
-          } else {
-            this.reportAndRecurse(relname, fname, lstat)
-          }
-        })
-      })
-
-      this.checkPending(dir)
-    })
-  }
-}
-
-/*
- * Recursively walk a file system hierarchy (in undefined order), returning all
- * entries via the callback(err, relname, lstat, [linktarget]). The name relative
- * to the base is returned.
- *
- * You can optionally pass an 'archive', i.e. a class or module that responds to
- * file system like functions. If you don't, then the 'fs' module is assumed as
- * default.
- *
- * The callback is invoked one last time without data to signal the end of data.
- */
-module.exports = function (base, archive, cb) {
-  // Archive is optional and defaults to fs, but cb is not.
-  if (!cb) {
-    cb = archive
-    archive = fs
-  }
-
-  const resolved = path.resolve(base)
-  const w = new Walker(archive, resolved, cb)
-  w.walk(resolved)
-}

+ 0 - 117
storage-node/packages/util/lru.js

@@ -1,117 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const DEFAULT_CAPACITY = 100
-
-const debug = require('debug')('joystream:util:lru')
-
-/*
- * Simple least recently used cache.
- */
-class LRUCache {
-  constructor(capacity = DEFAULT_CAPACITY) {
-    this.capacity = capacity
-    this.clear()
-  }
-
-  /*
-   * Return the entry with the given key, and update it's usage.
-   */
-  get(key) {
-    const val = this.store.get(key)
-    if (val) {
-      this.access.set(key, Date.now())
-    }
-    return val
-  }
-
-  /*
-   * Return true if the key is the cache, false otherwise.
-   */
-  has(key) {
-    return this.store.has(key)
-  }
-
-  /*
-   * Put a value into the cache.
-   */
-  put(key, value) {
-    this.store.set(key, value)
-    this.access.set(key, Date.now())
-    this._prune()
-  }
-
-  /*
-   * Delete a value from the cache.
-   */
-  del(key) {
-    this.store.delete(key)
-    this.access.delete(key)
-  }
-
-  /*
-   * Current size of the cache
-   */
-  size() {
-    return this.store.size
-  }
-
-  /*
-   * Clear the LRU cache entirely.
-   */
-  clear() {
-    this.store = new Map()
-    this.access = new Map()
-  }
-
-  /*
-   * Internal pruning function.
-   */
-  _prune() {
-    debug('About to prune; have', this.store.size, 'and capacity is', this.capacity)
-
-    const sorted = Array.from(this.access.entries())
-    sorted.sort((first, second) => {
-      if (first[1] === second[1]) {
-        return 0
-      }
-      return first[1] < second[1] ? -1 : 1
-    })
-    debug('Sorted keys are:', sorted)
-
-    debug('Have to prune', this.store.size - this.capacity, 'items.')
-    let idx = 0
-    const toPrune = []
-    while (idx < sorted.length && toPrune.length < this.store.size - this.capacity) {
-      toPrune.push(sorted[idx][0])
-      ++idx
-    }
-
-    toPrune.forEach((key) => {
-      this.store.delete(key)
-      this.access.delete(key)
-    })
-    debug('Size after pruning', this.store.size)
-  }
-}
-
-module.exports = {
-  LRUCache,
-}

+ 0 - 52
storage-node/packages/util/package.json

@@ -1,52 +0,0 @@
-{
-  "name": "@joystream/storage-utils",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Utility code for Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node",
-    "utility"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'test/**/*.js'",
-    "lint": "eslint '**/*.js' --ignore-pattern 'test/**/*.js'"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "stream-buffers": "^3.0.2",
-    "uuid": "^3.3.2"
-  }
-}

+ 0 - 160
storage-node/packages/util/pagination.js

@@ -1,160 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:middleware:pagination')
-
-// Pagination definitions
-const apiDefs = {
-  parameters: {
-    paginationLimit: {
-      name: 'limit',
-      in: 'query',
-      description: 'Number of items per page.',
-      required: false,
-      schema: {
-        type: 'integer',
-        minimum: 1,
-        maximum: 50,
-        default: 20,
-      },
-    },
-    paginationOffset: {
-      name: 'offset',
-      in: 'query',
-      description: 'Page number (offset)',
-      schema: {
-        type: 'integer',
-        minimum: 0,
-      },
-    },
-  },
-  schemas: {
-    PaginationInfo: {
-      type: 'object',
-      required: ['self'],
-      properties: {
-        self: {
-          type: 'string',
-        },
-        next: {
-          type: 'string',
-        },
-        prev: {
-          type: 'string',
-        },
-        first: {
-          type: 'string',
-        },
-        last: {
-          type: 'string',
-        },
-      },
-    },
-  },
-}
-
-/**
- * Silly pagination because it's faster than getting other modules to work.
- *
- * Usage:
- * - apiDoc.parameters = pagination.parameters
- *   -> Validates pagination parameters
- * - apiDoc.responses.200.schema.pagination = pagination.response
- *   -> Generates pagination info on response
- * - paginate(req, res, [lastOffset])
- *   -> add (valid) pagination fields to response object
- *      If lastOffset is given, create a last link with that offset
- **/
-module.exports = {
-  // Add pagination parameters and pagination info responses.
-  parameters: [
-    { $ref: '#/components/parameters/paginationLimit' },
-    { $ref: '#/components/parameters/paginationOffset' },
-  ],
-
-  response: {
-    $ref: '#/components/schema/PaginationInfo',
-  },
-
-  // Update swagger/openapi specs with our own parameters and definitions
-  openapi(api) {
-    api.components = api.components || {}
-    api.components.parameters = { ...(api.components.parameters || {}), ...apiDefs.parameters }
-    api.components.schemas = { ...(api.components.schemas || {}), ...apiDefs.schemas }
-    return api
-  },
-
-  // Pagination function
-  paginate(req, res, lastOffset) {
-    // Skip if the response is not an object.
-    if (Object.prototype.toString.call(res) !== '[object Object]') {
-      debug('Cannot paginate non-objects.')
-      return res
-    }
-
-    // Defaults for parameters
-    const offset = req.query.offset || 0
-    const limit = req.query.limit || 20
-    debug('Create pagination links from offset=' + offset, 'limit=' + limit)
-
-    // Parse current url
-    const url = require('url')
-    // Disable lint because the code (and tests) relied upon obsolete UrlObject. Remove after migration to TypeScript.
-    // eslint-disable-next-line node/no-deprecated-api
-    const reqUrl = url.parse(req.protocol + '://' + req.get('host') + req.originalUrl)
-    const params = new url.URLSearchParams(reqUrl.query)
-
-    // Pagination object
-    const pagination = {
-      self: reqUrl.href,
-    }
-
-    const prev = offset - limit
-    if (prev >= 0) {
-      params.set('offset', prev)
-      reqUrl.search = params.toString()
-      pagination.prev = url.format(reqUrl)
-    }
-
-    const next = offset + limit
-    if (next >= 0) {
-      params.set('offset', next)
-      reqUrl.search = params.toString()
-      pagination.next = url.format(reqUrl)
-    }
-
-    if (lastOffset) {
-      params.set('offset', lastOffset)
-      reqUrl.search = params.toString()
-      pagination.last = url.format(reqUrl)
-    }
-
-    // First
-    params.set('offset', 0)
-    reqUrl.search = params.toString()
-    pagination.first = url.format(reqUrl)
-
-    debug('pagination', pagination)
-
-    // Now set pagination values in response.
-    res.pagination = pagination
-    return res
-  },
-}

+ 0 - 429
storage-node/packages/util/ranges.js

@@ -1,429 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const uuid = require('uuid')
-const streamBuf = require('stream-buffers')
-
-/*
- * Range parsing
- */
-
-// Increase performance by "pre-computing" these regex expressions
-const PARSE_RANGE_REGEX = /^(\d+-\d+|\d+-|-\d+|\*)$/u
-const PARSE_RANGE_HEADERS_REGEX = /^(([^\s]+)=)?((?:(?:\d+-\d+|-\d+|\d+-),?)+)$/u
-
-/*
- * Parse a range string, e.g. '0-100' or '-100' or '0-'. Return the values
- * in an array of int or undefined (if not provided).
- */
-function parseRange(range) {
-  const matches = range.match(PARSE_RANGE_REGEX)
-  if (!matches) {
-    throw new Error(`Not a valid range: ${range}`)
-  }
-
-  const vals = matches[1].split('-').map((v) => {
-    return v === '*' || v === '' ? undefined : parseInt(v, 10)
-  })
-
-  if (vals[1] <= vals[0]) {
-    throw new Error(`Invalid range: start "${vals[0]}" must be before end "${vals[1]}".`)
-  }
-
-  return [vals[0], vals[1]]
-}
-
-/*
- * Parse a range header value, e.g. unit=ranges, where ranges
- * are a comma separated list of individual ranges, and unit is any
- * custom unit string. If the unit (and equal sign) are not given, assume
- * 'bytes'.
- */
-function parse(rangeStr) {
-  const res = {}
-  const matches = rangeStr.match(PARSE_RANGE_HEADERS_REGEX)
-  if (!matches) {
-    throw new Error(`Not a valid range header: ${rangeStr}`)
-  }
-
-  res.unit = matches[2] || 'bytes'
-  res.rangeStr = matches[3]
-  res.ranges = []
-
-  // Parse individual ranges
-  const ranges = []
-  res.rangeStr.split(',').forEach((range) => {
-    ranges.push(parseRange(range))
-  })
-
-  // Merge ranges into result.
-  ranges.forEach((newRange) => {
-    let isMerged = false
-    for (const i in res.ranges) {
-      const oldRange = res.ranges[i]
-
-      // Skip if the new range is fully separate from the old range.
-      if (oldRange[1] + 1 < newRange[0] || newRange[1] + 1 < oldRange[0]) {
-        continue
-      }
-
-      // If we know they're adjacent or overlapping, we construct the
-      // merged range from the lower start and the higher end of both
-      // ranges.
-      const merged = [Math.min(oldRange[0], newRange[0]), Math.max(oldRange[1], newRange[1])]
-      res.ranges[i] = merged
-      isMerged = true
-    }
-
-    if (!isMerged) {
-      res.ranges.push(newRange)
-    }
-  })
-
-  // Finally, sort ranges
-  res.ranges.sort((first, second) => {
-    if (first[0] === second[0]) {
-      // Should not happen due to merging.
-      return 0
-    }
-    return first[0] < second[0] ? -1 : 1
-  })
-
-  return res
-}
-
-/*
- * Async version of parse().
- */
-function parseAsync(rangeStr, cb) {
-  try {
-    return cb(parse(rangeStr))
-  } catch (err) {
-    return cb(null, err)
-  }
-}
-
-/*
- * Range streaming
- */
-
-/*
- * The class writes parts specified in the options to the response. If no ranges
- * are specified, the entire stream is written. At the end, the given callback
- * is invoked - if an error occurred, it is invoked with an error parameter.
- *
- * Note that the range implementation can be optimized for streams that support
- * seeking.
- *
- * There's another optimization here for when sizes are given, which is possible
- * with file system based streams. We'll see how likely that's going to be in
- * future.
- */
-class RangeSender {
-  constructor(response, stream, opts, endCallback) {
-    // Options
-    this.name = opts.name || 'content.bin'
-    this.type = opts.type || 'application/octet-stream'
-    this.size = opts.size
-    this.ranges = opts.ranges
-    this.download = opts.download || false
-
-    // Range handling related state.
-    this.readOffset = 0 // Nothing read so far
-    this.rangeIndex = -1 // No range index yet.
-    this.rangeBoundary = undefined // Generate boundary when needed.
-
-    // Event handlers & state
-    this.handlers = {}
-    this.opened = false
-
-    // Parameters
-    this.response = response
-    this.stream = stream
-    this.opts = opts
-    this.endCallback = endCallback
-  }
-
-  onError(err) {
-    // Assume hiding the actual error is best, and default to 404.
-    if (!this.response.headersSent) {
-      this.response.status(err.code || 404).send({
-        message: err.message || `File not found: ${this.name}`,
-      })
-    }
-    if (this.endCallback) {
-      this.endCallback(err)
-    }
-  }
-
-  onEnd() {
-    this.response.end()
-    if (this.endCallback) {
-      this.endCallback()
-    }
-  }
-
-  // **** No ranges
-  onOpenNoRange() {
-    // File got opened, so we can set headers/status
-    this.opened = true
-
-    this.response.status(200)
-    this.response.contentType(this.type)
-    this.response.header('Accept-Ranges', 'bytes')
-    this.response.header('Content-Transfer-Encoding', 'binary')
-
-    if (this.download) {
-      this.response.header('Content-Disposition', `attachment; filename="${this.name}"`)
-    } else {
-      this.response.header('Content-Disposition', 'inline')
-    }
-
-    if (this.size) {
-      this.response.header('Content-Length', this.size)
-    }
-  }
-
-  onDataNoRange(chunk) {
-    if (!this.opened) {
-      this.handlers.open()
-    }
-
-    // As simple as it can be.
-    this.response.write(Buffer.from(chunk, 'binary'))
-  }
-
-  // *** With ranges
-  nextRangeHeaders() {
-    // Next range
-    this.rangeIndex += 1
-    if (this.rangeIndex >= this.ranges.ranges.length) {
-      return undefined
-    }
-
-    // Calculate this range's size.
-    const range = this.ranges.ranges[this.rangeIndex]
-    let totalSize
-    if (this.size) {
-      totalSize = this.size
-    }
-    if (typeof range[0] === 'undefined') {
-      range[0] = 0
-    }
-    if (typeof range[1] === 'undefined') {
-      if (this.size) {
-        range[1] = totalSize - 1
-      }
-    }
-
-    let sendSize
-    if (typeof range[0] !== 'undefined' && typeof range[1] !== 'undefined') {
-      sendSize = range[1] - range[0] + 1
-    }
-
-    // Write headers, but since we may be in a multipart situation, write them
-    // explicitly to the stream.
-    const start = typeof range[0] === 'undefined' ? '' : `${range[0]}`
-    const end = typeof range[1] === 'undefined' ? '' : `${range[1]}`
-
-    let sizeStr
-    if (totalSize) {
-      sizeStr = `${totalSize}`
-    } else {
-      sizeStr = '*'
-    }
-
-    const ret = {
-      'Content-Range': `bytes ${start}-${end}/${sizeStr}`,
-      'Content-Type': `${this.type}`,
-    }
-    if (sendSize) {
-      ret['Content-Length'] = `${sendSize}`
-    }
-    return ret
-  }
-
-  nextRange() {
-    if (this.ranges.ranges.length === 1) {
-      this.stream.off('data', this.handlers.data)
-      return false
-    }
-
-    const headers = this.nextRangeHeaders()
-
-    if (headers) {
-      const onDataRanges = new streamBuf.WritableStreamBuffer()
-      // We start a range with a boundary.
-      onDataRanges.write(`\r\n--${this.rangeBoundary}\r\n`)
-
-      // The we write the range headers.
-      for (const header in headers) {
-        onDataRanges.write(`${header}: ${headers[header]}\r\n`)
-      }
-      onDataRanges.write('\r\n')
-      this.response.write(onDataRanges.getContents())
-      return true
-    }
-
-    // No headers means we're finishing the last range.
-    this.response.write(`\r\n--${this.rangeBoundary}--\r\n`)
-    this.stream.off('data', this.handlers.data)
-    return false
-  }
-
-  onOpenRanges() {
-    // File got opened, so we can set headers/status
-    this.opened = true
-
-    this.response.header('Accept-Ranges', 'bytes')
-    this.response.header('Content-Transfer-Encoding', 'binary')
-    this.response.header('Content-Disposition', 'inline')
-
-    // For single ranges, the content length should be the size of the
-    // range. For multiple ranges, we don't send a content length
-    // header.
-    //
-    // Similarly, the type is different whether or not there is more than
-    // one range.
-    if (this.ranges.ranges.length === 1) {
-      this.response.writeHead(206, 'Partial Content', this.nextRangeHeaders())
-    } else {
-      this.rangeBoundary = uuid.v4()
-      const headers = {
-        'Content-Type': `multipart/byteranges; boundary=${this.rangeBoundary}`,
-      }
-      this.response.writeHead(206, 'Partial Content', headers)
-      this.nextRange()
-    }
-  }
-
-  onDataRanges(chunk) {
-    if (!this.opened) {
-      this.handlers.open()
-    }
-    // Crap, node.js streams are stupid. No guarantee for seek support. Sure,
-    // that makes node.js easier to implement, but offloads everything onto the
-    // application developer.
-    //
-    // So, we skip chunks until our read position is within the range we want to
-    // send at the moment. We're relying on ranges being in-order, which this
-    // file's parser luckily (?) provides.
-    //
-    // The simplest optimization would be at ever range start to seek() to the
-    // start.
-    const chunkRange = [this.readOffset, this.readOffset + chunk.length - 1]
-    while (true) {
-      let reqRange = this.ranges.ranges[this.rangeIndex]
-      if (!reqRange) {
-        break
-      }
-
-      if (!reqRange[1]) {
-        reqRange = [reqRange[0], Number.MAX_SAFE_INTEGER]
-      }
-
-      // No overlap in the chunk and requested range; don't write.
-      if (chunkRange[1] < reqRange[0] || chunkRange[0] > reqRange[1]) {
-        break
-      }
-
-      // Since there is overlap, find the segment that's entirely within the
-      // chunk.
-      const segment = [Math.max(chunkRange[0], reqRange[0]), Math.min(chunkRange[1], reqRange[1])]
-
-      // Normalize the segment to a chunk offset
-      const start = segment[0] - this.readOffset
-      const end = segment[1] - this.readOffset
-      const len = end - start + 1
-
-      // Write the slice that we want to write. We first create a buffer from the
-      // chunk. Then we slice a new buffer from the same underlying ArrayBuffer,
-      // starting at the original buffer's offset, further offset by the segment
-      // start. The segment length bounds the end of our slice.
-      const buf = Buffer.from(chunk, 'binary')
-      this.response.write(Buffer.from(buf.buffer, buf.byteOffset + start, len))
-
-      // If the requested range is finished, we should start the next one.
-      if (reqRange[1] > chunkRange[1]) {
-        break
-      }
-
-      if (reqRange[1] <= chunkRange[1]) {
-        if (!this.nextRange(segment)) {
-          break
-        }
-      }
-    }
-
-    // Update read offset when chunk is finished.
-    this.readOffset += chunk.length
-  }
-
-  start() {
-    // Before we start streaming, let's ensure our ranges don't contain any
-    // without start - if they do, we nuke them all and treat this as a full
-    // request.
-    let nuke = false
-    if (this.ranges) {
-      for (const i in this.ranges.ranges) {
-        if (typeof this.ranges.ranges[i][0] === 'undefined') {
-          nuke = true
-          break
-        }
-      }
-    }
-    if (nuke) {
-      this.ranges = undefined
-    }
-
-    // Register callbacks. Store them in a handlers object so we can
-    // keep the bound version around for stopping to listen to events.
-    this.handlers.error = this.onError.bind(this)
-    this.handlers.end = this.onEnd.bind(this)
-
-    if (this.ranges) {
-      this.handlers.open = this.onOpenRanges.bind(this)
-      this.handlers.data = this.onDataRanges.bind(this)
-    } else {
-      this.handlers.open = this.onOpenNoRange.bind(this)
-      this.handlers.data = this.onDataNoRange.bind(this)
-    }
-
-    for (const handler in this.handlers) {
-      this.stream.on(handler, this.handlers[handler])
-    }
-  }
-}
-
-function send(response, stream, opts, endCallback) {
-  const sender = new RangeSender(response, stream, opts, endCallback)
-  sender.start()
-}
-
-/*
- * Exports
- */
-
-module.exports = {
-  parse,
-  parseAsync,
-  RangeSender,
-  send,
-}

+ 0 - 16
storage-node/packages/util/sleep.js

@@ -1,16 +0,0 @@
-function sleep(ms) {
-  return new Promise((resolve) => {
-    setTimeout(resolve, ms)
-  })
-}
-
-function nextTick() {
-  return new Promise((resolve) => {
-    process.nextTick(resolve)
-  })
-}
-
-module.exports = {
-  sleep,
-  nextTick,
-}

+ 0 - 9
storage-node/packages/util/stripEndingSlash.js

@@ -1,9 +0,0 @@
-// return url with last `/` removed
-function removeEndingForwardSlash(url) {
-  if (url.endsWith('/')) {
-    return url.substring(0, url.length - 1)
-  }
-  return url.toString()
-}
-
-module.exports = removeEndingForwardSlash

+ 0 - 0
storage-node/packages/util/test/data/bar


+ 0 - 0
storage-node/packages/util/test/data/foo/baz


+ 0 - 1
storage-node/packages/util/test/data/quux

@@ -1 +0,0 @@
-foo/baz

+ 0 - 68
storage-node/packages/util/test/fs/resolve.js

@@ -1,68 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-const path = require('path')
-
-const resolve = require('@joystream/storage-utils/fs/resolve')
-
-function tests(base) {
-  it('resolves absolute paths relative to the base', function () {
-    const resolved = resolve(base, '/foo')
-    const relative = path.relative(base, resolved)
-    expect(relative).to.equal('foo')
-  })
-
-  it('allows for relative paths that stay in the base', function () {
-    const resolved = resolve(base, 'foo/../bar')
-    const relative = path.relative(base, resolved)
-    expect(relative).to.equal('bar')
-  })
-
-  it('prevents relative paths from breaking out of the base', function () {
-    expect(() => resolve(base, '../foo')).to.throw()
-  })
-
-  it('prevents long relative paths from breaking out of the base', function () {
-    expect(() => resolve(base, '../../../foo')).to.throw()
-  })
-
-  it('prevents sneaky relative paths from breaking out of the base', function () {
-    expect(() => resolve(base, 'foo/../../../bar')).to.throw()
-  })
-}
-
-describe('util/fs/resolve', function () {
-  describe('slash base', function () {
-    tests('/')
-  })
-
-  describe('empty base', function () {
-    tests('')
-  })
-
-  describe('short base', function () {
-    tests('/base')
-  })
-
-  describe('long base', function () {
-    tests('/this/base/is/very/long/indeed')
-  })
-})

+ 0 - 67
storage-node/packages/util/test/fs/walk.js

@@ -1,67 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-// Disabling the rule because of the 'temp' package API.
-// eslint-disable-next-line no-unused-vars
-const temp = require('temp').track()
-
-const fs = require('fs')
-const path = require('path')
-
-const fswalk = require('@joystream/storage-utils/fs/walk')
-
-function walktest(archive, base, done) {
-  const results = new Map()
-
-  fswalk(base, archive, (err, relname, stat, linktarget) => {
-    expect(err).to.be.null
-
-    if (relname) {
-      results.set(relname, [stat, linktarget])
-      return
-    }
-
-    // End of data, do testing
-    const entries = Array.from(results.keys())
-    expect(entries).to.include('foo')
-    expect(results.get('foo')[0].isDirectory()).to.be.true
-
-    expect(entries).to.include('bar')
-    expect(results.get('bar')[0].isFile()).to.be.true
-
-    if (archive === fs) {
-      expect(entries).to.include('quux')
-      expect(results.get('quux')[0].isSymbolicLink()).to.be.true
-      expect(results.get('quux')[1]).to.equal('foo/baz')
-    }
-
-    expect(entries).to.include('foo/baz')
-    expect(results.get('foo/baz')[0].isFile()).to.be.true
-
-    done()
-  })
-}
-
-describe('util/fs/walk', function () {
-  it('reports all files in a file system hierarchy', function (done) {
-    walktest(fs, path.resolve(__dirname, '../data'), done)
-  })
-})

+ 0 - 152
storage-node/packages/util/test/lru.js

@@ -1,152 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-
-const lru = require('@joystream/storage-utils/lru')
-
-const DEFAULT_SLEEP = 1
-function sleep(ms = DEFAULT_SLEEP) {
-  return new Promise((resolve) => {
-    setTimeout(resolve, ms)
-  })
-}
-
-describe('util/lru', function () {
-  describe('simple usage', function () {
-    it('does not contain keys that were not added', function () {
-      const cache = new lru.LRUCache()
-      expect(cache.size()).to.equal(0)
-
-      const val = cache.get('something')
-      expect(val).to.be.undefined
-
-      expect(cache.has('something')).to.be.false
-    })
-
-    it('contains keys that were added', function () {
-      const cache = new lru.LRUCache()
-      cache.put('something', 'yay!')
-      expect(cache.size()).to.equal(1)
-
-      const val = cache.get('something')
-      expect(val).to.be.equal('yay!')
-
-      expect(cache.has('something')).to.be.true
-    })
-
-    it('does not contain keys that were deleted', function () {
-      const cache = new lru.LRUCache()
-      cache.put('something', 'yay!')
-      expect(cache.size()).to.equal(1)
-      let val = cache.get('something')
-      expect(val).to.be.equal('yay!')
-      expect(cache.has('something')).to.be.true
-
-      cache.del('something')
-      expect(cache.size()).to.equal(0)
-      val = cache.get('something')
-      expect(val).to.be.undefined
-      expect(cache.has('something')).to.be.false
-    })
-
-    it('can be cleared', function () {
-      const cache = new lru.LRUCache()
-      cache.put('something', 'yay!')
-      expect(cache.size()).to.equal(1)
-
-      cache.clear()
-      expect(cache.size()).to.equal(0)
-    })
-  })
-
-  describe('capacity management', function () {
-    it('does not grow beyond capacity', async function () {
-      const cache = new lru.LRUCache(2) // Small capacity
-      expect(cache.size()).to.equal(0)
-
-      cache.put('foo', '42')
-      expect(cache.size()).to.equal(1)
-
-      await sleep()
-
-      cache.put('bar', '42')
-      expect(cache.size()).to.equal(2)
-
-      await sleep()
-
-      cache.put('baz', '42')
-      expect(cache.size()).to.equal(2) // Capacity exceeded
-    })
-
-    it('removes the oldest key when pruning', async function () {
-      const cache = new lru.LRUCache(2) // Small capacity
-      expect(cache.size()).to.equal(0)
-
-      cache.put('foo', '42')
-      expect(cache.size()).to.equal(1)
-      expect(cache.has('foo')).to.be.true
-
-      await sleep()
-
-      cache.put('bar', '42')
-      expect(cache.size()).to.equal(2)
-      expect(cache.has('foo')).to.be.true
-      expect(cache.has('bar')).to.be.true
-
-      await sleep()
-
-      cache.put('baz', '42')
-      expect(cache.size()).to.equal(2) // Capacity exceeded
-      expect(cache.has('bar')).to.be.true
-      expect(cache.has('baz')).to.be.true
-    })
-
-    it('updates LRU timestamp when reading', async function () {
-      const cache = new lru.LRUCache(2) // Small capacity
-      expect(cache.size()).to.equal(0)
-
-      cache.put('foo', '42')
-      expect(cache.size()).to.equal(1)
-      expect(cache.has('foo')).to.be.true
-
-      await sleep()
-
-      cache.put('bar', '42')
-      expect(cache.size()).to.equal(2)
-      expect(cache.has('foo')).to.be.true
-      expect(cache.has('bar')).to.be.true
-
-      await sleep()
-
-      // 'foo' is older than 'bar' right now, so should be pruned first. But
-      // if we get 'foo', it would be 'bar' that has to go.
-      cache.get('foo')
-
-      // Makes debugging a bit more obvious
-      await sleep()
-
-      cache.put('baz', '42')
-      expect(cache.size()).to.equal(2) // Capacity exceeded
-      expect(cache.has('foo')).to.be.true
-      expect(cache.has('baz')).to.be.true
-    })
-  })
-})

+ 0 - 113
storage-node/packages/util/test/pagination.js

@@ -1,113 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-const mockHttp = require('node-mocks-http')
-
-const pagination = require('@joystream/storage-utils/pagination')
-
-describe('util/pagination', function () {
-  describe('openapi()', function () {
-    it('should add parameters and definitions to an API spec', function () {
-      const api = pagination.openapi({})
-
-      // Parameters
-      expect(api).to.have.property('components')
-
-      expect(api.components).to.have.property('parameters')
-      expect(api.components.parameters).to.have.property('paginationLimit')
-
-      expect(api.components.parameters.paginationLimit).to.have.property('name')
-      expect(api.components.parameters.paginationLimit.name).to.equal('limit')
-
-      expect(api.components.parameters.paginationLimit).to.have.property('schema')
-      expect(api.components.parameters.paginationLimit.schema).to.have.property('type')
-      expect(api.components.parameters.paginationLimit.schema.type).to.equal('integer')
-
-      expect(api.components.parameters.paginationOffset).to.have.property('name')
-      expect(api.components.parameters.paginationOffset.name).to.equal('offset')
-
-      expect(api.components.parameters.paginationOffset).to.have.property('schema')
-      expect(api.components.parameters.paginationOffset.schema).to.have.property('type')
-      expect(api.components.parameters.paginationOffset.schema.type).to.equal('integer')
-
-      // Defintiions
-      expect(api.components).to.have.property('schemas')
-      expect(api.components.schemas).to.have.property('PaginationInfo')
-
-      expect(api.components.schemas.PaginationInfo).to.have.property('type')
-      expect(api.components.schemas.PaginationInfo.type).to.equal('object')
-
-      expect(api.components.schemas.PaginationInfo).to.have.property('properties')
-      expect(api.components.schemas.PaginationInfo.properties)
-        .to.be.an('object')
-        .that.has.all.keys('self', 'next', 'prev', 'first', 'last')
-    })
-  })
-
-  describe('paginate()', function () {
-    it('should add pagination links to a response object', function () {
-      const req = mockHttp.createRequest({
-        method: 'GET',
-        url: '/foo?limit=10',
-        query: {
-          limit: 10, // Mock is a little stupid, we have to explicitly set query
-        },
-        headers: {
-          host: 'localhost',
-        },
-        protocol: 'http',
-      })
-
-      const res = pagination.paginate(req, {})
-
-      expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next')
-
-      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10')
-      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
-      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=10')
-    })
-
-    it('should add a last pagination link when requested', function () {
-      const req = mockHttp.createRequest({
-        method: 'GET',
-        url: '/foo?limit=10&offset=15',
-        query: {
-          limit: 10, // Mock is a little stupid, we have to explicitly set query
-          offset: 15,
-        },
-        headers: {
-          host: 'localhost',
-        },
-        protocol: 'http',
-      })
-
-      const res = pagination.paginate(req, {}, 35)
-
-      expect(res).to.have.property('pagination').that.has.all.keys('self', 'first', 'next', 'prev', 'last')
-
-      expect(res.pagination.self).to.equal('http://localhost/foo?limit=10&offset=15')
-      expect(res.pagination.first).to.equal('http://localhost/foo?limit=10&offset=0')
-      expect(res.pagination.last).to.equal('http://localhost/foo?limit=10&offset=35')
-      expect(res.pagination.prev).to.equal('http://localhost/foo?limit=10&offset=5')
-      expect(res.pagination.next).to.equal('http://localhost/foo?limit=10&offset=25')
-    })
-  })
-})

+ 0 - 392
storage-node/packages/util/test/ranges.js

@@ -1,392 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-const mockHttp = require('node-mocks-http')
-const streamBuffers = require('stream-buffers')
-
-const ranges = require('@joystream/storage-utils/ranges')
-
-describe('util/ranges', function () {
-  describe('parse()', function () {
-    it('should parse a full range', function () {
-      // Range with unit
-      let range = ranges.parse('bytes=0-100')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-100')
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(100)
-
-      // Range without unit
-      range = ranges.parse('0-100')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-100')
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(100)
-
-      // Range with custom unit
-      //
-      range = ranges.parse('foo=0-100')
-      expect(range.unit).to.equal('foo')
-      expect(range.rangeStr).to.equal('0-100')
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(100)
-    })
-
-    it('should error out on malformed strings', function () {
-      expect(() => ranges.parse('foo')).to.throw()
-      expect(() => ranges.parse('foo=bar')).to.throw()
-      expect(() => ranges.parse('foo=100')).to.throw()
-      expect(() => ranges.parse('foo=100-0')).to.throw()
-    })
-
-    it('should parse a range without end', function () {
-      const range = ranges.parse('0-')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-')
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.be.undefined
-    })
-
-    it('should parse a range without start', function () {
-      const range = ranges.parse('-100')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('-100')
-      expect(range.ranges[0][0]).to.be.undefined
-      expect(range.ranges[0][1]).to.equal(100)
-    })
-
-    it('should parse multiple ranges', function () {
-      const range = ranges.parse('0-10,30-40,60-80')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-10,30-40,60-80')
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(10)
-      expect(range.ranges[1][0]).to.equal(30)
-      expect(range.ranges[1][1]).to.equal(40)
-      expect(range.ranges[2][0]).to.equal(60)
-      expect(range.ranges[2][1]).to.equal(80)
-    })
-
-    it('should merge overlapping ranges', function () {
-      // Two overlapping ranges
-      let range = ranges.parse('0-20,10-30')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-20,10-30')
-      expect(range.ranges).to.have.lengthOf(1)
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(30)
-
-      // Three overlapping ranges
-      range = ranges.parse('0-15,10-25,20-30')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-15,10-25,20-30')
-      expect(range.ranges).to.have.lengthOf(1)
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(30)
-
-      // Three overlapping ranges, reverse order
-      range = ranges.parse('20-30,10-25,0-15')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('20-30,10-25,0-15')
-      expect(range.ranges).to.have.lengthOf(1)
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(30)
-
-      // Adjacent ranges
-      range = ranges.parse('0-10,11-20')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('0-10,11-20')
-      expect(range.ranges).to.have.lengthOf(1)
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(20)
-    })
-
-    it('should sort ranges', function () {
-      const range = ranges.parse('10-30,0-5')
-      expect(range.unit).to.equal('bytes')
-      expect(range.rangeStr).to.equal('10-30,0-5')
-      expect(range.ranges).to.have.lengthOf(2)
-      expect(range.ranges[0][0]).to.equal(0)
-      expect(range.ranges[0][1]).to.equal(5)
-      expect(range.ranges[1][0]).to.equal(10)
-      expect(range.ranges[1][1]).to.equal(30)
-    })
-  })
-
-  describe('send()', function () {
-    it('should send full files on request', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(200)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should send a range spanning the entire file on request', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[0, 12]],
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-        expect(res.getHeader('content-range')).to.equal('bytes 0-12/*')
-        expect(res.getHeader('content-length')).to.equal('13')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should send a small range on request', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[1, 11]], // Cut off first and last letter
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
-        expect(res.getHeader('content-length')).to.equal('11')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal('ello, world')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should send ranges crossing buffer boundaries', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({
-        chunkSize: 3, // Setting a chunk size smaller than the range should
-        // not impact the test.
-      })
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[1, 11]], // Cut off first and last letter
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-        expect(res.getHeader('content-range')).to.equal('bytes 1-11/*')
-        expect(res.getHeader('content-length')).to.equal('11')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal('ello, world')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should send multiple ranges', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [
-            [1, 3],
-            [5, 7],
-          ], // Slice two ranges out
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206)
-        expect(res.getHeader('content-type')).to.satisfy((str) => str.startsWith('multipart/byteranges'))
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-
-        // The buffer should contain both ranges, but not the rest. That would be
-        // "ell" and ", w".
-        // It's pretty elaborate having to parse the entire multipart response
-        // body, so we'll restrict ourselves to finding lines within it.
-        const body = res._getBuffer().toString()
-        expect(body).to.contain('\r\nContent-Range: bytes 1-3/*\r\n')
-        expect(body).to.contain('\r\nell\r\n')
-        expect(body).to.contain('\r\nContent-Range: bytes 5-7/*\r\n')
-        expect(body).to.contain('\r\n, w')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should deal with ranges without end', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[5, undefined]], // Skip the first part, but read until end
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(206)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-        expect(res.getHeader('content-range')).to.equal('bytes 5-/*')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal(', world!')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-
-    it('should ignore ranges without start', function (done) {
-      const res = mockHttp.createResponse({})
-      const inStream = new streamBuffers.ReadableStreamBuffer({})
-
-      // End-of-stream callback
-      const opts = {
-        name: 'test.file',
-        type: 'application/test',
-        ranges: {
-          ranges: [[undefined, 5]], // Only last five
-        },
-      }
-      ranges.send(res, inStream, opts, function (err) {
-        expect(err).to.not.exist
-
-        // HTTP handling
-        expect(res.statusCode).to.equal(200)
-        expect(res.getHeader('content-type')).to.equal('application/test')
-        expect(res.getHeader('content-disposition')).to.equal('inline')
-
-        // Data/stream handling
-        expect(res._isEndCalled()).to.be.true
-        expect(res._getBuffer().toString()).to.equal('Hello, world!')
-
-        // Notify mocha that we're done.
-        done()
-      })
-
-      // Simulate file stream
-      inStream.emit('open')
-      inStream.put('Hello, world!')
-      inStream.stop()
-    })
-  })
-})

+ 0 - 13
storage-node/packages/util/test/stripEndingSlash.js

@@ -1,13 +0,0 @@
-'use strict'
-
-const expect = require('chai').expect
-const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
-
-describe('stripEndingSlash', function () {
-  it('stripEndingSlash should keep URL without the slash', function () {
-    expect(stripEndingSlash('http://keep.one')).to.equal('http://keep.one')
-  })
-  it('stripEndingSlash should remove ending slash', function () {
-    expect(stripEndingSlash('http://strip.one/')).to.equal('http://strip.one')
-  })
-})

File diff suppressed because it is too large
+ 0 - 17
storage-node/storage-node_new.svg


+ 0 - 24
storage-node/tsconfig.json

@@ -1,24 +0,0 @@
-{
-  "compilerOptions": {
-    "composite": true,
-    "rootDir": "./packages/",
-    "outDir": "./build",
-    "allowJs": true,
-    "target": "es2017",
-    "module": "commonjs",
-    "esModuleInterop": true,
-    "baseUrl": ".",
-    "skipLibCheck": true,
-    "types": ["node", "mocha"],
-    "paths": {
-      "@polkadot/types/augment": ["../types/augment-codec/augment-types.ts"],
-      "@polkadot/api/augment": ["../types/augment-codec/augment-api.ts"]
-    }
-  },
-  "files": [],
-  "exclude": ["**/node_modules/*", "build"],
-  "references": [
-    { "path": "packages/cli" }
-    //   { "path": "packages/storage" }
-  ]
-}

Some files were not shown because too many files changed in this diff