Browse Source

Merge pull request #2764 from Lezek123/distributor-node-staging-2

Merge distributor node into giza_staging
shamil-gadelshin 3 years ago
parent
commit
47c16cb9c7
100 changed files with 4807 additions and 1562 deletions
  1. 15 0
      distributor-node/.eslintrc.js
  2. 2 0
      distributor-node/.prettierignore
  3. 24 405
      distributor-node/README.md
  4. 13 5
      distributor-node/config.yml
  5. 9 4
      distributor-node/config/docker/config.docker.yml
  6. 404 0
      distributor-node/docs/api/index.md
  7. 142 0
      distributor-node/docs/api/templates/main.dot
  8. 48 0
      distributor-node/docs/commands/dev.md
  9. 25 0
      distributor-node/docs/commands/help.md
  10. 5 0
      distributor-node/docs/commands/index.md
  11. 366 0
      distributor-node/docs/commands/leader.md
  12. 69 0
      distributor-node/docs/commands/operator.md
  13. 25 0
      distributor-node/docs/commands/start.md
  14. 400 0
      distributor-node/docs/node/index.md
  15. 11 0
      distributor-node/docs/schema/definition-properties-buckets-oneof-all-buckets.md
  16. 7 0
      distributor-node/docs/schema/definition-properties-buckets-oneof-bucket-ids-items.md
  17. 7 0
      distributor-node/docs/schema/definition-properties-buckets-oneof-bucket-ids.md
  18. 9 0
      distributor-node/docs/schema/definition-properties-buckets.md
  19. 3 0
      distributor-node/docs/schema/definition-properties-directories-properties-assets.md
  20. 3 0
      distributor-node/docs/schema/definition-properties-directories-properties-cachestate.md
  21. 3 0
      distributor-node/docs/schema/definition-properties-directories-properties-logs.md
  22. 65 0
      distributor-node/docs/schema/definition-properties-directories.md
  23. 3 0
      distributor-node/docs/schema/definition-properties-endpoints-properties-elasticsearch.md
  24. 3 0
      distributor-node/docs/schema/definition-properties-endpoints-properties-joystreamnodews.md
  25. 3 0
      distributor-node/docs/schema/definition-properties-endpoints-properties-querynode.md
  26. 65 0
      distributor-node/docs/schema/definition-properties-endpoints.md
  27. 7 0
      distributor-node/docs/schema/definition-properties-id.md
  28. 7 0
      distributor-node/docs/schema/definition-properties-intervals-properties-cachecleanup.md
  29. 7 0
      distributor-node/docs/schema/definition-properties-intervals-properties-checkstoragenoderesponsetimes.md
  30. 7 0
      distributor-node/docs/schema/definition-properties-intervals-properties-savecachestate.md
  31. 77 0
      distributor-node/docs/schema/definition-properties-intervals.md
  32. 3 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-json-backup-file-properties-keyfile.md
  33. 27 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-json-backup-file.md
  34. 3 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase-properties-mnemonic.md
  35. 21 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase-properties-type.md
  36. 64 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase.md
  37. 3 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri-properties-suri.md
  38. 21 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri-properties-type.md
  39. 64 0
      distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri.md
  40. 11 0
      distributor-node/docs/schema/definition-properties-keys-items.md
  41. 7 0
      distributor-node/docs/schema/definition-properties-keys.md
  42. 7 0
      distributor-node/docs/schema/definition-properties-limits-properties-maxconcurrentoutboundconnections.md
  43. 7 0
      distributor-node/docs/schema/definition-properties-limits-properties-maxconcurrentstoragenodedownloads.md
  44. 7 0
      distributor-node/docs/schema/definition-properties-limits-properties-outboundrequeststimeout.md
  45. 13 0
      distributor-node/docs/schema/definition-properties-limits-properties-storage.md
  46. 106 0
      distributor-node/docs/schema/definition-properties-limits.md
  47. 18 0
      distributor-node/docs/schema/definition-properties-log-properties-console.md
  48. 18 0
      distributor-node/docs/schema/definition-properties-log-properties-elastic.md
  49. 18 0
      distributor-node/docs/schema/definition-properties-log-properties-file.md
  50. 110 0
      distributor-node/docs/schema/definition-properties-log.md
  51. 7 0
      distributor-node/docs/schema/definition-properties-port.md
  52. 7 0
      distributor-node/docs/schema/definition-properties-workerid.md
  53. 220 0
      distributor-node/docs/schema/definition.md
  54. 25 6
      distributor-node/package.json
  55. 10 11
      distributor-node/scripts/data/family-metadata.json
  56. 3 1
      distributor-node/scripts/test-commands.sh
  57. 2 2
      distributor-node/src/api-spec/openapi.yml
  58. 42 19
      distributor-node/src/app/index.ts
  59. 3 0
      distributor-node/src/command-base/ExitCodes.ts
  60. 100 3
      distributor-node/src/command-base/accounts.ts
  61. 1 1
      distributor-node/src/command-base/api.ts
  62. 3 0
      distributor-node/src/command-base/default.ts
  63. 3 4
      distributor-node/src/commands/dev/batchUpload.ts
  64. 1 1
      distributor-node/src/commands/leader/create-bucket.ts
  65. 43 2
      distributor-node/src/commands/leader/set-bucket-family-metadata.ts
  66. 1 1
      distributor-node/src/commands/start.ts
  67. 215 0
      distributor-node/src/schemas/configSchema.ts
  68. 46 0
      distributor-node/src/schemas/familyMetadataSchema.ts
  69. 3 3
      distributor-node/src/schemas/index.ts
  70. 0 0
      distributor-node/src/schemas/operatorMetadataSchema.ts
  71. 8 0
      distributor-node/src/schemas/scripts/generateConfigDoc.ts
  72. 4 4
      distributor-node/src/schemas/scripts/generateTypes.ts
  73. 66 81
      distributor-node/src/services/cache/StateCacheService.ts
  74. 116 75
      distributor-node/src/services/content/ContentService.ts
  75. 14 17
      distributor-node/src/services/logging/LoggingService.ts
  76. 60 62
      distributor-node/src/services/networking/NetworkingService.ts
  77. 30 16
      distributor-node/src/services/networking/distributor-node/generated/api.ts
  78. 13 3
      distributor-node/src/services/networking/query-node/api.ts
  79. 314 556
      distributor-node/src/services/networking/query-node/generated/schema.ts
  80. 2 2
      distributor-node/src/services/networking/runtime/api.ts
  81. 15 13
      distributor-node/src/services/networking/storage-node/api.ts
  82. 366 47
      distributor-node/src/services/networking/storage-node/generated/api.ts
  83. 8 14
      distributor-node/src/services/parsers/BagIdParserService.ts
  84. 12 2
      distributor-node/src/services/parsers/ConfigParserService.ts
  85. 38 41
      distributor-node/src/services/server/controllers/public.ts
  86. 1 1
      distributor-node/src/services/validation/ValidationService.ts
  87. 0 56
      distributor-node/src/services/validation/schemas/configSchema.ts
  88. 0 23
      distributor-node/src/services/validation/schemas/familyMetadataSchema.ts
  89. 0 10
      distributor-node/src/services/validation/schemas/utils.ts
  90. 5 0
      distributor-node/src/types/app.ts
  91. 1 1
      distributor-node/src/types/config.ts
  92. 122 7
      distributor-node/src/types/generated/ConfigJson.d.ts
  93. 13 4
      distributor-node/src/types/generated/FamilyMetadataJson.d.ts
  94. 6 7
      docker-compose.yml
  95. 1 2
      package.json
  96. 2 2
      storage-node-v2/scripts/run-all-commands.sh
  97. 5 4
      types/augment/all/defs.json
  98. 4 3
      types/augment/all/types.ts
  99. 12 8
      types/src/storage.ts
  100. 472 33
      yarn.lock

+ 15 - 0
distributor-node/.eslintrc.js

@@ -0,0 +1,15 @@
+module.exports = {
+  env: {
+    mocha: true,
+  },
+  parserOptions: {
+    project: './tsconfig.json',
+  },
+  extends: ['@joystream/eslint-config'],
+  rules: {
+    'no-unused-vars': 'off', // Required by the typescript rule below
+    '@typescript-eslint/no-unused-vars': ['error'],
+    '@typescript-eslint/no-floating-promises': 'error',
+    'no-void': 'off',
+  },
+}

+ 2 - 0
distributor-node/.prettierignore

@@ -1,3 +1,5 @@
 /**/generated
 /**/mock.graphql
 lib
+local
+/**/*.md

+ 24 - 405
distributor-node/README.md

@@ -1,419 +1,38 @@
-@joystream/distributor-cli
-==========================
+# Joystream Distributor CLI
 
-Joystream distributor node CLI
+The Joystream Distributor CLI package contains a set of commands that allow:
+- running the actual distributor node,
+- performing the node operator on-chain duties (like setting the node metadata)
+- performing the distribution working group leader on-chain duties (like setting the distribution system limits, assigning distribution bags and buckets)
 
-[![oclif](https://img.shields.io/badge/cli-oclif-brightgreen.svg)](https://oclif.io)
-[![Version](https://img.shields.io/npm/v/@joystream/distributor-cli.svg)](https://npmjs.org/package/@joystream/distributor-cli)
-[![Downloads/week](https://img.shields.io/npm/dw/@joystream/distributor-cli.svg)](https://npmjs.org/package/@joystream/distributor-cli)
-[![License](https://img.shields.io/npm/l/@joystream/distributor-cli.svg)](https://github.com/Joystream/joystream/blob/master/package.json)
+**To see the list of all available commands and their flags / arguments, check out the [commands](docs/commands/index.md) documentation.**
 
-<!-- toc -->
-* [Usage](#usage)
-* [Commands](#commands)
-<!-- tocstop -->
-# Usage
-<!-- usage -->
-```sh-session
-$ npm install -g @joystream/distributor-cli
-$ joystream-distributor COMMAND
-running command...
-$ joystream-distributor (-v|--version|version)
-@joystream/distributor-cli/0.1.0 linux-x64 node-v14.17.3
-$ joystream-distributor --help [COMMAND]
-USAGE
-  $ joystream-distributor COMMAND
-...
-```
-<!-- usagestop -->
-# Commands
-<!-- commands -->
-* [`joystream-distributor dev:init`](#joystream-distributor-devinit)
-* [`joystream-distributor help [COMMAND]`](#joystream-distributor-help-command)
-* [`joystream-distributor leader:cancel-invitation`](#joystream-distributor-leadercancel-invitation)
-* [`joystream-distributor leader:create-bucket`](#joystream-distributor-leadercreate-bucket)
-* [`joystream-distributor leader:create-bucket-family`](#joystream-distributor-leadercreate-bucket-family)
-* [`joystream-distributor leader:delete-bucket`](#joystream-distributor-leaderdelete-bucket)
-* [`joystream-distributor leader:delete-bucket-family`](#joystream-distributor-leaderdelete-bucket-family)
-* [`joystream-distributor leader:invite-bucket-operator`](#joystream-distributor-leaderinvite-bucket-operator)
-* [`joystream-distributor leader:set-buckets-per-bag-limit`](#joystream-distributor-leaderset-buckets-per-bag-limit)
-* [`joystream-distributor leader:update-bag`](#joystream-distributor-leaderupdate-bag)
-* [`joystream-distributor leader:update-bucket-mode`](#joystream-distributor-leaderupdate-bucket-mode)
-* [`joystream-distributor leader:update-bucket-status`](#joystream-distributor-leaderupdate-bucket-status)
-* [`joystream-distributor leader:update-dynamic-bag-policy`](#joystream-distributor-leaderupdate-dynamic-bag-policy)
-* [`joystream-distributor operator:accept-invitation`](#joystream-distributor-operatoraccept-invitation)
-* [`joystream-distributor operator:set-metadata`](#joystream-distributor-operatorset-metadata)
-* [`joystream-distributor start`](#joystream-distributor-start)
+## Configuration
 
-## `joystream-distributor dev:init`
+### Config file
 
-Initialize development environment. Sets Alice as distributor working group leader.
+All the configuration values required by Joystream Distributor CLI are provided via a single configuration file (either `yml` or `json`).
 
-```
-USAGE
-  $ joystream-distributor dev:init
+The path to the configuration will be (ordered from highest to lowest priority):
+- The value of `--configPath` flag provided when running a command, _or_
+- The value of `CONFIG_PATH` environment variable, _or_
+- `config.yml` in the current working directory by default
 
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
+### ENV variables
 
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
+All configuration values can be overridden using environment variables, which may be useful when running the distributor node as a docker service.
 
-_See code: [src/commands/dev/init.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/init.ts)_
+To determine environment variable name based on a config key, for example `intervals.cacheCleanup`, use the following formula:
+- convert `pascalCase` fieldnames to `SCREAMING_SNAKE_CASE`: `intervals.cacheCleanup` => `INTERVALS.CACHE_CLEANUP`
+- replace all dots with `__`: `INTERVALS.CACHE_CLEANUP` => `INTERVALS__CACHE_CLEANUP`
+- add `JOYSTREAM_DISTRIBUTOR__` prefix: `INTERVALS__CACHE_CLEANUP` => `JOYSTREAM_DISTRIBUTOR__INTERVALS__CACHE_CLEANUP`
 
-## `joystream-distributor help [COMMAND]`
+In case of arrays, the values must be provided as a JSON string, for example `JOYSTREAM_DISTRIBUTOR__KEYS="[{\"suri\":\"//Bob\"}]"`.
 
-display help for joystream-distributor
+For more environment variable examples see the `distributor-node` service configuration in [docker-compose.yml](../docker-compose.yml).
 
-```
-USAGE
-  $ joystream-distributor help [COMMAND]
+**For a detailed configuration reference, check out the [config schema](docs/schema/definition.md) documentation.**
 
-ARGUMENTS
-  COMMAND  command to show help for
+## Distributor Node
 
-OPTIONS
-  --all  see all commands in CLI
-```
-
-_See code: [@oclif/plugin-help](https://github.com/oclif/plugin-help/blob/v2.2.3/src/commands/help.ts)_
-
-## `joystream-distributor leader:cancel-invitation`
-
-Cancel pending distribution bucket operator invitation.
-
-```
-USAGE
-  $ joystream-distributor leader:cancel-invitation
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-
-DESCRIPTION
-  Requires distribution working group leader permissions.
-```
-
-_See code: [src/commands/leader/cancel-invitation.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/cancel-invitation.ts)_
-
-## `joystream-distributor leader:create-bucket`
-
-Create new distribution bucket. Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:create-bucket
-
-OPTIONS
-  -a, --acceptingBags=(yes|no)  [default: no] Whether the created bucket should accept new bags
-
-  -c, --configPath=configPath   [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                                directory)
-
-  -f, --familyId=familyId       (required) Distribution bucket family id
-
-  -y, --yes                     Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/create-bucket.ts)_
-
-## `joystream-distributor leader:create-bucket-family`
-
-Create new distribution bucket family. Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:create-bucket-family
-
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/create-bucket-family.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/create-bucket-family.ts)_
-
-## `joystream-distributor leader:delete-bucket`
-
-Delete distribution bucket. The bucket must have no operators. Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:delete-bucket
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/delete-bucket.ts)_
-
-## `joystream-distributor leader:delete-bucket-family`
-
-Delete distribution bucket family. Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:delete-bucket-family
-
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/delete-bucket-family.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/delete-bucket-family.ts)_
-
-## `joystream-distributor leader:invite-bucket-operator`
-
-Invite distribution bucket operator (distribution group worker).
-
-```
-USAGE
-  $ joystream-distributor leader:invite-bucket-operator
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -w, --workerId=workerId      (required) ID of the distribution group worker to invite as bucket operator
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-
-DESCRIPTION
-  The specified bucket must not have any operator currently.
-     Requires distribution working group leader permissions.
-```
-
-_See code: [src/commands/leader/invite-bucket-operator.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/invite-bucket-operator.ts)_
-
-## `joystream-distributor leader:set-buckets-per-bag-limit`
-
-Set max. distribution buckets per bag limit. Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:set-buckets-per-bag-limit
-
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -l, --limit=limit            (required) New limit value
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/set-buckets-per-bag-limit.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/set-buckets-per-bag-limit.ts)_
-
-## `joystream-distributor leader:update-bag`
-
-Add/remove distribution buckets from a bag.
-
-```
-USAGE
-  $ joystream-distributor leader:update-bag
-
-OPTIONS
-  -a, --add=add
-      [default: ] ID of a bucket to add to bag
-
-  -b, --bagId=bagId
-      (required) Bag ID. Format: {bag_type}:{sub_type}:{id}.
-           - Bag types: 'static', 'dynamic'
-           - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
-           - Id:
-             - absent for 'static:council'
-             - working group name for 'static:wg'
-             - integer for 'dynamic:member' and 'dynamic:channel'
-           Examples:
-           - static:council
-           - static:wg:storage
-           - dynamic:member:4
-
-  -c, --configPath=configPath
-      [default: ./config.yml] Path to config JSON/YAML file (relative to current working directory)
-
-  -f, --familyId=familyId
-      (required) ID of the distribution bucket family
-
-  -r, --remove=remove
-      [default: ] ID of a bucket to remove from bag
-
-  -y, --yes
-      Answer "yes" to any prompt, skipping any manual confirmations
-
-EXAMPLE
-  $ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 -a 2 -a 3 -r 4 -r 5
-```
-
-_See code: [src/commands/leader/update-bag.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bag.ts)_
-
-## `joystream-distributor leader:update-bucket-mode`
-
-Update distribution bucket mode ("distributing" flag). Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:update-bucket-mode
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -d, --mode=(on|off)          (required) Whether the bucket should be "on" (distributing) or "off" (not distributing)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/update-bucket-mode.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bucket-mode.ts)_
-
-## `joystream-distributor leader:update-bucket-status`
-
-Update distribution bucket status ("acceptingNewBags" flag). Requires distribution working group leader permissions.
-
-```
-USAGE
-  $ joystream-distributor leader:update-bucket-status
-
-OPTIONS
-  -B, --bucketId=bucketId       (required) Distribution bucket id
-  -a, --acceptingBags=(yes|no)  (required) Whether the bucket should accept new bags
-
-  -c, --configPath=configPath   [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                                directory)
-
-  -f, --familyId=familyId       (required) Distribution bucket family id
-
-  -y, --yes                     Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bucket-status.ts)_
-
-## `joystream-distributor leader:update-dynamic-bag-policy`
-
-Update dynamic bag creation policy (number of buckets by family that should store given dynamic bag type).
-
-```
-USAGE
-  $ joystream-distributor leader:update-dynamic-bag-policy
-
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -p, --policy=policy          Key-value pair of {familyId}:{numberOfBuckets}
-
-  -t, --type=(Member|Channel)  (required) Dynamic bag type
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-
-DESCRIPTION
-  Requires distribution working group leader permissions.
-
-EXAMPLE
-  $ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 -p 2:10 -p 3:5
-```
-
-_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-dynamic-bag-policy.ts)_
-
-## `joystream-distributor operator:accept-invitation`
-
-Accept pending distribution bucket operator invitation.
-
-```
-USAGE
-  $ joystream-distributor operator:accept-invitation
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-
-DESCRIPTION
-  Requires the invited distribution group worker role key.
-```
-
-_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/accept-invitation.ts)_
-
-## `joystream-distributor operator:set-metadata`
-
-Set/update distribution bucket operator metadata.
-
-```
-USAGE
-  $ joystream-distributor operator:set-metadata
-
-OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
-
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -e, --endpoint=endpoint      Root distribution node endpoint
-
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
-  -i, --input=input            Path to JSON metadata file
-
-  -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-
-DESCRIPTION
-  Requires active distribution bucket operator worker role key.
-```
-
-_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/set-metadata.ts)_
-
-## `joystream-distributor start`
-
-Start the node
-
-```
-USAGE
-  $ joystream-distributor start
-
-OPTIONS
-  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
-                               directory)
-
-  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
-```
-
-_See code: [src/commands/start.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/start.ts)_
-<!-- commandsstop -->
+**To understand how the distributor node works in detail, check out the [node](docs/node/index.md) documentation.**

+ 13 - 5
distributor-node/config.yml

@@ -1,22 +1,30 @@
 id: test-node
 endpoints:
   queryNode: http://localhost:8081/graphql
-  substrateNode: ws://localhost:9944
+  joystreamNodeWs: ws://localhost:9944
   # elasticSearch: http://localhost:9200
 directories:
-  data: ./local/data
-  cache: ./local/cache
+  assets: ./local/data
+  cacheState: ./local/cache
   logs: ./local/logs
 log:
   file: debug
-  console: info
+  console: verbose
   # elastic: info
 limits:
   storage: 100G
   maxConcurrentStorageNodeDownloads: 100
   maxConcurrentOutboundConnections: 300
   outboundRequestsTimeout: 5000
+intervals:
+  saveCacheState: 60
+  checkStorageNodeResponseTimes: 60
+  cacheCleanup: 60
 port: 3334
-keys: [//Alice]
+keys:
+  - suri: //Alice
+  # - mnemonic: "escape naive annual throw tragic achieve grunt verify cram note harvest problem"
+  #   type: ed25519
+  # - keyfile: "/path/to/keyfile.json"
 buckets: 'all'
 workerId: 0

+ 9 - 4
distributor-node/config/docker/config.docker.yml

@@ -1,11 +1,11 @@
 id: distributor-node-docker
 endpoints:
   queryNode: http://graphql-server-mnt:4002/graphql
-  substrateNode: ws://joystream-node:9944
+  joystreamNodeWs: ws://joystream-node:9944
   # elasticSearch: http://elasticsearch:9200
 directories:
-  data: /data
-  cache: /cache
+  assets: /data
+  cacheState: /cache
   logs: /logs
 log:
   console: info
@@ -15,7 +15,12 @@ limits:
   maxConcurrentStorageNodeDownloads: 100
   maxConcurrentOutboundConnections: 300
   outboundRequestsTimeout: 5000
+intervals:
+  saveCacheState: 60
+  checkStorageNodeResponseTimes: 60
+  cacheCleanup: 60
 port: 3334
-keys: [//Alice]
+keys:
+  - suri: //Alice
 buckets: 'all'
 workerId: 0

+ 404 - 0
distributor-node/docs/api/index.md

@@ -0,0 +1,404 @@
+---
+title: Distributor node API v0.1.0
+language_tabs:
+  - javascript: JavaScript
+  - shell: Shell
+language_clients:
+  - javascript: ""
+  - shell: ""
+toc_footers:
+  - <a href="https://github.com/Joystream/joystream/issues/2224">Distributor
+    node API</a>
+includes: []
+search: true
+highlight_theme: darkula
+headingLevel: 2
+
+---
+
+<!-- AUTO-GENERATED-CONTENT:START (TOC) -->
+- [public](#public)
+- [public.status](#publicstatus)
+  - [Responses](#responses)
+  - [Responses](#responses-1)
+- [public.buckets](#publicbuckets)
+- [public.assetHead](#publicassethead)
+  - [Parameters](#parameters)
+  - [Responses](#responses-2)
+  - [Response Headers](#response-headers)
+- [public.asset](#publicasset)
+  - [Parameters](#parameters-1)
+  - [Responses](#responses-3)
+- [ErrorResponse](#errorresponse)
+  - [Response Headers](#response-headers-1)
+- [Schemas](#schemas)
+  - [Properties](#properties)
+- [StatusResponse](#statusresponse)
+  - [Properties](#properties-1)
+- [BucketsResponse](#bucketsresponse)
+  - [Properties](#properties-2)
+<!-- AUTO-GENERATED-CONTENT:END -->
+
+<h1 id="distributor-node-api">Distributor node API v0.1.0</h1>
+
+> Scroll down for code samples, example requests and responses.
+
+Distributor node API
+
+Base URLs:
+
+* <a href="http://localhost:3334/api/v1/">http://localhost:3334/api/v1/</a>
+
+Email: <a href="mailto:info@joystream.org">Support</a> 
+License: <a href="https://spdx.org/licenses/GPL-3.0-only.html">GPL-3.0-only</a>
+
+<h1 id="distributor-node-api-public">public</h1>
+
+Public distributor node API
+
+## public.status
+
+<a id="opIdpublic.status"></a>
+
+> Code samples
+
+```javascript
+
+const headers = {
+  'Accept':'application/json'
+};
+
+fetch('http://localhost:3334/api/v1/status',
+{
+  method: 'GET',
+
+  headers: headers
+})
+.then(function(res) {
+    return res.json();
+}).then(function(body) {
+    console.log(body);
+});
+
+```
+
+```shell
+# You can also use wget
+curl -X GET http://localhost:3334/api/v1/status \
+  -H 'Accept: application/json'
+
+```
+
+`GET /status`
+
+Returns json object describing current node status.
+
+> Example responses
+
+> 200 Response
+
+```json
+{
+  "id": "string",
+  "objectsInCache": 0,
+  "storageLimit": 0,
+  "storageUsed": 0,
+  "uptime": 0,
+  "downloadsInProgress": 0
+}
+```
+
+<h3 id="public.status-responses">Responses</h3>
+
+|Status|Meaning|Description|Schema|
+|---|---|---|---|
+|200|[OK](https://tools.ietf.org/html/rfc7231#section-6.3.1)|OK|[StatusResponse](#schemastatusresponse)|
+|500|[Internal Server Error](https://tools.ietf.org/html/rfc7231#section-6.6.1)|Unexpected server error|None|
+
+<aside class="success">
+This operation does not require authentication
+</aside>
+
+## public.buckets
+
+<a id="opIdpublic.buckets"></a>
+
+> Code samples
+
+```javascript
+
+const headers = {
+  'Accept':'application/json'
+};
+
+fetch('http://localhost:3334/api/v1/buckets',
+{
+  method: 'GET',
+
+  headers: headers
+})
+.then(function(res) {
+    return res.json();
+}).then(function(body) {
+    console.log(body);
+});
+
+```
+
+```shell
+# You can also use wget
+curl -X GET http://localhost:3334/api/v1/buckets \
+  -H 'Accept: application/json'
+
+```
+
+`GET /buckets`
+
+Returns list of distributed buckets
+
+> Example responses
+
+> 200 Response
+
+```json
+{
+  "bucketIds": [
+    0
+  ]
+}
+```
+
+<h3 id="public.buckets-responses">Responses</h3>
+
+|Status|Meaning|Description|Schema|
+|---|---|---|---|
+|200|[OK](https://tools.ietf.org/html/rfc7231#section-6.3.1)|OK|[BucketsResponse](#schemabucketsresponse)|
+|500|[Internal Server Error](https://tools.ietf.org/html/rfc7231#section-6.6.1)|Unexpected server error|None|
+
+<aside class="success">
+This operation does not require authentication
+</aside>
+
+## public.assetHead
+
+<a id="opIdpublic.assetHead"></a>
+
+> Code samples
+
+```javascript
+
+fetch('http://localhost:3334/api/v1/asset/{objectId}',
+{
+  method: 'HEAD'
+
+})
+.then(function(res) {
+    return res.json();
+}).then(function(body) {
+    console.log(body);
+});
+
+```
+
+```shell
+# You can also use wget
+curl -X HEAD http://localhost:3334/api/v1/asset/{objectId}
+
+```
+
+`HEAD /asset/{objectId}`
+
+Returns asset response headers (cache status, content type and/or length, accepted ranges etc.)
+
+<h3 id="public.assethead-parameters">Parameters</h3>
+
+|Name|In|Type|Required|Description|
+|---|---|---|---|---|
+|objectId|path|string|true|Data Object ID|
+
+<h3 id="public.assethead-responses">Responses</h3>
+
+|Status|Meaning|Description|Schema|
+|---|---|---|---|
+|200|[OK](https://tools.ietf.org/html/rfc7231#section-6.3.1)|Object is supported and should be sent on GET request.|None|
+|404|[Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)|Data object does not exist.|None|
+|421|[Misdirected request](https://tools.ietf.org/html/rfc7540#section-9.1.2)|Misdirected request. Data object not supported by the node.|None|
+|500|[Internal Server Error](https://tools.ietf.org/html/rfc7231#section-6.6.1)|Unexpected server error|None|
+
+### Response Headers
+
+|Status|Header|Type|Format|Description|
+|---|---|---|---|---|
+|200|X-Cache|string||Describes cache status of an object. Hit - object is already fully fetched in distributor node's cache. Pending - object is still being fetched from the storage node. Miss - object is neither in cache nor currently being fetched. Fetching from storage node may be triggered.|
+
+<aside class="success">
+This operation does not require authentication
+</aside>
+
+## public.asset
+
+<a id="opIdpublic.asset"></a>
+
+> Code samples
+
+```javascript
+
+const headers = {
+  'Accept':'image/*'
+};
+
+fetch('http://localhost:3334/api/v1/asset/{objectId}',
+{
+  method: 'GET',
+
+  headers: headers
+})
+.then(function(res) {
+    return res.json();
+}).then(function(body) {
+    console.log(body);
+});
+
+```
+
+```shell
+# You can also use wget
+curl -X GET http://localhost:3334/api/v1/asset/{objectId} \
+  -H 'Accept: image/*'
+
+```
+
+`GET /asset/{objectId}`
+
+Returns a media file.
+
+<h3 id="public.asset-parameters">Parameters</h3>
+
+|Name|In|Type|Required|Description|
+|---|---|---|---|---|
+|objectId|path|string|true|Data Object ID|
+
+> Example responses
+
+> 200 Response
+
+> 404 Response
+
+```json
+{
+  "type": "string",
+  "message": "string"
+}
+```
+
+<h3 id="public.asset-responses">Responses</h3>
+
+|Status|Meaning|Description|Schema|
+|---|---|---|---|
+|200|[OK](https://tools.ietf.org/html/rfc7231#section-6.3.1)|Full available object data sent|string|
+|206|[Partial Content](https://tools.ietf.org/html/rfc7233#section-4.1)|Requested partial object data sent|string|
+|404|[Not Found](https://tools.ietf.org/html/rfc7231#section-6.5.4)|Data object does not exist.|[ErrorResponse](#schemaerrorresponse)|
+|421|[Misdirected request](https://tools.ietf.org/html/rfc7540#section-9.1.2)|Misdirected request. Data object not supported.|[ErrorResponse](#schemaerrorresponse)|
+|500|[Internal Server Error](https://tools.ietf.org/html/rfc7231#section-6.6.1)|Unexpected server error|None|
+
+### Response Headers
+
+|Status|Header|Type|Format|Description|
+|---|---|---|---|---|
+|200|X-Cache|string||Describes cache status of an object. Hit - object is already fully fetched in distributor node's cache. Pending - object is still being fetched from the storage node. Miss - object is neither in cache nor currently being fetched. Fetching from storage node may be triggered.|
+|200|X-Data-Source|string||Describes the source of data stream. External - the request was proxied to a storage node. Local - the data is streamed from local file.|
+|206|X-Cache|string||Describes cache status of an object. Hit - object is already fully fetched in distributor node's cache. Pending - object is still being fetched from the storage node. Miss - object is neither in cache nor currently being fetched. Fetching from storage node may be triggered.|
+|206|X-Data-Source|string||Describes the source of data stream. External - the request was proxied to a storage node. Local - the data is streamed from local file.|
+
+<aside class="success">
+This operation does not require authentication
+</aside>
+
+# Schemas
+
+<h2 id="tocS_ErrorResponse">ErrorResponse</h2>
+
+<a id="schemaerrorresponse"></a>
+<a id="schema_ErrorResponse"></a>
+<a id="tocSerrorresponse"></a>
+<a id="tocserrorresponse"></a>
+
+```json
+{
+  "type": "string",
+  "message": "string"
+}
+
+```
+
+### Properties
+
+|Name|Type|Required|Restrictions|Description|
+|---|---|---|---|---|
+|type|string|false|none|none|
+|message|string|true|none|none|
+
+<h2 id="tocS_StatusResponse">StatusResponse</h2>
+
+<a id="schemastatusresponse"></a>
+<a id="schema_StatusResponse"></a>
+<a id="tocSstatusresponse"></a>
+<a id="tocsstatusresponse"></a>
+
+```json
+{
+  "id": "string",
+  "objectsInCache": 0,
+  "storageLimit": 0,
+  "storageUsed": 0,
+  "uptime": 0,
+  "downloadsInProgress": 0
+}
+
+```
+
+### Properties
+
+|Name|Type|Required|Restrictions|Description|
+|---|---|---|---|---|
+|id|string|true|none|none|
+|objectsInCache|integer|true|none|none|
+|storageLimit|integer|true|none|none|
+|storageUsed|integer|true|none|none|
+|uptime|integer|true|none|none|
+|downloadsInProgress|integer|true|none|none|
+
+<h2 id="tocS_BucketsResponse">BucketsResponse</h2>
+
+<a id="schemabucketsresponse"></a>
+<a id="schema_BucketsResponse"></a>
+<a id="tocSbucketsresponse"></a>
+<a id="tocsbucketsresponse"></a>
+
+```json
+{
+  "bucketIds": [
+    0
+  ]
+}
+
+```
+
+### Properties
+
+oneOf
+
+|Name|Type|Required|Restrictions|Description|
+|---|---|---|---|---|
+|*anonymous*|object|false|none|none|
+|» bucketIds|[integer]|true|none|none|
+
+xor
+
+|Name|Type|Required|Restrictions|Description|
+|---|---|---|---|---|
+|*anonymous*|object|false|none|none|
+|» allByWorkerId|integer|true|none|none|
+
+undefined
+

+ 142 - 0
distributor-node/docs/api/templates/main.dot

@@ -0,0 +1,142 @@
+<!-- AUTO-GENERATED-CONTENT:START (TOC) -->
+<!-- AUTO-GENERATED-CONTENT:END -->
+
+{{= data.tags.section }}
+<h1 id="{{=data.title_prefix}}">{{=data.api.info && data.api.info.title}} {{=data.version}}</h1>
+
+> Scroll down for {{? data.header.language_tabs.length}}code samples, {{?}}example requests and responses.
+
+{{? data.api.info && data.api.info.description}}{{=data.api.info.description}}{{?}}
+
+{{? data.api.servers }}
+Base URLs:
+{{~data.api.servers :s}}
+* <a href="{{=s.url}}">{{=s.url}}</a>
+{{ for(var v in s.variables) { }}
+    * **{{=v}}** - {{=s.variables[v].description||''}} Default: {{=s.variables[v].default}}
+{{? s.variables[v].enum}}
+{{~ s.variables[v].enum :e}}
+        * {{= e}}
+{{~}}
+{{?}}
+{{ } }}
+{{~}}
+{{?}}
+
+{{? data.api.info && data.api.info.termsOfService}}<a href="{{=data.api.info.termsOfService}}">Terms of service</a>{{?}}
+{{? data.api.info && data.api.info.contact}}{{? data.api.info.contact.email}}Email: <a href="mailto:{{=data.api.info.contact.email}}">{{=data.api.info.contact.name || 'Support'}}</a> {{?}}{{? data.api.info.contact.url}}Web: <a href="{{=data.api.info.contact.url}}">{{= data.api.info.contact.name || 'Support'}}</a> {{?}}{{?}}
+{{? data.api.info && data.api.info.license}}{{? data.api.info.license.url}}License: <a href="{{=data.api.info.license.url}}">{{=data.api.info.license.name}}</a>{{??}} License: {{=data.api.info.license.name}}{{?}}{{?}}
+{{= data.tags.endSection }}
+
+{{? data.api.components && data.api.components.securitySchemes }}
+{{#def.security}}
+{{?}}
+
+{{ for (var r in data.resources) { }}
+{{ data.resource = data.resources[r]; }}
+
+{{= data.tags.section }}
+<h1 id="{{=data.title_prefix+'-'+data.utils.slugify(r)}}">{{= r}}</h1>
+
+{{? data.resource.description }}{{= data.resource.description}}{{?}}
+
+{{? data.resource.externalDocs}}
+<a href="{{=data.resource.externalDocs.url}}">{{=data.resource.externalDocs.description||'External documentation'}}</a>
+{{?}}
+
+{{ for (var m in data.resource.methods) { }}
+{{ data.operationUniqueName = m; }}
+{{ data.method = data.resource.methods[m]; }}
+{{ data.operationUniqueSlug = data.method.slug; }}
+{{ data.operation = data.method.operation; }}
+{{= data.templates.operation(data) }}
+{{ } /* of methods */ }}
+
+{{= data.tags.endSection }}
+{{ } /* of resources */ }}
+
+{{? data.api.components && data.api.components.schemas }}
+{{= data.tags.section }}
+
+# Schemas
+
+{{ for (var s in data.components.schemas) { }}
+{{ var origSchema = data.components.schemas[s]; }}
+{{ var schema = data.api.components.schemas[s]; }}
+
+{{= data.tags.section }}
+<h2 id="tocS_{{=s}}">{{=s}}</h2>
+{{ /* backwards compatibility */ }}
+<a id="schema{{=s.toLowerCase()}}"></a>
+<a id="schema_{{=s}}"></a>
+<a id="tocS{{=s.toLowerCase()}}"></a>
+<a id="tocs{{=s.toLowerCase()}}"></a>
+
+{{? data.options.yaml }}
+```yaml
+{{=data.utils.yaml.stringify(data.utils.getSample(schema,data.options,{quiet:true},data.api))}}
+{{??}}
+```json
+{{=data.utils.safejson(data.utils.getSample(schema,data.options,{quiet:true},data.api),null,2)}}
+{{?}}
+```
+
+{{ var enums = []; }}
+{{ var blocks = data.utils.schemaToArray(origSchema,-1,{trim:true,join:true},data); }}
+{{ for (var block of blocks) {
+     for (var p of block.rows) {
+       if (p.schema && p.schema.enum) {
+         for (var e of p.schema.enum) {
+           enums.push({name:p.name,value:e});
+         }
+       }
+     }
+   }
+}}
+
+{{~ blocks :block}}
+{{? block.title }}{{= block.title}}{{= '\n\n'}}{{?}}
+{{? block.externalDocs}}
+<a href="{{=block.externalDocs.url}}">{{=block.externalDocs.description||'External documentation'}}</a>
+{{?}}
+
+{{? block===blocks[0] }}
+{{= data.tags.section }}
+
+### Properties
+{{?}}
+
+{{? block.rows.length}}|Name|Type|Required|Restrictions|Description|
+|---|---|---|---|---|{{?}}
+{{~ block.rows :p}}|{{=p.displayName}}|{{=p.safeType}}|{{=p.required}}|{{=p.restrictions||'none'}}|{{=p.description||'none'}}|
+{{~}}
+{{~}}
+{{? (blocks[0].rows.length === 0) && (blocks.length === 1) }}
+*None*
+{{?}}
+
+{{? enums.length > 0 }}
+{{= data.tags.section }}
+
+#### Enumerated Values
+
+|Property|Value|
+|---|---|
+{{~ enums :e}}|{{=e.name}}|{{=data.utils.toPrimitive(e.value)}}|
+{{~}}
+
+{{= data.tags.endSection }}
+{{?}}
+
+{{= data.tags.endSection }}
+{{= data.tags.endSection }}
+
+{{ } /* of schemas */ }}
+
+{{?}}
+
+{{#def.footer}}
+
+{{? data.options.discovery}}
+{{#def.discovery}}
+{{?}}

+ 48 - 0
distributor-node/docs/commands/dev.md

@@ -0,0 +1,48 @@
+`joystream-distributor dev`
+===========================
+
+Developer utility commands
+
+* [`joystream-distributor dev:batchUpload`](#joystream-distributor-devbatchupload)
+* [`joystream-distributor dev:init`](#joystream-distributor-devinit)
+
+## `joystream-distributor dev:batchUpload`
+
+```
+undefined
+
+USAGE
+  $ joystream-distributor dev:batchUpload
+
+OPTIONS
+  -B, --bucketId=bucketId          (required) Storage bucket id
+  -C, --batchesCount=batchesCount  (required)
+  -S, --batchSize=batchSize        (required)
+  -b, --bagId=bagId                (required)
+
+  -c, --configPath=configPath      [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                                   directory)
+
+  -y, --yes                        Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/dev/batchUpload.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/batchUpload.ts)_
+
+## `joystream-distributor dev:init`
+
+Initialize development environment. Sets Alice as distributor working group leader.
+
+```
+Initialize development environment. Sets Alice as distributor working group leader.
+
+USAGE
+  $ joystream-distributor dev:init
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/dev/init.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/init.ts)_

+ 25 - 0
distributor-node/docs/commands/help.md

@@ -0,0 +1,25 @@
+`joystream-distributor help`
+============================
+
+display help for joystream-distributor
+
+* [`joystream-distributor help [COMMAND]`](#joystream-distributor-help-command)
+
+## `joystream-distributor help [COMMAND]`
+
+display help for joystream-distributor
+
+```
+display help for <%= config.bin %>
+
+USAGE
+  $ joystream-distributor help [COMMAND]
+
+ARGUMENTS
+  COMMAND  command to show help for
+
+OPTIONS
+  --all  see all commands in CLI
+```
+
+_See code: [@oclif/plugin-help](https://github.com/oclif/plugin-help/blob/v2.2.3/src/commands/help.ts)_

+ 5 - 0
distributor-node/docs/commands/index.md

@@ -0,0 +1,5 @@
+- [`joystream-distributor help` command](./help.md)
+- [`joystream-distributor start` command](./start.md)
+- [`joystream-distributor leader:*` commands](./leader.md)
+- [`joystream-distributor operator:*` commands](./operator.md)
+- [`joystream-distributor dev:*` commands](./dev.md)

+ 366 - 0
distributor-node/docs/commands/leader.md

@@ -0,0 +1,366 @@
+`joystream-distributor leader`
+==============================
+
+Commands for performing Distribution Working Group leader on-chain duties (like setting distribution module limits and parameters, assigning bags and buckets etc.)
+
+* [`joystream-distributor leader:cancel-invitation`](#joystream-distributor-leadercancel-invitation)
+* [`joystream-distributor leader:create-bucket`](#joystream-distributor-leadercreate-bucket)
+* [`joystream-distributor leader:create-bucket-family`](#joystream-distributor-leadercreate-bucket-family)
+* [`joystream-distributor leader:delete-bucket`](#joystream-distributor-leaderdelete-bucket)
+* [`joystream-distributor leader:delete-bucket-family`](#joystream-distributor-leaderdelete-bucket-family)
+* [`joystream-distributor leader:invite-bucket-operator`](#joystream-distributor-leaderinvite-bucket-operator)
+* [`joystream-distributor leader:remove-bucket-operator`](#joystream-distributor-leaderremove-bucket-operator)
+* [`joystream-distributor leader:set-bucket-family-metadata`](#joystream-distributor-leaderset-bucket-family-metadata)
+* [`joystream-distributor leader:set-buckets-per-bag-limit`](#joystream-distributor-leaderset-buckets-per-bag-limit)
+* [`joystream-distributor leader:update-bag`](#joystream-distributor-leaderupdate-bag)
+* [`joystream-distributor leader:update-bucket-mode`](#joystream-distributor-leaderupdate-bucket-mode)
+* [`joystream-distributor leader:update-bucket-status`](#joystream-distributor-leaderupdate-bucket-status)
+* [`joystream-distributor leader:update-dynamic-bag-policy`](#joystream-distributor-leaderupdate-dynamic-bag-policy)
+
+## `joystream-distributor leader:cancel-invitation`
+
+Cancel pending distribution bucket operator invitation.
+
+```
+Cancel pending distribution bucket operator invitation.
+  Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:cancel-invitation
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires distribution working group leader permissions.
+```
+
+_See code: [src/commands/leader/cancel-invitation.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/cancel-invitation.ts)_
+
+## `joystream-distributor leader:create-bucket`
+
+Create new distribution bucket. Requires distribution working group leader permissions.
+
+```
+Create new distribution bucket. Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:create-bucket
+
+OPTIONS
+  -a, --acceptingBags=(yes|no)  [default: no] Whether the created bucket should accept new bags
+
+  -c, --configPath=configPath   [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                                directory)
+
+  -f, --familyId=familyId       (required) Distribution bucket family id
+
+  -y, --yes                     Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/create-bucket.ts)_
+
+## `joystream-distributor leader:create-bucket-family`
+
+Create new distribution bucket family. Requires distribution working group leader permissions.
+
+```
+Create new distribution bucket family. Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:create-bucket-family
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/create-bucket-family.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/create-bucket-family.ts)_
+
+## `joystream-distributor leader:delete-bucket`
+
+Delete distribution bucket. The bucket must have no operators. Requires distribution working group leader permissions.
+
+```
+Delete distribution bucket. The bucket must have no operators. Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:delete-bucket
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/delete-bucket.ts)_
+
+## `joystream-distributor leader:delete-bucket-family`
+
+Delete distribution bucket family. Requires distribution working group leader permissions.
+
+```
+Delete distribution bucket family. Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:delete-bucket-family
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/delete-bucket-family.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/delete-bucket-family.ts)_
+
+## `joystream-distributor leader:invite-bucket-operator`
+
+Invite distribution bucket operator (distribution group worker).
+
+```
+Invite distribution bucket operator (distribution group worker).
+  The specified bucket must not have any operator currently.
+  Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:invite-bucket-operator
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -w, --workerId=workerId      (required) ID of the distribution group worker to invite as bucket operator
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  The specified bucket must not have any operator currently.
+     Requires distribution working group leader permissions.
+```
+
+_See code: [src/commands/leader/invite-bucket-operator.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/invite-bucket-operator.ts)_
+
+## `joystream-distributor leader:remove-bucket-operator`
+
+Remove distribution bucket operator (distribution group worker).
+
+```
+Remove distribution bucket operator (distribution group worker).
+  Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:remove-bucket-operator
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -w, --workerId=workerId      (required) ID of the operator (distribution working group worker) to remove from the
+                               bucket
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires distribution working group leader permissions.
+```
+
+_See code: [src/commands/leader/remove-bucket-operator.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/remove-bucket-operator.ts)_
+
+## `joystream-distributor leader:set-bucket-family-metadata`
+
+Set/update distribution bucket family metadata.
+
+```
+Set/update distribution bucket family metadata.
+  Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:set-bucket-family-metadata
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -i, --input=input            (required) Path to JSON metadata file
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires distribution working group leader permissions.
+```
+
+_See code: [src/commands/leader/set-bucket-family-metadata.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/set-bucket-family-metadata.ts)_
+
+## `joystream-distributor leader:set-buckets-per-bag-limit`
+
+Set max. distribution buckets per bag limit. Requires distribution working group leader permissions.
+
+```
+Set max. distribution buckets per bag limit. Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:set-buckets-per-bag-limit
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -l, --limit=limit            (required) New limit value
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/set-buckets-per-bag-limit.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/set-buckets-per-bag-limit.ts)_
+
+## `joystream-distributor leader:update-bag`
+
+Add/remove distribution buckets from a bag.
+
+```
+Add/remove distribution buckets from a bag.
+
+USAGE
+  $ joystream-distributor leader:update-bag
+
+OPTIONS
+  -a, --add=add
+      [default: ] ID of a bucket to add to bag
+
+  -b, --bagId=bagId
+      (required) Bag ID. Format: {bag_type}:{sub_type}:{id}.
+           - Bag types: 'static', 'dynamic'
+           - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
+           - Id:
+             - absent for 'static:council'
+             - working group name for 'static:wg'
+             - integer for 'dynamic:member' and 'dynamic:channel'
+           Examples:
+           - static:council
+           - static:wg:storage
+           - dynamic:member:4
+
+  -c, --configPath=configPath
+      [default: ./config.yml] Path to config JSON/YAML file (relative to current working directory)
+
+  -f, --familyId=familyId
+      (required) ID of the distribution bucket family
+
+  -r, --remove=remove
+      [default: ] ID of a bucket to remove from bag
+
+  -y, --yes
+      Answer "yes" to any prompt, skipping any manual confirmations
+
+EXAMPLE
+  $ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 -a 2 -a 3 -r 4 -r 5
+```
+
+_See code: [src/commands/leader/update-bag.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bag.ts)_
+
+## `joystream-distributor leader:update-bucket-mode`
+
+Update distribution bucket mode ("distributing" flag). Requires distribution working group leader permissions.
+
+```
+Update distribution bucket mode ("distributing" flag). Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:update-bucket-mode
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -d, --mode=(on|off)          (required) Whether the bucket should be "on" (distributing) or "off" (not distributing)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/update-bucket-mode.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bucket-mode.ts)_
+
+## `joystream-distributor leader:update-bucket-status`
+
+Update distribution bucket status ("acceptingNewBags" flag). Requires distribution working group leader permissions.
+
+```
+Update distribution bucket status ("acceptingNewBags" flag). Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:update-bucket-status
+
+OPTIONS
+  -B, --bucketId=bucketId       (required) Distribution bucket id
+  -a, --acceptingBags=(yes|no)  (required) Whether the bucket should accept new bags
+
+  -c, --configPath=configPath   [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                                directory)
+
+  -f, --familyId=familyId       (required) Distribution bucket family id
+
+  -y, --yes                     Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bucket-status.ts)_
+
+## `joystream-distributor leader:update-dynamic-bag-policy`
+
+Update dynamic bag creation policy (number of buckets by family that should store given dynamic bag type).
+
+```
+Update dynamic bag creation policy (number of buckets by family that should store given dynamic bag type).
+    Requires distribution working group leader permissions.
+
+USAGE
+  $ joystream-distributor leader:update-dynamic-bag-policy
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -p, --policy=policy          Key-value pair of {familyId}:{numberOfBuckets}
+
+  -t, --type=(Member|Channel)  (required) Dynamic bag type
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires distribution working group leader permissions.
+
+EXAMPLE
+  $ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 -p 2:10 -p 3:5
+```
+
+_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-dynamic-bag-policy.ts)_

+ 69 - 0
distributor-node/docs/commands/operator.md

@@ -0,0 +1,69 @@
+`joystream-distributor operator`
+================================
+
+Commands for performing node operator (Distribution Working Group worker) on-chain duties (like accepting bucket invitations, setting node metadata)
+
+* [`joystream-distributor operator:accept-invitation`](#joystream-distributor-operatoraccept-invitation)
+* [`joystream-distributor operator:set-metadata`](#joystream-distributor-operatorset-metadata)
+
+## `joystream-distributor operator:accept-invitation`
+
+Accept pending distribution bucket operator invitation.
+
+```
+Accept pending distribution bucket operator invitation.
+  Requires the invited distribution group worker role key.
+
+USAGE
+  $ joystream-distributor operator:accept-invitation
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires the invited distribution group worker role key.
+```
+
+_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/accept-invitation.ts)_
+
+## `joystream-distributor operator:set-metadata`
+
+Set/update distribution bucket operator metadata.
+
+```
+Set/update distribution bucket operator metadata.
+  Requires active distribution bucket operator worker role key.
+
+USAGE
+  $ joystream-distributor operator:set-metadata
+
+OPTIONS
+  -B, --bucketId=bucketId      (required) Distribution bucket id
+
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -e, --endpoint=endpoint      Root distribution node endpoint
+
+  -f, --familyId=familyId      (required) Distribution bucket family id
+
+  -i, --input=input            Path to JSON metadata file
+
+  -w, --workerId=workerId      (required) ID of the operator (distribution group worker)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+DESCRIPTION
+  Requires active distribution bucket operator worker role key.
+```
+
+_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/set-metadata.ts)_

+ 25 - 0
distributor-node/docs/commands/start.md

@@ -0,0 +1,25 @@
+`joystream-distributor start`
+=============================
+
+Start the node
+
+* [`joystream-distributor start`](#joystream-distributor-start)
+
+## `joystream-distributor start`
+
+Start the node
+
+```
+Start the node
+
+USAGE
+  $ joystream-distributor start
+
+OPTIONS
+  -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
+                               directory)
+
+  -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+```
+
+_See code: [src/commands/start.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/start.ts)_

+ 400 - 0
distributor-node/docs/node/index.md

@@ -0,0 +1,400 @@
+<!-- AUTO-GENERATED-CONTENT:START (TOC:firsth1=true) -->
+- [The API](#the-api)
+  - [Requesting assets](#requesting-assets)
+    - [Scenario 1 (cache hit)](#scenario-1-cache-hit)
+    - [Scenario 2 (pending)](#scenario-2-pending)
+      - [Scenario 2.1: No `Range` header was provided with a request or the `Range` start is `<= partiallyDownloadedContentSize`](#scenario-21-no-range-header-was-provided-with-a-request-or-the-range-start-is--partiallydownloadedcontentsize)
+      - [Scenario 2.2: `Range` header was provided with a request and `Range` start is `> partiallyDownloadedContentSize`](#scenario-22-range-header-was-provided-with-a-request-and-range-start-is--partiallydownloadedcontentsize)
+    - [Scenario 3 (cache miss)](#scenario-3-cache-miss)
+      - [Scenario 3.1: The requested data object is not found](#scenario-31-the-requested-data-object-is-not-found)
+      - [Scenario 3.2: The object is not distributed by the node](#scenario-32-the-object-is-not-distributed-by-the-node)
+      - [Scenario 3.3: The request is valid, the node needs to fetch the missing object](#scenario-33-the-request-is-valid-the-node-needs-to-fetch-the-missing-object)
+  - [Checking asset status](#checking-asset-status)
+  - [API limits](#api-limits)
+    - [Example Nginx configuration](#example-nginx-configuration)
+    - [System configuration](#system-configuration)
+- [Data fetching](#data-fetching)
+  - [Finding nearby storage nodes:](#finding-nearby-storage-nodes)
+  - [Data object fetching flow](#data-object-fetching-flow)
+- [Metadata](#metadata)
+  - [DistributionBucketFamilyMetadata](#distributionbucketfamilymetadata)
+    - [Geographical areas covered by the distribution bucket family](#geographical-areas-covered-by-the-distirbution-bucket-family)
+    - [Using latency tests for choosing a family](#using-latency-tests-for-choosing-a-family)
+  - [Distribution bucket operator metadata](#distribution-bucket-operator-metadata)
+- [State](#state)
+- [Caching](#caching)
+  - [Caching policy](#caching-policy)
+    - [LRU groups](#lru-groups)
+  - [Cache cleanup](#cache-cleanup)
+- [Logging](#logging)
+- [Query node integration](#query-node-integration)
+<!-- AUTO-GENERATED-CONTENT:END -->
+
+<a name="the-api"></a>
+
+# The API
+
+The Distributor Node exposes an HTTP api implemented with [ExpressJS](https://expressjs.com/).
+
+The api is described by an [OpenAPI](https://swagger.io/specification/) schema located at _[src/api-spec/openapi.yml](../../src/api-spec/openapi.yml)_
+
+**Current, detailed api documentation can be found [here](../api/index.md)**
+
+<a name="requesting-assets"></a>
+
+## Requesting assets
+
+The assets are requested from the distributor node by using a `GET` request to [`/asset/{objectId}`](../api/index.md#opIdpublic.asset) endpoint.
+
+There are multiple scenarios of how a distributor will act upon that request, depending on its current state:
+
+<a name="scenario-1"></a>
+
+### Scenario 1 (cache hit)
+
+**The requested data object is already available in the distributor node's filesystem (cache)**
+
+In this case:
+- Object's [LRU-SP cache state](#caching-policy) is updated
+- The [`send`](https://www.npmjs.com/package/send) library is used to handle the request and serve the object. The library supports, among others, partial responses (`Ranges`) and conditional-GET negotiation (`If-Match`, `If-Unmodified-Since`, `If-None-Match`, `If-Modified-Since`).
+- `cache-control: max-age` is set to `31536000` (one year), which is a common practice for informing the browser that the object can essentially be cached "forever" (minimizing the number of requests for the same data object)
+- `x-cache: hit` and `x-data-source: local` headers are sent, providing the client detailed information about the triggered scenario (see: [_public.assets Responses_](../api/index.md#public.asset-responses)).
+
+<a name="scenario-2"></a>
+
+### Scenario 2 (pending)
+
+**The object is not yet cached, but is currently being fetched from the storage node**
+
+In this case `cache-control: max-age` is set to a substantially lower value (currently `180`), as the distributor node cannot yet confirm whether the object being fetched is indeed valid.
+
+<a name="scenario-2-1"></a>
+
+#### Scenario 2.1: No `Range` header was provided with a request or the `Range` start is `<= partiallyDownloadedContentSize`
+
+In this case:
+
+- The data is streamed into the response from the local, partially downloaded file. All the data that gets written into the local file, as it's being downloaded from the storage node, is being simultaneously read from the file (using a small interval) and immediately pushed into the http response.
+- `x-cache: pending` and `x-data-source: local` headers are sent, providing the client detailed information about the triggered scenario (see: [_public.assets Responses_](../api/index.md#public.asset-responses)).
+
+<a name="scenario-2-2"></a>
+
+#### Scenario 2.2: `Range` header was provided with a request and `Range` start is `> partiallyDownloadedContentSize`
+
+In this case streaming the response from partially downloaded file, like in the scenario above, may cause unnecessary delay, because the requested `Range` may target the very end of the file (which will only be available locally once the entire data object is fetched). That's why in this case:
+- The request is forwarded to the storage node (that the data object is currently being downloaded from) via [express-http-proxy](https://www.npmjs.com/package/express-http-proxy)
+- `x-cache: pending` and `x-data-source: external` headers are sent, providing the client detailed information about the triggered scenario (see: [_public.assets Responses_](../api/index.md#public.asset-responses)).
+
+<a name="scenario-3"></a>
+### Scenario 3 (cache miss)
+
+In this case the distributor node is making an additional request to the query node in order to fetch details of the requested object, including:
+- content hash,
+- object size,
+- storage buckets assigned to store the object,
+- distribution buckets assigned to distribute the object
+
+It then proceeds to one of the following scenarios:
+
+<a name="scenario-3-1"></a>
+
+#### Scenario 3.1: The requested data object is not found
+
+Node responds with `HTTP 404 (Not Found)` and a message
+
+<a name="scenario-3-2"></a>
+
+#### Scenario 3.2: The object is not distributed by the node
+
+Node responds with `HTTP 421 (Misdirected Request)` and a message
+
+<a name="scenario-3-3"></a>
+
+#### Scenario 3.3: The request is valid, the node needs to fetch the missing object
+
+In this case
+- The process of fetching the data object from storage node described in the [Data fetching](#data-fetching) section below is triggered.
+- Once the storage node from which the object is going to be fetched is chosen, the request is handled in a way analogous to the one described in [Scenario 2](#scenario-2), with the exception that `x-cache: miss` header will be sent instead of `x-cache: pending`.
+
+<a name="checking-asset-status"></a>
+
+## Checking asset status
+
+It is possible to check an asset status without affecting the distributor node state in any way (for example - by triggering the process of [fetching the missing data object](#data-fetching)), by sending a [`HEAD` request to `/asset/{objectId}`](../api/index.md#opIdpublic.assetHead) endpoint.
+
+If the request is valid, the node will respond with, among others, the `x-cache`, `content-length`, `cache-control` headers.
+
+In case the request is invalid, the node will respond with the same status code it would in case of an invalid `GET` request.
+
+<a name="api-limits"></a>
+
+## API limits
+
+There are no rate / connection limits on incoming requests enforced by the node, it is therefore recommended to use a firewall or reverse proxy in order to protect the node from DOS/DDOS attacks.
+
+The outbound connections (from distributor node to storage nodes) however can be limited with [`limits`](../schema/definition-properties-limits.md) configuration settings.
+
+<a name="example-nginx-configuration"></a>
+
+### Example Nginx configuration
+
+```
+upstream distributor {
+    server 127.0.0.1:3334;
+}
+
+http {
+  # Create a conn_perip zone that will keep track of concurrent connections by ip
+  limit_conn_zone $binary_remote_addr zone=conn_perip:10m;
+
+  server {
+    server_name example-distributor-node;
+    listen 443;
+
+    # Limit to max 20 connections per ip at a time
+    limit_conn conn_perip 20;
+
+    location / {
+      proxy_pass http://distributor/;
+      proxy_http_version 1.1;
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
+      proxy_set_header Host $http_host;
+
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto http;
+      proxy_set_header X-Nginx-Proxy true;
+
+      proxy_redirect off;
+    }
+
+    # SSL and other configuration...
+  }
+}
+```
+
+Because Nginx does not support [HTTP pipelining](https://en.wikipedia.org/wiki/HTTP_pipelining), by limiting the number of concurrent connections per ip we also limit the number of data objects that can be concurrently fetched from the distributor node by a single IP.
+
+Having in mind that [most browsers will not make more than 6 concurrent connections](https://docs.pushtechnology.com/cloud/latest/manual/html/designguide/solution/support/connection_limitations.html), the limit of `20` concurrent connections per ip should be more than sufficient.
+
+<a name="system-configuration"></a>
+
+### System configuration
+
+When configuring the limits, keep in mind that a lot of simultaneous connections may also cause some OS limits to be hit.
+
+For example, the default limit of file descriptors a single process can open on Linux systems is `1024`. If left unchanged, this limit can easily cause problems, as this means only `1024` connections can be handled concurrently. In reality this number will be much lower for distributor node, because:
+- Each connection will require 1 file descriptor for a socket
+- Each incoming connection will most likely require an asset (data object) file to be accessed, which will take another descriptor,
+- Each incoming connection may trigger many outbound connections (see [Data fetching](#data-fetching) section below) in case of cache miss, in worst case taking over 10 more descriptors
+
+For Linux users it is recommended to either run the distributor node using the docker image, which already has high limits set, or [modify the max open file descriptors limit manually](https://docs.oracle.com/cd/E19623-01/820-6168/file-descriptor-requirements.html)
+
+<a name="data-fetching"></a>
+
+# Data fetching
+
+<a name="finding-nearby-storage-nodes"></a>
+
+## Finding nearby storage nodes:
+
+In order to limit the number of requests being made on cache miss and the time it takes to fetch a new object [in this scenario](#scenario-3), the distributor node needs to keep track of how quickly (on average) the currently available storage nodes are responding to requests.
+
+This can be partially solved by making use of the on-chain metadata provided by storage node operators, which may include details about the node location (see [Metadata](#metadata) section) that can provide some estimation of which nodes will likely respond faster. However, because this approach is quite limited and it's possible that most storage providers will choose not to expose their node location, the distributor node instead uses a different approach to find nearby nodes.
+
+Currently the distributor node periodically (every [`intervals.checkStorageNodeResponseTimes`](../schema/definition-properties-intervals.md#checkstoragenoderesponsetimes) seconds) fetches all active storage provider endpoints (from the query node) and measures their average response times to `/status/version` requests. This is done independently of any incoming requests. The "response time check" requests are queued using a relatively small concurrency limit (10) in order to make the cost of this operation minimal.
+
+This provides a pretty good estimation on which nodes will likely be the best candidates for fetching data objects during a cache miss, it also allows filtering-out storage nodes that don't respond at all or respond with an error.
+
+<a name="data-object-fetching-flow"></a>
+
+## Data object fetching flow
+
+During the [cache miss scenario (`Scenario 3.3`)](#scenario-3-3), the following tasks are executed:
+
+First, the endpoints of all storage providers that are supposed to store the given object are ordered by the mean response time using the last 10 response times (the process of obtaining those measurements is described in the [previous section](#finding-nearby-storage-nodes))
+
+The `HEAD /files/{objectId}` requests are then sent to the storage endpoints, starting from the ones with lowest mean response time. Those initial requests are only meant to determine whether a given storage node can indeed serve the object. In fact, all those requests are put (in the specified order) in the `availabilityCheckQueue` which then executes them with a constant maximum concurrency (`10` at the time of writing).
+
+As soon as any storage node confirms the availability of the object, the `availabilityCheckQueue` is temporarily stopped and `GET /files/{objectId}` request is made to fetch the full data from the selected provider. Because the distributor node uses `Connection: keep-alive` headers when sending requests to storage nodes, there's no need to re-establish a TCP connection at this point, which can save a considerable amount of time. If other storage providers confirm the availability of the object during this time, other `GET` requests will be added to `objectDownloadQueue` (which uses a concurrency of 1), allowing the distributor node to instantly try a different provider in case the first `GET` request fails. The process continues until a storage node that responds with `HTTP 200` to a `GET` request is found.
+
+Once some storage node responds with `HTTP 200` and starts streaming the data, all other requests related to that data object are stopped and the distributor node begins to write the data into its filesystem. Any errors at this point (unexpected data size, stream errors) will mean that the fetching process has failed, causing the data object and any related state to be dropped and the whole process of fetching the object to potentially be re-tried upon another request.
+
+<a name="metadata"></a>
+
+# Metadata
+
+The documentation of current storage&distribution system on-chain metadata standard can be found [here](../../../metadata-protobuf/doc/index.md#proto/Storage.proto)
+
+[Distributor node metadata](#distribution-bucket-operator-metadata) can be set using [`operator:set-metadata`](../commands/operator.md#joystream-distributor-operatorset-metadata) command in Distributor Node CLI.
+
+[Distribution family metadata](#distribution-bucket-family-metadata) can be set using [`leader:set-bucket-family-metadata`](../commands/leader.md#joystream-distributor-leaderset-bucket-family-metadata)
+
+Once set, the metadata can be accessed from the Query Node with a GraphQL query like, for example:
+```graphql
+query {
+  distributionBuckets {
+    family {
+      metadata {
+        region
+        description
+        latencyTestTargets
+        areas {
+          area {
+            __typename
+            ...on GeographicalAreaCountry {
+              countryCode: code
+            }
+            ...on GeographicalAreaContinent {
+              continentCode: code
+            }
+            ...on GeographicalAreaSubdivistion {
+              subdivisionCode: code
+            }
+          }
+        }
+      }
+    }
+    operators {
+      metadata {
+        nodeEndpoint
+        nodeLocation {
+          countryCode
+          coordinates {
+            latitude
+            longitude
+          }
+        }
+        extra
+      }
+    }
+  }
+}
+```
+
+<a name="distribution-bucket-family-metadata"></a>
+
+## DistributionBucketFamilyMetadata
+
+The main purpose of distribution family metadata is to help client (frontend) applications find out which distribution nodes should be preferred when fetching assets.
+
+Although each node operator may choose to expose its own node location in the [DistributionBucketOperatorMetadata](#distribution-bucket-operator-metadata), it is generally assumed that all nodes belonging to a given family will have a good-enough latency in the region covered by this family, so they can be treated more-or-less equally.
+
+What exactly constitutes a `region` in the `DistributionBucketFamilyMetadata` is not strictly enforced and the current metadata standard remains quite flexible in that regard.
+
+<a name="geographical-areas-covered-by-the-distirbution-bucket-family"></a>
+
+### Geographical areas covered by the distribution bucket family
+
+Initially, as the number of distribution nodes will probably be limited, a region can mean a relatively large geographic area (ie. a continent or part of a continent). Later, as the network grows, the region may mean a single country / subdivision or a small set of nearby countries / subdivisions.
+
+In order to support all those cases, the `areas` field in the `DistributionBucketFamilyMetadata` allows specifying either one or multiple geographical areas covered by the family, where each area can be either:
+- a continent uniquely identified by `Continent` enum value, _or_
+- a country uniquely identified by [`ISO-3166-1 alpha-2`](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) country code, _or_
+- a subdivision (for example, a state) uniquely identified by [`ISO_3166-2`](https://en.wikipedia.org/wiki/ISO_3166-2) subdivision code
+
+There are multiple ways client applications may be able to determine most suitable regions:
+
+- Using [`HTML5 geolocation API`](https://developer.mozilla.org/en-US/docs/Web/API/Geolocation_API) and reverse geocoding (which can be done either using a local dataset, custom backend or an external service)
+- using GeoDNS or a backend service to establish the approximate location before rendering the interface
+- Prompting the user to manually provide the preferred region
+
+<a name="using-latency-tests-for-choosing-a-family"></a>
+
+### Using latency tests for choosing a family
+
+Another way to choose the most appropriate region may be to perform an initial latency check by pinging endpoints that are supposed to give the most representative results for given family (for example, https://www.cloudping.info/ can perform such measurements using endpoints that represent AWS regions).
+
+In order to facilitate this, `latency_test_targets` field in the `DistributionBucketFamilyMetadata` allows specifying the list of representative ips / hosts to be used for such measurements. Alternatively a chosen set of distribution nodes themselves may also be used.
+
+<a name="distribution-bucket-operator-metadata"></a>
+
+## Distribution bucket operator metadata
+
+The most essential part of `DistributionBucketOperatorMetadata` is the node API root endpoint. It must be provided by all active node operators, otherwise no app will be able to access the node.
+
+The node operator may optionally choose to expose more details about the node, like specific `location` metadata or some additional `extra` information.
+
+<a name="state"></a>
+
+# State
+
+The distributor node state is divided into memory state (recreated on startup) and persistent state (stored in filesystem).
+
+Most of the state is managed via an "intermediary" service called [`StateCacheService`](../../src/services/cache/StateCacheService.ts). This is to facilitate the potential migration to other state management approaches, like using `Redis` in the future. Currently the service automatically saves the persistent state to the filesystem every [`intervals.saveCacheState`](../schema/definition-properties-intervals.md#savecachestate) seconds. It also tries to save the state every time the node is exiting.
+
+The current state includes:
+
+**Memory state**
+- `pendingDownloadsByObjectId` map - stores information about currently pending downloads (data object fetching attempts). Each pending download can be in one of the following states:
+  - `Waiting` - in case [`limits.maxConcurrentStorageNodeDownloads`](../schema/definition-properties-limits.md#maxconcurrentstoragenodedownloads) limit is reached, this is the status of pending downloads that are waiting in the queue. It is also the initial status of all pending downloads in general.
+  - `LookingForSource` - the process of looking for a storage node that is able to serve the asset has started, but the source node has not yet been chosen.
+  - `Downloading` - the source storage node has been chosen and the data object is being downloaded.
+- `storageNodeEndpointDataByEndpoint` map - currently stores the last 10 average mean response times mapped by storage nodes endpoints (see: [_Finding nearby storage nodes_](#finding-nearby-storage-nodes))
+- `groupNumberByObjectId` map - stores the LRU-SP cache group number (see: [_Caching policy_](#caching-policy)) of each cached data object.
+
+**Persistent state**
+- `lruCacheGroups` - list of LRU-SP cache groups. Each LRU group contains a map of cached data object details (size, popularity, last access time) required to calculate its `cost` parameter (see: [_Caching policy_](#caching-policy))
+- `mimeTypeByObjectId` map - stores the `mimeType` (as determined by the distributor node) of each cached data object
+
+<a name="caching"></a>
+
+# Caching
+
+<a name="caching-policy"></a>
+
+## Caching policy
+
+The caching policy used for the data objects stored by the distributor node is called **[`LRU-SP`](http://www.is.kyusan-u.ac.jp/~chengk/pub/papers/compsac00_A07-07.pdf)**.
+
+This caching policy was designed specifically for the web and it takes into account the following 3 properties of a data object:
+- object size (`s`)
+- object popularity (number of times it was requested while being cached) (`p`)
+- time elapsed since the object was last requested (`t`)
+
+The cost function of a cache item is described by the formula: `t * s / p`.
+Objects with highest cost are the first to be evicted in case [`limits.storage`](../schema/definition-properties-limits.md#storage) limit is reached.
+
+<a name="lru-groups"></a>
+
+### LRU groups
+
+For efficiency, the cache is divided into `LRU` ([_Least recently used_](https://en.wikipedia.org/wiki/Page_replacement_algorithm#Least_recently_used)) sets (groups) such that all objects in a given group share the same integer value of `log2(s / p)`. In the current distributor node implementation, the unit used for `s` (object size) is `KB` (kilobytes). This means that if we have 24 LRU groups and assume `p = 1` (_popularity = 1_) for all objects, first LRU group will contain objects of size `1 - 2 KB`, second one `2 - 4 KB` etc. up until 24-th group with objects of size `8 - 16 GB` (or `2^23 KB - 2^24 KB`).
+
+When the object is being requested, we're incrementing its `p` and checking the current value of `log2(s / p)`. Then we're calling `SetA.delete(object)` + `SetB.add(object)` (either moving the item to a different LRU set based on current `log2(s / p)`, in which case `SetA` !== `SetB`, or just moving the item to the "top" of the current set, in which case `SetA` === `SetB`). All of those operations are very quick and don't require any costly iterations.
+
+In order to find the best eviction candidate, we're taking the "bottom" item from each LRU set and then choose an element with lowest `t * s / p` (which is also a low-cost operation, considering we need only ~24 LRU groups)
+
+<a name="cache-cleanup"></a>
+
+## Cache cleanup
+
+No-longer-distributed data objects are dropped from the cache periodically every [`intervals.cacheCleanup`](../schema/definition-properties-intervals.md#cachecleanup) seconds. During this time the distributor node will fetch all its current on-chain obligations using the query node and drop any objects that are part of the cache but not part of the obligations from both the cache state and filesystem.
+
+<a name="logging"></a>
+
+# Logging
+
+The distributor node supports detailed logging with [winston](https://www.npmjs.com/package/winston) library. [NPM log levels](https://www.npmjs.com/package/winston#logging-levels) are used to specify the log priority.
+
+The logs can be directed to some of the 3 available outputs, depending on the [`log`](../schema/definition-properties-log.md) configuration settings:
+- console
+- a log file inside [`directories.logs`](../schema/definition-properties-directories.md#logs)
+- an elasticsearch endpoint specified via [`endpoints.elasticsearch`](../schema/definition-properties-endpoints.md#elasticsearch)
+
+# Query node integration
+
+Because the distributor node is making requests to a query node:
+- on [cache miss](#scenario-3)
+- periodically, for [cache cleanup](#cache-cleanup) purposes
+
+In order to achieve the best performance it is recommended to either run the query-node processor and graphql server on the same machine the distributor node will be running on, or use a query node endpoint that can be accessed with a minimal latency.
+
+Taking the [docker-compose.yml](../../docker-compose.yml) example, the services that could be run on the same machine may include:
+- `db`
+- `processor`
+- `graphql-server`
+- `distributor-node`
+
+The `INDEXER_ENDPOINT_URL` can point to a completely external indexer endpoint, as the latency between `processor` and `indexer` is less of an issue in this case.

+ 11 - 0
distributor-node/docs/schema/definition-properties-buckets-oneof-all-buckets.md

@@ -0,0 +1,11 @@
+## 1 Type
+
+`string` ([All buckets](definition-properties-buckets-oneof-all-buckets.md))
+
+## 1 Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value   | Explanation |
+| :------ | :---------- |
+| `"all"` |             |

+ 7 - 0
distributor-node/docs/schema/definition-properties-buckets-oneof-bucket-ids-items.md

@@ -0,0 +1,7 @@
+## items Type
+
+`integer`
+
+## items Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `0`

+ 7 - 0
distributor-node/docs/schema/definition-properties-buckets-oneof-bucket-ids.md

@@ -0,0 +1,7 @@
+## 0 Type
+
+`integer[]`
+
+## 0 Constraints
+
+**minimum number of items**: the minimum number of items for this array is: `1`

+ 9 - 0
distributor-node/docs/schema/definition-properties-buckets.md

@@ -0,0 +1,9 @@
+## buckets Type
+
+merged type ([Details](definition-properties-buckets.md))
+
+one (and only one) of
+
+*   [Bucket ids](definition-properties-buckets-oneof-bucket-ids.md "check type definition")
+
+*   [All buckets](definition-properties-buckets-oneof-all-buckets.md "check type definition")

+ 3 - 0
distributor-node/docs/schema/definition-properties-directories-properties-assets.md

@@ -0,0 +1,3 @@
+## assets Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-directories-properties-cachestate.md

@@ -0,0 +1,3 @@
+## cacheState Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-directories-properties-logs.md

@@ -0,0 +1,3 @@
+## logs Type
+
+`string`

+ 65 - 0
distributor-node/docs/schema/definition-properties-directories.md

@@ -0,0 +1,65 @@
+## directories Type
+
+`object` ([Details](definition-properties-directories.md))
+
+# directories Properties
+
+| Property                  | Type     | Required | Nullable       | Defined by                                                                                                                                             |
+| :------------------------ | :------- | :------- | :------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [assets](#assets)         | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-directories-properties-assets.md "undefined#/properties/directories/properties/assets")         |
+| [cacheState](#cachestate) | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-directories-properties-cachestate.md "undefined#/properties/directories/properties/cacheState") |
+| [logs](#logs)             | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-directories-properties-logs.md "undefined#/properties/directories/properties/logs")             |
+
+## assets
+
+Path to a directory where all the cached assets will be stored
+
+`assets`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-directories-properties-assets.md "undefined#/properties/directories/properties/assets")
+
+### assets Type
+
+`string`
+
+## cacheState
+
+Path to a directory where information about the current cache state will be stored (LRU-SP cache data, stored assets mime types etc.)
+
+`cacheState`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-directories-properties-cachestate.md "undefined#/properties/directories/properties/cacheState")
+
+### cacheState Type
+
+`string`
+
+## logs
+
+Path to a directory where logs will be stored if logging to a file was enabled (via `log.file`).
+
+`logs`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-directories-properties-logs.md "undefined#/properties/directories/properties/logs")
+
+### logs Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-endpoints-properties-elasticsearch.md

@@ -0,0 +1,3 @@
+## elasticSearch Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-endpoints-properties-joystreamnodews.md

@@ -0,0 +1,3 @@
+## joystreamNodeWs Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-endpoints-properties-querynode.md

@@ -0,0 +1,3 @@
+## queryNode Type
+
+`string`

+ 65 - 0
distributor-node/docs/schema/definition-properties-endpoints.md

@@ -0,0 +1,65 @@
+## endpoints Type
+
+`object` ([Details](definition-properties-endpoints.md))
+
+# endpoints Properties
+
+| Property                            | Type     | Required | Nullable       | Defined by                                                                                                                                                   |
+| :---------------------------------- | :------- | :------- | :------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [queryNode](#querynode)             | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-endpoints-properties-querynode.md "undefined#/properties/endpoints/properties/queryNode")             |
+| [joystreamNodeWs](#joystreamnodews) | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-endpoints-properties-joystreamnodews.md "undefined#/properties/endpoints/properties/joystreamNodeWs") |
+| [elasticSearch](#elasticsearch)     | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-endpoints-properties-elasticsearch.md "undefined#/properties/endpoints/properties/elasticSearch")     |
+
+## queryNode
+
+Query node graphql server uri (for example: <http://localhost:8081/graphql>)
+
+`queryNode`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-endpoints-properties-querynode.md "undefined#/properties/endpoints/properties/queryNode")
+
+### queryNode Type
+
+`string`
+
+## joystreamNodeWs
+
+Joystream node websocket api uri (for example: ws\://localhost:9944)
+
+`joystreamNodeWs`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-endpoints-properties-joystreamnodews.md "undefined#/properties/endpoints/properties/joystreamNodeWs")
+
+### joystreamNodeWs Type
+
+`string`
+
+## elasticSearch
+
+Elasticsearch uri used for submitting the distributor node logs (if enabled via `log.elastic`)
+
+`elasticSearch`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-endpoints-properties-elasticsearch.md "undefined#/properties/endpoints/properties/elasticSearch")
+
+### elasticSearch Type
+
+`string`

+ 7 - 0
distributor-node/docs/schema/definition-properties-id.md

@@ -0,0 +1,7 @@
+## id Type
+
+`string`
+
+## id Constraints
+
+**minimum length**: the minimum number of characters for this string is: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-intervals-properties-cachecleanup.md

@@ -0,0 +1,7 @@
+## cacheCleanup Type
+
+`integer`
+
+## cacheCleanup Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-intervals-properties-checkstoragenoderesponsetimes.md

@@ -0,0 +1,7 @@
+## checkStorageNodeResponseTimes Type
+
+`integer`
+
+## checkStorageNodeResponseTimes Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-intervals-properties-savecachestate.md

@@ -0,0 +1,7 @@
+## saveCacheState Type
+
+`integer`
+
+## saveCacheState Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 77 - 0
distributor-node/docs/schema/definition-properties-intervals.md

@@ -0,0 +1,77 @@
+## intervals Type
+
+`object` ([Details](definition-properties-intervals.md))
+
+# intervals Properties
+
+| Property                                                        | Type      | Required | Nullable       | Defined by                                                                                                                                                                               |
+| :-------------------------------------------------------------- | :-------- | :------- | :------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [saveCacheState](#savecachestate)                               | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-intervals-properties-savecachestate.md "undefined#/properties/intervals/properties/saveCacheState")                               |
+| [checkStorageNodeResponseTimes](#checkstoragenoderesponsetimes) | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-intervals-properties-checkstoragenoderesponsetimes.md "undefined#/properties/intervals/properties/checkStorageNodeResponseTimes") |
+| [cacheCleanup](#cachecleanup)                                   | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-intervals-properties-cachecleanup.md "undefined#/properties/intervals/properties/cacheCleanup")                                   |
+
+## saveCacheState
+
+How often, in seconds, will the cache state be saved in `directories.cacheState`. Independently of the specified interval, the node will always try to save cache state before exiting.
+
+`saveCacheState`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-intervals-properties-savecachestate.md "undefined#/properties/intervals/properties/saveCacheState")
+
+### saveCacheState Type
+
+`integer`
+
+### saveCacheState Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`
+
+## checkStorageNodeResponseTimes
+
+How often, in seconds, will the distributor node attempt to send requests to all current storage node endpoints in order to check how quickly they respond. The node will never make more than 10 such requests concurrently.
+
+`checkStorageNodeResponseTimes`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-intervals-properties-checkstoragenoderesponsetimes.md "undefined#/properties/intervals/properties/checkStorageNodeResponseTimes")
+
+### checkStorageNodeResponseTimes Type
+
+`integer`
+
+### checkStorageNodeResponseTimes Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`
+
+## cacheCleanup
+
+How often, in seconds, will the distributor node fetch data about all its distribution obligations from the query node and remove all the no-longer assigned data objects from local storage and cache state
+
+`cacheCleanup`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-intervals-properties-cachecleanup.md "undefined#/properties/intervals/properties/cacheCleanup")
+
+### cacheCleanup Type
+
+`integer`
+
+### cacheCleanup Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 3 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-json-backup-file-properties-keyfile.md

@@ -0,0 +1,3 @@
+## keyfile Type
+
+`string`

+ 27 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-json-backup-file.md

@@ -0,0 +1,27 @@
+## 2 Type
+
+`object` ([JSON backup file](definition-properties-keys-items-oneof-json-backup-file.md))
+
+# 2 Properties
+
+| Property            | Type     | Required | Nullable       | Defined by                                                                                                                                                                    |
+| :------------------ | :------- | :------- | :------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [keyfile](#keyfile) | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-keys-items-oneof-json-backup-file-properties-keyfile.md "undefined#/properties/keys/items/oneOf/2/properties/keyfile") |
+
+## keyfile
+
+
+
+`keyfile`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys-items-oneof-json-backup-file-properties-keyfile.md "undefined#/properties/keys/items/oneOf/2/properties/keyfile")
+
+### keyfile Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase-properties-mnemonic.md

@@ -0,0 +1,3 @@
+## mnemonic Type
+
+`string`

+ 21 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase-properties-type.md

@@ -0,0 +1,21 @@
+## type Type
+
+`string`
+
+## type Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"ed25519"` |             |
+| `"sr25519"` |             |
+| `"ecdsa"`   |             |
+
+## type Default Value
+
+The default value is:
+
+```json
+"sr25519"
+```

+ 64 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-mnemonic-phrase.md

@@ -0,0 +1,64 @@
+## 1 Type
+
+`object` ([Mnemonic phrase](definition-properties-keys-items-oneof-mnemonic-phrase.md))
+
+# 1 Properties
+
+| Property              | Type     | Required | Nullable       | Defined by                                                                                                                                                                     |
+| :-------------------- | :------- | :------- | :------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [type](#type)         | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-keys-items-oneof-mnemonic-phrase-properties-type.md "undefined#/properties/keys/items/oneOf/1/properties/type")         |
+| [mnemonic](#mnemonic) | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-keys-items-oneof-mnemonic-phrase-properties-mnemonic.md "undefined#/properties/keys/items/oneOf/1/properties/mnemonic") |
+
+## type
+
+
+
+`type`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys-items-oneof-mnemonic-phrase-properties-type.md "undefined#/properties/keys/items/oneOf/1/properties/type")
+
+### type Type
+
+`string`
+
+### type Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"ed25519"` |             |
+| `"sr25519"` |             |
+| `"ecdsa"`   |             |
+
+### type Default Value
+
+The default value is:
+
+```json
+"sr25519"
+```
+
+## mnemonic
+
+
+
+`mnemonic`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys-items-oneof-mnemonic-phrase-properties-mnemonic.md "undefined#/properties/keys/items/oneOf/1/properties/mnemonic")
+
+### mnemonic Type
+
+`string`

+ 3 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri-properties-suri.md

@@ -0,0 +1,3 @@
+## suri Type
+
+`string`

+ 21 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri-properties-type.md

@@ -0,0 +1,21 @@
+## type Type
+
+`string`
+
+## type Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"ed25519"` |             |
+| `"sr25519"` |             |
+| `"ecdsa"`   |             |
+
+## type Default Value
+
+The default value is:
+
+```json
+"sr25519"
+```

+ 64 - 0
distributor-node/docs/schema/definition-properties-keys-items-oneof-substrate-uri.md

@@ -0,0 +1,64 @@
+## 0 Type
+
+`object` ([Substrate uri](definition-properties-keys-items-oneof-substrate-uri.md))
+
+# 0 Properties
+
+| Property      | Type     | Required | Nullable       | Defined by                                                                                                                                                           |
+| :------------ | :------- | :------- | :------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [type](#type) | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-keys-items-oneof-substrate-uri-properties-type.md "undefined#/properties/keys/items/oneOf/0/properties/type") |
+| [suri](#suri) | `string` | Required | cannot be null | [Distributor node configuration](definition-properties-keys-items-oneof-substrate-uri-properties-suri.md "undefined#/properties/keys/items/oneOf/0/properties/suri") |
+
+## type
+
+
+
+`type`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys-items-oneof-substrate-uri-properties-type.md "undefined#/properties/keys/items/oneOf/0/properties/type")
+
+### type Type
+
+`string`
+
+### type Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"ed25519"` |             |
+| `"sr25519"` |             |
+| `"ecdsa"`   |             |
+
+### type Default Value
+
+The default value is:
+
+```json
+"sr25519"
+```
+
+## suri
+
+
+
+`suri`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys-items-oneof-substrate-uri-properties-suri.md "undefined#/properties/keys/items/oneOf/0/properties/suri")
+
+### suri Type
+
+`string`

+ 11 - 0
distributor-node/docs/schema/definition-properties-keys-items.md

@@ -0,0 +1,11 @@
+## items Type
+
+merged type ([Details](definition-properties-keys-items.md))
+
+one (and only one) of
+
+*   [Substrate uri](definition-properties-keys-items-oneof-substrate-uri.md "check type definition")
+
+*   [Mnemonic phrase](definition-properties-keys-items-oneof-mnemonic-phrase.md "check type definition")
+
+*   [JSON backup file](definition-properties-keys-items-oneof-json-backup-file.md "check type definition")

+ 7 - 0
distributor-node/docs/schema/definition-properties-keys.md

@@ -0,0 +1,7 @@
+## keys Type
+
+an array of merged types ([Details](definition-properties-keys-items.md))
+
+## keys Constraints
+
+**minimum number of items**: the minimum number of items for this array is: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-limits-properties-maxconcurrentoutboundconnections.md

@@ -0,0 +1,7 @@
+## maxConcurrentOutboundConnections Type
+
+`integer`
+
+## maxConcurrentOutboundConnections Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-limits-properties-maxconcurrentstoragenodedownloads.md

@@ -0,0 +1,7 @@
+## maxConcurrentStorageNodeDownloads Type
+
+`integer`
+
+## maxConcurrentStorageNodeDownloads Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 7 - 0
distributor-node/docs/schema/definition-properties-limits-properties-outboundrequeststimeout.md

@@ -0,0 +1,7 @@
+## outboundRequestsTimeout Type
+
+`integer`
+
+## outboundRequestsTimeout Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 13 - 0
distributor-node/docs/schema/definition-properties-limits-properties-storage.md

@@ -0,0 +1,13 @@
+## storage Type
+
+`string`
+
+## storage Constraints
+
+**pattern**: the string must match the following regular expression: 
+
+```regexp
+^[0-9]+(B|K|M|G|T)$
+```
+
+[try pattern](https://regexr.com/?expression=%5E%5B0-9%5D%2B\(B%7CK%7CM%7CG%7CT\)%24 "try regular expression with regexr.com")

+ 106 - 0
distributor-node/docs/schema/definition-properties-limits.md

@@ -0,0 +1,106 @@
+## limits Type
+
+`object` ([Details](definition-properties-limits.md))
+
+# limits Properties
+
+| Property                                                                | Type      | Required | Nullable       | Defined by                                                                                                                                                                                 |
+| :---------------------------------------------------------------------- | :-------- | :------- | :------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [storage](#storage)                                                     | `string`  | Required | cannot be null | [Distributor node configuration](definition-properties-limits-properties-storage.md "undefined#/properties/limits/properties/storage")                                                     |
+| [maxConcurrentStorageNodeDownloads](#maxconcurrentstoragenodedownloads) | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-limits-properties-maxconcurrentstoragenodedownloads.md "undefined#/properties/limits/properties/maxConcurrentStorageNodeDownloads") |
+| [maxConcurrentOutboundConnections](#maxconcurrentoutboundconnections)   | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-limits-properties-maxconcurrentoutboundconnections.md "undefined#/properties/limits/properties/maxConcurrentOutboundConnections")   |
+| [outboundRequestsTimeout](#outboundrequeststimeout)                     | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-limits-properties-outboundrequeststimeout.md "undefined#/properties/limits/properties/outboundRequestsTimeout")                     |
+
+## storage
+
+Maximum total size of all (cached) assets stored in `directories.assets`
+
+`storage`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-limits-properties-storage.md "undefined#/properties/limits/properties/storage")
+
+### storage Type
+
+`string`
+
+### storage Constraints
+
+**pattern**: the string must match the following regular expression: 
+
+```regexp
+^[0-9]+(B|K|M|G|T)$
+```
+
+[try pattern](https://regexr.com/?expression=%5E%5B0-9%5D%2B\(B%7CK%7CM%7CG%7CT\)%24 "try regular expression with regexr.com")
+
+## maxConcurrentStorageNodeDownloads
+
+Maximum number of concurrent downloads from the storage node(s)
+
+`maxConcurrentStorageNodeDownloads`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-limits-properties-maxconcurrentstoragenodedownloads.md "undefined#/properties/limits/properties/maxConcurrentStorageNodeDownloads")
+
+### maxConcurrentStorageNodeDownloads Type
+
+`integer`
+
+### maxConcurrentStorageNodeDownloads Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`
+
+## maxConcurrentOutboundConnections
+
+Maximum number of total simultaneous outbound connections to storage node(s)
+
+`maxConcurrentOutboundConnections`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-limits-properties-maxconcurrentoutboundconnections.md "undefined#/properties/limits/properties/maxConcurrentOutboundConnections")
+
+### maxConcurrentOutboundConnections Type
+
+`integer`
+
+### maxConcurrentOutboundConnections Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`
+
+## outboundRequestsTimeout
+
+Timeout for all outbound storage node http requests in milliseconds
+
+`outboundRequestsTimeout`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-limits-properties-outboundrequeststimeout.md "undefined#/properties/limits/properties/outboundRequestsTimeout")
+
+### outboundRequestsTimeout Type
+
+`integer`
+
+### outboundRequestsTimeout Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `1`

+ 18 - 0
distributor-node/docs/schema/definition-properties-log-properties-console.md

@@ -0,0 +1,18 @@
+## console Type
+
+`string`
+
+## console Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |

+ 18 - 0
distributor-node/docs/schema/definition-properties-log-properties-elastic.md

@@ -0,0 +1,18 @@
+## elastic Type
+
+`string`
+
+## elastic Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |

+ 18 - 0
distributor-node/docs/schema/definition-properties-log-properties-file.md

@@ -0,0 +1,18 @@
+## file Type
+
+`string`
+
+## file Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |

+ 110 - 0
distributor-node/docs/schema/definition-properties-log.md

@@ -0,0 +1,110 @@
+## log Type
+
+`object` ([Details](definition-properties-log.md))
+
+# log Properties
+
+| Property            | Type     | Required | Nullable       | Defined by                                                                                                                       |
+| :------------------ | :------- | :------- | :------------- | :------------------------------------------------------------------------------------------------------------------------------- |
+| [file](#file)       | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-log-properties-file.md "undefined#/properties/log/properties/file")       |
+| [console](#console) | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-log-properties-console.md "undefined#/properties/log/properties/console") |
+| [elastic](#elastic) | `string` | Optional | cannot be null | [Distributor node configuration](definition-properties-log-properties-elastic.md "undefined#/properties/log/properties/elastic") |
+
+## file
+
+Minimum level of logs written to a file specified in `directories.logs`
+
+`file`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-log-properties-file.md "undefined#/properties/log/properties/file")
+
+### file Type
+
+`string`
+
+### file Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |
+
+## console
+
+Minimum level of logs outputted to a console
+
+`console`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-log-properties-console.md "undefined#/properties/log/properties/console")
+
+### console Type
+
+`string`
+
+### console Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |
+
+## elastic
+
+Minimum level of logs sent to elasticsearch endpoint specified in `endpoints.elasticSearch`
+
+`elastic`
+
+*   is optional
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-log-properties-elastic.md "undefined#/properties/log/properties/elastic")
+
+### elastic Type
+
+`string`
+
+### elastic Constraints
+
+**enum**: the value of this property must be equal to one of the following values:
+
+| Value       | Explanation |
+| :---------- | :---------- |
+| `"error"`   |             |
+| `"warn"`    |             |
+| `"info"`    |             |
+| `"http"`    |             |
+| `"verbose"` |             |
+| `"debug"`   |             |
+| `"silly"`   |             |
+| `"off"`     |             |

+ 7 - 0
distributor-node/docs/schema/definition-properties-port.md

@@ -0,0 +1,7 @@
+## port Type
+
+`integer`
+
+## port Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `0`

+ 7 - 0
distributor-node/docs/schema/definition-properties-workerid.md

@@ -0,0 +1,7 @@
+## workerId Type
+
+`integer`
+
+## workerId Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `0`

+ 220 - 0
distributor-node/docs/schema/definition.md

@@ -0,0 +1,220 @@
+## Distributor node configuration Type
+
+`object` ([Distributor node configuration](definition.md))
+
+# Distributor node configuration Properties
+
+| Property                    | Type      | Required | Nullable       | Defined by                                                                                                 |
+| :-------------------------- | :-------- | :------- | :------------- | :--------------------------------------------------------------------------------------------------------- |
+| [id](#id)                   | `string`  | Required | cannot be null | [Distributor node configuration](definition-properties-id.md "undefined#/properties/id")                   |
+| [endpoints](#endpoints)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-endpoints.md "undefined#/properties/endpoints")     |
+| [directories](#directories) | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-directories.md "undefined#/properties/directories") |
+| [log](#log)                 | `object`  | Optional | cannot be null | [Distributor node configuration](definition-properties-log.md "undefined#/properties/log")                 |
+| [limits](#limits)           | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-limits.md "undefined#/properties/limits")           |
+| [intervals](#intervals)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-intervals.md "undefined#/properties/intervals")     |
+| [port](#port)               | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-port.md "undefined#/properties/port")               |
+| [keys](#keys)               | `array`   | Required | cannot be null | [Distributor node configuration](definition-properties-keys.md "undefined#/properties/keys")               |
+| [buckets](#buckets)         | Merged    | Required | cannot be null | [Distributor node configuration](definition-properties-buckets.md "undefined#/properties/buckets")         |
+| [workerId](#workerid)       | `integer` | Required | cannot be null | [Distributor node configuration](definition-properties-workerid.md "undefined#/properties/workerId")       |
+
+## id
+
+Node identifier used when sending elasticsearch logs and exposed on /status endpoint
+
+`id`
+
+*   is required
+
+*   Type: `string`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-id.md "undefined#/properties/id")
+
+### id Type
+
+`string`
+
+### id Constraints
+
+**minimum length**: the minimum number of characters for this string is: `1`
+
+## endpoints
+
+Specifies external endpoints that the distributor node will connect to
+
+`endpoints`
+
+*   is required
+
+*   Type: `object` ([Details](definition-properties-endpoints.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-endpoints.md "undefined#/properties/endpoints")
+
+### endpoints Type
+
+`object` ([Details](definition-properties-endpoints.md))
+
+## directories
+
+Specifies paths where node's data will be stored
+
+`directories`
+
+*   is required
+
+*   Type: `object` ([Details](definition-properties-directories.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-directories.md "undefined#/properties/directories")
+
+### directories Type
+
+`object` ([Details](definition-properties-directories.md))
+
+## log
+
+Specifies minimum log levels by supported log outputs
+
+`log`
+
+*   is optional
+
+*   Type: `object` ([Details](definition-properties-log.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-log.md "undefined#/properties/log")
+
+### log Type
+
+`object` ([Details](definition-properties-log.md))
+
+## limits
+
+Specifies node limits w\.r.t. storage, outbound connections etc.
+
+`limits`
+
+*   is required
+
+*   Type: `object` ([Details](definition-properties-limits.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-limits.md "undefined#/properties/limits")
+
+### limits Type
+
+`object` ([Details](definition-properties-limits.md))
+
+## intervals
+
+Specifies how often periodic tasks (for example cache cleanup) are executed by the node.
+
+`intervals`
+
+*   is required
+
+*   Type: `object` ([Details](definition-properties-intervals.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-intervals.md "undefined#/properties/intervals")
+
+### intervals Type
+
+`object` ([Details](definition-properties-intervals.md))
+
+## port
+
+Distributor node http server port
+
+`port`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-port.md "undefined#/properties/port")
+
+### port Type
+
+`integer`
+
+### port Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `0`
+
+## keys
+
+Specifies the keys available within distributor node CLI.
+
+`keys`
+
+*   is required
+
+*   Type: an array of merged types ([Details](definition-properties-keys-items.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-keys.md "undefined#/properties/keys")
+
+### keys Type
+
+an array of merged types ([Details](definition-properties-keys-items.md))
+
+### keys Constraints
+
+**minimum number of items**: the minimum number of items for this array is: `1`
+
+## buckets
+
+Specifies the buckets distributed by the node
+
+`buckets`
+
+*   is required
+
+*   Type: merged type ([Details](definition-properties-buckets.md))
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-buckets.md "undefined#/properties/buckets")
+
+### buckets Type
+
+merged type ([Details](definition-properties-buckets.md))
+
+one (and only one) of
+
+*   [Bucket ids](definition-properties-buckets-oneof-bucket-ids.md "check type definition")
+
+*   [All buckets](definition-properties-buckets-oneof-all-buckets.md "check type definition")
+
+## workerId
+
+ID of the node operator (distribution working group worker)
+
+`workerId`
+
+*   is required
+
+*   Type: `integer`
+
+*   cannot be null
+
+*   defined in: [Distributor node configuration](definition-properties-workerid.md "undefined#/properties/workerId")
+
+### workerId Type
+
+`integer`
+
+### workerId Constraints
+
+**minimum**: the value of this number must be greater than or equal to: `0`

+ 25 - 6
distributor-node/package.json

@@ -68,7 +68,10 @@
     "nyc": "^14",
     "openapi-typescript": "^4.0.2",
     "ts-node": "^8",
-    "typescript": "^3.3"
+    "typescript": "^3.3",
+    "@adobe/jsonschema2md": "https://github.com/adobe/jsonschema2md",
+    "widdershins": "^4.0.1",
+    "markdown-magic": "^2.5.2"
   },
   "engines": {
     "node": ">=14.16.1"
@@ -90,7 +93,18 @@
     "bin": "joystream-distributor",
     "plugins": [
       "@oclif/plugin-help"
-    ]
+    ],
+    "topics": {
+      "leader": {
+        "description": "Commands for performing Distribution Working Group leader on-chain duties (like setting distribution module limits and parameters, assigning bags and buckets etc.)"
+      },
+      "operator": {
+        "description": "Commands for performing node operator (Distribution Working Group worker) on-chain duties (like accepting bucket invitations, setting node metadata)"
+      },
+      "dev": {
+        "description":"Developer utility commands"
+      }
+    }
   },
   "repository": {
     "type": "git",
@@ -99,17 +113,22 @@
   },
   "scripts": {
     "postpack": "rm -f oclif.manifest.json",
-    "prepack": "rm -rf lib && tsc -b && oclif-dev manifest && oclif-dev readme",
+    "prepack": "rm -rf lib && tsc -b && oclif-dev manifest && generate:all",
     "test": "nyc --extension .ts mocha --forbid-only \"test/**/*.test.ts\"",
-    "version": "oclif-dev readme && git add README.md",
-    "generate:types:json-schema": "yarn ts-node ./src/services/validation/generateTypes.ts",
+    "version": "generate:docs:cli && git add docs/cli/*",
+    "generate:types:json-schema": "yarn ts-node ./src/schemas/scripts/generateTypes.ts",
     "generate:types:graphql": "yarn graphql-codegen -c ./src/services/networking/query-node/codegen.yml",
     "generate:types:openapi": "yarn openapi-typescript ./src/api-spec/openapi.yml -o ./src/types/generated/OpenApi.ts -c ../prettierrc.js",
     "generate:types:all": "yarn generate:types:json-schema && yarn generate:types:graphql && yarn generate:types:openapi",
     "generate:api:storage-node": "yarn openapi-generator-cli generate -i ../storage-node-v2/src/api-spec/openapi.yaml -g typescript-axios -o ./src/services/networking/storage-node/generated",
     "generate:api:distributor-node": "yarn openapi-generator-cli generate -i ./src/api-spec/openapi.yml -g typescript-axios -o ./src/services/networking/distributor-node/generated",
     "generate:api:all": "yarn generate:api:storage-node && yarn generate:api:distributor-node",
-    "generate:all": "yarn generate:types:all && yarn generate:api:all",
+    "generate:docs:cli": "yarn oclif-dev readme --multi --dir ./docs/commands",
+    "generate:docs:config": "yarn ts-node --transpile-only ./src/schemas/scripts/generateConfigDoc.ts",
+    "generate:docs:api": "yarn widdershins ./src/api-spec/openapi.yml --language_tabs javascript:JavaScript shell:Shell -o ./docs/api/index.md -u ./docs/api/templates",
+    "generate:docs:toc": "yarn md-magic --path ./docs/**/*.md",
+    "generate:docs:all": "yarn generate:docs:cli && yarn generate:docs:config && yarn generate:docs:api && yarn generate:docs:toc",
+    "generate:all": "yarn generate:types:all && yarn generate:api:all && yarn generate:docs:all",
     "build": "rm -rf lib && tsc --build tsconfig.json && cp -r ./src/api-spec ./lib/api-spec",
     "lint": "eslint ./src --ext .ts",
     "format": "prettier ./ --write",

+ 10 - 11
distributor-node/scripts/data/family-metadata.json

@@ -1,16 +1,15 @@
 {
   "region": "eu-west",
   "description": "Western Europe",
-  "boundary": [
-    { "longitude": 0.935664253776034, "latitude": 61.70157919955392 },
-    { "longitude": 7.077063962609969, "latitude": 37.40179586925884 },
-    { "longitude": 27.46754964469303, "latitude": 32.88770433956931 },
-    { "longitude": 40.68423960078124, "latitude": 48.39367044189657 },
-    { "longitude": 32.14019766910849, "latitude": 54.63502471598309 },
-    { "longitude": 28.56450578831937, "latitude": 59.09093283322235 },
-    { "longitude": 30.75892533489921, "latitude": 70.1670216697313 },
-    { "longitude": 19.2385951319647, "latitude": 73.4978175093038 },
-    { "longitude": -9.158590783812665, "latitude": 67.80006125371919 },
-    { "longitude": 0.935664253776034, "latitude": 61.70157919955392 }
+  "areas": [
+    { "countryCode": "AT" },
+    { "countryCode": "BE" },
+    { "countryCode": "FR" },
+    { "countryCode": "DE" },
+    { "countryCode": "LI" },
+    { "countryCode": "LU" },
+    { "countryCode": "MC" },
+    { "countryCode": "NL" },
+    { "countryCode": "CH" }
   ]
 }

+ 3 - 1
distributor-node/scripts/test-commands.sh

@@ -14,7 +14,9 @@ BUCKET_ID=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
 ${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_ID}
 ${CLI} leader:update-bag -b static:wg:storage -f ${FAMILY_ID} -a ${BUCKET_ID}
 ${CLI} leader:update-bag -b static:wg:content -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:operations -f ${FAMILY_ID} -a ${BUCKET_ID}
+${CLI} leader:update-bag -b static:wg:operationsAlpha -f ${FAMILY_ID} -a ${BUCKET_ID}
+${CLI} leader:update-bag -b static:wg:operationsBeta -f ${FAMILY_ID} -a ${BUCKET_ID}
+${CLI} leader:update-bag -b static:wg:operationsGamma -f ${FAMILY_ID} -a ${BUCKET_ID}
 ${CLI} leader:update-bag -b static:wg:gateway -f ${FAMILY_ID} -a ${BUCKET_ID}
 ${CLI} leader:update-bag -b static:wg:distribution -f ${FAMILY_ID} -a ${BUCKET_ID}
 ${CLI} leader:update-bucket-status -f ${FAMILY_ID} -B ${BUCKET_ID}  --acceptingBags yes

+ 2 - 2
distributor-node/src/api-spec/openapi.yml

@@ -56,7 +56,7 @@ paths:
       tags:
         - public
       parameters:
-        - $ref: '#components/parameters/ObjectId'
+        - $ref: '#/components/parameters/ObjectId'
       responses:
         200:
           description: Object is supported and should be send on GET request.
@@ -75,7 +75,7 @@ paths:
       tags:
         - public
       parameters:
-        - $ref: '#components/parameters/ObjectId'
+        - $ref: '#/components/parameters/ObjectId'
       responses:
         200:
           description: Full available object data sent

+ 42 - 19
distributor-node/src/app/index.ts

@@ -7,6 +7,7 @@ import { ServerService } from '../services/server/ServerService'
 import { Logger } from 'winston'
 import fs from 'fs'
 import nodeCleanup from 'node-cleanup'
+import { AppIntervals } from '../types/app'
 
 export class App {
   private config: ReadonlyConfig
@@ -16,19 +17,40 @@ export class App {
   private server: ServerService
   private logging: LoggingService
   private logger: Logger
+  private intervals: AppIntervals | undefined
 
   constructor(config: ReadonlyConfig) {
     this.config = config
     this.logging = LoggingService.withAppConfig(config)
     this.stateCache = new StateCacheService(config, this.logging)
-    this.content = new ContentService(config, this.logging, this.stateCache)
     this.networking = new NetworkingService(config, this.stateCache, this.logging)
+    this.content = new ContentService(config, this.logging, this.networking, this.stateCache)
     this.server = new ServerService(config, this.stateCache, this.content, this.logging, this.networking)
     this.logger = this.logging.createLogger('App')
   }
 
+  private setIntervals() {
+    this.intervals = {
+      saveCacheState: setInterval(() => this.stateCache.save(), this.config.intervals.saveCacheState * 1000),
+      checkStorageNodeResponseTimes: setInterval(
+        () => this.networking.checkActiveStorageNodeEndpoints(),
+        this.config.intervals.checkStorageNodeResponseTimes * 1000
+      ),
+      cacheCleanup: setInterval(() => this.content.cacheCleanup(), this.config.intervals.cacheCleanup * 1000),
+    }
+  }
+
+  private clearIntervals() {
+    if (this.intervals) {
+      Object.values(this.intervals).forEach((interval) => clearInterval(interval))
+    }
+  }
+
   private checkConfigDirectories(): void {
     Object.entries(this.config.directories).forEach(([name, path]) => {
+      if (path === undefined) {
+        return
+      }
       const dirInfo = `${name} directory (${path})`
       if (!fs.existsSync(path)) {
         try {
@@ -51,12 +73,17 @@ export class App {
   }
 
   public async start(): Promise<void> {
-    this.logger.info('Starting the app')
-    this.checkConfigDirectories()
-    this.stateCache.load()
-    const dataObjects = await this.networking.fetchSupportedDataObjects()
-    await this.content.startupInit(dataObjects)
-    this.server.start()
+    this.logger.info('Starting the app', { config: this.config })
+    try {
+      this.checkConfigDirectories()
+      this.stateCache.load()
+      await this.content.startupInit()
+      this.setIntervals()
+      this.server.start()
+    } catch (err) {
+      this.logger.error('Node initialization failed!', { err })
+      process.exit(-1)
+    }
     nodeCleanup(this.exitHandler.bind(this))
   }
 
@@ -65,11 +92,6 @@ export class App {
     // We can try to wait until some pending downloads are finished here etc.
     this.logger.info('Graceful exit initialized')
 
-    // Stop accepting any new requests and save cache
-    this.server.stop()
-    this.stateCache.clearInterval()
-    this.stateCache.saveSync()
-
     // Try to process remaining downloads
     const MAX_RETRY_ATTEMPTS = 3
     let retryCounter = 0
@@ -95,17 +117,18 @@ export class App {
   }
 
   private exitCritically(): void {
-    this.logger.info('Critical exit initialized')
-    // Handling exits due to an error - only some critical, synchronous work can be done here
-    this.server.stop()
-    this.stateCache.clearInterval()
-    this.stateCache.saveSync()
+    // Some additional synchronous work if required...
     this.logger.info('Critical exit finished')
   }
 
   private exitHandler(exitCode: number | null, signal: string | null): boolean | undefined {
-    this.logger.info('Exiting')
-    this.stateCache.clearInterval()
+    this.logger.info('Exiting...')
+    // Clear intervals
+    this.clearIntervals()
+    // Stop the server
+    this.server.stop()
+    // Save cache
+    this.stateCache.saveSync()
     if (signal) {
       // Async exit can be executed
       this.exitGracefully()

+ 3 - 0
distributor-node/src/command-base/ExitCodes.ts

@@ -2,5 +2,8 @@ enum ExitCodes {
   OK = 0,
   Error = 1,
   ApiError = 200,
+  InvalidInput = 400,
+  FileNotFound = 401,
+  InvalidFile = 402,
 }
 export = ExitCodes

+ 100 - 3
distributor-node/src/command-base/accounts.ts

@@ -3,6 +3,10 @@ import { AccountId } from '@polkadot/types/interfaces'
 import { Keyring } from '@polkadot/api'
 import { KeyringInstance, KeyringOptions, KeyringPair } from '@polkadot/keyring/types'
 import { CLIError } from '@oclif/errors'
+import ExitCodes from './ExitCodes'
+import fs from 'fs'
+import path from 'path'
+import inquirer from 'inquirer'
 
 export const DEFAULT_ACCOUNT_TYPE = 'sr25519'
 export const KEYRING_OPTIONS: KeyringOptions = {
@@ -15,6 +19,46 @@ export const KEYRING_OPTIONS: KeyringOptions = {
 export default abstract class AccountsCommandBase extends ApiCommandBase {
   private keyring!: KeyringInstance
 
+  fetchAccountFromJsonFile(jsonBackupFilePath: string): KeyringPair {
+    if (!fs.existsSync(jsonBackupFilePath)) {
+      throw new CLIError(`Keypair backup json file does not exist: ${jsonBackupFilePath}`, {
+        exit: ExitCodes.FileNotFound,
+      })
+    }
+    if (path.extname(jsonBackupFilePath) !== '.json') {
+      throw new CLIError(`Keypair backup json file is invalid: File extension should be .json: ${jsonBackupFilePath}`, {
+        exit: ExitCodes.InvalidFile,
+      })
+    }
+    let accountJsonObj: any
+    try {
+      accountJsonObj = require(jsonBackupFilePath)
+    } catch (e) {
+      throw new CLIError(`Keypair backup json file is invalid or cannot be accessed: ${jsonBackupFilePath}`, {
+        exit: ExitCodes.InvalidFile,
+      })
+    }
+    if (typeof accountJsonObj !== 'object' || accountJsonObj === null) {
+      throw new CLIError(`Keypair backup json file is is not valid: ${jsonBackupFilePath}`, {
+        exit: ExitCodes.InvalidFile,
+      })
+    }
+
+    const keyring = new Keyring()
+    let account: KeyringPair
+    try {
+      // Try adding and retrieving the keys in order to validate that the backup file is correct
+      keyring.addFromJson(accountJsonObj)
+      account = keyring.getPair(accountJsonObj.address)
+    } catch (e) {
+      throw new CLIError(`Keypair backup json file is is not valid: ${jsonBackupFilePath}`, {
+        exit: ExitCodes.InvalidFile,
+      })
+    }
+
+    return account
+  }
+
   isKeyAvailable(key: AccountId | string): boolean {
     return this.keyring.getPairs().some((p) => p.address === key.toString())
   }
@@ -32,13 +76,66 @@ export default abstract class AccountsCommandBase extends ApiCommandBase {
   }
 
   async getDecodedPair(key: string): Promise<KeyringPair> {
-    // Just for Joystream CLI compatibility currently
-    return this.getPair(key)
+    const pair = this.getPair(key)
+    return this.requestPairDecoding(pair)
+  }
+
+  async requestPairDecoding(pair: KeyringPair, message?: string): Promise<KeyringPair> {
+    // Skip if pair already unlocked
+    if (!pair.isLocked) {
+      return pair
+    }
+
+    // Try decoding using empty string
+    try {
+      pair.decodePkcs8('')
+      return pair
+    } catch (e) {
+      // Continue...
+    }
+
+    let isPassValid = false
+    while (!isPassValid) {
+      try {
+        const password = await this.promptForPassword(
+          message || `Enter ${pair.meta.name ? pair.meta.name : pair.address} account password`
+        )
+        pair.decodePkcs8(password)
+        isPassValid = true
+      } catch (e) {
+        this.warn('Invalid password... Try again.')
+      }
+    }
+
+    return pair
+  }
+
+  async promptForPassword(message = "Your account's password"): Promise<string> {
+    const { password } = await inquirer.prompt([
+      {
+        name: 'password',
+        type: 'password',
+        message,
+      },
+    ])
+
+    return password
   }
 
   initKeyring(): void {
     this.keyring = new Keyring(KEYRING_OPTIONS)
-    this.appConfig.keys.forEach((suri) => this.keyring.addFromUri(suri))
+    this.appConfig.keys.forEach((keyData) => {
+      if ('suri' in keyData) {
+        this.keyring.addFromUri(keyData.suri, undefined, keyData.type)
+      }
+      if ('mnemonic' in keyData) {
+        this.keyring.addFromMnemonic(keyData.mnemonic, undefined, keyData.type)
+      }
+      if ('keyfile' in keyData) {
+        const acc = this.fetchAccountFromJsonFile(keyData.keyfile)
+        this.keyring.addPair(acc)
+      }
+    })
   }
 
   async getDistributorLeadKey(): Promise<string> {

+ 1 - 1
distributor-node/src/command-base/api.ts

@@ -16,7 +16,7 @@ export default abstract class ApiCommandBase extends DefaultCommandBase {
 
   async init(): Promise<void> {
     await super.init()
-    this.api = await RuntimeApi.create(this.logging, this.appConfig.endpoints.substrateNode)
+    this.api = await RuntimeApi.create(this.logging, this.appConfig.endpoints.joystreamNodeWs)
   }
 
   async sendAndFollowTx(account: KeyringPair, tx: SubmittableExtrinsic<'promise'>): Promise<SubmittableResult> {

+ 3 - 0
distributor-node/src/command-base/default.ts

@@ -91,6 +91,9 @@ export default abstract class DefaultCommandBase extends Command {
 
   async finally(err: any): Promise<void> {
     if (!err) this.exit(ExitCodes.OK)
+    if (process.env.DEBUG === 'true') {
+      console.error(err)
+    }
     super.finally(err)
   }
 }

+ 3 - 4
distributor-node/src/commands/dev/batchUpload.ts

@@ -1,7 +1,7 @@
 import AccountsCommandBase from '../../command-base/accounts'
 import DefaultCommandBase, { flags } from '../../command-base/default'
 import { hash } from 'blake3'
-import { PublicApi, Configuration, TokenRequest } from '../../services/networking/storage-node/generated'
+import { FilesApi, Configuration, TokenRequest } from '../../services/networking/storage-node/generated'
 import { u8aToHex } from '@polkadot/util'
 import * as multihash from 'multihashes'
 import FormData from 'form-data'
@@ -31,7 +31,7 @@ export default class DevBatchUpload extends AccountsCommandBase {
     }),
     bucketId: flags.integer({
       char: 'B',
-      description: 'Distribution bucket id',
+      description: 'Storage bucket id',
       required: true,
     }),
     batchSize: flags.integer({
@@ -49,10 +49,9 @@ export default class DevBatchUpload extends AccountsCommandBase {
     const { bagId, bucketId, batchSize, batchesCount } = this.parse(DevBatchUpload).flags
     const sudoKey = (await api.query.sudo.key()).toHuman()
     const dataFee = await api.query.storage.dataObjectPerMegabyteFee()
-    const storageApi = new PublicApi(
+    const storageApi = new FilesApi(
       new Configuration({
         basePath: 'http://127.0.0.1:3333/api/v1',
-        formDataCtor: FormData,
       })
     )
 

+ 1 - 1
distributor-node/src/commands/leader/create-bucket.ts

@@ -32,7 +32,7 @@ export default class LeaderCreateBucket extends AccountsCommandBase {
     const event = this.api.getEvent(result, 'storage', 'DistributionBucketCreated')
 
     this.log('Bucket succesfully created!')
-    const bucketId = event.data[0]
+    const bucketId = event.data[2]
     this.output(bucketId.toString())
   }
 }

+ 43 - 2
distributor-node/src/commands/leader/set-bucket-family-metadata.ts

@@ -2,7 +2,14 @@ import fs from 'fs'
 import AccountsCommandBase from '../../command-base/accounts'
 import DefaultCommandBase, { flags } from '../../command-base/default'
 import { ValidationService } from '../../services/validation/ValidationService'
-import { DistributionBucketFamilyMetadata, IDistributionBucketFamilyMetadata } from '@joystream/metadata-protobuf'
+import {
+  DistributionBucketFamilyMetadata,
+  GeographicalArea,
+  IDistributionBucketFamilyMetadata,
+} from '@joystream/metadata-protobuf'
+import { FamilyMetadataJson } from '../../types/generated/FamilyMetadataJson'
+import { isValidCountryCode, isValidSubdivisionCode } from '@joystream/metadata-protobuf/utils'
+import ExitCodes from '../../command-base/ExitCodes'
 
 export default class LeaderSetBucketFamilyMetadata extends AccountsCommandBase {
   static description = `Set/update distribution bucket family metadata.
@@ -22,15 +29,49 @@ export default class LeaderSetBucketFamilyMetadata extends AccountsCommandBase {
     ...DefaultCommandBase.flags,
   }
 
+  parseAndValidateMetadata(input: FamilyMetadataJson): IDistributionBucketFamilyMetadata {
+    const areas: IDistributionBucketFamilyMetadata['areas'] = []
+    input.areas?.forEach((a) => {
+      if ('continentCode' in a && a.continentCode) {
+        areas.push({ continent: GeographicalArea.Continent[a.continentCode] })
+        return
+      }
+      if ('countryCode' in a && a.countryCode) {
+        if (!isValidCountryCode(a.countryCode)) {
+          this.error(`Invalid country code: ${a.countryCode}`, { exit: ExitCodes.InvalidInput })
+        }
+        areas.push({ countryCode: a.countryCode })
+        return
+      }
+      if ('subdivisionCode' in a && a.subdivisionCode) {
+        if (!isValidSubdivisionCode(a.subdivisionCode)) {
+          this.error(`Invalid subdivision code: ${a.subdivisionCode}`, { exit: ExitCodes.InvalidInput })
+        }
+        areas.push({ subdivisionCode: a.subdivisionCode })
+        return
+      }
+      areas.push({})
+    })
+
+    const meta = { ...input, areas }
+    const error = DistributionBucketFamilyMetadata.verify(meta)
+    if (error) {
+      this.error(`Metadata validation failed: ${error}`, { exit: ExitCodes.InvalidInput })
+    }
+
+    return meta
+  }
+
   async run(): Promise<void> {
     const { familyId, input } = this.parse(LeaderSetBucketFamilyMetadata).flags
     const leadKey = await this.getDistributorLeadKey()
 
     const validation = new ValidationService()
-    const metadata: IDistributionBucketFamilyMetadata = validation.validate(
+    const metadataInput: FamilyMetadataJson = validation.validate(
       'FamilyMetadata',
       JSON.parse(fs.readFileSync(input).toString())
     )
+    const metadata = this.parseAndValidateMetadata(metadataInput)
 
     this.log(`Setting bucket family metadata (family: ${familyId})`, metadata)
     await this.sendAndFollowTx(

+ 1 - 1
distributor-node/src/commands/start.ts

@@ -10,7 +10,7 @@ export default class StartNode extends DefaultCommandBase {
 
   async run(): Promise<void> {
     const app = new App(this.appConfig)
-    app.start()
+    await app.start()
   }
 
   async finally(): Promise<void> {

+ 215 - 0
distributor-node/src/schemas/configSchema.ts

@@ -0,0 +1,215 @@
+import { JSONSchema4 } from 'json-schema'
+import winston from 'winston'
+import { MAX_CONCURRENT_RESPONSE_TIME_CHECKS } from '../services/networking/NetworkingService'
+
+export const bytesizeUnits = ['B', 'K', 'M', 'G', 'T']
+export const bytesizeRegex = new RegExp(`^[0-9]+(${bytesizeUnits.join('|')})$`)
+
+export const configSchema: JSONSchema4 = {
+  title: 'Distributor node configuration',
+  description: 'Configuration schema for distirubtor CLI and node',
+  type: 'object',
+  required: ['id', 'endpoints', 'directories', 'buckets', 'keys', 'port', 'workerId', 'limits', 'intervals'],
+  additionalProperties: false,
+  properties: {
+    id: {
+      type: 'string',
+      description: 'Node identifier used when sending elasticsearch logs and exposed on /status endpoint',
+      minLength: 1,
+    },
+    endpoints: {
+      type: 'object',
+      description: 'Specifies external endpoints that the distributor node will connect to',
+      additionalProperties: false,
+      required: ['queryNode', 'joystreamNodeWs'],
+      properties: {
+        queryNode: {
+          description: 'Query node graphql server uri (for example: http://localhost:8081/graphql)',
+          type: 'string',
+        },
+        joystreamNodeWs: {
+          description: 'Joystream node websocket api uri (for example: ws://localhost:9944)',
+          type: 'string',
+        },
+        elasticSearch: {
+          description: 'Elasticsearch uri used for submitting the distributor node logs (if enabled via `log.elastic`)',
+          type: 'string',
+        },
+      },
+    },
+    directories: {
+      type: 'object',
+      required: ['assets', 'cacheState'],
+      additionalProperties: false,
+      description: "Specifies paths where node's data will be stored",
+      properties: {
+        assets: {
+          description: 'Path to a directory where all the cached assets will be stored',
+          type: 'string',
+        },
+        cacheState: {
+          description:
+            'Path to a directory where information about the current cache state will be stored (LRU-SP cache data, stored assets mime types etc.)',
+          type: 'string',
+        },
+        logs: {
+          description:
+            'Path to a directory where logs will be stored if logging to a file was enabled (via `log.file`).',
+          type: 'string',
+        },
+      },
+    },
+    log: {
+      type: 'object',
+      additionalProperties: false,
+      description: 'Specifies minimum log levels by supported log outputs',
+      properties: {
+        file: {
+          description: 'Minimum level of logs written to a file specified in `directories.logs`',
+          type: 'string',
+          enum: [...Object.keys(winston.config.npm.levels), 'off'],
+        },
+        console: {
+          description: 'Minimum level of logs outputted to a console',
+          type: 'string',
+          enum: [...Object.keys(winston.config.npm.levels), 'off'],
+        },
+        elastic: {
+          description: 'Minimum level of logs sent to elasticsearch endpoint specified in `endpoints.elasticSearch`',
+          type: 'string',
+          enum: [...Object.keys(winston.config.npm.levels), 'off'],
+        },
+      },
+    },
+    limits: {
+      type: 'object',
+      required: [
+        'storage',
+        'maxConcurrentStorageNodeDownloads',
+        'maxConcurrentOutboundConnections',
+        'outboundRequestsTimeout',
+      ],
+      description: 'Specifies node limits w.r.t. storage, outbound connections etc.',
+      additionalProperties: false,
+      properties: {
+        storage: {
+          description: 'Maximum total size of all (cached) assets stored in `directories.assets`',
+          type: 'string',
+          pattern: bytesizeRegex.source,
+        },
+        maxConcurrentStorageNodeDownloads: {
+          description: 'Maximum number of concurrent downloads from the storage node(s)',
+          type: 'integer',
+          minimum: 1,
+        },
+        maxConcurrentOutboundConnections: {
+          description: 'Maximum number of total simultaneous outbound connections to storage node(s)',
+          type: 'integer',
+          minimum: 1,
+        },
+        outboundRequestsTimeout: {
+          description: 'Timeout for all outbound storage node http requests in miliseconds',
+          type: 'integer',
+          minimum: 1,
+        },
+      },
+    },
+    intervals: {
+      type: 'object',
+      required: ['saveCacheState', 'checkStorageNodeResponseTimes', 'cacheCleanup'],
+      additionalProperties: false,
+      description: 'Specifies how often periodic tasks (for example cache cleanup) are executed by the node.',
+      properties: {
+        saveCacheState: {
+          description:
+            'How often, in seconds, will the cache state be saved in `directories.state`. ' +
+            'Independently of the specified interval, the node will always try to save cache state before exiting.',
+          type: 'integer',
+          minimum: 1,
+        },
+        checkStorageNodeResponseTimes: {
+          description:
+            'How often, in seconds, will the distributor node attempt to send requests to all current storage node endpoints ' +
+            'in order to check how quickly they respond. ' +
+            `The node will never make more than ${MAX_CONCURRENT_RESPONSE_TIME_CHECKS} such requests concurrently.`,
+          type: 'integer',
+          minimum: 1,
+        },
+        cacheCleanup: {
+          description:
+            'How often, in seconds, will the distributor node fetch data about all its distribution obligations from the query node ' +
+            'and remove all the no-longer assigned data objects from local storage and cache state',
+          type: 'integer',
+          minimum: 1,
+        },
+      },
+    },
+    port: { description: 'Distributor node http server port', type: 'integer', minimum: 0 },
+    keys: {
+      description: 'Specifies the keys available within distributor node CLI.',
+      type: 'array',
+      items: {
+        oneOf: [
+          {
+            type: 'object',
+            title: 'Substrate uri',
+            description: "Keypair's substrate uri (for example: //Alice)",
+            required: ['suri'],
+            additionalProperties: false,
+            properties: {
+              type: { type: 'string', enum: ['ed25519', 'sr25519', 'ecdsa'], default: 'sr25519' },
+              suri: { type: 'string' },
+            },
+          },
+          {
+            type: 'object',
+            title: 'Mnemonic phrase',
+            description: 'Menomonic phrase',
+            required: ['mnemonic'],
+            additionalProperties: false,
+            properties: {
+              type: { type: 'string', enum: ['ed25519', 'sr25519', 'ecdsa'], default: 'sr25519' },
+              mnemonic: { type: 'string' },
+            },
+          },
+          {
+            type: 'object',
+            title: 'JSON backup file',
+            description: 'Path to JSON backup file from polkadot signer / polakdot/apps (relative to config file path)',
+            required: ['keyfile'],
+            additionalProperties: false,
+            properties: {
+              keyfile: { type: 'string' },
+            },
+          },
+        ],
+      },
+      minItems: 1,
+    },
+    buckets: {
+      description: 'Specifies the buckets distributed by the node',
+      oneOf: [
+        {
+          title: 'Bucket ids',
+          description: 'List of distribution bucket ids',
+          type: 'array',
+          items: { type: 'integer', minimum: 0 },
+          minItems: 1,
+        },
+        {
+          title: 'All buckets',
+          description: 'Distribute all buckets assigned to worker specified in `workerId`',
+          type: 'string',
+          enum: ['all'],
+        },
+      ],
+    },
+    workerId: {
+      description: 'ID of the node operator (distribution working group worker)',
+      type: 'integer',
+      minimum: 0,
+    },
+  },
+}
+
+export default configSchema

+ 46 - 0
distributor-node/src/schemas/familyMetadataSchema.ts

@@ -0,0 +1,46 @@
+import { JSONSchema4 } from 'json-schema'
+
+export const familyMetadataSchema: JSONSchema4 = {
+  type: 'object',
+  additionalProperties: false,
+  properties: {
+    region: { type: 'string' },
+    description: { type: 'string' },
+    areas: {
+      type: 'array',
+      items: {
+        type: 'object',
+        oneOf: [
+          // Continent:
+          {
+            additionalProperties: false,
+            required: ['continentCode'],
+            properties: { continentCode: { type: 'string', enum: ['AF', 'AN', 'AS', 'EU', 'NA', 'OC', 'SA'] } },
+          },
+          // Country:
+          {
+            additionalProperties: false,
+            required: ['countryCode'],
+            properties: { countryCode: { type: 'string', minLength: 2, maxLength: 2 } },
+          },
+          // Subdivision:
+          {
+            additionalProperties: false,
+            required: ['subdivisionCode'],
+            properties: { subdivisionCode: { type: 'string' } },
+          },
+          // Empty object (for clearing purposes):
+          { additionalProperties: false },
+        ],
+      },
+    },
+    latencyTestTargets: {
+      type: 'array',
+      items: {
+        type: 'string',
+      },
+    },
+  },
+}
+
+export default familyMetadataSchema

+ 3 - 3
distributor-node/src/services/validation/schemas/index.ts → distributor-node/src/schemas/index.ts

@@ -1,6 +1,6 @@
-import { ConfigJson } from '../../../types/generated/ConfigJson'
-import { OperatorMetadataJson } from '../../../types/generated/OperatorMetadataJson'
-import { FamilyMetadataJson } from '../../../types/generated/FamilyMetadataJson'
+import { DistributorNodeConfiguration as ConfigJson } from '../types/generated/ConfigJson'
+import { OperatorMetadataJson } from '../types/generated/OperatorMetadataJson'
+import { FamilyMetadataJson } from '../types/generated/FamilyMetadataJson'
 import { configSchema } from './configSchema'
 import { familyMetadataSchema } from './familyMetadataSchema'
 import { operatorMetadataSchema } from './operatorMetadataSchema'

+ 0 - 0
distributor-node/src/services/validation/schemas/operatorMetadataSchema.ts → distributor-node/src/schemas/operatorMetadataSchema.ts


+ 8 - 0
distributor-node/src/schemas/scripts/generateConfigDoc.ts

@@ -0,0 +1,8 @@
+import { jsonschema2md } from '@adobe/jsonschema2md'
+import { configSchema } from '../configSchema'
+import path from 'path'
+
+console.log(configSchema)
+jsonschema2md(configSchema, {
+  outDir: path.resolve(__dirname, `../../../docs/schema`),
+})

+ 4 - 4
distributor-node/src/services/validation/generateTypes.ts → distributor-node/src/schemas/scripts/generateTypes.ts

@@ -1,13 +1,13 @@
 import fs from 'fs'
 import path from 'path'
 import { compile } from 'json-schema-to-typescript'
-import { schemas } from './schemas'
+import { schemas } from '..'
 
 // eslint-disable-next-line @typescript-eslint/no-var-requires
 const prettierConfig = require('@joystream/prettier-config')
 
 Object.entries(schemas).forEach(([schemaKey, schema]) => {
-  compile(schema, `${schemaKey}Json`, { style: prettierConfig }).then((output) =>
-    fs.writeFileSync(path.resolve(__dirname, `../../types/generated/${schemaKey}Json.d.ts`), output)
-  )
+  compile(schema, `${schemaKey}Json`, { style: prettierConfig })
+    .then((output) => fs.writeFileSync(path.resolve(__dirname, `../../types/generated/${schemaKey}Json.d.ts`), output))
+    .catch(console.error)
 })

+ 66 - 81
distributor-node/src/services/cache/StateCacheService.ts

@@ -32,41 +32,30 @@ export class StateCacheService {
   private logger: Logger
   private config: ReadonlyConfig
   private cacheFilePath: string
-  private saveInterval: NodeJS.Timeout
 
   private memoryState = {
-    pendingDownloadsByContentHash: new Map<string, PendingDownloadData>(),
-    contentHashByObjectId: new Map<string, string>(),
+    pendingDownloadsByObjectId: new Map<string, PendingDownloadData>(),
     storageNodeEndpointDataByEndpoint: new Map<string, StorageNodeEndpointData>(),
-    groupNumberByContentHash: new Map<string, number>(),
+    groupNumberByObjectId: new Map<string, number>(),
   }
 
   private storedState = {
     lruCacheGroups: Array.from({ length: CACHE_GROUPS_COUNT }).map(() => new Map<string, CacheItemData>()),
-    mimeTypeByContentHash: new Map<string, string>(),
+    mimeTypeByObjectId: new Map<string, string>(),
   }
 
-  public constructor(config: ReadonlyConfig, logging: LoggingService, saveIntervalMs = 60 * 1000) {
+  public constructor(config: ReadonlyConfig, logging: LoggingService) {
     this.logger = logging.createLogger('StateCacheService')
-    this.cacheFilePath = `${config.directories.cache}/cache.json`
+    this.cacheFilePath = `${config.directories.cacheState}/cache.json`
     this.config = config
-    this.saveInterval = setInterval(() => this.save(), saveIntervalMs)
   }
 
-  public setContentMimeType(contentHash: string, mimeType: string): void {
-    this.storedState.mimeTypeByContentHash.set(contentHash, mimeType)
+  public setContentMimeType(objectId: string, mimeType: string): void {
+    this.storedState.mimeTypeByObjectId.set(objectId, mimeType)
   }
 
-  public getContentMimeType(contentHash: string): string | undefined {
-    return this.storedState.mimeTypeByContentHash.get(contentHash)
-  }
-
-  public setObjectContentHash(objectId: string, hash: string): void {
-    this.memoryState.contentHashByObjectId.set(objectId, hash)
-  }
-
-  public getObjectContentHash(objectId: string): string | undefined {
-    return this.memoryState.contentHashByObjectId.get(objectId)
+  public getContentMimeType(objectId: string): string | undefined {
+    return this.storedState.mimeTypeByObjectId.get(objectId)
   }
 
   private calcCacheGroup({ sizeKB, popularity }: CacheItemData) {
@@ -76,23 +65,23 @@ export class StateCacheService {
     )
   }
 
-  public getCachedContentHashes(): string[] {
-    let hashes: string[] = []
+  public getCachedObjectsIds(): string[] {
+    let objectIds: string[] = []
     for (const [, group] of this.storedState.lruCacheGroups.entries()) {
-      hashes = hashes.concat(Array.from(group.keys()))
+      objectIds = objectIds.concat(Array.from(group.keys()))
     }
-    return hashes
+    return objectIds
   }
 
-  public getCachedContentLength(): number {
+  public getCachedObjectsCount(): number {
     return this.storedState.lruCacheGroups.reduce((a, b) => a + b.size, 0)
   }
 
-  public newContent(contentHash: string, sizeInBytes: number): void {
-    const { groupNumberByContentHash } = this.memoryState
+  public newContent(objectId: string, sizeInBytes: number): void {
+    const { groupNumberByObjectId } = this.memoryState
     const { lruCacheGroups } = this.storedState
-    if (groupNumberByContentHash.get(contentHash)) {
-      this.logger.warn('newContent was called for content that already exists, ignoring the call', { contentHash })
+    if (groupNumberByObjectId.get(objectId)) {
+      this.logger.warn('newContent was called for content that already exists, ignoring the call', { objectId })
       return
     }
     const cacheItemData: CacheItemData = {
@@ -101,33 +90,33 @@ export class StateCacheService {
       sizeKB: Math.ceil(sizeInBytes / 1024),
     }
     const groupNumber = this.calcCacheGroup(cacheItemData)
-    groupNumberByContentHash.set(contentHash, groupNumber)
-    lruCacheGroups[groupNumber].set(contentHash, cacheItemData)
+    groupNumberByObjectId.set(objectId, groupNumber)
+    lruCacheGroups[groupNumber].set(objectId, cacheItemData)
   }
 
-  public peekContent(contentHash: string): CacheItemData | undefined {
-    const groupNumber = this.memoryState.groupNumberByContentHash.get(contentHash)
+  public peekContent(objectId: string): CacheItemData | undefined {
+    const groupNumber = this.memoryState.groupNumberByObjectId.get(objectId)
     if (groupNumber !== undefined) {
-      return this.storedState.lruCacheGroups[groupNumber].get(contentHash)
+      return this.storedState.lruCacheGroups[groupNumber].get(objectId)
     }
   }
 
-  public useContent(contentHash: string): void {
-    const { groupNumberByContentHash } = this.memoryState
+  public useContent(objectId: string): void {
+    const { groupNumberByObjectId } = this.memoryState
     const { lruCacheGroups } = this.storedState
-    const groupNumber = groupNumberByContentHash.get(contentHash)
+    const groupNumber = groupNumberByObjectId.get(objectId)
     if (groupNumber === undefined) {
-      this.logger.warn('groupNumberByContentHash missing when trying to update LRU of content', { contentHash })
+      this.logger.warn('groupNumberByObjectId missing when trying to update LRU of content', { objectId })
       return
     }
     const group = lruCacheGroups[groupNumber]
-    const cacheItemData = group.get(contentHash)
+    const cacheItemData = group.get(objectId)
     if (!cacheItemData) {
-      this.logger.warn('Cache inconsistency: item missing in group retrieved from by groupNumberByContentHash map!', {
-        contentHash,
+      this.logger.warn('Cache inconsistency: item missing in group retrieved by groupNumberByObjectId map!', {
+        objectId,
         groupNumber,
       })
-      groupNumberByContentHash.delete(contentHash)
+      groupNumberByObjectId.delete(objectId)
       return
     }
     cacheItemData.lastAccessTime = Date.now()
@@ -135,25 +124,25 @@ export class StateCacheService {
     // Move object to the top of the current group / new group
     const targetGroupNumber = this.calcCacheGroup(cacheItemData)
     const targetGroup = lruCacheGroups[targetGroupNumber]
-    group.delete(contentHash)
-    targetGroup.set(contentHash, cacheItemData)
+    group.delete(objectId)
+    targetGroup.set(objectId, cacheItemData)
     if (targetGroupNumber !== groupNumber) {
-      groupNumberByContentHash.set(contentHash, targetGroupNumber)
+      groupNumberByObjectId.set(objectId, targetGroupNumber)
     }
   }
 
-  public getCacheEvictCandidateHash(): string | null {
+  public getCacheEvictCandidateObjectId(): string | null {
     let highestCost = 0
     let bestCandidate: string | null = null
     for (const group of this.storedState.lruCacheGroups) {
       const lastItemInGroup = Array.from(group.entries())[0]
       if (lastItemInGroup) {
-        const [contentHash, objectData] = lastItemInGroup
+        const [objectId, objectData] = lastItemInGroup
         const elapsedSinceLastAccessed = Math.ceil((Date.now() - objectData.lastAccessTime) / 60_000)
         const itemCost = (elapsedSinceLastAccessed * objectData.sizeKB) / objectData.popularity
         if (itemCost >= highestCost) {
           highestCost = itemCost
-          bestCandidate = contentHash
+          bestCandidate = objectId
         }
       }
     }
@@ -161,7 +150,7 @@ export class StateCacheService {
   }
 
   public newPendingDownload(
-    contentHash: string,
+    objectId: string,
     objectSize: number,
     promise: Promise<StorageNodeDownloadResponse>
   ): PendingDownloadData {
@@ -170,31 +159,31 @@ export class StateCacheService {
       objectSize,
       promise,
     }
-    this.memoryState.pendingDownloadsByContentHash.set(contentHash, pendingDownload)
+    this.memoryState.pendingDownloadsByObjectId.set(objectId, pendingDownload)
     return pendingDownload
   }
 
   public getPendingDownloadsCount(): number {
-    return this.memoryState.pendingDownloadsByContentHash.size
+    return this.memoryState.pendingDownloadsByObjectId.size
   }
 
-  public getPendingDownload(contentHash: string): PendingDownloadData | undefined {
-    return this.memoryState.pendingDownloadsByContentHash.get(contentHash)
+  public getPendingDownload(objectId: string): PendingDownloadData | undefined {
+    return this.memoryState.pendingDownloadsByObjectId.get(objectId)
   }
 
-  public dropPendingDownload(contentHash: string): void {
-    this.memoryState.pendingDownloadsByContentHash.delete(contentHash)
+  public dropPendingDownload(objectId: string): void {
+    this.memoryState.pendingDownloadsByObjectId.delete(objectId)
   }
 
-  public dropByHash(contentHash: string): void {
-    this.logger.debug('Dropping all state by content hash', contentHash)
-    this.storedState.mimeTypeByContentHash.delete(contentHash)
-    this.memoryState.pendingDownloadsByContentHash.delete(contentHash)
-    const cacheGroupNumber = this.memoryState.groupNumberByContentHash.get(contentHash)
-    this.logger.debug('Cache group by hash established', { contentHash, cacheGroupNumber })
+  public dropById(objectId: string): void {
+    this.logger.debug('Dropping all state by object id', { objectId })
+    this.storedState.mimeTypeByObjectId.delete(objectId)
+    this.memoryState.pendingDownloadsByObjectId.delete(objectId)
+    const cacheGroupNumber = this.memoryState.groupNumberByObjectId.get(objectId)
+    this.logger.debug('Cache group by object id established', { objectId, cacheGroupNumber })
     if (cacheGroupNumber) {
-      this.memoryState.groupNumberByContentHash.delete(contentHash)
-      this.storedState.lruCacheGroups[cacheGroupNumber].delete(contentHash)
+      this.memoryState.groupNumberByObjectId.delete(objectId)
+      this.storedState.lruCacheGroups[cacheGroupNumber].delete(objectId)
     }
   }
 
@@ -222,11 +211,11 @@ export class StateCacheService {
   }
 
   private serializeData() {
-    const { lruCacheGroups, mimeTypeByContentHash } = this.storedState
+    const { lruCacheGroups, mimeTypeByObjectId } = this.storedState
     return JSON.stringify(
       {
         lruCacheGroups: lruCacheGroups.map((g) => Array.from(g.entries())),
-        mimeTypeByContentHash: Array.from(mimeTypeByContentHash.entries()),
+        mimeTypeByObjectId: Array.from(mimeTypeByObjectId.entries()),
       },
       null,
       2 // TODO: Only for debugging
@@ -255,23 +244,23 @@ export class StateCacheService {
     fs.writeFileSync(this.cacheFilePath, serialized)
   }
 
-  private loadGroupNumberByContentHashMap() {
-    const contentHashes = _.uniq(this.getCachedContentHashes())
+  private loadGroupNumberByObjectIdMap() {
+    const objectIds = _.uniq(this.getCachedObjectsIds())
     const { lruCacheGroups: groups } = this.storedState
-    const { groupNumberByContentHash } = this.memoryState
+    const { groupNumberByObjectId } = this.memoryState
 
-    contentHashes.forEach((contentHash) => {
+    objectIds.forEach((objectId) => {
       groups.forEach((group, groupNumber) => {
-        if (group.has(contentHash)) {
-          if (!groupNumberByContentHash.has(contentHash)) {
-            groupNumberByContentHash.set(contentHash, groupNumber)
+        if (group.has(objectId)) {
+          if (!groupNumberByObjectId.has(objectId)) {
+            groupNumberByObjectId.set(objectId, groupNumber)
           } else {
             // Content duplicated in multiple groups - remove!
             this.logger.warn(
-              `Content hash ${contentHash} was found in in multiple lru cache groups. Removing from group ${groupNumber}...`,
-              { firstGroup: groupNumberByContentHash.get(contentHash), currentGroup: groupNumber }
+              `Object id ${objectId} was found in multiple lru cache groups. Removing from group ${groupNumber}...`,
+              { firstGroup: groupNumberByObjectId.get(objectId), currentGroup: groupNumber }
             )
-            group.delete(contentHash)
+            group.delete(objectId)
           }
         }
       })
@@ -286,8 +275,8 @@ export class StateCacheService {
         ;((fileContent.lruCacheGroups || []) as Array<Array<[string, CacheItemData]>>).forEach((group, groupIndex) => {
           this.storedState.lruCacheGroups[groupIndex] = new Map<string, CacheItemData>(group)
         })
-        this.storedState.mimeTypeByContentHash = new Map<string, string>(fileContent.mimeTypeByContentHash || [])
-        this.loadGroupNumberByContentHashMap()
+        this.storedState.mimeTypeByObjectId = new Map<string, string>(fileContent.mimeTypeByObjectId || [])
+        this.loadGroupNumberByObjectIdMap()
       } catch (err) {
         this.logger.error('Error while trying to load data from cache file! Will start from scratch', {
           file: this.cacheFilePath,
@@ -298,8 +287,4 @@ export class StateCacheService {
       this.logger.warn(`Cache file (${this.cacheFilePath}) is empty. Starting from scratch`)
     }
   }
-
-  public clearInterval(): void {
-    clearInterval(this.saveInterval)
-  }
 }

+ 116 - 75
distributor-node/src/services/content/ContentService.ts

@@ -1,12 +1,14 @@
 import fs from 'fs'
-import { ReadonlyConfig, DataObjectData } from '../../types'
+import { ReadonlyConfig } from '../../types'
 import { StateCacheService } from '../cache/StateCacheService'
 import { LoggingService } from '../logging'
 import { Logger } from 'winston'
 import { FileContinousReadStream, FileContinousReadStreamOptions } from './FileContinousReadStream'
 import FileType from 'file-type'
-import _ from 'lodash'
 import { Readable, pipeline } from 'stream'
+import { NetworkingService } from '../networking'
+import { createHash } from 'blake3'
+import * as multihash from 'multihashes'
 
 export const DEFAULT_CONTENT_TYPE = 'application/octet-stream'
 
@@ -15,6 +17,7 @@ export class ContentService {
   private dataDir: string
   private logger: Logger
   private stateCache: StateCacheService
+  private networking: NetworkingService
 
   private contentSizeSum = 0
 
@@ -26,65 +29,88 @@ export class ContentService {
     return this.config.limits.storage - this.contentSizeSum
   }
 
-  public constructor(config: ReadonlyConfig, logging: LoggingService, stateCache: StateCacheService) {
+  public constructor(
+    config: ReadonlyConfig,
+    logging: LoggingService,
+    networking: NetworkingService,
+    stateCache: StateCacheService
+  ) {
     this.config = config
     this.logger = logging.createLogger('ContentService')
     this.stateCache = stateCache
-    this.dataDir = config.directories.data
+    this.networking = networking
+    this.dataDir = config.directories.assets
   }
 
-  public async startupInit(supportedObjects: DataObjectData[]): Promise<void> {
-    const dataObjectsByHash = _.groupBy(supportedObjects, (o) => o.contentHash)
+  public async cacheCleanup(): Promise<void> {
+    const supportedObjects = await this.networking.fetchSupportedDataObjects()
+    const cachedObjectsIds = this.stateCache.getCachedObjectsIds()
+    let droppedObjects = 0
+
+    this.logger.verbose('Performing cache cleanup...', {
+      supportedObjects: supportedObjects.size,
+      objectsInCache: cachedObjectsIds.length,
+    })
+
+    for (const objectId of cachedObjectsIds) {
+      if (!supportedObjects.has(objectId)) {
+        this.drop(objectId, 'No longer supported')
+        ++droppedObjects
+      }
+    }
+
+    this.logger.verbose('Cache cleanup finished', {
+      droppedObjects,
+    })
+  }
+
+  public async startupInit(): Promise<void> {
+    const supportedObjects = await this.networking.fetchSupportedDataObjects()
     const dataDirFiles = fs.readdirSync(this.dataDir)
     const filesCountOnStartup = dataDirFiles.length
-    const cachedContentHashes = this.stateCache.getCachedContentHashes()
-    const cacheItemsOnStartup = cachedContentHashes.length
+    const cachedObjectsIds = this.stateCache.getCachedObjectsIds()
+    const cacheItemsCountOnStartup = cachedObjectsIds.length
 
     this.logger.info('ContentService initializing...', {
-      supportedObjects: supportedObjects.length,
+      supportedObjects: supportedObjects.size,
       filesCountOnStartup,
-      cacheItemsOnStartup,
+      cacheItemsCountOnStartup,
     })
     let filesDropped = 0
-    for (const contentHash of dataDirFiles) {
-      this.logger.debug('Checking content file', { contentHash })
+    for (const objectId of dataDirFiles) {
+      this.logger.debug('Checking content file', { objectId })
       // Add fileSize to contentSizeSum for each file. If the file ends up dropped - contentSizeSum will be reduced by this.drop().
-      const fileSize = this.fileSize(contentHash)
+      const fileSize = this.fileSize(objectId)
       this.contentSizeSum += fileSize
 
       // Drop files that are not part of current chain assignment
-      const objectsByHash = dataObjectsByHash[contentHash] || []
-      if (!objectsByHash.length) {
-        this.drop(contentHash, 'Not supported')
+      const dataObject = supportedObjects.get(objectId)
+      if (!dataObject) {
+        this.drop(objectId, 'Not supported')
         continue
       }
 
       // Compare file size to expected one
-      const { size: dataObjectSize } = objectsByHash[0]
+      const { size: dataObjectSize } = dataObject
       if (fileSize !== dataObjectSize) {
         // Existing file size does not match the expected one
         const msg = `Unexpected file size. Expected: ${dataObjectSize}, actual: ${fileSize}`
         this.logger.warn(msg, { fileSize, dataObjectSize })
-        this.drop(contentHash, msg)
+        this.drop(objectId, msg)
         ++filesDropped
       } else {
         // Existing file size is OK - detect mimeType if missing
-        if (!this.stateCache.getContentMimeType(contentHash)) {
-          this.stateCache.setContentMimeType(contentHash, await this.guessMimeType(contentHash))
+        if (!this.stateCache.getContentMimeType(objectId)) {
+          this.stateCache.setContentMimeType(objectId, await this.detectMimeType(objectId))
         }
       }
-
-      // Recreate contentHashByObjectId map for all supported data objects
-      objectsByHash.forEach(({ contentHash, objectId }) => {
-        this.stateCache.setObjectContentHash(objectId, contentHash)
-      })
     }
 
     let cacheItemsDropped = 0
-    for (const contentHash of cachedContentHashes) {
-      if (!this.exists(contentHash)) {
+    for (const objectId of cachedObjectsIds) {
+      if (!this.exists(objectId)) {
         // Content is part of cache data, but does not exist in filesystem - drop from cache
-        this.stateCache.dropByHash(contentHash)
+        this.stateCache.dropById(objectId)
         ++cacheItemsDropped
       }
     }
@@ -96,57 +122,54 @@ export class ContentService {
     })
   }
 
-  public drop(contentHash: string, reason?: string): void {
-    if (this.exists(contentHash)) {
-      const size = this.fileSize(contentHash)
-      fs.unlinkSync(this.path(contentHash))
+  public drop(objectId: string, reason?: string): void {
+    if (this.exists(objectId)) {
+      const size = this.fileSize(objectId)
+      fs.unlinkSync(this.path(objectId))
       this.contentSizeSum -= size
-      this.logger.debug('Dropping content', { contentHash, reason, size, contentSizeSum: this.contentSizeSum })
+      this.logger.debug('Dropping content', { objectId, reason, size, contentSizeSum: this.contentSizeSum })
     } else {
-      this.logger.warn('Trying to drop content that no loger exists', { contentHash, reason })
+      this.logger.warn('Trying to drop content that no longer exists', { objectId, reason })
     }
-    this.stateCache.dropByHash(contentHash)
+    this.stateCache.dropById(objectId)
   }
 
-  public fileSize(contentHash: string): number {
-    return fs.statSync(this.path(contentHash)).size
+  public fileSize(objectId: string): number {
+    return fs.statSync(this.path(objectId)).size
   }
 
-  public path(contentHash: string): string {
-    return `${this.dataDir}/${contentHash}`
+  public path(objectId: string): string {
+    return `${this.dataDir}/${objectId}`
   }
 
-  public exists(contentHash: string): boolean {
-    return fs.existsSync(this.path(contentHash))
+  public exists(objectId: string): boolean {
+    return fs.existsSync(this.path(objectId))
   }
 
-  public createReadStream(contentHash: string): fs.ReadStream {
-    return fs.createReadStream(this.path(contentHash))
+  public createReadStream(objectId: string): fs.ReadStream {
+    return fs.createReadStream(this.path(objectId))
   }
 
-  public createWriteStream(contentHash: string): fs.WriteStream {
-    return fs.createWriteStream(this.path(contentHash), { autoClose: true, emitClose: true })
+  public createWriteStream(objectId: string): fs.WriteStream {
+    return fs.createWriteStream(this.path(objectId), { autoClose: true, emitClose: true })
   }
 
-  public createContinousReadStream(
-    contentHash: string,
-    options: FileContinousReadStreamOptions
-  ): FileContinousReadStream {
-    return new FileContinousReadStream(this.path(contentHash), options)
+  public createContinousReadStream(objectId: string, options: FileContinousReadStreamOptions): FileContinousReadStream {
+    return new FileContinousReadStream(this.path(objectId), options)
   }
 
-  public async guessMimeType(contentHash: string): Promise<string> {
-    const guessResult = await FileType.fromFile(this.path(contentHash))
-    return guessResult?.mime || DEFAULT_CONTENT_TYPE
+  public async detectMimeType(objectId: string): Promise<string> {
+    const result = await FileType.fromFile(this.path(objectId))
+    return result?.mime || DEFAULT_CONTENT_TYPE
   }
 
   private async evictCacheUntilFreeSpaceReached(targetFreeSpace: number): Promise<void> {
     this.logger.verbose('Cache eviction triggered.', { targetFreeSpace, currentFreeSpace: this.freeSpace })
     let itemsDropped = 0
     while (this.freeSpace < targetFreeSpace) {
-      const evictCandidateHash = this.stateCache.getCacheEvictCandidateHash()
-      if (evictCandidateHash) {
-        this.drop(evictCandidateHash, 'Cache eviction')
+      const evictCandidateId = this.stateCache.getCacheEvictCandidateObjectId()
+      if (evictCandidateId) {
+        this.drop(evictCandidateId, 'Cache eviction')
         ++itemsDropped
       } else {
         this.logger.verbose('Nothing to drop from cache, waiting...', { freeSpace: this.freeSpace, targetFreeSpace })
@@ -156,9 +179,14 @@ export class ContentService {
     this.logger.verbose('Cache eviction finalized.', { currentfreeSpace: this.freeSpace, itemsDropped })
   }
 
-  public async handleNewContent(contentHash: string, expectedSize: number, dataStream: Readable): Promise<void> {
+  public async handleNewContent(
+    objectId: string,
+    expectedSize: number,
+    expectedHash: string,
+    dataStream: Readable
+  ): Promise<void> {
     this.logger.verbose('Handling new content', {
-      contentHash,
+      objectId,
       expectedSize,
     })
 
@@ -170,22 +198,25 @@ export class ContentService {
     // Reserve space for the new object
     this.contentSizeSum += expectedSize
     this.logger.verbose('Reserved space for new data object', {
-      contentHash,
+      objectId,
       expectedSize,
       newContentSizeSum: this.contentSizeSum,
     })
 
     // Return a promise that resolves when the new file is created
     return new Promise<void>((resolve, reject) => {
-      const fileStream = this.createWriteStream(contentHash)
+      const fileStream = this.createWriteStream(objectId)
 
       let bytesRecieved = 0
+      const hash = createHash()
 
       pipeline(dataStream, fileStream, async (err) => {
         const { bytesWritten } = fileStream
+        const finalHash = multihash.toB58String(multihash.encode(hash.digest(), 'blake3'))
         const logMetadata = {
-          contentHash,
+          objectId,
           expectedSize,
+          expectedHash,
           bytesRecieved,
           bytesWritten,
         }
@@ -194,22 +225,30 @@ export class ContentService {
             err,
             ...logMetadata,
           })
-          this.drop(contentHash)
+          this.drop(objectId)
           reject(err)
-        } else {
-          if (bytesWritten === bytesRecieved && bytesWritten === expectedSize) {
-            const mimeType = await this.guessMimeType(contentHash)
-            this.logger.info('New content accepted', { ...logMetadata })
-            this.stateCache.dropPendingDownload(contentHash)
-            this.stateCache.newContent(contentHash, expectedSize)
-            this.stateCache.setContentMimeType(contentHash, mimeType)
-          } else {
-            this.logger.error('Content rejected: Bytes written/recieved/expected mismatch!', {
-              ...logMetadata,
-            })
-            this.drop(contentHash)
-          }
+          return
+        }
+
+        if (bytesWritten !== bytesRecieved || bytesWritten !== expectedSize) {
+          this.logger.error('Content rejected: Bytes written/received/expected mismatch!', {
+            ...logMetadata,
+          })
+          this.drop(objectId)
+          return
         }
+
+        if (finalHash !== expectedHash) {
+          this.logger.error('Content rejected: Hash mismatch!', { ...logMetadata })
+          this.drop(objectId)
+          return
+        }
+
+        const mimeType = await this.detectMimeType(objectId)
+        this.logger.info('New content accepted', { ...logMetadata })
+        this.stateCache.dropPendingDownload(objectId)
+        this.stateCache.newContent(objectId, expectedSize)
+        this.stateCache.setContentMimeType(objectId, mimeType)
       })
 
       fileStream.on('open', () => {
@@ -219,6 +258,8 @@ export class ContentService {
 
       dataStream.on('data', (chunk) => {
         bytesRecieved += chunk.length
+        hash.update(chunk)
+
         if (bytesRecieved > expectedSize) {
           dataStream.destroy(new Error('Unexpected content size: Too much data recieved from source!'))
         }

+ 14 - 17
distributor-node/src/services/logging/LoggingService.ts

@@ -79,26 +79,23 @@ export class LoggingService {
       transports.push(esTransport)
     }
 
-    const fileTransport =
-      config.log?.file && config.log.file !== 'off'
-        ? new winston.transports.File({
-            filename: `${config.directories.logs}/logs.json`,
-            level: config.log.file,
-            format: winston.format.combine(pauseFormat({ id: 'file' }), escFormat()),
-          })
-        : undefined
-    if (fileTransport) {
+    if (config.log?.file && config.log.file !== 'off') {
+      if (!config.directories.logs) {
+        throw new Error('config.directories.logs must be provided when file logging is enabled!')
+      }
+      const fileTransport = new winston.transports.File({
+        filename: `${config.directories.logs}/logs.json`,
+        level: config.log.file,
+        format: winston.format.combine(pauseFormat({ id: 'file' }), escFormat()),
+      })
       transports.push(fileTransport)
     }
 
-    const consoleTransport =
-      config.log?.console && config.log.console !== 'off'
-        ? new winston.transports.Console({
-            level: config.log.console,
-            format: winston.format.combine(pauseFormat({ id: 'cli' }), cliFormat),
-          })
-        : undefined
-    if (consoleTransport) {
+    if (config.log?.console && config.log.console !== 'off') {
+      const consoleTransport = new winston.transports.Console({
+        level: config.log.console,
+        format: winston.format.combine(pauseFormat({ id: 'cli' }), cliFormat),
+      })
       transports.push(consoleTransport)
     }
 

+ 60 - 62
distributor-node/src/services/networking/NetworkingService.ts

@@ -5,7 +5,7 @@ import { LoggingService } from '../logging'
 import { StorageNodeApi } from './storage-node/api'
 import { PendingDownloadData, StateCacheService } from '../cache/StateCacheService'
 import { DataObjectDetailsFragment } from './query-node/generated/queries'
-import axios from 'axios'
+import axios, { AxiosRequestConfig } from 'axios'
 import {
   StorageNodeEndpointData,
   DataObjectAccessPoints,
@@ -20,9 +20,9 @@ import http from 'http'
 import https from 'https'
 import { parseAxiosError } from '../parsers/errors'
 
-const MAX_CONCURRENT_AVAILABILITY_CHECKS_PER_DOWNLOAD = 10
-const MAX_CONCURRENT_RESPONSE_TIME_CHECKS = 10
-const STORAGE_NODE_ENDPOINTS_CHECK_INTERVAL_MS = 60000
+// Concurrency limits
+export const MAX_CONCURRENT_AVAILABILITY_CHECKS_PER_DOWNLOAD = 10
+export const MAX_CONCURRENT_RESPONSE_TIME_CHECKS = 10
 
 export class NetworkingService {
   private config: ReadonlyConfig
@@ -32,7 +32,6 @@ export class NetworkingService {
   private stateCache: StateCacheService
   private logger: Logger
 
-  private storageNodeEndpointsCheckInterval: NodeJS.Timeout
   private testLatencyQueue: queue
   private downloadQueue: queue
 
@@ -49,13 +48,9 @@ export class NetworkingService {
     this.logging = logging
     this.stateCache = stateCache
     this.logger = logging.createLogger('NetworkingManager')
-    this.queryNodeApi = new QueryNodeApi(config.endpoints.queryNode)
+    this.queryNodeApi = new QueryNodeApi(config.endpoints.queryNode, this.logging)
     // this.runtimeApi = new RuntimeApi(config.endpoints.substrateNode)
-    this.checkActiveStorageNodeEndpoints()
-    this.storageNodeEndpointsCheckInterval = setInterval(
-      this.checkActiveStorageNodeEndpoints.bind(this),
-      STORAGE_NODE_ENDPOINTS_CHECK_INTERVAL_MS
-    )
+    void this.checkActiveStorageNodeEndpoints()
     // Queues
     this.testLatencyQueue = queue({ concurrency: MAX_CONCURRENT_RESPONSE_TIME_CHECKS, autostart: true }).on(
       'end',
@@ -68,10 +63,6 @@ export class NetworkingService {
     this.downloadQueue = queue({ concurrency: config.limits.maxConcurrentStorageNodeDownloads, autostart: true })
   }
 
-  public clearIntervals(): void {
-    clearInterval(this.storageNodeEndpointsCheckInterval)
-  }
-
   private validateNodeEndpoint(endpoint: string): void {
     const endpointUrl = new URL(endpoint)
     if (endpointUrl.protocol !== 'http:' && endpointUrl.protocol !== 'https:') {
@@ -96,6 +87,10 @@ export class NetworkingService {
     })
   }
 
+  private getApiEndpoint(rootEndpoint: string) {
+    return rootEndpoint.endsWith('/') ? rootEndpoint + 'api/v1' : rootEndpoint + '/api/v1'
+  }
+
   private prepareStorageNodeEndpoints(details: DataObjectDetailsFragment) {
     const endpointsData = details.storageBag.storageAssignments
       .filter(
@@ -103,10 +98,14 @@ export class NetworkingService {
           a.storageBucket.operatorStatus.__typename === 'StorageBucketOperatorStatusActive' &&
           a.storageBucket.operatorMetadata?.nodeEndpoint
       )
-      .map((a) => ({
-        bucketId: a.storageBucket.id,
-        endpoint: a.storageBucket.operatorMetadata!.nodeEndpoint!,
-      }))
+      .map((a) => {
+        const rootEndpoint = a.storageBucket.operatorMetadata!.nodeEndpoint!
+        const apiEndpoint = this.getApiEndpoint(rootEndpoint)
+        return {
+          bucketId: a.storageBucket.id,
+          endpoint: apiEndpoint,
+        }
+      })
 
     return this.filterStorageNodeEndpoints(endpointsData)
   }
@@ -119,9 +118,6 @@ export class NetworkingService {
 
   public async dataObjectInfo(objectId: string): Promise<DataObjectInfo> {
     const details = await this.queryNodeApi.getDataObjectDetails(objectId)
-    if (details) {
-      this.stateCache.setObjectContentHash(objectId, details.ipfsHash)
-    }
     return {
       exists: !!details,
       isSupported:
@@ -164,7 +160,7 @@ export class NetworkingService {
     onFinished?: () => void
   ): Promise<void> {
     const {
-      objectData: { contentHash, accessPoints },
+      objectData: { objectId, accessPoints },
       startAt,
     } = downloadData
 
@@ -173,13 +169,13 @@ export class NetworkingService {
     return new Promise<void>((resolve, reject) => {
       // Handlers:
       const fail = (message: string) => {
-        this.stateCache.dropPendingDownload(contentHash)
+        this.stateCache.dropPendingDownload(objectId)
         onError(new Error(message))
         reject(new Error(message))
       }
 
       const sourceFound = (response: StorageNodeDownloadResponse) => {
-        this.logger.info('Download source chosen', { contentHash, source: response.config.url })
+        this.logger.info('Download source chosen', { objectId, source: response.config.url })
         pendingDownload.status = 'Downloading'
         onSourceFound(response)
       }
@@ -194,7 +190,7 @@ export class NetworkingService {
       )
 
       this.logger.info('Downloading new data object', {
-        contentHash,
+        objectId,
         possibleSources: storageEndpoints.map((e) => ({
           endpoint: e,
           meanResponseTime: this.stateCache.getStorageNodeEndpointMeanResponseTime(e),
@@ -213,7 +209,7 @@ export class NetworkingService {
       storageEndpoints.forEach(async (endpoint) => {
         availabilityQueue.push(async () => {
           const api = new StorageNodeApi(endpoint, this.logging)
-          const available = await api.isObjectAvailable(contentHash)
+          const available = await api.isObjectAvailable(objectId)
           if (!available) {
            throw new Error('Not available')
           }
@@ -225,7 +221,7 @@ export class NetworkingService {
         availabilityQueue.stop()
         const job = async () => {
           const api = new StorageNodeApi(endpoint, this.logging)
-          const response = await api.downloadObject(contentHash, startAt)
+          const response = await api.downloadObject(objectId, startAt)
           return response
         }
         objectDownloadQueue.push(job)
@@ -267,10 +263,10 @@ export class NetworkingService {
 
   public downloadDataObject(downloadData: DownloadData): Promise<StorageNodeDownloadResponse> | null {
     const {
-      objectData: { contentHash, size },
+      objectData: { objectId, size },
     } = downloadData
 
-    if (this.stateCache.getPendingDownload(contentHash)) {
+    if (this.stateCache.getPendingDownload(objectId)) {
       // Already downloading
       return null
     }
@@ -282,23 +278,23 @@ export class NetworkingService {
     })
 
     // Queue the download
-    const pendingDownload = this.stateCache.newPendingDownload(contentHash, size, downloadPromise)
+    const pendingDownload = this.stateCache.newPendingDownload(objectId, size, downloadPromise)
     this.downloadQueue.push(() => this.downloadJob(pendingDownload, downloadData, resolveDownload, rejectDownload))
 
     return downloadPromise
   }
 
-  async fetchSupportedDataObjects(): Promise<DataObjectData[]> {
+  async fetchSupportedDataObjects(): Promise<Map<string, DataObjectData>> {
     const data =
       this.config.buckets === 'all'
         ? await this.queryNodeApi.getDistributionBucketsWithObjectsByWorkerId(this.config.workerId)
         : await this.queryNodeApi.getDistributionBucketsWithObjectsByIds(this.config.buckets.map((id) => id.toString()))
-    const objectsData: DataObjectData[] = []
+    const objectsData = new Map<string, DataObjectData>()
     data.forEach((bucket) => {
       bucket.bagAssignments.forEach((a) => {
         a.storageBag.objects.forEach((object) => {
           const { ipfsHash, id, size } = object
-          objectsData.push({ contentHash: ipfsHash, objectId: id, size: parseInt(size) })
+          objectsData.set(id, { contentHash: ipfsHash, objectId: id, size: parseInt(size) })
         })
       })
     })
@@ -307,45 +303,47 @@ export class NetworkingService {
   }
 
   async checkActiveStorageNodeEndpoints(): Promise<void> {
-    const activeStorageOperators = await this.queryNodeApi.getActiveStorageBucketOperatorsData()
-    const endpoints = this.filterStorageNodeEndpoints(
-      activeStorageOperators.map(({ id, operatorMetadata }) => ({
-        bucketId: id,
-        endpoint: operatorMetadata!.nodeEndpoint!,
-      }))
-    )
-    this.logger.verbose('Checking nearby storage nodes...', { validEndpointsCount: endpoints.length })
+    try {
+      const activeStorageOperators = await this.queryNodeApi.getActiveStorageBucketOperatorsData()
+      const endpoints = this.filterStorageNodeEndpoints(
+        activeStorageOperators.map(({ id, operatorMetadata }) => ({
+          bucketId: id,
+          endpoint: this.getApiEndpoint(operatorMetadata!.nodeEndpoint!),
+        }))
+      )
+      this.logger.verbose('Checking nearby storage nodes...', { validEndpointsCount: endpoints.length })
 
-    endpoints.forEach(({ endpoint }) =>
-      this.testLatencyQueue.push(async () => {
-        await this.checkResponseTime(endpoint)
-      })
-    )
+      endpoints.forEach(({ endpoint }) =>
+        this.testLatencyQueue.push(async () => {
+          await this.checkResponseTime(endpoint)
+        })
+      )
+    } catch (err) {
+      this.logger.error("Couldn't check active storage node endpoints", { err })
+    }
   }
 
   async checkResponseTime(endpoint: string): Promise<void> {
     const start = Date.now()
     this.logger.debug(`Sending storage node response-time check request to: ${endpoint}`, { endpoint })
     try {
-      // TODO: Use a status endpoint once available?
-      await axios.get(endpoint, {
-        headers: {
-          connection: 'close',
-        },
-      })
-      throw new Error('Unexpected status 200')
+      const api = new StorageNodeApi(endpoint, this.logging)
+      const reqConfig: AxiosRequestConfig = { headers: { connection: 'close' } }
+      await api.stateApi.stateApiGetVersion(reqConfig)
+      const responseTime = Date.now() - start
+      this.logger.debug(`${endpoint} check request response time: ${responseTime}`, { endpoint, responseTime })
+      this.stateCache.setStorageNodeEndpointResponseTime(endpoint, responseTime)
     } catch (err) {
-      if (axios.isAxiosError(err) && err.response?.status === 404) {
-        // This is the expected outcome currently
-        const responseTime = Date.now() - start
-        this.logger.debug(`${endpoint} check request response time: ${responseTime}`, { endpoint, responseTime })
-        this.stateCache.setStorageNodeEndpointResponseTime(endpoint, responseTime)
-      } else {
-        this.logger.warn(`${endpoint} check request unexpected response`, {
+      if (axios.isAxiosError(err)) {
+        const parsedErr = parseAxiosError(err)
+        this.logger.warn(`${endpoint} check request error: ${parsedErr.message}`, {
           endpoint,
-          err: axios.isAxiosError(err) ? parseAxiosError(err) : err,
+          err: parsedErr,
           '@pauseFor': 900,
         })
+      } else {
+        const message = err instanceof Error ? err.message : 'Unknown'
+        this.logger.error(`${endpoint} check unexpected error: ${message}`, { endpoint, err, '@pauseFor': 900 })
       }
     }
   }

+ 30 - 16
distributor-node/src/services/networking/distributor-node/generated/api.ts

@@ -124,11 +124,15 @@ export const PublicApiAxiosParamCreator = function (configuration?: Configuratio
     return {
         /**
          * Returns a media file.
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicAsset: async (options: any = {}): Promise<RequestArgs> => {
-            const localVarPath = `/asset/{objectId}`;
+        publicAsset: async (objectId: string, options: any = {}): Promise<RequestArgs> => {
+            // verify required parameter 'objectId' is not null or undefined
+            assertParamExists('publicAsset', 'objectId', objectId)
+            const localVarPath = `/asset/{objectId}`
+                .replace(`{${"objectId"}}`, encodeURIComponent(String(objectId)));
             // use dummy base URL string because the URL constructor only accepts absolute URLs.
             const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
             let baseOptions;
@@ -153,11 +157,15 @@ export const PublicApiAxiosParamCreator = function (configuration?: Configuratio
         },
         /**
          * Returns asset response headers (cache status, content type and/or length, accepted ranges etc.)
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicAssetHead: async (options: any = {}): Promise<RequestArgs> => {
-            const localVarPath = `/asset/{objectId}`;
+        publicAssetHead: async (objectId: string, options: any = {}): Promise<RequestArgs> => {
+            // verify required parameter 'objectId' is not null or undefined
+            assertParamExists('publicAssetHead', 'objectId', objectId)
+            const localVarPath = `/asset/{objectId}`
+                .replace(`{${"objectId"}}`, encodeURIComponent(String(objectId)));
             // use dummy base URL string because the URL constructor only accepts absolute URLs.
             const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
             let baseOptions;
@@ -250,20 +258,22 @@ export const PublicApiFp = function(configuration?: Configuration) {
     return {
         /**
          * Returns a media file.
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        async publicAsset(options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
-            const localVarAxiosArgs = await localVarAxiosParamCreator.publicAsset(options);
+        async publicAsset(objectId: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.publicAsset(objectId, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
         /**
          * Returns asset response headers (cache status, content type and/or length, accepted ranges etc.)
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        async publicAssetHead(options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
-            const localVarAxiosArgs = await localVarAxiosParamCreator.publicAssetHead(options);
+        async publicAssetHead(objectId: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.publicAssetHead(objectId, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
         /**
@@ -296,19 +306,21 @@ export const PublicApiFactory = function (configuration?: Configuration, basePat
     return {
         /**
          * Returns a media file.
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicAsset(options?: any): AxiosPromise<any> {
-            return localVarFp.publicAsset(options).then((request) => request(axios, basePath));
+        publicAsset(objectId: string, options?: any): AxiosPromise<any> {
+            return localVarFp.publicAsset(objectId, options).then((request) => request(axios, basePath));
         },
         /**
          * Returns asset response headers (cache status, content type and/or length, accepted ranges etc.)
+         * @param {string} objectId Data Object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicAssetHead(options?: any): AxiosPromise<void> {
-            return localVarFp.publicAssetHead(options).then((request) => request(axios, basePath));
+        publicAssetHead(objectId: string, options?: any): AxiosPromise<void> {
+            return localVarFp.publicAssetHead(objectId, options).then((request) => request(axios, basePath));
         },
         /**
          * Returns list of distributed buckets
@@ -338,22 +350,24 @@ export const PublicApiFactory = function (configuration?: Configuration, basePat
 export class PublicApi extends BaseAPI {
     /**
      * Returns a media file.
+     * @param {string} objectId Data Object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      * @memberof PublicApi
      */
-    public publicAsset(options?: any) {
-        return PublicApiFp(this.configuration).publicAsset(options).then((request) => request(this.axios, this.basePath));
+    public publicAsset(objectId: string, options?: any) {
+        return PublicApiFp(this.configuration).publicAsset(objectId, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**
      * Returns asset response headers (cache status, content type and/or length, accepted ranges etc.)
+     * @param {string} objectId Data Object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      * @memberof PublicApi
      */
-    public publicAssetHead(options?: any) {
-        return PublicApiFp(this.configuration).publicAssetHead(options).then((request) => request(this.axios, this.basePath));
+    public publicAssetHead(objectId: string, options?: any) {
+        return PublicApiFp(this.configuration).publicAssetHead(objectId, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**

+ 13 - 3
distributor-node/src/services/networking/query-node/api.ts

@@ -1,5 +1,8 @@
-import { ApolloClient, NormalizedCacheObject, HttpLink, InMemoryCache, DocumentNode } from '@apollo/client/core'
+import { ApolloClient, NormalizedCacheObject, HttpLink, InMemoryCache, DocumentNode, from } from '@apollo/client/core'
+import { onError } from '@apollo/client/link/error'
 import fetch from 'cross-fetch'
+import { Logger } from 'winston'
+import { LoggingService } from '../../logging'
 import {
   DataObjectDetailsFragment,
   GetDataObjectDetails,
@@ -21,10 +24,17 @@ import { Maybe } from './generated/schema'
 
 export class QueryNodeApi {
   private apolloClient: ApolloClient<NormalizedCacheObject>
+  private logger: Logger
 
-  public constructor(endpoint: string) {
+  public constructor(endpoint: string, logging: LoggingService, exitOnError = false) {
+    this.logger = logging.createLogger('QueryNodeApi')
+    const errorLink = onError(({ graphQLErrors, networkError }) => {
+      const message = networkError?.message || 'Graphql syntax errors found'
+      this.logger.error('Error when trying to execute a query!', { err: { message, graphQLErrors, networkError } })
+      exitOnError && process.exit(-1)
+    })
     this.apolloClient = new ApolloClient({
-      link: new HttpLink({ uri: endpoint, fetch }),
+      link: from([errorLink, new HttpLink({ uri: endpoint, fetch })]),
       cache: new InMemoryCache(),
       defaultOptions: { query: { fetchPolicy: 'no-cache', errorPolicy: 'all' } },
     })

File diff suppressed because it is too large
+ 314 - 556
distributor-node/src/services/networking/query-node/generated/schema.ts


+ 2 - 2
distributor-node/src/services/networking/runtime/api.ts

@@ -120,8 +120,8 @@ export class RuntimeApi {
                 let errorMsg = dispatchError.toString()
                 if (dispatchError.isModule) {
                   try {
-                    const { name, documentation } = this._api.registry.findMetaError(dispatchError.asModule)
-                    errorMsg = `${name} (${documentation})`
+                    const { name, docs } = this._api.registry.findMetaError(dispatchError.asModule)
+                    errorMsg = `${name} (${docs.join(', ')})`
                   } catch (e) {
                     // This probably means we don't have this error in the metadata
                     // In this case - continue (we'll just display dispatchError.toString())

+ 15 - 13
distributor-node/src/services/networking/storage-node/api.ts

@@ -1,5 +1,5 @@
 import { Configuration } from './generated'
-import { PublicApi } from './generated/api'
+import { FilesApi, StateApi } from './generated/api'
 import axios, { AxiosRequestConfig } from 'axios'
 import { LoggingService } from '../../logging'
 import { Logger } from 'winston'
@@ -8,42 +8,44 @@ import { parseAxiosError } from '../../parsers/errors'
 
 export class StorageNodeApi {
   private logger: Logger
-  private publicApi: PublicApi
-  private endpoint: string
+  public filesApi: FilesApi
+  public stateApi: StateApi
+  public endpoint: string
 
   public constructor(endpoint: string, logging: LoggingService) {
     const config = new Configuration({
       basePath: endpoint,
     })
-    this.publicApi = new PublicApi(config)
+    this.filesApi = new FilesApi(config)
+    this.stateApi = new StateApi(config)
     this.endpoint = new URL(endpoint).toString()
     this.logger = logging.createLogger('StorageNodeApi', { endpoint })
   }
 
-  public async isObjectAvailable(contentHash: string): Promise<boolean> {
-    this.logger.debug('Checking object availibility', { contentHash })
+  public async isObjectAvailable(objectId: string): Promise<boolean> {
+    this.logger.debug('Checking object availability', { objectId })
     try {
-      await this.publicApi.publicApiGetFileHeaders(contentHash)
-      this.logger.debug('Data object available', { contentHash })
+      await this.filesApi.publicApiGetFileHeaders(objectId)
+      this.logger.debug('Data object available', { objectId })
       return true
     } catch (err) {
       if (axios.isAxiosError(err)) {
-        this.logger.debug('Data object not available', { err: parseAxiosError(err) })
+        this.logger.debug('Data object not available', { objectId, err: parseAxiosError(err) })
         return false
       }
-      this.logger.error('Unexpected error while requesting data object', { err })
+      this.logger.error('Unexpected error while requesting data object', { objectId, err })
       throw err
     }
   }
 
-  public async downloadObject(contentHash: string, startAt?: number): Promise<StorageNodeDownloadResponse> {
-    this.logger.verbose('Sending download request', { contentHash, startAt })
+  public async downloadObject(objectId: string, startAt?: number): Promise<StorageNodeDownloadResponse> {
+    this.logger.verbose('Sending download request', { objectId, startAt })
     const options: AxiosRequestConfig = {
       responseType: 'stream',
     }
     if (startAt) {
       options.headers.Range = `bytes=${startAt}-`
     }
-    return this.publicApi.publicApiGetFile(contentHash, options)
+    return this.filesApi.publicApiGetFile(objectId, options)
   }
 }

+ 366 - 47
distributor-node/src/services/networking/storage-node/generated/api.ts

@@ -21,6 +21,37 @@ import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObj
 // @ts-ignore
 import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } from './base';
 
+/**
+ * 
+ * @export
+ * @interface DataStatsResponse
+ */
+export interface DataStatsResponse {
+    /**
+     * 
+     * @type {number}
+     * @memberof DataStatsResponse
+     */
+    totalSize: number;
+    /**
+     * 
+     * @type {number}
+     * @memberof DataStatsResponse
+     */
+    objectNumber: number;
+    /**
+     * 
+     * @type {number}
+     * @memberof DataStatsResponse
+     */
+    tempDirSize?: number;
+    /**
+     * 
+     * @type {number}
+     * @memberof DataStatsResponse
+     */
+    tempDownloads?: number;
+}
 /**
  * 
  * @export
@@ -122,12 +153,31 @@ export interface TokenRequestData {
      */
     bagId: string;
 }
+/**
+ * 
+ * @export
+ * @interface VersionResponse
+ */
+export interface VersionResponse {
+    /**
+     * 
+     * @type {string}
+     * @memberof VersionResponse
+     */
+    version: string;
+    /**
+     * 
+     * @type {string}
+     * @memberof VersionResponse
+     */
+    userAgent?: string;
+}
 
 /**
- * PublicApi - axios parameter creator
+ * FilesApi - axios parameter creator
  * @export
  */
-export const PublicApiAxiosParamCreator = function (configuration?: Configuration) {
+export const FilesApiAxiosParamCreator = function (configuration?: Configuration) {
     return {
         /**
          * Get auth token from a server.
@@ -164,15 +214,15 @@ export const PublicApiAxiosParamCreator = function (configuration?: Configuratio
         },
         /**
          * Returns a media file.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicApiGetFile: async (cid: string, options: any = {}): Promise<RequestArgs> => {
-            // verify required parameter 'cid' is not null or undefined
-            assertParamExists('publicApiGetFile', 'cid', cid)
-            const localVarPath = `/files/{cid}`
-                .replace(`{${"cid"}}`, encodeURIComponent(String(cid)));
+        publicApiGetFile: async (id: string, options: any = {}): Promise<RequestArgs> => {
+            // verify required parameter 'id' is not null or undefined
+            assertParamExists('publicApiGetFile', 'id', id)
+            const localVarPath = `/files/{id}`
+                .replace(`{${"id"}}`, encodeURIComponent(String(id)));
             // use dummy base URL string because the URL constructor only accepts absolute URLs.
             const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
             let baseOptions;
@@ -197,15 +247,15 @@ export const PublicApiAxiosParamCreator = function (configuration?: Configuratio
         },
         /**
          * Returns a media file headers.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicApiGetFileHeaders: async (cid: string, options: any = {}): Promise<RequestArgs> => {
-            // verify required parameter 'cid' is not null or undefined
-            assertParamExists('publicApiGetFileHeaders', 'cid', cid)
-            const localVarPath = `/files/{cid}`
-                .replace(`{${"cid"}}`, encodeURIComponent(String(cid)));
+        publicApiGetFileHeaders: async (id: string, options: any = {}): Promise<RequestArgs> => {
+            // verify required parameter 'id' is not null or undefined
+            assertParamExists('publicApiGetFileHeaders', 'id', id)
+            const localVarPath = `/files/{id}`
+                .replace(`{${"id"}}`, encodeURIComponent(String(id)));
             // use dummy base URL string because the URL constructor only accepts absolute URLs.
             const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
             let baseOptions;
@@ -294,11 +344,11 @@ export const PublicApiAxiosParamCreator = function (configuration?: Configuratio
 };
 
 /**
- * PublicApi - functional programming interface
+ * FilesApi - functional programming interface
  * @export
  */
-export const PublicApiFp = function(configuration?: Configuration) {
-    const localVarAxiosParamCreator = PublicApiAxiosParamCreator(configuration)
+export const FilesApiFp = function(configuration?: Configuration) {
+    const localVarAxiosParamCreator = FilesApiAxiosParamCreator(configuration)
     return {
         /**
          * Get auth token from a server.
@@ -312,22 +362,22 @@ export const PublicApiFp = function(configuration?: Configuration) {
         },
         /**
          * Returns a media file.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        async publicApiGetFile(cid: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
-            const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFile(cid, options);
+        async publicApiGetFile(id: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFile(id, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
         /**
          * Returns a media file headers.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        async publicApiGetFileHeaders(cid: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
-            const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFileHeaders(cid, options);
+        async publicApiGetFileHeaders(id: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFileHeaders(id, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
         /**
@@ -347,11 +397,11 @@ export const PublicApiFp = function(configuration?: Configuration) {
 };
 
 /**
- * PublicApi - factory interface
+ * FilesApi - factory interface
  * @export
  */
-export const PublicApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
-    const localVarFp = PublicApiFp(configuration)
+export const FilesApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
+    const localVarFp = FilesApiFp(configuration)
     return {
         /**
          * Get auth token from a server.
@@ -364,21 +414,21 @@ export const PublicApiFactory = function (configuration?: Configuration, basePat
         },
         /**
          * Returns a media file.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicApiGetFile(cid: string, options?: any): AxiosPromise<any> {
-            return localVarFp.publicApiGetFile(cid, options).then((request) => request(axios, basePath));
+        publicApiGetFile(id: string, options?: any): AxiosPromise<any> {
+            return localVarFp.publicApiGetFile(id, options).then((request) => request(axios, basePath));
         },
         /**
          * Returns a media file headers.
-         * @param {string} cid Content ID
+         * @param {string} id Data object ID
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
          */
-        publicApiGetFileHeaders(cid: string, options?: any): AxiosPromise<void> {
-            return localVarFp.publicApiGetFileHeaders(cid, options).then((request) => request(axios, basePath));
+        publicApiGetFileHeaders(id: string, options?: any): AxiosPromise<void> {
+            return localVarFp.publicApiGetFileHeaders(id, options).then((request) => request(axios, basePath));
         },
         /**
          * Upload data
@@ -396,43 +446,43 @@ export const PublicApiFactory = function (configuration?: Configuration, basePat
 };
 
 /**
- * PublicApi - object-oriented interface
+ * FilesApi - object-oriented interface
  * @export
- * @class PublicApi
+ * @class FilesApi
  * @extends {BaseAPI}
  */
-export class PublicApi extends BaseAPI {
+export class FilesApi extends BaseAPI {
     /**
      * Get auth token from a server.
      * @param {TokenRequest} [tokenRequest] Token request parameters,
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
-     * @memberof PublicApi
+     * @memberof FilesApi
      */
     public publicApiAuthTokenForUploading(tokenRequest?: TokenRequest, options?: any) {
-        return PublicApiFp(this.configuration).publicApiAuthTokenForUploading(tokenRequest, options).then((request) => request(this.axios, this.basePath));
+        return FilesApiFp(this.configuration).publicApiAuthTokenForUploading(tokenRequest, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**
      * Returns a media file.
-     * @param {string} cid Content ID
+     * @param {string} id Data object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
-     * @memberof PublicApi
+     * @memberof FilesApi
      */
-    public publicApiGetFile(cid: string, options?: any) {
-        return PublicApiFp(this.configuration).publicApiGetFile(cid, options).then((request) => request(this.axios, this.basePath));
+    public publicApiGetFile(id: string, options?: any) {
+        return FilesApiFp(this.configuration).publicApiGetFile(id, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**
      * Returns a media file headers.
-     * @param {string} cid Content ID
+     * @param {string} id Data object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
-     * @memberof PublicApi
+     * @memberof FilesApi
      */
-    public publicApiGetFileHeaders(cid: string, options?: any) {
-        return PublicApiFp(this.configuration).publicApiGetFileHeaders(cid, options).then((request) => request(this.axios, this.basePath));
+    public publicApiGetFileHeaders(id: string, options?: any) {
+        return FilesApiFp(this.configuration).publicApiGetFileHeaders(id, options).then((request) => request(this.axios, this.basePath));
     }
 
     /**
@@ -443,10 +493,279 @@ export class PublicApi extends BaseAPI {
      * @param {any} [file] Data file
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
-     * @memberof PublicApi
+     * @memberof FilesApi
      */
     public publicApiUploadFile(dataObjectId: string, storageBucketId: string, bagId: string, file?: any, options?: any) {
-        return PublicApiFp(this.configuration).publicApiUploadFile(dataObjectId, storageBucketId, bagId, file, options).then((request) => request(this.axios, this.basePath));
+        return FilesApiFp(this.configuration).publicApiUploadFile(dataObjectId, storageBucketId, bagId, file, options).then((request) => request(this.axios, this.basePath));
+    }
+}
+
+
+/**
+ * StateApi - axios parameter creator
+ * @export
+ */
+export const StateApiAxiosParamCreator = function (configuration?: Configuration) {
+    return {
+        /**
+         * Returns all local data objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetAllLocalDataObjects: async (options: any = {}): Promise<RequestArgs> => {
+            const localVarPath = `/state/data-objects`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+    
+            setSearchParams(localVarUrlObj, localVarQueryParameter, options.query);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * Returns local data objects for the bag.
+         * @param {string} bagId Bag ID
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetLocalDataObjectsByBagId: async (bagId: string, options: any = {}): Promise<RequestArgs> => {
+            // verify required parameter 'bagId' is not null or undefined
+            assertParamExists('stateApiGetLocalDataObjectsByBagId', 'bagId', bagId)
+            const localVarPath = `/state/bags/{bagId}/data-objects`
+                .replace(`{${"bagId"}}`, encodeURIComponent(String(bagId)));
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+    
+            setSearchParams(localVarUrlObj, localVarQueryParameter, options.query);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * Returns local uploading directory stats.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetLocalDataStats: async (options: any = {}): Promise<RequestArgs> => {
+            const localVarPath = `/state/data`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+    
+            setSearchParams(localVarUrlObj, localVarQueryParameter, options.query);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * Returns server version.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetVersion: async (options: any = {}): Promise<RequestArgs> => {
+            const localVarPath = `/version`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+    
+            setSearchParams(localVarUrlObj, localVarQueryParameter, options.query);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+    }
+};
+
+/**
+ * StateApi - functional programming interface
+ * @export
+ */
+export const StateApiFp = function(configuration?: Configuration) {
+    const localVarAxiosParamCreator = StateApiAxiosParamCreator(configuration)
+    return {
+        /**
+         * Returns all local data objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async stateApiGetAllLocalDataObjects(options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Array<string>>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.stateApiGetAllLocalDataObjects(options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
+        /**
+         * Returns local data objects for the bag.
+         * @param {string} bagId Bag ID
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async stateApiGetLocalDataObjectsByBagId(bagId: string, options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Array<string>>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.stateApiGetLocalDataObjectsByBagId(bagId, options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
+        /**
+         * Returns local uploading directory stats.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async stateApiGetLocalDataStats(options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<DataStatsResponse>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.stateApiGetLocalDataStats(options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
+        /**
+         * Returns server version.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async stateApiGetVersion(options?: any): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<VersionResponse>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.stateApiGetVersion(options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
+    }
+};
+
+/**
+ * StateApi - factory interface
+ * @export
+ */
+export const StateApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
+    const localVarFp = StateApiFp(configuration)
+    return {
+        /**
+         * Returns all local data objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetAllLocalDataObjects(options?: any): AxiosPromise<Array<string>> {
+            return localVarFp.stateApiGetAllLocalDataObjects(options).then((request) => request(axios, basePath));
+        },
+        /**
+         * Returns local data objects for the bag.
+         * @param {string} bagId Bag ID
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetLocalDataObjectsByBagId(bagId: string, options?: any): AxiosPromise<Array<string>> {
+            return localVarFp.stateApiGetLocalDataObjectsByBagId(bagId, options).then((request) => request(axios, basePath));
+        },
+        /**
+         * Returns local uploading directory stats.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetLocalDataStats(options?: any): AxiosPromise<DataStatsResponse> {
+            return localVarFp.stateApiGetLocalDataStats(options).then((request) => request(axios, basePath));
+        },
+        /**
+         * Returns server version.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        stateApiGetVersion(options?: any): AxiosPromise<VersionResponse> {
+            return localVarFp.stateApiGetVersion(options).then((request) => request(axios, basePath));
+        },
+    };
+};
+
+/**
+ * StateApi - object-oriented interface
+ * @export
+ * @class StateApi
+ * @extends {BaseAPI}
+ */
+export class StateApi extends BaseAPI {
+    /**
+     * Returns all local data objects.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof StateApi
+     */
+    public stateApiGetAllLocalDataObjects(options?: any) {
+        return StateApiFp(this.configuration).stateApiGetAllLocalDataObjects(options).then((request) => request(this.axios, this.basePath));
+    }
+
+    /**
+     * Returns local data objects for the bag.
+     * @param {string} bagId Bag ID
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof StateApi
+     */
+    public stateApiGetLocalDataObjectsByBagId(bagId: string, options?: any) {
+        return StateApiFp(this.configuration).stateApiGetLocalDataObjectsByBagId(bagId, options).then((request) => request(this.axios, this.basePath));
+    }
+
+    /**
+     * Returns local uploading directory stats.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof StateApi
+     */
+    public stateApiGetLocalDataStats(options?: any) {
+        return StateApiFp(this.configuration).stateApiGetLocalDataStats(options).then((request) => request(this.axios, this.basePath));
+    }
+
+    /**
+     * Returns server version.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof StateApi
+     */
+    public stateApiGetVersion(options?: any) {
+        return StateApiFp(this.configuration).stateApiGetVersion(options).then((request) => request(this.axios, this.basePath));
     }
 }
 

+ 8 - 14
distributor-node/src/services/parsers/BagIdParserService.ts

@@ -1,14 +1,8 @@
-import { BagId } from '@joystream/types/storage'
-import { registry } from '@joystream/types'
-import { createType } from '@polkadot/types'
-import { InterfaceTypes } from '@polkadot/types/types'
-import { WorkingGroup } from '@joystream/types/common'
+import { BagId, StaticBagId } from '@joystream/types/storage'
+import { createType } from '@joystream/types'
+import { WorkingGroup, WorkingGroupKey } from '@joystream/types/common'
 
 export class BagIdParserService {
-  private createType<T extends keyof InterfaceTypes>(type: T, value: any) {
-    return createType(registry, type, value)
-  }
-
   public parseBagId(bagId: string): BagId {
     const bagIdParts = bagId.toLowerCase().split(':')
 
@@ -31,8 +25,8 @@ export class BagIdParserService {
     // Try to construct static council bag ID.
     if (bagIdParts[1] === 'council') {
       if (bagIdParts.length === 2) {
-        const staticBagId = this.createType('StaticBagId', 'Council')
-        const constructedBagId = this.createType('BagId', {
+        const staticBagId = createType<StaticBagId, 'StaticBagId'>('StaticBagId', 'Council')
+        const constructedBagId = createType<BagId, 'BagId'>('BagId', {
           'Static': staticBagId,
         })
 
@@ -42,13 +36,13 @@ export class BagIdParserService {
 
     // Try to construct static working group bag ID.
     if (bagIdParts[1] === 'wg' && bagIdParts.length === 3) {
-      const groups = Object.keys(WorkingGroup.typeDefinitions)
+      const groups = Object.keys(WorkingGroup.typeDefinitions) as WorkingGroupKey[]
       const inputGroup = bagIdParts[2]
 
       if (groups.find((g) => g.toLocaleLowerCase() === inputGroup)) {
-        return this.createType('BagId', {
+        return createType<BagId, 'BagId'>('BagId', {
           Static: {
-            WorkingGroup: inputGroup,
+            WorkingGroup: inputGroup as WorkingGroupKey,
           },
         })
       }

+ 12 - 2
distributor-node/src/services/parsers/ConfigParserService.ts

@@ -4,7 +4,7 @@ import fs from 'fs'
 import path from 'path'
 import YAML from 'yaml'
 import _ from 'lodash'
-import configSchema, { bytesizeUnits } from '../validation/schemas/configSchema'
+import configSchema, { bytesizeUnits } from '../../schemas/configSchema'
 import { JSONSchema4 } from 'json-schema'
 
 const MIN_CACHE_SIZE = 20 * Math.pow(1024, 3)
@@ -17,7 +17,15 @@ export class ConfigParserService {
   }
 
   public resolveConfigDirectoryPaths(paths: Config['directories'], configFilePath: string): Config['directories'] {
-    return _.mapValues(paths, (v) => path.resolve(path.dirname(configFilePath), v))
+    return _.mapValues(paths, (v) =>
+      typeof v === 'string' ? path.resolve(path.dirname(configFilePath), v) : v
+    ) as Config['directories']
+  }
+
+  public resolveConfigKeysPaths(keys: Config['keys'], configFilePath: string): Config['keys'] {
+    return keys.map((k) =>
+      'keyfile' in k ? { keyfile: path.resolve(path.dirname(configFilePath), k.keyfile) } : k
+    ) as Config['keys']
   }
 
   private parseBytesize(bytesize: string) {
@@ -94,6 +102,7 @@ export class ConfigParserService {
 
     // Normalize values
     const directories = this.resolveConfigDirectoryPaths(configJson.directories, configPath)
+    const keys = this.resolveConfigKeysPaths(configJson.keys, configPath)
     const storageLimit = this.parseBytesize(configJson.limits.storage)
 
     if (storageLimit < MIN_CACHE_SIZE) {
@@ -103,6 +112,7 @@ export class ConfigParserService {
     const parsedConfig: Config = {
       ...configJson,
       directories,
+      keys,
       limits: {
         ...configJson.limits,
         storage: storageLimit,

+ 38 - 41
distributor-node/src/services/server/controllers/public.ts

@@ -37,19 +37,18 @@ export class PublicApiController {
     req: express.Request<AssetRouteParams>,
     res: express.Response,
     next: express.NextFunction,
-    contentHash: string
+    objectId: string
   ): void {
-    // TODO: FIXME: Actually check if we are still supposed to serve it and just remove after responding if not
-    // TODO: Limit the number of times useContent is trigerred for similar requests
+    // TODO: Limit the number of times useContent is triggered for similar requests?
     // (for example: same ip, 3 different request within a minute = 1 request)
-    this.stateCache.useContent(contentHash)
+    this.stateCache.useContent(objectId)
 
-    const path = this.content.path(contentHash)
+    const path = this.content.path(objectId)
     const stream = send(req, path, {
       maxAge: CACHED_MAX_AGE,
       lastModified: false,
     })
-    const mimeType = this.stateCache.getContentMimeType(contentHash)
+    const mimeType = this.stateCache.getContentMimeType(objectId)
 
     stream.on('headers', (res) => {
       res.setHeader('x-cache', 'hit')
@@ -77,9 +76,9 @@ export class PublicApiController {
     req: express.Request<AssetRouteParams>,
     res: express.Response,
     next: express.NextFunction,
-    contentHash: string
+    objectId: string
   ) {
-    const pendingDownload = this.stateCache.getPendingDownload(contentHash)
+    const pendingDownload = this.stateCache.getPendingDownload(objectId)
     if (!pendingDownload) {
       throw new Error('Trying to serve pending download asset that is not pending download!')
     }
@@ -94,14 +93,15 @@ export class PublicApiController {
     res.setHeader('cache-control', `max-age=${PENDING_MAX_AGE}, must-revalidate`)
 
     // Handle request using pending download file if this makes sense in current context:
-    if (this.content.exists(contentHash)) {
+    if (this.content.exists(objectId)) {
+      const partiallyDownloadedContentSize = this.content.fileSize(objectId)
       const range = req.range(objectSize)
       if (!range || range === -1 || range === -2 || range.length !== 1 || range.type !== 'bytes') {
         // Range is not provided / invalid - serve data from pending download file
-        return this.servePendingDownloadAssetFromFile(req, res, next, contentHash, objectSize)
-      } else if (range[0].start === 0) {
-        // Range starts from the beginning of the content - serve data from pending download file
-        return this.servePendingDownloadAssetFromFile(req, res, next, contentHash, objectSize, range[0].end)
+        return this.servePendingDownloadAssetFromFile(req, res, next, objectId, objectSize)
+      } else if (range[0].start <= partiallyDownloadedContentSize) {
+        // Range starts at the already downloaded part of the content - serve data from pending download file
+        return this.servePendingDownloadAssetFromFile(req, res, next, objectId, objectSize, range[0])
       }
     }
 
@@ -115,21 +115,21 @@ export class PublicApiController {
     req: express.Request<AssetRouteParams>,
     res: express.Response,
     next: express.NextFunction,
-    contentHash: string,
+    objectId: string,
     objectSize: number,
-    rangeEnd?: number
+    range?: { start: number; end: number }
   ) {
-    const isRange = rangeEnd !== undefined
-    this.logger.verbose(`Serving pending download asset from file`, { contentHash, isRange, objectSize, rangeEnd })
-    const stream = this.content.createContinousReadStream(contentHash, {
-      end: isRange ? rangeEnd || 0 : objectSize - 1,
+    this.logger.verbose(`Serving pending download asset from file`, { objectId, objectSize, range })
+    const stream = this.content.createContinousReadStream(objectId, {
+      start: range?.start,
+      end: range !== undefined ? range.end : objectSize - 1,
     })
-    res.status(isRange ? 206 : 200)
+    res.status(range !== undefined ? 206 : 200)
     res.setHeader('accept-ranges', 'bytes')
     res.setHeader('x-data-source', 'local')
     res.setHeader('content-disposition', 'inline')
-    if (isRange) {
-      res.setHeader('content-range', `bytes 0-${rangeEnd}/${objectSize}`)
+    if (range !== undefined) {
+      res.setHeader('content-range', `bytes ${range.start}-${range.end}/${objectSize}`)
     }
     stream.pipe(res)
     req.on('close', () => {
@@ -140,20 +140,19 @@ export class PublicApiController {
 
   public async assetHead(req: express.Request<AssetRouteParams>, res: express.Response): Promise<void> {
     const objectId = req.params.objectId
-    const contentHash = this.stateCache.getObjectContentHash(objectId)
-    const pendingDownload = contentHash && this.stateCache.getPendingDownload(contentHash)
+    const pendingDownload = this.stateCache.getPendingDownload(objectId)
 
     res.setHeader('timing-allow-origin', '*')
     res.setHeader('accept-ranges', 'bytes')
     res.setHeader('content-disposition', 'inline')
 
-    if (contentHash && !pendingDownload && this.content.exists(contentHash)) {
+    if (!pendingDownload && this.content.exists(objectId)) {
       res.status(200)
       res.setHeader('x-cache', 'hit')
       res.setHeader('cache-control', `max-age=${CACHED_MAX_AGE}`)
-      res.setHeader('content-type', this.stateCache.getContentMimeType(contentHash) || DEFAULT_CONTENT_TYPE)
-      res.setHeader('content-length', this.content.fileSize(contentHash))
-    } else if (contentHash && pendingDownload) {
+      res.setHeader('content-type', this.stateCache.getContentMimeType(objectId) || DEFAULT_CONTENT_TYPE)
+      res.setHeader('content-length', this.content.fileSize(objectId))
+    } else if (pendingDownload) {
       res.status(200)
       res.setHeader('x-cache', 'pending')
       res.setHeader('cache-control', `max-age=${PENDING_MAX_AGE}, must-revalidate`)
@@ -181,24 +180,22 @@ export class PublicApiController {
     next: express.NextFunction
   ): Promise<void> {
     const objectId = req.params.objectId
-    const contentHash = this.stateCache.getObjectContentHash(objectId)
-    const pendingDownload = contentHash && this.stateCache.getPendingDownload(contentHash)
+    const pendingDownload = this.stateCache.getPendingDownload(objectId)
 
     this.logger.verbose('Data object requested', {
       objectId,
-      contentHash,
       status: pendingDownload && pendingDownload.status,
     })
 
     res.setHeader('timing-allow-origin', '*')
 
-    if (contentHash && !pendingDownload && this.content.exists(contentHash)) {
-      this.logger.verbose('Requested file found in filesystem', { path: this.content.path(contentHash) })
-      return this.serveAssetFromFilesystem(req, res, next, contentHash)
-    } else if (contentHash && pendingDownload) {
-      this.logger.verbose('Requested file is in pending download state', { path: this.content.path(contentHash) })
+    if (!pendingDownload && this.content.exists(objectId)) {
+      this.logger.verbose('Requested file found in filesystem', { path: this.content.path(objectId) })
+      return this.serveAssetFromFilesystem(req, res, next, objectId)
+    } else if (pendingDownload) {
+      this.logger.verbose('Requested file is in pending download state', { path: this.content.path(objectId) })
       res.setHeader('x-cache', 'pending')
-      return this.servePendingDownloadAsset(req, res, next, contentHash)
+      return this.servePendingDownloadAsset(req, res, next, objectId)
     } else {
       this.logger.verbose('Requested file not found in filesystem')
       const objectInfo = await this.networking.dataObjectInfo(objectId)
@@ -218,18 +215,18 @@ export class PublicApiController {
         if (!objectData) {
           throw new Error('Missing data object data')
         }
-        const { contentHash, size } = objectData
+        const { size, contentHash } = objectData
 
         const downloadResponse = await this.networking.downloadDataObject({ objectData })
 
         if (downloadResponse) {
          // Note: Await will only wait until the file is created, so we may serve the response from it
-          await this.content.handleNewContent(contentHash, size, downloadResponse.data)
+          await this.content.handleNewContent(objectId, size, contentHash, downloadResponse.data)
           res.setHeader('x-cache', 'miss')
         } else {
           res.setHeader('x-cache', 'pending')
         }
-        return this.servePendingDownloadAsset(req, res, next, contentHash)
+        return this.servePendingDownloadAsset(req, res, next, objectId)
       }
     }
   }
@@ -237,7 +234,7 @@ export class PublicApiController {
   public async status(req: express.Request, res: express.Response<StatusResponse>): Promise<void> {
     const data: StatusResponse = {
       id: this.config.id,
-      objectsInCache: this.stateCache.getCachedContentLength(),
+      objectsInCache: this.stateCache.getCachedObjectsCount(),
       storageLimit: this.config.limits.storage,
       storageUsed: this.content.usedSpace,
       uptime: Math.floor(process.uptime()),

+ 1 - 1
distributor-node/src/services/validation/ValidationService.ts

@@ -1,5 +1,5 @@
 import Ajv from 'ajv'
-import { SchemaKey, schemas, TypeBySchemaKey } from './schemas'
+import { SchemaKey, schemas, TypeBySchemaKey } from '../../schemas'
 
 class ValidationError extends Error {
   public readonly errors: string[]

+ 0 - 56
distributor-node/src/services/validation/schemas/configSchema.ts

@@ -1,56 +0,0 @@
-import { JSONSchema4 } from 'json-schema'
-import { strictObject } from './utils'
-import winston from 'winston'
-
-export const bytesizeUnits = ['B', 'K', 'M', 'G', 'T']
-export const bytesizeRegex = new RegExp(`^[0-9]+(${bytesizeUnits.join('|')})$`)
-
-export const configSchema: JSONSchema4 = {
-  type: 'object',
-  required: ['id', 'endpoints', 'directories', 'buckets', 'keys', 'port', 'workerId', 'limits'],
-  additionalProperties: false,
-  properties: {
-    id: { type: 'string' },
-    endpoints: {
-      type: 'object',
-      additionalProperties: false,
-      required: ['queryNode', 'substrateNode'],
-      properties: {
-        queryNode: { type: 'string' },
-        substrateNode: { type: 'string' },
-        elasticSearch: { type: 'string' },
-      },
-    },
-    directories: strictObject({
-      data: { type: 'string' },
-      cache: { type: 'string' },
-      logs: { type: 'string' },
-    }),
-    log: {
-      type: 'object',
-      additionalProperties: false,
-      properties: {
-        file: { type: 'string', enum: [...Object.keys(winston.config.npm.levels), 'off'] },
-        console: { type: 'string', enum: [...Object.keys(winston.config.npm.levels), 'off'] },
-        elastic: { type: 'string', enum: [...Object.keys(winston.config.npm.levels), 'off'] },
-      },
-    },
-    limits: strictObject({
-      storage: { type: 'string', pattern: bytesizeRegex.source },
-      maxConcurrentStorageNodeDownloads: { type: 'integer', minimum: 1 },
-      maxConcurrentOutboundConnections: { type: 'integer', minimum: 1 },
-      outboundRequestsTimeout: { type: 'integer', minimum: 1 },
-    }),
-    port: { type: 'integer', minimum: 0 },
-    keys: { type: 'array', items: { type: 'string' }, minItems: 1 },
-    buckets: {
-      oneOf: [
-        { type: 'array', items: { type: 'integer', minimum: 0 }, minItems: 1 },
-        { type: 'string', enum: ['all'] },
-      ],
-    },
-    workerId: { type: 'integer', minimum: 0 },
-  },
-}
-
-export default configSchema

+ 0 - 23
distributor-node/src/services/validation/schemas/familyMetadataSchema.ts

@@ -1,23 +0,0 @@
-import { JSONSchema4 } from 'json-schema'
-
-export const familyMetadataSchema: JSONSchema4 = {
-  type: 'object',
-  additionalProperties: false,
-  properties: {
-    region: { type: 'string' },
-    description: { type: 'string' },
-    boundary: {
-      type: 'array',
-      items: {
-        type: 'object',
-        additionalProperties: false,
-        properties: {
-          latitude: { type: 'number', minimum: -180, maximum: 180 },
-          longitude: { type: 'number', minimum: -180, maximum: 180 },
-        },
-      },
-    },
-  },
-}
-
-export default familyMetadataSchema

+ 0 - 10
distributor-node/src/services/validation/schemas/utils.ts

@@ -1,10 +0,0 @@
-import { JSONSchema4 } from 'json-schema'
-
-export function strictObject(properties: Exclude<JSONSchema4['properties'], undefined>): JSONSchema4 {
-  return {
-    type: 'object',
-    additionalProperties: false,
-    required: Object.keys(properties),
-    properties,
-  }
-}

+ 5 - 0
distributor-node/src/types/app.ts

@@ -0,0 +1,5 @@
+export type AppIntervals = {
+  saveCacheState: NodeJS.Timeout
+  checkStorageNodeResponseTimes: NodeJS.Timeout
+  cacheCleanup: NodeJS.Timeout
+}

+ 1 - 1
distributor-node/src/types/config.ts

@@ -1,4 +1,4 @@
-import { ConfigJson } from './generated/ConfigJson'
+import { DistributorNodeConfiguration as ConfigJson } from './generated/ConfigJson'
 import { DeepReadonly } from './common'
 
 export type Config = Omit<ConfigJson, 'limits'> & {

+ 122 - 7
distributor-node/src/types/generated/ConfigJson.d.ts

@@ -5,31 +5,146 @@
  * and run json-schema-to-typescript to regenerate this file.
  */
 
-export interface ConfigJson {
+/**
+ * List of distribution bucket ids
+ */
+export type BucketIds = [number, ...number[]]
+/**
+ * Distribute all buckets assigned to worker specified in `workerId`
+ */
+export type AllBuckets = 'all'
+
+/**
+ * Configuration schema for distributor CLI and node
+ */
+export interface DistributorNodeConfiguration {
+  /**
+   * Node identifier used when sending elasticsearch logs and exposed on /status endpoint
+   */
   id: string
+  /**
+   * Specifies external endpoints that the distributor node will connect to
+   */
   endpoints: {
+    /**
+     * Query node graphql server uri (for example: http://localhost:8081/graphql)
+     */
     queryNode: string
-    substrateNode: string
+    /**
+     * Joystream node websocket api uri (for example: ws://localhost:9944)
+     */
+    joystreamNodeWs: string
+    /**
+     * Elasticsearch uri used for submitting the distributor node logs (if enabled via `log.elastic`)
+     */
     elasticSearch?: string
   }
+  /**
+   * Specifies paths where node's data will be stored
+   */
   directories: {
-    data: string
-    cache: string
-    logs: string
+    /**
+     * Path to a directory where all the cached assets will be stored
+     */
+    assets: string
+    /**
+     * Path to a directory where information about the current cache state will be stored (LRU-SP cache data, stored assets mime types etc.)
+     */
+    cacheState: string
+    /**
+     * Path to a directory where logs will be stored if logging to a file was enabled (via `log.file`).
+     */
+    logs?: string
   }
+  /**
+   * Specifies minimum log levels by supported log outputs
+   */
   log?: {
+    /**
+     * Minimum level of logs written to a file specified in `directories.logs`
+     */
     file?: 'error' | 'warn' | 'info' | 'http' | 'verbose' | 'debug' | 'silly' | 'off'
+    /**
+     * Minimum level of logs outputted to a console
+     */
     console?: 'error' | 'warn' | 'info' | 'http' | 'verbose' | 'debug' | 'silly' | 'off'
+    /**
+     * Minimum level of logs sent to elasticsearch endpoint specified in `endpoints.elasticSearch`
+     */
     elastic?: 'error' | 'warn' | 'info' | 'http' | 'verbose' | 'debug' | 'silly' | 'off'
   }
+  /**
+   * Specifies node limits w.r.t. storage, outbound connections etc.
+   */
   limits: {
+    /**
+     * Maximum total size of all (cached) assets stored in `directories.assets`
+     */
     storage: string
+    /**
+     * Maximum number of concurrent downloads from the storage node(s)
+     */
     maxConcurrentStorageNodeDownloads: number
+    /**
+     * Maximum number of total simultaneous outbound connections to storage node(s)
+     */
     maxConcurrentOutboundConnections: number
+    /**
+     * Timeout for all outbound storage node http requests in milliseconds
+     */
     outboundRequestsTimeout: number
   }
+  /**
+   * Specifies how often periodic tasks (for example cache cleanup) are executed by the node.
+   */
+  intervals: {
+    /**
+     * How often, in seconds, will the cache state be saved in `directories.cacheState`. Independently of the specified interval, the node will always try to save cache state before exiting.
+     */
+    saveCacheState: number
+    /**
+     * How often, in seconds, will the distributor node attempt to send requests to all current storage node endpoints in order to check how quickly they respond. The node will never make more than 10 such requests concurrently.
+     */
+    checkStorageNodeResponseTimes: number
+    /**
+     * How often, in seconds, will the distributor node fetch data about all its distribution obligations from the query node and remove all the no-longer assigned data objects from local storage and cache state
+     */
+    cacheCleanup: number
+  }
+  /**
+   * Distributor node http server port
+   */
   port: number
-  keys: [string, ...string[]]
-  buckets: [number, ...number[]] | 'all'
+  /**
+   * Specifies the keys available within distributor node CLI.
+   */
+  keys: [SubstrateUri | MnemonicPhrase | JSONBackupFile, ...(SubstrateUri | MnemonicPhrase | JSONBackupFile)[]]
+  /**
+   * Specifies the buckets distributed by the node
+   */
+  buckets: BucketIds | AllBuckets
+  /**
+   * ID of the node operator (distribution working group worker)
+   */
   workerId: number
 }
+/**
+ * Keypair's substrate uri (for example: //Alice)
+ */
+export interface SubstrateUri {
+  type?: 'ed25519' | 'sr25519' | 'ecdsa'
+  suri: string
+}
+/**
+ * Mnemonic phrase
+ */
+export interface MnemonicPhrase {
+  type?: 'ed25519' | 'sr25519' | 'ecdsa'
+  mnemonic: string
+}
+/**
+ * Path to JSON backup file from polkadot signer / polkadot/apps (relative to config file path)
+ */
+export interface JSONBackupFile {
+  keyfile: string
+}

+ 13 - 4
distributor-node/src/types/generated/FamilyMetadataJson.d.ts

@@ -8,8 +8,17 @@
 export interface FamilyMetadataJson {
   region?: string
   description?: string
-  boundary?: {
-    latitude?: number
-    longitude?: number
-  }[]
+  areas?: (
+    | {
+        continentCode: 'AF' | 'AN' | 'AS' | 'EU' | 'NA' | 'OC' | 'SA'
+      }
+    | {
+        countryCode: string
+      }
+    | {
+        subdivisionCode: string
+      }
+    | {}
+  )[]
+  latencyTestTargets?: string[]
 }

+ 6 - 7
docker-compose.yml

@@ -59,22 +59,21 @@ services:
     environment:
       # JOYSTREAM_DISTRIBUTOR__ID: node-id
       JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE: http://${GRAPHQL_SERVER_HOST}:${GRAPHQL_SERVER_PORT}/graphql
-    #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__SUBSTRATE_NODE: sn-endpoint
+    #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS: sn-endpoint
     #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__ELASTIC_SEARCH: es-endpoint
-    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__DATA: data-dir
-    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__CACHE: cache-dir
+    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__ASSETS: assets-dir
+    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__CACHE_STATE: cache-state-dir
     #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__LOGS: logs-dir
     #   JOYSTREAM_DISTRIBUTOR__LOG__CONSOLE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOG__FILE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOG__ELASTIC: "off"
     #   JOYSTREAM_DISTRIBUTOR__LIMITS__STORAGE: 50G
     #   JOYSTREAM_DISTRIBUTOR__PORT: 1234
-    #   JOYSTREAM_DISTRIBUTOR__KEYS: "[\"//Bob\"]"
+    #   JOYSTREAM_DISTRIBUTOR__KEYS: "[{\"suri\":\"//Bob\"}]"
     #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[1,2]"
     #   JOYSTREAM_DISTRIBUTOR__WORKER_ID: 0
-    command: ['start']
-      # enable ElasticSearch server
-      # - ELASTIC_SEARCH_HOST=host.docker.internal:9200
+    command: ["start"]
+
   db:
     image: postgres:12
     restart: always

+ 1 - 2
package.json

@@ -48,8 +48,7 @@
     "bn.js": "4.12.0",
     "rxjs": "^7.4.0",
     "typeorm": "0.2.34",
-    "pg": "^8.4.0",
-    "chalk": "^4.0.0"
+    "pg": "^8.4.0"
   },
   "devDependencies": {
     "eslint": "^7.25.0",

+ 2 - 2
storage-node-v2/scripts/run-all-commands.sh

@@ -25,7 +25,7 @@ ${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev
 ${CLI} leader:set-bucket-limits -i=${BUCKET_ID} -o=100 -s=10000000 --dev
 ${CLI} leader:update-bucket-status -i=${BUCKET_ID} --set on --dev
 ${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev
-${CLI} operator:set-metadata -w=0 -i=${BUCKET_ID} -e="http://localhost:3333/api/v1/" --dev
+${CLI} operator:set-metadata -w=0 -i=${BUCKET_ID} -e="http://localhost:3333" --dev
 
 # Create and delete a bucket
 BUCKET_ID=`${CLI} leader:create-bucket -a -n=100 -s=10000000  --dev` # bucketId = 1
@@ -44,4 +44,4 @@ ${CLI} leader:set-global-uploading-status --set on --dev
 ${CLI} leader:set-global-uploading-status --set off --dev
 
 # Blacklist.
-${CLI} leader:update-blacklist -a BLACKLISTED_CID -r SOME_CID --dev
+${CLI} leader:update-blacklist -a BLACKLISTED_CID -r SOME_CID --dev

+ 5 - 4
types/augment/all/defs.json

@@ -522,10 +522,11 @@
         "prize": "u128"
     },
     "Bag": {
-        "objects": "BTreeMap<DataObjectId,DataObject>",
-        "stored_by": "StorageBucketIdSet",
-        "distributed_by": "DistributionBucketIdSet",
-        "deletion_prize": "Option<u128>"
+        "stored_by": "BTreeSet<StorageBucketId>",
+        "distributed_by": "BTreeSet<DistributionBucketId>",
+        "deletion_prize": "Option<u128>",
+        "objects_total_size": "u64",
+        "objects_number": "u64"
     },
     "StorageBucket": {
         "operator_status": "StorageBucketOperatorStatus",

+ 4 - 3
types/augment/all/types.ts

@@ -154,10 +154,11 @@ export interface Backers extends Vec<Backer> {}
 
 /** @name Bag */
 export interface Bag extends Struct {
-  readonly objects: BTreeMap<DataObjectId, DataObject>;
-  readonly stored_by: StorageBucketIdSet;
-  readonly distributed_by: DistributionBucketIdSet;
+  readonly stored_by: BTreeSet<StorageBucketId>;
+  readonly distributed_by: BTreeSet<DistributionBucketId>;
   readonly deletion_prize: Option<u128>;
+  readonly objects_total_size: u64;
+  readonly objects_number: u64;
 }
 
 /** @name BagId */

+ 12 - 8
types/src/storage.ts

@@ -9,7 +9,9 @@ import {
   BTreeMap,
   Option,
   u32,
+  u128,
 } from '@polkadot/types'
+import { Balance } from '@polkadot/types/interfaces'
 import { RegistryTypes } from '@polkadot/types/types'
 import { JoyEnum, JoyStructDecorated, WorkingGroup, BalanceOf } from './common'
 import { MemberId } from './members'
@@ -68,18 +70,20 @@ export class DynamicBagDeletionPrize
 export class DynamicBagDeletionPrizeRecord extends DynamicBagDeletionPrize {}
 
 export type IBag = {
-  objects: BTreeMap<DataObjectId, DataObject>
-  stored_by: StorageBucketIdSet
-  distributed_by: DistributionBucketIdSet
-  deletion_prize: Option<BalanceOf>
+  stored_by: BTreeSet<StorageBucketId>
+  distributed_by: BTreeSet<DistributionBucketId>
+  deletion_prize: Option<Balance>
+  objects_total_size: u64
+  objects_number: u64
 }
 
 export class Bag
   extends JoyStructDecorated({
-    objects: BTreeMap.with(DataObjectId, DataObject),
-    stored_by: StorageBucketIdSet,
-    distributed_by: DistributionBucketIdSet,
-    deletion_prize: Option.with(BalanceOf),
+    stored_by: BTreeSet.with(StorageBucketId),
+    distributed_by: BTreeSet.with(DistributionBucketId),
+    deletion_prize: Option.with(u128),
+    objects_total_size: u64,
+    objects_number: u64,
   })
   implements IBag {}
 

File diff suppressed because it is too large
+ 472 - 33
yarn.lock


Some files were not shown because too many files changed in this diff