Browse Source

Merge branch 'giza_staging' into giza_staging_fix_mergeConflictsLatest

ignazio 3 years ago
parent
commit
be3a6ef1d6
100 changed files with 1675 additions and 1025 deletions
  1. 9 2
      .env
  2. 4 4
      .github/workflows/storage-node.yml
  3. 3 3
      Cargo.lock
  4. 1 1
      build-packages.sh
  5. 0 0
      chain-metadata.json
  6. 1 1
      cli/package.json
  7. 6 5
      colossus.Dockerfile
  8. 2 2
      devops/aws/deploy-infra.sh
  9. 1 1
      devops/aws/deploy-playground-playbook.yml
  10. 4 3
      devops/aws/group_vars/all
  11. 5 5
      devops/aws/roles/common/tasks/chain-spec-node-keys.yml
  12. 1 0
      devops/aws/roles/rpc/tasks/main.yml
  13. 1 0
      devops/aws/roles/validators/tasks/main.yml
  14. 5 5
      devops/aws/templates/Playground-Caddyfile.j2
  15. 2 0
      devops/kubernetes/.gitignore
  16. 157 0
      devops/kubernetes/HOWTO.md
  17. 5 0
      devops/kubernetes/argus/.gitignore
  18. 35 0
      devops/kubernetes/argus/Pulumi.yaml
  19. 123 0
      devops/kubernetes/argus/README.md
  20. 5 0
      devops/kubernetes/argus/docker_dummy/Dockerfile
  21. 229 0
      devops/kubernetes/argus/index.ts
  22. 15 0
      devops/kubernetes/argus/package.json
  23. 18 0
      devops/kubernetes/argus/tsconfig.json
  24. 16 0
      devops/kubernetes/kind-cluster.yaml
  25. 2 43
      devops/kubernetes/node-network/index.ts
  26. 18 0
      devops/kubernetes/node-network/ingress.yaml
  27. 29 0
      devops/kubernetes/pulumi-common/configMap.ts
  28. 2 0
      devops/kubernetes/pulumi-common/index.ts
  29. 43 0
      devops/kubernetes/pulumi-common/volume.ts
  30. 9 0
      devops/kubernetes/query-node/Pulumi.yaml
  31. 2 4
      devops/kubernetes/query-node/README.md
  32. 1 1
      devops/kubernetes/query-node/configMap.ts
  33. 22 18
      devops/kubernetes/query-node/index.ts
  34. 32 61
      devops/kubernetes/query-node/indexerDeployment.ts
  35. 27 0
      devops/kubernetes/query-node/ingress.yaml
  36. 46 19
      devops/kubernetes/query-node/processorDeployment.ts
  37. 16 12
      devops/kubernetes/storage-node/Pulumi.yaml
  38. 11 9
      devops/kubernetes/storage-node/README.md
  39. 5 0
      devops/kubernetes/storage-node/docker_dummy/Dockerfile
  40. 120 111
      devops/kubernetes/storage-node/index.ts
  41. 2 2
      distributor-node/config.yml
  42. 4 4
      distributor-node/docs/api/operator/index.md
  43. 3 3
      distributor-node/docs/api/public/index.md
  44. 2 0
      distributor-node/docs/commands/dev.md
  45. 11 23
      distributor-node/docs/commands/leader.md
  46. 7 1
      distributor-node/docs/commands/node.md
  47. 2 6
      distributor-node/docs/commands/operator.md
  48. 0 7
      distributor-node/docs/schema/definition-properties-bucket-ids-items.md
  49. 13 0
      distributor-node/docs/schema/definition-properties-distributed-buckets-ids-items.md
  50. 1 1
      distributor-node/docs/schema/definition-properties-distributed-buckets-ids.md
  51. 1 1
      distributor-node/docs/schema/definition-properties-logs-properties-file-logging-options.md
  52. 17 17
      distributor-node/docs/schema/definition.md
  53. 3 3
      distributor-node/package.json
  54. 7 6
      distributor-node/scripts/init-bucket.sh
  55. 37 21
      distributor-node/scripts/test-commands.sh
  56. 2 2
      distributor-node/src/api-spec/operator.yml
  57. 2 2
      distributor-node/src/api-spec/public.yml
  58. 9 0
      distributor-node/src/command-base/default.ts
  59. 7 29
      distributor-node/src/commands/dev/batchUpload.ts
  60. 7 11
      distributor-node/src/commands/leader/cancel-invitation.ts
  61. 5 2
      distributor-node/src/commands/leader/create-bucket.ts
  62. 1 1
      distributor-node/src/commands/leader/delete-bucket-family.ts
  63. 5 13
      distributor-node/src/commands/leader/delete-bucket.ts
  64. 7 11
      distributor-node/src/commands/leader/invite-bucket-operator.ts
  65. 7 11
      distributor-node/src/commands/leader/remove-bucket-operator.ts
  66. 4 1
      distributor-node/src/commands/leader/set-bucket-family-metadata.ts
  67. 1 1
      distributor-node/src/commands/leader/set-buckets-per-bag-limit.ts
  68. 10 12
      distributor-node/src/commands/leader/update-bag.ts
  69. 4 11
      distributor-node/src/commands/leader/update-bucket-mode.ts
  70. 5 13
      distributor-node/src/commands/leader/update-bucket-status.ts
  71. 6 2
      distributor-node/src/commands/leader/update-dynamic-bag-policy.ts
  72. 12 6
      distributor-node/src/commands/node/set-buckets.ts
  73. 0 4
      distributor-node/src/commands/node/shutdown.ts
  74. 0 4
      distributor-node/src/commands/node/start-public-api.ts
  75. 0 4
      distributor-node/src/commands/node/stop-public-api.ts
  76. 7 11
      distributor-node/src/commands/operator/accept-invitation.ts
  77. 7 11
      distributor-node/src/commands/operator/set-metadata.ts
  78. 8 4
      distributor-node/src/schemas/configSchema.ts
  79. 11 3
      distributor-node/src/services/cache/StateCacheService.ts
  80. 3 2
      distributor-node/src/services/content/ContentService.ts
  81. 1 1
      distributor-node/src/services/content/FileContinousReadStream.ts
  82. 14 8
      distributor-node/src/services/httpApi/controllers/public.ts
  83. 1 0
      distributor-node/src/services/logging/LoggingService.ts
  84. 30 18
      distributor-node/src/services/networking/PendingDownload.ts
  85. 22 11
      distributor-node/src/services/networking/query-node/generated/schema.ts
  86. 2 2
      distributor-node/src/services/networking/storage-node/api.ts
  87. 26 110
      distributor-node/src/services/networking/storage-node/generated/api.ts
  88. 21 0
      distributor-node/src/services/parsers/BucketIdParserService.ts
  89. 4 4
      distributor-node/src/types/generated/ConfigJson.d.ts
  90. 1 1
      distributor-node/src/types/generated/OperatorApi.ts
  91. 1 5
      distributor-node/src/types/generated/PublicApi.ts
  92. 22 13
      docker-compose.yml
  93. 5 1
      metadata-protobuf/package.json
  94. 1 1
      node/Cargo.toml
  95. 3 2
      package.json
  96. 0 3
      query-node/build.sh
  97. 2 3
      query-node/codegen/package.json
  98. 242 240
      query-node/codegen/yarn.lock
  99. 4 6
      query-node/mappings/package.json
  100. 0 20
      query-node/mappings/scripts/postCodegen.ts

+ 9 - 2
.env

@@ -24,6 +24,9 @@ BLOCK_HEIGHT=0
 # Query node GraphQL server port
 GRAPHQL_SERVER_PORT=8081
 
+# Query node playground subscription endpoint
+GRAPHQL_PLAYGROUND_SUBSCRIPTION_ENDPOINT=ws://localhost:8081/graphql
+
 # Hydra indexer gateway GraphQL server port
 HYDRA_INDEXER_GATEWAY_PORT=4000
 
@@ -44,13 +47,17 @@ PROCESSOR_INDEXER_GATEWAY=http://hydra-indexer-gateway:${HYDRA_INDEXER_GATEWAY_P
 
 # Colossus services identities
 COLOSSUS_1_WORKER_ID=0
-COLOSSUS_1_ACCOUNT_URI=//testing//worker//Storage//${COLOSSUS_1_WORKER_ID}
+COLOSSUS_1_WORKER_URI=//testing//worker//Storage//${COLOSSUS_1_WORKER_ID}
+COLOSSUS_1_TRANSACTOR_URI=//Colossus1
+
 COLOSSUS_2_WORKER_ID=1
-COLOSSUS_2_ACCOUNT_URI=//testing//worker//Storage//${COLOSSUS_2_WORKER_ID}
+COLOSSUS_2_WORKER_URI=//testing//worker//Storage//${COLOSSUS_2_WORKER_ID}
+COLOSSUS_2_TRANSACTOR_URI=//Colossus2
 
 # Distributor node services identities
 DISTRIBUTOR_1_WORKER_ID=0
 DISTRIBUTOR_1_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_1_WORKER_ID}
+
 DISTRIBUTOR_2_WORKER_ID=1
 DISTRIBUTOR_2_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_2_WORKER_ID}
 

+ 4 - 4
.github/workflows/storage-node.yml

@@ -19,8 +19,8 @@ jobs:
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
         yarn workspace @joystream/metadata-protobuf build
-        yarn workspace storage-node-v2 lint --quiet
-        yarn workspace storage-node-v2 build
+        yarn workspace storage-node lint --quiet
+        yarn workspace storage-node build
 
   storage_node_build_osx:
     name: MacOS Checks
@@ -39,5 +39,5 @@ jobs:
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
         yarn workspace @joystream/metadata-protobuf build
-        yarn workspace storage-node-v2 lint --quiet
-        yarn workspace storage-node-v2 build
+        yarn workspace storage-node lint --quiet
+        yarn workspace storage-node build

+ 3 - 3
Cargo.lock

@@ -731,7 +731,7 @@ dependencies = [
 
 [[package]]
 name = "chain-spec-builder"
-version = "3.1.1"
+version = "3.3.0"
 dependencies = [
  "ansi_term 0.12.1",
  "enum-utils",
@@ -2332,7 +2332,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node"
-version = "5.9.0"
+version = "5.13.0"
 dependencies = [
  "frame-benchmarking",
  "frame-benchmarking-cli",
@@ -2393,7 +2393,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node-runtime"
-version = "9.11.0"
+version = "9.13.0"
 dependencies = [
  "frame-benchmarking",
  "frame-executive",

+ 1 - 1
build-packages.sh

@@ -7,6 +7,6 @@ yarn workspace @joystream/types build
 yarn workspace @joystream/metadata-protobuf build
 yarn workspace query-node-root build
 yarn workspace @joystream/cli build
-yarn workspace storage-node-v2 build
+yarn workspace storage-node build
 yarn workspace @joystream/distributor-cli build
 yarn workspace pioneer build

File diff suppressed because it is too large
+ 0 - 0
chain-metadata.json


+ 1 - 1
cli/package.json

@@ -11,7 +11,7 @@
     "@apidevtools/json-schema-ref-parser": "^9.0.6",
     "@ffprobe-installer/ffprobe": "^1.1.0",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.0",
+    "@joystream/types": "^0.17.1",
     "@oclif/command": "^1.5.19",
     "@oclif/config": "^1.14.0",
     "@oclif/plugin-autocomplete": "^0.2.0",

+ 6 - 5
colossus.Dockerfile

@@ -7,10 +7,10 @@ RUN yarn --frozen-lockfile
 
 RUN yarn workspace @joystream/types build
 RUN yarn workspace @joystream/metadata-protobuf build
-RUN yarn workspace storage-node-v2 build
+RUN yarn workspace storage-node build
 
 # Use these volumes to persist uploading data and to pass the keyfile.
-VOLUME ["/data", "/keystore"]
+VOLUME ["/data", "/keystore", "/logs"]
 
 # Required variables
 ENV WS_PROVIDER_ENDPOINT_URI=ws://not-set
@@ -31,10 +31,11 @@ ENV ACCOUNT_URI=
 # Colossus node port
 EXPOSE ${COLOSSUS_PORT}
 
-WORKDIR /joystream/storage-node-v2
+WORKDIR /joystream/storage-node
 ENTRYPOINT yarn storage-node server --queryNodeEndpoint ${QUERY_NODE_ENDPOINT} \
     --port ${COLOSSUS_PORT} --uploads /data  \
     --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} \
-    --elasticSearchHost=${ELASTIC_SEARCH_HOST} \
+    --elasticSearchEndpoint=${ELASTIC_SEARCH_ENDPOINT} \
     --accountUri=${ACCOUNT_URI} \
-    --worker ${WORKER_ID}
+    --worker ${WORKER_ID} \
+    --logFilePath=/logs

+ 2 - 2
devops/aws/deploy-infra.sh

@@ -75,7 +75,7 @@ if [ $? -eq 0 ]; then
   then
     echo -e "\n\n=========== Compile joystream-node on build server ==========="
     ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH build-code.yml \
-      --extra-vars "branch_name=$BRANCH_NAME git_repo=$GIT_REPO build_local_code=$BUILD_LOCAL_CODE data_path=data-$NEW_STACK_NAME"
+      --extra-vars "branch_name=$BRANCH_NAME git_repo=$GIT_REPO build_local_code=$BUILD_LOCAL_CODE data_path=$DATA_PATH"
 
     echo -e "\n\n=========== Install additional utils on build server ==========="
     ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH setup-admin.yml
@@ -84,7 +84,7 @@ if [ $? -eq 0 ]; then
   echo -e "\n\n=========== Configure and start new validators, rpc node and pioneer ==========="
   ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH chain-spec-pioneer.yml \
     --extra-vars "local_dir=$LOCAL_CODE_PATH network_suffix=$NETWORK_SUFFIX
-                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
+                  data_path=$DATA_PATH bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
                   deployment_type=$DEPLOYMENT_TYPE initial_balances_file=$INITIAL_BALANCES_PATH initial_members_file=$INITIAL_MEMBERS_PATH"
 
   echo -e "\n\n Pioneer URL: https://$DOMAIN_NAME"

+ 1 - 1
devops/aws/deploy-playground-playbook.yml

@@ -97,7 +97,7 @@
       debug:
         msg:
           - 'The services should now be accesible at:'
-          - 'Pioneer: https://{{ nip_domain }}/pioneer/'
+          - 'Pioneer: https://{{ nip_domain }}/'
           - 'WebSocket RPC: wss://{{ nip_domain }}/ws-rpc'
           - 'HTTP RPC: https://{{ nip_domain }}/http-rpc'
           - 'Colossus: https://{{ nip_domain }}/colossus-1'

+ 4 - 3
devops/aws/group_vars/all

@@ -9,9 +9,10 @@ local_dir: ~/Joystream/joystream
 # Generates random number between 1000..9999
 network_suffix: "{{ 10000 | random(1000) }}"
 
-data_path: ./data
-chain_spec_path: "{{ data_path }}/chainspec.json"
-raw_chain_spec_path: "{{ data_path }}/chainspec-raw.json"
+remote_data_path: ./data
+chain_spec_path: "{{ remote_data_path }}/chainspec.json"
+raw_chain_spec_path: "{{ remote_data_path }}/chainspec-raw.json"
+local_raw_chain_spec_path: "{{ data_path }}/chainspec-raw.json"
 remote_code_path: "/home/ubuntu/joystream"
 remote_chain_spec_path: "{{ remote_code_path }}/chainspec.json"
 run_on_admin_server: true

+ 5 - 5
devops/aws/roles/common/tasks/chain-spec-node-keys.yml

@@ -3,7 +3,7 @@
 
 - name: Debug to test variable
   debug:
-    msg: 'Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
+    msg: 'Remote Data path: {{ remote_data_path }}, Local Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
   run_once: true
 
 - name: Copying initial members file to the server
@@ -24,7 +24,7 @@
   shell: >
     {{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }}
     --chain-spec-path {{ chain_spec_path }}
-    --endowed 1 --keystore-path {{ data_path }}
+    --endowed 1 --keystore-path {{ remote_data_path }}
     {% if deployment_type is defined and deployment_type|length > 0 %}--deployment {{ deployment_type }}{% endif %}
     {% if initial_members_file is defined and initial_members_file|length > 0 %}--initial-balances-path {{ admin_code_dir }}/initial-balances.json{% endif %}
     {% if initial_balances_file is defined and initial_balances_file|length > 0 %}--initial-members-path {{ admin_code_dir }}/initial-members.json{% endif %}
@@ -50,7 +50,7 @@
 - name: Save output of chain spec to local file
   copy:
     content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
-    dest: '{{ data_path }}/chain_spec_output.txt'
+    dest: '{{ remote_data_path }}/chain_spec_output.txt'
   delegate_to: '{{ local_or_admin }}'
   run_once: true
 
@@ -75,7 +75,7 @@
 
 - name: Copying chain spec files to localhost
   synchronize:
-    src: '/home/ubuntu/{{ data_path }}/'
+    src: '/home/ubuntu/{{ remote_data_path }}/'
     dest: '{{ data_path }}'
     mode: pull
   run_once: true
@@ -92,5 +92,5 @@
 
 - name: Copying raw chain spec file to all servers
   copy:
-    src: '{{ raw_chain_spec_path }}'
+    src: '{{ local_raw_chain_spec_path }}'
     dest: '{{ remote_chain_spec_path }}'

+ 1 - 0
devops/aws/roles/rpc/tasks/main.yml

@@ -19,6 +19,7 @@
   service:
     name: joystream-node
     state: started
+    enabled: yes
   become: yes
 
 - name: Set websocket and http endpoint variables

+ 1 - 0
devops/aws/roles/validators/tasks/main.yml

@@ -42,4 +42,5 @@
   service:
     name: joystream-node
     state: started
+    enabled: yes
   become: yes

+ 5 - 5
devops/aws/templates/Playground-Caddyfile.j2

@@ -8,11 +8,6 @@
     reverse_proxy localhost:9933
 }
 
-{{ nip_domain }}/pioneer* {
-    uri strip_prefix /pioneer
-    reverse_proxy localhost:3000
-}
-
 {{ nip_domain }}/colossus-1* {
     uri strip_prefix /colossus-1
     reverse_proxy localhost:3333
@@ -47,3 +42,8 @@
     uri strip_prefix /member-faucet
     reverse_proxy localhost:3002
 }
+
+# Pioneer
+{{ nip_domain }}/* {
+    reverse_proxy localhost:3000
+}

+ 2 - 0
devops/kubernetes/.gitignore

@@ -0,0 +1,2 @@
+kubeconfig
+

+ 157 - 0
devops/kubernetes/HOWTO.md

@@ -0,0 +1,157 @@
+### Requirements (in addition to nodejs and npm)
+- kind     - https://kind.sigs.k8s.io/docs/user/quick-start/#installation
+- minikube - https://minikube.sigs.k8s.io/docs/start/
+- pulumi   - https://www.pulumi.com/docs/get-started/install/
+- kubectl  - https://kubernetes.io/docs/tasks/tools/#kubectl 
+
+Note that minikube works better in Mac Docker Desktop. Linux is recommended for best results.
+
+# Minikube
+
+### create a minikube cluster
+minikube start
+
+### deploy nginx ingress controller
+minikube addons enable ingress
+
+### check deployment succeeded
+kubectl get pods --all-namespaces -l app=ingress-nginx
+
+### load required docker images
+minikube image load joystream/node:giza --daemon
+
+### deploy node-network or query-node pulumi stack..
+```
+pulumi up ...
+# get the namespace name of the deployed stack
+NAMESPACE_NAME=$(pulumi stack output namespaceName)
+```
+
+### deploy ingress for query-node
+
+```
+kubectl apply -f query-node/ingress.yaml --namespace $NAMESPACE_NAME
+```
+
+### deploy ingress for node-network
+
+```
+kubectl apply -f node-network/ingress.yaml --namespace $NAMESPACE_NAME
+```
+
+### Run the minikube tunnel
+In a separate terminal run the tunnel service, which will attempt to listen on port 80:
+
+```
+minikube tunnel
+```
+
+You could run it in the background with the "&" ampersand
+
+```
+minikube tunnel &
+```
+
+With ingress deployed you will be able to access the exposed services on:
+
+```
+ws://localhost/ws-rpc
+http://localhost/indexer/graphql
+http://localhost/server/graphql
+```
+
+kill the background job by its given number (assuming job number 1)
+
+```
+kill %1
+```
+
+### Destroying stack and cluster
+
+```
+pulumi destroy
+minikube stop
+minikube delete
+```
+
+# Kind
+
+### Create a 'kind' cluster
+Our cluster configuration will try to listen on port 80 so there cannot be another service running on that port.
+
+```
+# optionally save cluster config in specific location,
+# remember to set this again when using kubectl command to access the cluster.
+export KUBECONFIG=$(pwd)/kubeconfig
+
+# create the cluster with custom configuration
+kind create cluster --config ./kind-cluster.yaml --name joystream
+```
+
+```
+# confirm current context is set to newly created cluster
+kubectl config current-context
+# you should see:
+kind-joystream
+
+# if you have switched to a different context you can switch back to the cluster with:
+kubectl config use-context kind-joystream
+
+# list cluster nodes
+kubectl get nodes
+
+# get cluster details
+kubectl cluster-info
+```
+
+### Deploy nginx ingress controller
+
+```
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
+```
+
+### Wait for controller to be ready
+
+```
+kubectl wait --namespace ingress-nginx \
+  --for=condition=ready pod \
+  --selector=app.kubernetes.io/component=controller \
+  --timeout=90s
+```
+
+### Load the required docker images
+Load docker images for the stacks. Do not use `:latest` tag otherwise it will be pulled from docker hub.
+
+```
+kind load docker-image joystream/node:giza --name joystream
+kind load docker-image joystream/apps:giza --name joystream
+```
+
+### Deploy a pulumi stack..
+For more details see the README files for each stack.
+```
+pulumi up  ...
+# get the namespace name for the stack that was deployed
+NAMESPACE_NAME=$(pulumi stack output namespaceName)
+```
+
+### Deploy ingress (node network)
+kubectl apply -f node-network/ingress.yaml --namespace $NAMESPACE_NAME
+
+### Deploy ingress (query node)
+kubectl apply -f query-node/ingress.yaml --namespace $NAMESPACE_NAME
+
+With ingress deployed you will be able to access the exposed services on:
+
+```
+ws://localhost/ws-rpc
+http://localhost/indexer/graphql
+http://localhost/server/graphql
+```
+
+### Destroy the stack and cluster
+
+```
+pulumi destroy
+kind delete cluster
+```

+ 5 - 0
devops/kubernetes/argus/.gitignore

@@ -0,0 +1,5 @@
+/bin/
+/node_modules/
+kubeconfig*
+package-lock.json
+Pulumi.*.yaml

+ 35 - 0
devops/kubernetes/argus/Pulumi.yaml

@@ -0,0 +1,35 @@
+name: argus
+runtime: nodejs
+description: A Pulumi program to deploy Argus node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    queryNodeHost:
+      description: Query node GraphQL endpoint
+      default: 'https://hydra.joystream.org/graphql'
+    wsProviderEndpointURI:
+      description: Chain RPC endpoint
+      default: 'wss://rome-rpc-endpoint.joystream.org:9944/'
+    argusImage:
+      description: The distributor node image to use for running the node
+    keys:
+      description: Specifies the keys available within distributor node CLI
+    buckets:
+      description: Specifies the buckets distributed by the node
+    workerId:
+      description: ID of the node operator (distribution working group worker)
+    dataStorage:
+      description: Amount of storage (in Gi) assigned for the data directory
+      default: 10
+    logStorage:
+      description: Amount of storage (in Gi) assigned for the logs directory
+      default: 2
+    cacheStorage:
+      description: Amount of storage (in Gi) assigned for the cache directory
+      default: 10

+ 123 - 0
devops/kubernetes/argus/README.md

@@ -0,0 +1,123 @@
+# Argus deployment on Minikube or EKS
+
+This project deploys an Argus node on an EKS or a minikube cluster
+
+## Deploying the App
+
+To deploy your infrastructure, follow the below steps.
+
+### Prerequisites
+
+1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/)
+1. [Install Node.js](https://nodejs.org/en/download/)
+1. Install a package manager for Node.js, such as [npm](https://www.npmjs.com/get-npm) or [Yarn](https://yarnpkg.com/en/docs/install).
+1. [Configure AWS Credentials](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
+1. Optional (for debugging): [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
+
+### Steps
+
+After cloning this repo, from this working directory, run these commands:
+
+1. Install the required Node.js packages:
+
+   This installs the dependent packages [needed](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) for our Pulumi program.
+
+   ```bash
+   $ npm install
+   ```
+
+1. Create a new stack, which is an isolated deployment target for this example:
+
+   This will initialize the Pulumi program in TypeScript.
+
+   ```bash
+   $ pulumi stack init
+   ```
+
+1. Set the required configuration variables in `Pulumi.<stack>.yaml`
+
+   ```bash
+   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext queryNodeHost='https://34.197.252.42.nip.io/server/graphql' --plaintext isMinikube=true \
+    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
+    --plaintext argusImage='joystream/distributor-node:latest' \
+    --plaintext keys='[{ "suri": "//Alice" }]' --plaintext buckets='["1:0","1:1"]' --plaintext workerId=0
+   ```
+
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
+
+   ```bash
+   $ pulumi config set isMinikube false
+   ```
+
+1. Stand up the EKS cluster:
+
+   Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a
+   new EKS cluster takes between 10-15 minutes.
+
+1. If you are using Minikube, run `minikube service argus-node -n $(pulumi stack output namespaceName)`
+
+   This will setup a proxy for your `argus-node` service, which can then be accessed at
+   the URL given in the output
+
+1. Once the stack is up and running, we will modify the Caddy config to get SSL certificate for the load balancer
+
+   Modify the config variable `isLoadBalancerReady`
+
+   ```bash
+   $ pulumi config set isLoadBalancerReady true
+   ```
+
+   Run `pulumi up -y` to update the Caddy config
+
+1. Access the Kubernetes Cluster using `kubectl`
+
+   To access your new Kubernetes cluster using `kubectl`, we need to set up the
+   `kubeconfig` file and download `kubectl`. We can leverage the Pulumi
+   stack output in the CLI, as Pulumi facilitates exporting these objects for us.
+
+   ```bash
+   $ pulumi stack output kubeconfig --show-secrets > kubeconfig
+   $ export KUBECONFIG=$PWD/kubeconfig
+   $ kubectl get nodes
+   ```
+
+   We can also use the stack output to query the cluster for our newly created Deployment:
+
+   ```bash
+   $ kubectl get deployment $(pulumi stack output deploymentName) --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
+   ```
+
+   To get logs
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To run a command on a pod
+
+   ```bash
+   $ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1}
+   ```
+
+   To see complete pulumi stack output
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To execute a command
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c colossus -- /bin/bash
+   ```
+
+1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
+
+   ```bash
+   $ pulumi destroy --yes
+   $ pulumi stack rm --yes
+   ```

+ 5 - 0
devops/kubernetes/argus/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}

+ 229 - 0
devops/kubernetes/argus/index.ts

@@ -0,0 +1,229 @@
+import * as awsx from '@pulumi/awsx'
+import * as aws from '@pulumi/aws'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { CaddyServiceDeployment, CustomPersistentVolume } from 'pulumi-common'
+
+const awsConfig = new pulumi.Config('aws')
+const config = new pulumi.Config()
+
+const queryNodeHost = config.require('queryNodeHost')
+const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
+const configArgusImage = config.require('argusImage')
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const keys = config.require('keys')
+const buckets = config.require('buckets')
+const workerId = config.require('workerId')
+const name = 'argus-node'
+const isMinikube = config.getBoolean('isMinikube')
+const dataStorage = config.getNumber('dataStorage') || 10
+const logStorage = config.getNumber('logStorage') || 2
+const cacheStorage = config.getNumber('cacheStorage') || 10
+
+export let kubeconfig: pulumi.Output<any>
+export let argusImage: pulumi.Output<string> = pulumi.interpolate`${configArgusImage}`
+let provider: k8s.Provider
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('argus-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-argus-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 2,
+    maxSize: 2,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Create a repository
+  const repo = new awsx.ecr.Repository('distributor-node')
+
+  // Build an image and publish it to our ECR repository.
+  argusImage = repo.buildAndPushImage({
+    context: './docker_dummy',
+    dockerfile: './docker_dummy/Dockerfile',
+    args: { SOURCE_IMAGE: argusImage! },
+  })
+
+  // Uncomment the below line to use an existing image
+  // argusImage = pulumi.interpolate`ahhda/distributor-node:latest`
+}
+
+const resourceOptions = { provider: provider }
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const appLabels = { appClass: name }
+
+const dataPVC = new CustomPersistentVolume(
+  'data',
+  { namespaceName: namespaceName, storage: dataStorage },
+  resourceOptions
+)
+const logsPVC = new CustomPersistentVolume(
+  'logs',
+  { namespaceName: namespaceName, storage: logStorage },
+  resourceOptions
+)
+const cachePVC = new CustomPersistentVolume(
+  'cache',
+  { namespaceName: namespaceName, storage: cacheStorage },
+  resourceOptions
+)
+
+// Create a Deployment
+const deployment = new k8s.apps.v1.Deployment(
+  name,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'argus',
+              image: argusImage,
+              imagePullPolicy: 'IfNotPresent',
+              workingDir: '/joystream/distributor-node',
+              env: [
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE',
+                  value: queryNodeHost,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS',
+                  value: wsProviderEndpointURI,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__KEYS',
+                  value: keys,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__BUCKETS',
+                  value: buckets,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__WORKER_ID',
+                  value: workerId,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__PORT',
+                  value: '3334',
+                },
+              ],
+              args: ['start'],
+              ports: [{ containerPort: 3334 }],
+              volumeMounts: [
+                {
+                  name: 'data',
+                  mountPath: '/data',
+                  subPath: 'data',
+                },
+                {
+                  name: 'logs',
+                  mountPath: '/logs',
+                  subPath: 'logs',
+                },
+                {
+                  name: 'cache',
+                  mountPath: '/cache',
+                  subPath: 'cache',
+                },
+              ],
+            },
+          ],
+          volumes: [
+            {
+              name: 'data',
+              persistentVolumeClaim: {
+                claimName: dataPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'logs',
+              persistentVolumeClaim: {
+                claimName: logsPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'cache',
+              persistentVolumeClaim: {
+                claimName: cachePVC.pvc.metadata.name,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+// Create a LoadBalancer Service for the Deployment
+const service = new k8s.core.v1.Service(
+  name,
+  {
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: name,
+    },
+    spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
+      ports: [{ name: 'port-1', port: 3334 }],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
+)
+
+// Export the Service name
+export const serviceName = service.metadata.name
+
+// Export the Deployment name
+export const deploymentName = deployment.metadata.name
+
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
+
+const caddyEndpoints = [
+  ` {
+    reverse_proxy ${name}:3334
+}`,
+]
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}

+ 15 - 0
devops/kubernetes/argus/package.json

@@ -0,0 +1,15 @@
+{
+  "name": "eks-cluster",
+  "devDependencies": {
+    "@types/node": "^10.0.0"
+  },
+  "dependencies": {
+    "@pulumi/aws": "^4.0.0",
+    "@pulumi/awsx": "^0.30.0",
+    "@pulumi/eks": "^0.31.0",
+    "@pulumi/kubernetes": "^3.0.0",
+    "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
+    "pulumi-common": "file:../pulumi-common"
+  }
+}

+ 18 - 0
devops/kubernetes/argus/tsconfig.json

@@ -0,0 +1,18 @@
+{
+    "compilerOptions": {
+        "strict": true,
+        "outDir": "bin",
+        "target": "es2016",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "sourceMap": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}

+ 16 - 0
devops/kubernetes/kind-cluster.yaml

@@ -0,0 +1,16 @@
+# single node kind cluster mapping port 80 to an ingress controller
+# that is deployed separately
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  kubeadmConfigPatches:
+  - |
+    kind: InitConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "ingress-ready=true"
+  extraPortMappings:
+  - containerPort: 80
+    hostPort: 80
+    protocol: TCP

+ 2 - 43
devops/kubernetes/node-network/index.ts

@@ -60,50 +60,9 @@ const nodeImage = config.get('nodeImage') || 'joystream/node:latest'
 const encryptKey = config.get('encryptionKey') || '1234'
 
 const subkeyContainers = getSubkeyContainers(numberOfValidators, chainDataPath)
-let pvcClaimName: pulumi.Output<any>
 
-if (isMinikube) {
-  const pvc = new k8s.core.v1.PersistentVolumeClaim(
-    `${name}-pvc`,
-    {
-      metadata: {
-        labels: appLabels,
-        namespace: namespaceName,
-        name: `${name}-pvc`,
-      },
-      spec: {
-        accessModes: ['ReadWriteMany'],
-        resources: {
-          requests: {
-            storage: `1Gi`,
-          },
-        },
-      },
-    },
-    resourceOptions
-  )
-
-  const pv = new k8s.core.v1.PersistentVolume(`${name}-pv`, {
-    metadata: {
-      labels: { ...appLabels, type: 'local' },
-      namespace: namespaceName,
-      name: `${name}-pv`,
-    },
-    spec: {
-      accessModes: ['ReadWriteMany'],
-      capacity: {
-        storage: `1Gi`,
-      },
-      hostPath: {
-        path: '/mnt/data/',
-      },
-    },
-  })
-  pvcClaimName = pvc.metadata.apply((m) => m.name)
-} else {
-  const nfsVolume = new NFSServiceDeployment('nfs-server', { namespace: namespaceName }, resourceOptions)
-  pvcClaimName = nfsVolume.pvc.metadata.apply((m) => m.name)
-}
+const nfsVolume = new NFSServiceDeployment('nfs-server', { namespace: namespaceName }, resourceOptions)
+const pvcClaimName = nfsVolume.pvc.metadata.apply((m) => m.name)
 
 const jsonModifyConfig = new configMapFromFile(
   'json-modify-config',

+ 18 - 0
devops/kubernetes/node-network/ingress.yaml

@@ -0,0 +1,18 @@
+# deploy this ingress with kubectl:
+# kubectl apply -f ingress.yaml --namespace NAMESPACE_NAME
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ws-rpc-ingress
+spec:
+  rules:
+  - http:
+      paths:
+      # rpc node websocket endpoint
+      - pathType: Prefix
+        path: /ws-rpc
+        backend:
+          service:
+            name: node-network
+            port:
+              name: port-1

+ 29 - 0
devops/kubernetes/pulumi-common/configMap.ts

@@ -0,0 +1,29 @@
+import * as pulumi from '@pulumi/pulumi'
+import * as k8s from '@pulumi/kubernetes'
+import * as fs from 'fs'
+
+export class configMapFromFile extends pulumi.ComponentResource {
+  public readonly configName?: pulumi.Output<string>
+
+  constructor(name: string, args: ConfigMapArgs, opts: pulumi.ComponentResourceOptions = {}) {
+    super('pkg:query-node:configMap', name, {}, opts)
+
+    this.configName = new k8s.core.v1.ConfigMap(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        data: {
+          'fileData': fs.readFileSync(args.filePath).toString(),
+        },
+      },
+      opts
+    ).metadata.apply((m) => m.name)
+  }
+}
+
+export interface ConfigMapArgs {
+  filePath: string
+  namespaceName: pulumi.Output<string>
+}

+ 2 - 0
devops/kubernetes/pulumi-common/index.ts

@@ -1,2 +1,4 @@
 export { CaddyServiceDeployment } from './caddy'
 export { PostgresServiceDeployment } from './database'
+export { configMapFromFile } from './configMap'
+export { CustomPersistentVolume } from './volume'

+ 43 - 0
devops/kubernetes/pulumi-common/volume.ts

@@ -0,0 +1,43 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * This is an abstraction that uses a class to fold together the common
+ * pattern of creating a Kubernetes PersistentVolumeClaim.
+ * The resulting claim is exposed via the `pvc` property so other
+ * resources (Deployments, Jobs) can mount it.
+ */
+export class CustomPersistentVolume extends pulumi.ComponentResource {
+  public readonly pvc: k8s.core.v1.PersistentVolumeClaim
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('volume:service:CustomPersistentVolume', name, {}, opts)
+
+    const volumeLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    this.pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: volumeLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  storage: Number
+}

+ 9 - 0
devops/kubernetes/query-node/Pulumi.yaml

@@ -25,3 +25,12 @@ template:
     appsImage:
       description: The joystream image to use for running GraphQL servers
       default: joystream/apps:latest
+    dbPassword:
+      description: database password for indexer and processor databases  
+      required: true
+    blockHeight:
+      description: Block height to start indexing at
+      default: 0
+    joystreamWsEndpoint:
+      description: Joystream-node websocket endpoint used by indexer
+      required: true

+ 2 - 4
devops/kubernetes/query-node/README.md

@@ -38,6 +38,8 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext dbPassword=password --plaintext blockHeight=0 \
+    --plaintext joystreamWsEndpoint=ws://endpoint.somewhere.net:9944 \
     --plaintext isMinikube=true --plaintext skipProcessor=false
    ```
 
@@ -66,10 +68,6 @@ After cloning this repo, from this working directory, run these commands:
 
    If not using minikube, just specify the `appsImage` config.
 
-1. Create a `.env` file in this directory (`cp ../../../.env ./.env`) and set the database and other variables in it
-
-   Make sure to set `GRAPHQL_SERVER_PORT=4001`
-
 1. Stand up the Kubernetes cluster:
 
    Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a

+ 1 - 1
devops/kubernetes/query-node/configMap.ts

@@ -2,7 +2,7 @@ import * as pulumi from '@pulumi/pulumi'
 import * as k8s from '@pulumi/kubernetes'
 import * as fs from 'fs'
 
-export class configMapFromFile extends pulumi.ComponentResource {
+export class ConfigMapFromFile extends pulumi.ComponentResource {
   public readonly configName?: pulumi.Output<string>
 
   constructor(name: string, args: ConfigMapArgs, opts: pulumi.ComponentResourceOptions = {}) {

+ 22 - 18
devops/kubernetes/query-node/index.ts

@@ -2,14 +2,12 @@ import * as awsx from '@pulumi/awsx'
 import * as eks from '@pulumi/eks'
 import * as docker from '@pulumi/docker'
 import * as pulumi from '@pulumi/pulumi'
-import { configMapFromFile } from './configMap'
+import { ConfigMapFromFile } from './configMap'
 import * as k8s from '@pulumi/kubernetes'
 import { IndexerServiceDeployment } from './indexerDeployment'
 import { ProcessorServiceDeployment } from './processorDeployment'
 import { CaddyServiceDeployment } from 'pulumi-common'
 
-require('dotenv').config()
-
 const config = new pulumi.Config()
 const awsConfig = new pulumi.Config('aws')
 const isMinikube = config.getBoolean('isMinikube')
@@ -19,12 +17,11 @@ const skipProcessor = config.getBoolean('skipProcessor')
 const useLocalRepo = config.getBoolean('useLocalRepo')
 
 export let kubeconfig: pulumi.Output<any>
-export let joystreamAppsImage: pulumi.Output<string>
+export let joystreamAppsImage: pulumi.Output<string> = pulumi.interpolate`${appsImage}`
 let provider: k8s.Provider
 
 if (skipProcessor && externalIndexerUrl) {
-  pulumi.log.error('Need to deploy atleast one component, Indexer or Processor')
-  throw new Error(`Please check the config settings for skipProcessor and externalIndexerUrl`)
+  pulumi.log.info('No Indexer or Processor will be deployed only the cluster')
 }
 
 if (isMinikube) {
@@ -59,15 +56,19 @@ if (isMinikube) {
   // Export the cluster's kubeconfig.
   kubeconfig = cluster.kubeconfig
 
-  // Create a repository
-  const repo = new awsx.ecr.Repository('joystream/apps')
-
-  // Build an image from an existing local/docker hub image and push to ECR
-  joystreamAppsImage = repo.buildAndPushImage({
-    context: './docker_dummy',
-    dockerfile: './docker_dummy/Dockerfile',
-    args: { SOURCE_IMAGE: appsImage! },
-  })
+  // Only deploy ECR and push image if we need to deploy processor from
+  // local image build.
+  if (!skipProcessor && useLocalRepo) {
+    // Create a repository
+    const repo = new awsx.ecr.Repository('joystream/apps')
+
+    // Build an image from an existing local/docker hub image and push to ECR
+    joystreamAppsImage = repo.buildAndPushImage({
+      context: './docker_dummy',
+      dockerfile: './docker_dummy/Dockerfile',
+      args: { SOURCE_IMAGE: appsImage },
+    })
+  }
 }
 
 const resourceOptions = { provider: provider }
@@ -80,7 +81,7 @@ const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
 // Export the Namespace name
 export const namespaceName = ns.metadata.name
 
-const defsConfig = new configMapFromFile(
+const defsConfig = new ConfigMapFromFile(
   'defs-config',
   {
     filePath: '../../../types/augment/all/defs.json',
@@ -109,11 +110,14 @@ const caddyEndpoints = [
   `/indexer* {
     uri strip_prefix /indexer
     reverse_proxy indexer:4000
-}`,
+  }`,
   `/server* {
     uri strip_prefix /server
     reverse_proxy graphql-server:8081
-}`,
+  }`,
+  `/@apollographql/* {
+    reverse_proxy graphql-server:8081
+  }`,
 ]
 
 const lbReady = config.get('isLoadBalancerReady') === 'true'

+ 32 - 61
devops/kubernetes/query-node/indexerDeployment.ts

@@ -14,9 +14,18 @@ export class IndexerServiceDeployment extends pulumi.ComponentResource {
   constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
     super('indexer:service:IndexerServiceDeployment', name, {}, opts)
 
+    const config = new pulumi.Config()
+    const DB_PASS = config.require('dbPassword')
+    const BLOCK_HEIGHT = config.require('blockHeight') || '0'
+    const WS_PROVIDER_ENDPOINT_URI = config.require('joystreamWsEndpoint')
+
+    const DB_USERNAME = 'postgres'
+    const INDEXER_DATABASE_NAME = 'indexer'
+    const DB_PORT = '5432'
+
     // Name passed in the constructor will be the endpoint for accessing the service
     const serviceName = name
-    let appLabels = { appClass: 'indexer' }
+    const appLabels = { appClass: 'indexer' }
 
     const indexerDbName = 'indexer-db'
     const indexerDb = new PostgresServiceDeployment(
@@ -24,56 +33,16 @@ export class IndexerServiceDeployment extends pulumi.ComponentResource {
       {
         namespaceName: args.namespaceName,
         env: [
-          { name: 'POSTGRES_USER', value: process.env.DB_USER! },
-          { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
-          { name: 'POSTGRES_DB', value: process.env.INDEXER_DB_NAME! },
+          { name: 'POSTGRES_USER', value: DB_USERNAME },
+          { name: 'POSTGRES_PASSWORD', value: DB_PASS },
+          { name: 'POSTGRES_DB', value: INDEXER_DATABASE_NAME },
+          { name: 'PGPORT', value: DB_PORT },
         ],
         storage: args.storage,
       },
       { parent: this }
     )
 
-    const indexerMigrationJob = new k8s.batch.v1.Job(
-      'indexer-db-migration',
-      {
-        metadata: {
-          namespace: args.namespaceName,
-        },
-        spec: {
-          backoffLimit: 0,
-          template: {
-            spec: {
-              containers: [
-                {
-                  name: 'db-migration',
-                  image: args.joystreamAppsImage,
-                  imagePullPolicy: 'IfNotPresent',
-                  resources: { requests: { cpu: '100m', memory: '100Mi' } },
-                  env: [
-                    {
-                      name: 'WARTHOG_DB_HOST',
-                      value: indexerDbName,
-                    },
-                    {
-                      name: 'DB_HOST',
-                      value: indexerDbName,
-                    },
-                    { name: 'WARTHOG_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
-                    { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                    { name: 'DB_PASS', value: process.env.DB_PASS! },
-                  ],
-                  command: ['/bin/sh', '-c'],
-                  args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
-                },
-              ],
-              restartPolicy: 'Never',
-            },
-          },
-        },
-      },
-      { parent: this, dependsOn: indexerDb.service }
-    )
-
     this.deployment = new k8s.apps.v1.Deployment(
       'indexer',
       {
@@ -100,17 +69,18 @@ export class IndexerServiceDeployment extends pulumi.ComponentResource {
                   image: 'joystream/hydra-indexer:3.0.0',
                   env: [
                     { name: 'DB_HOST', value: indexerDbName },
-                    { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                    { name: 'DB_PASS', value: process.env.DB_PASS! },
-                    { name: 'DB_USER', value: process.env.DB_USER! },
-                    { name: 'DB_PORT', value: process.env.DB_PORT! },
+                    { name: 'DB_NAME', value: INDEXER_DATABASE_NAME },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
                     { name: 'INDEXER_WORKERS', value: '5' },
+                    // localhost for redis should work since it is in the same deployment
                     { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
                     { name: 'DEBUG', value: 'index-builder:*' },
-                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: WS_PROVIDER_ENDPOINT_URI },
                     { name: 'TYPES_JSON', value: 'types.json' },
-                    { name: 'PGUSER', value: process.env.DB_USER! },
-                    { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
+                    { name: 'PGUSER', value: DB_USERNAME },
+                    { name: 'BLOCK_HEIGHT', value: BLOCK_HEIGHT },
                   ],
                   volumeMounts: [
                     {
@@ -126,17 +96,18 @@ export class IndexerServiceDeployment extends pulumi.ComponentResource {
                   name: 'hydra-indexer-gateway',
                   image: 'joystream/hydra-indexer-gateway:3.0.0',
                   env: [
-                    { name: 'WARTHOG_STARTER_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
+                    { name: 'WARTHOG_STARTER_DB_DATABASE', value: INDEXER_DATABASE_NAME },
                     { name: 'WARTHOG_STARTER_DB_HOST', value: indexerDbName },
-                    { name: 'WARTHOG_STARTER_DB_PASSWORD', value: process.env.DB_PASS! },
-                    { name: 'WARTHOG_STARTER_DB_PORT', value: process.env.DB_PORT! },
-                    { name: 'WARTHOG_STARTER_DB_USERNAME', value: process.env.DB_USER! },
+                    { name: 'WARTHOG_STARTER_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_STARTER_DB_PORT', value: DB_PORT },
+                    { name: 'WARTHOG_STARTER_DB_USERNAME', value: DB_USERNAME },
+                    // localhost for redis should work since it is in the same deployment
                     { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
-                    { name: 'WARTHOG_APP_PORT', value: process.env.WARTHOG_APP_PORT! },
-                    { name: 'PORT', value: process.env.WARTHOG_APP_PORT! },
+                    { name: 'WARTHOG_APP_PORT', value: '4001' },
+                    { name: 'PORT', value: '4001' },
                     { name: 'DEBUG', value: '*' },
                   ],
-                  ports: [{ name: 'hydra-port', containerPort: Number(process.env.WARTHOG_APP_PORT!) }],
+                  ports: [{ name: 'hydra-port', containerPort: 4001 }],
                 },
               ],
               volumes: [
@@ -151,7 +122,7 @@ export class IndexerServiceDeployment extends pulumi.ComponentResource {
           },
         },
       },
-      { parent: this, dependsOn: indexerMigrationJob }
+      { parent: this, dependsOn: indexerDb.service }
     )
 
     // Create a Service for the Indexer
@@ -183,5 +154,5 @@ export interface ServiceDeploymentArgs {
   joystreamAppsImage: pulumi.Output<string>
   defsConfig: pulumi.Output<string> | undefined
   env?: Environment[]
-  storage: Number
+  storage: number
 }

+ 27 - 0
devops/kubernetes/query-node/ingress.yaml

@@ -0,0 +1,27 @@
+# deploy this ingress with kubectl:
+# kubectl apply -f ingress.yaml --namespace NAMESPACE_NAME
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: graphql-ingress
+  # From example at: https://kubernetes.github.io/ingress-nginx/examples/rewrite/
+  annotations:
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+spec:
+  rules:
+  - http:
+      paths:
+      - pathType: Prefix
+        path: /server(/|$)(.*)
+        backend:
+          service:
+            name: graphql-server
+            port:
+              name: port-1
+      - pathType: Prefix
+        path: /indexer(/|$)(.*)
+        backend:
+          service:
+            name: indexer
+            port:
+              name: port-1

+ 46 - 19
devops/kubernetes/query-node/processorDeployment.ts

@@ -15,6 +15,12 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
   constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
     super('processor:service:ProcessorServiceDeployment', name, {}, opts)
 
+    const config = new pulumi.Config()
+    const DB_PASS = config.require('dbPassword')
+    const DB_USERNAME = 'postgres'
+    const PROCESSOR_DATABASE_NAME = 'processor'
+    const DB_PORT = '5432'
+
     // Name passed in the constructor will be the endpoint for accessing the service
     this.endpoint = 'graphql-server'
 
@@ -24,9 +30,10 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
       {
         namespaceName: args.namespaceName,
         env: [
-          { name: 'POSTGRES_USER', value: process.env.DB_USER! },
-          { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
-          { name: 'POSTGRES_DB', value: process.env.DB_NAME! },
+          { name: 'POSTGRES_USER', value: DB_USERNAME },
+          { name: 'POSTGRES_PASSWORD', value: DB_PASS },
+          { name: 'POSTGRES_DB', value: PROCESSOR_DATABASE_NAME },
+          { name: 'PGPORT', value: DB_PORT },
         ],
         storage: args.storage,
       },
@@ -58,12 +65,20 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
                       name: 'DB_HOST',
                       value: processorDbName,
                     },
-                    { name: 'WARTHOG_DB_DATABASE', value: process.env.DB_NAME! },
-                    { name: 'DB_NAME', value: process.env.DB_NAME! },
-                    { name: 'DB_PASS', value: process.env.DB_PASS! },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_DB_PORT', value: DB_PORT },
+                    { name: 'DB_NAME', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
                   ],
                   command: ['/bin/sh', '-c'],
-                  args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
+                  args: [
+                    // 'yarn workspace query-node config:dev;',
+                    'yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate',
+                  ],
                 },
               ],
               restartPolicy: 'Never',
@@ -98,15 +113,18 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
                   imagePullPolicy: 'IfNotPresent',
                   env: [
                     { name: 'DB_HOST', value: processorDbName },
-                    { name: 'DB_PASS', value: process.env.DB_PASS! },
-                    { name: 'DB_USER', value: process.env.DB_USER! },
-                    { name: 'DB_PORT', value: process.env.DB_PORT! },
-                    { name: 'DB_NAME', value: process.env.DB_NAME! },
-                    { name: 'GRAPHQL_SERVER_HOST', value: process.env.GRAPHQL_SERVER_HOST! },
-                    { name: 'GRAPHQL_SERVER_PORT', value: process.env.GRAPHQL_SERVER_PORT! },
-                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                    { name: 'DB_NAME', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_APP_PORT', value: '4002' },
+                    // NOTE(review): unclear why GRAPHQL_SERVER_HOST is required here — confirm before removing
+                    { name: 'GRAPHQL_SERVER_HOST', value: 'graphql-server' },
                   ],
-                  ports: [{ name: 'graph-ql-port', containerPort: Number(process.env.GRAPHQL_SERVER_PORT!) }],
+                  ports: [{ name: 'graph-ql-port', containerPort: 4002 }],
                   args: ['workspace', 'query-node-root', 'query-node:start:prod'],
                 },
               ],
@@ -163,9 +181,19 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
                       value: indexerURL,
                     },
                     { name: 'TYPEORM_HOST', value: processorDbName },
-                    { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                    { name: 'TYPEORM_DATABASE', value: PROCESSOR_DATABASE_NAME },
                     { name: 'DEBUG', value: 'index-builder:*' },
                     { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_DB_PORT', value: DB_PORT },
+                    // These are not required but must be defined or the processor will not start up
+                    { name: 'WARTHOG_APP_HOST', value: 'graphql-server' },
+                    { name: 'WARTHOG_APP_PORT', value: '4002' },
                   ],
                   volumeMounts: [
                     {
@@ -174,8 +202,7 @@ export class ProcessorServiceDeployment extends pulumi.ComponentResource {
                       subPath: 'fileData',
                     },
                   ],
-                  command: ['/bin/sh', '-c'],
-                  args: ['cd query-node && yarn hydra-processor run -e ../.env'],
+                  args: ['workspace', 'query-node-root', 'processor:start'],
                 },
               ],
               volumes: [
@@ -206,5 +233,5 @@ export interface ServiceDeploymentArgs {
   defsConfig: pulumi.Output<string> | undefined
   externalIndexerUrl: string | undefined
   env?: Environment[]
-  storage: Number
+  storage: number
 }

+ 16 - 12
devops/kubernetes/storage-node/Pulumi.yaml

@@ -1,33 +1,37 @@
-name: eks-cluster
+name: storage-node
 runtime: nodejs
-description: A Pulumi program to deploy storage node to cloud environment
+description: A Pulumi program to deploy storage node to Kubernetes
 template:
   config:
     aws:profile:
       default: joystream-user
     aws:region:
       default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
     wsProviderEndpointURI:
       description: Chain RPC endpoint
-      default: 'wss://rome-rpc-endpoint.joystream.org:9944/'
-    isAnonymous:
-      description: Whether you are deploying an anonymous storage node
-      default: true
     isLoadBalancerReady:
       description: Whether the load balancer service is ready and has been assigned an IP
       default: false
     colossusPort:
       description: Port that is exposed for the colossus container
-      default: 3000
+      default: 3333
     storage:
       description: Amount of storage in gigabytes for ipfs volume
       default: 40
-    providerId:
-      description: StorageProviderId assigned to you in working group
     keyFile:
-      description: Path to JSON key export file to use as the storage provider (role account)
-    publicURL:
-      description: API Public URL to announce
+      description: Key file for the account
     passphrase:
       description: Optional passphrase to use to decrypt the key-file
       secret: true
+    colossusImage:
+      description: The colossus image to use for running the storage node
+      default: joystream/colossus:latest
+    queryNodeEndpoint:
+      description: Full URL for Query node endpoint
+    workerId:
+      description: ID of the node operator (distribution working group worker)
+    accountURI:
+      description: Account URI

+ 11 - 9
devops/kubernetes/storage-node/README.md

@@ -1,6 +1,6 @@
 # Amazon EKS Cluster: Hello World!
 
-This example deploys an EKS Kubernetes cluster with custom ipfs image
+Deploy storage-node to a Kubernetes cluster
 
 ## Deploying the App
 
@@ -37,20 +37,22 @@ After cloning this repo, from this working directory, run these commands:
 1. Set the required configuration variables in `Pulumi.<stack>.yaml`
 
    ```bash
-   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
-    --plaintext isMinikube=true --plaintext isAnonymous=true
+   $ pulumi config set-all --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
+    --plaintext queryNodeEndpoint='http://graphql-server.query-node-yszsbs2i:8081' \
+    --plaintext keyFile='../../../keyfile.json' --secret passphrase='' \
+    --plaintext accountURI='//Alice' workerId=0 \
+    --plaintext isMinikube=true --plaintext colossusImage='joystream/colossus:latest' \
+    --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user
    ```
 
-   If running for production use the below mentioned config
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
 
    ```bash
-   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false --plaintext isMinikube=false \
-    --plaintext providerId=<ID> --plaintext keyFile=<PATH> --plaintext publicURL=<DOMAIN> --secret passphrase=<PASSPHRASE>
+   $ pulumi config set isMinikube false
    ```
 
-   You can also set the `storage` and the `colossusPort` config parameters if required
+   You can also set the `storage` and the `colossusPort` config parameters if required. Check `Pulumi.yaml` file
+   for additional parameters.
 
 1. Stand up the EKS cluster:
 

+ 5 - 0
devops/kubernetes/storage-node/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}

+ 120 - 111
devops/kubernetes/storage-node/index.ts

@@ -4,40 +4,40 @@ import * as eks from '@pulumi/eks'
 import * as docker from '@pulumi/docker'
 import * as k8s from '@pulumi/kubernetes'
 import * as pulumi from '@pulumi/pulumi'
-import { CaddyServiceDeployment } from 'pulumi-common'
+import { CaddyServiceDeployment, configMapFromFile } from 'pulumi-common'
 import * as fs from 'fs'
 
 const awsConfig = new pulumi.Config('aws')
 const config = new pulumi.Config()
 
+const name = 'storage-node'
+
 const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
-const isAnonymous = config.require('isAnonymous') === 'true'
+const queryNodeHost = config.require('queryNodeEndpoint')
+const workerId = config.require('workerId')
+const accountURI = config.get('accountURI')
+const keyFile = config.get('keyFile')
 const lbReady = config.get('isLoadBalancerReady') === 'true'
-const name = 'storage-node'
-const colossusPort = parseInt(config.get('colossusPort') || '3000')
+const configColossusImage = config.get('colossusImage') || `joystream/colossus:latest`
+const colossusPort = parseInt(config.get('colossusPort') || '3333')
 const storage = parseInt(config.get('storage') || '40')
 const isMinikube = config.getBoolean('isMinikube')
 
-let additionalParams: string[] | pulumi.Input<string>[] = []
-let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
-let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
+const additionalVolumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
+const additionalVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
+
+if (!accountURI && !keyFile) {
+  throw new Error('Must specify either Key file or Account URI')
+}
+
+const additionalParams: string[] | pulumi.Input<string>[] = []
 
 export let kubeconfig: pulumi.Output<any>
-export let colossusImage: pulumi.Output<string>
+export let colossusImage: pulumi.Output<string> = pulumi.interpolate`${configColossusImage}`
 let provider: k8s.Provider
 
 if (isMinikube) {
   provider = new k8s.Provider('local', {})
-  // Create image from local app
-  colossusImage = new docker.Image('joystream/colossus', {
-    build: {
-      context: '../../../',
-      dockerfile: '../../../colossus.Dockerfile',
-    },
-    imageName: 'joystream/colossus:latest',
-    skipPush: true,
-  }).baseImageName
-  // colossusImage = pulumi.interpolate`joystream/colossus:latest`
 } else {
   // Create a VPC for our cluster.
   const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
@@ -61,8 +61,9 @@ if (isMinikube) {
 
   // Build an image and publish it to our ECR repository.
   colossusImage = repo.buildAndPushImage({
-    dockerfile: '../../../colossus.Dockerfile',
-    context: '../../../',
+    context: './docker_dummy',
+    dockerfile: './docker_dummy/Dockerfile',
+    args: { SOURCE_IMAGE: colossusImage! },
   })
 }
 
@@ -96,74 +97,36 @@ const pvc = new k8s.core.v1.PersistentVolumeClaim(
   resourceOptions
 )
 
-volumes.push({
-  name: 'ipfs-data',
-  persistentVolumeClaim: {
-    claimName: `${name}-pvc`,
-  },
-})
-
-const caddyEndpoints = [
-  ` {
-    reverse_proxy storage-node:${colossusPort}
-}`,
-]
-
-export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
-export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
-
-if (!isMinikube) {
-  const caddy = new CaddyServiceDeployment(
-    'caddy-proxy',
-    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+if (keyFile) {
+  const keyConfigName = new configMapFromFile(
+    'key-config',
+    {
+      filePath: keyFile,
+      namespaceName: namespaceName,
+    },
     resourceOptions
-  )
-
-  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
-  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
-}
-
-export let appLink: pulumi.Output<string>
-
-if (lbReady) {
-  appLink = pulumi.interpolate`https://${endpoint1}`
-
-  if (!isAnonymous) {
-    const remoteKeyFilePath = '/joystream/key-file.json'
-    const providerId = config.require('providerId')
-    const keyFile = config.require('keyFile')
-    const publicUrl = config.get('publicURL') ? config.get('publicURL')! : appLink
-
-    const keyConfig = new k8s.core.v1.ConfigMap('key-config', {
-      metadata: { namespace: namespaceName, labels: appLabels },
-      data: { 'fileData': fs.readFileSync(keyFile).toString() },
-    })
-    const keyConfigName = keyConfig.metadata.apply((m) => m.name)
-
-    additionalParams = ['--provider-id', providerId, '--key-file', remoteKeyFilePath, '--public-url', publicUrl]
+  ).configName
 
-    volumeMounts.push({
-      mountPath: remoteKeyFilePath,
-      name: 'keyfile-volume',
-      subPath: 'fileData',
-    })
+  const remoteKeyFilePath = '/joystream/key-file.json'
+  additionalParams.push(`--keyFile=${remoteKeyFilePath}`)
 
-    volumes.push({
-      name: 'keyfile-volume',
-      configMap: {
-        name: keyConfigName,
-      },
-    })
-
-    const passphrase = config.get('passphrase')
-    if (passphrase) {
-      additionalParams.push('--passphrase', passphrase)
-    }
+  const passphrase = config.get('passphrase')
+  if (passphrase) {
+    additionalParams.push(`--password=${passphrase}`)
   }
-}
 
-if (isAnonymous) {
-  additionalParams.push('--anonymous')
+  additionalVolumes.push({
+    name: 'keyfile-volume',
+    configMap: {
+      name: keyConfigName,
+    },
+  })
+
+  additionalVolumeMounts.push({
+    mountPath: remoteKeyFilePath,
+    name: 'keyfile-volume',
+    subPath: 'fileData',
+  })
 }
 
 // Create a Deployment
@@ -182,31 +145,12 @@ const deployment = new k8s.apps.v1.Deployment(
           labels: appLabels,
         },
         spec: {
-          hostname: 'ipfs',
           containers: [
-            {
-              name: 'ipfs',
-              image: 'ipfs/go-ipfs:latest',
-              ports: [{ containerPort: 5001 }, { containerPort: 8080 }],
-              command: ['/bin/sh', '-c'],
-              args: [
-                'set -e; \
-                /usr/local/bin/start_ipfs config profile apply lowpower; \
-                /usr/local/bin/start_ipfs config --json Gateway.PublicGateways \'{"localhost": null }\'; \
-                /usr/local/bin/start_ipfs config Datastore.StorageMax 200GB; \
-                /sbin/tini -- /usr/local/bin/start_ipfs daemon --migrate=true',
-              ],
-              volumeMounts: [
-                {
-                  name: 'ipfs-data',
-                  mountPath: '/data/ipfs',
-                },
-              ],
-            },
             {
               name: 'colossus',
               image: colossusImage,
               imagePullPolicy: 'IfNotPresent',
+              workingDir: '/joystream/storage-node',
               env: [
                 {
                   name: 'WS_PROVIDER_ENDPOINT_URI',
@@ -217,21 +161,66 @@ const deployment = new k8s.apps.v1.Deployment(
                   name: 'DEBUG',
                   value: 'joystream:*',
                 },
+                {
+                  name: 'COLOSSUS_PORT',
+                  value: `${colossusPort}`,
+                },
+                {
+                  name: 'QUERY_NODE_ENDPOINT',
+                  value: queryNodeHost,
+                },
+                {
+                  name: 'WORKER_ID',
+                  value: workerId,
+                },
+                // ACCOUNT_URI takes precedence over keyFile
+                {
+                  name: 'ACCOUNT_URI',
+                  value: accountURI,
+                },
               ],
-              volumeMounts,
-              command: [
-                'yarn',
-                'colossus',
-                '--ws-provider',
+              volumeMounts: [
+                {
+                  name: 'colossus-data',
+                  mountPath: '/data',
+                  subPath: 'data',
+                },
+                {
+                  name: 'colossus-data',
+                  mountPath: '/keystore',
+                  subPath: 'keystore',
+                },
+                ...additionalVolumeMounts,
+              ],
+              command: ['yarn'],
+              args: [
+                'storage-node',
+                'server',
+                '--worker',
+                workerId,
+                '--port',
+                `${colossusPort}`,
+                '--uploads=/data',
+                '--sync',
+                '--syncInterval=1',
+                '--queryNodeEndpoint',
+                queryNodeHost,
+                '--apiUrl',
                 wsProviderEndpointURI,
-                '--ipfs-host',
-                'ipfs',
                 ...additionalParams,
               ],
               ports: [{ containerPort: colossusPort }],
             },
           ],
-          volumes,
+          volumes: [
+            {
+              name: 'colossus-data',
+              persistentVolumeClaim: {
+                claimName: `${name}-pvc`,
+              },
+            },
+            ...additionalVolumes,
+          ],
         },
       },
     },
@@ -262,3 +251,23 @@ export const serviceName = service.metadata.name
 
 // Export the Deployment name
 export const deploymentName = deployment.metadata.name
+
+const caddyEndpoints = [
+  ` {
+    reverse_proxy storage-node:${colossusPort}
+}`,
+]
+
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}

+ 2 - 2
distributor-node/config.yml

@@ -9,8 +9,8 @@ logs:
   file:
     level: debug
     path: ./local/logs
-    maxFiles: 5
-    maxSize: 1000000
+    maxFiles: 30 # 30 days or 30 * 50 MB
+    maxSize: 50485760 # 50 MB
   console:
     level: verbose
   # elastic:

+ 4 - 4
distributor-node/docs/api/operator/index.md

@@ -264,7 +264,7 @@ OperatorAuth
 ```javascript
 const inputBody = '{
   "buckets": [
-    0
+    "string"
   ]
 }';
 const headers = {
@@ -303,7 +303,7 @@ Updates buckets supported by the node.
 ```json
 {
   "buckets": [
-    0
+    "string"
   ]
 }
 ```
@@ -359,7 +359,7 @@ OperatorAuth
 ```json
 {
   "buckets": [
-    0
+    "string"
   ]
 }
 
@@ -369,7 +369,7 @@ OperatorAuth
 
 |Name|Type|Required|Restrictions|Description|
 |---|---|---|---|---|
-|buckets|[integer]|false|none|Set of bucket ids to be distributed by the node. If not provided - all buckets assigned to currently configured worker will be distributed.|
+|buckets|[string]|false|none|Set of bucket ids to be distributed by the node. If not provided - all buckets assigned to currently configured worker will be distributed.|
 
 undefined
 

+ 3 - 3
distributor-node/docs/api/public/index.md

@@ -141,7 +141,7 @@ Returns list of distributed buckets
 ```json
 {
   "bucketIds": [
-    0
+    "string"
   ]
 }
 ```
@@ -356,7 +356,7 @@ This operation does not require authentication
 ```json
 {
   "bucketIds": [
-    0
+    "string"
   ]
 }
 
@@ -369,7 +369,7 @@ oneOf
 |Name|Type|Required|Restrictions|Description|
 |---|---|---|---|---|
 |*anonymous*|object|false|none|none|
-|» bucketIds|[integer]|true|none|none|
+|» bucketIds|[string]|true|none|none|
 
 xor
 

+ 2 - 0
distributor-node/docs/commands/dev.md

@@ -21,6 +21,8 @@ OPTIONS
   -c, --configPath=configPath      [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                    directory)
 
+  -e, --endpoint=endpoint          (required)
+
   -y, --yes                        Answer "yes" to any prompt, skipping any manual confirmations
 ```
 

+ 11 - 23
distributor-node/docs/commands/leader.md

@@ -26,13 +26,11 @@ USAGE
   $ joystream-distributor leader:cancel-invitation
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
 
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
@@ -90,13 +88,11 @@ USAGE
   $ joystream-distributor leader:delete-bucket
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
 ```
 
@@ -130,13 +126,11 @@ USAGE
   $ joystream-distributor leader:invite-bucket-operator
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -w, --workerId=workerId      (required) ID of the distribution group worker to invite as bucket operator
 
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
@@ -157,13 +151,11 @@ USAGE
   $ joystream-distributor leader:remove-bucket-operator
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -w, --workerId=workerId      (required) ID of the operator (distribution working group worker) to remove from the
                                bucket
 
@@ -228,7 +220,7 @@ USAGE
 
 OPTIONS
   -a, --add=add
-      [default: ] ID of a bucket to add to bag
+      [default: ] Index(es) (within the family) of bucket(s) to add to the bag
 
   -b, --bagId=bagId
       (required) Bag ID. Format: {bag_type}:{sub_type}:{id}.
@@ -250,13 +242,13 @@ OPTIONS
       (required) ID of the distribution bucket family
 
   -r, --remove=remove
-      [default: ] ID of a bucket to remove from bag
+      [default: ] Index(es) (within the family) of bucket(s) to remove from the bag
 
   -y, --yes
       Answer "yes" to any prompt, skipping any manual confirmations
 
 EXAMPLE
-  $ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 -a 2 -a 3 -r 4 -r 5
+  $ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 2 3 -r 4 5
 ```
 
 _See code: [src/commands/leader/update-bag.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bag.ts)_
@@ -270,15 +262,13 @@ USAGE
   $ joystream-distributor leader:update-bucket-mode
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
   -d, --mode=(on|off)          (required) Whether the bucket should be "on" (distributing) or "off" (not distributing)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
 ```
 
@@ -293,14 +283,12 @@ USAGE
   $ joystream-distributor leader:update-bucket-status
 
 OPTIONS
-  -B, --bucketId=bucketId       (required) Distribution bucket id
+  -B, --bucketId=bucketId       (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
   -a, --acceptingBags=(yes|no)  (required) Whether the bucket should accept new bags
 
   -c, --configPath=configPath   [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                 directory)
 
-  -f, --familyId=familyId       (required) Distribution bucket family id
-
   -y, --yes                     Answer "yes" to any prompt, skipping any manual confirmations
 ```
 
@@ -318,7 +306,7 @@ OPTIONS
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -p, --policy=policy          Key-value pair of {familyId}:{numberOfBuckets}
+  -p, --policy=policy          [default: ] Key-value pair of {familyId}:{numberOfBuckets}
 
   -t, --type=(Member|Channel)  (required) Dynamic bag type
 
@@ -328,7 +316,7 @@ DESCRIPTION
   Requires distribution working group leader permissions.
 
 EXAMPLE
-  $ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 -p 2:10 -p 3:5
+  $ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 2:10 3:5
 ```
 
 _See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-dynamic-bag-policy.ts)_

+ 7 - 1
distributor-node/docs/commands/node.md

@@ -18,7 +18,9 @@ USAGE
   $ joystream-distributor node:set-buckets
 
 OPTIONS
-  -B, --bucketIds=bucketIds    Set of bucket ids to distribute
+  -B, --bucketIds=bucketIds    Set of bucket ids to distribute. Each bucket id should be in {familyId}:{bucketIndex}
+                               format. Multiple ids can be provided, separated by space.
+
   -a, --all                    Distribute all buckets belonging to configured worker
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
@@ -29,6 +31,10 @@ OPTIONS
   -u, --url=url                (required) Distributor node operator api base url (ie. http://localhost:3335)
 
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
+
+EXAMPLES
+  $ joystream-distributor node:set-buckets --bucketIds 1:1 1:2 1:3 2:1 2:2
+  $ joystream-distributor node:set-buckets --all
 ```
 
 _See code: [src/commands/node/set-buckets.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/node/set-buckets.ts)_

+ 2 - 6
distributor-node/docs/commands/operator.md

@@ -15,13 +15,11 @@ USAGE
   $ joystream-distributor operator:accept-invitation
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -w, --workerId=workerId      (required) ID of the invited operator (distribution group worker)
 
   -y, --yes                    Answer "yes" to any prompt, skipping any manual confirmations
@@ -41,15 +39,13 @@ USAGE
   $ joystream-distributor operator:set-metadata
 
 OPTIONS
-  -B, --bucketId=bucketId      (required) Distribution bucket id
+  -B, --bucketId=bucketId      (required) Distribution bucket ID in {familyId}:{bucketIndex} format.
 
   -c, --configPath=configPath  [default: ./config.yml] Path to config JSON/YAML file (relative to current working
                                directory)
 
   -e, --endpoint=endpoint      Root distribution node endpoint
 
-  -f, --familyId=familyId      (required) Distribution bucket family id
-
   -i, --input=input            Path to JSON metadata file
 
   -w, --workerId=workerId      (required) ID of the operator (distribution group worker)

+ 0 - 7
distributor-node/docs/schema/definition-properties-bucket-ids-items.md

@@ -1,7 +0,0 @@
-## items Type
-
-`integer`
-
-## items Constraints
-
-**minimum**: the value of this number must greater than or equal to: `0`

+ 13 - 0
distributor-node/docs/schema/definition-properties-distributed-buckets-ids-items.md

@@ -0,0 +1,13 @@
+## items Type
+
+`string`
+
+## items Constraints
+
+**pattern**: the string must match the following regular expression: 
+
+```regexp
+^[0-9]+:[0-9]+$
+```
+
+[try pattern](https://regexr.com/?expression=%5E%5B0-9%5D%2B%3A%5B0-9%5D%2B%24 "try regular expression with regexr.com")

+ 1 - 1
distributor-node/docs/schema/definition-properties-bucket-ids.md → distributor-node/docs/schema/definition-properties-distributed-buckets-ids.md

@@ -1,6 +1,6 @@
 ## buckets Type
 
-`integer[]`
+`string[]`
 
 ## buckets Constraints
 

+ 1 - 1
distributor-node/docs/schema/definition-properties-logs-properties-file-logging-options.md

@@ -65,7 +65,7 @@ Path where the logs will be stored (absolute or relative to config file)
 
 ## maxFiles
 
-Maximum number of log files to store
+Maximum number of log files to store. Recommended to be at least 7 when frequency is set to `daily` and at least 24 \* 7 when frequency is set to `hourly`
 
 `maxFiles`
 

+ 17 - 17
distributor-node/docs/schema/definition.md

@@ -4,19 +4,19 @@
 
 # Distributor node configuration Properties
 
-| Property                    | Type      | Required | Nullable       | Defined by                                                                                                                                  |
-| :-------------------------- | :-------- | :------- | :------------- | :------------------------------------------------------------------------------------------------------------------------------------------ |
-| [id](#id)                   | `string`  | Required | cannot be null | [Distributor node configuration](definition-properties-id.md "https://joystream.org/schemas/argus/config#/properties/id")                   |
-| [endpoints](#endpoints)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-endpoints.md "https://joystream.org/schemas/argus/config#/properties/endpoints")     |
-| [directories](#directories) | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-directories.md "https://joystream.org/schemas/argus/config#/properties/directories") |
-| [logs](#logs)               | `object`  | Optional | cannot be null | [Distributor node configuration](definition-properties-logs.md "https://joystream.org/schemas/argus/config#/properties/logs")               |
-| [limits](#limits)           | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-limits.md "https://joystream.org/schemas/argus/config#/properties/limits")           |
-| [intervals](#intervals)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-intervals.md "https://joystream.org/schemas/argus/config#/properties/intervals")     |
-| [publicApi](#publicapi)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-publicapi.md "https://joystream.org/schemas/argus/config#/properties/publicApi")     |
-| [operatorApi](#operatorapi) | `object`  | Optional | cannot be null | [Distributor node configuration](definition-properties-operatorapi.md "https://joystream.org/schemas/argus/config#/properties/operatorApi") |
-| [keys](#keys)               | `array`   | Optional | cannot be null | [Distributor node configuration](definition-properties-keys.md "https://joystream.org/schemas/argus/config#/properties/keys")               |
-| [buckets](#buckets)         | `array`   | Optional | cannot be null | [Distributor node configuration](definition-properties-bucket-ids.md "https://joystream.org/schemas/argus/config#/properties/buckets")      |
-| [workerId](#workerid)       | `integer` | Optional | cannot be null | [Distributor node configuration](definition-properties-workerid.md "https://joystream.org/schemas/argus/config#/properties/workerId")       |
+| Property                    | Type      | Required | Nullable       | Defined by                                                                                                                                          |
+| :-------------------------- | :-------- | :------- | :------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [id](#id)                   | `string`  | Required | cannot be null | [Distributor node configuration](definition-properties-id.md "https://joystream.org/schemas/argus/config#/properties/id")                           |
+| [endpoints](#endpoints)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-endpoints.md "https://joystream.org/schemas/argus/config#/properties/endpoints")             |
+| [directories](#directories) | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-directories.md "https://joystream.org/schemas/argus/config#/properties/directories")         |
+| [logs](#logs)               | `object`  | Optional | cannot be null | [Distributor node configuration](definition-properties-logs.md "https://joystream.org/schemas/argus/config#/properties/logs")                       |
+| [limits](#limits)           | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-limits.md "https://joystream.org/schemas/argus/config#/properties/limits")                   |
+| [intervals](#intervals)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-intervals.md "https://joystream.org/schemas/argus/config#/properties/intervals")             |
+| [publicApi](#publicapi)     | `object`  | Required | cannot be null | [Distributor node configuration](definition-properties-publicapi.md "https://joystream.org/schemas/argus/config#/properties/publicApi")             |
+| [operatorApi](#operatorapi) | `object`  | Optional | cannot be null | [Distributor node configuration](definition-properties-operatorapi.md "https://joystream.org/schemas/argus/config#/properties/operatorApi")         |
+| [keys](#keys)               | `array`   | Optional | cannot be null | [Distributor node configuration](definition-properties-keys.md "https://joystream.org/schemas/argus/config#/properties/keys")                       |
+| [buckets](#buckets)         | `array`   | Optional | cannot be null | [Distributor node configuration](definition-properties-distributed-buckets-ids.md "https://joystream.org/schemas/argus/config#/properties/buckets") |
+| [workerId](#workerid)       | `integer` | Optional | cannot be null | [Distributor node configuration](definition-properties-workerid.md "https://joystream.org/schemas/argus/config#/properties/workerId")               |
 
 ## id
 
@@ -190,21 +190,21 @@ an array of merged types ([Details](definition-properties-keys-items.md))
 
 ## buckets
 
-Set of bucket ids distributed by the node. If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed.
+Set of bucket ids distributed by the node. If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed. Expected bucket id format is: {familyId}:{bucketIndex}
 
 `buckets`
 
 *   is optional
 
-*   Type: `integer[]`
+*   Type: `string[]`
 
 *   cannot be null
 
-*   defined in: [Distributor node configuration](definition-properties-bucket-ids.md "https://joystream.org/schemas/argus/config#/properties/buckets")
+*   defined in: [Distributor node configuration](definition-properties-distributed-buckets-ids.md "https://joystream.org/schemas/argus/config#/properties/buckets")
 
 ### buckets Type
 
-`integer[]`
+`string[]`
 
 ### buckets Constraints
 

+ 3 - 3
distributor-node/package.json

@@ -11,7 +11,7 @@
     "@apollo/client": "^3.2.5",
     "@elastic/ecs-winston-format": "^1.1.0",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.0",
+    "@joystream/types": "^0.17.1",
     "@oclif/command": "^1",
     "@oclif/config": "^1",
     "@oclif/plugin-help": "^3",
@@ -82,7 +82,7 @@
   },
   "volta": {
     "node": "14.16.1",
-    "yarn": "1.22.4"
+    "yarn": "1.22.15"
   },
   "files": [
     "/bin",
@@ -133,7 +133,7 @@
     "generate:types:operator-api": "yarn openapi-typescript ./src/api-spec/operator.yml -o ./src/types/generated/OperatorApi.ts -c ../prettierrc.js",
     "generate:types:api": "yarn generate:types:public-api && yarn generate:types:operator-api",
     "generate:types:all": "yarn generate:types:json-schema && yarn generate:types:graphql && yarn generate:types:api",
-    "generate:api:storage-node": "yarn openapi-generator-cli generate -i ../storage-node-v2/src/api-spec/openapi.yaml -g typescript-axios -o ./src/services/networking/storage-node/generated",
+    "generate:api:storage-node": "yarn openapi-generator-cli generate -i ../storage-node/src/api-spec/openapi.yaml -g typescript-axios -o ./src/services/networking/storage-node/generated",
     "generate:api:all": "yarn generate:api:storage-node",
     "generate:docs:cli": "yarn oclif-dev readme --multi --dir ./docs/commands",
     "generate:docs:config": "yarn ts-node --transpile-only ./src/schemas/scripts/generateConfigDoc.ts",

+ 7 - 6
distributor-node/scripts/init-bucket.sh

@@ -10,11 +10,12 @@ CLI=../bin/run
 ${CLI} dev:init
 ${CLI} leader:set-buckets-per-bag-limit -l 10
 FAMILY_ID=`${CLI} leader:create-bucket-family ${CONFIG}`
-BUCKET_ID=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
-${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bucket-mode -f ${FAMILY_ID} -B ${BUCKET_ID} --mode on
-${CLI} leader:invite-bucket-operator -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} operator:accept-invitation -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} operator:set-metadata -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0 -e http://localhost:3334
+BUCKET_INDEX=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
+BUCKET_ID="${FAMILY_ID}:${BUCKET_INDEX}"
+${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_INDEX}
+${CLI} leader:update-bucket-mode -B ${BUCKET_ID} --mode on
+${CLI} leader:invite-bucket-operator -B ${BUCKET_ID} -w 0
+${CLI} operator:accept-invitation -B ${BUCKET_ID} -w 0
+${CLI} operator:set-metadata -B ${BUCKET_ID} -w 0 -e http://localhost:3334
 ${CLI} leader:update-dynamic-bag-policy -t Channel -p ${FAMILY_ID}:1
 ${CLI} leader:update-dynamic-bag-policy -t Member -p ${FAMILY_ID}:1

+ 37 - 21
distributor-node/scripts/test-commands.sh

@@ -9,33 +9,49 @@ CLI=../bin/run
 
 ${CLI} dev:init
 ${CLI} leader:set-buckets-per-bag-limit -l 10
+# Create family and buckets
 FAMILY_ID=`${CLI} leader:create-bucket-family`
-BUCKET_ID=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
-${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:storage -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:content -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:operationsAlpha -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:operationsBeta -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:operationsGamma -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:gateway -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bag -b static:wg:distribution -f ${FAMILY_ID} -a ${BUCKET_ID}
-${CLI} leader:update-bucket-status -f ${FAMILY_ID} -B ${BUCKET_ID}  --acceptingBags yes
-${CLI} leader:update-bucket-mode -f ${FAMILY_ID} -B ${BUCKET_ID} --mode on
+BUCKET_1_INDEX=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
+BUCKET_2_INDEX=`${CLI} leader:create-bucket -f ${FAMILY_ID} -a yes`
+BUCKET_1_ID="${FAMILY_ID}:${BUCKET_1_INDEX}"
+BUCKET_2_ID="${FAMILY_ID}:${BUCKET_2_INDEX}"
+# Test adding 2 buckets to bag at once
+${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_1_INDEX} ${BUCKET_2_INDEX}
+# Test removing 2 buckets from bag at once
+${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -r ${BUCKET_1_INDEX} ${BUCKET_2_INDEX}
+# Adding single bucket to all static bags
+${CLI} leader:update-bag -b static:council -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:storage -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:content -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:operationsAlpha -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:operationsBeta -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:operationsGamma -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:gateway -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+${CLI} leader:update-bag -b static:wg:distribution -f ${FAMILY_ID} -a ${BUCKET_1_INDEX}
+# Update bucket status / mode
+${CLI} leader:update-bucket-status -B ${BUCKET_1_ID}  --acceptingBags yes
+${CLI} leader:update-bucket-mode -B ${BUCKET_1_ID} --mode on
+${CLI} leader:update-bucket-status -B ${BUCKET_2_ID}  --acceptingBags no
+${CLI} leader:update-bucket-mode -B ${BUCKET_2_ID} --mode off
+# Update dynamic bag policies
 ${CLI} leader:update-dynamic-bag-policy -t Channel -p ${FAMILY_ID}:5
 ${CLI} leader:update-dynamic-bag-policy -t Member -p ${FAMILY_ID}:5
 ${CLI} leader:update-dynamic-bag-policy -t Member
-${CLI} leader:invite-bucket-operator -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} leader:cancel-invitation -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} leader:invite-bucket-operator -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} operator:accept-invitation -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0
-${CLI} operator:set-metadata -f ${FAMILY_ID} -B ${BUCKET_ID} -w 0 -i ./data/operator-metadata.json
+# Bucket invitations + cancelling and accepting
+${CLI} leader:invite-bucket-operator -B ${BUCKET_1_ID} -w 0
+${CLI} leader:invite-bucket-operator -B ${BUCKET_2_ID} -w 0
+${CLI} operator:accept-invitation -B ${BUCKET_1_ID} -w 0
+${CLI} leader:cancel-invitation -B ${BUCKET_2_ID} -w 0
+# Setting metadata
+${CLI} operator:set-metadata -B ${BUCKET_1_ID} -w 0 -i ./data/operator-metadata.json
 ${CLI} leader:set-bucket-family-metadata -f ${FAMILY_ID} -i ./data/family-metadata.json
 
 # Deletion commands tested separately
 FAMILY_TO_DELETE_ID=`${CLI} leader:create-bucket-family`
-BUCKET_TO_DELETE_ID=`${CLI} leader:create-bucket -f ${FAMILY_TO_DELETE_ID} -a yes`
-${CLI} leader:invite-bucket-operator -f ${FAMILY_TO_DELETE_ID} -B ${BUCKET_TO_DELETE_ID} -w 0
-${CLI} operator:accept-invitation -f ${FAMILY_TO_DELETE_ID} -B ${BUCKET_TO_DELETE_ID} -w 0
-${CLI} leader:remove-bucket-operator -f ${FAMILY_TO_DELETE_ID} -B ${BUCKET_TO_DELETE_ID} -w 0
-${CLI} leader:delete-bucket -f ${FAMILY_TO_DELETE_ID} -B ${BUCKET_TO_DELETE_ID}
+BUCKET_TO_DELETE_INDEX=`${CLI} leader:create-bucket -f ${FAMILY_TO_DELETE_ID} -a yes`
+BUCKET_TO_DELETE_ID="${FAMILY_TO_DELETE_ID}:${BUCKET_TO_DELETE_INDEX}"
+${CLI} leader:invite-bucket-operator -B ${BUCKET_TO_DELETE_ID} -w 0
+${CLI} operator:accept-invitation -B ${BUCKET_TO_DELETE_ID} -w 0
+${CLI} leader:remove-bucket-operator -B ${BUCKET_TO_DELETE_ID} -w 0
+${CLI} leader:delete-bucket -B ${BUCKET_TO_DELETE_ID}
 ${CLI} leader:delete-bucket-family -f ${FAMILY_TO_DELETE_ID}

+ 2 - 2
distributor-node/src/api-spec/operator.yml

@@ -112,8 +112,8 @@ components:
           type: array
           minItems: 1
           items:
-            type: integer
-            minimum: 0
+            type: string
+            pattern: ^[0-9]+:[0-9]+$
 
 security:
   - OperatorAuth: []

+ 2 - 2
distributor-node/src/api-spec/public.yml

@@ -192,8 +192,8 @@ components:
             bucketIds:
               type: array
               items:
-                type: integer
-                minimum: 0
+                type: string
+                pattern: ^[0-9]+:[0-9]+$
         - type: object
           required:
             - 'allByWorkerId'

+ 9 - 0
distributor-node/src/command-base/default.ts

@@ -6,6 +6,7 @@ import { ConfigParserService } from '../services/parsers/ConfigParserService'
 import { LoggingService } from '../services/logging'
 import { Logger } from 'winston'
 import { BagIdParserService } from '../services/parsers/BagIdParserService'
+import { BucketIdParserService } from '../services/parsers/BucketIdParserService'
 
 export const flags = {
   ...oclifFlags,
@@ -21,6 +22,7 @@ export const flags = {
     },
   }),
   bagId: oclifFlags.build({
+    char: 'b',
     parse: (value: string) => {
       const parser = new BagIdParserService(value)
       return parser.parse()
@@ -37,6 +39,13 @@ export const flags = {
     - static:wg:storage
     - dynamic:member:4`,
   }),
+  bucketId: oclifFlags.build({
+    char: 'B',
+    parse: (value: string) => {
+      return BucketIdParserService.parseBucketId(value)
+    },
+    description: `Distribution bucket ID in {familyId}:{bucketIndex} format.`,
+  }),
 }
 export default abstract class DefaultCommandBase extends Command {
   protected appConfig!: ReadonlyConfig

+ 7 - 29
distributor-node/src/commands/dev/batchUpload.ts

@@ -1,13 +1,12 @@
 import AccountsCommandBase from '../../command-base/accounts'
 import DefaultCommandBase, { flags } from '../../command-base/default'
-import { FilesApi, Configuration, TokenRequest } from '../../services/networking/storage-node/generated'
-import { u8aToHex } from '@polkadot/util'
 import FormData from 'form-data'
 import imgGen from 'js-image-generator'
 import { SubmittableExtrinsic } from '@polkadot/api/types'
 import { BagIdParserService } from '../../services/parsers/BagIdParserService'
 import axios from 'axios'
 import { ContentHash } from '../../services/crypto/ContentHash'
+import urljoin from 'url-join'
 
 async function generateRandomImage(): Promise<Buffer> {
   return new Promise((resolve, reject) => {
@@ -41,18 +40,17 @@ export default class DevBatchUpload extends AccountsCommandBase {
       char: 'C',
       required: true,
     }),
+    endpoint: flags.string({
+      char: 'e',
+      required: true,
+    }),
   }
 
   async run(): Promise<void> {
     const { api } = this
-    const { bagId, bucketId, batchSize, batchesCount } = this.parse(DevBatchUpload).flags
+    const { bagId, bucketId, batchSize, batchesCount, endpoint } = this.parse(DevBatchUpload).flags
     const sudoKey = (await api.query.sudo.key()).toHuman()
     const dataFee = await api.query.storage.dataObjectPerMegabyteFee()
-    const storageApi = new FilesApi(
-      new Configuration({
-        basePath: 'http://127.0.0.1:3333/api/v1',
-      })
-    )
 
     for (let i = 0; i < batchesCount; ++i) {
       const nextObjectId = (await api.query.storage.nextDataObjectId()).toNumber()
@@ -85,25 +83,6 @@ export default class DevBatchUpload extends AccountsCommandBase {
       await Promise.all(
         batch.map(async ([, dataObject], k) => {
           const dataObjectId = nextObjectId + k
-          const data: TokenRequest['data'] = {
-            accountId: sudoKey,
-            bagId,
-            dataObjectId,
-            memberId: 0,
-            storageBucketId: bucketId,
-          }
-          const message = JSON.stringify(data)
-          const signature = u8aToHex(this.getPair(sudoKey).sign(message))
-          const {
-            data: { token },
-          } = await storageApi.publicApiAuthTokenForUploading({
-            data,
-            signature,
-          })
-          if (!token) {
-            throw new Error('Received empty token!')
-          }
-
           const formData = new FormData()
           formData.append('dataObjectId', dataObjectId.toString())
           formData.append('storageBucketId', bucketId.toString())
@@ -113,10 +92,9 @@ export default class DevBatchUpload extends AccountsCommandBase {
           try {
             await axios({
               method: 'POST',
-              url: 'http://127.0.0.1:3333/api/v1/files',
+              url: urljoin(endpoint, 'api/v1/files'),
               data: formData,
               headers: {
-                'x-api-key': token,
                 'content-type': 'multipart/form-data',
                 ...formData.getHeaders(),
               },

+ 7 - 11
distributor-node/src/commands/leader/cancel-invitation.ts

@@ -6,14 +6,7 @@ export default class LeaderCancelInvitation extends AccountsCommandBase {
   Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     workerId: flags.integer({
@@ -25,13 +18,16 @@ export default class LeaderCancelInvitation extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, workerId } = this.parse(LeaderCancelInvitation).flags
+    const { bucketId, workerId } = this.parse(LeaderCancelInvitation).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Canceling distribution bucket operator invitation (bucket: ${bucketId}, worker: ${workerId})...`)
+    this.log(`Canceling distribution bucket operator invitation...`, {
+      bucketId: bucketId.toHuman(),
+      workerId: workerId,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.cancelDistributionBucketOperatorInvite(familyId, bucketId, workerId)
+      this.api.tx.storage.cancelDistributionBucketOperatorInvite(bucketId, workerId)
     )
     this.log('Invitation succesfully canceled!')
   }

+ 5 - 2
distributor-node/src/commands/leader/create-bucket.ts

@@ -24,7 +24,10 @@ export default class LeaderCreateBucket extends AccountsCommandBase {
     const { familyId, acceptingBags } = this.parse(LeaderCreateBucket).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log('Creating new distribution bucket...')
+    this.log('Creating new distribution bucket...', {
+      familyId,
+      acceptingBags,
+    })
     const result = await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.createDistributionBucket(familyId, acceptingBags === 'yes')
@@ -33,6 +36,6 @@ export default class LeaderCreateBucket extends AccountsCommandBase {
 
     this.log('Bucket succesfully created!')
     const bucketId = event.data[2]
-    this.output(bucketId.toString())
+    this.output(bucketId.distribution_bucket_index.toString())
   }
 }

+ 1 - 1
distributor-node/src/commands/leader/delete-bucket-family.ts

@@ -18,7 +18,7 @@ export default class LeaderDeleteBucketFamily extends AccountsCommandBase {
     const { familyId } = this.parse(LeaderDeleteBucketFamily).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Deleting distribution bucket family (${familyId})...`)
+    this.log(`Deleting distribution bucket family...`, { familyId })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.deleteDistributionBucketFamily(familyId)

+ 5 - 13
distributor-node/src/commands/leader/delete-bucket.ts

@@ -1,32 +1,24 @@
-import { flags } from '@oclif/command'
 import AccountsCommandBase from '../../command-base/accounts'
-import DefaultCommandBase from '../../command-base/default'
+import DefaultCommandBase, { flags } from '../../command-base/default'
 
 export default class LeaderDeleteBucket extends AccountsCommandBase {
   static description = `Delete distribution bucket. The bucket must have no operators. Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     ...DefaultCommandBase.flags,
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId } = this.parse(LeaderDeleteBucket).flags
+    const { bucketId } = this.parse(LeaderDeleteBucket).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Deleting distribution bucket (${bucketId})...`)
+    this.log(`Deleting distribution bucket...`, { bucketId: bucketId.toHuman() })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.deleteDistributionBucket(familyId, bucketId)
+      this.api.tx.storage.deleteDistributionBucket(bucketId)
     )
     this.log('Bucket succesfully deleted!')
   }

+ 7 - 11
distributor-node/src/commands/leader/invite-bucket-operator.ts

@@ -7,14 +7,7 @@ export default class LeaderInviteBucketOperator extends AccountsCommandBase {
   Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     workerId: flags.integer({
@@ -26,13 +19,16 @@ export default class LeaderInviteBucketOperator extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, workerId } = this.parse(LeaderInviteBucketOperator).flags
+    const { bucketId, workerId } = this.parse(LeaderInviteBucketOperator).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Inviting distribution bucket operator (bucket: ${bucketId}, worker: ${workerId})...`)
+    this.log(`Inviting distribution bucket operator...`, {
+      bucketId: bucketId.toHuman(),
+      workerId,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.inviteDistributionBucketOperator(familyId, bucketId, workerId)
+      this.api.tx.storage.inviteDistributionBucketOperator(bucketId, workerId)
     )
     this.log('Bucket operator succesfully invited!')
   }

+ 7 - 11
distributor-node/src/commands/leader/remove-bucket-operator.ts

@@ -6,14 +6,7 @@ export default class LeaderRemoveBucketOperator extends AccountsCommandBase {
   Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     workerId: flags.integer({
@@ -25,13 +18,16 @@ export default class LeaderRemoveBucketOperator extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, workerId } = this.parse(LeaderRemoveBucketOperator).flags
+    const { bucketId, workerId } = this.parse(LeaderRemoveBucketOperator).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Removing distribution bucket operator (bucket: ${bucketId}, worker: ${workerId})...`)
+    this.log(`Removing distribution bucket operator...`, {
+      bucketId: bucketId.toHuman(),
+      workerId,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.removeDistributionBucketOperator(familyId, bucketId, workerId)
+      this.api.tx.storage.removeDistributionBucketOperator(bucketId, workerId)
     )
     this.log('Bucket operator succesfully removed!')
   }

+ 4 - 1
distributor-node/src/commands/leader/set-bucket-family-metadata.ts

@@ -73,7 +73,10 @@ export default class LeaderSetBucketFamilyMetadata extends AccountsCommandBase {
     )
     const metadata = this.parseAndValidateMetadata(metadataInput)
 
-    this.log(`Setting bucket family metadata (family: ${familyId})`, metadata)
+    this.log(`Setting bucket family metadata`, {
+      familyId,
+      metadata,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.setDistributionBucketFamilyMetadata(

+ 1 - 1
distributor-node/src/commands/leader/set-buckets-per-bag-limit.ts

@@ -18,7 +18,7 @@ export default class LeaderSetBucketsPerBagLimit extends AccountsCommandBase {
     const { limit } = this.parse(LeaderSetBucketsPerBagLimit).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Setting new buckets per bag limit (${limit})...`)
+    this.log(`Setting new buckets per bag limit...`, { limit })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.updateDistributionBucketsPerBagLimit(limit)

+ 10 - 12
distributor-node/src/commands/leader/update-bag.ts

@@ -1,3 +1,5 @@
+import { createType } from '@joystream/types'
+import { DistributionBucketIndexSet } from '@joystream/types/storage'
 import AccountsCommandBase from '../../command-base/accounts'
 import DefaultCommandBase, { flags } from '../../command-base/default'
 
@@ -14,39 +16,35 @@ export default class LeaderUpdateBag extends AccountsCommandBase {
       description: 'ID of the distribution bucket family',
       required: true,
     }),
-    add: flags.integerArr({
+    add: flags.integer({
       char: 'a',
-      description: 'ID of a bucket to add to bag',
+      description: 'Index(es) (within the family) of bucket(s) to add to the bag',
       default: [],
       multiple: true,
     }),
-    remove: flags.integerArr({
+    remove: flags.integer({
       char: 'r',
-      description: 'ID of a bucket to remove from bag',
+      description: 'Index(es) (within the family) of bucket(s) to remove from the bag',
       default: [],
       multiple: true,
     }),
     ...DefaultCommandBase.flags,
   }
 
-  static examples = [`$ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 -a 2 -a 3 -r 4 -r 5`]
+  static examples = [`$ joystream-distributor leader:update-bag -b 1 -f 1 -a 1 2 3 -r 4 5`]
 
   async run(): Promise<void> {
     const { bagId, familyId, add, remove } = this.parse(LeaderUpdateBag).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(
-      `Updating distribution buckets for bag ${bagId} (adding: ${add.join(',' || 'NONE')}, removing: ${
-        remove.join(',') || 'NONE'
-      })...`
-    )
+    this.log(`Updating distribution buckets for bag...`, { bagId: bagId.toHuman(), familyId, add, remove })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.updateDistributionBucketsForBag(
         bagId,
         familyId,
-        this.api.createType('DistributionBucketIdSet', add),
-        this.api.createType('DistributionBucketIdSet', remove)
+        createType<DistributionBucketIndexSet, 'DistributionBucketIndexSet'>('DistributionBucketIndexSet', add),
+        createType<DistributionBucketIndexSet, 'DistributionBucketIndexSet'>('DistributionBucketIndexSet', remove)
       )
     )
     this.log('Bag succesfully updated!')

+ 4 - 11
distributor-node/src/commands/leader/update-bucket-mode.ts

@@ -5,14 +5,7 @@ export default class LeaderUpdateBucketMode extends AccountsCommandBase {
   static description = `Update distribution bucket mode ("distributing" flag). Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     mode: flags.enum<'on' | 'off'>({
@@ -25,13 +18,13 @@ export default class LeaderUpdateBucketMode extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, mode } = this.parse(LeaderUpdateBucketMode).flags
+    const { bucketId, mode } = this.parse(LeaderUpdateBucketMode).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Updating distribution bucket mode (${bucketId}, distributing: ${mode})...`)
+    this.log(`Updating distribution bucket mode...`, { bucketId: bucketId.toHuman(), mode })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.updateDistributionBucketMode(familyId, bucketId, mode === 'on')
+      this.api.tx.storage.updateDistributionBucketMode(bucketId, mode === 'on')
     )
     this.log('Bucket mode succesfully updated!')
   }

+ 5 - 13
distributor-node/src/commands/leader/update-bucket-status.ts

@@ -1,19 +1,11 @@
-import { flags } from '@oclif/command'
 import AccountsCommandBase from '../../command-base/accounts'
-import DefaultCommandBase from '../../command-base/default'
+import DefaultCommandBase, { flags } from '../../command-base/default'
 
 export default class LeaderUpdateBucketStatus extends AccountsCommandBase {
   static description = `Update distribution bucket status ("acceptingNewBags" flag). Requires distribution working group leader permissions.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     acceptingBags: flags.enum<'yes' | 'no'>({
@@ -26,13 +18,13 @@ export default class LeaderUpdateBucketStatus extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, acceptingBags } = this.parse(LeaderUpdateBucketStatus).flags
+    const { bucketId, acceptingBags } = this.parse(LeaderUpdateBucketStatus).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Updating distribution bucket status (${bucketId}, acceptingNewBags: ${acceptingBags})...`)
+    this.log(`Updating distribution bucket status...`, { bucketId: bucketId.toHuman(), acceptingBags })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
-      this.api.tx.storage.updateDistributionBucketStatus(familyId, bucketId, acceptingBags === 'yes')
+      this.api.tx.storage.updateDistributionBucketStatus(bucketId, acceptingBags === 'yes')
     )
     this.log('Bucket status succesfully updated!')
   }

+ 6 - 2
distributor-node/src/commands/leader/update-dynamic-bag-policy.ts

@@ -27,17 +27,21 @@ export default class LeaderUpdateDynamicBagPolicy extends AccountsCommandBase {
       char: 'p',
       description: 'Key-value pair of {familyId}:{numberOfBuckets}',
       multiple: true,
+      default: [],
     }),
     ...DefaultCommandBase.flags,
   }
 
-  static examples = [`$ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 -p 2:10 -p 3:5`]
+  static examples = [`$ joystream-distributor leader:update-dynamic-bag-policy -t Member -p 1:5 2:10 3:5`]
 
   async run(): Promise<void> {
     const { type, policy } = this.parse(LeaderUpdateDynamicBagPolicy).flags
     const leadKey = await this.getDistributorLeadKey()
 
-    this.log(`Updating dynamic bag policy (${type})...`)
+    this.log(`Updating dynamic bag policy...`, {
+      type,
+      policy: policy.map(([familyId, numberOfBuckets]) => ({ familyId, numberOfBuckets })),
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(leadKey),
       this.api.tx.storage.updateFamiliesInDynamicBagCreationPolicy(

+ 12 - 6
distributor-node/src/commands/node/set-buckets.ts

@@ -1,7 +1,8 @@
-import { flags } from '@oclif/command'
 import ExitCodes from '../../command-base/ExitCodes'
 import NodeCommandBase from '../../command-base/node'
+import { flags } from '../../command-base/default'
 import { SetBucketsOperation } from '../../types'
+import { BucketIdParserService } from '../../services/parsers/BucketIdParserService'
 
 export default class NodeSetBucketsCommand extends NodeCommandBase {
   static description = `Send an api request to change the set of buckets distributed by given distributor node.`
@@ -12,15 +13,20 @@ export default class NodeSetBucketsCommand extends NodeCommandBase {
       description: 'Distribute all buckets belonging to configured worker',
       exclusive: ['bucketIds'],
     }),
-    bucketIds: flags.integer({
-      char: 'B',
-      description: 'Set of bucket ids to distribute',
-      exclusive: ['all'],
+    bucketIds: flags.bucketId({
+      description:
+        'Set of bucket ids to distribute. Each bucket id should be in {familyId}:{bucketIndex} format. ' +
+        'Multiple ids can be provided, separated by space.',
       multiple: true,
     }),
     ...NodeCommandBase.flags,
   }
 
+  static examples = [
+    '$ joystream-distributor node:set-buckets --bucketIds 1:1 1:2 1:3 2:1 2:2',
+    '$ joystream-distributor node:set-buckets --all',
+  ]
+
   protected reqUrl(): string {
     return '/api/v1/set-buckets'
   }
@@ -35,7 +41,7 @@ export default class NodeSetBucketsCommand extends NodeCommandBase {
     return all
       ? {}
       : {
-          buckets: bucketIds,
+          buckets: bucketIds.map((b) => BucketIdParserService.formatBucketId(b)),
         }
   }
 }

+ 0 - 4
distributor-node/src/commands/node/shutdown.ts

@@ -10,8 +10,4 @@ export default class NodeShutdownCommand extends NodeCommandBase {
   protected reqUrl(): string {
     return '/api/v1/shutdown'
   }
-
-  protected reqBody(): Record<string, unknown> {
-    return {}
-  }
 }

+ 0 - 4
distributor-node/src/commands/node/start-public-api.ts

@@ -10,8 +10,4 @@ export default class NodeStartPublicApiCommand extends NodeCommandBase {
   protected reqUrl(): string {
     return '/api/v1/start-api'
   }
-
-  protected reqBody(): Record<string, unknown> {
-    return {}
-  }
 }

+ 0 - 4
distributor-node/src/commands/node/stop-public-api.ts

@@ -10,8 +10,4 @@ export default class NodeStopPublicApiCommand extends NodeCommandBase {
   protected reqUrl(): string {
     return '/api/v1/stop-api'
   }
-
-  protected reqBody(): Record<string, unknown> {
-    return {}
-  }
 }

+ 7 - 11
distributor-node/src/commands/operator/accept-invitation.ts

@@ -6,14 +6,7 @@ export default class OperatorAcceptInvitation extends AccountsCommandBase {
   Requires the invited distribution group worker role key.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     workerId: flags.integer({
@@ -25,13 +18,16 @@ export default class OperatorAcceptInvitation extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, workerId } = this.parse(OperatorAcceptInvitation).flags
+    const { bucketId, workerId } = this.parse(OperatorAcceptInvitation).flags
     const workerKey = await this.getDistributorWorkerRoleKey(workerId)
 
-    this.log(`Accepting distribution bucket operator invitation (bucket: ${bucketId}, worker: ${workerId})...`)
+    this.log(`Accepting distribution bucket operator invitation...`, {
+      bucketId: bucketId.toHuman(),
+      workerId,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(workerKey),
-      this.api.tx.storage.acceptDistributionBucketInvitation(workerId, familyId, bucketId)
+      this.api.tx.storage.acceptDistributionBucketInvitation(workerId, bucketId)
     )
     this.log('Invitation succesfully accepted!')
   }

+ 7 - 11
distributor-node/src/commands/operator/set-metadata.ts

@@ -9,14 +9,7 @@ export default class OperatorSetMetadata extends AccountsCommandBase {
   Requires active distribution bucket operator worker role key.`
 
   static flags = {
-    bucketId: flags.integer({
-      char: 'B',
-      description: 'Distribution bucket id',
-      required: true,
-    }),
-    familyId: flags.integer({
-      char: 'f',
-      description: 'Distribution bucket family id',
+    bucketId: flags.bucketId({
       required: true,
     }),
     workerId: flags.integer({
@@ -38,7 +31,7 @@ export default class OperatorSetMetadata extends AccountsCommandBase {
   }
 
   async run(): Promise<void> {
-    const { bucketId, familyId, workerId, input, endpoint } = this.parse(OperatorSetMetadata).flags
+    const { bucketId, workerId, input, endpoint } = this.parse(OperatorSetMetadata).flags
     const workerKey = await this.getDistributorWorkerRoleKey(workerId)
 
     const validation = new ValidationService()
@@ -46,12 +39,15 @@ export default class OperatorSetMetadata extends AccountsCommandBase {
       ? validation.validate('OperatorMetadata', JSON.parse(fs.readFileSync(input).toString()))
       : { endpoint }
 
-    this.log(`Setting bucket operator metadata (bucket: ${bucketId}, worker: ${workerId})...`, metadata)
+    this.log(`Setting bucket operator metadata...`, {
+      bucketId: bucketId.toHuman(),
+      workerId,
+      metadata,
+    })
     await this.sendAndFollowTx(
       await this.getDecodedPair(workerKey),
       this.api.tx.storage.setDistributionOperatorMetadata(
         workerId,
-        familyId,
         bucketId,
         '0x' + Buffer.from(DistributionBucketOperatorMetadata.encode(metadata).finish()).toString('hex')
       )

+ 8 - 4
distributor-node/src/schemas/configSchema.ts

@@ -1,6 +1,7 @@
 import { JSONSchema4 } from 'json-schema'
 import winston from 'winston'
 import { MAX_CONCURRENT_RESPONSE_TIME_CHECKS } from '../services/networking/NetworkingService'
+import { BucketIdParserService } from '../services/parsers/BucketIdParserService'
 import { objectSchema } from './utils'
 
 export const bytesizeUnits = ['B', 'K', 'M', 'G', 'T']
@@ -64,7 +65,8 @@ export const configSchema: JSONSchema4 = objectSchema({
               type: 'string',
             },
             maxFiles: {
-              description: 'Maximum number of log files to store',
+              description:
+                'Maximum number of log files to store. Recommended to be at least 7 when frequency is set to `daily` and at least 24 * 7 when frequency is set to `hourly`',
               type: 'integer',
               minimum: 1,
             },
@@ -236,11 +238,13 @@ export const configSchema: JSONSchema4 = objectSchema({
     },
     buckets: {
       description:
-        'Set of bucket ids distributed by the node. If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed.',
-      title: 'Bucket ids',
+        'Set of bucket ids distributed by the node. ' +
+        'If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed. ' +
+        'Expected bucket id format is: {familyId}:{bucketIndex}',
+      title: "Distributed buckets' ids",
       type: 'array',
       uniqueItems: true,
-      items: { type: 'integer', minimum: 0 },
+      items: { type: 'string', pattern: BucketIdParserService.bucketIdStrRegex.source },
       minItems: 1,
     },
     workerId: {

+ 11 - 3
distributor-node/src/services/cache/StateCacheService.ts

@@ -4,7 +4,7 @@ import { LoggingService } from '../logging'
 import _ from 'lodash'
 import fs from 'fs'
 import NodeCache from 'node-cache'
-import { PendingDownload } from '../networking/PendingDownload'
+import { PendingDownload, PendingDownloadStatusType } from '../networking/PendingDownload'
 
 // LRU-SP cache parameters
 // Since size is in KB, these parameters should be enough for grouping objects of size up to 2^24 KB = 16 GB
@@ -160,10 +160,18 @@ export class StateCacheService {
     return this.memoryState.pendingDownloadsByObjectId.get(objectId)
   }
 
-  public dropPendingDownload(objectId: string): void {
+  public dropPendingDownload(
+    objectId: string,
+    status: PendingDownloadStatusType.Failed | PendingDownloadStatusType.Completed = PendingDownloadStatusType.Failed
+  ): void {
     const pendingDownload = this.memoryState.pendingDownloadsByObjectId.get(objectId)
     if (pendingDownload) {
-      pendingDownload.cleanup()
+      this.logger.debug(`Finalizing pending download`, {
+        objectId,
+        previousStatus: pendingDownload.getStatus(),
+        finalStatus: status,
+      })
+      pendingDownload.setStatus({ type: status })
       this.memoryState.pendingDownloadsByObjectId.delete(objectId)
     }
   }

+ 3 - 2
distributor-node/src/services/content/ContentService.ts

@@ -9,6 +9,7 @@ import { Readable, pipeline } from 'stream'
 import { NetworkingService } from '../networking'
 import { ContentHash } from '../crypto/ContentHash'
 import readChunk from 'read-chunk'
+import { PendingDownloadStatusType } from '../networking/PendingDownload'
 
 export const DEFAULT_CONTENT_TYPE = 'application/octet-stream'
 export const MIME_TYPE_DETECTION_CHUNK_SIZE = 4100
@@ -245,7 +246,7 @@ export class ContentService {
         hash.update(chunk)
 
         if (bytesReceived > expectedSize) {
-          dataStream.destroy(new Error('Unexpected content size: Too much data received from source!'))
+          fileStream.destroy(new Error('Unexpected content size: Too much data received from source!'))
         }
       }
 
@@ -282,7 +283,7 @@ export class ContentService {
 
         const mimeType = await this.detectMimeType(objectId)
         this.logger.info('New content accepted', { ...logMetadata })
-        this.stateCache.dropPendingDownload(objectId)
+        this.stateCache.dropPendingDownload(objectId, PendingDownloadStatusType.Completed)
         this.stateCache.newContent(objectId, expectedSize)
         this.stateCache.setContentMimeType(objectId, mimeType)
       })

+ 1 - 1
distributor-node/src/services/content/FileContinousReadStream.ts

@@ -31,7 +31,6 @@ export class FileContinousReadStream extends Readable {
   }
 
   private finish() {
-    fs.closeSync(this.fd)
     this.finished = true
   }
 
@@ -83,5 +82,6 @@ export class FileContinousReadStream extends Readable {
     if (this.interval) {
       clearInterval(this.interval)
     }
+    fs.closeSync(this.fd)
   }
 }

+ 14 - 8
distributor-node/src/services/httpApi/controllers/public.ts

@@ -15,7 +15,7 @@ import {
 import { LoggingService } from '../../logging'
 import { ContentService, DEFAULT_CONTENT_TYPE } from '../../content/ContentService'
 import proxy from 'express-http-proxy'
-import { PendingDownloadStatusDownloading, PendingDownloadStatusType } from '../../networking/PendingDownload'
+import { PendingDownload, PendingDownloadStatusType } from '../../networking/PendingDownload'
 import urljoin from 'url-join'
 
 const CACHED_MAX_AGE = 31536000
@@ -149,15 +149,15 @@ export class PublicApiController {
     objectId: string
   ) {
     const pendingDownload = this.stateCache.getPendingDownload(objectId)
-    if (!pendingDownload) {
-      throw new Error('Trying to serve pending download asset that is not pending download!')
+    if (!pendingDownload || pendingDownload.getStatus().type === PendingDownloadStatusType.Completed) {
+      throw new Error('Trying to serve pending download asset that is not in pending download state!')
     }
     const status = pendingDownload.getStatus().type
-    this.logger.verbose('Serving object in pending download state', { objectId, status })
+    this.logger.verbose('Serving object in pending download state', { objectId, currentStatus: status })
 
-    await pendingDownload.untilStatus(PendingDownloadStatusType.Downloading)
     const objectSize = pendingDownload.getObjectSize()
-    const { source, contentType } = pendingDownload.getStatus() as PendingDownloadStatusDownloading
+    const { source, contentType } = await pendingDownload.sourceData()
+
     res.setHeader('content-type', contentType || DEFAULT_CONTENT_TYPE)
     // Allow caching pendingDownload reponse only for very short period of time and requite revalidation,
     // since the data coming from the source may not be valid
@@ -169,10 +169,10 @@ export class PublicApiController {
       const range = req.range(objectSize)
       if (!range || range === -1 || range === -2 || range.length !== 1 || range.type !== 'bytes') {
         // Range is not provided / invalid - serve data from pending download file
-        return this.servePendingDownloadAssetFromFile(req, res, next, objectId, objectSize)
+        return this.servePendingDownloadAssetFromFile(req, res, next, pendingDownload, objectId, objectSize)
       } else if (range[0].start <= partiallyDownloadedContentSize) {
         // Range starts at the already downloaded part of the content - serve data from pending download file
-        return this.servePendingDownloadAssetFromFile(req, res, next, objectId, objectSize, range[0])
+        return this.servePendingDownloadAssetFromFile(req, res, next, pendingDownload, objectId, objectSize, range[0])
       }
     }
 
@@ -184,6 +184,7 @@ export class PublicApiController {
     req: express.Request<AssetRouteParams>,
     res: express.Response,
     next: express.NextFunction,
+    pendingDownload: PendingDownload,
     objectId: string,
     objectSize: number,
     range?: { start: number; end: number }
@@ -201,10 +202,15 @@ export class PublicApiController {
       res.setHeader('content-range', `bytes ${range.start}-${range.end}/${objectSize}`)
     }
     stream.pipe(res)
+    // Cleanup & infinite loading prevention
     req.on('close', () => {
       stream.destroy()
       res.end()
     })
+    pendingDownload.onError(() => {
+      stream.destroy()
+      next(new Error(`Failed to get valid data for object ${objectId}`))
+    })
   }
 
   public async assetHead(req: express.Request<AssetRouteParams>, res: express.Response): Promise<void> {

+ 1 - 0
distributor-node/src/services/logging/LoggingService.ts

@@ -81,6 +81,7 @@ export class LoggingService {
         index: 'distributor-node',
         level: config.logs.elastic.level,
         format: winston.format.combine(pauseFormat({ id: 'es' }), escFormat()),
+        retryLimit: 10,
         flushInterval: 5000,
         source: config.id,
         clientOpts: {

+ 30 - 18
distributor-node/src/services/networking/PendingDownload.ts

@@ -2,6 +2,8 @@ export enum PendingDownloadStatusType {
   Waiting = 'Waiting',
   LookingForSource = 'LookingForSource',
   Downloading = 'Downloading',
+  Failed = 'Failed',
+  Completed = 'Completed',
 }
 
 export type PendingDownloadStatusWaiting = {
@@ -18,23 +20,26 @@ export type PendingDownloadStatusDownloading = {
   contentType?: string
 }
 
+export type PendingDownloadStatusFailed = {
+  type: PendingDownloadStatusType.Failed
+}
+
+export type PendingDownloadStatusCompleted = {
+  type: PendingDownloadStatusType.Completed
+}
+
 export type PendingDownloadStatus =
   | PendingDownloadStatusWaiting
   | PendingDownloadStatusLookingForSource
   | PendingDownloadStatusDownloading
-
-export const STATUS_ORDER = [
-  PendingDownloadStatusType.Waiting,
-  PendingDownloadStatusType.LookingForSource,
-  PendingDownloadStatusType.Downloading,
-] as const
+  | PendingDownloadStatusFailed
+  | PendingDownloadStatusCompleted
 
 export class PendingDownload {
   private objectId: string
   private objectSize: number
   private status: PendingDownloadStatus = { type: PendingDownloadStatusType.Waiting }
   private statusHandlers: Map<PendingDownloadStatusType, (() => void)[]> = new Map()
-  private cleanupHandlers: (() => void)[] = []
 
   constructor(objectId: string, objectSize: number) {
     this.objectId = objectId
@@ -64,21 +69,28 @@ export class PendingDownload {
     this.statusHandlers.set(statusType, [...currentHandlers, handler])
   }
 
-  private registerCleanupHandler(handler: () => void) {
-    this.cleanupHandlers.push(handler)
+  public onError(handler: () => void): void {
+    this.registerStatusHandler(PendingDownloadStatusType.Failed, handler)
   }
 
-  untilStatus<T extends PendingDownloadStatusType>(statusType: T): Promise<void> {
+  sourceData(): Promise<PendingDownloadStatusDownloading> {
     return new Promise((resolve, reject) => {
-      if (STATUS_ORDER.indexOf(this.status.type) >= STATUS_ORDER.indexOf(statusType)) {
-        return resolve()
+      if (this.status.type === PendingDownloadStatusType.Completed) {
+        return reject(new Error(`Trying to get source data from already completed download task`))
+      }
+      if (this.status.type === PendingDownloadStatusType.Failed) {
+        return reject(new Error(`Could not download object ${this.objectId} from any source`))
+      }
+      if (this.status.type === PendingDownloadStatusType.Downloading) {
+        return resolve(this.status)
       }
-      this.registerStatusHandler(statusType, () => resolve())
-      this.registerCleanupHandler(() => reject(new Error(`Could not download object ${this.objectId} from any source`)))
-    })
-  }
 
-  cleanup(): void {
-    this.cleanupHandlers.forEach((handler) => handler())
+      this.registerStatusHandler(PendingDownloadStatusType.Downloading, () =>
+        resolve({ ...this.status } as PendingDownloadStatusDownloading)
+      )
+      this.registerStatusHandler(PendingDownloadStatusType.Failed, () =>
+        reject(new Error(`Could not download object ${this.objectId} from any source`))
+      )
+    })
   }
 }

+ 22 - 11
distributor-node/src/services/networking/query-node/generated/schema.ts

@@ -91,8 +91,6 @@ export type Channel = BaseGraphQlObject & {
   categoryId?: Maybe<Scalars['String']>
   /** Reward account where revenue is sent if set. */
   rewardAccount?: Maybe<Scalars['String']>
-  /** Destination account for the prize associated with channel deletion */
-  deletionPrizeDestAccount: Scalars['String']
   /** The title of the Channel */
   title?: Maybe<Scalars['String']>
   /** The description of a Channel */
@@ -108,7 +106,9 @@ export type Channel = BaseGraphQlObject & {
   language?: Maybe<Language>
   languageId?: Maybe<Scalars['String']>
   videos: Array<Video>
+  /** Number of the block the channel was created in */
   createdInBlock: Scalars['Int']
+  collaborators: Array<Membership>
 }
 
 export type ChannelCategoriesByNameFtsOutput = {
@@ -228,7 +228,6 @@ export type ChannelCreateInput = {
   ownerCuratorGroup?: Maybe<Scalars['ID']>
   category?: Maybe<Scalars['ID']>
   rewardAccount?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount: Scalars['String']
   title?: Maybe<Scalars['String']>
   description?: Maybe<Scalars['String']>
   coverPhoto?: Maybe<Scalars['ID']>
@@ -259,8 +258,6 @@ export enum ChannelOrderByInput {
   CategoryDesc = 'category_DESC',
   RewardAccountAsc = 'rewardAccount_ASC',
   RewardAccountDesc = 'rewardAccount_DESC',
-  DeletionPrizeDestAccountAsc = 'deletionPrizeDestAccount_ASC',
-  DeletionPrizeDestAccountDesc = 'deletionPrizeDestAccount_DESC',
   TitleAsc = 'title_ASC',
   TitleDesc = 'title_DESC',
   DescriptionAsc = 'description_ASC',
@@ -284,7 +281,6 @@ export type ChannelUpdateInput = {
   ownerCuratorGroup?: Maybe<Scalars['ID']>
   category?: Maybe<Scalars['ID']>
   rewardAccount?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount?: Maybe<Scalars['String']>
   title?: Maybe<Scalars['String']>
   description?: Maybe<Scalars['String']>
   coverPhoto?: Maybe<Scalars['ID']>
@@ -325,11 +321,6 @@ export type ChannelWhereInput = {
   rewardAccount_startsWith?: Maybe<Scalars['String']>
   rewardAccount_endsWith?: Maybe<Scalars['String']>
   rewardAccount_in?: Maybe<Array<Scalars['String']>>
-  deletionPrizeDestAccount_eq?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_contains?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_startsWith?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_endsWith?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_in?: Maybe<Array<Scalars['String']>>
   title_eq?: Maybe<Scalars['String']>
   title_contains?: Maybe<Scalars['String']>
   title_startsWith?: Maybe<Scalars['String']>
@@ -359,6 +350,9 @@ export type ChannelWhereInput = {
   videos_none?: Maybe<VideoWhereInput>
   videos_some?: Maybe<VideoWhereInput>
   videos_every?: Maybe<VideoWhereInput>
+  collaborators_none?: Maybe<MembershipWhereInput>
+  collaborators_some?: Maybe<MembershipWhereInput>
+  collaborators_every?: Maybe<MembershipWhereInput>
   AND?: Maybe<Array<ChannelWhereInput>>
   OR?: Maybe<Array<ChannelWhereInput>>
 }
@@ -512,6 +506,8 @@ export type DistributionBucket = BaseGraphQlObject & {
   version: Scalars['Int']
   family: DistributionBucketFamily
   familyId: Scalars['String']
+  /** Bucket index within the family */
+  bucketIndex: Scalars['Int']
   operators: Array<DistributionBucketOperator>
   /** Whether the bucket is accepting any new bags */
   acceptingNewBags: Scalars['Boolean']
@@ -528,6 +524,7 @@ export type DistributionBucketConnection = {
 
 export type DistributionBucketCreateInput = {
   family: Scalars['ID']
+  bucketIndex: Scalars['Float']
   acceptingNewBags: Scalars['Boolean']
   distributing: Scalars['Boolean']
 }
@@ -1028,6 +1025,8 @@ export enum DistributionBucketOrderByInput {
   DeletedAtDesc = 'deletedAt_DESC',
   FamilyAsc = 'family_ASC',
   FamilyDesc = 'family_DESC',
+  BucketIndexAsc = 'bucketIndex_ASC',
+  BucketIndexDesc = 'bucketIndex_DESC',
   AcceptingNewBagsAsc = 'acceptingNewBags_ASC',
   AcceptingNewBagsDesc = 'acceptingNewBags_DESC',
   DistributingAsc = 'distributing_ASC',
@@ -1036,6 +1035,7 @@ export enum DistributionBucketOrderByInput {
 
 export type DistributionBucketUpdateInput = {
   family?: Maybe<Scalars['ID']>
+  bucketIndex?: Maybe<Scalars['Float']>
   acceptingNewBags?: Maybe<Scalars['Boolean']>
   distributing?: Maybe<Scalars['Boolean']>
 }
@@ -1065,6 +1065,12 @@ export type DistributionBucketWhereInput = {
   deletedAt_gte?: Maybe<Scalars['DateTime']>
   deletedById_eq?: Maybe<Scalars['ID']>
   deletedById_in?: Maybe<Array<Scalars['ID']>>
+  bucketIndex_eq?: Maybe<Scalars['Int']>
+  bucketIndex_gt?: Maybe<Scalars['Int']>
+  bucketIndex_gte?: Maybe<Scalars['Int']>
+  bucketIndex_lt?: Maybe<Scalars['Int']>
+  bucketIndex_lte?: Maybe<Scalars['Int']>
+  bucketIndex_in?: Maybe<Array<Scalars['Int']>>
   acceptingNewBags_eq?: Maybe<Scalars['Boolean']>
   acceptingNewBags_in?: Maybe<Array<Scalars['Boolean']>>
   distributing_eq?: Maybe<Scalars['Boolean']>
@@ -1483,6 +1489,7 @@ export type Membership = BaseGraphQlObject & {
   /** The type of subscription the member has purchased if any. */
   subscription?: Maybe<Scalars['Int']>
   channels: Array<Channel>
+  collaboratorInChannels: Array<Channel>
 }
 
 export type MembershipConnection = {
@@ -1616,6 +1623,9 @@ export type MembershipWhereInput = {
   channels_none?: Maybe<ChannelWhereInput>
   channels_some?: Maybe<ChannelWhereInput>
   channels_every?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_none?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_some?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_every?: Maybe<ChannelWhereInput>
   AND?: Maybe<Array<MembershipWhereInput>>
   OR?: Maybe<Array<MembershipWhereInput>>
 }
@@ -2618,6 +2628,7 @@ export type StorageBucketOperatorStatus =
 
 export type StorageBucketOperatorStatusActive = {
   workerId: Scalars['Int']
+  transactorAccountId: Scalars['String']
 }
 
 export type StorageBucketOperatorStatusInvited = {

+ 2 - 2
distributor-node/src/services/networking/storage-node/api.ts

@@ -44,7 +44,7 @@ export class StorageNodeApi {
     const [options, timeout] = this.reqConfigWithTimeout({}, this.config.limits.outboundRequestsTimeoutMs)
     this.logger.debug('Checking object availibility', { objectId })
     try {
-      await this.filesApi.publicApiGetFileHeaders(objectId, options)
+      await this.filesApi.filesApiGetFileHeaders(objectId, options)
       this.logger.debug('Data object available', { objectId })
       return true
     } catch (err) {
@@ -71,7 +71,7 @@ export class StorageNodeApi {
       options.headers.Range = `bytes=${startAt}-`
     }
     try {
-      const response: StorageNodeDownloadResponse = await this.filesApi.publicApiGetFile(objectId, options)
+      const response: StorageNodeDownloadResponse = await this.filesApi.filesApiGetFile(objectId, options)
       response.data.on('end', () => clearTimeout(timeout))
       response.data.on('error', () => clearTimeout(timeout))
       return response

+ 26 - 110
distributor-node/src/services/networking/storage-node/generated/api.ts

@@ -94,19 +94,6 @@ export interface InlineResponse201 {
    */
   id?: string
 }
-/**
- *
- * @export
- * @interface InlineResponse2011
- */
-export interface InlineResponse2011 {
-  /**
-   *
-   * @type {string}
-   * @memberof InlineResponse2011
-   */
-  token?: string
-}
 /**
  *
  * @export
@@ -189,46 +176,15 @@ export interface VersionResponse {
  */
 export const FilesApiAxiosParamCreator = function (configuration?: Configuration) {
   return {
-    /**
-     * Get auth token from a server.
-     * @param {TokenRequest} [tokenRequest] Token request parameters,
-     * @param {*} [options] Override http request option.
-     * @throws {RequiredError}
-     */
-    publicApiAuthTokenForUploading: async (tokenRequest?: TokenRequest, options: any = {}): Promise<RequestArgs> => {
-      const localVarPath = `/authToken`
-      // use dummy base URL string because the URL constructor only accepts absolute URLs.
-      const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL)
-      let baseOptions
-      if (configuration) {
-        baseOptions = configuration.baseOptions
-      }
-
-      const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options }
-      const localVarHeaderParameter = {} as any
-      const localVarQueryParameter = {} as any
-
-      localVarHeaderParameter['Content-Type'] = 'application/json'
-
-      setSearchParams(localVarUrlObj, localVarQueryParameter, options.query)
-      let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {}
-      localVarRequestOptions.headers = { ...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers }
-      localVarRequestOptions.data = serializeDataIfNeeded(tokenRequest, localVarRequestOptions, configuration)
-
-      return {
-        url: toPathString(localVarUrlObj),
-        options: localVarRequestOptions,
-      }
-    },
     /**
      * Returns a media file.
      * @param {string} id Data object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiGetFile: async (id: string, options: any = {}): Promise<RequestArgs> => {
+    filesApiGetFile: async (id: string, options: any = {}): Promise<RequestArgs> => {
       // verify required parameter 'id' is not null or undefined
-      assertParamExists('publicApiGetFile', 'id', id)
+      assertParamExists('filesApiGetFile', 'id', id)
       const localVarPath = `/files/{id}`.replace(`{${'id'}}`, encodeURIComponent(String(id)))
       // use dummy base URL string because the URL constructor only accepts absolute URLs.
       const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL)
@@ -256,9 +212,9 @@ export const FilesApiAxiosParamCreator = function (configuration?: Configuration
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiGetFileHeaders: async (id: string, options: any = {}): Promise<RequestArgs> => {
+    filesApiGetFileHeaders: async (id: string, options: any = {}): Promise<RequestArgs> => {
       // verify required parameter 'id' is not null or undefined
-      assertParamExists('publicApiGetFileHeaders', 'id', id)
+      assertParamExists('filesApiGetFileHeaders', 'id', id)
       const localVarPath = `/files/{id}`.replace(`{${'id'}}`, encodeURIComponent(String(id)))
       // use dummy base URL string because the URL constructor only accepts absolute URLs.
       const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL)
@@ -289,7 +245,7 @@ export const FilesApiAxiosParamCreator = function (configuration?: Configuration
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiUploadFile: async (
+    filesApiUploadFile: async (
       dataObjectId: string,
       storageBucketId: string,
       bagId: string,
@@ -297,11 +253,11 @@ export const FilesApiAxiosParamCreator = function (configuration?: Configuration
       options: any = {}
     ): Promise<RequestArgs> => {
       // verify required parameter 'dataObjectId' is not null or undefined
-      assertParamExists('publicApiUploadFile', 'dataObjectId', dataObjectId)
+      assertParamExists('filesApiUploadFile', 'dataObjectId', dataObjectId)
       // verify required parameter 'storageBucketId' is not null or undefined
-      assertParamExists('publicApiUploadFile', 'storageBucketId', storageBucketId)
+      assertParamExists('filesApiUploadFile', 'storageBucketId', storageBucketId)
       // verify required parameter 'bagId' is not null or undefined
-      assertParamExists('publicApiUploadFile', 'bagId', bagId)
+      assertParamExists('filesApiUploadFile', 'bagId', bagId)
       const localVarPath = `/files`
       // use dummy base URL string because the URL constructor only accepts absolute URLs.
       const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL)
@@ -315,9 +271,6 @@ export const FilesApiAxiosParamCreator = function (configuration?: Configuration
       const localVarQueryParameter = {} as any
       const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)()
 
-      // authentication UploadAuth required
-      await setApiKeyToObject(localVarHeaderParameter, 'x-api-key', configuration)
-
       if (file !== undefined) {
         localVarFormParams.append('file', file as any)
       }
@@ -356,30 +309,17 @@ export const FilesApiAxiosParamCreator = function (configuration?: Configuration
 export const FilesApiFp = function (configuration?: Configuration) {
   const localVarAxiosParamCreator = FilesApiAxiosParamCreator(configuration)
   return {
-    /**
-     * Get auth token from a server.
-     * @param {TokenRequest} [tokenRequest] Token request parameters,
-     * @param {*} [options] Override http request option.
-     * @throws {RequiredError}
-     */
-    async publicApiAuthTokenForUploading(
-      tokenRequest?: TokenRequest,
-      options?: any
-    ): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<InlineResponse2011>> {
-      const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiAuthTokenForUploading(tokenRequest, options)
-      return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)
-    },
     /**
      * Returns a media file.
      * @param {string} id Data object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    async publicApiGetFile(
+    async filesApiGetFile(
       id: string,
       options?: any
     ): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<any>> {
-      const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFile(id, options)
+      const localVarAxiosArgs = await localVarAxiosParamCreator.filesApiGetFile(id, options)
       return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)
     },
     /**
@@ -388,11 +328,11 @@ export const FilesApiFp = function (configuration?: Configuration) {
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    async publicApiGetFileHeaders(
+    async filesApiGetFileHeaders(
       id: string,
       options?: any
     ): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<void>> {
-      const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiGetFileHeaders(id, options)
+      const localVarAxiosArgs = await localVarAxiosParamCreator.filesApiGetFileHeaders(id, options)
       return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)
     },
     /**
@@ -404,14 +344,14 @@ export const FilesApiFp = function (configuration?: Configuration) {
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    async publicApiUploadFile(
+    async filesApiUploadFile(
       dataObjectId: string,
       storageBucketId: string,
       bagId: string,
       file?: any,
       options?: any
     ): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<InlineResponse201>> {
-      const localVarAxiosArgs = await localVarAxiosParamCreator.publicApiUploadFile(
+      const localVarAxiosArgs = await localVarAxiosParamCreator.filesApiUploadFile(
         dataObjectId,
         storageBucketId,
         bagId,
@@ -430,25 +370,14 @@ export const FilesApiFp = function (configuration?: Configuration) {
 export const FilesApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
   const localVarFp = FilesApiFp(configuration)
   return {
-    /**
-     * Get auth token from a server.
-     * @param {TokenRequest} [tokenRequest] Token request parameters,
-     * @param {*} [options] Override http request option.
-     * @throws {RequiredError}
-     */
-    publicApiAuthTokenForUploading(tokenRequest?: TokenRequest, options?: any): AxiosPromise<InlineResponse2011> {
-      return localVarFp
-        .publicApiAuthTokenForUploading(tokenRequest, options)
-        .then((request) => request(axios, basePath))
-    },
     /**
      * Returns a media file.
      * @param {string} id Data object ID
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiGetFile(id: string, options?: any): AxiosPromise<any> {
-      return localVarFp.publicApiGetFile(id, options).then((request) => request(axios, basePath))
+    filesApiGetFile(id: string, options?: any): AxiosPromise<any> {
+      return localVarFp.filesApiGetFile(id, options).then((request) => request(axios, basePath))
     },
     /**
      * Returns a media file headers.
@@ -456,8 +385,8 @@ export const FilesApiFactory = function (configuration?: Configuration, basePath
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiGetFileHeaders(id: string, options?: any): AxiosPromise<void> {
-      return localVarFp.publicApiGetFileHeaders(id, options).then((request) => request(axios, basePath))
+    filesApiGetFileHeaders(id: string, options?: any): AxiosPromise<void> {
+      return localVarFp.filesApiGetFileHeaders(id, options).then((request) => request(axios, basePath))
     },
     /**
      * Upload data
@@ -468,7 +397,7 @@ export const FilesApiFactory = function (configuration?: Configuration, basePath
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
      */
-    publicApiUploadFile(
+    filesApiUploadFile(
       dataObjectId: string,
       storageBucketId: string,
       bagId: string,
@@ -476,7 +405,7 @@ export const FilesApiFactory = function (configuration?: Configuration, basePath
       options?: any
     ): AxiosPromise<InlineResponse201> {
       return localVarFp
-        .publicApiUploadFile(dataObjectId, storageBucketId, bagId, file, options)
+        .filesApiUploadFile(dataObjectId, storageBucketId, bagId, file, options)
         .then((request) => request(axios, basePath))
     },
   }
@@ -489,19 +418,6 @@ export const FilesApiFactory = function (configuration?: Configuration, basePath
  * @extends {BaseAPI}
  */
 export class FilesApi extends BaseAPI {
-  /**
-   * Get auth token from a server.
-   * @param {TokenRequest} [tokenRequest] Token request parameters,
-   * @param {*} [options] Override http request option.
-   * @throws {RequiredError}
-   * @memberof FilesApi
-   */
-  public publicApiAuthTokenForUploading(tokenRequest?: TokenRequest, options?: any) {
-    return FilesApiFp(this.configuration)
-      .publicApiAuthTokenForUploading(tokenRequest, options)
-      .then((request) => request(this.axios, this.basePath))
-  }
-
   /**
    * Returns a media file.
    * @param {string} id Data object ID
@@ -509,9 +425,9 @@ export class FilesApi extends BaseAPI {
    * @throws {RequiredError}
    * @memberof FilesApi
    */
-  public publicApiGetFile(id: string, options?: any) {
+  public filesApiGetFile(id: string, options?: any) {
     return FilesApiFp(this.configuration)
-      .publicApiGetFile(id, options)
+      .filesApiGetFile(id, options)
       .then((request) => request(this.axios, this.basePath))
   }
 
@@ -522,9 +438,9 @@ export class FilesApi extends BaseAPI {
    * @throws {RequiredError}
    * @memberof FilesApi
    */
-  public publicApiGetFileHeaders(id: string, options?: any) {
+  public filesApiGetFileHeaders(id: string, options?: any) {
     return FilesApiFp(this.configuration)
-      .publicApiGetFileHeaders(id, options)
+      .filesApiGetFileHeaders(id, options)
       .then((request) => request(this.axios, this.basePath))
   }
 
@@ -538,9 +454,9 @@ export class FilesApi extends BaseAPI {
    * @throws {RequiredError}
    * @memberof FilesApi
    */
-  public publicApiUploadFile(dataObjectId: string, storageBucketId: string, bagId: string, file?: any, options?: any) {
+  public filesApiUploadFile(dataObjectId: string, storageBucketId: string, bagId: string, file?: any, options?: any) {
     return FilesApiFp(this.configuration)
-      .publicApiUploadFile(dataObjectId, storageBucketId, bagId, file, options)
+      .filesApiUploadFile(dataObjectId, storageBucketId, bagId, file, options)
       .then((request) => request(this.axios, this.basePath))
   }
 }

+ 21 - 0
distributor-node/src/services/parsers/BucketIdParserService.ts

@@ -0,0 +1,21 @@
+import { DistributionBucketId } from '@joystream/types/storage'
+import { createType } from '@joystream/types'
+
+export class BucketIdParserService {
+  static readonly bucketIdStrRegex = /^[0-9]+:[0-9]+$/
+
+  public static parseBucketId(bucketIdStr: string): DistributionBucketId {
+    if (!BucketIdParserService.bucketIdStrRegex.test(bucketIdStr)) {
+      throw new Error(`Invalid bucket id! Expected format: {familyId}:{bucketIndex}`)
+    }
+    const [familyId, bucketIndex] = bucketIdStr.split(':')
+    return createType<DistributionBucketId, 'DistributionBucketId'>('DistributionBucketId', {
+      distribution_bucket_family_id: parseInt(familyId),
+      distribution_bucket_index: parseInt(bucketIndex),
+    })
+  }
+
+  public static formatBucketId(bucketId: DistributionBucketId): string {
+    return `${bucketId.distribution_bucket_family_id.toString()}:${bucketId.distribution_bucket_index.toString()}`
+  }
+}

+ 4 - 4
distributor-node/src/types/generated/ConfigJson.d.ts

@@ -6,9 +6,9 @@
  */
 
 /**
- * Set of bucket ids distributed by the node. If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed.
+ * Set of bucket ids distributed by the node. If not specified, all buckets currently assigned to worker specified in `config.workerId` will be distributed. Expected bucket id format is: {familyId}:{bucketIndex}
  */
-export type BucketIds = number[]
+export type DistributedBucketsIds = string[]
 
 /**
 * Configuration schema for distributor CLI and node
@@ -128,7 +128,7 @@ export interface DistributorNodeConfiguration {
    * Specifies the keys available within distributor node CLI.
    */
   keys?: (SubstrateUri | MnemonicPhrase | JSONBackupFile)[]
-  buckets?: BucketIds
+  buckets?: DistributedBucketsIds
   /**
    * ID of the node operator (distribution working group worker)
    */
@@ -144,7 +144,7 @@ export interface FileLoggingOptions {
    */
   path: string
   /**
-   * Maximum number of log files to store
+   * Maximum number of log files to store. Recommended to be at least 7 when frequency is set to `daily` and at least 24 * 7 when frequency is set to `hourly`
    */
   maxFiles?: number
   /**

+ 1 - 1
distributor-node/src/types/generated/OperatorApi.ts

@@ -33,7 +33,7 @@ export interface components {
     }
     'SetBucketsOperation': {
       /** Set of bucket ids to be distributed by the node. If not provided - all buckets assigned to currently configured worker will be distributed. */
-      'buckets'?: number[]
+      'buckets'?: string[]
     }
   }
 }

+ 1 - 5
distributor-node/src/types/generated/PublicApi.ts

@@ -22,10 +22,6 @@ export interface paths {
 
 export interface components {
   schemas: {
-    'SetConfigBody': {
-      /** Config setting path (ie. limits.storage) */
-      'path'?: string
-    }
     'ErrorResponse': {
       'type'?: string
       'message': string
@@ -40,7 +36,7 @@ export interface components {
     }
     'BucketsResponse':
       | {
-          'bucketIds': number[]
+          'bucketIds': string[]
         }
       | {
           'allByWorkerId': number

+ 22 - 13
docker-compose.yml

@@ -26,10 +26,11 @@ services:
     volumes:
       - colossus-1-data:/data
       - colossus-1-keystore:/keystore
+      - colossus-1-logs:/logs
       - type: bind
         source: .
         target: /joystream
-    working_dir: /joystream/storage-node-v2
+    working_dir: /joystream/storage-node
     ports:
       - 3333:3333
     env_file:
@@ -37,12 +38,13 @@ services:
       - .env
     environment:
       # ACCOUNT_URI overrides command line arg --accountUri
-      - ACCOUNT_URI=${COLOSSUS_1_ACCOUNT_URI}
+      - ACCOUNT_URI=${COLOSSUS_1_TRANSACTOR_URI}
     command: [
       'yarn', 'storage-node', 'server', '--worker=${COLOSSUS_1_WORKER_ID}', '--port=3333', '--uploads=/data',
       '--sync', '--syncInterval=1',
       '--queryNodeEndpoint=${COLOSSUS_QUERY_NODE_URL}',
-      '--apiUrl=${JOYSTREAM_NODE_WS}'
+      '--apiUrl=${JOYSTREAM_NODE_WS}',
+      '--logFilePath=/logs'
     ]
 
   distributor-1:
@@ -81,7 +83,7 @@ services:
     #   JOYSTREAM_DISTRIBUTOR__LOGS__FILE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOGS__ELASTIC: "off"
     #   JOYSTREAM_DISTRIBUTOR__LIMITS__STORAGE: 50G
-    #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[1,2]"
+    #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[\"1:0\",\"1:1\"]"
     command: ['yarn', 'joystream-distributor', 'start']
 
   colossus-2:
@@ -91,10 +93,11 @@ services:
     volumes:
       - colossus-2-data:/data
       - colossus-2-keystore:/keystore
+      - colossus-2-logs:/logs
       - type: bind
         source: .
         target: /joystream
-    working_dir: /joystream/storage-node-v2
+    working_dir: /joystream/storage-node
     ports:
       - 3335:3333
     env_file:
@@ -102,12 +105,13 @@ services:
       - .env
     environment:
       # ACCOUNT_URI overrides command line arg --accountUri
-      - ACCOUNT_URI=${COLOSSUS_2_ACCOUNT_URI}
+      - ACCOUNT_URI=${COLOSSUS_2_TRANSACTOR_URI}
     command: [
       'yarn', 'storage-node', 'server', '--worker=${COLOSSUS_2_WORKER_ID}', '--port=3333', '--uploads=/data',
       '--sync', '--syncInterval=1',
       '--queryNodeEndpoint=${COLOSSUS_QUERY_NODE_URL}',
-      '--apiUrl=${JOYSTREAM_NODE_WS}'
+      '--apiUrl=${JOYSTREAM_NODE_WS}',
+      '--logFilePath=/logs'
     ]
 
   distributor-2:
@@ -146,7 +150,7 @@ services:
     #   JOYSTREAM_DISTRIBUTOR__LOGS__FILE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOGS__ELASTIC: "off"
     #   JOYSTREAM_DISTRIBUTOR__LIMITS__STORAGE: 50G
-    #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[1,2]"
+    #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[\"1:0\",\"1:1\"]"
     command: ['yarn', 'joystream-distributor', 'start']
 
   db:
@@ -177,7 +181,7 @@ services:
       - DB_HOST=db
       - WARTHOG_APP_PORT=${GRAPHQL_SERVER_PORT}
     ports:
-      - '127.0.0.1:${GRAPHQL_SERVER_PORT}:${GRAPHQL_SERVER_PORT}'
+      - "${GRAPHQL_SERVER_PORT}:${GRAPHQL_SERVER_PORT}"
     depends_on:
       - db
     volumes:
@@ -247,7 +251,7 @@ services:
       - PORT=${HYDRA_INDEXER_GATEWAY_PORT}
       - DEBUG=*
     ports:
-      - '127.0.0.1:${HYDRA_INDEXER_GATEWAY_PORT}:${HYDRA_INDEXER_GATEWAY_PORT}'
+      - "${HYDRA_INDEXER_GATEWAY_PORT}:${HYDRA_INDEXER_GATEWAY_PORT}"
     depends_on:
       - db
       - redis
@@ -257,7 +261,7 @@ services:
     container_name: redis
     restart: unless-stopped
     ports:
-      - '127.0.0.1:6379:6379'
+      - "127.0.0.1:6379:6379"
 
   pioneer:
     image: nginx
@@ -266,7 +270,7 @@ services:
     volumes:
       - ./pioneer/packages/apps/build:/usr/share/nginx/html
     ports:
-      - "127.0.0.1:3000:80"
+      - "3000:80"
     environment:
       - NGINX_PORT=80
 
@@ -280,7 +284,7 @@ services:
       - PROVIDER=ws://joystream-node:9944
       - ENDOWMENT=0
     ports:
-      - "127.0.0.1:3002:3002"
+      - "3002:3002"
 
   orion:
     container_name: orion
@@ -289,6 +293,7 @@ services:
       - ORION_PORT=6116
       - ORION_MONGO_HOSTNAME=mongo
       - ORION_FEATURED_CONTENT_SECRET=password123
+      - ORION_QUERY_NODE_URL=http://graphql-server:${GRAPHQL_SERVER_PORT}/graphql
     ports:
       - "6116:6116"
     depends_on:
@@ -311,10 +316,14 @@ volumes:
     driver: local
   colossus-1-keystore:
     driver: local
+  colossus-1-logs:
+    driver: local
   colossus-2-data:
     driver: local
   colossus-2-keystore:
     driver: local
+  colossus-2-logs:
+    driver: local
   distributor-1-logs:
     driver: local
   distributor-1-cache:

+ 5 - 1
metadata-protobuf/package.json

@@ -19,6 +19,10 @@
   "author": "Joystream Contributors",
   "license": "MIT",
   "private": false,
+  "publishConfig": {
+    "access": "public",
+    "registry": "https://registry.npmjs.org"
+  },
   "scripts": {
     "build": "yarn compile && rm -rf lib && tsc",
     "compile": "yarn ts-node ./scripts/compile.ts",
@@ -27,7 +31,7 @@
     "lint": "eslint ./src --ext .ts",
     "checks": "tsc --noEmit --pretty && prettier ./ --check && yarn lint",
     "format": "prettier ./ --write",
-    "prepublish": "yarn build"
+    "prepack": "npm run build && npm run checks"
   },
   "files": [
     "lib/**/*",

+ 1 - 1
node/Cargo.toml

@@ -3,7 +3,7 @@ authors = ['Joystream contributors']
 build = 'build.rs'
 edition = '2018'
 name = 'joystream-node'
-version = '5.9.0'
+version = '5.13.0'
 default-run = "joystream-node"
 
 [[bin]]

+ 3 - 2
package.json

@@ -17,13 +17,14 @@
     "tests/network-tests",
     "cli",
     "types",
-    "storage-node-v2",
+    "storage-node",
     "distributor-node",
     "devops/eslint-config",
     "devops/prettier-config",
     "pioneer",
     "pioneer/packages/*",
     "utils/api-scripts",
+    "utils/migration-scripts",
     "query-node",
     "query-node/mappings",
     "query-node/generated/graphql-server",
@@ -49,7 +50,7 @@
     "typeorm": "0.2.34",
     "pg": "^8.4.0",
     "chalk": "^4.0.0",
-    "@joystream/warthog": "2.39.0"
+    "@joystream/warthog": "2.41.2"
   },
   "devDependencies": {
     "eslint": "^7.25.0",

+ 0 - 3
query-node/build.sh

@@ -16,9 +16,6 @@ yarn clean
 yarn codegen:noinstall
 yarn typegen # if this fails try to run this command outside of yarn workspaces
 
-# Post-codegen - fixes in autogenerated files
-yarn ts-node --project ./mappings/tsconfig.json ./mappings/scripts/postCodegen.ts
-
 # We run yarn again to ensure graphql-server dependencies are installed
 # and are inline with root workspace resolutions
 yarn

+ 2 - 3
query-node/codegen/package.json

@@ -5,10 +5,9 @@
   "author": "",
   "license": "ISC",
   "scripts": {
-    "postinstall": "cd .. && yarn workspace query-node-mappings postHydraCLIInstall"
   },
   "dependencies": {
-    "@joystream/hydra-cli": "3.1.0-alpha.13",
-    "@joystream/hydra-typegen": "3.1.0-alpha.13"
+    "@joystream/hydra-cli": "3.1.0-alpha.16",
+    "@joystream/hydra-typegen": "3.1.0-alpha.16"
   }
 }

+ 242 - 240
query-node/codegen/yarn.lock

@@ -33,9 +33,9 @@
   dependencies:
     xss "^1.0.8"
 
-"@apollographql/graphql-playground-react@https://github.com/Joystream/graphql-playground/releases/download/query-templates%401.7.27/graphql-playground-react-v1.7.27.tgz":
-  version "1.7.27"
-  resolved "https://github.com/Joystream/graphql-playground/releases/download/query-templates%401.7.27/graphql-playground-react-v1.7.27.tgz#f29765a3a182204bf2bb166a3ed10c7273637af9"
+"@apollographql/graphql-playground-react@https://github.com/Joystream/graphql-playground/releases/download/joystream%401.7.28/graphql-playground-react-v1.7.28.tgz":
+  version "1.7.28"
+  resolved "https://github.com/Joystream/graphql-playground/releases/download/joystream%401.7.28/graphql-playground-react-v1.7.28.tgz#24c9c54e14ae0ba13c894738b4b87301f5801b26"
   dependencies:
     "@types/lru-cache" "^4.1.1"
     apollo-link "^1.2.13"
@@ -205,10 +205,10 @@
   dependencies:
     regenerator-runtime "^0.13.4"
 
-"@babel/runtime@^7.14.6", "@babel/runtime@^7.15.3":
-  version "7.15.4"
-  resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.15.4.tgz#fd17d16bfdf878e6dd02d19753a39fa8a8d9c84a"
-  integrity sha512-99catp6bHCaxr4sJ/DbTGgHS4+Rs2RVd2g7iOap6SLGPDknRK9ztKNsE/Fg6QhSeh1FGE5f6gHGQmvvn3I3xhw==
+"@babel/runtime@^7.16.3":
+  version "7.16.7"
+  resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.16.7.tgz#03ff99f64106588c9c403c6ecb8c3bafbbdff1fa"
+  integrity sha512-9E9FJowqAsytyOY6LG+1KuueckRL+aQW+mKvXRXnuFGyRAyepJPmEo9vgMfXUA6O9u3IeEdv9MAkppFcaQwogQ==
   dependencies:
     regenerator-runtime "^0.13.4"
 
@@ -306,15 +306,15 @@
   resolved "https://registry.yarnpkg.com/@josephg/resolvable/-/resolvable-1.0.1.tgz#69bc4db754d79e1a2f17a650d3466e038d94a5eb"
   integrity sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg==
 
-"@joystream/hydra-cli@3.1.0-alpha.13":
-  version "3.1.0-alpha.13"
-  resolved "https://registry.yarnpkg.com/@joystream/hydra-cli/-/hydra-cli-3.1.0-alpha.13.tgz#230485159e285f303757443e173d87fbe97f2835"
-  integrity sha512-hSUaSDRTHg8Y2atiRTl810aiscIKkdSEHUVlsfMb1fD7n9vCAX7hel2oUyfPMoW6NpnQaptkOtVinaLyAr/bkg==
+"@joystream/hydra-cli@3.1.0-alpha.16":
+  version "3.1.0-alpha.16"
+  resolved "https://registry.yarnpkg.com/@joystream/hydra-cli/-/hydra-cli-3.1.0-alpha.16.tgz#3bebe326b2ae6ad96b821797ca699c581217ac45"
+  integrity sha512-2Dq5fBqJWdqE0OhvI/kBA0i3gngnDmd0AaSFhJ03LE3mKTvzhapaOyWmEgO9vqQCSopSi0wWorizzksnu2+GQw==
   dependencies:
     "@inquirer/input" "^0.0.13-alpha.0"
     "@inquirer/password" "^0.0.12-alpha.0"
     "@inquirer/select" "^0.0.13-alpha.0"
-    "@joystream/warthog" "^2.40.0"
+    "@joystream/warthog" "~2.41.2"
     "@oclif/command" "^1.5.20"
     "@oclif/config" "^1"
     "@oclif/errors" "^1.3.3"
@@ -342,15 +342,15 @@
     pluralize "^8.0.0"
     tslib "1.11.2"
 
-"@joystream/hydra-typegen@3.1.0-alpha.13":
-  version "3.1.0-alpha.13"
-  resolved "https://registry.yarnpkg.com/@joystream/hydra-typegen/-/hydra-typegen-3.1.0-alpha.13.tgz#cb19dbe4b496a1b003b6c0a663ffa961743a07ca"
-  integrity sha512-ayIYrPc7ofQEsRIKL71Hvdm8/tqFNo4s1WwjwW7xAScTqIjimgG4y/3OjQbsgXzcLB03E4UOE0ECLwqzoYDrug==
+"@joystream/hydra-typegen@3.1.0-alpha.16":
+  version "3.1.0-alpha.16"
+  resolved "https://registry.yarnpkg.com/@joystream/hydra-typegen/-/hydra-typegen-3.1.0-alpha.16.tgz#5756b714767be8f3b237dba270386113c64b1245"
+  integrity sha512-ik1iegF7qZXeumsJ8baeff5VAxgrc6+yyRIZNFgWrCRDVEnP613XNFpUIcKzuXme7BhCVeaY5ynLaQUtU6lcUw==
   dependencies:
     "@oclif/command" "^1.8.0"
     "@oclif/config" "^1"
     "@oclif/errors" "^1.3.3"
-    "@polkadot/api" "4.16.2"
+    "@polkadot/api" "5.9.1"
     debug "^4.3.1"
     handlebars "^4.7.6"
     lodash "^4.17.20"
@@ -358,12 +358,12 @@
     yaml "^1.10.0"
     yaml-validator "^3.0.0"
 
-"@joystream/warthog@^2.40.0":
-  version "2.40.0"
-  resolved "https://registry.yarnpkg.com/@joystream/warthog/-/warthog-2.40.0.tgz#6384803b0326dd43b554aac65c68838249f1119e"
-  integrity sha512-fNlN0rzCPWvt1lrBXz24UFdwMMJBrrGPB1ObruQXJXTbZeZ+OuqIJLCCw2j+JjeT/Tl569VM4/S69jA+usCfng==
+"@joystream/warthog@~2.41.2":
+  version "2.41.2"
+  resolved "https://registry.yarnpkg.com/@joystream/warthog/-/warthog-2.41.2.tgz#6d3cf5c977320d1c77be518e848e011a9699b22d"
+  integrity sha512-1w6aT5P3xiI/HaTtqJrVj4Yp1/gxG8cGTeYgzlwr3iq8J11skwE4rLCHQucHfVueyBX49AaqWrhl+wI2ACqk4Q==
   dependencies:
-    "@apollographql/graphql-playground-react" "https://github.com/Joystream/graphql-playground/releases/download/query-templates%401.7.27/graphql-playground-react-v1.7.27.tgz"
+    "@apollographql/graphql-playground-react" "https://github.com/Joystream/graphql-playground/releases/download/joystream%401.7.28/graphql-playground-react-v1.7.28.tgz"
     "@types/app-root-path" "^1.2.4"
     "@types/bn.js" "^4.11.6"
     "@types/caller" "^1.0.0"
@@ -422,7 +422,7 @@
     typedi "^0.8.0"
     typeorm "0.2.37"
     typeorm-typedi-extensions "^0.4.1"
-    typescript "^3.9.7"
+    typescript "^4.4"
 
 "@nodelib/fs.scandir@2.1.5":
   version "2.1.5"
@@ -560,226 +560,204 @@
   resolved "https://registry.yarnpkg.com/@oclif/screen/-/screen-1.0.4.tgz#b740f68609dfae8aa71c3a6cab15d816407ba493"
   integrity sha512-60CHpq+eqnTxLZQ4PGHYNwUX572hgpMHGPtTWMjdTMsAvlm69lZV/4ly6O3sAYkomo4NggGcomrDpBe34rxUqw==
 
-"@polkadot/api-derive@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/api-derive/-/api-derive-4.16.2.tgz#8ed97fec7965a1be1c5d87a3639752d5cdfdbc8a"
-  integrity sha512-xRAIGoeULK+E7uep5D0eDUN6m0KcMV4eOPkmvyfp7ndxfaf94ydfEOw+QemrnT1T/chA/qq96EYvuBe3lv5w1Q==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/api" "4.16.2"
-    "@polkadot/rpc-core" "4.16.2"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/util-crypto" "^6.10.1"
-    "@polkadot/x-rxjs" "^6.10.1"
-
-"@polkadot/api@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/api/-/api-4.16.2.tgz#361fbeb690d8b646387e9f8bec22929aca09d691"
-  integrity sha512-x+fWc7mE3ZuGxoFCTf/Tnv0z7rDTM198M9LnWUJdadyNT3QAtE+Cjgo1bCrroTnuD3whd0jhFLfLQCwz95RrwA==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/api-derive" "4.16.2"
-    "@polkadot/keyring" "^6.10.1"
-    "@polkadot/metadata" "4.16.2"
-    "@polkadot/rpc-core" "4.16.2"
-    "@polkadot/rpc-provider" "4.16.2"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/types-known" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/util-crypto" "^6.10.1"
-    "@polkadot/x-rxjs" "^6.10.1"
+"@polkadot/api-derive@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/api-derive/-/api-derive-5.9.1.tgz#5937069920ded1439e6672b9d6be1072421b256b"
+  integrity sha512-iMrVKnYIS3UQciDlFqww6AFyXgG+iN8UqWu8QbTuZecri3qrSmM3Nn8Jkvju3meZIacwWIMSmBcnj8+zef3rkQ==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/api" "5.9.1"
+    "@polkadot/rpc-core" "5.9.1"
+    "@polkadot/types" "5.9.1"
+    "@polkadot/util" "^7.3.1"
+    "@polkadot/util-crypto" "^7.3.1"
+    rxjs "^7.3.0"
+
+"@polkadot/api@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/api/-/api-5.9.1.tgz#ce314cc34f0a47098d039db7b9036bb491c2898c"
+  integrity sha512-POpIXn/Ao+NLB0uMldXdXU44dVbRr6+6Ax77Z0R285M8Z2EiF5jl2K3SPvlowLo4SntxiCSaHQxCekYhUcJKlw==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/api-derive" "5.9.1"
+    "@polkadot/keyring" "^7.3.1"
+    "@polkadot/rpc-core" "5.9.1"
+    "@polkadot/rpc-provider" "5.9.1"
+    "@polkadot/types" "5.9.1"
+    "@polkadot/types-known" "5.9.1"
+    "@polkadot/util" "^7.3.1"
+    "@polkadot/util-crypto" "^7.3.1"
     eventemitter3 "^4.0.7"
+    rxjs "^7.3.0"
+
+"@polkadot/keyring@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/keyring/-/keyring-7.9.2.tgz#1f5bf6b7bdb5942d275aebf72d4ed98abe874fa8"
+  integrity sha512-6UGoIxhiTyISkYEZhUbCPpgVxaneIfb/DBVlHtbvaABc8Mqh1KuqcTIq19Mh9wXlBuijl25rw4lUASrE/9sBqg==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/util" "7.9.2"
+    "@polkadot/util-crypto" "7.9.2"
+
+"@polkadot/networks@7.9.2", "@polkadot/networks@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/networks/-/networks-7.9.2.tgz#03e3f3ac6bdea177517436537826055df60bcb9a"
+  integrity sha512-4obI1RdW5/7TFwbwKA9oqw8aggVZ65JAUvIFMd2YmMC2T4+NiZLnok0WhRkhZkUnqjLIHXYNwq7Ho1i39dte0g==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+
+"@polkadot/rpc-core@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/rpc-core/-/rpc-core-5.9.1.tgz#68e2a2ea18c15aa15743e7487a407fdd65d1d900"
+  integrity sha512-5fXiICAcjp7ow81DnIl2Dq/xuCtJUqyjJkxe9jNHJWBluBxOouqYDb8bYPPGSdckiaVyYe0l8lA9fBUFMdEt6w==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/rpc-provider" "5.9.1"
+    "@polkadot/types" "5.9.1"
+    "@polkadot/util" "^7.3.1"
+    rxjs "^7.3.0"
 
-"@polkadot/keyring@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/keyring/-/keyring-6.11.1.tgz#2510c349c965c74cc2f108f114f1048856940604"
-  integrity sha512-rW8INl7pO6Dmaffd6Df1yAYCRWa2RmWQ0LGfJeA/M6seVIkI6J3opZqAd4q2Op+h9a7z4TESQGk8yggOEL+Csg==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/util" "6.11.1"
-    "@polkadot/util-crypto" "6.11.1"
-
-"@polkadot/metadata@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/metadata/-/metadata-4.16.2.tgz#2a90c9e6ac500ee1b176a5e0e08b64c8d7bf5458"
-  integrity sha512-wx5DwAxV8zEDQzgdeDFRRlDb89CqmgY/eKusvMgzRuLG5Z4Hu4jxQ6LnBsjVmA70BBhgs+uAuJ7mzY76OO4wDw==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/types-known" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/util-crypto" "^6.10.1"
-
-"@polkadot/networks@6.11.1", "@polkadot/networks@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/networks/-/networks-6.11.1.tgz#8fd189593f6ee4f8bf64378d0aaae09e39a37d35"
-  integrity sha512-0C6Ha2kvr42se3Gevx6UhHzv3KnPHML0N73Amjwvdr4y0HLZ1Nfw+vcm5yqpz5gpiehqz97XqFrsPRauYdcksQ==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-
-"@polkadot/rpc-core@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/rpc-core/-/rpc-core-4.16.2.tgz#a839407a1c00048a10ed711ad3dd1b52f8fd20cc"
-  integrity sha512-NAMkN5rtccLL7G0aeMqxx/R38exkJ/xVNEZh9Y/okw8w0iOCnZk72ge9ABkd/SJbLxm6l+5c87cTXUK77r1zTQ==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/metadata" "4.16.2"
-    "@polkadot/rpc-provider" "4.16.2"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/x-rxjs" "^6.10.1"
-
-"@polkadot/rpc-provider@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/rpc-provider/-/rpc-provider-4.16.2.tgz#73a0b6818ec57d10b735b1e471eb7d88dd8a39db"
-  integrity sha512-aAq3mHkgHziQrZQdNuxGSrkKKksA8Kk0N8WWsW1DZOkjt7rlF3vdmCguHTPlOzO4NHmeDsGVlGGBzjOza8QNbA==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/util-crypto" "^6.10.1"
-    "@polkadot/x-fetch" "^6.10.1"
-    "@polkadot/x-global" "^6.10.1"
-    "@polkadot/x-ws" "^6.10.1"
+"@polkadot/rpc-provider@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/rpc-provider/-/rpc-provider-5.9.1.tgz#8e67769c05ba71ecf4f5bc0c5a60eb9afc699167"
+  integrity sha512-9zamxfnsY7iCswXIK22W0Ji1XHLprm97js3WLw3lP2hr/uSim4Cv4y07zY/z4dDQyF0gJtjKwR27Wo9CZqdr6A==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/types" "5.9.1"
+    "@polkadot/util" "^7.3.1"
+    "@polkadot/util-crypto" "^7.3.1"
+    "@polkadot/x-fetch" "^7.3.1"
+    "@polkadot/x-global" "^7.3.1"
+    "@polkadot/x-ws" "^7.3.1"
     eventemitter3 "^4.0.7"
 
-"@polkadot/types-known@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/types-known/-/types-known-4.16.2.tgz#94e54adb3ba767342f9aed226eb4aa973520b911"
-  integrity sha512-ydeS1SnO25O//TThzUBYjthCOH3h70j1IRVQ+CPVhVbZJoMRr47hIysFTBjyxyKVTQtj20vniZV8+qq6oiWggA==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/networks" "^6.10.1"
-    "@polkadot/types" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-
-"@polkadot/types@4.16.2":
-  version "4.16.2"
-  resolved "https://registry.yarnpkg.com/@polkadot/types/-/types-4.16.2.tgz#06dfedf19a50d659863c068ba1444efbc214c302"
-  integrity sha512-JSIvVKIBhRHCswDPYMoy4TLvR9O1NT5mqyIBoLjNKur0WShLk1jVtiyKbU+2/AuCbM1nehiWagmAlWmMFNaDMw==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/metadata" "4.16.2"
-    "@polkadot/util" "^6.10.1"
-    "@polkadot/util-crypto" "^6.10.1"
-    "@polkadot/x-rxjs" "^6.10.1"
-
-"@polkadot/util-crypto@6.11.1", "@polkadot/util-crypto@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/util-crypto/-/util-crypto-6.11.1.tgz#7a36acf5c8bf52541609ec0b0b2a69af295d652e"
-  integrity sha512-fWA1Nz17FxWJslweZS4l0Uo30WXb5mYV1KEACVzM+BSZAvG5eoiOAYX6VYZjyw6/7u53XKrWQlD83iPsg3KvZw==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/networks" "6.11.1"
-    "@polkadot/util" "6.11.1"
-    "@polkadot/wasm-crypto" "^4.0.2"
-    "@polkadot/x-randomvalues" "6.11.1"
-    base-x "^3.0.8"
-    base64-js "^1.5.1"
+"@polkadot/types-known@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/types-known/-/types-known-5.9.1.tgz#e52fc7b803bc7cb3f41028f88963deb4ccee40af"
+  integrity sha512-7lpLuIVGaKziQRzPMnTxyjlYy3spL6WqUg3CcEzmJUKQeUonHglOliQh8JSSz1bcP+YuNHGXK1cKsTjHb+GYxA==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/networks" "^7.3.1"
+    "@polkadot/types" "5.9.1"
+    "@polkadot/util" "^7.3.1"
+
+"@polkadot/types@5.9.1":
+  version "5.9.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/types/-/types-5.9.1.tgz#74cf4695795f2aa365ff85d3873e22c430100bc9"
+  integrity sha512-30vcSlNBxPyWYZaxKDr/BoMhfLCRKB265XxpnnNJmbdZZsL+N4Zp2mJR9/UbA6ypmJBkUjD7b1s9AYsLwUs+8w==
+  dependencies:
+    "@babel/runtime" "^7.15.4"
+    "@polkadot/util" "^7.3.1"
+    "@polkadot/util-crypto" "^7.3.1"
+    rxjs "^7.3.0"
+
+"@polkadot/util-crypto@7.9.2", "@polkadot/util-crypto@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/util-crypto/-/util-crypto-7.9.2.tgz#cdc336f92a6bc3d40c5a23734e1974fb777817f0"
+  integrity sha512-nNwqUwP44eCH9jKKcPie+IHLKkg9LMe6H7hXo91hy3AtoslnNrT51tP3uAm5yllhLvswJfnAgnlHq7ybCgqeFw==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/networks" "7.9.2"
+    "@polkadot/util" "7.9.2"
+    "@polkadot/wasm-crypto" "^4.4.1"
+    "@polkadot/x-randomvalues" "7.9.2"
     blakejs "^1.1.1"
-    bn.js "^4.11.9"
+    bn.js "^4.12.0"
     create-hash "^1.2.0"
+    ed2curve "^0.3.0"
     elliptic "^6.5.4"
     hash.js "^1.1.7"
     js-sha3 "^0.8.0"
+    micro-base "^0.9.0"
     scryptsy "^2.1.0"
     tweetnacl "^1.0.3"
     xxhashjs "^0.2.2"
 
-"@polkadot/util@6.11.1", "@polkadot/util@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/util/-/util-6.11.1.tgz#8950b038ba3e6ebfc0a7ff47feeb972e81b2626c"
-  integrity sha512-TEdCetr9rsdUfJZqQgX/vxLuV4XU8KMoKBMJdx+JuQ5EWemIdQkEtMBdL8k8udNGbgSNiYFA6rPppATeIxAScg==
+"@polkadot/util@7.9.2", "@polkadot/util@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/util/-/util-7.9.2.tgz#567ac659516d6b685ed7e796919901d92e5cbe6b"
+  integrity sha512-6ABY6ErgkCsM4C6+X+AJSY4pBGwbKlHZmUtHftaiTvbaj4XuA4nTo3GU28jw8wY0Jh2cJZJvt6/BJ5GVkm5tBA==
   dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-textdecoder" "6.11.1"
-    "@polkadot/x-textencoder" "6.11.1"
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-textdecoder" "7.9.2"
+    "@polkadot/x-textencoder" "7.9.2"
     "@types/bn.js" "^4.11.6"
-    bn.js "^4.11.9"
-    camelcase "^5.3.1"
+    bn.js "^4.12.0"
+    camelcase "^6.2.1"
     ip-regex "^4.3.0"
 
-"@polkadot/wasm-crypto-asmjs@^4.2.1":
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-4.2.1.tgz#6b7eae1c011709f8042dfd30872a5fc5e9e021c0"
-  integrity sha512-ON9EBpTNDCI3QRUmuQJIegYoAcwvxDaNNA7uwKTaEEStu8LjCIbQxbt4WbOBYWI0PoUpl4iIluXdT3XZ3V3jXA==
-  dependencies:
-    "@babel/runtime" "^7.15.3"
-
-"@polkadot/wasm-crypto-wasm@^4.2.1":
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-4.2.1.tgz#2a86f9b405e7195c3f523798c6ce4afffd19737e"
-  integrity sha512-Rs2CKiR4D+2hKzmKBfPNYxcd2E8NfLWia0av4fgicjT9YsWIWOGQUi9AtSOfazPOR9FrjxKJy+chQxAkcfKMnQ==
-  dependencies:
-    "@babel/runtime" "^7.15.3"
-
-"@polkadot/wasm-crypto@^4.0.2":
-  version "4.2.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto/-/wasm-crypto-4.2.1.tgz#4d09402f5ac71a90962fb58cbe4b1707772a4fb6"
-  integrity sha512-C/A/QnemOilRTLnM0LfhPY2N/x3ZFd1ihm9sXYyuh98CxtekSVYI9h4IJ5Jrgz5imSUHgvt9oJLqJ5GbWQV/Zg==
-  dependencies:
-    "@babel/runtime" "^7.15.3"
-    "@polkadot/wasm-crypto-asmjs" "^4.2.1"
-    "@polkadot/wasm-crypto-wasm" "^4.2.1"
-
-"@polkadot/x-fetch@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-fetch/-/x-fetch-6.11.1.tgz#97d44d78ef0285eec6f6dbc4006302308ec8e24c"
-  integrity sha512-qJyLLnm+4SQEZ002UDz2wWnXbnnH84rIS0mLKZ5k82H4lMYY+PQflvzv6sbu463e/lgiEao+6zvWS6DSKv1Yog==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-global" "6.11.1"
-    "@types/node-fetch" "^2.5.10"
-    node-fetch "^2.6.1"
-
-"@polkadot/x-global@6.11.1", "@polkadot/x-global@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-global/-/x-global-6.11.1.tgz#c292b3825fea60e9b33fff1790323fc57de1ca5d"
-  integrity sha512-lsBK/e4KbjfieyRmnPs7bTiGbP/6EoCZz7rqD/voNS5qsJAaXgB9LR+ilubun9gK/TDpebyxgO+J19OBiQPIRw==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-
-"@polkadot/x-randomvalues@6.11.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-randomvalues/-/x-randomvalues-6.11.1.tgz#f006fa250c8e82c92ccb769976a45a8e7f3df28b"
-  integrity sha512-2MfUfGZSOkuPt7GF5OJkPDbl4yORI64SUuKM25EGrJ22o1UyoBnPOClm9eYujLMD6BfDZRM/7bQqqoLW+NuHVw==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-global" "6.11.1"
-
-"@polkadot/x-rxjs@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-rxjs/-/x-rxjs-6.11.1.tgz#5454708b61da70eea05708611d9148fce9372498"
-  integrity sha512-zIciEmij7SUuXXg9g/683Irx6GogxivrQS2pgBir2DI/YZq+um52+Dqg1mqsEZt74N4KMTMnzAZAP6LJOBOMww==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    rxjs "^6.6.7"
-
-"@polkadot/x-textdecoder@6.11.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-textdecoder/-/x-textdecoder-6.11.1.tgz#6cc314645681cc4639085c03b65328671c7f182c"
-  integrity sha512-DI1Ym2lyDSS/UhnTT2e9WutukevFZ0WGpzj4eotuG2BTHN3e21uYtYTt24SlyRNMrWJf5+TkZItmZeqs1nwAfQ==
+"@polkadot/wasm-crypto-asmjs@^4.5.1":
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-4.5.1.tgz#e1025a49e106db11d1187caf65f56c960ea2ad2b"
+  integrity sha512-DOdRiWhxVvmqTvp+E9z1j+Yr0zDOGsDvqnT/eNw0Dl1FVUOImsEa7FKns/urASmcxCVEE1jtUWSnij29jrORMQ==
   dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-global" "6.11.1"
+    "@babel/runtime" "^7.16.3"
 
-"@polkadot/x-textencoder@6.11.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-textencoder/-/x-textencoder-6.11.1.tgz#73e89da5b91954ae380042c19314c90472f59d9e"
-  integrity sha512-8ipjWdEuqFo+R4Nxsc3/WW9CSEiprX4XU91a37ZyRVC4e9R1bmvClrpXmRQLVcAQyhRvG8DKOOtWbz8xM+oXKg==
+"@polkadot/wasm-crypto-wasm@^4.5.1":
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-4.5.1.tgz#063a58ff7ddd939b7886a6a238109a8d2c416e46"
+  integrity sha512-hPwke85HxpgG/RAlwdCE8u5w7bThvWg399mlB+XjogXMxOUWBZSgq2XYbgzROUXx27inK9nStF4Pnc4zJnqs9A==
   dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-global" "6.11.1"
+    "@babel/runtime" "^7.16.3"
 
-"@polkadot/x-ws@^6.10.1":
-  version "6.11.1"
-  resolved "https://registry.yarnpkg.com/@polkadot/x-ws/-/x-ws-6.11.1.tgz#338adc7309e3a8e660fce8eb42f975426da48d10"
-  integrity sha512-GNu4ywrMlVi0QF6QSpKwYWMK6JRK+kadgN/zEhMoH1z5h8LwpqDLv128j5WspWbQti2teCQtridjf7t2Lzoe8Q==
-  dependencies:
-    "@babel/runtime" "^7.14.6"
-    "@polkadot/x-global" "6.11.1"
-    "@types/websocket" "^1.0.3"
+"@polkadot/wasm-crypto@^4.4.1":
+  version "4.5.1"
+  resolved "https://registry.yarnpkg.com/@polkadot/wasm-crypto/-/wasm-crypto-4.5.1.tgz#e1ac6d846a0ad8e991cec128994524183ef6e8fd"
+  integrity sha512-Cr21ais3Kq3aedIHZ3J1tjgeD/+K8FCiwEawr0oRywNBSJR8wyuZMePs4swR/6xm8wbBkpqoBVHz/UQHqqQJmA==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/wasm-crypto-asmjs" "^4.5.1"
+    "@polkadot/wasm-crypto-wasm" "^4.5.1"
+
+"@polkadot/x-fetch@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-fetch/-/x-fetch-7.9.2.tgz#fe943be5854f7355630388b1b5d2bb52f1a3afb2"
+  integrity sha512-zutLkFJVaLVpY3cIGYJD0AReLfAnPr2J82Ca4pvy/BxqwwGYuGLcn36A4m6nliGBP2lcH4oYY+mcCqIwoPWQUQ==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-global" "7.9.2"
+    "@types/node-fetch" "^2.5.12"
+    node-fetch "^2.6.6"
+
+"@polkadot/x-global@7.9.2", "@polkadot/x-global@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-global/-/x-global-7.9.2.tgz#b272b0a3bedaad3bcbf075ec4682abe68cf2a850"
+  integrity sha512-JX5CrGWckHf1P9xKXq4vQCAuMUbL81l2hOWX7xeP8nv4caHEpmf5T1wD1iMdQBL5PFifo6Pg0V6/oZBB+bts7A==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+
+"@polkadot/x-randomvalues@7.9.2":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-randomvalues/-/x-randomvalues-7.9.2.tgz#0c9bb7b48a0791c2a32e9605a31a5ce56fee621d"
+  integrity sha512-svQfG31yCXf6yVyIgP0NgCzEy7oc3Lw054ZspkaqjOivxYdrXaf5w3JSSUyM/MRjI2+nk+B/EyJoMYcfSwTfsQ==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-global" "7.9.2"
+
+"@polkadot/x-textdecoder@7.9.2":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-textdecoder/-/x-textdecoder-7.9.2.tgz#a78548e33efeb3a25f761fec9787b2bcae7f0608"
+  integrity sha512-wfwbSHXPhrOAl12QvlIOGNkMH/N/h8PId2ytIjvM/8zPPFB5Il6DWSFLtVapOGEpIFjEWbd5t8Td4pHBVXIEbg==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-global" "7.9.2"
+
+"@polkadot/x-textencoder@7.9.2":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-textencoder/-/x-textencoder-7.9.2.tgz#b32bfd6fbff8587c56452f58252a52d62bbcd5b9"
+  integrity sha512-A19wwYINuZwU2dUyQ/mMzB0ISjyfc4cISfL4zCMUAVgj7xVoXMYV2GfjNdMpA8Wsjch3su6pxLbtJ2wU03sRTQ==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-global" "7.9.2"
+
+"@polkadot/x-ws@^7.3.1":
+  version "7.9.2"
+  resolved "https://registry.yarnpkg.com/@polkadot/x-ws/-/x-ws-7.9.2.tgz#016df26fa829b74f8b1e31a1dcd6e34256c1231f"
+  integrity sha512-+yppMsZtvDztVOSmkqAQuhR6TfV1Axa6ergAsWb52DrfXvFP5geqtARsI6ZdDgMsE3qHSVQTcJz8vgNOr5+ztQ==
+  dependencies:
+    "@babel/runtime" "^7.16.3"
+    "@polkadot/x-global" "7.9.2"
+    "@types/websocket" "^1.0.4"
     websocket "^1.0.34"
 
 "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2":
@@ -1164,7 +1142,7 @@
   resolved "https://registry.yarnpkg.com/@types/node-emoji/-/node-emoji-1.8.1.tgz#689cb74fdf6e84309bcafce93a135dfecd01de3f"
   integrity sha512-0fRfA90FWm6KJfw6P9QGyo0HDTCmthZ7cWaBQndITlaWLTZ6njRyKwrwpzpg+n6kBXBIGKeUHEQuBx7bphGJkA==
 
-"@types/node-fetch@^2.5.10":
+"@types/node-fetch@^2.5.12":
   version "2.5.12"
   resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.5.12.tgz#8a6f779b1d4e60b7a57fb6fd48d84fb545b9cc66"
   integrity sha512-MKgC4dlq4kKNa/mYrwpKfzQMB5X3ee5U6fSprkKpToBqBmX4nFZL9cW5jl6sWn+xpRJ7ypWh2yyqqr8UUCstSw==
@@ -1280,7 +1258,7 @@
   resolved "https://registry.yarnpkg.com/@types/validator/-/validator-13.6.3.tgz#31ca2e997bf13a0fffca30a25747d5b9f7dbb7de"
   integrity sha512-fWG42pMJOL4jKsDDZZREnXLjc3UE0R8LOJfARWYg6U966rxDT7TYejYzLnUF5cvSObGg34nd0+H2wHHU5Omdfw==
 
-"@types/websocket@^1.0.3":
+"@types/websocket@^1.0.4":
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/@types/websocket/-/websocket-1.0.4.tgz#1dc497280d8049a5450854dd698ee7e6ea9e60b8"
   integrity sha512-qn1LkcFEKK8RPp459jkjzsfpbsx36BBt3oC3pITYtkoBw/aVX+EZFa5j3ThCRTNpLFvIMr5dSTD4RaMdilIOpA==
@@ -1706,14 +1684,7 @@ balanced-match@^1.0.0:
   resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
   integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
 
-base-x@^3.0.8:
-  version "3.0.8"
-  resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.8.tgz#1e1106c2537f0162e8b52474a557ebb09000018d"
-  integrity sha512-Rl/1AWP4J/zRrk54hhlxH4drNxPJXYUaKffODVI53/dAsV4t9fBxyxYKAVPU1XBHxYwOWP9h9H0hM2MVw4YfJA==
-  dependencies:
-    safe-buffer "^5.0.1"
-
-base64-js@^1.3.1, base64-js@^1.5.1:
+base64-js@^1.3.1:
   version "1.5.1"
   resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
   integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
@@ -1738,7 +1709,7 @@ bluebird@^3.3.5, bluebird@^3.5.5:
   resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f"
   integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==
 
-bn.js@^4.11.9:
+bn.js@^4.11.9, bn.js@^4.12.0:
   version "4.12.0"
   resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88"
   integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==
@@ -1861,11 +1832,16 @@ camelcase@^3.0.0:
   resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a"
   integrity sha1-MvxLn82vhF/N9+c7uXysImHwqwo=
 
-camelcase@^5.0.0, camelcase@^5.3.1:
+camelcase@^5.0.0:
   version "5.3.1"
   resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
   integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
 
+camelcase@^6.2.1:
+  version "6.3.0"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
+  integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
+
 camelize@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/camelize/-/camelize-1.0.0.tgz#164a5483e630fa4321e5af07020e531831b2609b"
@@ -2491,6 +2467,13 @@ duplexer@^0.1.1:
   resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.2.tgz#3abe43aef3835f8ae077d136ddce0f276b0400e6"
   integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==
 
+ed2curve@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/ed2curve/-/ed2curve-0.3.0.tgz#322b575152a45305429d546b071823a93129a05d"
+  integrity sha512-8w2fmmq3hv9rCrcI7g9hms2pMunQr1JINfcjwR9tAyZqhtyaMN991lF/ZfHfr5tzZQ8c7y7aBgZbjfbd0fjFwQ==
+  dependencies:
+    tweetnacl "1.x.x"
+
 ee-first@1.1.1:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
@@ -4213,6 +4196,11 @@ methods@~1.1.2:
   resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
   integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=
 
+micro-base@^0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/micro-base/-/micro-base-0.9.0.tgz#09cfe20285bec0ea97f41dc3d10e3fba3d0266ee"
+  integrity sha512-4+tOMKidYT5nQ6/UNmYrGVO5PMcnJdfuR4NC8HK8s2H61B4itOhA9yrsjBdqGV7ecdtej36x3YSIfPLRmPrspg==
+
 micromatch@^4.0.4:
   version "4.0.4"
   resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.4.tgz#896d519dfe9db25fce94ceb7a500919bf881ebf9"
@@ -4373,6 +4361,13 @@ node-fetch@^2.6.1:
   dependencies:
     whatwg-url "^5.0.0"
 
+node-fetch@^2.6.6:
+  version "2.6.6"
+  resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.6.tgz#1751a7c01834e8e1697758732e9efb6eeadfaf89"
+  integrity sha512-Z8/6vRlTUChSdIgMa51jxQ4lrw/Jy5SOW10ObaA47/RElsAN2c5Pn8bTgFGWn/ibwzXTE8qwr1Yzx28vsecXEA==
+  dependencies:
+    whatwg-url "^5.0.0"
+
 node-fingerprint@0.0.2:
   version "0.0.2"
   resolved "https://registry.yarnpkg.com/node-fingerprint/-/node-fingerprint-0.0.2.tgz#31cbabeb71a67ae7dd5a7dc042e51c3c75868501"
@@ -5352,13 +5347,20 @@ run-parallel@^1.1.9:
   dependencies:
     queue-microtask "^1.2.2"
 
-rxjs@^6.3.3, rxjs@^6.5.1, rxjs@^6.6.7:
+rxjs@^6.3.3, rxjs@^6.5.1:
   version "6.6.7"
   resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.7.tgz#90ac018acabf491bf65044235d5863c4dab804c9"
   integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==
   dependencies:
     tslib "^1.9.0"
 
+rxjs@^7.3.0:
+  version "7.5.1"
+  resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.5.1.tgz#af73df343cbcab37628197f43ea0c8256f54b157"
+  integrity sha512-KExVEeZWxMZnZhUZtsJcFwz8IvPvgu4G2Z2QyqjZQzUGr32KDYuSxrEYO4w3tFFNbfLozcrKUTvTPi+E9ywJkQ==
+  dependencies:
+    tslib "^2.1.0"
+
 safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
   version "5.1.2"
   resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
@@ -5927,7 +5929,7 @@ tunnel-agent@^0.6.0:
   dependencies:
     safe-buffer "^5.0.1"
 
-tweetnacl@^1.0.3:
+tweetnacl@1.x.x, tweetnacl@^1.0.3:
   version "1.0.3"
   resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596"
   integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==
@@ -6041,10 +6043,10 @@ typescript-tuple@^2.2.1:
   dependencies:
     typescript-compare "^0.0.2"
 
-typescript@^3.9.7:
-  version "3.9.10"
-  resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.9.10.tgz#70f3910ac7a51ed6bef79da7800690b19bf778b8"
-  integrity sha512-w6fIxVE/H1PkLKcCPsFqKE7Kv7QUwhU8qQY2MueZXWx5cPZdwFupLgKK3vntcK98BtNHZtAF4LA/yl2a7k8R6Q==
+typescript@^4.4:
+  version "4.5.4"
+  resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.5.4.tgz#a17d3a0263bf5c8723b9c52f43c5084edf13c2e8"
+  integrity sha512-VgYs2A2QIRuGphtzFV7aQJduJ2gyfTljngLzjpfW9FoYZF6xuw1W0vW9ghCKLfcWrCFxK81CSGRAvS1pn4fIUg==
 
 uc.micro@^1.0.1, uc.micro@^1.0.5:
   version "1.0.6"

+ 4 - 6
query-node/mappings/package.json

@@ -11,8 +11,6 @@
     "lint": "eslint . --quiet --ext .ts",
     "checks": "prettier ./ --check && yarn lint",
     "format": "prettier ./ --write ",
-    "postinstall": "yarn ts-node ./scripts/postInstall.ts",
-    "postHydraCLIInstall": "yarn ts-node ./scripts/postHydraCLIInstall.ts",
     "bootstrap-data:fetch:members": "yarn ts-node ./bootstrap-data/scripts/fetchMembersData.ts",
     "bootstrap-data:fetch:categories": "yarn ts-node ./bootstrap-data/scripts/fetchCategories.ts",
     "bootstrap-data:fetch:workingGroups": "yarn ts-node ./bootstrap-data/scripts/fetchWorkingGroupsData.ts",
@@ -20,11 +18,11 @@
   },
   "dependencies": {
     "@polkadot/types": "5.9.1",
-    "@joystream/hydra-common": "3.1.0-alpha.13",
-    "@joystream/hydra-db-utils": "3.1.0-alpha.13",
+    "@joystream/hydra-common": "3.1.0-alpha.16",
+    "@joystream/hydra-db-utils": "3.1.0-alpha.16",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.0",
-    "@joystream/warthog": "2.39.0",
+    "@joystream/types": "^0.17.1",
+    "@joystream/warthog": "2.41.2",
     "@apollo/client": "^3.2.5"
   },
   "devDependencies": {

+ 0 - 20
query-node/mappings/scripts/postCodegen.ts

@@ -1,20 +0,0 @@
-// A script to be executed post hydra codegen, that may include modifications to autogenerated files
-import fs from 'fs'
-import path from 'path'
-
-// TS4 useUnknownInCatchVariables is not compatible with auto-generated code inside generated/graphql-server
-const serverTsConfigPath = path.resolve(__dirname, '../../generated/graphql-server/tsconfig.json')
-const serverTsConfig = JSON.parse(fs.readFileSync(serverTsConfigPath).toString())
-serverTsConfig.compilerOptions.useUnknownInCatchVariables = false
-fs.writeFileSync(serverTsConfigPath, JSON.stringify(serverTsConfig, undefined, 2))
-
-// Type assertions are no longer needed for createTypeUnsafe in @polkadot/api 5.9.1 (and they break the build)
-// Here we're relpacing createTypeUnsafe<Assertions>(...params) to createTypeUnsafe(...params) in all generated types:
-const generatedTypesPaths = path.resolve(__dirname, '../generated/types')
-fs.readdirSync(generatedTypesPaths).map((fileName) => {
-  if (path.extname(fileName) === '.ts') {
-    const filePath = path.join(generatedTypesPaths, fileName)
-    const fileContent = fs.readFileSync(filePath).toString()
-    fs.writeFileSync(filePath, fileContent.replace(/createTypeUnsafe<[^(]+[(]/g, 'createTypeUnsafe('))
-  }
-})

Some files were not shown because too many files changed in this diff