
Merge branch 'giza' into giza-cli

Leszek Wiesner, 3 years ago
Parent commit: 5b037cdba1
100 changed files with 7086 additions and 5151 deletions
  1. .env (+0 -3)
  2. colossus.Dockerfile (+25 -6)
  3. docker-compose.yml (+15 -25)
  4. package.json (+1 -1)
  5. query-node/mappings/storage/index.ts (+39 -24)
  6. query-node/mappings/storage/utils.ts (+28 -3)
  7. query-node/schemas/storage.graphql (+4 -1)
  8. storage-node-v2/.eslintignore (+1 -0)
  9. storage-node-v2/README.md (+405 -195)
  10. storage-node-v2/package.json (+42 -8)
  11. storage-node-v2/scripts/init-dev-bucket.sh (+4 -3)
  12. storage-node-v2/scripts/operatorMetadata.json (+12 -0)
  13. storage-node-v2/scripts/run-all-commands.sh (+2 -1)
  14. storage-node-v2/src/api-spec/openapi.yaml (+96 -45)
  15. storage-node-v2/src/command-base/ApiCommandBase.ts (+40 -16)
  16. storage-node-v2/src/command-base/ExitCodes.ts (+3 -0)
  17. storage-node-v2/src/commands/dev/init.ts (+4 -0)
  18. storage-node-v2/src/commands/dev/sync.ts (+77 -0)
  19. storage-node-v2/src/commands/dev/verify-bag-id.ts (+5 -23)
  20. storage-node-v2/src/commands/leader/update-bag.ts (+6 -18)
  21. storage-node-v2/src/commands/leader/update-bucket-status.ts (+0 -8)
  22. storage-node-v2/src/commands/leader/update-dynamic-bag-policy.ts (+1 -1)
  23. storage-node-v2/src/commands/operator/set-metadata.ts (+21 -9)
  24. storage-node-v2/src/commands/server.ts (+165 -5)
  25. storage-node-v2/src/services/caching/localDataObjects.ts (+84 -0)
  26. storage-node-v2/src/services/caching/newUploads.ts (+35 -0)
  27. storage-node-v2/src/services/caching/tokenNonceKeeper.ts (+2 -2)
  28. storage-node-v2/src/services/helpers/auth.ts (+4 -3)
  29. storage-node-v2/src/services/helpers/bagTypes.ts (+29 -27)
  30. storage-node-v2/src/services/logger.ts (+134 -30)
  31. storage-node-v2/src/services/metadata/generateTypes.ts (+20 -0)
  32. storage-node-v2/src/services/metadata/generated/OperatorMetadataJson.d.ts (+19 -0)
  33. storage-node-v2/src/services/metadata/schemas/index.ts (+12 -0)
  34. storage-node-v2/src/services/metadata/schemas/operatorMetadataSchema.ts (+29 -0)
  35. storage-node-v2/src/services/metadata/validationService.ts (+37 -0)
  36. storage-node-v2/src/services/queryNode/api.ts (+263 -0)
  37. storage-node-v2/src/services/queryNode/codegen.yml (+33 -0)
  38. storage-node-v2/src/services/queryNode/generated/queries.ts (+218 -0)
  39. storage-node-v2/src/services/queryNode/generated/schema.ts (+3704 -0)
  40. storage-node-v2/src/services/queryNode/queries/queries.graphql (+116 -0)
  41. storage-node-v2/src/services/runtime/api.ts (+69 -14)
  42. storage-node-v2/src/services/runtime/extrinsics.ts (+7 -2)
  43. storage-node-v2/src/services/runtime/transactionNonceKeeper.ts (+0 -34)
  44. storage-node-v2/src/services/sync/remoteStorageData.ts (+63 -0)
  45. storage-node-v2/src/services/sync/storageObligations.ts (+225 -0)
  46. storage-node-v2/src/services/sync/synchronizer.ts (+179 -0)
  47. storage-node-v2/src/services/sync/tasks.ts (+240 -0)
  48. storage-node-v2/src/services/sync/workingProcess.ts (+123 -0)
  49. storage-node-v2/src/services/webApi/app.ts (+39 -27)
  50. storage-node-v2/src/services/webApi/controllers/common.ts (+149 -0)
  51. storage-node-v2/src/services/webApi/controllers/filesApi.ts (+100 -186)
  52. storage-node-v2/src/services/webApi/controllers/stateApi.ts (+156 -0)
  53. storage-node-v2/tsconfig.json (+1 -0)
  54. storage-node/.eslintrc.js (+0 -35)
  55. storage-node/.gitignore (+0 -31)
  56. storage-node/.prettierignore (+0 -1)
  57. storage-node/LICENSE.md (+0 -675)
  58. storage-node/README.md (+0 -90)
  59. storage-node/docs/json-signing.md (+0 -54)
  60. storage-node/package.json (+0 -55)
  61. storage-node/packages/cli/.eslintignore (+0 -4)
  62. storage-node/packages/cli/README.md (+0 -40)
  63. storage-node/packages/cli/bin/cli.js (+0 -14)
  64. storage-node/packages/cli/package.json (+0 -56)
  65. storage-node/packages/cli/src/cli.ts (+0 -126)
  66. storage-node/packages/cli/src/commands/base.ts (+0 -93)
  67. storage-node/packages/cli/src/commands/dev.ts (+0 -265)
  68. storage-node/packages/cli/src/commands/download.ts (+0 -70)
  69. storage-node/packages/cli/src/commands/head.ts (+0 -48)
  70. storage-node/packages/cli/src/commands/upload.ts (+0 -202)
  71. storage-node/packages/cli/src/test/index.ts (+0 -1)
  72. storage-node/packages/cli/tsconfig.json (+0 -9)
  73. storage-node/packages/colossus/.eslintrc.js (+0 -1)
  74. storage-node/packages/colossus/README.md (+0 -81)
  75. storage-node/packages/colossus/api-base.yml (+0 -33)
  76. storage-node/packages/colossus/bin/cli.js (+0 -340)
  77. storage-node/packages/colossus/lib/app.js (+0 -78)
  78. storage-node/packages/colossus/lib/middleware/file_uploads.js (+0 -43)
  79. storage-node/packages/colossus/lib/middleware/ipfs_proxy.js (+0 -77)
  80. storage-node/packages/colossus/lib/middleware/validate_responses.js (+0 -61)
  81. storage-node/packages/colossus/lib/sync.js (+0 -120)
  82. storage-node/packages/colossus/package.json (+0 -73)
  83. storage-node/packages/colossus/paths/asset/v0/{id}.js (+0 -385)
  84. storage-node/packages/colossus/test/index.js (+0 -1)
  85. storage-node/packages/helios/.gitignore (+0 -3)
  86. storage-node/packages/helios/README.md (+0 -9)
  87. storage-node/packages/helios/bin/cli.js (+0 -128)
  88. storage-node/packages/helios/package.json (+0 -21)
  89. storage-node/packages/helios/test/index.js (+0 -1)
  90. storage-node/packages/runtime-api/.eslintrc.js (+0 -1)
  91. storage-node/packages/runtime-api/.gitignore (+0 -3)
  92. storage-node/packages/runtime-api/README.md (+0 -6)
  93. storage-node/packages/runtime-api/assets.js (+0 -210)
  94. storage-node/packages/runtime-api/balances.js (+0 -79)
  95. storage-node/packages/runtime-api/identities.js (+0 -246)
  96. storage-node/packages/runtime-api/index.js (+0 -379)
  97. storage-node/packages/runtime-api/package.json (+0 -58)
  98. storage-node/packages/runtime-api/system.js (+0 -33)
  99. storage-node/packages/runtime-api/test/assets.js (+0 -48)
  100. storage-node/packages/runtime-api/test/balances.js (+0 -44)

.env (+0 -3)

@@ -14,9 +14,6 @@ DB_PORT=5432
 DEBUG=index-builder:*
 TYPEORM_LOGGING=error
 
-DEBUG=index-builder:*
-TYPEORM_LOGGING=error
-
 ###########################
 #    Indexer options      #
 ###########################

colossus.Dockerfile (+25 -6)

@@ -2,15 +2,34 @@ FROM --platform=linux/x86-64 node:14 as builder
 
 WORKDIR /joystream
 COPY . /joystream
-RUN  rm -fr /joystream/pioneer
-
-EXPOSE 3001
 
 RUN yarn --frozen-lockfile
 
 RUN yarn workspace @joystream/types build
-RUN yarn workspace storage-node build
+RUN yarn workspace @joystream/metadata-protobuf build
+RUN yarn workspace storage-node-v2 build
+
+# Use these volumes to persist uploading data and to pass the keyfile.
+VOLUME ["/data", "/keystore"]
+
+# Required variables
+ENV WS_PROVIDER_ENDPOINT_URI=ws://not-set
+ENV COLOSSUS_PORT=3333
+ENV QUERY_NODE_ENDPOINT=http://not-set/graphql
+ENV WORKER_ID=not-set
+# - set external key file using the `/keystore` volume
+ENV ACCOUNT_KEYFILE=
+ENV ACCOUNT_PWD=
+# Optional variables
+ENV SYNC_INTERVAL=1
+ENV ELASTIC_SEARCH_ENDPOINT=
+# warn, error, debug, info
+ENV ELASTIC_LOG_LEVEL=debug
+# - overrides account key file
+ENV ACCOUNT_URI=
 
-RUN yarn
+# Colossus node port
+EXPOSE ${COLOSSUS_PORT}
 
-ENTRYPOINT yarn colossus --dev --ws-provider $WS_PROVIDER_ENDPOINT_URI
+WORKDIR /joystream/storage-node-v2
+ENTRYPOINT yarn storage-node server --queryNodeEndpoint ${QUERY_NODE_ENDPOINT} --port ${COLOSSUS_PORT} --uploads /data --worker ${WORKER_ID} --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} --keyFile=${ACCOUNT_KEYFILE} --elasticSearchEndpoint=${ELASTIC_SEARCH_ENDPOINT}
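The rebuilt image now runs `storage-node-v2` and is configured entirely through the environment variables declared above. A minimal sketch of building and running it standalone; the endpoints, host paths and key-file name are placeholders, while the variable names, port and volumes come from the Dockerfile itself:

```shell
# Sketch only: endpoints, host paths and the key-file name are placeholders.
docker build -f colossus.Dockerfile -t joystream/colossus:latest .

docker run -d --name colossus \
  -p 127.0.0.1:3333:3333 \
  -v /srv/colossus/data:/data \
  -v /srv/colossus/keystore:/keystore \
  -e WS_PROVIDER_ENDPOINT_URI=ws://joystream-node:9944 \
  -e QUERY_NODE_ENDPOINT=http://graphql-server:8081/graphql \
  -e WORKER_ID=0 \
  -e ACCOUNT_KEYFILE=/keystore/storage-role-key.json \
  -e ACCOUNT_PWD=changeme \
  joystream/colossus:latest
```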

docker-compose.yml (+15 -25)

@@ -18,39 +18,29 @@ services:
       - "127.0.0.1:9944:9944"
       - "127.0.0.1:9933:9933"
 
-  ipfs:
-    image: ipfs/go-ipfs:latest
-    ports:
-      - '127.0.0.1:5001:5001'
-      - '127.0.0.1:8080:8080'
-    volumes:
-      - /data/ipfs
-    entrypoint: ''
-    command: |
-      /bin/sh -c "
-        set -e
-        /usr/local/bin/start_ipfs config profile apply lowpower
-        /usr/local/bin/start_ipfs config --json Gateway.PublicGateways '{\"localhost\": null }'
-        /sbin/tini -- /usr/local/bin/start_ipfs daemon --migrate=true
-      "
-
   colossus:
-    image: joystream/apps
+    image: joystream/colossus:latest
     restart: on-failure
-    depends_on:
-      - "ipfs"
     build:
       context: .
-      dockerfile: apps.Dockerfile
+      dockerfile: colossus.Dockerfile
+    depends_on:
+      - graphql-server-mnt
+    volumes:
+      - /data
+      - /keystore
+    ports:
+      - '127.0.0.1:3333:3333'
     env_file:
       # relative to working directory where docker-compose was run from
       - .env
-    ports:
-      - '127.0.0.1:3001:3001'
-    command: colossus --dev --ws-provider ${WS_PROVIDER_ENDPOINT_URI} --ipfs-host ipfs
     environment:
-      - DEBUG=*
-
+      - COLOSSUS_PORT=3333
+      - QUERY_NODE_ENDPOINT=http://graphql-server-mnt:${GRAPHQL_SERVER_PORT}/graphql
+      - WORKER_ID=0
+      - ACCOUNT_URI=//Alice
+      # enable ElasticSearch server
+      # - ELASTIC_SEARCH_ENDPOINT=host.docker.internal:9200
   db:
     image: postgres:12
     restart: always
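With these changes the colossus service is built from colossus.Dockerfile and waits for the query-node GraphQL server instead of IPFS. A rough local bring-up sketch, assuming `GRAPHQL_SERVER_PORT` is already defined in `.env` and the `graphql-server-mnt` service is available in the same compose project:

```shell
# Sketch only: service names are taken from the compose file above,
# GRAPHQL_SERVER_PORT is assumed to be set in .env.
docker-compose build colossus
docker-compose up -d graphql-server-mnt colossus
docker-compose logs -f colossus
```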

package.json (+1 -1)

@@ -69,7 +69,7 @@
     "yarn": "^1.22.0"
   },
   "volta": {
-    "node": "14.16.1",
+    "node": "14.18.0",
     "yarn": "1.22.4"
   }
 }

query-node/mappings/storage/index.ts (+39 -24)

@@ -38,6 +38,9 @@ import {
   getDynamicBag,
   getDistributionBucketFamilyWithMetadata,
   getDistributionBucketOperatorWithMetadata,
+  distributionBucketId,
+  distributionOperatorId,
+  distributionBucketIdByFamilyAndIndex,
 } from './utils'
 
 // STORAGE BUCKETS
@@ -201,7 +204,7 @@ export async function storage_DynamicBagCreated({ event, store }: EventContext &
     owner: getDynamicBagOwner(bagId),
     storageBuckets: Array.from(storageBucketIdsSet).map((id) => new StorageBucket({ id: id.toString() })),
     distributionBuckets: Array.from(distributionBucketIdsSet).map(
-      (id) => new DistributionBucket({ id: id.toString() })
+      (id) => new DistributionBucket({ id: distributionBucketId(id) })
     ),
   })
   await store.save<StorageBag>(storageBag)
@@ -295,7 +298,8 @@ export async function storage_DistributionBucketCreated({ event, store }: EventC
 
   const family = await getById(store, DistributionBucketFamily, familyId.toString())
   const bucket = new DistributionBucket({
-    id: bucketId.toString(),
+    id: distributionBucketId(bucketId),
+    bucketIndex: bucketId.distribution_bucket_index.toNumber(),
     acceptingNewBags: acceptingNewBags.valueOf(),
     distributing: true, // Runtime default
     family,
@@ -308,28 +312,30 @@ export async function storage_DistributionBucketStatusUpdated({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId, acceptingNewBags] = new Storage.DistributionBucketStatusUpdatedEvent(event).params
+  const [bucketId, acceptingNewBags] = new Storage.DistributionBucketStatusUpdatedEvent(event).params
 
-  const bucket = await getById(store, DistributionBucket, bucketId.toString())
+  const bucket = await getById(store, DistributionBucket, distributionBucketId(bucketId))
   bucket.acceptingNewBags = acceptingNewBags.valueOf()
 
   await store.save<DistributionBucket>(bucket)
 }
 
 export async function storage_DistributionBucketDeleted({ event, store }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId] = new Storage.DistributionBucketDeletedEvent(event).params
+  const [bucketId] = new Storage.DistributionBucketDeletedEvent(event).params
   // TODO: Cascade remove on db level (would require changes in Hydra / comitting autogenerated files)
   const distributionBucket = await store.get(DistributionBucket, {
-    where: { id: bucketId.toString() },
+    where: { id: distributionBucketId(bucketId) },
     relations: ['bags', 'bags.distributionBuckets'],
   })
   if (!distributionBucket) {
-    inconsistentState(`Distribution bucket by id ${bucketId.toString()} not found!`)
+    inconsistentState(`Distribution bucket by id ${distributionBucketId(bucketId)} not found!`)
   }
   // Remove relations
   await Promise.all(
     (distributionBucket.bags || []).map((bag) => {
-      bag.distributionBuckets = (bag.distributionBuckets || []).filter((bucket) => bucket.id !== bucketId.toString())
+      bag.distributionBuckets = (bag.distributionBuckets || []).filter(
+        (bucket) => bucket.id !== distributionBucketId(bucketId)
+      )
       return store.save<StorageBag>(bag)
     })
   )
@@ -340,11 +346,20 @@ export async function storage_DistributionBucketsUpdatedForBag({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [bagId, , addedBucketsSet, removedBucketsSet] = new Storage.DistributionBucketsUpdatedForBagEvent(event).params
+  const [
+    bagId,
+    familyId,
+    addedBucketsIndices,
+    removedBucketsIndices,
+  ] = new Storage.DistributionBucketsUpdatedForBagEvent(event).params
   // Get or create bag
   const storageBag = await getBag(store, bagId, ['distributionBuckets'])
-  const removedBucketsIds = Array.from(removedBucketsSet).map((id) => id.toString())
-  const addedBucketsIds = Array.from(addedBucketsSet).map((id) => id.toString())
+  const removedBucketsIds = Array.from(removedBucketsIndices).map((bucketIndex) =>
+    distributionBucketIdByFamilyAndIndex(familyId, bucketIndex)
+  )
+  const addedBucketsIds = Array.from(addedBucketsIndices).map((bucketIndex) =>
+    distributionBucketIdByFamilyAndIndex(familyId, bucketIndex)
+  )
   storageBag.distributionBuckets = (storageBag.distributionBuckets || [])
     .filter((bucket) => !removedBucketsIds.includes(bucket.id))
     .concat(addedBucketsIds.map((id) => new DistributionBucket({ id })))
@@ -355,9 +370,9 @@ export async function storage_DistributionBucketModeUpdated({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId, distributing] = new Storage.DistributionBucketModeUpdatedEvent(event).params
+  const [bucketId, distributing] = new Storage.DistributionBucketModeUpdatedEvent(event).params
 
-  const bucket = await getById(store, DistributionBucket, bucketId.toString())
+  const bucket = await getById(store, DistributionBucket, distributionBucketId(bucketId))
   bucket.distributing = distributing.valueOf()
 
   await store.save<DistributionBucket>(bucket)
@@ -367,11 +382,11 @@ export async function storage_DistributionBucketOperatorInvited({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId, workerId] = new Storage.DistributionBucketOperatorInvitedEvent(event).params
+  const [bucketId, workerId] = new Storage.DistributionBucketOperatorInvitedEvent(event).params
 
-  const bucket = await getById(store, DistributionBucket, bucketId.toString())
+  const bucket = await getById(store, DistributionBucket, distributionBucketId(bucketId))
   const invitedOperator = new DistributionBucketOperator({
-    id: `${bucketId}-${workerId}`,
+    id: distributionOperatorId(bucketId, workerId),
     distributionBucket: bucket,
     status: DistributionBucketOperatorStatus.INVITED,
     workerId: workerId.toNumber(),
@@ -384,9 +399,9 @@ export async function storage_DistributionBucketInvitationCancelled({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId, workerId] = new Storage.DistributionBucketOperatorInvitedEvent(event).params
+  const [bucketId, workerId] = new Storage.DistributionBucketOperatorInvitedEvent(event).params
 
-  const invitedOperator = await getById(store, DistributionBucketOperator, `${bucketId}-${workerId}`)
+  const invitedOperator = await getById(store, DistributionBucketOperator, distributionOperatorId(bucketId, workerId))
 
   await store.remove<DistributionBucketOperator>(invitedOperator)
 }
@@ -395,9 +410,9 @@ export async function storage_DistributionBucketInvitationAccepted({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [workerId, , bucketId] = new Storage.DistributionBucketInvitationAcceptedEvent(event).params
+  const [workerId, bucketId] = new Storage.DistributionBucketInvitationAcceptedEvent(event).params
 
-  const invitedOperator = await getById(store, DistributionBucketOperator, `${bucketId}-${workerId}`)
+  const invitedOperator = await getById(store, DistributionBucketOperator, distributionOperatorId(bucketId, workerId))
   invitedOperator.status = DistributionBucketOperatorStatus.ACTIVE
 
   await store.save<DistributionBucketOperator>(invitedOperator)
@@ -407,9 +422,9 @@ export async function storage_DistributionBucketMetadataSet({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [workerId, , bucketId, metadataBytes] = new Storage.DistributionBucketMetadataSetEvent(event).params
+  const [workerId, bucketId, metadataBytes] = new Storage.DistributionBucketMetadataSetEvent(event).params
 
-  const operator = await getDistributionBucketOperatorWithMetadata(store, `${bucketId}-${workerId}`)
+  const operator = await getDistributionBucketOperatorWithMetadata(store, distributionOperatorId(bucketId, workerId))
   operator.metadata = await processDistributionOperatorMetadata(store, operator.metadata, metadataBytes)
 
   await store.save<DistributionBucketOperator>(operator)
@@ -419,11 +434,11 @@ export async function storage_DistributionBucketOperatorRemoved({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [, bucketId, workerId] = new Storage.DistributionBucketOperatorRemovedEvent(event).params
+  const [bucketId, workerId] = new Storage.DistributionBucketOperatorRemovedEvent(event).params
 
   // TODO: Cascade remove on db level (would require changes in Hydra / comitting autogenerated files)
 
-  const operator = await getDistributionBucketOperatorWithMetadata(store, `${bucketId}-${workerId}`)
+  const operator = await getDistributionBucketOperatorWithMetadata(store, distributionOperatorId(bucketId, workerId))
   await store.remove<DistributionBucketOperator>(operator)
   if (operator.metadata) {
     await store.remove<DistributionBucketOperatorMetadata>(operator.metadata)

query-node/mappings/storage/utils.ts (+28 -3)

@@ -23,7 +23,16 @@ import { unsetAssetRelations } from '../content/utils'
 
 import { BTreeSet } from '@polkadot/types'
 import _ from 'lodash'
-import { DataObjectId, BagId, DynamicBagId, StaticBagId } from '@joystream/types/augment/all'
+import {
+  DataObjectId,
+  BagId,
+  DynamicBagId,
+  StaticBagId,
+  DistributionBucketId,
+  DistributionBucketFamilyId,
+  DistributionBucketIndex,
+  WorkerId,
+} from '@joystream/types/augment/all'
 import { Balance } from '@polkadot/types/interfaces'
 
 export async function getDataObjectsInBag(
@@ -60,7 +69,7 @@ export function getStaticBagOwner(bagId: StaticBagId): typeof StorageBagOwner {
   }
 }
 
-export function getDynamicBagOwner(bagId: DynamicBagId) {
+export function getDynamicBagOwner(bagId: DynamicBagId): typeof StorageBagOwner {
   if (bagId.isChannel) {
     const owner = new StorageBagOwnerChannel()
     owner.channelId = bagId.asChannel.toNumber()
@@ -94,7 +103,7 @@ export function getDynamicBagId(bagId: DynamicBagId): string {
   }
 }
 
-export function getBagId(bagId: BagId) {
+export function getBagId(bagId: BagId): string {
   return bagId.isStatic ? getStaticBagId(bagId.asStatic) : getDynamicBagId(bagId.asDynamic)
 }
 
@@ -239,3 +248,19 @@ export async function removeDataObject(store: DatabaseManager, object: StorageDa
   await unsetAssetRelations(store, object)
   await store.remove<StorageDataObject>(object)
 }
+
+export function distributionBucketId(runtimeBucketId: DistributionBucketId): string {
+  const { distribution_bucket_family_id: familyId, distribution_bucket_index: bucketIndex } = runtimeBucketId
+  return distributionBucketIdByFamilyAndIndex(familyId, bucketIndex)
+}
+
+export function distributionBucketIdByFamilyAndIndex(
+  familyId: DistributionBucketFamilyId,
+  bucketIndex: DistributionBucketIndex
+): string {
+  return `${familyId.toString()}:${bucketIndex.toString()}`
+}
+
+export function distributionOperatorId(bucketId: DistributionBucketId, workerId: WorkerId): string {
+  return `${distributionBucketId(bucketId)}-${workerId.toString()}`
+}
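The new helpers derive query-node entity IDs from the runtime's composite `DistributionBucketId`. A small illustrative sketch of the resulting formats; the literal numbers are invented, and in the actual mappings the inputs are Substrate codec types (`DistributionBucketFamilyId`, `DistributionBucketIndex`, `WorkerId`), not plain numbers:

```typescript
// Illustrative only: plain numbers stand in for the runtime codec types.
const familyId = 2
const bucketIndex = 7
const workerId = 15

// distributionBucketIdByFamilyAndIndex(familyId, bucketIndex)
const bucketEntityId = `${familyId}:${bucketIndex}` // "2:7"

// distributionOperatorId(bucketId, workerId)
const operatorEntityId = `${bucketEntityId}-${workerId}` // "2:7-15"
```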

query-node/schemas/storage.graphql (+4 -1)

@@ -273,12 +273,15 @@ type DistributionBucketOperator @entity {
 }
 
 type DistributionBucket @entity {
-  "Runtime bucket id"
+  "Runtime bucket id in {familyId}:{bucketIndex} format"
   id: ID!
 
   "Distribution family the bucket is part of"
   family: DistributionBucketFamily!
 
+  "Bucket index within the family"
+  bucketIndex: Int!
+
   "Distribution bucket operators (either active or invited)"
   operators: [DistributionBucketOperator!] @derivedFrom(field: "distributionBucket")
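The schema now exposes the bucket's index within its family alongside the composite `{familyId}:{bucketIndex}` id. A hedged sketch of reading both back from a local query node; the endpoint matches the default used elsewhere in this change, and the `distributionBuckets` query and field names are assumptions based on the Hydra-generated API:

```shell
# Assumes a query node on the default endpoint; query/field names are assumptions.
curl -s -X POST http://localhost:8081/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"{ distributionBuckets(limit: 5) { id bucketIndex family { id } } }"}'
```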
 

storage-node-v2/.eslintignore (+1 -0)

@@ -1,2 +1,3 @@
 /lib
 .eslintrc.js
+**/generated/*

storage-node-v2/README.md (+405 -195)

@@ -1,35 +1,146 @@
-storage-node-v2
+Colossus v2
 ===============
 
-Jostream storage subsystem.
+Joystream storage subsystem.
 
 [![oclif](https://img.shields.io/badge/cli-oclif-brightgreen.svg)](https://oclif.io)
-[![Version](https://img.shields.io/npm/v/storage-node-v2.svg)](https://npmjs.org/package/storage-node-v2)
-[![Downloads/week](https://img.shields.io/npm/dw/storage-node-v2.svg)](https://npmjs.org/package/storage-node-v2)
-[![License](https://img.shields.io/npm/l/storage-node-v2.svg)](https://github.com/shamil-gadelshin/storage-node-v2/blob/master/package.json)
+![License](https://img.shields.io/github/license/Joystream/joystream)
 
 <!-- toc -->
+* [Description](#description)
+  * [API](#api)
+  * [CLI](#cli)
+  * [Metadata](#metadata)
+  * [Data](#data)
+    * [Uploading](#uploading)
+    * [Synchronization](#synchronization)
+    * [Distribution](#distribution)
+  * [Comments](#comments)
+* [Installation](#installation)
 * [Usage](#usage)
-* [Commands](#commands)
+  * [Prerequisites](#prerequisites)
+  * [CLI Command](#cli-command)
+  * [Docker](#docker)
+* [CLI Commands](#cli-commands)
 <!-- tocstop -->
+
+# Description
+
+The main responsibility of Colossus is handling media data for users. The data could be images, audio, or video files.
+Colossus receives uploads and saves files in the local folder, registers uploads in the blockchain, and later serves files 
+to Argus nodes (distribution nodes). Colossus instances spread the data using peer-to-peer synchronization.
+Data management is blockchain-based, it relies on the concepts of buckets, bags, data objects.
+The full description of the blockchain smart contracts could be found [here](https://github.com/Joystream/joystream/issues/2224).
+
+### API
+
+Colossus provides REST API for its clients and other Colossus instances. It's based on the OpenAPI Specification v3. Here is the complete [spec](./src/api-spec/openapi.yaml) (work in progress).
+
+API endpoints:
+- files
+    - get - get the data file by its ID
+    - head - get the data file headers by its ID
+    - post - upload a file
+- state
+    - version - Colossus version and system environment
+    - all data objects IDs
+    - data objects IDs for bag
+    - data statistics - total data folder size and data object number
+### CLI
+
+There is a command-line interface to manage Storage Working Group operations like create a bucket or change storage settings. Full description could be found [below](#cli-commands).
+
+There are several groups of command:
+- *leader* - manages the Storage Working group in the blockchain. Requires leader privileges.
+- *operator* - Storage Provider group - it manages data object uploads and storage provider metadata(endpoint is the most important). Requires active Storage Working group membership.
+- *dev* - development support commands. Requires development blockchain setup with Alice account.
+- *ungroupped* - server and help commands. `server` starts Colossus and `help` shows the full command list.
+
+### Metadata
+The storage provider should provide *metadata* for Colossus instance to be discoverable by other Colossus or
+Argus (distributor node) instances. At the very least an endpoint should be registered in the blockchain.
+For some complex scenarios, Colossus should provide its geolocation.
+
+Metadata could be registered using [operator:set-metadata](#storage-node-operatorset-metadata) command.
+A simple endpoint could be set using the `--endpoint` flag of the command. Complex metadata requires JSON file ([example](./scripts/operatorMetadata.json)).
+JSON file format based on the *protobuf* format described [here](../metadata-protobuf/proto/Storage.proto).
+
+### Data 
+#### Uploading
+
+Colossus accepts files using its API. The data must be uploaded using POST http method with `multipart/form-data`.
+Simplified process (file uploading):
+   - accepting the data upload in the temp folder
+   - data hash & size verification
+   - moving the data to the data folder
+   - registering the data object as `accepted` in the blockchain
+    
+#### Synchronization
+
+Several instances of Colossus should contain the data replica in order to provide some degree of reliability.
+When some Colossus instance receives the data upload it marks the related data object as `accepted`.
+Other instances that have the same obligations to store the data (they serve storage buckets assigned to the same bag)
+will eventually load this data object from the initial receiver (or some other node that already downloaded a new
+data object from the initial receiver) using REST API.
+
+#### Distribution
+
+The actual data distribution (serving to end-users) is done via Argus - the distributor node. It gets data from Colossus using the same `get` endpoint on a single data object basis.
+
+### Comments
+- Colossus relies on the [Query Node (Hydra)](https://www.joystream.org/hydra/) to get the blockchain data in a structured form.
+- Using Colossus as a functioning Storage Provider requires providing [account URI or key file and password](https://wiki.polkadot.network/docs/learn-accounts) as well as active `WorkerId` from the Storage Working group.
+
+# Installation
+```shell
+# Ubuntu Linux
+
+# Install packages required for installation
+apt update
+apt install git curl
+
+# Clone the code repository
+git clone https://github.com/Joystream/joystream
+cd joystream
+
+# Install volta
+curl https://get.volta.sh | bash
+bash
+
+# Install project dependencies and build it
+yarn
+yarn workspace @joystream/types build
+yarn workspace @joystream/metadata-protobuf build
+yarn workspace storage-node-v2 build
+
+# Verify installation
+cd storage-node-v2
+yarn storage-node version
+```
 # Usage
-<!-- usage -->
+
 ```sh-session
-$ npm install -g storage-node-v2
-$ storage-node COMMAND
-running command...
-$ storage-node (-v|--version|version)
-storage-node-v2/0.1.0 darwin-x64 node-v14.17.0
-$ storage-node --help [COMMAND]
-USAGE
-  $ storage-node COMMAND
-...
+$ yarn storage-node server --apiUrl ws://localhost:9944  -w 0 --accountUri //Alice -q localhost:8081 -o 3333 -d ~/uploads --sync
 ```
-<!-- usagestop -->
-# Commands
+
+### Prerequisites
+- accountURI or keyfile and password
+- workerId from the Storage working group that matches with the account above
+- Joystream node websocket endpoint URL
+- QueryNode URL
+- (optional) ElasticSearch URL
+- created directory for data uploading
+
+### CLI command
+ Full command description could be find [below](#storage-node-server).
+### Docker
+There is also an option to run Colossus as [Docker container](../colossus.Dockerfile).
+
+# CLI Commands
 <!-- commands -->
 * [`storage-node dev:init`](#storage-node-devinit)
 * [`storage-node dev:multihash`](#storage-node-devmultihash)
+* [`storage-node dev:sync`](#storage-node-devsync)
 * [`storage-node dev:upload`](#storage-node-devupload)
 * [`storage-node dev:verify-bag-id`](#storage-node-devverify-bag-id)
 * [`storage-node help [COMMAND]`](#storage-node-help-command)
@@ -60,14 +171,17 @@ USAGE
   $ storage-node dev:init
 
 OPTIONS
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/dev/init.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/init.ts)_
+_See code: [src/commands/dev/init.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/dev/init.ts)_
 
 ## `storage-node dev:multihash`
 
@@ -82,7 +196,36 @@ OPTIONS
   -h, --help       show CLI help
 ```
 
-_See code: [src/commands/dev/multihash.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/multihash.ts)_
+_See code: [src/commands/dev/multihash.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/dev/multihash.ts)_
+
+## `storage-node dev:sync`
+
+Synchronizes the data - it fixes the differences between local data folder and worker ID obligations from the runtime.
+
+```
+USAGE
+  $ storage-node dev:sync
+
+OPTIONS
+  -d, --uploads=uploads                              (required) Data uploading directory (absolute path).
+  -h, --help                                         show CLI help
+
+  -o, --dataSourceOperatorUrl=dataSourceOperatorUrl  [default: http://localhost:3333] Storage node url base (e.g.:
+                                                     http://some.com:3333) to get data from.
+
+  -p, --syncWorkersNumber=syncWorkersNumber          [default: 20] Sync workers number (max async operations in
+                                                     progress).
+
+  -q, --queryNodeEndpoint=queryNodeEndpoint          [default: http://localhost:8081/graphql] Query node endpoint (e.g.:
+                                                     http://some.com:8081/graphql)
+
+  -t, --syncWorkersTimeout=syncWorkersTimeout        [default: 30] Asset downloading timeout for the syncronization (in
+                                                     minutes).
+
+  -w, --workerId=workerId                            (required) Storage node operator worker ID.
+```
+
+_See code: [src/commands/dev/sync.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/dev/sync.ts)_
 
 ## `storage-node dev:upload`
 
@@ -93,16 +236,19 @@ USAGE
   $ storage-node dev:upload
 
 OPTIONS
-  -c, --cid=cid            (required) Data object IPFS content ID.
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -s, --size=size          (required) Data object size.
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -c, --cid=cid                (required) Data object IPFS content ID.
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -s, --size=size              (required) Data object size.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/dev/upload.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/upload.ts)_
+_See code: [src/commands/dev/upload.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/dev/upload.ts)_
 
 ## `storage-node dev:verify-bag-id`
 
@@ -117,33 +263,20 @@ OPTIONS
       show CLI help
 
   -i, --bagId=bagId
-      (required) 
-             Bag ID. Format: {bag_type}:{sub_type}:{id}.
-             - Bag types: 'static', 'dynamic'
-             - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
-             - Id: 
-               - absent for 'static:council'
-               - working group name for 'static:wg'
-               - integer for 'dynamic:member' and 'dynamic:channel'
-             Examples:
-             - static:council
-             - static:wg:storage
-             - dynamic:member:4
-
-  -k, --keyfile=keyfile
-      Key file for the account. Mandatory in non-dev environment.
-
-  -m, --dev
-      Use development mode
-
-  -p, --password=password
-      Key file password (optional).
-
-  -u, --apiUrl=apiUrl
-      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+      (required) Bag ID. Format: {bag_type}:{sub_type}:{id}.
+           - Bag types: 'static', 'dynamic'
+           - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
+           - Id:
+             - absent for 'static:council'
+             - working group name for 'static:wg'
+             - integer for 'dynamic:member' and 'dynamic:channel'
+           Examples:
+           - static:council
+           - static:wg:storage
+           - dynamic:member:4
 ```
 
-_See code: [src/commands/dev/verify-bag-id.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/dev/verify-bag-id.ts)_
+_See code: [src/commands/dev/verify-bag-id.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/dev/verify-bag-id.ts)_
 
 ## `storage-node help [COMMAND]`
 
@@ -160,7 +293,7 @@ OPTIONS
   --all  see all commands in CLI
 ```
 
-_See code: [@oclif/plugin-help](https://github.com/oclif/plugin-help/blob/v3.0.1/src/commands/help.ts)_
+_See code: [@oclif/plugin-help](https://github.com/oclif/plugin-help/blob/v3.2.2/src/commands/help.ts)_
 
 ## `storage-node leader:cancel-invite`
 
@@ -171,15 +304,18 @@ USAGE
   $ storage-node leader:cancel-invite
 
 OPTIONS
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/cancel-invite.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/cancel-invite.ts)_
+_See code: [src/commands/leader/cancel-invite.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/cancel-invite.ts)_
 
 ## `storage-node leader:create-bucket`
 
@@ -190,18 +326,21 @@ USAGE
   $ storage-node leader:create-bucket
 
 OPTIONS
-  -a, --allow              Accepts new bags
-  -h, --help               show CLI help
-  -i, --invited=invited    Invited storage operator ID (storage WG worker ID)
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -n, --number=number      Storage bucket max total objects number
-  -p, --password=password  Key file password (optional).
-  -s, --size=size          Storage bucket max total objects size
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -a, --allow                  Accepts new bags
+  -h, --help                   show CLI help
+  -i, --invited=invited        Invited storage operator ID (storage WG worker ID)
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -n, --number=number          Storage bucket max total objects number
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -s, --size=size              Storage bucket max total objects size
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/create-bucket.ts)_
+_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/create-bucket.ts)_
 
 ## `storage-node leader:delete-bucket`
 
@@ -212,15 +351,18 @@ USAGE
   $ storage-node leader:delete-bucket
 
 OPTIONS
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/delete-bucket.ts)_
+_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/delete-bucket.ts)_
 
 ## `storage-node leader:invite-operator`
 
@@ -233,14 +375,17 @@ USAGE
 OPTIONS
   -h, --help                   show CLI help
   -i, --bucketId=bucketId      (required) Storage bucket ID
-  -k, --keyfile=keyfile        Key file for the account. Mandatory in non-dev environment.
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
   -m, --dev                    Use development mode
-  -p, --password=password      Key file password (optional).
-  -u, --apiUrl=apiUrl          Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
   -w, --operatorId=operatorId  (required) Storage bucket operator ID (storage group worker ID)
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/invite-operator.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/invite-operator.ts)_
+_See code: [src/commands/leader/invite-operator.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/invite-operator.ts)_
 
 ## `storage-node leader:remove-operator`
 
@@ -251,15 +396,18 @@ USAGE
   $ storage-node leader:remove-operator
 
 OPTIONS
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/remove-operator.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/remove-operator.ts)_
+_See code: [src/commands/leader/remove-operator.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/remove-operator.ts)_
 
 ## `storage-node leader:set-bucket-limits`
 
@@ -270,17 +418,20 @@ USAGE
   $ storage-node leader:set-bucket-limits
 
 OPTIONS
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -o, --objects=objects    (required) New 'voucher object number limit' value
-  -p, --password=password  Key file password (optional).
-  -s, --size=size          (required) New 'voucher object size limit' value
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -o, --objects=objects        (required) New 'voucher object number limit' value
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -s, --size=size              (required) New 'voucher object size limit' value
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/set-bucket-limits.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/set-bucket-limits.ts)_
+_See code: [src/commands/leader/set-bucket-limits.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/set-bucket-limits.ts)_
 
 ## `storage-node leader:set-global-uploading-status`
 
@@ -291,15 +442,18 @@ USAGE
   $ storage-node leader:set-global-uploading-status
 
 OPTIONS
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -s, --set=(on|off)       (required) Sets global uploading block (on/off).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -s, --set=(on|off)           (required) Sets global uploading block (on/off).
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/set-global-uploading-status.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/set-global-uploading-status.ts)_
+_See code: [src/commands/leader/set-global-uploading-status.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/set-global-uploading-status.ts)_
 
 ## `storage-node leader:update-bag`
 
@@ -317,36 +471,39 @@ OPTIONS
       show CLI help
 
   -i, --bagId=bagId
-      (required) 
-             Bag ID. Format: {bag_type}:{sub_type}:{id}.
-             - Bag types: 'static', 'dynamic'
-             - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
-             - Id: 
-               - absent for 'static:council'
-               - working group name for 'static:wg'
-               - integer for 'dynamic:member' and 'dynamic:channel'
-             Examples:
-             - static:council
-             - static:wg:storage
-             - dynamic:member:4
-
-  -k, --keyfile=keyfile
+      (required) Bag ID. Format: {bag_type}:{sub_type}:{id}.
+           - Bag types: 'static', 'dynamic'
+           - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
+           - Id:
+             - absent for 'static:council'
+             - working group name for 'static:wg'
+             - integer for 'dynamic:member' and 'dynamic:channel'
+           Examples:
+           - static:council
+           - static:wg:storage
+           - dynamic:member:4
+
+  -k, --keyFile=keyFile
       Key file for the account. Mandatory in non-dev environment.
 
   -m, --dev
       Use development mode
 
   -p, --password=password
-      Key file password (optional).
+      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
 
   -r, --remove=remove
       [default: ] ID of a bucket to remove from bag
 
   -u, --apiUrl=apiUrl
-      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+      [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri
+      Account URI (optional). Has a priority over the keyFile and password flags. Could be overriden by ACCOUNT_URI 
+      environment variable.
 ```
 
-_See code: [src/commands/leader/update-bag.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bag.ts)_
+_See code: [src/commands/leader/update-bag.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-bag.ts)_
 
 ## `storage-node leader:update-bag-limit`
 
@@ -357,15 +514,18 @@ USAGE
   $ storage-node leader:update-bag-limit
 
 OPTIONS
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -l, --limit=limit        (required) New StorageBucketsPerBagLimit value
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -l, --limit=limit            (required) New StorageBucketsPerBagLimit value
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-bag-limit.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bag-limit.ts)_
+_See code: [src/commands/leader/update-bag-limit.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-bag-limit.ts)_
 
 ## `storage-node leader:update-blacklist`
 
@@ -376,16 +536,19 @@ USAGE
   $ storage-node leader:update-blacklist
 
 OPTIONS
-  -a, --add=add            [default: ] Content ID to add
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -r, --remove=remove      [default: ] Content ID to remove
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -a, --add=add                [default: ] Content ID to add
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -r, --remove=remove          [default: ] Content ID to remove
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-blacklist.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-blacklist.ts)_
+_See code: [src/commands/leader/update-blacklist.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-blacklist.ts)_
 
 ## `storage-node leader:update-bucket-status`
 
@@ -396,18 +559,19 @@ USAGE
   $ storage-node leader:update-bucket-status
 
 OPTIONS
-  -d, --disable            Disables accepting new bags.
-  -e, --enable             Enables accepting new bags (default).
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -s, --set=(on|off)       (required) Sets 'accepting new bags' parameter for the bucket (on/off).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -s, --set=(on|off)           (required) Sets 'accepting new bags' parameter for the bucket (on/off).
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-bucket-status.ts)_
+_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-bucket-status.ts)_
 
 ## `storage-node leader:update-data-fee`
 
@@ -418,15 +582,18 @@ USAGE
   $ storage-node leader:update-data-fee
 
 OPTIONS
-  -f, --fee=fee            (required) New data size fee
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -f, --fee=fee                (required) New data size fee
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                               overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-data-fee.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-data-fee.ts)_
+_See code: [src/commands/leader/update-data-fee.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-data-fee.ts)_
 
 ## `storage-node leader:update-dynamic-bag-policy`
 
@@ -438,15 +605,18 @@ USAGE
 
 OPTIONS
   -h, --help                      show CLI help
-  -k, --keyfile=keyfile           Key file for the account. Mandatory in non-dev environment.
+  -k, --keyFile=keyFile           Key file for the account. Mandatory in non-dev environment.
   -m, --dev                       Use development mode
   -n, --number=number             (required) New storage buckets number
-  -p, --password=password         Key file password (optional).
+  -p, --password=password         Key file password (optional). Could be overriden by ACCOUNT_PWD environment variable.
   -t, --bagType=(Channel|Member)  (required) Dynamic bag type (Channel, Member).
-  -u, --apiUrl=apiUrl             Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -u, --apiUrl=apiUrl             [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri     Account URI (optional). Has a priority over the keyFile and password flags. Could be
+                                  overriden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-dynamic-bag-policy.ts)_
+_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-dynamic-bag-policy.ts)_
 
 ## `storage-node leader:update-voucher-limits`
 
@@ -457,16 +627,19 @@ USAGE
   $ storage-node leader:update-voucher-limits
 
 OPTIONS
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -o, --objects=objects    (required) New 'max voucher object number limit' value
-  -p, --password=password  Key file password (optional).
-  -s, --size=size          (required) New 'max voucher object size limit' value
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -h, --help                   show CLI help
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -o, --objects=objects        (required) New 'max voucher object number limit' value
+  -p, --password=password      Key file password (optional). Could be overridden by ACCOUNT_PWD environment variable.
+  -s, --size=size              (required) New 'max voucher object size limit' value
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+
+  -y, --accountUri=accountUri  Account URI (optional). Takes priority over the keyFile and password flags. Could be
+                               overridden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/leader/update-voucher-limits.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/leader/update-voucher-limits.ts)_
+_See code: [src/commands/leader/update-voucher-limits.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/leader/update-voucher-limits.ts)_
 
 ## `storage-node operator:accept-invitation`
 
@@ -477,37 +650,44 @@ USAGE
   $ storage-node operator:accept-invitation
 
 OPTIONS
-  -h, --help               show CLI help
-  -i, --bucketId=bucketId  (required) Storage bucket ID
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
-  -w, --workerId=workerId  (required) Storage operator worker ID
+  -h, --help                   show CLI help
+  -i, --bucketId=bucketId      (required) Storage bucket ID
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
+  -m, --dev                    Use development mode
+  -p, --password=password      Key file password (optional). Could be overridden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
+  -w, --workerId=workerId      (required) Storage operator worker ID
+
+  -y, --accountUri=accountUri  Account URI (optional). Takes priority over the keyFile and password flags. Could be
+                               overridden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/accept-invitation.ts)_
+_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/operator/accept-invitation.ts)_
 
 ## `storage-node operator:set-metadata`
 
-Accept pending storage bucket invitation.
+Set metadata for the storage bucket.
 
 ```
 USAGE
   $ storage-node operator:set-metadata
 
 OPTIONS
+  -e, --endpoint=endpoint      Root distribution node endpoint
   -h, --help                   show CLI help
   -i, --bucketId=bucketId      (required) Storage bucket ID
-  -k, --keyfile=keyfile        Key file for the account. Mandatory in non-dev environment.
+  -j, --jsonFile=jsonFile      Path to JSON metadata file
+  -k, --keyFile=keyFile        Key file for the account. Mandatory in non-dev environment.
   -m, --dev                    Use development mode
-  -m, --metadata=metadata      Storage bucket operator metadata
-  -p, --password=password      Key file password (optional).
-  -u, --apiUrl=apiUrl          Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
+  -p, --password=password      Key file password (optional). Could be overridden by ACCOUNT_PWD environment variable.
+  -u, --apiUrl=apiUrl          [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment.
   -w, --operatorId=operatorId  (required) Storage bucket operator ID (storage group worker ID)
+
+  -y, --accountUri=accountUri  Account URI (optional). Takes priority over the keyFile and password flags. Could be
+                               overridden by ACCOUNT_URI environment variable.
 ```
 
-_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/operator/set-metadata.ts)_
+_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/operator/set-metadata.ts)_
 
 ## `storage-node server`
 
@@ -518,16 +698,46 @@ USAGE
   $ storage-node server
 
 OPTIONS
-  -d, --uploads=uploads    (required) Data uploading directory (absolute path).
-  -h, --help               show CLI help
-  -k, --keyfile=keyfile    Key file for the account. Mandatory in non-dev environment.
-  -m, --dev                Use development mode
-  -o, --port=port          (required) Server port.
-  -p, --password=password  Key file password (optional).
-  -u, --apiUrl=apiUrl      Runtime API URL. Mandatory in non-dev environment. Default is ws://localhost:9944
-  -w, --worker=worker      (required) Storage provider worker ID
-```
+  -d, --uploads=uploads                              (required) Data uploading directory (absolute path).
+
+  -e, --elasticSearchEndpoint=elasticSearchEndpoint  Elasticsearch endpoint (e.g.: http://some.com:8081).
+                                                     Log level could be set using the ELASTIC_LOG_LEVEL environment
+                                                     variable.
+                                                     Supported values: warn, error, debug, info. Default: debug
+
+  -h, --help                                         show CLI help
 
-_See code: [src/commands/server.ts](https://github.com/Joystream/joystream/blob/v0.1.0/src/commands/server.ts)_
+  -i, --syncInterval=syncInterval                    [default: 1] Interval between synchronizations (in minutes)
+
+  -k, --keyFile=keyFile                              Key file for the account. Mandatory in non-dev environment.
+
+  -m, --dev                                          Use development mode
+
+  -o, --port=port                                    (required) Server port.
+
+  -p, --password=password                            Key file password (optional). Could be overridden by ACCOUNT_PWD
+                                                     environment variable.
+
+  -q, --queryNodeEndpoint=queryNodeEndpoint          (required) [default: http://localhost:8081/graphql] Query node
+                                                     endpoint (e.g.: http://some.com:8081/graphql)
+
+  -r, --syncWorkersNumber=syncWorkersNumber          [default: 20] Sync workers number (max async operations in
+                                                     progress).
+
+  -s, --sync                                         Enable data synchronization.
+
+  -t, --syncWorkersTimeout=syncWorkersTimeout        [default: 30] Asset downloading timeout for the synchronization (in
+                                                     minutes).
+
+  -u, --apiUrl=apiUrl                                [default: ws://localhost:9944] Runtime API URL. Mandatory in
+                                                     non-dev environment.
+
+  -w, --worker=worker                                (required) Storage provider worker ID
+
+  -y, --accountUri=accountUri                        Account URI (optional). Takes priority over the keyFile and
+                                                     password flags. Could be overridden by ACCOUNT_URI environment
+                                                     variable.
+```
 
+_See code: [src/commands/server.ts](https://github.com/Joystream/joystream/blob/v2.0.0/src/commands/server.ts)_
 <!-- commandsstop -->

+ 42 - 8
storage-node-v2/package.json

@@ -1,13 +1,16 @@
 {
   "name": "storage-node-v2",
   "description": "Joystream storage subsystem.",
-  "version": "0.1.0",
+  "version": "2.0.0",
   "author": "Joystream contributors",
   "bin": {
     "storage-node": "./bin/run"
   },
   "bugs": "https://github.com/Joystream/joystream/issues",
   "dependencies": {
+    "@apollo/client": "^3.3.21",
+    "@elastic/ecs-winston-format": "^1.3.1",
+    "@joystream/metadata-protobuf": "^1.0.0",
     "@joystream/types": "^0.17.0",
     "@oclif/command": "^1",
     "@oclif/config": "^1",
@@ -16,34 +19,57 @@
     "@types/base64url": "^2.0.0",
     "@types/express": "4.17.13",
     "@types/file-type": "^10.9.1",
+    "@types/lodash": "^4.14.171",
     "@types/multer": "^1.4.5",
     "@types/node-cache": "^4.2.5",
+    "@types/promise-timeout": "^1.3.0",
     "@types/read-chunk": "^3.1.0",
+    "@types/rimraf": "^3.0.2",
     "@types/send": "^0.17.0",
+    "@types/superagent": "^4.1.12",
+    "@types/url-join": "^4.0.1",
+    "@types/uuid": "^8.3.1",
     "@types/winston": "^2.4.4",
+    "ajv": "^7",
     "await-lock": "^2.1.0",
     "base64url": "^3.0.1",
     "blake3": "^2.1.4",
+    "cross-fetch": "^3.1.4",
     "express": "4.17.1",
-    "express-openapi-validator": "^4.12.4",
+    "express-openapi-validator": "4.12.4",
     "express-winston": "^4.1.0",
+    "fast-folder-size": "^1.4.0",
+    "fast-safe-stringify": "^2.1.1",
     "file-type": "^16.5.0",
     "lodash": "^4.17.21",
     "multihashes": "^4.0.2",
     "node-cache": "^5.1.2",
     "openapi-editor": "^0.3.0",
+    "promise-timeout": "^1.3.0",
     "read-chunk": "^3.2.0",
+    "rimraf": "^3.0.2",
     "send": "^0.17.1",
+    "sleep-promise": "^9.1.0",
+    "superagent": "^6.1.0",
     "tslib": "^1",
-    "winston": "^3.3.3"
+    "url-join": "^4.0.1",
+    "uuid": "^8.3.2",
+    "winston": "^3.3.3",
+    "winston-elasticsearch": "^0.15.8"
   },
   "devDependencies": {
+    "@graphql-codegen/cli": "^1.21.4",
+    "@graphql-codegen/import-types-preset": "^1.18.1",
+    "@graphql-codegen/typescript": "^1.22.0",
+    "@graphql-codegen/typescript-document-nodes": "^1.17.11",
+    "@graphql-codegen/typescript-operations": "^1.17.16",
     "@joystream/eslint-config": "^1.0.0",
     "@oclif/dev-cli": "^1",
     "@oclif/test": "^1",
     "@types/chai": "^4",
     "@types/mocha": "^5",
     "@types/node": "^10",
+    "@types/pg": "^8.6.1",
     "@types/swagger-ui-express": "^4.1.2",
     "@typescript-eslint/eslint-plugin": "3.8.0",
     "@typescript-eslint/parser": "3.8.0",
@@ -54,6 +80,7 @@
     "globby": "^10",
     "mocha": "^5",
     "nyc": "^14",
+    "pg": "^8.7.1",
     "prettier": "^2.3.0",
     "sinon": "^11.1.1",
     "swagger-ui-express": "^4.1.6",
@@ -88,11 +115,14 @@
       "@oclif/plugin-help"
     ],
     "topics": {
-      "wg": {
-        "description": "Storage working group commands."
+      "dev": {
+        "description": "Development mode commands."
       },
-      "wg:leader": {
+      "leader": {
         "description": "Storage working group leader commands."
+      },
+      "operator": {
+        "description": "Storage provider(operator) commands."
       }
     }
   },
@@ -107,9 +137,13 @@
     "prepack": "rm -rf lib && tsc -b && oclif-dev manifest && oclif-dev readme",
     "version": "oclif-dev readme && git add README.md",
     "build": "tsc --build tsconfig.json",
-    "format": "prettier ./src --write",
+    "format": "yarn prettier ./src --write",
     "lint": "eslint ./src --ext .ts",
-    "api:edit": "openapi-editor --file ./src/api-spec/openapi.yaml --port 10021"
+    "api:edit": "openapi-editor --file ./src/api-spec/openapi.yaml --port 10021",
+    "generate:types:graphql": "yarn graphql-codegen -c ./src/services/queryNode/codegen.yml",
+    "generate:types:json-schema": "yarn ts-node ./src/services/metadata/generateTypes.ts",
+    "ensure": "yarn format && yarn lint --fix && yarn build",
+    "checks": "tsc --noEmit --pretty && prettier ./src --check && yarn lint"
   },
   "types": "lib/index.d.ts"
 }

+ 4 - 3
storage-node-v2/scripts/init-dev-bucket.sh

@@ -11,7 +11,8 @@ CLI=../bin/run
 
 ${CLI} dev:init
 ${CLI} leader:update-bag-limit -l 7 --dev
-${CLI} leader:update-voucher-limits -o 100 -s 10000000 --dev
-BUCKET_ID=`${CLI} leader:create-bucket -i=0 -a -n=100 -s=10000000  --dev` 
+${CLI} leader:update-voucher-limits -o 10000 -s 1000000000 --dev
+BUCKET_ID=`${CLI} leader:create-bucket -i=0 -a -n=10000 -s=1000000000  --dev` 
 ${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev
-${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev 
+${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev 
+${CLI} operator:set-metadata -w 0 -i=${BUCKET_ID} -e http://localhost:3333 --dev

+ 12 - 0
storage-node-v2/scripts/operatorMetadata.json

@@ -0,0 +1,12 @@
+{
+  "endpoint": "http://localhost:3333",
+  "location": {
+    "countryCode": "US",
+    "city": "Chicago",
+    "coordinates": {
+      "latitude": 50,
+      "longitude": 50
+    }
+  },
+  "extra": "Extra"
+}

+ 2 - 1
storage-node-v2/scripts/run-all-commands.sh

@@ -25,7 +25,8 @@ ${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev
 ${CLI} leader:set-bucket-limits -i=${BUCKET_ID} -o=100 -s=10000000 --dev
 ${CLI} leader:update-bucket-status -i=${BUCKET_ID} --set on --dev
 ${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev 
-${CLI} operator:set-metadata -w=0 -i=${BUCKET_ID} -m=http://google.com --dev
+${CLI} operator:set-metadata -w=0 -i=${BUCKET_ID} -e=http://localhost:3333 --dev
+${CLI} operator:set-metadata -w=0 -i=${BUCKET_ID} -j=./operatorMetadata.json --dev
 
 # Create and delete a bucket
 BUCKET_ID=`${CLI} leader:create-bucket -a -n=100 -s=10000000  --dev` # bucketId = 1

+ 96 - 45
storage-node-v2/src/api-spec/openapi.yaml

@@ -15,21 +15,23 @@ servers:
   - url: http://localhost:3333/api/v1/
 
 tags:
-  - name: public
-    description: Public storage node API
+  - name: files
+    description: Storage node Files API
+  - name: state
+    description: Storage node State API
 
 paths:
-  /files/{cid}:
+  /files/{id}:
     get:
-      operationId: publicApi.getFile
+      operationId: filesApi.getFile
       description: Returns a media file.
       tags:
-        - public
+        - files
       parameters:
-        - name: cid
+        - name: id
           required: true
           in: path
-          description: Content ID
+          description: Data object ID
           schema:
             type: string
       responses:
@@ -73,15 +75,15 @@ paths:
         500:
           description: Unknown server error
     head:
-      operationId: publicApi.getFileHeaders
+      operationId: filesApi.getFileHeaders
       description: Returns a media file headers.
       tags:
-        - public
+        - files
       parameters:
-        - name: cid
+        - name: id
           required: true
           in: path
-          description: Content ID
+          description: Data object ID
           schema:
             type: string
       responses:
@@ -95,12 +97,10 @@ paths:
           description: Unknown server error
   /files:
     post:
-      security:
-        - UploadAuth: []
       description: Upload data
-      operationId: publicApi.uploadFile
+      operationId: filesApi.uploadFile
       tags:
-        - public
+        - files
       requestBody:
         content:
           multipart/form-data:
@@ -143,50 +143,70 @@ paths:
             application/json:
               schema:
                 $ref: '#/components/schemas/ErrorResponse'
-        401:
-          description: Unauthorized
 
-  /authToken:
-    post:
-      description: Get auth token from a server.
-      operationId: publicApi.authTokenForUploading
+  /state/data-objects:
+    get:
+      operationId: stateApi.getAllLocalDataObjects
+      description: Returns all local data objects.
       tags:
-        - public
-      requestBody:
-        description: Token request parameters,
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/TokenRequest'
+        - state
       responses:
-        201:
-          description: Created
+        200:
+          description: Ok
           content:
             application/json:
               schema:
-                type: object
-                properties:
-                  token:
-                    type: string
-        400:
-          description: Bad request
+                $ref: '#/components/schemas/DataObjectResponse'
+
+  /state/bags/{bagId}/data-objects:
+    get:
+      operationId: stateApi.getLocalDataObjectsByBagId
+      description: Returns local data objects for the bag.
+      tags:
+        - state
+      parameters:
+        - name: bagId
+          required: true
+          in: path
+          description: Bag ID
+          schema:
+            type: string
+      responses:
+        200:
+          description: Ok
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/ErrorResponse'
-        401:
-          description: Unauthorized
+                $ref: '#/components/schemas/DataObjectResponse'
+
+  /version:
+    get:
+      operationId: stateApi.getVersion
+      description: Returns server version.
+      tags:
+        - state
+      responses:
+        200:
+          description: Ok
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/ErrorResponse'
+                $ref: '#/components/schemas/VersionResponse'
+  /state/data:
+    get:
+      operationId: stateApi.getLocalDataStats
+      description: Returns local uploading directory stats.
+      tags:
+        - state
+      responses:
+        200:
+          description: Ok
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/DataStatsResponse'
 
 components:
-  securitySchemes:
-    UploadAuth:
-      type: apiKey
-      in: header
-      name: x-api-key
   schemas:
     TokenRequest:
       type: object
@@ -227,3 +247,34 @@ components:
           type: string
         message:
           type: string
+    DataStatsResponse:
+      type: object
+      required:
+        - totalSize
+        - objectNumber
+      properties:
+        totalSize:
+          type: integer
+          format: int64
+        objectNumber:
+          type: integer
+          format: int64
+        tempDirSize:
+          type: integer
+          format: int64
+        tempDownloads:
+          type: integer
+          format: int64
+    VersionResponse:
+      type: object
+      required:
+        - version
+      properties:
+        version:
+          type: string
+        userAgent:
+          type: string
+    DataObjectResponse:
+      type: array
+      items:
+        type: string
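A minimal sketch of calling the new State API routes added above, assuming a storage node listening on the default local endpoint used elsewhere in this diff (http://localhost:3333); route paths and response shapes follow the openapi.yaml changes, and `cross-fetch` is one of the new dependencies.

```typescript
// Hedged sketch: querying the State API defined in openapi.yaml above.
// The base URL is a placeholder for a locally running storage node.
import fetch from 'cross-fetch'

const base = 'http://localhost:3333/api/v1'

async function main(): Promise<void> {
  // GET /state/data-objects -> DataObjectResponse (array of data object IDs)
  const ids = (await (await fetch(`${base}/state/data-objects`)).json()) as string[]
  console.log(`Local data objects: ${ids.length}`)

  // GET /version -> VersionResponse
  const version = (await (await fetch(`${base}/version`)).json()) as { version: string; userAgent?: string }
  console.log(`Node version: ${version.version}`)

  // GET /state/data -> DataStatsResponse
  const stats = (await (await fetch(`${base}/state/data`)).json()) as { totalSize: number; objectNumber: number }
  console.log(`Stored ${stats.objectNumber} objects, ${stats.totalSize} bytes total`)
}

main().catch(console.error)
```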

+ 40 - 16
storage-node-v2/src/command-base/ApiCommandBase.ts

@@ -1,6 +1,7 @@
 import { Command, flags } from '@oclif/command'
 import { createApi } from '../services/runtime/api'
 import { getAccountFromJsonFile, getAlicePair, getAccountFromUri } from '../services/runtime/accounts'
+import { parseBagId } from '../services/helpers/bagTypes'
 import { KeyringPair } from '@polkadot/keyring/types'
 import { ApiPromise } from '@polkadot/api'
 import logger from '../services/logger'
@@ -23,7 +24,7 @@ export default abstract class ApiCommandBase extends Command {
       description: 'Runtime API URL. Mandatory in non-dev environment.',
       default: 'ws://localhost:9944',
     }),
-    keyfile: flags.string({
+    keyFile: flags.string({
       char: 'k',
       description: 'Key file for the account. Mandatory in non-dev environment.',
     }),
@@ -31,10 +32,29 @@ export default abstract class ApiCommandBase extends Command {
       char: 'p',
+      description: 'Key file password (optional). Could be overridden by ACCOUNT_PWD environment variable.',
     }),
-    accountURI: flags.string({
+    accountUri: flags.string({
       char: 'y',
       description:
-        'Account URI (optional). Has a priority over the keyfile and password flags. Could be overriden by ACCOUNT_URI environment variable.',
+        'Account URI (optional). Takes priority over the keyFile and password flags. Could be overridden by ACCOUNT_URI environment variable.',
+    }),
+  }
+
+  static extraFlags = {
+    bagId: flags.build({
+      parse: (value: string) => {
+        return parseBagId(value)
+      },
+      description: `Bag ID. Format: {bag_type}:{sub_type}:{id}.
+    - Bag types: 'static', 'dynamic'
+    - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
+    - Id:
+      - absent for 'static:council'
+      - working group name for 'static:wg'
+      - integer for 'dynamic:member' and 'dynamic:channel'
+    Examples:
+    - static:council
+    - static:wg:storage
+    - dynamic:member:4`,
     }),
   }
 
@@ -74,9 +94,13 @@ export default abstract class ApiCommandBase extends Command {
 
     // Some dev commands don't contain flag variables.
     const apiUrl = flags.apiUrl ?? 'ws://localhost:9944'
-    this.api = await createApi(apiUrl)
 
     logger.info(`Initialized runtime connection: ${apiUrl}`)
+    try {
+      this.api = await createApi(apiUrl)
+    } catch (err) {
+      logger.error(`Creating runtime API error: ${err.target?._url}`)
+    }
 
     await this.getApi()
   }
@@ -104,21 +128,21 @@ export default abstract class ApiCommandBase extends Command {
    * JSON-file or loads 'Alice' Keypair when in the development mode.
    *
    * @param dev - indicates the development mode (optional).
-   * @param keyfile - key file path (optional).
+   * @param keyFile - key file path (optional).
    * @param password - password for the key file (optional).
-   * @param accountURI - accountURI (optional). Overrides keyfile and password flags.
+   * @param accountURI - accountURI (optional). Overrides keyFile and password flags.
    * @returns KeyringPair instance.
    */
-  getAccount(flags: { dev: boolean; keyfile?: string; password?: string; accountURI?: string }): KeyringPair {
+  getAccount(flags: { dev: boolean; keyFile?: string; password?: string; accountUri?: string }): KeyringPair {
     // Select account URI variable from flags key and environment variable.
-    let accountURI = flags.accountURI ?? ''
+    let accountUri = flags.accountUri ?? ''
     if (!_.isEmpty(process.env.ACCOUNT_URI)) {
-      if (!_.isEmpty(flags.accountURI)) {
+      if (!_.isEmpty(flags.accountUri)) {
         logger.warn(
           `Both environment variable and command line argument were provided for the account URI. The environment variable takes priority.`
         )
       }
-      accountURI = process.env.ACCOUNT_URI ?? ''
+      accountUri = process.env.ACCOUNT_URI ?? ''
     }
 
     // Select password variable from flags key and environment variable.
@@ -132,18 +156,18 @@ export default abstract class ApiCommandBase extends Command {
       password = process.env.ACCOUNT_PWD ?? ''
     }
 
-    const keyfile = flags.keyfile ?? ''
+    const keyFile = flags.keyFile ?? ''
     // Create the Alice account for development mode.
     if (flags.dev) {
       return getAlicePair()
     }
     // Create an account using account URI
-    else if (!_.isEmpty(accountURI)) {
-      return getAccountFromUri(accountURI)
+    else if (!_.isEmpty(accountUri)) {
+      return getAccountFromUri(accountUri)
     }
-    // Create an account using the keyfile and password.
-    else if (!_.isEmpty(keyfile)) {
-      const account = getAccountFromJsonFile(keyfile)
+    // Create an account using the keyFile and password.
+    else if (!_.isEmpty(keyFile)) {
+      const account = getAccountFromJsonFile(keyFile)
       account.unlock(password)
 
       return account
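
A hypothetical command sketch showing how the shared flags and the new `extraFlags.bagId` builder above are meant to be combined. The command name and behaviour are illustrative only (not part of this changeset); the wiring mirrors the real commands in this diff, e.g. `leader:update-bag`.

```typescript
// Illustrative only: a made-up dev command using the shared flags and the
// bagId extra flag introduced in ApiCommandBase. Import path assumes it lives
// next to the other dev commands.
import ApiCommandBase from '../../command-base/ApiCommandBase'

export default class DevShowBag extends ApiCommandBase {
  static description = 'Print a parsed bag ID (hypothetical example command).'

  static flags = {
    bagId: ApiCommandBase.extraFlags.bagId({
      char: 'i',
      required: true,
    }),
    ...ApiCommandBase.flags,
  }

  async run(): Promise<void> {
    const { flags } = this.parse(DevShowBag)

    // The custom flag parser has already returned a runtime BagId instance.
    this.log(`Parsed bag ID: ${flags.bagId.toString()}`)
  }
}
```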

+ 3 - 0
storage-node-v2/src/command-base/ExitCodes.ts

@@ -8,6 +8,9 @@ enum ExitCodes {
   InvalidParameters = 100,
   DevelopmentModeOnly,
   FileError,
+  InvalidWorkerId,
+  InvalidIntegerArray,
+  ServerError,
   ApiError = 200,
   UnsuccessfulRuntimeCall,
 }

+ 4 - 0
storage-node-v2/src/commands/dev/init.ts

@@ -12,6 +12,10 @@ import ApiCommandBase from '../../command-base/ApiCommandBase'
 export default class DevInit extends ApiCommandBase {
   static description = 'Initialize development environment. Sets Alice as storage working group leader.'
 
+  static flags = {
+    ...ApiCommandBase.flags,
+  }
+
   async run(): Promise<void> {
     await this.ensureDevelopmentChain()
 

+ 77 - 0
storage-node-v2/src/commands/dev/sync.ts

@@ -0,0 +1,77 @@
+import { Command, flags } from '@oclif/command'
+import { performSync } from '../../services/sync/synchronizer'
+import logger from '../../services/logger'
+import stringify from 'fast-safe-stringify'
+
+/**
+ * CLI command:
+ * Synchronizes data: fixes the difference between node obligations and local
+ * storage.
+ *
+ * @remarks
+ * Should be run only during the development.
+ * Shell command: "dev:sync"
+ */
+export default class DevSync extends Command {
+  static description =
+    'Synchronizes the data - it fixes the differences between local data folder and worker ID obligations from the runtime.'
+
+  static flags = {
+    help: flags.help({ char: 'h' }),
+    workerId: flags.integer({
+      char: 'w',
+      required: true,
+      description: 'Storage node operator worker ID.',
+    }),
+    syncWorkersNumber: flags.integer({
+      char: 'p',
+      required: false,
+      description: 'Sync workers number (max async operations in progress).',
+      default: 20,
+    }),
+    syncWorkersTimeout: flags.integer({
+      char: 't',
+      required: false,
+      description: 'Asset downloading timeout for the synchronization (in minutes).',
+      default: 30,
+    }),
+    queryNodeEndpoint: flags.string({
+      char: 'q',
+      required: false,
+      default: 'http://localhost:8081/graphql',
+      description: 'Query node endpoint (e.g.: http://some.com:8081/graphql)',
+    }),
+    dataSourceOperatorUrl: flags.string({
+      char: 'o',
+      required: false,
+      description: 'Storage node url base (e.g.: http://some.com:3333) to get data from.',
+      default: 'http://localhost:3333',
+    }),
+    uploads: flags.string({
+      char: 'd',
+      required: true,
+      description: 'Data uploading directory (absolute path).',
+    }),
+  }
+
+  async run(): Promise<void> {
+    const { flags } = this.parse(DevSync)
+
+    logger.info('Syncing...')
+
+    try {
+      await performSync(
+        undefined,
+        flags.workerId,
+        flags.syncWorkersNumber,
+        flags.syncWorkersTimeout,
+        flags.queryNodeEndpoint,
+        flags.uploads,
+        flags.dataSourceOperatorUrl
+      )
+    } catch (err) {
+      logger.error(err)
+      logger.error(stringify(err))
+    }
+  }
+}

+ 5 - 23
storage-node-v2/src/commands/dev/verify-bag-id.ts

@@ -1,6 +1,5 @@
-import { flags } from '@oclif/command'
+import { Command, flags } from '@oclif/command'
 import ApiCommandBase from '../../command-base/ApiCommandBase'
-import { parseBagId } from '../../services/helpers/bagTypes'
 import logger from '../../services/logger'
 
 /**
@@ -11,37 +10,20 @@ import logger from '../../services/logger'
  * Should be run only during the development.
  * Shell command: "dev:verify-bag-id"
  */
-export default class DevVerifyBagId extends ApiCommandBase {
+export default class DevVerifyBagId extends Command {
   static description = 'The command verifies bag id supported by the storage node. Requires chain connection.'
 
   static flags = {
-    bagId: flags.string({
+    help: flags.help({ char: 'h' }),
+    bagId: ApiCommandBase.extraFlags.bagId({
       char: 'i',
       required: true,
-      description: `
-      Bag ID. Format: {bag_type}:{sub_type}:{id}.
-      - Bag types: 'static', 'dynamic'
-      - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
-      - Id: 
-        - absent for 'static:council'
-        - working group name for 'static:wg'
-        - integer for 'dynamic:member' and 'dynamic:channel'
-      Examples:
-      - static:council
-      - static:wg:storage
-      - dynamic:member:4
-      `,
     }),
-    ...ApiCommandBase.flags,
   }
 
   async run(): Promise<void> {
     const { flags } = this.parse(DevVerifyBagId)
 
-    const api = await this.getApi()
-    const parsedBagId = parseBagId(api, flags.bagId)
-
-    logger.info(`Correct bag id: ${flags.bagId}`)
-    logger.info(`Parsed: ${parsedBagId}`)
+    logger.info(`Parsed: ${flags.bagId}`)
   }
 }

+ 6 - 18
storage-node-v2/src/commands/leader/update-bag.ts

@@ -1,10 +1,10 @@
 import { flags } from '@oclif/command'
 import { updateStorageBucketsForBag } from '../../services/runtime/extrinsics'
 import ApiCommandBase from '../../command-base/ApiCommandBase'
-import { parseBagId } from '../../services/helpers/bagTypes'
 import logger from '../../services/logger'
 import ExitCodes from '../../command-base/ExitCodes'
 import _ from 'lodash'
+import { CLIError } from '@oclif/errors'
 
 // Custom 'integer array' oclif flag.
 const integerArrFlags = {
@@ -12,7 +12,9 @@ const integerArrFlags = {
     parse: (value: string) => {
       const arr: number[] = value.split(',').map((v) => {
         if (!/^-?\d+$/.test(v)) {
-          throw new Error(`Expected comma-separated integers, but received: ${value}`)
+          throw new CLIError(`Expected comma-separated integers, but received: ${value}`, {
+            exit: ExitCodes.InvalidIntegerArray,
+          })
         }
         return parseInt(v)
       })
@@ -43,22 +45,9 @@ export default class LeaderUpdateBag extends ApiCommandBase {
       description: 'ID of a bucket to remove from bag',
       default: [],
     }),
-    bagId: flags.string({
+    bagId: ApiCommandBase.extraFlags.bagId({
       char: 'i',
       required: true,
-      description: `
-      Bag ID. Format: {bag_type}:{sub_type}:{id}.
-      - Bag types: 'static', 'dynamic'
-      - Sub types: 'static:council', 'static:wg', 'dynamic:member', 'dynamic:channel'
-      - Id: 
-        - absent for 'static:council'
-        - working group name for 'static:wg'
-        - integer for 'dynamic:member' and 'dynamic:channel'
-      Examples:
-      - static:council
-      - static:wg:storage
-      - dynamic:member:4
-      `,
     }),
     ...ApiCommandBase.flags,
   }
@@ -78,9 +67,8 @@ export default class LeaderUpdateBag extends ApiCommandBase {
 
     const account = this.getAccount(flags)
     const api = await this.getApi()
-    const bagId = parseBagId(api, flags.bagId)
 
-    const success = await updateStorageBucketsForBag(api, bagId, account, flags.add, flags.remove)
+    const success = await updateStorageBucketsForBag(api, flags.bagId, account, flags.add, flags.remove)
 
     this.exitAfterRuntimeCall(success)
   }

+ 0 - 8
storage-node-v2/src/commands/leader/update-bucket-status.ts

@@ -20,14 +20,6 @@ export default class LeaderUpdateStorageBucketStatus extends ApiCommandBase {
       required: true,
       description: 'Storage bucket ID',
     }),
-    enable: flags.boolean({
-      char: 'e',
-      description: 'Enables accepting new bags (default).',
-    }),
-    disable: flags.boolean({
-      char: 'd',
-      description: 'Disables accepting new bags.',
-    }),
     set: flags.enum({
       char: 's',
       description: `Sets 'accepting new bags' parameter for the bucket (on/off).`,

+ 1 - 1
storage-node-v2/src/commands/leader/update-dynamic-bag-policy.ts

@@ -44,7 +44,7 @@ export default class LeaderUpdateDynamicBagPolicy extends ApiCommandBase {
     const newNumber = flags.number
 
     const api = await this.getApi()
-    const dynamicBagType = parseDynamicBagType(api, flags.bagType)
+    const dynamicBagType = parseDynamicBagType(flags.bagType)
     const success = await updateNumberOfStorageBucketsInDynamicBagCreationPolicy(
       api,
       account,

+ 21 - 9
storage-node-v2/src/commands/operator/set-metadata.ts

@@ -2,7 +2,9 @@ import { flags } from '@oclif/command'
 import { setStorageOperatorMetadata } from '../../services/runtime/extrinsics'
 import ApiCommandBase from '../../command-base/ApiCommandBase'
 import logger from '../../services/logger'
-
+import { ValidationService } from '../../services/metadata/validationService'
+import { StorageBucketOperatorMetadata, IStorageBucketOperatorMetadata } from '@joystream/metadata-protobuf'
+import fs from 'fs'
 /**
  * CLI command:
  * Sets metadata for the storage bucket.
@@ -13,7 +15,7 @@ import logger from '../../services/logger'
  * Shell command: "operator:set-metadata"
  */
 export default class OperatorSetMetadata extends ApiCommandBase {
-  static description = 'Accept pending storage bucket invitation.'
+  static description = 'Set metadata for the storage bucket.'
 
   static flags = {
     bucketId: flags.integer({
@@ -26,19 +28,29 @@ export default class OperatorSetMetadata extends ApiCommandBase {
       required: true,
       description: 'Storage bucket operator ID (storage group worker ID)',
     }),
-    metadata: flags.string({
-      char: 'm',
-      description: 'Storage bucket operator metadata',
+    endpoint: flags.string({
+      char: 'e',
+      description: 'Root distribution node endpoint',
+      exclusive: ['jsonFile'],
+    }),
+    jsonFile: flags.string({
+      char: 'j',
+      description: 'Path to JSON metadata file',
+      exclusive: ['endpoint'],
     }),
     ...ApiCommandBase.flags,
   }
 
   async run(): Promise<void> {
     const { flags } = this.parse(OperatorSetMetadata)
+    const { operatorId, bucketId, jsonFile, endpoint } = flags
+
+    const validation = new ValidationService()
+    const metadata: IStorageBucketOperatorMetadata = jsonFile
+      ? validation.validate('OperatorMetadata', JSON.parse(fs.readFileSync(jsonFile).toString()))
+      : { endpoint }
 
-    const operator = flags.operatorId
-    const bucket = flags.bucketId
-    const metadata = flags.metadata ?? ''
+    const encodedMetadata = '0x' + Buffer.from(StorageBucketOperatorMetadata.encode(metadata).finish()).toString('hex')
 
     logger.info('Setting the storage operator metadata...')
     if (flags.dev) {
@@ -48,7 +60,7 @@ export default class OperatorSetMetadata extends ApiCommandBase {
     const account = this.getAccount(flags)
 
     const api = await this.getApi()
-    const success = await setStorageOperatorMetadata(api, account, operator, bucket, metadata)
+    const success = await setStorageOperatorMetadata(api, account, operatorId, bucketId, encodedMetadata)
 
     this.exitAfterRuntimeCall(success)
   }
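
A hedged sketch of the metadata encoding used by the command above: the JSON (or endpoint-only) metadata is protobuf-encoded with `StorageBucketOperatorMetadata` and submitted as a hex string. The decode step is shown only to illustrate the round trip and assumes the class behaves like a standard protobufjs message, which is how the command uses it.

```typescript
// Round-trip sketch for the operator metadata encoding used in
// operator:set-metadata. The endpoint value is a placeholder.
import { StorageBucketOperatorMetadata } from '@joystream/metadata-protobuf'

const metadata = { endpoint: 'http://localhost:3333' }

// Encode to the hex string passed to setStorageOperatorMetadata.
const encoded = '0x' + Buffer.from(StorageBucketOperatorMetadata.encode(metadata).finish()).toString('hex')
console.log(encoded)

// Decode it back to verify the round trip.
const decoded = StorageBucketOperatorMetadata.decode(Buffer.from(encoded.slice(2), 'hex'))
console.log(decoded.endpoint) // http://localhost:3333
```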

+ 165 - 5
storage-node-v2/src/commands/server.ts

@@ -1,7 +1,18 @@
 import { flags } from '@oclif/command'
 import { createApp } from '../services/webApi/app'
 import ApiCommandBase from '../command-base/ApiCommandBase'
-import logger from '../services/logger'
+import logger, { initElasticLogger } from '../services/logger'
+import { loadDataObjectIdCache } from '../services/caching/localDataObjects'
+import { ApiPromise } from '@polkadot/api'
+import { performSync, TempDirName } from '../services/sync/synchronizer'
+import sleep from 'sleep-promise'
+import rimraf from 'rimraf'
+import _ from 'lodash'
+import path from 'path'
+import { promisify } from 'util'
+import ExitCodes from './../command-base/ExitCodes'
+import fs from 'fs'
+const fsPromises = fs.promises
 
 /**
  * CLI command:
@@ -29,30 +40,112 @@ export default class Server extends ApiCommandBase {
       required: true,
       description: 'Server port.',
     }),
+    sync: flags.boolean({
+      char: 's',
+      description: 'Enable data synchronization.',
+      default: false,
+    }),
+    syncInterval: flags.integer({
+      char: 'i',
+      description: 'Interval between synchronizations (in minutes)',
+      default: 1,
+    }),
+    queryNodeEndpoint: flags.string({
+      char: 'q',
+      required: true,
+      default: 'http://localhost:8081/graphql',
+      description: 'Query node endpoint (e.g.: http://some.com:8081/graphql)',
+    }),
+    syncWorkersNumber: flags.integer({
+      char: 'r',
+      required: false,
+      description: 'Sync workers number (max async operations in progress).',
+      default: 20,
+    }),
+    syncWorkersTimeout: flags.integer({
+      char: 't',
+      required: false,
+      description: 'Asset downloading timeout for the synchronization (in minutes).',
+      default: 30,
+    }),
+    elasticSearchEndpoint: flags.string({
+      char: 'e',
+      required: false,
+      description: `Elasticsearch endpoint (e.g.: http://some.com:8081).
+Log level could be set using the ELASTIC_LOG_LEVEL environment variable.
+Supported values: warn, error, debug, info. Default: debug`,
+    }),
     ...ApiCommandBase.flags,
   }
 
   async run(): Promise<void> {
     const { flags } = this.parse(Server)
 
+    await recreateTempDirectory(flags.uploads, TempDirName)
+
+    const logSource = `StorageProvider_${flags.worker}`
+
+    if (fs.existsSync(flags.uploads)) {
+      await loadDataObjectIdCache(flags.uploads, TempDirName)
+    }
+
+    if (!_.isEmpty(flags.elasticSearchEndpoint)) {
+      initElasticLogger(logSource, flags.elasticSearchEndpoint ?? '')
+    }
+
+    logger.info(`Query node endpoint set: ${flags.queryNodeEndpoint}`)
+
     if (flags.dev) {
       await this.ensureDevelopmentChain()
     }
 
-    const account = this.getAccount(flags)
     const api = await this.getApi()
 
+    if (flags.sync) {
+      logger.info(`Synchronization enabled.`)
+      setTimeout(
+        async () =>
+          runSyncWithInterval(
+            api,
+            flags.worker,
+            flags.queryNodeEndpoint,
+            flags.uploads,
+            TempDirName,
+            flags.syncWorkersNumber,
+            flags.syncWorkersTimeout,
+            flags.syncInterval
+          ),
+        0
+      )
+    }
+
+    const storageProviderAccount = this.getAccount(flags)
+
     try {
       const port = flags.port
-      const workerId = flags.worker ?? 0
+      const workerId = flags.worker
       const maxFileSize = await api.consts.storage.maxDataObjectSize.toNumber()
+      const tempFileUploadingDir = path.join(flags.uploads, TempDirName)
       logger.debug(`Max file size runtime parameter: ${maxFileSize}`)
 
-      const app = await createApp(api, account, workerId, flags.uploads, maxFileSize)
+      const app = await createApp({
+        api,
+        storageProviderAccount,
+        workerId,
+        maxFileSize,
+        uploadsDir: flags.uploads,
+        tempFileUploadingDir,
+        process: this.config,
+        queryNodeEndpoint: flags.queryNodeEndpoint,
+        enableUploadingAuth: false,
+        elasticSearchEndpoint: flags.elasticSearchEndpoint,
+        logSource,
+      })
       logger.info(`Listening on http://localhost:${port}`)
       app.listen(port)
     } catch (err) {
-      logger.error(`Error: ${err}`)
+      logger.error(`Server error: ${err}`)
+      this.exit(ExitCodes.ServerError)
     }
   }
 
@@ -60,3 +153,70 @@ export default class Server extends ApiCommandBase {
   /* eslint-disable @typescript-eslint/no-empty-function */
   async finally(): Promise<void> {}
 }
+
+/**
+ * Run the data synchronization process.
+ *
+ * @param api - runtime API promise
+ * @param workerId - worker ID
+ * @param queryNodeUrl - Query Node for data fetching
+ * @param uploadsDir - data uploading directory
+ * @param tempDirectory - temporary data uploading directory
+ * @param syncWorkersNumber - defines a number of the async processes for sync
+ * @param syncWorkersTimeout - downloading asset timeout
+ * @param syncIntervalMinutes - defines an interval between sync runs
+ *
+ * @returns void promise.
+ */
+async function runSyncWithInterval(
+  api: ApiPromise,
+  workerId: number,
+  queryNodeUrl: string,
+  uploadsDirectory: string,
+  tempDirectory: string,
+  syncWorkersNumber: number,
+  syncWorkersTimeout: number,
+  syncIntervalMinutes: number
+) {
+  const sleepInterval = syncIntervalMinutes * 60 * 1000
+  while (true) {
+    logger.info(`Sync paused for ${syncIntervalMinutes} minute(s).`)
+    await sleep(sleepInterval)
+    try {
+      logger.info(`Resuming sync...`)
+      await performSync(
+        api,
+        workerId,
+        syncWorkersNumber,
+        syncWorkersTimeout,
+        queryNodeUrl,
+        uploadsDirectory,
+        tempDirectory
+      )
+    } catch (err) {
+      logger.error(`Critical sync error: ${err}`)
+    }
+  }
+}
+
+/**
+ * Removes and recreates the temporary directory from the uploading directory.
+ * All files in the temp directory are deleted.
+ *
+ * @param uploadsDirectory - data uploading directory
+ * @param tempDirName - temporary directory name within the uploading directory
+ * @returns void promise.
+ */
+async function recreateTempDirectory(uploadsDirectory: string, tempDirName: string): Promise<void> {
+  try {
+    const tempFileUploadingDir = path.join(uploadsDirectory, tempDirName)
+
+    logger.info(`Removing temp directory ...`)
+    const rimrafAsync = promisify(rimraf)
+    await rimrafAsync(tempFileUploadingDir)
+
+    logger.info(`Creating temp directory ...`)
+    await fsPromises.mkdir(tempFileUploadingDir)
+  } catch (err) {
+    logger.error(`Temp directory IO error: ${err}`)
+  }
+}

+ 84 - 0
storage-node-v2/src/services/caching/localDataObjects.ts

@@ -0,0 +1,84 @@
+import AwaitLock from 'await-lock'
+import path from 'path'
+import fs from 'fs'
+import logger from '../logger'
+const fsPromises = fs.promises
+
+// Local in-memory cache for IDs.
+let idCache = new Set<string>()
+
+const lock = new AwaitLock()
+
+/**
+ * Return the current ID cache.
+ *
+ * @returns ID array.
+ *
+ */
+export async function getDataObjectIDs(): Promise<string[]> {
+  await lock.acquireAsync()
+  const ids = Array.from(idCache)
+  lock.release()
+
+  return ids
+}
+
+/**
+ * Loads ID cache from the uploading directory.
+ *
+ * @returns empty promise.
+ *
+ * @param uploadDir - uploading directory
+ * @param tempDirName - temp directory name
+ */
+export async function loadDataObjectIdCache(uploadDir: string, tempDirName: string): Promise<void> {
+  await lock.acquireAsync()
+
+  const localIds = await getLocalFileNames(uploadDir)
+  // Filter temporary directory name.
+  const tempDirectoryName = path.parse(tempDirName).name
+  const ids = localIds.filter((dataObjectId) => dataObjectId !== tempDirectoryName)
+
+  idCache = new Set(ids)
+
+  logger.debug(`Local ID cache loaded.`)
+
+  lock.release()
+}
+
+/**
+ * Adds data object ID to the local cache.
+ *
+ * @param dataObjectId - data object ID
+ *
+ * @returns empty promise.
+ */
+export async function addDataObjectIdToCache(dataObjectId: string): Promise<void> {
+  await lock.acquireAsync()
+
+  idCache.add(dataObjectId)
+
+  lock.release()
+}
+
+/**
+ * Deletes data object ID from the local cache.
+ *
+ * @param dataObjectId - data object ID
+ */
+export async function deleteDataObjectIdFromCache(dataObjectId: string): Promise<void> {
+  await lock.acquireAsync()
+
+  idCache.delete(dataObjectId)
+
+  lock.release()
+}
+
+/**
+ * Returns file names from the local directory.
+ *
+ * @param directory - local directory to get file names from
+ */
+function getLocalFileNames(directory: string): Promise<string[]> {
+  return fsPromises.readdir(directory)
+}
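
An illustrative usage of the ID cache defined above, assuming an uploads directory laid out the way the server command expects (a flat directory of data object files plus a temp subdirectory). The paths and IDs are placeholders.

```typescript
// Illustrative usage of the local data object ID cache.
import {
  loadDataObjectIdCache,
  addDataObjectIdToCache,
  getDataObjectIDs,
} from './services/caching/localDataObjects'

async function demo(): Promise<void> {
  // Populate the cache from files already present in the uploads directory,
  // skipping the temp subdirectory.
  await loadDataObjectIdCache('/data/uploads', 'temp')

  // Register a freshly accepted object and read the full ID list back.
  await addDataObjectIdToCache('1234')
  const ids = await getDataObjectIDs()
  console.log(`Known data objects: ${ids.length}`)
}

demo().catch(console.error)
```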

+ 35 - 0
storage-node-v2/src/services/caching/newUploads.ts

@@ -0,0 +1,35 @@
+import NodeCache from 'node-cache'
+
+// Expiration period in seconds for the new uploads data.
+const ExpirationPeriod = 3600 // seconds (1 hour)
+
+// Max ID number in local cache
+const MaxEntries = 100000
+
+// Local in-memory cache for new data objects.
+const newDataObjects = new NodeCache({
+  stdTTL: ExpirationPeriod,
+  deleteOnExpire: true,
+  maxKeys: MaxEntries,
+})
+
+/**
+ * Adds a data object ID to the cache for new data objects with expiration time.
+ *
+ * @param dataObjectId - data object ID.
+ */
+export function registerNewDataObjectId(dataObjectId: string): void {
+  newDataObjects.set(dataObjectId, null, ExpirationPeriod)
+}
+
+/**
+ * Verifies that a data object with the provided ID was recently uploaded.
+ *
+ * @param dataObjectId - data object ID.
+ * @returns true if ID was present in local cache.
+ */
+export function isNewDataObject(dataObjectId: string): boolean {
+  return newDataObjects.has(dataObjectId)
+}
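
A short usage sketch for the new-uploads cache above; the IDs are placeholders and the one-hour TTL comes from `ExpirationPeriod`.

```typescript
// Usage sketch for the new-uploads cache (placeholder IDs).
import { registerNewDataObjectId, isNewDataObject } from './services/caching/newUploads'

registerNewDataObjectId('42')

console.log(isNewDataObject('42')) // true (until the 1 hour TTL expires)
console.log(isNewDataObject('43')) // false
```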

+ 2 - 2
storage-node-v2/src/services/helpers/tokenNonceKeeper.ts → storage-node-v2/src/services/caching/tokenNonceKeeper.ts

@@ -1,7 +1,7 @@
 import NodeCache from 'node-cache'
 
 // Expiration period in seconds for the local nonce cache.
-const TokenExpirationPeriod: number = 30 * 1000 // seconds
+const TokenExpirationPeriod = 30 // seconds
 
 // Max nonce number in local cache
 const MaxNonces = 100000
@@ -17,7 +17,7 @@ const nonceCache = new NodeCache({
  * Constructs and returns an expiration time for a token.
  */
 export function getTokenExpirationTime(): number {
-  return Date.now() + TokenExpirationPeriod
+  return Date.now() + 1000 * TokenExpirationPeriod
 }
 
 /**

+ 4 - 3
storage-node-v2/src/services/helpers/auth.ts

@@ -2,6 +2,7 @@ import { KeyringPair } from '@polkadot/keyring/types'
 import { u8aToHex } from '@polkadot/util'
 import { signatureVerify } from '@polkadot/util-crypto'
 import base64url from 'base64url'
+import stringify from 'fast-safe-stringify'
 
 /**
  * Represents an upload token request.
@@ -97,7 +98,7 @@ export function parseUploadToken(tokenString: string): UploadToken {
  * @returns The UploadToken instance.
  */
 export function verifyTokenSignature(token: UploadToken | UploadTokenRequest, address: string): boolean {
-  const message = JSON.stringify(token.data)
+  const message = stringify(token.data)
   const { isValid } = signatureVerify(message, token.signature, address)
 
   return isValid
@@ -111,7 +112,7 @@ export function verifyTokenSignature(token: UploadToken | UploadTokenRequest, ad
  * @returns object signature.
  */
 export function signTokenBody(tokenBody: UploadTokenBody | UploadTokenRequestBody, account: KeyringPair): string {
-  const message = JSON.stringify(tokenBody)
+  const message = stringify(tokenBody)
   const signature = u8aToHex(account.sign(message))
 
   return signature
@@ -132,5 +133,5 @@ export function createUploadToken(tokenBody: UploadTokenBody, account: KeyringPa
     signature,
   }
 
-  return base64url.encode(JSON.stringify(token))
+  return base64url.encode(stringify(token))
 }

+ 29 - 27
storage-node-v2/src/services/helpers/bagTypes.ts

@@ -1,9 +1,21 @@
 import { BagId, DynamicBagType, DynamicBagTypeKey, Static, Dynamic } from '@joystream/types/storage'
 import { WorkingGroup } from '@joystream/types/common'
-import { ApiPromise } from '@polkadot/api'
+import { createType } from '@joystream/types'
 import ExitCodes from '../../command-base/ExitCodes'
 import { CLIError } from '@oclif/errors'
 
+/**
+ * Special error type for bagId parsing. Extends CLIError, setting
+ * the `InvalidParameters` exit code.
+ */
+export class BagIdValidationError extends CLIError {
+  constructor(err: string) {
+    super(err, {
+      exit: ExitCodes.InvalidParameters,
+    })
+  }
+}
+
 /**
  * Parses the type string and returns the DynamicBagType instance.
  *
@@ -14,8 +26,8 @@ import { CLIError } from '@oclif/errors'
  * @param bagType - dynamic bag type string
  * @returns The DynamicBagType instance.
  */
-export function parseDynamicBagType(api: ApiPromise, bagType: DynamicBagTypeKey): DynamicBagType {
-  return api.createType('DynamicBagType', bagType)
+export function parseDynamicBagType(bagType: DynamicBagTypeKey): DynamicBagType {
+  return createType('DynamicBagType', bagType)
 }
 
 /**
@@ -29,8 +41,8 @@ export function parseDynamicBagType(api: ApiPromise, bagType: DynamicBagTypeKey)
  * @param bagId - bag ID in string format
  * @returns The BagId instance.
  */
-export function parseBagId(api: ApiPromise, bagId: string): BagId {
-  const parser = new BagIdParser(api, bagId)
+export function parseBagId(bagId: string): BagId {
+  const parser = new BagIdParser(bagId)
 
   return parser.parse()
 }
@@ -40,19 +52,15 @@ export function parseBagId(api: ApiPromise, bagId: string): BagId {
  */
 class BagIdParser {
   bagId: string
-  api: ApiPromise
   bagIdParts: string[]
 
-  constructor(api: ApiPromise, bagId: string) {
+  constructor(bagId: string) {
     this.bagId = bagId
-    this.api = api
 
     this.bagIdParts = bagId.trim().toLowerCase().split(':')
 
     if (this.bagIdParts.length > 3 || this.bagIdParts.length < 2) {
-      throw new CLIError(`Invalid bagId: ${bagId}`, {
-        exit: ExitCodes.InvalidParameters,
-      })
+      throw new BagIdValidationError(`Invalid bagId: ${bagId}`)
     }
   }
 
@@ -69,9 +77,7 @@ class BagIdParser {
       return this.parseDynamicBagId()
     }
 
-    throw new CLIError(`Invalid bagId: ${this.bagId}`, {
-      exit: ExitCodes.InvalidParameters,
-    })
+    throw new BagIdValidationError(`Invalid bagId: ${this.bagId}`)
   }
 
   /**
@@ -81,8 +87,8 @@ class BagIdParser {
     // Try to construct static council bag ID.
     if (this.bagIdParts[1] === 'council') {
       if (this.bagIdParts.length === 2) {
-        const staticBagId: Static = this.api.createType('Static', 'Council')
-        const constructedBagId: BagId = this.api.createType('BagId', {
+        const staticBagId: Static = createType('Static', 'Council')
+        const constructedBagId: BagId = createType('BagId', {
           'Static': staticBagId,
         })
 
@@ -98,11 +104,11 @@ class BagIdParser {
 
         for (const group of groups) {
           if (group.toLowerCase() === actualGroup) {
-            const workingGroup: WorkingGroup = this.api.createType('WorkingGroup', group)
-            const staticBagId: Static = this.api.createType('Static', {
+            const workingGroup: WorkingGroup = createType('WorkingGroup', group)
+            const staticBagId: Static = createType('Static', {
               'WorkingGroup': workingGroup,
             })
-            const constructedBagId: BagId = this.api.createType('BagId', {
+            const constructedBagId: BagId = createType('BagId', {
               'Static': staticBagId,
             })
 
@@ -112,9 +118,7 @@ class BagIdParser {
       }
     }
 
-    throw new CLIError(`Invalid static bagId: ${this.bagId}`, {
-      exit: ExitCodes.InvalidParameters,
-    })
+    throw new BagIdValidationError(`Invalid static bagId: ${this.bagId}`)
   }
 
   /**
@@ -136,8 +140,8 @@ class BagIdParser {
             const dynamic = {} as Record<DynamicBagTypeKey, number>
             dynamic[dynamicBagType as DynamicBagTypeKey] = parsedId
 
-            const dynamicBagId: Dynamic = this.api.createType('Dynamic', dynamic)
-            const constructedBagId: BagId = this.api.createType('BagId', {
+            const dynamicBagId: Dynamic = createType('Dynamic', dynamic)
+            const constructedBagId: BagId = createType('BagId', {
               'Dynamic': dynamicBagId,
             })
 
@@ -147,8 +151,6 @@ class BagIdParser {
       }
     }
 
-    throw new CLIError(`Invalid dynamic bagId: ${this.bagId}`, {
-      exit: ExitCodes.InvalidParameters,
-    })
+    throw new BagIdValidationError(`Invalid dynamic bagId: ${this.bagId}`)
   }
 }
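
A small sketch of the parser above; the inputs mirror the examples given in the bagId flag description, and invalid inputs throw `BagIdValidationError`, which exits with the `InvalidParameters` code.

```typescript
// Sketch of parseBagId / parseDynamicBagType from this file.
import { parseBagId, parseDynamicBagType, BagIdValidationError } from './services/helpers/bagTypes'

const council = parseBagId('static:council')
const member = parseBagId('dynamic:member:4')
const bagType = parseDynamicBagType('Channel')

console.log(council.toString(), member.toString(), bagType.toString())

try {
  parseBagId('not-a-bag-id')
} catch (err) {
  console.log(err instanceof BagIdValidationError) // true
}
```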

+ 134 - 30
storage-node-v2/src/services/logger.ts

@@ -1,28 +1,34 @@
-import winston from 'winston'
+import winston, { transport } from 'winston'
+import ecsformat from '@elastic/ecs-winston-format'
 import expressWinston from 'express-winston'
 import { Handler, ErrorRequestHandler } from 'express'
+import { ElasticsearchTransport } from 'winston-elasticsearch'
+
+/**
+ * Possible log levels.
+ */
+const levels = {
+  error: 0,
+  warn: 1,
+  info: 2,
+  http: 3,
+  debug: 4,
+}
 
 /**
  * Creates basic Winston logger. Console output redirected to the stderr.
  *
- * @returns Winston logger
+ * @returns Winston logger options
  *
  */
-function createDefaultLogger(): winston.Logger {
-  const levels = {
-    error: 0,
-    warn: 1,
-    info: 2,
-    http: 3,
-    debug: 4,
-  }
-
+function createDefaultLoggerOptions(): winston.LoggerOptions {
   const level = () => {
     const env = process.env.NODE_ENV || 'development'
     const isDevelopment = env === 'development'
     return isDevelopment ? 'debug' : 'warn'
   }
 
+  // Colors
   const colors = {
     error: 'red',
     warn: 'yellow',
@@ -30,39 +36,76 @@ function createDefaultLogger(): winston.Logger {
     http: 'magenta',
     debug: 'white',
   }
-
   winston.addColors(colors)
 
+  // Formats
   const format = winston.format.combine(
     winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss:ms' }),
-    winston.format.colorize({ all: true }),
+    winston.format.colorize(),
     winston.format.printf((info) => `${info.timestamp} ${info.level}: ${info.message}`)
   )
 
   // Redirect all logs to the stderr
-  const transports = [new winston.transports.Console({ stderrLevels: Object.keys(levels) })]
+  const transports = [new winston.transports.Console({ stderrLevels: Object.keys(levels), format })]
 
-  return winston.createLogger({
+  return {
     level: level(),
     levels,
-    format,
     transports,
-  })
+  }
+}
+
+/**
+ * Creates basic Winston logger.
+ *
+ * @returns Winston logger
+ *
+ */
+function createDefaultLogger(): winston.Logger {
+  const defaultOptions = createDefaultLoggerOptions()
+
+  return winston.createLogger(defaultOptions)
 }
 
-const Logger = createDefaultLogger()
+// Default global logger variable
+let InnerLogger = createDefaultLogger()
+
+// Enables changing the underlying logger which is default import in other modules.
+const proxy = new Proxy(InnerLogger, {
+  get(target: winston.Logger, propKey: symbol) {
+    const method = Reflect.get(target, propKey)
+    return (...args: unknown[]) => {
+      return method.apply(InnerLogger, args)
+    }
+  },
+})
+
+export default proxy
 
-export default Logger
 /**
  * Creates Express-Winston logger handler.
- *
+ * @param logSource - source tag for log entries.
+ * @param elasticSearchEndpoint - elastic search engine endpoint (optional).
  * @returns  Express-Winston logger handler
  *
  */
-export function httpLogger(): Handler {
+export function httpLogger(logSource: string, elasticSearchEndpoint?: string): Handler {
+  // ElasticSearch server date format.
+  const elasticDateFormat = 'YYYY-MM-DDTHH:mm:ss'
+
+  const transports: winston.transport[] = [
+    new winston.transports.Console({
+      format: winston.format.combine(winston.format.timestamp({ format: elasticDateFormat }), winston.format.json()),
+    }),
+  ]
+
+  if (elasticSearchEndpoint) {
+    const esTransport = createElasticTransport(logSource, elasticSearchEndpoint)
+    transports.push(esTransport)
+  }
+
   const opts: expressWinston.LoggerOptions = {
-    transports: [new winston.transports.Console()],
-    format: winston.format.combine(winston.format.json()),
+    transports,
     meta: true,
     msg: 'HTTP {{req.method}} {{req.url}}',
     expressFormat: true,
@@ -92,13 +135,6 @@ export function errorLogger(): ErrorRequestHandler {
  *
  */
 export function createStdConsoleLogger(): winston.Logger {
-  const levels = {
-    error: 0,
-    warn: 1,
-    info: 2,
-    http: 3,
-    debug: 4,
-  }
   const format = winston.format.printf((info) => `${info.message}`)
 
   const transports = [new winston.transports.Console()]
@@ -109,3 +145,71 @@ export function createStdConsoleLogger(): winston.Logger {
     transports,
   })
 }
+/**
+ * Creates Winston logger with Elastic search.
+ * @param logSource - source tag for log entries.
+ * @param elasticSearchEndpoint - elastic search engine endpoint.
+ * @returns Winston logger
+ *
+ */
+function createElasticLogger(logSource: string, elasticSearchEndpoint: string): winston.Logger {
+  const loggerOptions = createDefaultLoggerOptions()
+
+  // Transports
+  let transports: transport[] = []
+  if (loggerOptions.transports !== undefined) {
+    transports = Array.isArray(loggerOptions.transports) ? loggerOptions.transports : [loggerOptions.transports]
+  }
+
+  const esTransport = createElasticTransport(logSource, elasticSearchEndpoint)
+  transports.push(esTransport)
+
+  // Logger
+  const logger = winston.createLogger(loggerOptions)
+
+  // Handle logger error.
+  logger.on('error', (err) => {
+    // Allow console for logging errors of the logger.
+    /* eslint-disable no-console */
+    console.error('Error in logger caught:', err)
+  })
+
+  return logger
+}
+
+/**
+ * Updates the default system logger with elastic search capabilities.
+ *
+ * @param logSource - source tag for log entries.
+ * @param elasticSearchEndpoint - elastic search engine endpoint.
+ */
+export function initElasticLogger(logSource: string, elasticSearchEndpoint: string): void {
+  InnerLogger = createElasticLogger(logSource, elasticSearchEndpoint)
+}
+
+/**
+ * Creates winston logger transport for the elastic search engine.
+ *
+ * @param logSource - source tag for log entries.
+ * @param elasticSearchEndpoint - elastic search engine endpoint.
+ * @returns elastic search winston transport
+ */
+function createElasticTransport(logSource: string, elasticSearchEndpoint: string): winston.transport {
+  const possibleLevels = ['warn', 'error', 'debug', 'info']
+
+  let elasticLogLevel = process.env.ELASTIC_LOG_LEVEL ?? ''
+  elasticLogLevel = elasticLogLevel.toLowerCase().trim()
+
+  if (!possibleLevels.includes(elasticLogLevel)) {
+    elasticLogLevel = 'debug' // default
+  }
+
+  const esTransportOpts = {
+    level: elasticLogLevel,
+    clientOpts: { node: elasticSearchEndpoint, maxRetries: 5 },
+    index: 'storage-node',
+    format: ecsformat(),
+    source: logSource,
+  }
+  return new ElasticsearchTransport(esTransportOpts)
+}
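
A minimal usage sketch for the new logger API, assuming the module is imported from './services/logger' and an Express app is being set up; the log source tag and Elasticsearch URL are hypothetical:

  import express from 'express'
  import logger, { initElasticLogger, httpLogger } from './services/logger'

  const app = express()
  const elasticSearchEndpoint = 'http://localhost:9200' // hypothetical endpoint

  // Swap the proxied default logger for an Elasticsearch-backed one.
  initElasticLogger('storage-node', elasticSearchEndpoint)

  // HTTP request logging goes to the console and, since the endpoint is provided, to Elasticsearch as well.
  app.use(httpLogger('storage-node', elasticSearchEndpoint))

  // Modules that imported the default export keep working and now log through the new instance.
  logger.info('Logger initialized')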

+ 20 - 0
storage-node-v2/src/services/metadata/generateTypes.ts

@@ -0,0 +1,20 @@
+/**
+ * Script that generates TypeScript definition files (e.g. OperatorMetadataJson) from the JSON schemas.
+ */
+
+import fs from 'fs'
+import path from 'path'
+import { compile } from 'json-schema-to-typescript'
+import { schemas } from './schemas'
+
+// eslint-disable-next-line @typescript-eslint/no-var-requires
+const prettierConfig = require('@joystream/prettier-config')
+
+Object.entries(schemas).forEach(([schemaKey, schema]) => {
+  compile(schema, `${schemaKey}Json`, { style: prettierConfig }).then(
+    (output) => fs.writeFileSync(path.resolve(__dirname, `./generated/${schemaKey}Json.d.ts`), output),
+    () => {
+      // onReject
+    }
+  )
+})

+ 19 - 0
storage-node-v2/src/services/metadata/generated/OperatorMetadataJson.d.ts

@@ -0,0 +1,19 @@
+/* tslint:disable */
+/**
+ * This file was automatically generated by json-schema-to-typescript.
+ * DO NOT MODIFY IT BY HAND. Instead, modify the source JSONSchema file,
+ * and run json-schema-to-typescript to regenerate this file.
+ */
+
+export interface OperatorMetadataJson {
+  endpoint?: string
+  location?: {
+    countryCode?: string
+    city?: string
+    coordinates?: {
+      latitude?: number
+      longitude?: number
+    }
+  }
+  extra?: string
+}

+ 12 - 0
storage-node-v2/src/services/metadata/schemas/index.ts

@@ -0,0 +1,12 @@
+import { OperatorMetadataJson } from '../generated/OperatorMetadataJson'
+import { operatorMetadataSchema } from './operatorMetadataSchema'
+
+export const schemas = {
+  OperatorMetadata: operatorMetadataSchema,
+} as const
+
+export type SchemaKey = keyof typeof schemas & string
+
+export type TypeBySchemaKey<T extends SchemaKey> = T extends 'OperatorMetadata' ? OperatorMetadataJson : never
+
+export default schemas

+ 29 - 0
storage-node-v2/src/services/metadata/schemas/operatorMetadataSchema.ts

@@ -0,0 +1,29 @@
+import { JSONSchema4 } from 'json-schema'
+
+// Storage node operator metadata JSON schema.
+export const operatorMetadataSchema: JSONSchema4 = {
+  type: 'object',
+  additionalProperties: false,
+  properties: {
+    endpoint: { type: 'string' },
+    location: {
+      type: 'object',
+      additionalProperties: false,
+      properties: {
+        countryCode: { type: 'string' },
+        city: { type: 'string' },
+        coordinates: {
+          type: 'object',
+          additionalProperties: false,
+          properties: {
+            latitude: { type: 'number', minimum: -90, maximum: 90 },
+            longitude: { type: 'number', minimum: -180, maximum: 180 },
+          },
+        },
+      },
+    },
+    extra: { type: 'string' },
+  },
+}
+
+export default operatorMetadataSchema
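
For reference, a metadata document that satisfies this schema could look like the following (all field values are hypothetical):

  const exampleOperatorMetadata = {
    endpoint: 'https://storage.example.com/api/v1/',
    location: {
      countryCode: 'DE',
      city: 'Berlin',
      coordinates: { latitude: 52.52, longitude: 13.405 },
    },
    extra: 'Example storage node used for testing',
  }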

+ 37 - 0
storage-node-v2/src/services/metadata/validationService.ts

@@ -0,0 +1,37 @@
+import Ajv from 'ajv'
+import { SchemaKey, schemas, TypeBySchemaKey } from './schemas'
+import stringify from 'fast-safe-stringify'
+
+/**
+ * JSON schema validation error
+ */
+class ValidationError extends Error {
+  public readonly errors: string[]
+
+  public constructor(message: string, errors: string[]) {
+    super(`${message}\n\n${errors.join('\n')}`)
+    this.errors = errors
+  }
+}
+
+/**
+ * Validates input against the registered JSON schemas (e.g. the storage operator metadata schema)
+ */
+export class ValidationService {
+  private ajv: Ajv
+
+  public constructor() {
+    this.ajv = new Ajv({ allErrors: true, schemas })
+  }
+
+  validate<SK extends SchemaKey>(schemaKey: SK, input: unknown): TypeBySchemaKey<SK> {
+    const valid = this.ajv.validate(schemaKey, input) as boolean
+    if (!valid) {
+      throw new ValidationError(
+        `${schemaKey} is not valid`,
+        this.ajv.errors?.map((e) => `${e.dataPath}: ${e.message} (${stringify(e.params)})`) || []
+      )
+    }
+    return input as TypeBySchemaKey<SK>
+  }
+}
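
A minimal usage sketch, assuming the service is imported from the same directory; the metadata object passed in is hypothetical:

  import { ValidationService } from './validationService'

  const validationService = new ValidationService()

  // Throws ValidationError (listing all Ajv messages) if the input does not match the OperatorMetadata schema,
  // otherwise returns the input typed as OperatorMetadataJson.
  const metadata = validationService.validate('OperatorMetadata', {
    endpoint: 'https://storage.example.com/api/v1/',
  })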

+ 263 - 0
storage-node-v2/src/services/queryNode/api.ts

@@ -0,0 +1,263 @@
+import { ApolloClient, NormalizedCacheObject, HttpLink, InMemoryCache, DocumentNode } from '@apollo/client'
+import fetch from 'cross-fetch'
+import {
+  GetBagConnection,
+  GetBagConnectionQuery,
+  GetBagConnectionQueryVariables,
+  GetStorageBucketDetails,
+  GetStorageBucketDetailsQuery,
+  GetStorageBucketDetailsByWorkerIdQuery,
+  GetStorageBucketDetailsByWorkerIdQueryVariables,
+  GetStorageBucketDetailsQueryVariables,
+  StorageBucketDetailsFragment,
+  StorageBagDetailsFragment,
+  DataObjectDetailsFragment,
+  GetDataObjectConnectionQuery,
+  GetDataObjectConnectionQueryVariables,
+  GetDataObjectConnection,
+  StorageBucketIdsFragment,
+  GetStorageBucketsConnection,
+  GetStorageBucketsConnectionQuery,
+  GetStorageBucketsConnectionQueryVariables,
+  GetStorageBucketDetailsByWorkerId,
+} from './generated/queries'
+import { Maybe, StorageBagWhereInput } from './generated/schema'
+
+import logger from '../logger'
+
+/**
+ * Defines query paging limits.
+ */
+export const MAX_RESULTS_PER_QUERY = 1000
+
+type PaginationQueryVariables = {
+  limit: number
+  lastCursor?: Maybe<string>
+}
+
+type PaginationQueryResult<T = unknown> = {
+  edges: { node: T }[]
+  pageInfo: {
+    hasNextPage: boolean
+    endCursor?: Maybe<string>
+  }
+}
+
+/**
+ * Query node class helper. Encapsulates custom queries.
+ *
+ */
+export class QueryNodeApi {
+  private apolloClient: ApolloClient<NormalizedCacheObject>
+
+  public constructor(endpoint: string) {
+    this.apolloClient = new ApolloClient({
+      link: new HttpLink({ uri: endpoint, fetch }),
+      cache: new InMemoryCache(),
+      defaultOptions: {
+        query: { fetchPolicy: 'no-cache', errorPolicy: 'none' },
+      },
+    })
+  }
+
+  /**
+   * Get entity by unique input
+   *
+   * @param query - actual query
+   * @param variables - query parameters
+   * @param resultKey - helps with result parsing
+   */
+  protected async uniqueEntityQuery<
+    QueryT extends { [k: string]: Maybe<Record<string, unknown>> | undefined },
+    VariablesT extends Record<string, unknown>
+  >(
+    query: DocumentNode,
+    variables: VariablesT,
+    resultKey: keyof QueryT
+  ): Promise<Required<QueryT>[keyof QueryT] | null> {
+    const result = await this.apolloClient.query<QueryT, VariablesT>({
+      query,
+      variables,
+    })
+
+    if (result?.data === null) {
+      return null
+    }
+
+    return result.data[resultKey]
+  }
+
+  // Get entities by "non-unique" input and return first result
+  protected async firstEntityQuery<
+    QueryT extends { [k: string]: unknown[] },
+    VariablesT extends Record<string, unknown>
+  >(query: DocumentNode, variables: VariablesT, resultKey: keyof QueryT): Promise<QueryT[keyof QueryT][number] | null> {
+    const result = await this.apolloClient.query<QueryT, VariablesT>({
+      query,
+      variables,
+    })
+
+    if (result?.data === null) {
+      return null
+    }
+    return result.data[resultKey][0]
+  }
+
+  protected async multipleEntitiesWithPagination<
+    NodeT,
+    QueryT extends { [k: string]: PaginationQueryResult<NodeT> },
+    CustomVariablesT extends Record<string, unknown>
+  >(
+    query: DocumentNode,
+    variables: CustomVariablesT,
+    resultKey: keyof QueryT,
+    itemsPerPage = MAX_RESULTS_PER_QUERY
+  ): Promise<NodeT[]> {
+    let hasNextPage = true
+    let results: NodeT[] = []
+    let lastCursor: string | undefined
+    while (hasNextPage) {
+      const paginationVariables = { limit: itemsPerPage, cursor: lastCursor }
+      const queryVariables = { ...variables, ...paginationVariables }
+      logger.debug(`Query - ${resultKey}`)
+      const result = await this.apolloClient.query<QueryT, PaginationQueryVariables & CustomVariablesT>({
+        query,
+        variables: queryVariables,
+      })
+
+      if (!result?.data) {
+        return results
+      }
+
+      const page = result.data[resultKey]
+      results = results.concat(page.edges.map((e) => e.node))
+      hasNextPage = page.pageInfo.hasNextPage
+      lastCursor = page.pageInfo.endCursor || undefined
+    }
+    return results
+  }
+
+  /**
+   * Query-node: get multiple entities
+   *
+   * @param query - actual query
+   * @param variables - query parameters
+   * @param resultKey - helps with result parsing
+   */
+  protected async multipleEntitiesQuery<
+    QueryT extends { [k: string]: unknown[] },
+    VariablesT extends Record<string, unknown>
+  >(query: DocumentNode, variables: VariablesT, resultKey: keyof QueryT): Promise<QueryT[keyof QueryT] | null> {
+    const result = await this.apolloClient.query<QueryT, VariablesT>({
+      query,
+      variables,
+    })
+
+    if (result?.data === null) {
+      return null
+    }
+
+    return result.data[resultKey]
+  }
+
+  /**
+   * Returns storage bucket IDs filtered by worker ID.
+   *
+   * @param workerId - worker ID
+   */
+  public async getStorageBucketIdsByWorkerId(workerId: string): Promise<Array<StorageBucketIdsFragment>> {
+    const result = await this.multipleEntitiesWithPagination<
+      StorageBucketIdsFragment,
+      GetStorageBucketDetailsByWorkerIdQuery,
+      GetStorageBucketDetailsByWorkerIdQueryVariables
+    >(GetStorageBucketDetailsByWorkerId, { workerId, limit: MAX_RESULTS_PER_QUERY }, 'storageBucketsConnection')
+
+    if (!result) {
+      return []
+    }
+
+    return result
+  }
+
+  /**
+   * Returns storage bucket info by pages.
+   *
+   * @param ids - bucket IDs to fetch
+   * @param offset - starting record of the page
+   * @param limit - page size
+   */
+  public async getStorageBucketDetails(
+    ids: string[],
+    offset: number,
+    limit: number
+  ): Promise<Array<StorageBucketDetailsFragment>> {
+    const result = await this.multipleEntitiesQuery<
+      GetStorageBucketDetailsQuery,
+      GetStorageBucketDetailsQueryVariables
+    >(GetStorageBucketDetails, { offset, limit, ids }, 'storageBuckets')
+
+    if (result === null) {
+      return []
+    }
+
+    return result
+  }
+
+  /**
+   * Returns storage bag info by pages for the given buckets.
+   *
+   * @param bucketIds - query filter: bucket IDs
+   */
+  public async getStorageBagsDetails(bucketIds: string[]): Promise<Array<StorageBagDetailsFragment>> {
+    const result = await this.multipleEntitiesWithPagination<
+      StorageBagDetailsFragment,
+      GetBagConnectionQuery,
+      GetBagConnectionQueryVariables
+    >(GetBagConnection, { limit: MAX_RESULTS_PER_QUERY, bucketIds }, 'storageBagsConnection')
+
+    if (!result) {
+      return []
+    }
+
+    return result
+  }
+
+  /**
+   * Returns data objects info by pages for the given bags.
+   *
+   * @param bagIds - query filter: bag IDs
+   * @param offset - starting record of the page
+   */
+  public async getDataObjectDetails(bagIds: string[]): Promise<Array<DataObjectDetailsFragment>> {
+    const input: StorageBagWhereInput = { id_in: bagIds }
+    const result = await this.multipleEntitiesWithPagination<
+      DataObjectDetailsFragment,
+      GetDataObjectConnectionQuery,
+      GetDataObjectConnectionQueryVariables
+    >(GetDataObjectConnection, { limit: MAX_RESULTS_PER_QUERY, bagIds: input }, 'storageDataObjectsConnection')
+
+    if (!result) {
+      return []
+    }
+
+    return result
+  }
+
+  /**
+   * Returns storage bucket IDs.
+   *
+   */
+  public async getStorageBucketIds(): Promise<Array<StorageBucketIdsFragment>> {
+    const result = await this.multipleEntitiesWithPagination<
+      StorageBucketIdsFragment,
+      GetStorageBucketsConnectionQuery,
+      GetStorageBucketsConnectionQueryVariables
+    >(GetStorageBucketsConnection, { limit: MAX_RESULTS_PER_QUERY }, 'storageBucketsConnection')
+
+    if (!result) {
+      return []
+    }
+
+    return result
+  }
+}
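
A minimal usage sketch, assuming a reachable query node; the GraphQL endpoint and worker ID are hypothetical:

  import { QueryNodeApi } from './api'

  async function listObligations(): Promise<void> {
    const api = new QueryNodeApi('http://localhost:8081/graphql')

    // Buckets operated by worker 1, the bags assigned to them, and the accepted data objects in those bags.
    const buckets = await api.getStorageBucketIdsByWorkerId('1')
    const bags = await api.getStorageBagsDetails(buckets.map((b) => b.id))
    const dataObjects = await api.getDataObjectDetails(bags.map((b) => b.id))

    console.log(`Found ${dataObjects.length} data objects in ${bags.length} bags`)
  }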

+ 33 - 0
storage-node-v2/src/services/queryNode/codegen.yml

@@ -0,0 +1,33 @@
+# Paths are relative to root package directory
+overwrite: true
+
+schema: '../query-node/generated/graphql-server/generated/schema.graphql'
+
+documents:
+  - 'src/services/queryNode/queries/*.graphql'
+
+config:
+  scalars:
+    Date: Date
+  preResolveTypes: true # avoid using Pick
+  skipTypename: true # skip __typename field in typings unless it's part of the query
+
+generates:
+  src/services/queryNode/generated/schema.ts:
+    hooks:
+      afterOneFileWrite:
+        - prettier --write
+        - eslint --fix
+    plugins:
+      - typescript
+  src/services/queryNode/generated/queries.ts:
+    preset: import-types
+    presetConfig:
+      typesPath: ./schema
+    hooks:
+      afterOneFileWrite:
+        - prettier --write
+        - eslint --fix
+    plugins:
+      - typescript-operations
+      - typescript-document-nodes

+ 218 - 0
storage-node-v2/src/services/queryNode/generated/queries.ts

@@ -0,0 +1,218 @@
+import * as Types from './schema'
+
+import gql from 'graphql-tag'
+export type StorageBucketIdsFragment = { id: string }
+
+export type GetStorageBucketsConnectionQueryVariables = Types.Exact<{
+  limit?: Types.Maybe<Types.Scalars['Int']>
+  cursor?: Types.Maybe<Types.Scalars['String']>
+}>
+
+export type GetStorageBucketsConnectionQuery = {
+  storageBucketsConnection: {
+    totalCount: number
+    edges: Array<{ cursor: string; node: StorageBucketIdsFragment }>
+    pageInfo: { hasNextPage: boolean; endCursor?: Types.Maybe<string> }
+  }
+}
+
+export type GetStorageBucketDetailsByWorkerIdQueryVariables = Types.Exact<{
+  workerId?: Types.Maybe<Types.Scalars['ID']>
+  limit?: Types.Maybe<Types.Scalars['Int']>
+  cursor?: Types.Maybe<Types.Scalars['String']>
+}>
+
+export type GetStorageBucketDetailsByWorkerIdQuery = {
+  storageBucketsConnection: {
+    totalCount: number
+    edges: Array<{ cursor: string; node: StorageBucketIdsFragment }>
+    pageInfo: { hasNextPage: boolean; endCursor?: Types.Maybe<string> }
+  }
+}
+
+export type StorageBucketDetailsFragment = {
+  id: string
+  operatorMetadata?: Types.Maybe<{ id: string; nodeEndpoint?: Types.Maybe<string> }>
+  operatorStatus: { workerId: number } | { workerId: number }
+}
+
+export type GetStorageBucketDetailsQueryVariables = Types.Exact<{
+  ids?: Types.Maybe<Array<Types.Scalars['ID']> | Types.Scalars['ID']>
+  offset?: Types.Maybe<Types.Scalars['Int']>
+  limit?: Types.Maybe<Types.Scalars['Int']>
+}>
+
+export type GetStorageBucketDetailsQuery = { storageBuckets: Array<StorageBucketDetailsFragment> }
+
+export type StorageBagDetailsFragment = { id: string; storageBuckets: Array<{ id: string }> }
+
+export type GetStorageBagDetailsQueryVariables = Types.Exact<{
+  bucketIds?: Types.Maybe<Array<Types.Scalars['ID']> | Types.Scalars['ID']>
+  offset?: Types.Maybe<Types.Scalars['Int']>
+  limit?: Types.Maybe<Types.Scalars['Int']>
+}>
+
+export type GetStorageBagDetailsQuery = { storageBags: Array<StorageBagDetailsFragment> }
+
+export type GetBagConnectionQueryVariables = Types.Exact<{
+  bucketIds?: Types.Maybe<Array<Types.Scalars['ID']> | Types.Scalars['ID']>
+  limit?: Types.Maybe<Types.Scalars['Int']>
+  cursor?: Types.Maybe<Types.Scalars['String']>
+}>
+
+export type GetBagConnectionQuery = {
+  storageBagsConnection: {
+    totalCount: number
+    edges: Array<{ cursor: string; node: StorageBagDetailsFragment }>
+    pageInfo: { hasNextPage: boolean; endCursor?: Types.Maybe<string> }
+  }
+}
+
+export type DataObjectDetailsFragment = { id: string; storageBagId: string }
+
+export type GetDataObjectConnectionQueryVariables = Types.Exact<{
+  bagIds?: Types.Maybe<Types.StorageBagWhereInput>
+  limit?: Types.Maybe<Types.Scalars['Int']>
+  cursor?: Types.Maybe<Types.Scalars['String']>
+}>
+
+export type GetDataObjectConnectionQuery = {
+  storageDataObjectsConnection: {
+    totalCount: number
+    edges: Array<{ cursor: string; node: DataObjectDetailsFragment }>
+    pageInfo: { hasNextPage: boolean; endCursor?: Types.Maybe<string> }
+  }
+}
+
+export const StorageBucketIds = gql`
+  fragment StorageBucketIds on StorageBucket {
+    id
+  }
+`
+export const StorageBucketDetails = gql`
+  fragment StorageBucketDetails on StorageBucket {
+    id
+    operatorMetadata {
+      id
+      nodeEndpoint
+    }
+    operatorStatus {
+      ... on StorageBucketOperatorStatusActive {
+        workerId
+      }
+      ... on StorageBucketOperatorStatusInvited {
+        workerId
+      }
+    }
+  }
+`
+export const StorageBagDetails = gql`
+  fragment StorageBagDetails on StorageBag {
+    id
+    storageBuckets {
+      id
+    }
+  }
+`
+export const DataObjectDetails = gql`
+  fragment DataObjectDetails on StorageDataObject {
+    id
+    storageBagId
+  }
+`
+export const GetStorageBucketsConnection = gql`
+  query getStorageBucketsConnection($limit: Int, $cursor: String) {
+    storageBucketsConnection(
+      first: $limit
+      after: $cursor
+      where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive" } }
+    ) {
+      edges {
+        cursor
+        node {
+          ...StorageBucketIds
+        }
+      }
+      pageInfo {
+        hasNextPage
+        endCursor
+      }
+      totalCount
+    }
+  }
+  ${StorageBucketIds}
+`
+export const GetStorageBucketDetailsByWorkerId = gql`
+  query getStorageBucketDetailsByWorkerId($workerId: ID, $limit: Int, $cursor: String) {
+    storageBucketsConnection(
+      first: $limit
+      after: $cursor
+      where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive", workerId_eq: $workerId } }
+    ) {
+      edges {
+        cursor
+        node {
+          ...StorageBucketIds
+        }
+      }
+      pageInfo {
+        hasNextPage
+        endCursor
+      }
+      totalCount
+    }
+  }
+  ${StorageBucketIds}
+`
+export const GetStorageBucketDetails = gql`
+  query getStorageBucketDetails($ids: [ID!], $offset: Int, $limit: Int) {
+    storageBuckets(where: { id_in: $ids }, offset: $offset, limit: $limit) {
+      ...StorageBucketDetails
+    }
+  }
+  ${StorageBucketDetails}
+`
+export const GetStorageBagDetails = gql`
+  query getStorageBagDetails($bucketIds: [ID!], $offset: Int, $limit: Int) {
+    storageBags(offset: $offset, limit: $limit, where: { storageBuckets_some: { id_in: $bucketIds } }) {
+      ...StorageBagDetails
+    }
+  }
+  ${StorageBagDetails}
+`
+export const GetBagConnection = gql`
+  query getBagConnection($bucketIds: [ID!], $limit: Int, $cursor: String) {
+    storageBagsConnection(first: $limit, after: $cursor, where: { storageBuckets_some: { id_in: $bucketIds } }) {
+      edges {
+        cursor
+        node {
+          ...StorageBagDetails
+        }
+      }
+      pageInfo {
+        hasNextPage
+        endCursor
+      }
+      totalCount
+    }
+  }
+  ${StorageBagDetails}
+`
+export const GetDataObjectConnection = gql`
+  query getDataObjectConnection($bagIds: StorageBagWhereInput, $limit: Int, $cursor: String) {
+    storageDataObjectsConnection(first: $limit, after: $cursor, where: { storageBag: $bagIds, isAccepted_eq: true }) {
+      edges {
+        cursor
+        node {
+          ...DataObjectDetails
+        }
+      }
+      pageInfo {
+        hasNextPage
+        endCursor
+      }
+      totalCount
+    }
+  }
+  ${DataObjectDetails}
+`

+ 3704 - 0
storage-node-v2/src/services/queryNode/generated/schema.ts

@@ -0,0 +1,3704 @@
+export type Maybe<T> = T | null
+export type Exact<T extends { [key: string]: unknown }> = { [K in keyof T]: T[K] }
+export type MakeOptional<T, K extends keyof T> = Omit<T, K> & { [SubKey in K]?: Maybe<T[SubKey]> }
+export type MakeMaybe<T, K extends keyof T> = Omit<T, K> & { [SubKey in K]: Maybe<T[SubKey]> }
+/** All built-in and custom scalars, mapped to their actual values */
+export type Scalars = {
+  ID: string
+  String: string
+  Boolean: boolean
+  Int: number
+  Float: number
+  /** The javascript `Date` as string. Type represents date and time as the ISO Date string. */
+  DateTime: any
+  /** GraphQL representation of BigInt */
+  BigInt: any
+  /** The `JSONObject` scalar type represents JSON objects as specified by [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf). */
+  JSONObject: any
+}
+
+export type BaseGraphQlObject = {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+}
+
+export type BaseModel = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+}
+
+export type BaseModelUuid = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+}
+
+export type BaseWhereInput = {
+  id_eq?: Maybe<Scalars['String']>
+  id_in?: Maybe<Array<Scalars['String']>>
+  createdAt_eq?: Maybe<Scalars['String']>
+  createdAt_lt?: Maybe<Scalars['String']>
+  createdAt_lte?: Maybe<Scalars['String']>
+  createdAt_gt?: Maybe<Scalars['String']>
+  createdAt_gte?: Maybe<Scalars['String']>
+  createdById_eq?: Maybe<Scalars['String']>
+  updatedAt_eq?: Maybe<Scalars['String']>
+  updatedAt_lt?: Maybe<Scalars['String']>
+  updatedAt_lte?: Maybe<Scalars['String']>
+  updatedAt_gt?: Maybe<Scalars['String']>
+  updatedAt_gte?: Maybe<Scalars['String']>
+  updatedById_eq?: Maybe<Scalars['String']>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['String']>
+  deletedAt_lt?: Maybe<Scalars['String']>
+  deletedAt_lte?: Maybe<Scalars['String']>
+  deletedAt_gt?: Maybe<Scalars['String']>
+  deletedAt_gte?: Maybe<Scalars['String']>
+  deletedById_eq?: Maybe<Scalars['String']>
+}
+
+export type Channel = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  ownerMember?: Maybe<Membership>
+  ownerMemberId?: Maybe<Scalars['String']>
+  ownerCuratorGroup?: Maybe<CuratorGroup>
+  ownerCuratorGroupId?: Maybe<Scalars['String']>
+  category?: Maybe<ChannelCategory>
+  categoryId?: Maybe<Scalars['String']>
+  /** Reward account where revenue is sent if set. */
+  rewardAccount?: Maybe<Scalars['String']>
+  /** Destination account for the prize associated with channel deletion */
+  deletionPrizeDestAccount: Scalars['String']
+  /** The title of the Channel */
+  title?: Maybe<Scalars['String']>
+  /** The description of a Channel */
+  description?: Maybe<Scalars['String']>
+  coverPhoto?: Maybe<StorageDataObject>
+  coverPhotoId?: Maybe<Scalars['String']>
+  avatarPhoto?: Maybe<StorageDataObject>
+  avatarPhotoId?: Maybe<Scalars['String']>
+  /** Flag signaling whether a channel is public. */
+  isPublic?: Maybe<Scalars['Boolean']>
+  /** Flag signaling whether a channel is censored. */
+  isCensored: Scalars['Boolean']
+  language?: Maybe<Language>
+  languageId?: Maybe<Scalars['String']>
+  videos: Array<Video>
+  createdInBlock: Scalars['Int']
+}
+
+export type ChannelCategoriesByNameFtsOutput = {
+  item: ChannelCategoriesByNameSearchResult
+  rank: Scalars['Float']
+  isTypeOf: Scalars['String']
+  highlight: Scalars['String']
+}
+
+export type ChannelCategoriesByNameSearchResult = ChannelCategory
+
+/** Category of media channel */
+export type ChannelCategory = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** The name of the category */
+  name?: Maybe<Scalars['String']>
+  channels: Array<Channel>
+  createdInBlock: Scalars['Int']
+}
+
+export type ChannelCategoryConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<ChannelCategoryEdge>
+  pageInfo: PageInfo
+}
+
+export type ChannelCategoryCreateInput = {
+  name?: Maybe<Scalars['String']>
+  createdInBlock: Scalars['Float']
+}
+
+export type ChannelCategoryEdge = {
+  node: ChannelCategory
+  cursor: Scalars['String']
+}
+
+export enum ChannelCategoryOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  NameAsc = 'name_ASC',
+  NameDesc = 'name_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+}
+
+export type ChannelCategoryUpdateInput = {
+  name?: Maybe<Scalars['String']>
+  createdInBlock?: Maybe<Scalars['Float']>
+}
+
+export type ChannelCategoryWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  name_eq?: Maybe<Scalars['String']>
+  name_contains?: Maybe<Scalars['String']>
+  name_startsWith?: Maybe<Scalars['String']>
+  name_endsWith?: Maybe<Scalars['String']>
+  name_in?: Maybe<Array<Scalars['String']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  channels_none?: Maybe<ChannelWhereInput>
+  channels_some?: Maybe<ChannelWhereInput>
+  channels_every?: Maybe<ChannelWhereInput>
+  AND?: Maybe<Array<ChannelCategoryWhereInput>>
+  OR?: Maybe<Array<ChannelCategoryWhereInput>>
+}
+
+export type ChannelCategoryWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type ChannelConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<ChannelEdge>
+  pageInfo: PageInfo
+}
+
+export type ChannelCreateInput = {
+  ownerMember?: Maybe<Scalars['ID']>
+  ownerCuratorGroup?: Maybe<Scalars['ID']>
+  category?: Maybe<Scalars['ID']>
+  rewardAccount?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount: Scalars['String']
+  title?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  coverPhoto?: Maybe<Scalars['ID']>
+  avatarPhoto?: Maybe<Scalars['ID']>
+  isPublic?: Maybe<Scalars['Boolean']>
+  isCensored: Scalars['Boolean']
+  language?: Maybe<Scalars['ID']>
+  createdInBlock: Scalars['Float']
+}
+
+export type ChannelEdge = {
+  node: Channel
+  cursor: Scalars['String']
+}
+
+export enum ChannelOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  OwnerMemberAsc = 'ownerMember_ASC',
+  OwnerMemberDesc = 'ownerMember_DESC',
+  OwnerCuratorGroupAsc = 'ownerCuratorGroup_ASC',
+  OwnerCuratorGroupDesc = 'ownerCuratorGroup_DESC',
+  CategoryAsc = 'category_ASC',
+  CategoryDesc = 'category_DESC',
+  RewardAccountAsc = 'rewardAccount_ASC',
+  RewardAccountDesc = 'rewardAccount_DESC',
+  DeletionPrizeDestAccountAsc = 'deletionPrizeDestAccount_ASC',
+  DeletionPrizeDestAccountDesc = 'deletionPrizeDestAccount_DESC',
+  TitleAsc = 'title_ASC',
+  TitleDesc = 'title_DESC',
+  DescriptionAsc = 'description_ASC',
+  DescriptionDesc = 'description_DESC',
+  CoverPhotoAsc = 'coverPhoto_ASC',
+  CoverPhotoDesc = 'coverPhoto_DESC',
+  AvatarPhotoAsc = 'avatarPhoto_ASC',
+  AvatarPhotoDesc = 'avatarPhoto_DESC',
+  IsPublicAsc = 'isPublic_ASC',
+  IsPublicDesc = 'isPublic_DESC',
+  IsCensoredAsc = 'isCensored_ASC',
+  IsCensoredDesc = 'isCensored_DESC',
+  LanguageAsc = 'language_ASC',
+  LanguageDesc = 'language_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+}
+
+export type ChannelUpdateInput = {
+  ownerMember?: Maybe<Scalars['ID']>
+  ownerCuratorGroup?: Maybe<Scalars['ID']>
+  category?: Maybe<Scalars['ID']>
+  rewardAccount?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount?: Maybe<Scalars['String']>
+  title?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  coverPhoto?: Maybe<Scalars['ID']>
+  avatarPhoto?: Maybe<Scalars['ID']>
+  isPublic?: Maybe<Scalars['Boolean']>
+  isCensored?: Maybe<Scalars['Boolean']>
+  language?: Maybe<Scalars['ID']>
+  createdInBlock?: Maybe<Scalars['Float']>
+}
+
+export type ChannelWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  rewardAccount_eq?: Maybe<Scalars['String']>
+  rewardAccount_contains?: Maybe<Scalars['String']>
+  rewardAccount_startsWith?: Maybe<Scalars['String']>
+  rewardAccount_endsWith?: Maybe<Scalars['String']>
+  rewardAccount_in?: Maybe<Array<Scalars['String']>>
+  deletionPrizeDestAccount_eq?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount_contains?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount_startsWith?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount_endsWith?: Maybe<Scalars['String']>
+  deletionPrizeDestAccount_in?: Maybe<Array<Scalars['String']>>
+  title_eq?: Maybe<Scalars['String']>
+  title_contains?: Maybe<Scalars['String']>
+  title_startsWith?: Maybe<Scalars['String']>
+  title_endsWith?: Maybe<Scalars['String']>
+  title_in?: Maybe<Array<Scalars['String']>>
+  description_eq?: Maybe<Scalars['String']>
+  description_contains?: Maybe<Scalars['String']>
+  description_startsWith?: Maybe<Scalars['String']>
+  description_endsWith?: Maybe<Scalars['String']>
+  description_in?: Maybe<Array<Scalars['String']>>
+  isPublic_eq?: Maybe<Scalars['Boolean']>
+  isPublic_in?: Maybe<Array<Scalars['Boolean']>>
+  isCensored_eq?: Maybe<Scalars['Boolean']>
+  isCensored_in?: Maybe<Array<Scalars['Boolean']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  ownerMember?: Maybe<MembershipWhereInput>
+  ownerCuratorGroup?: Maybe<CuratorGroupWhereInput>
+  category?: Maybe<ChannelCategoryWhereInput>
+  coverPhoto?: Maybe<StorageDataObjectWhereInput>
+  avatarPhoto?: Maybe<StorageDataObjectWhereInput>
+  language?: Maybe<LanguageWhereInput>
+  videos_none?: Maybe<VideoWhereInput>
+  videos_some?: Maybe<VideoWhereInput>
+  videos_every?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<ChannelWhereInput>>
+  OR?: Maybe<Array<ChannelWhereInput>>
+}
+
+export type ChannelWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export enum Continent {
+  Af = 'AF',
+  Na = 'NA',
+  Oc = 'OC',
+  An = 'AN',
+  As = 'AS',
+  Eu = 'EU',
+  Sa = 'SA',
+}
+
+export type CuratorGroup = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Curators belonging to this group */
+  curatorIds: Array<Scalars['Int']>
+  /** Is group active or not */
+  isActive: Scalars['Boolean']
+  channels: Array<Channel>
+}
+
+export type CuratorGroupConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<CuratorGroupEdge>
+  pageInfo: PageInfo
+}
+
+export type CuratorGroupCreateInput = {
+  curatorIds: Array<Scalars['Int']>
+  isActive: Scalars['Boolean']
+}
+
+export type CuratorGroupEdge = {
+  node: CuratorGroup
+  cursor: Scalars['String']
+}
+
+export enum CuratorGroupOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  IsActiveAsc = 'isActive_ASC',
+  IsActiveDesc = 'isActive_DESC',
+}
+
+export type CuratorGroupUpdateInput = {
+  curatorIds?: Maybe<Array<Scalars['Int']>>
+  isActive?: Maybe<Scalars['Boolean']>
+}
+
+export type CuratorGroupWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  curatorIds_containsAll?: Maybe<Array<Scalars['Int']>>
+  curatorIds_containsNone?: Maybe<Array<Scalars['Int']>>
+  curatorIds_containsAny?: Maybe<Array<Scalars['Int']>>
+  isActive_eq?: Maybe<Scalars['Boolean']>
+  isActive_in?: Maybe<Array<Scalars['Boolean']>>
+  channels_none?: Maybe<ChannelWhereInput>
+  channels_some?: Maybe<ChannelWhereInput>
+  channels_every?: Maybe<ChannelWhereInput>
+  AND?: Maybe<Array<CuratorGroupWhereInput>>
+  OR?: Maybe<Array<CuratorGroupWhereInput>>
+}
+
+export type CuratorGroupWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type DataObjectType =
+  | DataObjectTypeChannelAvatar
+  | DataObjectTypeChannelCoverPhoto
+  | DataObjectTypeVideoMedia
+  | DataObjectTypeVideoThumbnail
+  | DataObjectTypeUnknown
+
+export type DataObjectTypeChannelAvatar = {
+  /** Related channel entity */
+  channel?: Maybe<Channel>
+}
+
+export type DataObjectTypeChannelCoverPhoto = {
+  /** Related channel entity */
+  channel?: Maybe<Channel>
+}
+
+export type DataObjectTypeUnknown = {
+  phantom?: Maybe<Scalars['Int']>
+}
+
+export type DataObjectTypeVideoMedia = {
+  /** Related video entity */
+  video?: Maybe<Video>
+}
+
+export type DataObjectTypeVideoThumbnail = {
+  /** Related video entity */
+  video?: Maybe<Video>
+}
+
+export type DeleteResponse = {
+  id: Scalars['ID']
+}
+
+export type DistributionBucket = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  family: DistributionBucketFamily
+  familyId: Scalars['String']
+  operators: Array<DistributionBucketOperator>
+  /** Whether the bucket is accepting any new bags */
+  acceptingNewBags: Scalars['Boolean']
+  /** Whether the bucket is currently distributing content */
+  distributing: Scalars['Boolean']
+  bags: Array<StorageBag>
+}
+
+export type DistributionBucketConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketCreateInput = {
+  family: Scalars['ID']
+  acceptingNewBags: Scalars['Boolean']
+  distributing: Scalars['Boolean']
+}
+
+export type DistributionBucketEdge = {
+  node: DistributionBucket
+  cursor: Scalars['String']
+}
+
+export type DistributionBucketFamily = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  metadata?: Maybe<DistributionBucketFamilyMetadata>
+  metadataId?: Maybe<Scalars['String']>
+  buckets: Array<DistributionBucket>
+}
+
+export type DistributionBucketFamilyConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketFamilyEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketFamilyCreateInput = {
+  metadata?: Maybe<Scalars['ID']>
+}
+
+export type DistributionBucketFamilyEdge = {
+  node: DistributionBucketFamily
+  cursor: Scalars['String']
+}
+
+export type DistributionBucketFamilyGeographicArea = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Geographical area (continent / country / subdivision) */
+  area: GeographicalArea
+  distributionBucketFamilyMetadata: DistributionBucketFamilyMetadata
+  distributionBucketFamilyMetadataId: Scalars['String']
+}
+
+export type DistributionBucketFamilyGeographicAreaConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketFamilyGeographicAreaEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketFamilyGeographicAreaCreateInput = {
+  area: Scalars['JSONObject']
+  distributionBucketFamilyMetadata: Scalars['ID']
+}
+
+export type DistributionBucketFamilyGeographicAreaEdge = {
+  node: DistributionBucketFamilyGeographicArea
+  cursor: Scalars['String']
+}
+
+export enum DistributionBucketFamilyGeographicAreaOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  DistributionBucketFamilyMetadataAsc = 'distributionBucketFamilyMetadata_ASC',
+  DistributionBucketFamilyMetadataDesc = 'distributionBucketFamilyMetadata_DESC',
+}
+
+export type DistributionBucketFamilyGeographicAreaUpdateInput = {
+  area?: Maybe<Scalars['JSONObject']>
+  distributionBucketFamilyMetadata?: Maybe<Scalars['ID']>
+}
+
+export type DistributionBucketFamilyGeographicAreaWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  area_json?: Maybe<Scalars['JSONObject']>
+  distributionBucketFamilyMetadata?: Maybe<DistributionBucketFamilyMetadataWhereInput>
+  AND?: Maybe<Array<DistributionBucketFamilyGeographicAreaWhereInput>>
+  OR?: Maybe<Array<DistributionBucketFamilyGeographicAreaWhereInput>>
+}
+
+export type DistributionBucketFamilyGeographicAreaWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type DistributionBucketFamilyMetadata = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Name of the geographical region covered by the family (ie.: us-east-1) */
+  region?: Maybe<Scalars['String']>
+  /** Optional, more specific description of the region covered by the family */
+  description?: Maybe<Scalars['String']>
+  areas: Array<DistributionBucketFamilyGeographicArea>
+  /** List of targets (hosts/ips) best suited latency measurements for the family */
+  latencyTestTargets?: Maybe<Array<Scalars['String']>>
+  distributionbucketfamilymetadata?: Maybe<Array<DistributionBucketFamily>>
+}
+
+export type DistributionBucketFamilyMetadataConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketFamilyMetadataEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketFamilyMetadataCreateInput = {
+  region?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  latencyTestTargets?: Maybe<Array<Scalars['String']>>
+}
+
+export type DistributionBucketFamilyMetadataEdge = {
+  node: DistributionBucketFamilyMetadata
+  cursor: Scalars['String']
+}
+
+export enum DistributionBucketFamilyMetadataOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  RegionAsc = 'region_ASC',
+  RegionDesc = 'region_DESC',
+  DescriptionAsc = 'description_ASC',
+  DescriptionDesc = 'description_DESC',
+}
+
+export type DistributionBucketFamilyMetadataUpdateInput = {
+  region?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  latencyTestTargets?: Maybe<Array<Scalars['String']>>
+}
+
+export type DistributionBucketFamilyMetadataWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  region_eq?: Maybe<Scalars['String']>
+  region_contains?: Maybe<Scalars['String']>
+  region_startsWith?: Maybe<Scalars['String']>
+  region_endsWith?: Maybe<Scalars['String']>
+  region_in?: Maybe<Array<Scalars['String']>>
+  description_eq?: Maybe<Scalars['String']>
+  description_contains?: Maybe<Scalars['String']>
+  description_startsWith?: Maybe<Scalars['String']>
+  description_endsWith?: Maybe<Scalars['String']>
+  description_in?: Maybe<Array<Scalars['String']>>
+  latencyTestTargets_containsAll?: Maybe<Array<Scalars['String']>>
+  latencyTestTargets_containsNone?: Maybe<Array<Scalars['String']>>
+  latencyTestTargets_containsAny?: Maybe<Array<Scalars['String']>>
+  areas_none?: Maybe<DistributionBucketFamilyGeographicAreaWhereInput>
+  areas_some?: Maybe<DistributionBucketFamilyGeographicAreaWhereInput>
+  areas_every?: Maybe<DistributionBucketFamilyGeographicAreaWhereInput>
+  distributionbucketfamilymetadata_none?: Maybe<DistributionBucketFamilyWhereInput>
+  distributionbucketfamilymetadata_some?: Maybe<DistributionBucketFamilyWhereInput>
+  distributionbucketfamilymetadata_every?: Maybe<DistributionBucketFamilyWhereInput>
+  AND?: Maybe<Array<DistributionBucketFamilyMetadataWhereInput>>
+  OR?: Maybe<Array<DistributionBucketFamilyMetadataWhereInput>>
+}
+
+export type DistributionBucketFamilyMetadataWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export enum DistributionBucketFamilyOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  MetadataAsc = 'metadata_ASC',
+  MetadataDesc = 'metadata_DESC',
+}
+
+export type DistributionBucketFamilyUpdateInput = {
+  metadata?: Maybe<Scalars['ID']>
+}
+
+export type DistributionBucketFamilyWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  metadata?: Maybe<DistributionBucketFamilyMetadataWhereInput>
+  buckets_none?: Maybe<DistributionBucketWhereInput>
+  buckets_some?: Maybe<DistributionBucketWhereInput>
+  buckets_every?: Maybe<DistributionBucketWhereInput>
+  AND?: Maybe<Array<DistributionBucketFamilyWhereInput>>
+  OR?: Maybe<Array<DistributionBucketFamilyWhereInput>>
+}
+
+export type DistributionBucketFamilyWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type DistributionBucketOperator = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  distributionBucket: DistributionBucket
+  distributionBucketId: Scalars['String']
+  /** ID of the distribution group worker */
+  workerId: Scalars['Int']
+  /** Current operator status */
+  status: DistributionBucketOperatorStatus
+  metadata?: Maybe<DistributionBucketOperatorMetadata>
+  metadataId?: Maybe<Scalars['String']>
+}
+
+export type DistributionBucketOperatorConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketOperatorEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketOperatorCreateInput = {
+  distributionBucket: Scalars['ID']
+  workerId: Scalars['Float']
+  status: DistributionBucketOperatorStatus
+  metadata?: Maybe<Scalars['ID']>
+}
+
+export type DistributionBucketOperatorEdge = {
+  node: DistributionBucketOperator
+  cursor: Scalars['String']
+}
+
+export type DistributionBucketOperatorMetadata = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Root distributor node api endpoint */
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<NodeLocationMetadata>
+  nodeLocationId?: Maybe<Scalars['String']>
+  /** Additional information about the node/operator */
+  extra?: Maybe<Scalars['String']>
+  distributionbucketoperatormetadata?: Maybe<Array<DistributionBucketOperator>>
+}
+
+export type DistributionBucketOperatorMetadataConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<DistributionBucketOperatorMetadataEdge>
+  pageInfo: PageInfo
+}
+
+export type DistributionBucketOperatorMetadataCreateInput = {
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<Scalars['ID']>
+  extra?: Maybe<Scalars['String']>
+}
+
+export type DistributionBucketOperatorMetadataEdge = {
+  node: DistributionBucketOperatorMetadata
+  cursor: Scalars['String']
+}
+
+export enum DistributionBucketOperatorMetadataOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  NodeEndpointAsc = 'nodeEndpoint_ASC',
+  NodeEndpointDesc = 'nodeEndpoint_DESC',
+  NodeLocationAsc = 'nodeLocation_ASC',
+  NodeLocationDesc = 'nodeLocation_DESC',
+  ExtraAsc = 'extra_ASC',
+  ExtraDesc = 'extra_DESC',
+}
+
+export type DistributionBucketOperatorMetadataUpdateInput = {
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<Scalars['ID']>
+  extra?: Maybe<Scalars['String']>
+}
+
+export type DistributionBucketOperatorMetadataWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  nodeEndpoint_eq?: Maybe<Scalars['String']>
+  nodeEndpoint_contains?: Maybe<Scalars['String']>
+  nodeEndpoint_startsWith?: Maybe<Scalars['String']>
+  nodeEndpoint_endsWith?: Maybe<Scalars['String']>
+  nodeEndpoint_in?: Maybe<Array<Scalars['String']>>
+  extra_eq?: Maybe<Scalars['String']>
+  extra_contains?: Maybe<Scalars['String']>
+  extra_startsWith?: Maybe<Scalars['String']>
+  extra_endsWith?: Maybe<Scalars['String']>
+  extra_in?: Maybe<Array<Scalars['String']>>
+  nodeLocation?: Maybe<NodeLocationMetadataWhereInput>
+  distributionbucketoperatormetadata_none?: Maybe<DistributionBucketOperatorWhereInput>
+  distributionbucketoperatormetadata_some?: Maybe<DistributionBucketOperatorWhereInput>
+  distributionbucketoperatormetadata_every?: Maybe<DistributionBucketOperatorWhereInput>
+  AND?: Maybe<Array<DistributionBucketOperatorMetadataWhereInput>>
+  OR?: Maybe<Array<DistributionBucketOperatorMetadataWhereInput>>
+}
+
+export type DistributionBucketOperatorMetadataWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export enum DistributionBucketOperatorOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  DistributionBucketAsc = 'distributionBucket_ASC',
+  DistributionBucketDesc = 'distributionBucket_DESC',
+  WorkerIdAsc = 'workerId_ASC',
+  WorkerIdDesc = 'workerId_DESC',
+  StatusAsc = 'status_ASC',
+  StatusDesc = 'status_DESC',
+  MetadataAsc = 'metadata_ASC',
+  MetadataDesc = 'metadata_DESC',
+}
+
+export enum DistributionBucketOperatorStatus {
+  Invited = 'INVITED',
+  Active = 'ACTIVE',
+}
+
+export type DistributionBucketOperatorUpdateInput = {
+  distributionBucket?: Maybe<Scalars['ID']>
+  workerId?: Maybe<Scalars['Float']>
+  status?: Maybe<DistributionBucketOperatorStatus>
+  metadata?: Maybe<Scalars['ID']>
+}
+
+export type DistributionBucketOperatorWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  workerId_eq?: Maybe<Scalars['Int']>
+  workerId_gt?: Maybe<Scalars['Int']>
+  workerId_gte?: Maybe<Scalars['Int']>
+  workerId_lt?: Maybe<Scalars['Int']>
+  workerId_lte?: Maybe<Scalars['Int']>
+  workerId_in?: Maybe<Array<Scalars['Int']>>
+  status_eq?: Maybe<DistributionBucketOperatorStatus>
+  status_in?: Maybe<Array<DistributionBucketOperatorStatus>>
+  distributionBucket?: Maybe<DistributionBucketWhereInput>
+  metadata?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  AND?: Maybe<Array<DistributionBucketOperatorWhereInput>>
+  OR?: Maybe<Array<DistributionBucketOperatorWhereInput>>
+}
+
+export type DistributionBucketOperatorWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export enum DistributionBucketOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  FamilyAsc = 'family_ASC',
+  FamilyDesc = 'family_DESC',
+  AcceptingNewBagsAsc = 'acceptingNewBags_ASC',
+  AcceptingNewBagsDesc = 'acceptingNewBags_DESC',
+  DistributingAsc = 'distributing_ASC',
+  DistributingDesc = 'distributing_DESC',
+}
+
+export type DistributionBucketUpdateInput = {
+  family?: Maybe<Scalars['ID']>
+  acceptingNewBags?: Maybe<Scalars['Boolean']>
+  distributing?: Maybe<Scalars['Boolean']>
+}
+
+export type DistributionBucketWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  acceptingNewBags_eq?: Maybe<Scalars['Boolean']>
+  acceptingNewBags_in?: Maybe<Array<Scalars['Boolean']>>
+  distributing_eq?: Maybe<Scalars['Boolean']>
+  distributing_in?: Maybe<Array<Scalars['Boolean']>>
+  family?: Maybe<DistributionBucketFamilyWhereInput>
+  operators_none?: Maybe<DistributionBucketOperatorWhereInput>
+  operators_some?: Maybe<DistributionBucketOperatorWhereInput>
+  operators_every?: Maybe<DistributionBucketOperatorWhereInput>
+  bags_none?: Maybe<StorageBagWhereInput>
+  bags_some?: Maybe<StorageBagWhereInput>
+  bags_every?: Maybe<StorageBagWhereInput>
+  AND?: Maybe<Array<DistributionBucketWhereInput>>
+  OR?: Maybe<Array<DistributionBucketWhereInput>>
+}
+
+export type DistributionBucketWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type GeoCoordinates = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  latitude: Scalars['Float']
+  longitude: Scalars['Float']
+  nodelocationmetadatacoordinates?: Maybe<Array<NodeLocationMetadata>>
+}
+
+export type GeoCoordinatesConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<GeoCoordinatesEdge>
+  pageInfo: PageInfo
+}
+
+export type GeoCoordinatesCreateInput = {
+  latitude: Scalars['Float']
+  longitude: Scalars['Float']
+}
+
+export type GeoCoordinatesEdge = {
+  node: GeoCoordinates
+  cursor: Scalars['String']
+}
+
+export enum GeoCoordinatesOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  LatitudeAsc = 'latitude_ASC',
+  LatitudeDesc = 'latitude_DESC',
+  LongitudeAsc = 'longitude_ASC',
+  LongitudeDesc = 'longitude_DESC',
+}
+
+export type GeoCoordinatesUpdateInput = {
+  latitude?: Maybe<Scalars['Float']>
+  longitude?: Maybe<Scalars['Float']>
+}
+
+export type GeoCoordinatesWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  latitude_eq?: Maybe<Scalars['Float']>
+  latitude_gt?: Maybe<Scalars['Float']>
+  latitude_gte?: Maybe<Scalars['Float']>
+  latitude_lt?: Maybe<Scalars['Float']>
+  latitude_lte?: Maybe<Scalars['Float']>
+  latitude_in?: Maybe<Array<Scalars['Float']>>
+  longitude_eq?: Maybe<Scalars['Float']>
+  longitude_gt?: Maybe<Scalars['Float']>
+  longitude_gte?: Maybe<Scalars['Float']>
+  longitude_lt?: Maybe<Scalars['Float']>
+  longitude_lte?: Maybe<Scalars['Float']>
+  longitude_in?: Maybe<Array<Scalars['Float']>>
+  nodelocationmetadatacoordinates_none?: Maybe<NodeLocationMetadataWhereInput>
+  nodelocationmetadatacoordinates_some?: Maybe<NodeLocationMetadataWhereInput>
+  nodelocationmetadatacoordinates_every?: Maybe<NodeLocationMetadataWhereInput>
+  AND?: Maybe<Array<GeoCoordinatesWhereInput>>
+  OR?: Maybe<Array<GeoCoordinatesWhereInput>>
+}
+
+export type GeoCoordinatesWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type GeographicalArea = GeographicalAreaContinent | GeographicalAreaCountry | GeographicalAreaSubdivistion
+
+export type GeographicalAreaContinent = {
+  code?: Maybe<Continent>
+}
+
+export type GeographicalAreaContinentCreateInput = {
+  code?: Maybe<Continent>
+}
+
+export type GeographicalAreaContinentUpdateInput = {
+  code?: Maybe<Continent>
+}
+
+export type GeographicalAreaContinentWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  code_eq?: Maybe<Continent>
+  code_in?: Maybe<Array<Continent>>
+  AND?: Maybe<Array<GeographicalAreaContinentWhereInput>>
+  OR?: Maybe<Array<GeographicalAreaContinentWhereInput>>
+}
+
+export type GeographicalAreaContinentWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type GeographicalAreaCountry = {
+  /** ISO 3166-1 alpha-2 country code */
+  code?: Maybe<Scalars['String']>
+}
+
+export type GeographicalAreaSubdivistion = {
+  /** ISO 3166-2 subdivision code */
+  code?: Maybe<Scalars['String']>
+}
+
+export type Language = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Language identifier ISO 639-1 */
+  iso: Scalars['String']
+  createdInBlock: Scalars['Int']
+  channellanguage?: Maybe<Array<Channel>>
+  videolanguage?: Maybe<Array<Video>>
+}
+
+export type LanguageConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<LanguageEdge>
+  pageInfo: PageInfo
+}
+
+export type LanguageCreateInput = {
+  iso: Scalars['String']
+  createdInBlock: Scalars['Float']
+}
+
+export type LanguageEdge = {
+  node: Language
+  cursor: Scalars['String']
+}
+
+export enum LanguageOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  IsoAsc = 'iso_ASC',
+  IsoDesc = 'iso_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+}
+
+export type LanguageUpdateInput = {
+  iso?: Maybe<Scalars['String']>
+  createdInBlock?: Maybe<Scalars['Float']>
+}
+
+export type LanguageWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  iso_eq?: Maybe<Scalars['String']>
+  iso_contains?: Maybe<Scalars['String']>
+  iso_startsWith?: Maybe<Scalars['String']>
+  iso_endsWith?: Maybe<Scalars['String']>
+  iso_in?: Maybe<Array<Scalars['String']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  channellanguage_none?: Maybe<ChannelWhereInput>
+  channellanguage_some?: Maybe<ChannelWhereInput>
+  channellanguage_every?: Maybe<ChannelWhereInput>
+  videolanguage_none?: Maybe<VideoWhereInput>
+  videolanguage_some?: Maybe<VideoWhereInput>
+  videolanguage_every?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<LanguageWhereInput>>
+  OR?: Maybe<Array<LanguageWhereInput>>
+}
+
+export type LanguageWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type License = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** License code defined by Joystream */
+  code?: Maybe<Scalars['Int']>
+  /** Attribution (if required by the license) */
+  attribution?: Maybe<Scalars['String']>
+  /** Custom license content */
+  customText?: Maybe<Scalars['String']>
+  videolicense?: Maybe<Array<Video>>
+}
+
+export type LicenseConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<LicenseEdge>
+  pageInfo: PageInfo
+}
+
+export type LicenseCreateInput = {
+  code?: Maybe<Scalars['Float']>
+  attribution?: Maybe<Scalars['String']>
+  customText?: Maybe<Scalars['String']>
+}
+
+export type LicenseEdge = {
+  node: License
+  cursor: Scalars['String']
+}
+
+export enum LicenseOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  CodeAsc = 'code_ASC',
+  CodeDesc = 'code_DESC',
+  AttributionAsc = 'attribution_ASC',
+  AttributionDesc = 'attribution_DESC',
+  CustomTextAsc = 'customText_ASC',
+  CustomTextDesc = 'customText_DESC',
+}
+
+export type LicenseUpdateInput = {
+  code?: Maybe<Scalars['Float']>
+  attribution?: Maybe<Scalars['String']>
+  customText?: Maybe<Scalars['String']>
+}
+
+export type LicenseWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  code_eq?: Maybe<Scalars['Int']>
+  code_gt?: Maybe<Scalars['Int']>
+  code_gte?: Maybe<Scalars['Int']>
+  code_lt?: Maybe<Scalars['Int']>
+  code_lte?: Maybe<Scalars['Int']>
+  code_in?: Maybe<Array<Scalars['Int']>>
+  attribution_eq?: Maybe<Scalars['String']>
+  attribution_contains?: Maybe<Scalars['String']>
+  attribution_startsWith?: Maybe<Scalars['String']>
+  attribution_endsWith?: Maybe<Scalars['String']>
+  attribution_in?: Maybe<Array<Scalars['String']>>
+  customText_eq?: Maybe<Scalars['String']>
+  customText_contains?: Maybe<Scalars['String']>
+  customText_startsWith?: Maybe<Scalars['String']>
+  customText_endsWith?: Maybe<Scalars['String']>
+  customText_in?: Maybe<Array<Scalars['String']>>
+  videolicense_none?: Maybe<VideoWhereInput>
+  videolicense_some?: Maybe<VideoWhereInput>
+  videolicense_every?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<LicenseWhereInput>>
+  OR?: Maybe<Array<LicenseWhereInput>>
+}
+
+export type LicenseWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type MembersByHandleFtsOutput = {
+  item: MembersByHandleSearchResult
+  rank: Scalars['Float']
+  isTypeOf: Scalars['String']
+  highlight: Scalars['String']
+}
+
+export type MembersByHandleSearchResult = Membership
+
+/** Stored information about a registered user */
+export type Membership = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** The unique handle chosen by the member */
+  handle: Scalars['String']
+  /** A URL to the member's avatar image */
+  avatarUri?: Maybe<Scalars['String']>
+  /** Short text chosen by the member to share information about themselves */
+  about?: Maybe<Scalars['String']>
+  /** Member's controller account id */
+  controllerAccount: Scalars['String']
+  /** Member's root account id */
+  rootAccount: Scalars['String']
+  /** Block number when the member was registered */
+  createdInBlock: Scalars['Int']
+  /** How the member was registered */
+  entry: MembershipEntryMethod
+  /** The type of subscription the member has purchased if any. */
+  subscription?: Maybe<Scalars['Int']>
+  channels: Array<Channel>
+}
+
+export type MembershipConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<MembershipEdge>
+  pageInfo: PageInfo
+}
+
+export type MembershipCreateInput = {
+  handle: Scalars['String']
+  avatarUri?: Maybe<Scalars['String']>
+  about?: Maybe<Scalars['String']>
+  controllerAccount: Scalars['String']
+  rootAccount: Scalars['String']
+  createdInBlock: Scalars['Float']
+  entry: MembershipEntryMethod
+  subscription?: Maybe<Scalars['Float']>
+}
+
+export type MembershipEdge = {
+  node: Membership
+  cursor: Scalars['String']
+}
+
+export enum MembershipEntryMethod {
+  Paid = 'PAID',
+  Screening = 'SCREENING',
+  Genesis = 'GENESIS',
+}
+
+export enum MembershipOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  HandleAsc = 'handle_ASC',
+  HandleDesc = 'handle_DESC',
+  AvatarUriAsc = 'avatarUri_ASC',
+  AvatarUriDesc = 'avatarUri_DESC',
+  AboutAsc = 'about_ASC',
+  AboutDesc = 'about_DESC',
+  ControllerAccountAsc = 'controllerAccount_ASC',
+  ControllerAccountDesc = 'controllerAccount_DESC',
+  RootAccountAsc = 'rootAccount_ASC',
+  RootAccountDesc = 'rootAccount_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+  EntryAsc = 'entry_ASC',
+  EntryDesc = 'entry_DESC',
+  SubscriptionAsc = 'subscription_ASC',
+  SubscriptionDesc = 'subscription_DESC',
+}
+
+export type MembershipUpdateInput = {
+  handle?: Maybe<Scalars['String']>
+  avatarUri?: Maybe<Scalars['String']>
+  about?: Maybe<Scalars['String']>
+  controllerAccount?: Maybe<Scalars['String']>
+  rootAccount?: Maybe<Scalars['String']>
+  createdInBlock?: Maybe<Scalars['Float']>
+  entry?: Maybe<MembershipEntryMethod>
+  subscription?: Maybe<Scalars['Float']>
+}
+
+export type MembershipWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  handle_eq?: Maybe<Scalars['String']>
+  handle_contains?: Maybe<Scalars['String']>
+  handle_startsWith?: Maybe<Scalars['String']>
+  handle_endsWith?: Maybe<Scalars['String']>
+  handle_in?: Maybe<Array<Scalars['String']>>
+  avatarUri_eq?: Maybe<Scalars['String']>
+  avatarUri_contains?: Maybe<Scalars['String']>
+  avatarUri_startsWith?: Maybe<Scalars['String']>
+  avatarUri_endsWith?: Maybe<Scalars['String']>
+  avatarUri_in?: Maybe<Array<Scalars['String']>>
+  about_eq?: Maybe<Scalars['String']>
+  about_contains?: Maybe<Scalars['String']>
+  about_startsWith?: Maybe<Scalars['String']>
+  about_endsWith?: Maybe<Scalars['String']>
+  about_in?: Maybe<Array<Scalars['String']>>
+  controllerAccount_eq?: Maybe<Scalars['String']>
+  controllerAccount_contains?: Maybe<Scalars['String']>
+  controllerAccount_startsWith?: Maybe<Scalars['String']>
+  controllerAccount_endsWith?: Maybe<Scalars['String']>
+  controllerAccount_in?: Maybe<Array<Scalars['String']>>
+  rootAccount_eq?: Maybe<Scalars['String']>
+  rootAccount_contains?: Maybe<Scalars['String']>
+  rootAccount_startsWith?: Maybe<Scalars['String']>
+  rootAccount_endsWith?: Maybe<Scalars['String']>
+  rootAccount_in?: Maybe<Array<Scalars['String']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  entry_eq?: Maybe<MembershipEntryMethod>
+  entry_in?: Maybe<Array<MembershipEntryMethod>>
+  subscription_eq?: Maybe<Scalars['Int']>
+  subscription_gt?: Maybe<Scalars['Int']>
+  subscription_gte?: Maybe<Scalars['Int']>
+  subscription_lt?: Maybe<Scalars['Int']>
+  subscription_lte?: Maybe<Scalars['Int']>
+  subscription_in?: Maybe<Array<Scalars['Int']>>
+  channels_none?: Maybe<ChannelWhereInput>
+  channels_some?: Maybe<ChannelWhereInput>
+  channels_every?: Maybe<ChannelWhereInput>
+  AND?: Maybe<Array<MembershipWhereInput>>
+  OR?: Maybe<Array<MembershipWhereInput>>
+}
+
+export type MembershipWhereUniqueInput = {
+  id?: Maybe<Scalars['ID']>
+  handle?: Maybe<Scalars['String']>
+}
+
+export type NodeLocationMetadata = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** ISO 3166-1 alpha-2 country code (2 letters) */
+  countryCode?: Maybe<Scalars['String']>
+  /** City name */
+  city?: Maybe<Scalars['String']>
+  coordinates?: Maybe<GeoCoordinates>
+  coordinatesId?: Maybe<Scalars['String']>
+  distributionbucketoperatormetadatanodeLocation?: Maybe<Array<DistributionBucketOperatorMetadata>>
+  storagebucketoperatormetadatanodeLocation?: Maybe<Array<StorageBucketOperatorMetadata>>
+}
+
+export type NodeLocationMetadataConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<NodeLocationMetadataEdge>
+  pageInfo: PageInfo
+}
+
+export type NodeLocationMetadataCreateInput = {
+  countryCode?: Maybe<Scalars['String']>
+  city?: Maybe<Scalars['String']>
+  coordinates?: Maybe<Scalars['ID']>
+}
+
+export type NodeLocationMetadataEdge = {
+  node: NodeLocationMetadata
+  cursor: Scalars['String']
+}
+
+export enum NodeLocationMetadataOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  CountryCodeAsc = 'countryCode_ASC',
+  CountryCodeDesc = 'countryCode_DESC',
+  CityAsc = 'city_ASC',
+  CityDesc = 'city_DESC',
+  CoordinatesAsc = 'coordinates_ASC',
+  CoordinatesDesc = 'coordinates_DESC',
+}
+
+export type NodeLocationMetadataUpdateInput = {
+  countryCode?: Maybe<Scalars['String']>
+  city?: Maybe<Scalars['String']>
+  coordinates?: Maybe<Scalars['ID']>
+}
+
+export type NodeLocationMetadataWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  countryCode_eq?: Maybe<Scalars['String']>
+  countryCode_contains?: Maybe<Scalars['String']>
+  countryCode_startsWith?: Maybe<Scalars['String']>
+  countryCode_endsWith?: Maybe<Scalars['String']>
+  countryCode_in?: Maybe<Array<Scalars['String']>>
+  city_eq?: Maybe<Scalars['String']>
+  city_contains?: Maybe<Scalars['String']>
+  city_startsWith?: Maybe<Scalars['String']>
+  city_endsWith?: Maybe<Scalars['String']>
+  city_in?: Maybe<Array<Scalars['String']>>
+  coordinates?: Maybe<GeoCoordinatesWhereInput>
+  distributionbucketoperatormetadatanodeLocation_none?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  distributionbucketoperatormetadatanodeLocation_some?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  distributionbucketoperatormetadatanodeLocation_every?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  storagebucketoperatormetadatanodeLocation_none?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  storagebucketoperatormetadatanodeLocation_some?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  storagebucketoperatormetadatanodeLocation_every?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  AND?: Maybe<Array<NodeLocationMetadataWhereInput>>
+  OR?: Maybe<Array<NodeLocationMetadataWhereInput>>
+}
+
+export type NodeLocationMetadataWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type PageInfo = {
+  hasNextPage: Scalars['Boolean']
+  hasPreviousPage: Scalars['Boolean']
+  startCursor?: Maybe<Scalars['String']>
+  endCursor?: Maybe<Scalars['String']>
+}
+
+export type ProcessorState = {
+  lastCompleteBlock: Scalars['Float']
+  lastProcessedEvent: Scalars['String']
+  indexerHead: Scalars['Float']
+  chainHead: Scalars['Float']
+}
+
+export type Query = {
+  channelCategories: Array<ChannelCategory>
+  channelCategoryByUniqueInput?: Maybe<ChannelCategory>
+  channelCategoriesConnection: ChannelCategoryConnection
+  channels: Array<Channel>
+  channelByUniqueInput?: Maybe<Channel>
+  channelsConnection: ChannelConnection
+  curatorGroups: Array<CuratorGroup>
+  curatorGroupByUniqueInput?: Maybe<CuratorGroup>
+  curatorGroupsConnection: CuratorGroupConnection
+  distributionBucketFamilyGeographicAreas: Array<DistributionBucketFamilyGeographicArea>
+  distributionBucketFamilyGeographicAreaByUniqueInput?: Maybe<DistributionBucketFamilyGeographicArea>
+  distributionBucketFamilyGeographicAreasConnection: DistributionBucketFamilyGeographicAreaConnection
+  distributionBucketFamilyMetadata: Array<DistributionBucketFamilyMetadata>
+  distributionBucketFamilyMetadataByUniqueInput?: Maybe<DistributionBucketFamilyMetadata>
+  distributionBucketFamilyMetadataConnection: DistributionBucketFamilyMetadataConnection
+  distributionBucketFamilies: Array<DistributionBucketFamily>
+  distributionBucketFamilyByUniqueInput?: Maybe<DistributionBucketFamily>
+  distributionBucketFamiliesConnection: DistributionBucketFamilyConnection
+  distributionBucketOperatorMetadata: Array<DistributionBucketOperatorMetadata>
+  distributionBucketOperatorMetadataByUniqueInput?: Maybe<DistributionBucketOperatorMetadata>
+  distributionBucketOperatorMetadataConnection: DistributionBucketOperatorMetadataConnection
+  distributionBucketOperators: Array<DistributionBucketOperator>
+  distributionBucketOperatorByUniqueInput?: Maybe<DistributionBucketOperator>
+  distributionBucketOperatorsConnection: DistributionBucketOperatorConnection
+  distributionBuckets: Array<DistributionBucket>
+  distributionBucketByUniqueInput?: Maybe<DistributionBucket>
+  distributionBucketsConnection: DistributionBucketConnection
+  geoCoordinates: Array<GeoCoordinates>
+  geoCoordinatesByUniqueInput?: Maybe<GeoCoordinates>
+  geoCoordinatesConnection: GeoCoordinatesConnection
+  languages: Array<Language>
+  languageByUniqueInput?: Maybe<Language>
+  languagesConnection: LanguageConnection
+  licenses: Array<License>
+  licenseByUniqueInput?: Maybe<License>
+  licensesConnection: LicenseConnection
+  memberships: Array<Membership>
+  membershipByUniqueInput?: Maybe<Membership>
+  membershipsConnection: MembershipConnection
+  nodeLocationMetadata: Array<NodeLocationMetadata>
+  nodeLocationMetadataByUniqueInput?: Maybe<NodeLocationMetadata>
+  nodeLocationMetadataConnection: NodeLocationMetadataConnection
+  channelCategoriesByName: Array<ChannelCategoriesByNameFtsOutput>
+  membersByHandle: Array<MembersByHandleFtsOutput>
+  search: Array<SearchFtsOutput>
+  videoCategoriesByName: Array<VideoCategoriesByNameFtsOutput>
+  storageBags: Array<StorageBag>
+  storageBagByUniqueInput?: Maybe<StorageBag>
+  storageBagsConnection: StorageBagConnection
+  storageBucketOperatorMetadata: Array<StorageBucketOperatorMetadata>
+  storageBucketOperatorMetadataByUniqueInput?: Maybe<StorageBucketOperatorMetadata>
+  storageBucketOperatorMetadataConnection: StorageBucketOperatorMetadataConnection
+  storageBuckets: Array<StorageBucket>
+  storageBucketByUniqueInput?: Maybe<StorageBucket>
+  storageBucketsConnection: StorageBucketConnection
+  storageDataObjects: Array<StorageDataObject>
+  storageDataObjectByUniqueInput?: Maybe<StorageDataObject>
+  storageDataObjectsConnection: StorageDataObjectConnection
+  storageSystemParameters: Array<StorageSystemParameters>
+  storageSystemParametersByUniqueInput?: Maybe<StorageSystemParameters>
+  storageSystemParametersConnection: StorageSystemParametersConnection
+  videoCategories: Array<VideoCategory>
+  videoCategoryByUniqueInput?: Maybe<VideoCategory>
+  videoCategoriesConnection: VideoCategoryConnection
+  videoMediaEncodings: Array<VideoMediaEncoding>
+  videoMediaEncodingByUniqueInput?: Maybe<VideoMediaEncoding>
+  videoMediaEncodingsConnection: VideoMediaEncodingConnection
+  videoMediaMetadata: Array<VideoMediaMetadata>
+  videoMediaMetadataByUniqueInput?: Maybe<VideoMediaMetadata>
+  videoMediaMetadataConnection: VideoMediaMetadataConnection
+  videos: Array<Video>
+  videoByUniqueInput?: Maybe<Video>
+  videosConnection: VideoConnection
+  workers: Array<Worker>
+  workerByUniqueInput?: Maybe<Worker>
+  workersConnection: WorkerConnection
+}
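
// Illustrative sketch (not part of the generated schema): how the Query and
// where-input types above could type a plain GraphQL request to a query node.
// The endpoint URL and the global fetch API (Node 18+) are assumptions.
async function fetchStorageBuckets(
  endpoint: string,
  variables: QueryStorageBucketsArgs
): Promise<Array<Pick<StorageBucket, 'id' | 'acceptingNewBags' | 'dataObjectsSize'>>> {
  const query = `
    query ($where: StorageBucketWhereInput, $limit: Int) {
      storageBuckets(where: $where, limit: $limit) {
        id
        acceptingNewBags
        dataObjectsSize
      }
    }`
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, variables }),
  })
  const { data } = await response.json()
  return data.storageBuckets
}

// Example: buckets that currently accept new bags.
// fetchStorageBuckets('http://localhost:8081/graphql', {
//   where: { acceptingNewBags_eq: true },
//   limit: 10,
// })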
+
+export type QueryChannelCategoriesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<ChannelCategoryWhereInput>
+  orderBy?: Maybe<Array<ChannelCategoryOrderByInput>>
+}
+
+export type QueryChannelCategoryByUniqueInputArgs = {
+  where: ChannelCategoryWhereUniqueInput
+}
+
+export type QueryChannelCategoriesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<ChannelCategoryWhereInput>
+  orderBy?: Maybe<Array<ChannelCategoryOrderByInput>>
+}
+
+export type QueryChannelsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<ChannelWhereInput>
+  orderBy?: Maybe<Array<ChannelOrderByInput>>
+}
+
+export type QueryChannelByUniqueInputArgs = {
+  where: ChannelWhereUniqueInput
+}
+
+export type QueryChannelsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<ChannelWhereInput>
+  orderBy?: Maybe<Array<ChannelOrderByInput>>
+}
+
+export type QueryCuratorGroupsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<CuratorGroupWhereInput>
+  orderBy?: Maybe<Array<CuratorGroupOrderByInput>>
+}
+
+export type QueryCuratorGroupByUniqueInputArgs = {
+  where: CuratorGroupWhereUniqueInput
+}
+
+export type QueryCuratorGroupsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<CuratorGroupWhereInput>
+  orderBy?: Maybe<Array<CuratorGroupOrderByInput>>
+}
+
+export type QueryDistributionBucketFamilyGeographicAreasArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketFamilyGeographicAreaWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyGeographicAreaOrderByInput>>
+}
+
+export type QueryDistributionBucketFamilyGeographicAreaByUniqueInputArgs = {
+  where: DistributionBucketFamilyGeographicAreaWhereUniqueInput
+}
+
+export type QueryDistributionBucketFamilyGeographicAreasConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketFamilyGeographicAreaWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyGeographicAreaOrderByInput>>
+}
+
+export type QueryDistributionBucketFamilyMetadataArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketFamilyMetadataWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyMetadataOrderByInput>>
+}
+
+export type QueryDistributionBucketFamilyMetadataByUniqueInputArgs = {
+  where: DistributionBucketFamilyMetadataWhereUniqueInput
+}
+
+export type QueryDistributionBucketFamilyMetadataConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketFamilyMetadataWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyMetadataOrderByInput>>
+}
+
+export type QueryDistributionBucketFamiliesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketFamilyWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyOrderByInput>>
+}
+
+export type QueryDistributionBucketFamilyByUniqueInputArgs = {
+  where: DistributionBucketFamilyWhereUniqueInput
+}
+
+export type QueryDistributionBucketFamiliesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketFamilyWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketFamilyOrderByInput>>
+}
+
+export type QueryDistributionBucketOperatorMetadataArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOperatorMetadataOrderByInput>>
+}
+
+export type QueryDistributionBucketOperatorMetadataByUniqueInputArgs = {
+  where: DistributionBucketOperatorMetadataWhereUniqueInput
+}
+
+export type QueryDistributionBucketOperatorMetadataConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketOperatorMetadataWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOperatorMetadataOrderByInput>>
+}
+
+export type QueryDistributionBucketOperatorsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketOperatorWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOperatorOrderByInput>>
+}
+
+export type QueryDistributionBucketOperatorByUniqueInputArgs = {
+  where: DistributionBucketOperatorWhereUniqueInput
+}
+
+export type QueryDistributionBucketOperatorsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketOperatorWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOperatorOrderByInput>>
+}
+
+export type QueryDistributionBucketsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<DistributionBucketWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOrderByInput>>
+}
+
+export type QueryDistributionBucketByUniqueInputArgs = {
+  where: DistributionBucketWhereUniqueInput
+}
+
+export type QueryDistributionBucketsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<DistributionBucketWhereInput>
+  orderBy?: Maybe<Array<DistributionBucketOrderByInput>>
+}
+
+export type QueryGeoCoordinatesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<GeoCoordinatesWhereInput>
+  orderBy?: Maybe<Array<GeoCoordinatesOrderByInput>>
+}
+
+export type QueryGeoCoordinatesByUniqueInputArgs = {
+  where: GeoCoordinatesWhereUniqueInput
+}
+
+export type QueryGeoCoordinatesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<GeoCoordinatesWhereInput>
+  orderBy?: Maybe<Array<GeoCoordinatesOrderByInput>>
+}
+
+export type QueryLanguagesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<LanguageWhereInput>
+  orderBy?: Maybe<Array<LanguageOrderByInput>>
+}
+
+export type QueryLanguageByUniqueInputArgs = {
+  where: LanguageWhereUniqueInput
+}
+
+export type QueryLanguagesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<LanguageWhereInput>
+  orderBy?: Maybe<Array<LanguageOrderByInput>>
+}
+
+export type QueryLicensesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<LicenseWhereInput>
+  orderBy?: Maybe<Array<LicenseOrderByInput>>
+}
+
+export type QueryLicenseByUniqueInputArgs = {
+  where: LicenseWhereUniqueInput
+}
+
+export type QueryLicensesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<LicenseWhereInput>
+  orderBy?: Maybe<Array<LicenseOrderByInput>>
+}
+
+export type QueryMembershipsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<MembershipWhereInput>
+  orderBy?: Maybe<Array<MembershipOrderByInput>>
+}
+
+export type QueryMembershipByUniqueInputArgs = {
+  where: MembershipWhereUniqueInput
+}
+
+export type QueryMembershipsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<MembershipWhereInput>
+  orderBy?: Maybe<Array<MembershipOrderByInput>>
+}
+
+export type QueryNodeLocationMetadataArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<NodeLocationMetadataWhereInput>
+  orderBy?: Maybe<Array<NodeLocationMetadataOrderByInput>>
+}
+
+export type QueryNodeLocationMetadataByUniqueInputArgs = {
+  where: NodeLocationMetadataWhereUniqueInput
+}
+
+export type QueryNodeLocationMetadataConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<NodeLocationMetadataWhereInput>
+  orderBy?: Maybe<Array<NodeLocationMetadataOrderByInput>>
+}
+
+export type QueryChannelCategoriesByNameArgs = {
+  whereChannelCategory?: Maybe<ChannelCategoryWhereInput>
+  skip?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  text: Scalars['String']
+}
+
+export type QueryMembersByHandleArgs = {
+  whereMembership?: Maybe<MembershipWhereInput>
+  skip?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  text: Scalars['String']
+}
+
+export type QuerySearchArgs = {
+  whereVideo?: Maybe<VideoWhereInput>
+  whereChannel?: Maybe<ChannelWhereInput>
+  skip?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  text: Scalars['String']
+}
+
+export type QueryVideoCategoriesByNameArgs = {
+  whereVideoCategory?: Maybe<VideoCategoryWhereInput>
+  skip?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  text: Scalars['String']
+}
+
+export type QueryStorageBagsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<StorageBagWhereInput>
+  orderBy?: Maybe<Array<StorageBagOrderByInput>>
+}
+
+export type QueryStorageBagByUniqueInputArgs = {
+  where: StorageBagWhereUniqueInput
+}
+
+export type QueryStorageBagsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<StorageBagWhereInput>
+  orderBy?: Maybe<Array<StorageBagOrderByInput>>
+}
+
+export type QueryStorageBucketOperatorMetadataArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  orderBy?: Maybe<Array<StorageBucketOperatorMetadataOrderByInput>>
+}
+
+export type QueryStorageBucketOperatorMetadataByUniqueInputArgs = {
+  where: StorageBucketOperatorMetadataWhereUniqueInput
+}
+
+export type QueryStorageBucketOperatorMetadataConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  orderBy?: Maybe<Array<StorageBucketOperatorMetadataOrderByInput>>
+}
+
+export type QueryStorageBucketsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<StorageBucketWhereInput>
+  orderBy?: Maybe<Array<StorageBucketOrderByInput>>
+}
+
+export type QueryStorageBucketByUniqueInputArgs = {
+  where: StorageBucketWhereUniqueInput
+}
+
+export type QueryStorageBucketsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<StorageBucketWhereInput>
+  orderBy?: Maybe<Array<StorageBucketOrderByInput>>
+}
+
+export type QueryStorageDataObjectsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<StorageDataObjectWhereInput>
+  orderBy?: Maybe<Array<StorageDataObjectOrderByInput>>
+}
+
+export type QueryStorageDataObjectByUniqueInputArgs = {
+  where: StorageDataObjectWhereUniqueInput
+}
+
+export type QueryStorageDataObjectsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<StorageDataObjectWhereInput>
+  orderBy?: Maybe<Array<StorageDataObjectOrderByInput>>
+}
+
+export type QueryStorageSystemParametersArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<StorageSystemParametersWhereInput>
+  orderBy?: Maybe<Array<StorageSystemParametersOrderByInput>>
+}
+
+export type QueryStorageSystemParametersByUniqueInputArgs = {
+  where: StorageSystemParametersWhereUniqueInput
+}
+
+export type QueryStorageSystemParametersConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<StorageSystemParametersWhereInput>
+  orderBy?: Maybe<Array<StorageSystemParametersOrderByInput>>
+}
+
+export type QueryVideoCategoriesArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<VideoCategoryWhereInput>
+  orderBy?: Maybe<Array<VideoCategoryOrderByInput>>
+}
+
+export type QueryVideoCategoryByUniqueInputArgs = {
+  where: VideoCategoryWhereUniqueInput
+}
+
+export type QueryVideoCategoriesConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<VideoCategoryWhereInput>
+  orderBy?: Maybe<Array<VideoCategoryOrderByInput>>
+}
+
+export type QueryVideoMediaEncodingsArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<VideoMediaEncodingWhereInput>
+  orderBy?: Maybe<Array<VideoMediaEncodingOrderByInput>>
+}
+
+export type QueryVideoMediaEncodingByUniqueInputArgs = {
+  where: VideoMediaEncodingWhereUniqueInput
+}
+
+export type QueryVideoMediaEncodingsConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<VideoMediaEncodingWhereInput>
+  orderBy?: Maybe<Array<VideoMediaEncodingOrderByInput>>
+}
+
+export type QueryVideoMediaMetadataArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<VideoMediaMetadataWhereInput>
+  orderBy?: Maybe<Array<VideoMediaMetadataOrderByInput>>
+}
+
+export type QueryVideoMediaMetadataByUniqueInputArgs = {
+  where: VideoMediaMetadataWhereUniqueInput
+}
+
+export type QueryVideoMediaMetadataConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<VideoMediaMetadataWhereInput>
+  orderBy?: Maybe<Array<VideoMediaMetadataOrderByInput>>
+}
+
+export type QueryVideosArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<VideoWhereInput>
+  orderBy?: Maybe<Array<VideoOrderByInput>>
+}
+
+export type QueryVideoByUniqueInputArgs = {
+  where: VideoWhereUniqueInput
+}
+
+export type QueryVideosConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<VideoWhereInput>
+  orderBy?: Maybe<Array<VideoOrderByInput>>
+}
+
+export type QueryWorkersArgs = {
+  offset?: Maybe<Scalars['Int']>
+  limit?: Maybe<Scalars['Int']>
+  where?: Maybe<WorkerWhereInput>
+  orderBy?: Maybe<Array<WorkerOrderByInput>>
+}
+
+export type QueryWorkerByUniqueInputArgs = {
+  where: WorkerWhereUniqueInput
+}
+
+export type QueryWorkersConnectionArgs = {
+  first?: Maybe<Scalars['Int']>
+  after?: Maybe<Scalars['String']>
+  last?: Maybe<Scalars['Int']>
+  before?: Maybe<Scalars['String']>
+  where?: Maybe<WorkerWhereInput>
+  orderBy?: Maybe<Array<WorkerOrderByInput>>
+}
+
+export type SearchFtsOutput = {
+  item: SearchSearchResult
+  rank: Scalars['Float']
+  isTypeOf: Scalars['String']
+  highlight: Scalars['String']
+}
+
+export type SearchSearchResult = Channel | Video
+
+export type StandardDeleteResponse = {
+  id: Scalars['ID']
+}
+
+export type StorageBag = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  objects: Array<StorageDataObject>
+  storageBuckets: Array<StorageBucket>
+  distributionBuckets: Array<DistributionBucket>
+  /** Owner of the storage bag */
+  owner: StorageBagOwner
+}
+
+export type StorageBagConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<StorageBagEdge>
+  pageInfo: PageInfo
+}
+
+export type StorageBagCreateInput = {
+  owner: Scalars['JSONObject']
+}
+
+export type StorageBagEdge = {
+  node: StorageBag
+  cursor: Scalars['String']
+}
+
+export enum StorageBagOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+}
+
+export type StorageBagOwner =
+  | StorageBagOwnerCouncil
+  | StorageBagOwnerWorkingGroup
+  | StorageBagOwnerMember
+  | StorageBagOwnerChannel
+  | StorageBagOwnerDao
+
+export type StorageBagOwnerChannel = {
+  channelId?: Maybe<Scalars['Int']>
+}
+
+export type StorageBagOwnerCouncil = {
+  phantom?: Maybe<Scalars['Int']>
+}
+
+export type StorageBagOwnerDao = {
+  daoId?: Maybe<Scalars['Int']>
+}
+
+export type StorageBagOwnerMember = {
+  memberId?: Maybe<Scalars['Int']>
+}
+
+export type StorageBagOwnerWorkingGroup = {
+  workingGroupId?: Maybe<Scalars['String']>
+}
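
// Illustrative sketch: the StorageBagOwner variants above carry no shared
// discriminant field, so the TypeScript `in` operator (or selecting __typename
// in the query) is one way to tell them apart.
function describeBagOwner(owner: StorageBagOwner): string {
  if ('channelId' in owner) return `channel ${owner.channelId}`
  if ('memberId' in owner) return `member ${owner.memberId}`
  if ('workingGroupId' in owner) return `working group ${owner.workingGroupId}`
  if ('daoId' in owner) return `DAO ${owner.daoId}`
  return 'council'
}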
+
+export type StorageBagUpdateInput = {
+  owner?: Maybe<Scalars['JSONObject']>
+}
+
+export type StorageBagWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  owner_json?: Maybe<Scalars['JSONObject']>
+  objects_none?: Maybe<StorageDataObjectWhereInput>
+  objects_some?: Maybe<StorageDataObjectWhereInput>
+  objects_every?: Maybe<StorageDataObjectWhereInput>
+  storageBuckets_none?: Maybe<StorageBucketWhereInput>
+  storageBuckets_some?: Maybe<StorageBucketWhereInput>
+  storageBuckets_every?: Maybe<StorageBucketWhereInput>
+  distributionBuckets_none?: Maybe<DistributionBucketWhereInput>
+  distributionBuckets_some?: Maybe<DistributionBucketWhereInput>
+  distributionBuckets_every?: Maybe<DistributionBucketWhereInput>
+  AND?: Maybe<Array<StorageBagWhereInput>>
+  OR?: Maybe<Array<StorageBagWhereInput>>
+}
+
+export type StorageBagWhereUniqueInput = {
+  id: Scalars['ID']
+}
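
// Illustrative sketch: combining the relation filters above to select the bags
// assigned to one storage bucket. The bucket id value is a placeholder.
const bagsForBucket = (bucketId: string): StorageBagWhereInput => ({
  storageBuckets_some: { id_eq: bucketId },
})

// e.g. as variables for the storageBags query field:
const storageBagsArgs: QueryStorageBagsArgs = { limit: 1000, where: bagsForBucket('5') }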
+
+export type StorageBucket = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Current bucket operator status */
+  operatorStatus: StorageBucketOperatorStatus
+  operatorMetadata?: Maybe<StorageBucketOperatorMetadata>
+  operatorMetadataId?: Maybe<Scalars['String']>
+  /** Whether the bucket is accepting any new storage bags */
+  acceptingNewBags: Scalars['Boolean']
+  bags: Array<StorageBag>
+  /** Bucket's data object size limit in bytes */
+  dataObjectsSizeLimit: Scalars['BigInt']
+  /** Bucket's data object count limit */
+  dataObjectCountLimit: Scalars['BigInt']
+  /** Number of assigned data objects */
+  dataObjectsCount: Scalars['BigInt']
+  /** Total size of assigned data objects */
+  dataObjectsSize: Scalars['BigInt']
+}
+
+export type StorageBucketConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<StorageBucketEdge>
+  pageInfo: PageInfo
+}
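
// Illustrative sketch: Relay-style paging over storageBucketsConnection using
// the PageInfo cursors, assuming a hypothetical runQuery helper that posts the
// corresponding GraphQL document and returns a typed connection.
async function allStorageBucketIds(
  runQuery: (args: QueryStorageBucketsConnectionArgs) => Promise<StorageBucketConnection>
): Promise<string[]> {
  const ids: string[] = []
  let after: QueryStorageBucketsConnectionArgs['after'] = undefined
  for (;;) {
    const page = await runQuery({ first: 100, after })
    ids.push(...page.edges.map((edge) => edge.node.id))
    if (!page.pageInfo.hasNextPage || !page.pageInfo.endCursor) break
    after = page.pageInfo.endCursor
  }
  return ids
}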
+
+export type StorageBucketCreateInput = {
+  operatorStatus: Scalars['JSONObject']
+  operatorMetadata?: Maybe<Scalars['ID']>
+  acceptingNewBags: Scalars['Boolean']
+  dataObjectsSizeLimit: Scalars['String']
+  dataObjectCountLimit: Scalars['String']
+  dataObjectsCount: Scalars['String']
+  dataObjectsSize: Scalars['String']
+}
+
+export type StorageBucketEdge = {
+  node: StorageBucket
+  cursor: Scalars['String']
+}
+
+export type StorageBucketOperatorMetadata = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Root node endpoint */
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<NodeLocationMetadata>
+  nodeLocationId?: Maybe<Scalars['String']>
+  /** Additional information about the node/operator */
+  extra?: Maybe<Scalars['String']>
+  storagebucketoperatorMetadata?: Maybe<Array<StorageBucket>>
+}
+
+export type StorageBucketOperatorMetadataConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<StorageBucketOperatorMetadataEdge>
+  pageInfo: PageInfo
+}
+
+export type StorageBucketOperatorMetadataCreateInput = {
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<Scalars['ID']>
+  extra?: Maybe<Scalars['String']>
+}
+
+export type StorageBucketOperatorMetadataEdge = {
+  node: StorageBucketOperatorMetadata
+  cursor: Scalars['String']
+}
+
+export enum StorageBucketOperatorMetadataOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  NodeEndpointAsc = 'nodeEndpoint_ASC',
+  NodeEndpointDesc = 'nodeEndpoint_DESC',
+  NodeLocationAsc = 'nodeLocation_ASC',
+  NodeLocationDesc = 'nodeLocation_DESC',
+  ExtraAsc = 'extra_ASC',
+  ExtraDesc = 'extra_DESC',
+}
+
+export type StorageBucketOperatorMetadataUpdateInput = {
+  nodeEndpoint?: Maybe<Scalars['String']>
+  nodeLocation?: Maybe<Scalars['ID']>
+  extra?: Maybe<Scalars['String']>
+}
+
+export type StorageBucketOperatorMetadataWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  nodeEndpoint_eq?: Maybe<Scalars['String']>
+  nodeEndpoint_contains?: Maybe<Scalars['String']>
+  nodeEndpoint_startsWith?: Maybe<Scalars['String']>
+  nodeEndpoint_endsWith?: Maybe<Scalars['String']>
+  nodeEndpoint_in?: Maybe<Array<Scalars['String']>>
+  extra_eq?: Maybe<Scalars['String']>
+  extra_contains?: Maybe<Scalars['String']>
+  extra_startsWith?: Maybe<Scalars['String']>
+  extra_endsWith?: Maybe<Scalars['String']>
+  extra_in?: Maybe<Array<Scalars['String']>>
+  nodeLocation?: Maybe<NodeLocationMetadataWhereInput>
+  storagebucketoperatorMetadata_none?: Maybe<StorageBucketWhereInput>
+  storagebucketoperatorMetadata_some?: Maybe<StorageBucketWhereInput>
+  storagebucketoperatorMetadata_every?: Maybe<StorageBucketWhereInput>
+  AND?: Maybe<Array<StorageBucketOperatorMetadataWhereInput>>
+  OR?: Maybe<Array<StorageBucketOperatorMetadataWhereInput>>
+}
+
+export type StorageBucketOperatorMetadataWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type StorageBucketOperatorStatus =
+  | StorageBucketOperatorStatusMissing
+  | StorageBucketOperatorStatusInvited
+  | StorageBucketOperatorStatusActive
+
+export type StorageBucketOperatorStatusActive = {
+  workerId: Scalars['Int']
+}
+
+export type StorageBucketOperatorStatusInvited = {
+  workerId: Scalars['Int']
+}
+
+export type StorageBucketOperatorStatusMissing = {
+  phantom?: Maybe<Scalars['Int']>
+}
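
// Illustrative sketch: unlike StorageBagOwner, the Invited and Active operator
// statuses above have identical shapes (both expose workerId), so they cannot
// be told apart structurally; a client would also select __typename and branch
// on it, as in this hypothetical helper.
function activeWorkerId(typename: string, status: StorageBucketOperatorStatus): number | null {
  return typename === 'StorageBucketOperatorStatusActive' && 'workerId' in status
    ? status.workerId
    : null
}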
+
+export enum StorageBucketOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  OperatorMetadataAsc = 'operatorMetadata_ASC',
+  OperatorMetadataDesc = 'operatorMetadata_DESC',
+  AcceptingNewBagsAsc = 'acceptingNewBags_ASC',
+  AcceptingNewBagsDesc = 'acceptingNewBags_DESC',
+  DataObjectsSizeLimitAsc = 'dataObjectsSizeLimit_ASC',
+  DataObjectsSizeLimitDesc = 'dataObjectsSizeLimit_DESC',
+  DataObjectCountLimitAsc = 'dataObjectCountLimit_ASC',
+  DataObjectCountLimitDesc = 'dataObjectCountLimit_DESC',
+  DataObjectsCountAsc = 'dataObjectsCount_ASC',
+  DataObjectsCountDesc = 'dataObjectsCount_DESC',
+  DataObjectsSizeAsc = 'dataObjectsSize_ASC',
+  DataObjectsSizeDesc = 'dataObjectsSize_DESC',
+}
+
+export type StorageBucketUpdateInput = {
+  operatorStatus?: Maybe<Scalars['JSONObject']>
+  operatorMetadata?: Maybe<Scalars['ID']>
+  acceptingNewBags?: Maybe<Scalars['Boolean']>
+  dataObjectsSizeLimit?: Maybe<Scalars['String']>
+  dataObjectCountLimit?: Maybe<Scalars['String']>
+  dataObjectsCount?: Maybe<Scalars['String']>
+  dataObjectsSize?: Maybe<Scalars['String']>
+}
+
+export type StorageBucketWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  operatorStatus_json?: Maybe<Scalars['JSONObject']>
+  acceptingNewBags_eq?: Maybe<Scalars['Boolean']>
+  acceptingNewBags_in?: Maybe<Array<Scalars['Boolean']>>
+  dataObjectsSizeLimit_eq?: Maybe<Scalars['BigInt']>
+  dataObjectsSizeLimit_gt?: Maybe<Scalars['BigInt']>
+  dataObjectsSizeLimit_gte?: Maybe<Scalars['BigInt']>
+  dataObjectsSizeLimit_lt?: Maybe<Scalars['BigInt']>
+  dataObjectsSizeLimit_lte?: Maybe<Scalars['BigInt']>
+  dataObjectsSizeLimit_in?: Maybe<Array<Scalars['BigInt']>>
+  dataObjectCountLimit_eq?: Maybe<Scalars['BigInt']>
+  dataObjectCountLimit_gt?: Maybe<Scalars['BigInt']>
+  dataObjectCountLimit_gte?: Maybe<Scalars['BigInt']>
+  dataObjectCountLimit_lt?: Maybe<Scalars['BigInt']>
+  dataObjectCountLimit_lte?: Maybe<Scalars['BigInt']>
+  dataObjectCountLimit_in?: Maybe<Array<Scalars['BigInt']>>
+  dataObjectsCount_eq?: Maybe<Scalars['BigInt']>
+  dataObjectsCount_gt?: Maybe<Scalars['BigInt']>
+  dataObjectsCount_gte?: Maybe<Scalars['BigInt']>
+  dataObjectsCount_lt?: Maybe<Scalars['BigInt']>
+  dataObjectsCount_lte?: Maybe<Scalars['BigInt']>
+  dataObjectsCount_in?: Maybe<Array<Scalars['BigInt']>>
+  dataObjectsSize_eq?: Maybe<Scalars['BigInt']>
+  dataObjectsSize_gt?: Maybe<Scalars['BigInt']>
+  dataObjectsSize_gte?: Maybe<Scalars['BigInt']>
+  dataObjectsSize_lt?: Maybe<Scalars['BigInt']>
+  dataObjectsSize_lte?: Maybe<Scalars['BigInt']>
+  dataObjectsSize_in?: Maybe<Array<Scalars['BigInt']>>
+  operatorMetadata?: Maybe<StorageBucketOperatorMetadataWhereInput>
+  bags_none?: Maybe<StorageBagWhereInput>
+  bags_some?: Maybe<StorageBagWhereInput>
+  bags_every?: Maybe<StorageBagWhereInput>
+  AND?: Maybe<Array<StorageBucketWhereInput>>
+  OR?: Maybe<Array<StorageBucketWhereInput>>
+}
+
+export type StorageBucketWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type StorageDataObject = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Whether the data object was uploaded and accepted by the storage provider */
+  isAccepted: Scalars['Boolean']
+  /** Data object size in bytes */
+  size: Scalars['BigInt']
+  storageBag: StorageBag
+  storageBagId: Scalars['String']
+  /** IPFS content hash */
+  ipfsHash: Scalars['String']
+  /** The type of the asset that the data object represents (if known) */
+  type: DataObjectType
+  /** Prize for removing the data object */
+  deletionPrize: Scalars['BigInt']
+  /** If the object is no longer used as an asset - the time at which it was unset (if known) */
+  unsetAt?: Maybe<Scalars['DateTime']>
+  channelcoverPhoto?: Maybe<Array<Channel>>
+  channelavatarPhoto?: Maybe<Array<Channel>>
+  videothumbnailPhoto?: Maybe<Array<Video>>
+  videomedia?: Maybe<Array<Video>>
+}
+
+export type StorageDataObjectConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<StorageDataObjectEdge>
+  pageInfo: PageInfo
+}
+
+export type StorageDataObjectCreateInput = {
+  isAccepted: Scalars['Boolean']
+  size: Scalars['String']
+  storageBag: Scalars['ID']
+  ipfsHash: Scalars['String']
+  type: Scalars['JSONObject']
+  deletionPrize: Scalars['String']
+  unsetAt?: Maybe<Scalars['DateTime']>
+}
+
+export type StorageDataObjectEdge = {
+  node: StorageDataObject
+  cursor: Scalars['String']
+}
+
+export enum StorageDataObjectOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  IsAcceptedAsc = 'isAccepted_ASC',
+  IsAcceptedDesc = 'isAccepted_DESC',
+  SizeAsc = 'size_ASC',
+  SizeDesc = 'size_DESC',
+  StorageBagAsc = 'storageBag_ASC',
+  StorageBagDesc = 'storageBag_DESC',
+  IpfsHashAsc = 'ipfsHash_ASC',
+  IpfsHashDesc = 'ipfsHash_DESC',
+  DeletionPrizeAsc = 'deletionPrize_ASC',
+  DeletionPrizeDesc = 'deletionPrize_DESC',
+  UnsetAtAsc = 'unsetAt_ASC',
+  UnsetAtDesc = 'unsetAt_DESC',
+}
+
+export type StorageDataObjectUpdateInput = {
+  isAccepted?: Maybe<Scalars['Boolean']>
+  size?: Maybe<Scalars['String']>
+  storageBag?: Maybe<Scalars['ID']>
+  ipfsHash?: Maybe<Scalars['String']>
+  type?: Maybe<Scalars['JSONObject']>
+  deletionPrize?: Maybe<Scalars['String']>
+  unsetAt?: Maybe<Scalars['DateTime']>
+}
+
+export type StorageDataObjectWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  isAccepted_eq?: Maybe<Scalars['Boolean']>
+  isAccepted_in?: Maybe<Array<Scalars['Boolean']>>
+  size_eq?: Maybe<Scalars['BigInt']>
+  size_gt?: Maybe<Scalars['BigInt']>
+  size_gte?: Maybe<Scalars['BigInt']>
+  size_lt?: Maybe<Scalars['BigInt']>
+  size_lte?: Maybe<Scalars['BigInt']>
+  size_in?: Maybe<Array<Scalars['BigInt']>>
+  ipfsHash_eq?: Maybe<Scalars['String']>
+  ipfsHash_contains?: Maybe<Scalars['String']>
+  ipfsHash_startsWith?: Maybe<Scalars['String']>
+  ipfsHash_endsWith?: Maybe<Scalars['String']>
+  ipfsHash_in?: Maybe<Array<Scalars['String']>>
+  type_json?: Maybe<Scalars['JSONObject']>
+  deletionPrize_eq?: Maybe<Scalars['BigInt']>
+  deletionPrize_gt?: Maybe<Scalars['BigInt']>
+  deletionPrize_gte?: Maybe<Scalars['BigInt']>
+  deletionPrize_lt?: Maybe<Scalars['BigInt']>
+  deletionPrize_lte?: Maybe<Scalars['BigInt']>
+  deletionPrize_in?: Maybe<Array<Scalars['BigInt']>>
+  unsetAt_eq?: Maybe<Scalars['DateTime']>
+  unsetAt_lt?: Maybe<Scalars['DateTime']>
+  unsetAt_lte?: Maybe<Scalars['DateTime']>
+  unsetAt_gt?: Maybe<Scalars['DateTime']>
+  unsetAt_gte?: Maybe<Scalars['DateTime']>
+  storageBag?: Maybe<StorageBagWhereInput>
+  channelcoverPhoto_none?: Maybe<ChannelWhereInput>
+  channelcoverPhoto_some?: Maybe<ChannelWhereInput>
+  channelcoverPhoto_every?: Maybe<ChannelWhereInput>
+  channelavatarPhoto_none?: Maybe<ChannelWhereInput>
+  channelavatarPhoto_some?: Maybe<ChannelWhereInput>
+  channelavatarPhoto_every?: Maybe<ChannelWhereInput>
+  videothumbnailPhoto_none?: Maybe<VideoWhereInput>
+  videothumbnailPhoto_some?: Maybe<VideoWhereInput>
+  videothumbnailPhoto_every?: Maybe<VideoWhereInput>
+  videomedia_none?: Maybe<VideoWhereInput>
+  videomedia_some?: Maybe<VideoWhereInput>
+  videomedia_every?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<StorageDataObjectWhereInput>>
+  OR?: Maybe<Array<StorageDataObjectWhereInput>>
+}
+
+export type StorageDataObjectWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+/** Global storage system parameters */
+export type StorageSystemParameters = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Blacklisted content hashes */
+  blacklist: Array<Scalars['String']>
+  /** How many buckets can be assigned to store a bag */
+  storageBucketsPerBagLimit: Scalars['Int']
+  /** How many buckets can be assigned to distribute a bag */
+  distributionBucketsPerBagLimit: Scalars['Int']
+  /** Whether the uploading is globally blocked */
+  uploadingBlocked: Scalars['Boolean']
+  /** Additional fee for storing 1 MB of data */
+  dataObjectFeePerMb: Scalars['BigInt']
+  /** Global max. number of objects a storage bucket can store (can also be further limited by the provider) */
+  storageBucketMaxObjectsCountLimit: Scalars['BigInt']
+  /** Global max. size of objects a storage bucket can store (can also be further limited by the provider) */
+  storageBucketMaxObjectsSizeLimit: Scalars['BigInt']
+  /** ID that will be assigned to the next created data object */
+  nextDataObjectId: Scalars['BigInt']
+}
+
+export type StorageSystemParametersConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<StorageSystemParametersEdge>
+  pageInfo: PageInfo
+}
+
+export type StorageSystemParametersCreateInput = {
+  blacklist: Array<Scalars['String']>
+  storageBucketsPerBagLimit: Scalars['Float']
+  distributionBucketsPerBagLimit: Scalars['Float']
+  uploadingBlocked: Scalars['Boolean']
+  dataObjectFeePerMb: Scalars['String']
+  storageBucketMaxObjectsCountLimit: Scalars['String']
+  storageBucketMaxObjectsSizeLimit: Scalars['String']
+  nextDataObjectId: Scalars['String']
+}
+
+export type StorageSystemParametersEdge = {
+  node: StorageSystemParameters
+  cursor: Scalars['String']
+}
+
+export enum StorageSystemParametersOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  StorageBucketsPerBagLimitAsc = 'storageBucketsPerBagLimit_ASC',
+  StorageBucketsPerBagLimitDesc = 'storageBucketsPerBagLimit_DESC',
+  DistributionBucketsPerBagLimitAsc = 'distributionBucketsPerBagLimit_ASC',
+  DistributionBucketsPerBagLimitDesc = 'distributionBucketsPerBagLimit_DESC',
+  UploadingBlockedAsc = 'uploadingBlocked_ASC',
+  UploadingBlockedDesc = 'uploadingBlocked_DESC',
+  DataObjectFeePerMbAsc = 'dataObjectFeePerMb_ASC',
+  DataObjectFeePerMbDesc = 'dataObjectFeePerMb_DESC',
+  StorageBucketMaxObjectsCountLimitAsc = 'storageBucketMaxObjectsCountLimit_ASC',
+  StorageBucketMaxObjectsCountLimitDesc = 'storageBucketMaxObjectsCountLimit_DESC',
+  StorageBucketMaxObjectsSizeLimitAsc = 'storageBucketMaxObjectsSizeLimit_ASC',
+  StorageBucketMaxObjectsSizeLimitDesc = 'storageBucketMaxObjectsSizeLimit_DESC',
+  NextDataObjectIdAsc = 'nextDataObjectId_ASC',
+  NextDataObjectIdDesc = 'nextDataObjectId_DESC',
+}
+
+export type StorageSystemParametersUpdateInput = {
+  blacklist?: Maybe<Array<Scalars['String']>>
+  storageBucketsPerBagLimit?: Maybe<Scalars['Float']>
+  distributionBucketsPerBagLimit?: Maybe<Scalars['Float']>
+  uploadingBlocked?: Maybe<Scalars['Boolean']>
+  dataObjectFeePerMb?: Maybe<Scalars['String']>
+  storageBucketMaxObjectsCountLimit?: Maybe<Scalars['String']>
+  storageBucketMaxObjectsSizeLimit?: Maybe<Scalars['String']>
+  nextDataObjectId?: Maybe<Scalars['String']>
+}
+
+export type StorageSystemParametersWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  blacklist_containsAll?: Maybe<Array<Scalars['String']>>
+  blacklist_containsNone?: Maybe<Array<Scalars['String']>>
+  blacklist_containsAny?: Maybe<Array<Scalars['String']>>
+  storageBucketsPerBagLimit_eq?: Maybe<Scalars['Int']>
+  storageBucketsPerBagLimit_gt?: Maybe<Scalars['Int']>
+  storageBucketsPerBagLimit_gte?: Maybe<Scalars['Int']>
+  storageBucketsPerBagLimit_lt?: Maybe<Scalars['Int']>
+  storageBucketsPerBagLimit_lte?: Maybe<Scalars['Int']>
+  storageBucketsPerBagLimit_in?: Maybe<Array<Scalars['Int']>>
+  distributionBucketsPerBagLimit_eq?: Maybe<Scalars['Int']>
+  distributionBucketsPerBagLimit_gt?: Maybe<Scalars['Int']>
+  distributionBucketsPerBagLimit_gte?: Maybe<Scalars['Int']>
+  distributionBucketsPerBagLimit_lt?: Maybe<Scalars['Int']>
+  distributionBucketsPerBagLimit_lte?: Maybe<Scalars['Int']>
+  distributionBucketsPerBagLimit_in?: Maybe<Array<Scalars['Int']>>
+  uploadingBlocked_eq?: Maybe<Scalars['Boolean']>
+  uploadingBlocked_in?: Maybe<Array<Scalars['Boolean']>>
+  dataObjectFeePerMb_eq?: Maybe<Scalars['BigInt']>
+  dataObjectFeePerMb_gt?: Maybe<Scalars['BigInt']>
+  dataObjectFeePerMb_gte?: Maybe<Scalars['BigInt']>
+  dataObjectFeePerMb_lt?: Maybe<Scalars['BigInt']>
+  dataObjectFeePerMb_lte?: Maybe<Scalars['BigInt']>
+  dataObjectFeePerMb_in?: Maybe<Array<Scalars['BigInt']>>
+  storageBucketMaxObjectsCountLimit_eq?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsCountLimit_gt?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsCountLimit_gte?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsCountLimit_lt?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsCountLimit_lte?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsCountLimit_in?: Maybe<Array<Scalars['BigInt']>>
+  storageBucketMaxObjectsSizeLimit_eq?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsSizeLimit_gt?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsSizeLimit_gte?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsSizeLimit_lt?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsSizeLimit_lte?: Maybe<Scalars['BigInt']>
+  storageBucketMaxObjectsSizeLimit_in?: Maybe<Array<Scalars['BigInt']>>
+  nextDataObjectId_eq?: Maybe<Scalars['BigInt']>
+  nextDataObjectId_gt?: Maybe<Scalars['BigInt']>
+  nextDataObjectId_gte?: Maybe<Scalars['BigInt']>
+  nextDataObjectId_lt?: Maybe<Scalars['BigInt']>
+  nextDataObjectId_lte?: Maybe<Scalars['BigInt']>
+  nextDataObjectId_in?: Maybe<Array<Scalars['BigInt']>>
+  AND?: Maybe<Array<StorageSystemParametersWhereInput>>
+  OR?: Maybe<Array<StorageSystemParametersWhereInput>>
+}
+
+export type StorageSystemParametersWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type Subscription = {
+  stateSubscription: ProcessorState
+}
+
+export type Video = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  channel: Channel
+  channelId: Scalars['String']
+  category?: Maybe<VideoCategory>
+  categoryId?: Maybe<Scalars['String']>
+  /** The title of the video */
+  title?: Maybe<Scalars['String']>
+  /** The description of the Video */
+  description?: Maybe<Scalars['String']>
+  /** Video duration in seconds */
+  duration?: Maybe<Scalars['Int']>
+  thumbnailPhoto?: Maybe<StorageDataObject>
+  thumbnailPhotoId?: Maybe<Scalars['String']>
+  language?: Maybe<Language>
+  languageId?: Maybe<Scalars['String']>
+  /** Whether or not Video contains marketing */
+  hasMarketing?: Maybe<Scalars['Boolean']>
+  /** If the Video was published on another platform before being published on Joystream - the original publication date */
+  publishedBeforeJoystream?: Maybe<Scalars['DateTime']>
+  /** Whether the Video is supposed to be publicly displayed */
+  isPublic?: Maybe<Scalars['Boolean']>
+  /** Flag signaling whether a video is censored. */
+  isCensored: Scalars['Boolean']
+  /** Whether the Video contains explicit material. */
+  isExplicit?: Maybe<Scalars['Boolean']>
+  license?: Maybe<License>
+  licenseId?: Maybe<Scalars['String']>
+  media?: Maybe<StorageDataObject>
+  mediaId?: Maybe<Scalars['String']>
+  mediaMetadata?: Maybe<VideoMediaMetadata>
+  mediaMetadataId?: Maybe<Scalars['String']>
+  createdInBlock: Scalars['Int']
+  /** Is video featured or not */
+  isFeatured: Scalars['Boolean']
+}
+
+export type VideoCategoriesByNameFtsOutput = {
+  item: VideoCategoriesByNameSearchResult
+  rank: Scalars['Float']
+  isTypeOf: Scalars['String']
+  highlight: Scalars['String']
+}
+
+export type VideoCategoriesByNameSearchResult = VideoCategory
+
+export type VideoCategory = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** The name of the category */
+  name?: Maybe<Scalars['String']>
+  videos: Array<Video>
+  createdInBlock: Scalars['Int']
+}
+
+export type VideoCategoryConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<VideoCategoryEdge>
+  pageInfo: PageInfo
+}
+
+export type VideoCategoryCreateInput = {
+  name?: Maybe<Scalars['String']>
+  createdInBlock: Scalars['Float']
+}
+
+export type VideoCategoryEdge = {
+  node: VideoCategory
+  cursor: Scalars['String']
+}
+
+export enum VideoCategoryOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  NameAsc = 'name_ASC',
+  NameDesc = 'name_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+}
+
+export type VideoCategoryUpdateInput = {
+  name?: Maybe<Scalars['String']>
+  createdInBlock?: Maybe<Scalars['Float']>
+}
+
+export type VideoCategoryWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  name_eq?: Maybe<Scalars['String']>
+  name_contains?: Maybe<Scalars['String']>
+  name_startsWith?: Maybe<Scalars['String']>
+  name_endsWith?: Maybe<Scalars['String']>
+  name_in?: Maybe<Array<Scalars['String']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  videos_none?: Maybe<VideoWhereInput>
+  videos_some?: Maybe<VideoWhereInput>
+  videos_every?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<VideoCategoryWhereInput>>
+  OR?: Maybe<Array<VideoCategoryWhereInput>>
+}
+
+export type VideoCategoryWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type VideoConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<VideoEdge>
+  pageInfo: PageInfo
+}
+
+export type VideoCreateInput = {
+  channel: Scalars['ID']
+  category?: Maybe<Scalars['ID']>
+  title?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  duration?: Maybe<Scalars['Float']>
+  thumbnailPhoto?: Maybe<Scalars['ID']>
+  language?: Maybe<Scalars['ID']>
+  hasMarketing?: Maybe<Scalars['Boolean']>
+  publishedBeforeJoystream?: Maybe<Scalars['DateTime']>
+  isPublic?: Maybe<Scalars['Boolean']>
+  isCensored: Scalars['Boolean']
+  isExplicit?: Maybe<Scalars['Boolean']>
+  license?: Maybe<Scalars['ID']>
+  media?: Maybe<Scalars['ID']>
+  mediaMetadata?: Maybe<Scalars['ID']>
+  createdInBlock: Scalars['Float']
+  isFeatured: Scalars['Boolean']
+}
+
+export type VideoEdge = {
+  node: Video
+  cursor: Scalars['String']
+}
+
+export type VideoMediaEncoding = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Encoding of the video media object */
+  codecName?: Maybe<Scalars['String']>
+  /** Media container format */
+  container?: Maybe<Scalars['String']>
+  /** Content MIME type */
+  mimeMediaType?: Maybe<Scalars['String']>
+  videomediametadataencoding?: Maybe<Array<VideoMediaMetadata>>
+}
+
+export type VideoMediaEncodingConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<VideoMediaEncodingEdge>
+  pageInfo: PageInfo
+}
+
+export type VideoMediaEncodingCreateInput = {
+  codecName?: Maybe<Scalars['String']>
+  container?: Maybe<Scalars['String']>
+  mimeMediaType?: Maybe<Scalars['String']>
+}
+
+export type VideoMediaEncodingEdge = {
+  node: VideoMediaEncoding
+  cursor: Scalars['String']
+}
+
+export enum VideoMediaEncodingOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  CodecNameAsc = 'codecName_ASC',
+  CodecNameDesc = 'codecName_DESC',
+  ContainerAsc = 'container_ASC',
+  ContainerDesc = 'container_DESC',
+  MimeMediaTypeAsc = 'mimeMediaType_ASC',
+  MimeMediaTypeDesc = 'mimeMediaType_DESC',
+}
+
+export type VideoMediaEncodingUpdateInput = {
+  codecName?: Maybe<Scalars['String']>
+  container?: Maybe<Scalars['String']>
+  mimeMediaType?: Maybe<Scalars['String']>
+}
+
+export type VideoMediaEncodingWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  codecName_eq?: Maybe<Scalars['String']>
+  codecName_contains?: Maybe<Scalars['String']>
+  codecName_startsWith?: Maybe<Scalars['String']>
+  codecName_endsWith?: Maybe<Scalars['String']>
+  codecName_in?: Maybe<Array<Scalars['String']>>
+  container_eq?: Maybe<Scalars['String']>
+  container_contains?: Maybe<Scalars['String']>
+  container_startsWith?: Maybe<Scalars['String']>
+  container_endsWith?: Maybe<Scalars['String']>
+  container_in?: Maybe<Array<Scalars['String']>>
+  mimeMediaType_eq?: Maybe<Scalars['String']>
+  mimeMediaType_contains?: Maybe<Scalars['String']>
+  mimeMediaType_startsWith?: Maybe<Scalars['String']>
+  mimeMediaType_endsWith?: Maybe<Scalars['String']>
+  mimeMediaType_in?: Maybe<Array<Scalars['String']>>
+  videomediametadataencoding_none?: Maybe<VideoMediaMetadataWhereInput>
+  videomediametadataencoding_some?: Maybe<VideoMediaMetadataWhereInput>
+  videomediametadataencoding_every?: Maybe<VideoMediaMetadataWhereInput>
+  AND?: Maybe<Array<VideoMediaEncodingWhereInput>>
+  OR?: Maybe<Array<VideoMediaEncodingWhereInput>>
+}
+
+export type VideoMediaEncodingWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type VideoMediaMetadata = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  encoding?: Maybe<VideoMediaEncoding>
+  encodingId?: Maybe<Scalars['String']>
+  /** Video media width in pixels */
+  pixelWidth?: Maybe<Scalars['Int']>
+  /** Video media height in pixels */
+  pixelHeight?: Maybe<Scalars['Int']>
+  /** Video media size in bytes */
+  size?: Maybe<Scalars['BigInt']>
+  video?: Maybe<Video>
+  createdInBlock: Scalars['Int']
+}
+
+export type VideoMediaMetadataConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<VideoMediaMetadataEdge>
+  pageInfo: PageInfo
+}
+
+export type VideoMediaMetadataCreateInput = {
+  encoding?: Maybe<Scalars['ID']>
+  pixelWidth?: Maybe<Scalars['Float']>
+  pixelHeight?: Maybe<Scalars['Float']>
+  size?: Maybe<Scalars['String']>
+  createdInBlock: Scalars['Float']
+}
+
+export type VideoMediaMetadataEdge = {
+  node: VideoMediaMetadata
+  cursor: Scalars['String']
+}
+
+export enum VideoMediaMetadataOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  EncodingAsc = 'encoding_ASC',
+  EncodingDesc = 'encoding_DESC',
+  PixelWidthAsc = 'pixelWidth_ASC',
+  PixelWidthDesc = 'pixelWidth_DESC',
+  PixelHeightAsc = 'pixelHeight_ASC',
+  PixelHeightDesc = 'pixelHeight_DESC',
+  SizeAsc = 'size_ASC',
+  SizeDesc = 'size_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+}
+
+export type VideoMediaMetadataUpdateInput = {
+  encoding?: Maybe<Scalars['ID']>
+  pixelWidth?: Maybe<Scalars['Float']>
+  pixelHeight?: Maybe<Scalars['Float']>
+  size?: Maybe<Scalars['String']>
+  createdInBlock?: Maybe<Scalars['Float']>
+}
+
+export type VideoMediaMetadataWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  pixelWidth_eq?: Maybe<Scalars['Int']>
+  pixelWidth_gt?: Maybe<Scalars['Int']>
+  pixelWidth_gte?: Maybe<Scalars['Int']>
+  pixelWidth_lt?: Maybe<Scalars['Int']>
+  pixelWidth_lte?: Maybe<Scalars['Int']>
+  pixelWidth_in?: Maybe<Array<Scalars['Int']>>
+  pixelHeight_eq?: Maybe<Scalars['Int']>
+  pixelHeight_gt?: Maybe<Scalars['Int']>
+  pixelHeight_gte?: Maybe<Scalars['Int']>
+  pixelHeight_lt?: Maybe<Scalars['Int']>
+  pixelHeight_lte?: Maybe<Scalars['Int']>
+  pixelHeight_in?: Maybe<Array<Scalars['Int']>>
+  size_eq?: Maybe<Scalars['BigInt']>
+  size_gt?: Maybe<Scalars['BigInt']>
+  size_gte?: Maybe<Scalars['BigInt']>
+  size_lt?: Maybe<Scalars['BigInt']>
+  size_lte?: Maybe<Scalars['BigInt']>
+  size_in?: Maybe<Array<Scalars['BigInt']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  encoding?: Maybe<VideoMediaEncodingWhereInput>
+  video?: Maybe<VideoWhereInput>
+  AND?: Maybe<Array<VideoMediaMetadataWhereInput>>
+  OR?: Maybe<Array<VideoMediaMetadataWhereInput>>
+}
+
+export type VideoMediaMetadataWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export enum VideoOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  ChannelAsc = 'channel_ASC',
+  ChannelDesc = 'channel_DESC',
+  CategoryAsc = 'category_ASC',
+  CategoryDesc = 'category_DESC',
+  TitleAsc = 'title_ASC',
+  TitleDesc = 'title_DESC',
+  DescriptionAsc = 'description_ASC',
+  DescriptionDesc = 'description_DESC',
+  DurationAsc = 'duration_ASC',
+  DurationDesc = 'duration_DESC',
+  ThumbnailPhotoAsc = 'thumbnailPhoto_ASC',
+  ThumbnailPhotoDesc = 'thumbnailPhoto_DESC',
+  LanguageAsc = 'language_ASC',
+  LanguageDesc = 'language_DESC',
+  HasMarketingAsc = 'hasMarketing_ASC',
+  HasMarketingDesc = 'hasMarketing_DESC',
+  PublishedBeforeJoystreamAsc = 'publishedBeforeJoystream_ASC',
+  PublishedBeforeJoystreamDesc = 'publishedBeforeJoystream_DESC',
+  IsPublicAsc = 'isPublic_ASC',
+  IsPublicDesc = 'isPublic_DESC',
+  IsCensoredAsc = 'isCensored_ASC',
+  IsCensoredDesc = 'isCensored_DESC',
+  IsExplicitAsc = 'isExplicit_ASC',
+  IsExplicitDesc = 'isExplicit_DESC',
+  LicenseAsc = 'license_ASC',
+  LicenseDesc = 'license_DESC',
+  MediaAsc = 'media_ASC',
+  MediaDesc = 'media_DESC',
+  MediaMetadataAsc = 'mediaMetadata_ASC',
+  MediaMetadataDesc = 'mediaMetadata_DESC',
+  CreatedInBlockAsc = 'createdInBlock_ASC',
+  CreatedInBlockDesc = 'createdInBlock_DESC',
+  IsFeaturedAsc = 'isFeatured_ASC',
+  IsFeaturedDesc = 'isFeatured_DESC',
+}
+
+export type VideoUpdateInput = {
+  channel?: Maybe<Scalars['ID']>
+  category?: Maybe<Scalars['ID']>
+  title?: Maybe<Scalars['String']>
+  description?: Maybe<Scalars['String']>
+  duration?: Maybe<Scalars['Float']>
+  thumbnailPhoto?: Maybe<Scalars['ID']>
+  language?: Maybe<Scalars['ID']>
+  hasMarketing?: Maybe<Scalars['Boolean']>
+  publishedBeforeJoystream?: Maybe<Scalars['DateTime']>
+  isPublic?: Maybe<Scalars['Boolean']>
+  isCensored?: Maybe<Scalars['Boolean']>
+  isExplicit?: Maybe<Scalars['Boolean']>
+  license?: Maybe<Scalars['ID']>
+  media?: Maybe<Scalars['ID']>
+  mediaMetadata?: Maybe<Scalars['ID']>
+  createdInBlock?: Maybe<Scalars['Float']>
+  isFeatured?: Maybe<Scalars['Boolean']>
+}
+
+export type VideoWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  title_eq?: Maybe<Scalars['String']>
+  title_contains?: Maybe<Scalars['String']>
+  title_startsWith?: Maybe<Scalars['String']>
+  title_endsWith?: Maybe<Scalars['String']>
+  title_in?: Maybe<Array<Scalars['String']>>
+  description_eq?: Maybe<Scalars['String']>
+  description_contains?: Maybe<Scalars['String']>
+  description_startsWith?: Maybe<Scalars['String']>
+  description_endsWith?: Maybe<Scalars['String']>
+  description_in?: Maybe<Array<Scalars['String']>>
+  duration_eq?: Maybe<Scalars['Int']>
+  duration_gt?: Maybe<Scalars['Int']>
+  duration_gte?: Maybe<Scalars['Int']>
+  duration_lt?: Maybe<Scalars['Int']>
+  duration_lte?: Maybe<Scalars['Int']>
+  duration_in?: Maybe<Array<Scalars['Int']>>
+  hasMarketing_eq?: Maybe<Scalars['Boolean']>
+  hasMarketing_in?: Maybe<Array<Scalars['Boolean']>>
+  publishedBeforeJoystream_eq?: Maybe<Scalars['DateTime']>
+  publishedBeforeJoystream_lt?: Maybe<Scalars['DateTime']>
+  publishedBeforeJoystream_lte?: Maybe<Scalars['DateTime']>
+  publishedBeforeJoystream_gt?: Maybe<Scalars['DateTime']>
+  publishedBeforeJoystream_gte?: Maybe<Scalars['DateTime']>
+  isPublic_eq?: Maybe<Scalars['Boolean']>
+  isPublic_in?: Maybe<Array<Scalars['Boolean']>>
+  isCensored_eq?: Maybe<Scalars['Boolean']>
+  isCensored_in?: Maybe<Array<Scalars['Boolean']>>
+  isExplicit_eq?: Maybe<Scalars['Boolean']>
+  isExplicit_in?: Maybe<Array<Scalars['Boolean']>>
+  createdInBlock_eq?: Maybe<Scalars['Int']>
+  createdInBlock_gt?: Maybe<Scalars['Int']>
+  createdInBlock_gte?: Maybe<Scalars['Int']>
+  createdInBlock_lt?: Maybe<Scalars['Int']>
+  createdInBlock_lte?: Maybe<Scalars['Int']>
+  createdInBlock_in?: Maybe<Array<Scalars['Int']>>
+  isFeatured_eq?: Maybe<Scalars['Boolean']>
+  isFeatured_in?: Maybe<Array<Scalars['Boolean']>>
+  channel?: Maybe<ChannelWhereInput>
+  category?: Maybe<VideoCategoryWhereInput>
+  thumbnailPhoto?: Maybe<StorageDataObjectWhereInput>
+  language?: Maybe<LanguageWhereInput>
+  license?: Maybe<LicenseWhereInput>
+  media?: Maybe<StorageDataObjectWhereInput>
+  mediaMetadata?: Maybe<VideoMediaMetadataWhereInput>
+  AND?: Maybe<Array<VideoWhereInput>>
+  OR?: Maybe<Array<VideoWhereInput>>
+}
+
+export type VideoWhereUniqueInput = {
+  id: Scalars['ID']
+}
+
+export type Worker = BaseGraphQlObject & {
+  id: Scalars['ID']
+  createdAt: Scalars['DateTime']
+  createdById: Scalars['String']
+  updatedAt?: Maybe<Scalars['DateTime']>
+  updatedById?: Maybe<Scalars['String']>
+  deletedAt?: Maybe<Scalars['DateTime']>
+  deletedById?: Maybe<Scalars['String']>
+  version: Scalars['Int']
+  /** Whether the worker is still active */
+  isActive: Scalars['Boolean']
+  /** Runtime identifier */
+  workerId: Scalars['String']
+  /** Associated working group */
+  type: WorkerType
+  /** Custom metadata set by provider */
+  metadata?: Maybe<Scalars['String']>
+}
+
+export type WorkerConnection = {
+  totalCount: Scalars['Int']
+  edges: Array<WorkerEdge>
+  pageInfo: PageInfo
+}
+
+export type WorkerCreateInput = {
+  isActive: Scalars['Boolean']
+  workerId: Scalars['String']
+  type: WorkerType
+  metadata?: Maybe<Scalars['String']>
+}
+
+export type WorkerEdge = {
+  node: Worker
+  cursor: Scalars['String']
+}
+
+export enum WorkerOrderByInput {
+  CreatedAtAsc = 'createdAt_ASC',
+  CreatedAtDesc = 'createdAt_DESC',
+  UpdatedAtAsc = 'updatedAt_ASC',
+  UpdatedAtDesc = 'updatedAt_DESC',
+  DeletedAtAsc = 'deletedAt_ASC',
+  DeletedAtDesc = 'deletedAt_DESC',
+  IsActiveAsc = 'isActive_ASC',
+  IsActiveDesc = 'isActive_DESC',
+  WorkerIdAsc = 'workerId_ASC',
+  WorkerIdDesc = 'workerId_DESC',
+  TypeAsc = 'type_ASC',
+  TypeDesc = 'type_DESC',
+  MetadataAsc = 'metadata_ASC',
+  MetadataDesc = 'metadata_DESC',
+}
+
+export enum WorkerType {
+  Gateway = 'GATEWAY',
+  Storage = 'STORAGE',
+}
+
+export type WorkerUpdateInput = {
+  isActive?: Maybe<Scalars['Boolean']>
+  workerId?: Maybe<Scalars['String']>
+  type?: Maybe<WorkerType>
+  metadata?: Maybe<Scalars['String']>
+}
+
+export type WorkerWhereInput = {
+  id_eq?: Maybe<Scalars['ID']>
+  id_in?: Maybe<Array<Scalars['ID']>>
+  createdAt_eq?: Maybe<Scalars['DateTime']>
+  createdAt_lt?: Maybe<Scalars['DateTime']>
+  createdAt_lte?: Maybe<Scalars['DateTime']>
+  createdAt_gt?: Maybe<Scalars['DateTime']>
+  createdAt_gte?: Maybe<Scalars['DateTime']>
+  createdById_eq?: Maybe<Scalars['ID']>
+  createdById_in?: Maybe<Array<Scalars['ID']>>
+  updatedAt_eq?: Maybe<Scalars['DateTime']>
+  updatedAt_lt?: Maybe<Scalars['DateTime']>
+  updatedAt_lte?: Maybe<Scalars['DateTime']>
+  updatedAt_gt?: Maybe<Scalars['DateTime']>
+  updatedAt_gte?: Maybe<Scalars['DateTime']>
+  updatedById_eq?: Maybe<Scalars['ID']>
+  updatedById_in?: Maybe<Array<Scalars['ID']>>
+  deletedAt_all?: Maybe<Scalars['Boolean']>
+  deletedAt_eq?: Maybe<Scalars['DateTime']>
+  deletedAt_lt?: Maybe<Scalars['DateTime']>
+  deletedAt_lte?: Maybe<Scalars['DateTime']>
+  deletedAt_gt?: Maybe<Scalars['DateTime']>
+  deletedAt_gte?: Maybe<Scalars['DateTime']>
+  deletedById_eq?: Maybe<Scalars['ID']>
+  deletedById_in?: Maybe<Array<Scalars['ID']>>
+  isActive_eq?: Maybe<Scalars['Boolean']>
+  isActive_in?: Maybe<Array<Scalars['Boolean']>>
+  workerId_eq?: Maybe<Scalars['String']>
+  workerId_contains?: Maybe<Scalars['String']>
+  workerId_startsWith?: Maybe<Scalars['String']>
+  workerId_endsWith?: Maybe<Scalars['String']>
+  workerId_in?: Maybe<Array<Scalars['String']>>
+  type_eq?: Maybe<WorkerType>
+  type_in?: Maybe<Array<WorkerType>>
+  metadata_eq?: Maybe<Scalars['String']>
+  metadata_contains?: Maybe<Scalars['String']>
+  metadata_startsWith?: Maybe<Scalars['String']>
+  metadata_endsWith?: Maybe<Scalars['String']>
+  metadata_in?: Maybe<Array<Scalars['String']>>
+  AND?: Maybe<Array<WorkerWhereInput>>
+  OR?: Maybe<Array<WorkerWhereInput>>
+}
+
+export type WorkerWhereUniqueInput = {
+  id: Scalars['ID']
+}

+ 116 - 0
storage-node-v2/src/services/queryNode/queries/queries.graphql

@@ -0,0 +1,116 @@
+# TODO: remove after issue fix: https://github.com/Joystream/joystream/issues/2811
+fragment StorageBucketIds on StorageBucket {
+  id
+}
+
+query getStorageBucketsConnection($limit: Int, $cursor: String) {
+  storageBucketsConnection(
+    first: $limit
+    after: $cursor
+    where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive" } }
+  ) {
+    edges {
+      cursor
+      node {
+        ...StorageBucketIds
+      }
+    }
+    pageInfo {
+      hasNextPage
+      endCursor
+    }
+    totalCount
+  }
+}
+
+query getStorageBucketDetailsByWorkerId($workerId: ID, $limit: Int, $cursor: String) {
+  storageBucketsConnection(
+    first: $limit
+    after: $cursor
+    where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive", workerId_eq: $workerId } }
+  ) {
+    edges {
+      cursor
+      node {
+        ...StorageBucketIds
+      }
+    }
+    pageInfo {
+      hasNextPage
+      endCursor
+    }
+    totalCount
+  }
+}
+
+fragment StorageBucketDetails on StorageBucket {
+  id
+  operatorMetadata {
+    id
+    nodeEndpoint
+  }
+  operatorStatus {
+    ... on StorageBucketOperatorStatusActive {
+      workerId
+    }
+    ... on StorageBucketOperatorStatusInvited {
+      workerId
+    }
+  }
+}
+
+query getStorageBucketDetails($ids: [ID!], $offset: Int, $limit: Int) {
+  storageBuckets(where: { id_in: $ids }, offset: $offset, limit: $limit) {
+    ...StorageBucketDetails
+  }
+}
+
+fragment StorageBagDetails on StorageBag {
+  id
+  storageBuckets {
+    id
+  }
+}
+
+query getStorageBagDetails($bucketIds: [ID!], $offset: Int, $limit: Int) {
+  storageBags(offset: $offset, limit: $limit, where: { storageBuckets_some: { id_in: $bucketIds } }) {
+    ...StorageBagDetails
+  }
+}
+
+query getBagConnection($bucketIds: [ID!], $limit: Int, $cursor: String) {
+  storageBagsConnection(first: $limit, after: $cursor, where: { storageBuckets_some: { id_in: $bucketIds } }) {
+    edges {
+      cursor
+      node {
+        ...StorageBagDetails
+      }
+    }
+    pageInfo {
+      hasNextPage
+      endCursor
+    }
+    totalCount
+  }
+}
+
+fragment DataObjectDetails on StorageDataObject {
+  id
+  storageBagId
+}
+
+query getDataObjectConnection($bagIds: StorageBagWhereInput, $limit: Int, $cursor: String) {
+  storageDataObjectsConnection(first: $limit, after: $cursor, where: { storageBag: $bagIds, isAccepted_eq: true }) {
+    edges {
+      cursor
+      node {
+        ...DataObjectDetails
+      }
+    }
+    pageInfo {
+      hasNextPage
+      endCursor
+    }
+    totalCount
+  }
+}
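For illustration, here is a minimal sketch of paging through the `getStorageBucketsConnection` query above from TypeScript. It assumes a plain Apollo client and a locally running Query Node at `http://localhost:8081/graphql`; the endpoint, page size and client choice are assumptions, not values taken from this codebase (the storage node uses its own `QueryNodeApi` wrapper over the generated queries).

```ts
// Sketch: paging through storageBucketsConnection with an assumed Apollo client.
import { ApolloClient, HttpLink, InMemoryCache, gql } from '@apollo/client/core'
import fetch from 'cross-fetch'

const client = new ApolloClient({
  // Assumed local Query Node endpoint.
  link: new HttpLink({ uri: 'http://localhost:8081/graphql', fetch }),
  cache: new InMemoryCache(),
  defaultOptions: { query: { fetchPolicy: 'no-cache' } },
})

const ACTIVE_BUCKETS = gql`
  query getStorageBucketsConnection($limit: Int, $cursor: String) {
    storageBucketsConnection(
      first: $limit
      after: $cursor
      where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive" } }
    ) {
      edges { node { id } }
      pageInfo { hasNextPage endCursor }
    }
  }
`

// Collects all active bucket IDs, following the cursor until the last page.
export async function fetchAllActiveBucketIds(pageSize = 1000): Promise<string[]> {
  const ids: string[] = []
  let cursor: string | null = null
  for (;;) {
    const { data } = await client.query({ query: ACTIVE_BUCKETS, variables: { limit: pageSize, cursor } })
    const connection = data.storageBucketsConnection
    ids.push(...connection.edges.map((edge: { node: { id: string } }) => edge.node.id))
    if (!connection.pageInfo.hasNextPage) break
    cursor = connection.pageInfo.endCursor
  }
  return ids
}
```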

+ 69 - 14
storage-node-v2/src/services/runtime/api.ts

@@ -6,10 +6,12 @@ import { TypeRegistry } from '@polkadot/types'
 import { KeyringPair } from '@polkadot/keyring/types'
 import { SubmittableExtrinsic, AugmentedEvent } from '@polkadot/api/types'
 import { DispatchError, DispatchResult } from '@polkadot/types/interfaces/system'
-import { getNonce } from './transactionNonceKeeper'
 import logger from '../../services/logger'
 import ExitCodes from '../../command-base/ExitCodes'
 import { CLIError } from '@oclif/errors'
+import stringify from 'fast-safe-stringify'
+import sleep from 'sleep-promise'
+import AwaitLock from 'await-lock'
 
 /**
  * Dedicated error for the failed extrinsics.
@@ -24,25 +26,61 @@ export class ExtrinsicFailedError extends CLIError {}
  */
 export async function createApi(apiUrl: string): Promise<ApiPromise> {
   const provider = new WsProvider(apiUrl)
+  provider.on('error', (err) => logger.error(`Api provider error: ${err.target?._url}`, { err }))
 
-  return await ApiPromise.create({ provider, types })
+  const api = new ApiPromise({ provider, types })
+  await api.isReadyOrError
+  await untilChainIsSynced(api)
+
+  api.on('error', (err) => logger.error(`Api promise error: ${err.target?._url}`, { err }))
+
+  return api
+}
+
+/**
+ * Awaits the chain to be fully synchronized.
+ */
+async function untilChainIsSynced(api: ApiPromise) {
+  logger.info('Waiting for chain to be synced before proceeding.')
+  while (true) {
+    const isSyncing = await chainIsSyncing(api)
+    if (isSyncing) {
+      logger.info('Still waiting for chain to be synced.')
+      await sleep(1 * 30 * 1000)
+    } else {
+      return
+    }
+  }
+}
+
+/**
+ * Checks the chain sync status.
+ *
+ * @param api - runtime API promise
+ * @returns promise resolving to true while the chain is still syncing.
+ */
+async function chainIsSyncing(api: ApiPromise) {
+  const { isSyncing } = await api.rpc.system.health()
+  return isSyncing.isTrue
 }
 
+const lock = new AwaitLock()
+
 /**
  * Sends an extrinsic to the runtime and follows the result.
  *
  * @param api - API promise
  * @param account - KeyPair instance
  * @param tx - runtime transaction object to send
- * @param nonce - transaction nonce for a given account.
  * @returns extrinsic result promise.
  */
-function sendExtrinsic(
+async function sendExtrinsic(
   api: ApiPromise,
   account: KeyringPair,
-  tx: SubmittableExtrinsic<'promise'>,
-  nonce: Index
+  tx: SubmittableExtrinsic<'promise'>
 ): Promise<ISubmittableResult> {
+  const nonce = await lockAndGetNonce(api, account)
+
   return new Promise((resolve, reject) => {
     let unsubscribe: () => void
     tx.signAndSend(account, { nonce }, (result) => {
@@ -100,17 +138,38 @@ function sendExtrinsic(
         )
       }
     })
-      .then((unsubFunc) => (unsubscribe = unsubFunc))
+      .then((unsubFunc) => {
+        unsubscribe = unsubFunc
+      })
       .catch((e) =>
         reject(
-          new ExtrinsicFailedError(`Cannot send the extrinsic: ${e.message ? e.message : JSON.stringify(e)}`, {
+          new ExtrinsicFailedError(`Cannot send the extrinsic: ${e.message ? e.message : stringify(e)}`, {
             exit: ExitCodes.ApiError,
           })
         )
       )
+      .finally(() => lock.release())
   })
 }
 
+/**
+ * Sets the API lock and gets the next account nonce. It releases the lock on
+ * exception and rethrows the error.
+ *
+ * @param api runtime API promise
+ * @param account account to get the last nonce from.
+ * @returns promise with the next account nonce for the given account.
+ */
+async function lockAndGetNonce(api: ApiPromise, account: KeyringPair): Promise<Index> {
+  await lock.acquireAsync()
+  try {
+    return await api.rpc.system.accountNextIndex(account.address)
+  } catch (err) {
+    lock.release()
+    throw err
+  }
+}
+
 /**
  * Helper function for formatting dispatch error.
  *
@@ -149,14 +208,10 @@ export async function sendAndFollowNamedTx<T>(
   eventParser: ((result: ISubmittableResult) => T) | null = null
 ): Promise<T | void> {
   logger.debug(`Sending ${tx.method.section}.${tx.method.method} extrinsic...`)
-
   if (sudoCall) {
     tx = api.tx.sudo.sudo(tx)
   }
-  const nonce = await getNonce(api, account)
-
-  const result = await sendExtrinsic(api, account, tx, nonce)
-
+  const result = await sendExtrinsic(api, account, tx)
   let eventResult: T | void
   if (eventParser) {
     eventResult = eventParser(result)
@@ -202,7 +257,7 @@ export function getEvent<
   const event = result.findRecord(section, eventName)?.event as EventType | undefined
 
   if (!event) {
-    throw new Error(`Cannot find expected ${section}.${eventName} event in result: ${result.toHuman()}`)
+    throw new ExtrinsicFailedError(`Cannot find expected ${section}.${eventName} event in result: ${result.toHuman()}`)
   }
   return event as EventType
 }
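A minimal usage sketch of the updated runtime helpers follows. The account, amount and the balances transfer call are illustrative only, and the helper's trailing parameters (sudo flag, event parser) are assumed to keep their defaults; the point is that nonce handling now happens inside `sendExtrinsic` under the shared lock, so callers no longer pass a nonce.

```ts
// Sketch: sending a transaction through the refactored helpers (assumed values).
import { Keyring } from '@polkadot/keyring'
import { createApi, sendAndFollowNamedTx } from './api'

export async function sendExampleTransfer(apiUrl: string): Promise<void> {
  // createApi now also waits until the node reports it is no longer syncing.
  const api = await createApi(apiUrl)

  const keyring = new Keyring({ type: 'sr25519' })
  const sender = keyring.addFromUri('//Alice') // assumed dev account
  const recipient = keyring.addFromUri('//Bob').address

  // The nonce is fetched via api.rpc.system.accountNextIndex under the lock,
  // so two concurrent calls from this process cannot reuse the same nonce.
  await sendAndFollowNamedTx(api, sender, api.tx.balances.transfer(recipient, 1))
}
```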

+ 7 - 2
storage-node-v2/src/services/runtime/extrinsics.ts

@@ -4,6 +4,7 @@ import { KeyringPair } from '@polkadot/keyring/types'
 import { ApiPromise } from '@polkadot/api'
 import { BagId, DynamicBagType } from '@joystream/types/storage'
 import logger from '../../services/logger'
+import { timeout } from 'promise-timeout'
 
 /**
  * Creates storage bucket.
@@ -269,9 +270,13 @@ export async function inviteStorageBucketOperator(
  * after logging.
  * @returns promise with a success flag.
  */
-async function extrinsicWrapper(extrinsic: () => Promise<void>, throwErr = false): Promise<boolean> {
+async function extrinsicWrapper(
+  extrinsic: () => Promise<void>,
+  throwErr = false,
+  timeoutMs = 25000 // 25s - default extrinsic timeout
+): Promise<boolean> {
   try {
-    await extrinsic()
+    await timeout(extrinsic(), timeoutMs)
   } catch (err) {
     logger.error(`Api Error: ${err}`)
 

+ 0 - 34
storage-node-v2/src/services/runtime/transactionNonceKeeper.ts

@@ -1,34 +0,0 @@
-import { KeyringPair } from '@polkadot/keyring/types'
-import type { Index } from '@polkadot/types/interfaces/runtime'
-import BN from 'bn.js'
-import AwaitLock from 'await-lock'
-import { ApiPromise } from '@polkadot/api'
-import logger from '../logger'
-
-let nonce: Index | null = null
-const lock = new AwaitLock()
-
-/**
- * Return the current transaction nonce for an account from the runtime.
- *
- * @param api - runtime API promise
- * @param account - KeyPair instance
- * @returns promise with transaction nonce for a given account.
- *
- */
-export async function getNonce(api: ApiPromise, account: KeyringPair): Promise<Index> {
-  await lock.acquireAsync()
-  try {
-    if (nonce === null) {
-      nonce = await api.rpc.system.accountNextIndex(account.address)
-    } else {
-      nonce = nonce.add(new BN(1)) as Index
-    }
-  } finally {
-    lock.release()
-  }
-
-  logger.debug(`Last transaction nonce:${nonce}`)
-
-  return nonce as Index
-}

+ 63 - 0
storage-node-v2/src/services/sync/remoteStorageData.ts

@@ -0,0 +1,63 @@
+import superagent from 'superagent'
+import urljoin from 'url-join'
+import logger from '../logger'
+import NodeCache from 'node-cache'
+
+// Expiration period in seconds for the local cache.
+const ExpirationPeriod: number = 3 * 60 // 3 minutes expressed in seconds
+
+// Max data entries in local cache
+const MaxEntries = 10000
+
+// Local in-memory cache for data object IDs by operator URL.
+const availableIDsCache = new NodeCache({
+  stdTTL: ExpirationPeriod,
+  deleteOnExpire: true,
+  maxKeys: MaxEntries,
+})
+
+// Local in-memory cache for faulty operator URLs. Prevents fetching from
+// offline storage nodes.
+const badOperatorUrls = new NodeCache({
+  stdTTL: ExpirationPeriod,
+  deleteOnExpire: true,
+  maxKeys: MaxEntries,
+})
+
+/**
+ * Queries the remote storage node for the data object IDs it stores.
+ * It caches the result (including errors) for a limited time.
+ *
+ * @param operatorUrl - remote storage node URL
+ */
+export async function getRemoteDataObjects(operatorUrl: string): Promise<string[]> {
+  const url = urljoin(operatorUrl, 'api/v1/state/data-objects')
+
+  const faultyOperator = badOperatorUrls.has(operatorUrl)
+  if (faultyOperator) {
+    logger.debug(`Sync - cached error for ${url}, skipping...`)
+    return []
+  }
+
+  const cachedData = availableIDsCache.get<string[]>(url)
+  if (cachedData) {
+    logger.debug(`Sync - getting available data for ${url} from cache`)
+    return cachedData
+  }
+
+  try {
+    logger.debug(`Sync - fetching available data for ${url}`)
+    const timeoutMs = 120 * 1000 // 2 min
+    const response = await superagent.get(url).timeout(timeoutMs)
+
+    const data = response.body
+    availableIDsCache.set(url, data, ExpirationPeriod)
+
+    return data
+  } catch (err) {
+    logger.error(`Sync - fetching data error from ${url}: ${err}`)
+    badOperatorUrls.set(operatorUrl, null, ExpirationPeriod)
+  }
+
+  return []
+}
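A short usage sketch of the helper, with a made-up operator URL. Within the three-minute TTL a second call is answered from the in-memory cache, and an operator that produced an error is skipped (empty result) until its `badOperatorUrls` entry expires.

```ts
// Sketch: calling getRemoteDataObjects twice against an assumed operator endpoint.
import { getRemoteDataObjects } from './remoteStorageData'

export async function listRemoteObjects(): Promise<void> {
  const operatorUrl = 'http://storage-node.example.com:3333' // placeholder endpoint

  const fresh = await getRemoteDataObjects(operatorUrl) // hits api/v1/state/data-objects
  const cached = await getRemoteDataObjects(operatorUrl) // served from availableIDsCache

  console.log(`Remote node reports ${fresh.length} object IDs (cached read: ${cached.length})`)
}
```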

+ 225 - 0
storage-node-v2/src/services/sync/storageObligations.ts

@@ -0,0 +1,225 @@
+import { MAX_RESULTS_PER_QUERY, QueryNodeApi } from '../queryNode/api'
+import logger from '../logger'
+import _ from 'lodash'
+import {
+  StorageBagDetailsFragment,
+  StorageBucketDetailsFragment,
+  DataObjectDetailsFragment,
+} from '../queryNode/generated/queries'
+
+/**
+ * Defines storage provider data obligations.
+ */
+export type DataObligations = {
+  /**
+   * All storage buckets in the system.
+   */
+  storageBuckets: StorageBucket[]
+
+  /**
+   * Assigned bags for the storage provider.
+   */
+  bags: Bag[]
+
+  /**
+   * Assigned data objects for the storage provider.
+   */
+  dataObjects: DataObject[]
+}
+
+/**
+ * Storage bucket abstraction.
+ */
+type StorageBucket = {
+  /**
+   * Storage bucket ID
+   */
+  id: string
+
+  /**
+   * Storage operator URL
+   */
+  operatorUrl: string
+
+  /**
+   * Storage operator worker ID (within the storage working group).
+   */
+  workerId: number
+}
+
+/**
+ * Storage bag abstraction.
+ */
+type Bag = {
+  /**
+   * Storage bag ID
+   */
+  id: string
+
+  /**
+   * Assigned storage bucket IDs.
+   */
+  buckets: string[]
+}
+
+/**
+ * Data object abstraction.
+ */
+type DataObject = {
+  /**
+   * Data object ID
+   */
+  id: string
+
+  /**
+   * Assigned bag ID
+   */
+  bagId: string
+}
+
+/**
+ * Gets storage provider obligations (e.g. assigned data objects) from the
+ * runtime (Query Node).
+ *
+ * @param queryNodeUrl - Query Node URL
+ * @param workerId - worker ID
+ * @returns promise for the DataObligations
+ */
+export async function getStorageObligationsFromRuntime(
+  queryNodeUrl: string,
+  workerId: number
+): Promise<DataObligations> {
+  const api = new QueryNodeApi(queryNodeUrl)
+
+  const allBuckets = await getAllBuckets(api)
+
+  const bucketIds = allBuckets
+    .filter((bucket) => bucket.operatorStatus?.workerId === workerId)
+    .map((bucket) => bucket.id)
+  const assignedBags = await getAllAssignedBags(api, bucketIds)
+
+  const bagIds = assignedBags.map((bag) => bag.id)
+  const assignedDataObjects = await getAllAssignedDataObjects(api, bagIds)
+
+  const model: DataObligations = {
+    storageBuckets: allBuckets.map((bucket) => ({
+      id: bucket.id,
+      operatorUrl: bucket.operatorMetadata?.nodeEndpoint ?? '',
+      workerId: bucket.operatorStatus?.workerId,
+    })),
+    bags: assignedBags.map((bag) => ({
+      id: bag.id,
+      buckets: bag.storageBuckets.map((bucketInBag) => bucketInBag.id),
+    })),
+    dataObjects: assignedDataObjects.map((dataObject) => ({
+      id: dataObject.id,
+      bagId: dataObject.storageBagId,
+    })),
+  }
+
+  return model
+}
+
+/**
+ * Get storage bucket IDs assigned to the worker.
+ *
+ * @param queryNodeUrl - Query Node URL
+ * @param workerId - worker ID
+ * @returns storage bucket IDs
+ */
+export async function getStorageBucketIdsByWorkerId(queryNodeUrl: string, workerId: number): Promise<string[]> {
+  const api = new QueryNodeApi(queryNodeUrl)
+
+  const idFragments = await api.getStorageBucketIdsByWorkerId(workerId.toString())
+  const ids = idFragments.map((frag) => frag.id)
+
+  return ids
+}
+
+/**
+ * Get IDs of the data objects assigned to the bag ID.
+ *
+ * @param queryNodeUrl - Query Node URL
+ * @param bagId - bag ID
+ * @returns data object IDs
+ */
+export async function getDataObjectIDsByBagId(queryNodeUrl: string, bagId: string): Promise<string[]> {
+  const api = new QueryNodeApi(queryNodeUrl)
+  const dataObjects = await getAllAssignedDataObjects(api, [bagId])
+
+  return dataObjects.map((obj) => obj.id)
+}
+
+/**
+ * Get all storage buckets registered in the runtime (Query Node).
+ *
+ * @param api - initialized QueryNodeApi instance
+ * @returns storage buckets data
+ */
+async function getAllBuckets(api: QueryNodeApi): Promise<StorageBucketDetailsFragment[]> {
+  const idFragments = await api.getStorageBucketIds()
+  const ids = idFragments.map((frag) => frag.id)
+
+  return await getAllObjectsWithPaging(async (offset, limit) => {
+    const idsPart = ids.slice(offset, offset + limit)
+    if (!_.isEmpty(idsPart)) {
+      logger.debug(`Sync - getting all storage buckets: offset = ${offset}, limit = ${limit}`)
+      return await api.getStorageBucketDetails(idsPart, 0, limit)
+    } else {
+      return false
+    }
+  })
+}
+
+/**
+ * Get all data objects assigned to storage provider.
+ *
+ * @param api - initialized QueryNodeApi instance
+ * @param bagIds - assigned storage bags' IDs
+ * @returns data object details
+ */
+async function getAllAssignedDataObjects(api: QueryNodeApi, bagIds: string[]): Promise<DataObjectDetailsFragment[]> {
+  return await api.getDataObjectDetails(bagIds)
+}
+
+/**
+ * Get all bags assigned to the storage provider.
+ *
+ * @param api - initialized QueryNodeApi instance
+ * @param bucketIds - assigned storage provider buckets' IDs
+ * @returns storage bag data
+ */
+async function getAllAssignedBags(api: QueryNodeApi, bucketIds: string[]): Promise<StorageBagDetailsFragment[]> {
+  return await api.getStorageBagsDetails(bucketIds)
+}
+
+/**
+ * Abstract helper that fetches objects from the Query Node. It pages through
+ * the results using a record offset and a limit (MAX_RESULTS_PER_QUERY, 1000).
+ *
+ * @param query - actual query function accepting an offset and a limit
+ * @returns all fetched objects
+ */
+async function getAllObjectsWithPaging<T>(
+  query: (offset: number, limit: number) => Promise<T[] | false>
+): Promise<T[]> {
+  const result = []
+  const limit = MAX_RESULTS_PER_QUERY
+  let offset = 0
+
+  let resultPart = []
+  do {
+    const queryResult = await query(offset, limit)
+    if (queryResult === false) {
+      return result
+    } else {
+      resultPart = queryResult
+    }
+
+    offset += limit
+    result.push(...resultPart)
+  } while (resultPart.length > 0)
+
+  return result
+}

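A minimal usage sketch (not part of the diff; the Query Node endpoint and worker ID below are hypothetical) of how the obligations helper above is meant to be consumed, e.g. by the synchronizer:

```ts
import { getStorageObligationsFromRuntime } from './storageObligations'

async function printObligations(): Promise<void> {
  // Hypothetical Query Node endpoint and worker ID.
  const obligations = await getStorageObligationsFromRuntime('http://localhost:8081/graphql', 1)

  for (const dataObject of obligations.dataObjects) {
    console.log(`data object ${dataObject.id} belongs to bag ${dataObject.bagId}`)
  }
}
```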
+ 179 - 0
storage-node-v2/src/services/sync/synchronizer.ts

@@ -0,0 +1,179 @@
+import { getStorageObligationsFromRuntime, DataObligations } from './storageObligations'
+import logger from '../../services/logger'
+import { getDataObjectIDs } from '../../services/caching/localDataObjects'
+import { SyncTask, DownloadFileTask, PrepareDownloadFileTask } from './tasks'
+import { WorkingStack, TaskProcessorSpawner, TaskSink } from './workingProcess'
+import _ from 'lodash'
+import { ApiPromise } from '@polkadot/api'
+
+/**
+ * Temporary directory name for data uploading.
+ */
+export const TempDirName = 'temp'
+
+/**
+ * Runs the data synchronization workflow. It compares the current node's
+ * storage obligations with the local storage and reconciles the difference.
+ * The sync process uses the Query Node to define the storage obligations and
+ * remote storage nodes' URLs to obtain the data.
+ *
+ * @param api - (optional) runtime API promise
+ * @param workerId - current storage provider ID
+ * @param asyncWorkersNumber - maximum parallel downloads number
+ * @param asyncWorkersTimeout - downloading asset timeout
+ * @param queryNodeUrl - Query Node endpoint URL
+ * @param uploadDirectory - local directory to get file names from
+ * @param tempDirectory - local directory for temporary data downloading
+ * @param operatorUrl - (optional) defines the data source URL. If not set
+ * the source URL is resolved for each data object separately using the Query
+ * Node information about the storage providers.
+ */
+export async function performSync(
+  api: ApiPromise | undefined,
+  workerId: number,
+  asyncWorkersNumber: number,
+  asyncWorkersTimeout: number,
+  queryNodeUrl: string,
+  uploadDirectory: string,
+  tempDirectory: string,
+  operatorUrl?: string
+): Promise<void> {
+  logger.info('Started syncing...')
+  const [model, files] = await Promise.all([
+    getStorageObligationsFromRuntime(queryNodeUrl, workerId),
+    getDataObjectIDs(),
+  ])
+
+  const requiredIds = model.dataObjects.map((obj) => obj.id)
+
+  const added = _.difference(requiredIds, files)
+  const deleted = _.difference(files, requiredIds)
+
+  logger.debug(`Sync - new objects: ${added.length}`)
+  logger.debug(`Sync - obsolete objects: ${deleted.length}`)
+
+  const workingStack = new WorkingStack()
+
+  let addedTasks: SyncTask[]
+  if (operatorUrl === undefined) {
+    addedTasks = await getPrepareDownloadTasks(
+      api,
+      model,
+      workerId,
+      added,
+      uploadDirectory,
+      tempDirectory,
+      workingStack,
+      asyncWorkersTimeout
+    )
+  } else {
+    addedTasks = await getDownloadTasks(operatorUrl, added, uploadDirectory, tempDirectory, asyncWorkersTimeout)
+  }
+
+  logger.debug(`Sync - started processing...`)
+
+  const processSpawner = new TaskProcessorSpawner(workingStack, asyncWorkersNumber)
+
+  await workingStack.add(addedTasks)
+
+  await processSpawner.process()
+  logger.info('Sync ended.')
+}
+
+/**
+ * Creates the download preparation tasks.
+ *
+ * @param api - (optional) runtime API promise
+ * @param dataObligations - defines the current data obligations for the node
+ * @param currentWorkerId - current storage provider ID
+ * @param addedIds - data object IDs to download
+ * @param uploadDirectory - local directory for data uploading
+ * @param tempDirectory - local directory for temporary data uploading
+ * @param taskSink - a destination for the newly created tasks
+ * @param asyncWorkersTimeout - downloading asset timeout
+ */
+async function getPrepareDownloadTasks(
+  api: ApiPromise | undefined,
+  dataObligations: DataObligations,
+  currentWorkerId: number,
+  addedIds: string[],
+  uploadDirectory: string,
+  tempDirectory: string,
+  taskSink: TaskSink,
+  asyncWorkersTimeout: number
+): Promise<PrepareDownloadFileTask[]> {
+  const bagIdByDataObjectId = new Map()
+  for (const entry of dataObligations.dataObjects) {
+    bagIdByDataObjectId.set(entry.id, entry.bagId)
+  }
+
+  const bucketOperatorUrlById = new Map()
+  for (const entry of dataObligations.storageBuckets) {
+    // Skip all buckets of the current WorkerId (this storage provider)
+    if (entry.workerId !== currentWorkerId) {
+      bucketOperatorUrlById.set(entry.id, entry.operatorUrl)
+    }
+  }
+
+  const bagOperatorsUrlsById = new Map()
+  for (const entry of dataObligations.bags) {
+    const operatorUrls = []
+
+    for (const bucket of entry.buckets) {
+      if (bucketOperatorUrlById.has(bucket)) {
+        const operatorUrl = bucketOperatorUrlById.get(bucket)
+        if (operatorUrl) {
+          operatorUrls.push(operatorUrl)
+        }
+      }
+    }
+
+    bagOperatorsUrlsById.set(entry.id, operatorUrls)
+  }
+
+  const tasks = addedIds.map((id) => {
+    let operatorUrls: string[] = [] // can be empty after lookup
+    let bagId = null
+    if (bagIdByDataObjectId.has(id)) {
+      bagId = bagIdByDataObjectId.get(id)
+      if (bagOperatorsUrlsById.has(bagId)) {
+        operatorUrls = bagOperatorsUrlsById.get(bagId)
+      }
+    }
+
+    return new PrepareDownloadFileTask(
+      operatorUrls,
+      bagId,
+      id,
+      uploadDirectory,
+      tempDirectory,
+      taskSink,
+      asyncWorkersTimeout,
+      api
+    )
+  })
+
+  return tasks
+}
+
+/**
+ * Creates the download file tasks.
+ *
+ * @param operatorUrl - defines the data source URL.
+ * @param addedIds - data object IDs to download
+ * @param uploadDirectory - local directory for data uploading
+ * @param tempDirectory - local directory for temporary data uploading
+ * @param downloadTimeout - asset downloading timeout (in minutes)
+ */
+async function getDownloadTasks(
+  operatorUrl: string,
+  addedIds: string[],
+  uploadDirectory: string,
+  tempDirectory: string,
+  downloadTimeout: number
+): Promise<DownloadFileTask[]> {
+  const addedTasks = addedIds.map(
+    (fileName) =>
+      new DownloadFileTask(operatorUrl, fileName, undefined, uploadDirectory, tempDirectory, downloadTimeout)
+  )
+
+  return addedTasks
+}

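A minimal sketch (all values are hypothetical) of triggering a single sync iteration without a runtime API connection; source URLs are then resolved per data object via the Query Node:

```ts
import { performSync, TempDirName } from './synchronizer'

async function runSyncOnce(): Promise<void> {
  await performSync(
    undefined, // api: optional runtime API promise (the expected hash cannot be fetched without it)
    1, // workerId
    20, // asyncWorkersNumber: max parallel downloads
    30, // asyncWorkersTimeout: download timeout in minutes
    'http://localhost:8081/graphql', // queryNodeUrl
    './uploads', // uploadDirectory
    TempDirName // tempDirectory (subdirectory of the uploads directory)
  )
}
```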
+ 240 - 0
storage-node-v2/src/services/sync/tasks.ts

@@ -0,0 +1,240 @@
+import fs from 'fs'
+import path from 'path'
+import { pipeline } from 'stream'
+import { promisify } from 'util'
+import superagent from 'superagent'
+import urljoin from 'url-join'
+import { v4 as uuidv4 } from 'uuid'
+import logger from '../../services/logger'
+import _ from 'lodash'
+import { getRemoteDataObjects } from './remoteStorageData'
+import { TaskSink } from './workingProcess'
+import { isNewDataObject } from '../caching/newUploads'
+import { addDataObjectIdToCache, deleteDataObjectIdFromCache } from '../caching/localDataObjects'
+import { hashFile } from '../helpers/hashing'
+import { parseBagId } from '../helpers/bagTypes'
+import { hexToString } from '@polkadot/util'
+import { ApiPromise } from '@polkadot/api'
+const fsPromises = fs.promises
+
+/**
+ * Defines the synchronization task abstraction.
+ */
+export interface SyncTask {
+  /**
+   * Returns human-friendly task description.
+   */
+  description(): string
+
+  /**
+   * Performs the task.
+   */
+  execute(): Promise<void>
+}
+
+/**
+ * Deletes the file in the local storage by its name.
+ */
+export class DeleteLocalFileTask implements SyncTask {
+  uploadsDirectory: string
+  filename: string
+
+  constructor(uploadsDirectory: string, filename: string) {
+    this.uploadsDirectory = uploadsDirectory
+    this.filename = filename
+  }
+
+  description(): string {
+    return `Sync - deleting local file: ${this.filename} ....`
+  }
+
+  async execute(): Promise<void> {
+    const dataObjectId = this.filename
+    if (isNewDataObject(dataObjectId)) {
+      logger.warn(`Sync - possible QueryNode update delay (new file) - deleting file canceled: ${this.filename}`)
+      return
+    }
+
+    const fullPath = path.join(this.uploadsDirectory, this.filename)
+    await fsPromises.unlink(fullPath)
+
+    await deleteDataObjectIdFromCache(dataObjectId)
+  }
+}
+
+/**
+ * Download the file from the remote storage node to the local storage.
+ */
+export class DownloadFileTask implements SyncTask {
+  dataObjectId: string
+  expectedHash?: string
+  uploadsDirectory: string
+  tempDirectory: string
+  url: string
+  downloadTimeout: number
+  constructor(
+    baseUrl: string,
+    dataObjectId: string,
+    expectedHash: string | undefined,
+    uploadsDirectory: string,
+    tempDirectory: string,
+    downloadTimeout: number
+  ) {
+    this.dataObjectId = dataObjectId
+    this.expectedHash = expectedHash
+    this.uploadsDirectory = uploadsDirectory
+    this.tempDirectory = tempDirectory
+    this.downloadTimeout = downloadTimeout
+    this.url = urljoin(baseUrl, 'api/v1/files', dataObjectId)
+  }
+
+  description(): string {
+    return `Sync - downloading file: ${this.url} to ${this.uploadsDirectory} ....`
+  }
+
+  async execute(): Promise<void> {
+    const streamPipeline = promisify(pipeline)
+    const filepath = path.join(this.uploadsDirectory, this.dataObjectId)
+    // We create a temp file first to mitigate partial downloads on an app (or remote node) crash.
+    // These partial downloads will be cleaned up during the next sync iteration.
+    const tempFilePath = path.join(this.uploadsDirectory, this.tempDirectory, uuidv4())
+    try {
+      const timeoutMs = this.downloadTimeout * 60 * 1000
+      // Casting because of:
+      // https://stackoverflow.com/questions/38478034/pipe-superagent-response-to-express-response
+      const request = (superagent.get(this.url).timeout(timeoutMs) as unknown) as NodeJS.ReadableStream
+      const fileStream = fs.createWriteStream(tempFilePath)
+
+      request.on('response', (res) => {
+        if (!res.ok) {
+          logger.error(`Sync - unexpected status code(${res.statusCode}) for ${res?.request?.url}`)
+        }
+      })
+      await streamPipeline(request, fileStream)
+      await this.verifyDownloadedFile(tempFilePath)
+      await fsPromises.rename(tempFilePath, filepath)
+      await addDataObjectIdToCache(this.dataObjectId)
+    } catch (err) {
+      logger.error(`Sync - fetching data error for ${this.url}: ${err}`, { err })
+      try {
+        logger.warn(`Cleaning up file ${tempFilePath}`)
+        await fsPromises.unlink(tempFilePath)
+      } catch (err) {
+        logger.error(`Sync - cannot cleanup file ${tempFilePath}: ${err}`, { err })
+      }
+    }
+  }
+
+  /**
+   * Compares the expected and actual IPFS hashes.
+   *
+   * @param filePath downloaded file path
+   */
+  async verifyDownloadedFile(filePath: string): Promise<void> {
+    if (!_.isEmpty(this.expectedHash)) {
+      const hash = await hashFile(filePath)
+
+      if (hash !== this.expectedHash) {
+        throw new Error(`Invalid file hash. Expected: ${this.expectedHash} - real: ${hash}`)
+      }
+    }
+  }
+}
+
+/**
+ * Resolves remote storage node URLs and creates file download tasks (DownloadFileTask).
+ */
+export class PrepareDownloadFileTask implements SyncTask {
+  bagId: string
+  dataObjectId: string
+  operatorUrlCandidates: string[]
+  taskSink: TaskSink
+  uploadsDirectory: string
+  tempDirectory: string
+  api?: ApiPromise
+  downloadTimeout: number
+
+  constructor(
+    operatorUrlCandidates: string[],
+    bagId: string,
+    dataObjectId: string,
+    uploadsDirectory: string,
+    tempDirectory: string,
+    taskSink: TaskSink,
+    downloadTimeout: number,
+    api?: ApiPromise
+  ) {
+    this.api = api
+    this.bagId = bagId
+    this.dataObjectId = dataObjectId
+    this.taskSink = taskSink
+    this.operatorUrlCandidates = operatorUrlCandidates
+    this.uploadsDirectory = uploadsDirectory
+    this.tempDirectory = tempDirectory
+    this.downloadTimeout = downloadTimeout
+  }
+
+  description(): string {
+    return `Sync - preparing for download of: ${this.dataObjectId} ....`
+  }
+
+  async execute(): Promise<void> {
+    // Create an array of operator URL indices to allow a random, non-repeating URL choice.
+    // We cannot use the original array because we shouldn't modify the original data,
+    // and cloning it seems like a heavy operation.
+    const operatorUrlIndices: number[] = [...Array(this.operatorUrlCandidates.length).keys()]
+
+    if (_.isEmpty(this.bagId)) {
+      logger.error(`Sync - invalid task - no bagId for ${this.dataObjectId}`)
+      return
+    }
+
+    while (!_.isEmpty(operatorUrlIndices)) {
+      const randomUrlIndex = _.sample(operatorUrlIndices)
+      if (randomUrlIndex === undefined) {
+        logger.warn(`Sync - cannot get a random URL`)
+        break
+      }
+
+      const randomUrl = this.operatorUrlCandidates[randomUrlIndex]
+      logger.debug(`Sync - random storage node URL was chosen ${randomUrl}`)
+
+      // Remove the chosen index so this URL is not tried again.
+      _.remove(operatorUrlIndices, (index) => index === randomUrlIndex)
+
+      try {
+        const chosenBaseUrl = randomUrl
+        const [remoteOperatorIds, hash] = await Promise.all([
+          getRemoteDataObjects(chosenBaseUrl),
+          this.getExpectedHash(),
+        ])
+
+        if (remoteOperatorIds.includes(this.dataObjectId)) {
+          const newTask = new DownloadFileTask(
+            chosenBaseUrl,
+            this.dataObjectId,
+            hash,
+            this.uploadsDirectory,
+            this.tempDirectory,
+            this.downloadTimeout
+          )
+
+          return this.taskSink.add([newTask])
+        }
+      } catch (err) {
+        logger.error(`Sync - fetching data error for ${this.dataObjectId}: ${err}`, { err })
+      }
+    }
+
+    logger.warn(`Sync - cannot get operator URLs for ${this.dataObjectId}`)
+  }
+
+  async getExpectedHash(): Promise<string | undefined> {
+    if (this.api !== undefined) {
+      const convertedBagId = parseBagId(this.bagId)
+      const dataObject = await this.api.query.storage.dataObjectsById(convertedBagId, this.dataObjectId)
+      return hexToString(dataObject.ipfsContentId.toString())
+    }
+
+    return undefined
+  }
+}

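The URL-selection loop in PrepareDownloadFileTask tries each candidate operator at most once, in random order, without mutating the candidate list. A minimal, stand-alone sketch of that pattern (the generator below is illustrative, not part of the diff):

```ts
import _ from 'lodash'

// Yields the candidate URLs in random order, each at most once, while leaving
// the original array untouched (only the local index array is mutated).
function* pickUrlsInRandomOrder(candidates: string[]): Generator<string> {
  const indices = [...Array(candidates.length).keys()]

  while (indices.length > 0) {
    const index = _.sample(indices)
    if (index === undefined) {
      break
    }

    _.remove(indices, (i) => i === index)
    yield candidates[index]
  }
}

// for (const url of pickUrlsInRandomOrder(['http://node-a', 'http://node-b'])) { ... }
```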
+ 123 - 0
storage-node-v2/src/services/sync/workingProcess.ts

@@ -0,0 +1,123 @@
+import sleep from 'sleep-promise'
+import { SyncTask } from './tasks'
+import logger from '../../services/logger'
+
+/**
+ * Defines task destination abstraction.
+ */
+export interface TaskSink {
+  /**
+   * Adds task array to the pending tasks collection.
+   *
+   * @param tasks tasks to add.
+   */
+  add(tasks: SyncTask[]): Promise<void>
+}
+
+/**
+ * Defines task source abstraction.
+ */
+export interface TaskSource {
+  /**
+   * Gets the next task from the pending tasks collection.
+   *
+   * @returns next task or null if empty.
+   */
+  get(): Promise<SyncTask | null>
+}
+
+/**
+ * Defines the pending task collection. Implements LIFO semantics.
+ */
+export class WorkingStack implements TaskSink, TaskSource {
+  workingStack: SyncTask[]
+
+  constructor() {
+    this.workingStack = []
+  }
+
+  async get(): Promise<SyncTask | null> {
+    const task = this.workingStack.pop()
+
+    if (task !== undefined) {
+      return task
+    } else {
+      return null
+    }
+  }
+
+  async add(tasks: SyncTask[]): Promise<void> {
+    if (tasks !== null) {
+      this.workingStack.push(...tasks)
+    }
+  }
+}
+
+/**
+ * Defines working process. It consumes and executes tasks from the pending
+ * tasks source.
+ */
+export class TaskProcessor {
+  taskSource: TaskSource
+  exitOnCompletion: boolean
+  sleepTime: number
+
+  constructor(taskSource: TaskSource, exitOnCompletion = true, sleepTime = 3000) {
+    this.taskSource = taskSource
+    this.exitOnCompletion = exitOnCompletion
+    this.sleepTime = sleepTime
+  }
+
+  /**
+   * Starts the task processor, which picks tasks one by one from the pending task
+   * collection and executes them. When the task source is empty it either exits
+   * or pauses, depending on the configuration.
+   *
+   * @returns empty promise
+   */
+  async process(): Promise<void> {
+    while (true) {
+      const task = await this.taskSource.get()
+
+      if (task !== null) {
+        logger.debug(task.description())
+        await task.execute()
+      } else {
+        if (this.exitOnCompletion) {
+          return
+        }
+
+        await sleep(this.sleepTime)
+      }
+    }
+  }
+}
+
+/**
+ * Manages a pack of task processors. Runs multiple instances and waits for
+ * them to finish.
+ */
+export class TaskProcessorSpawner {
+  processNumber: number
+  taskSource: TaskSource
+  constructor(taskSource: TaskSource, processNumber: number) {
+    this.taskSource = taskSource
+    this.processNumber = processNumber
+  }
+
+  /**
+   * Starts the task processor pack and waits for its completion.
+   *
+   * @returns empty promise
+   */
+  async process(): Promise<void> {
+    const processes = []
+
+    for (let i = 0; i < this.processNumber; i++) {
+      const processor = new TaskProcessor(this.taskSource)
+      processes.push(processor.process())
+    }
+
+    await Promise.all(processes)
+  }
+}

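A minimal sketch of wiring the pieces above together: tasks go into the LIFO WorkingStack and are drained by two parallel TaskProcessor instances (the LogTask class below is hypothetical):

```ts
import { WorkingStack, TaskProcessorSpawner } from './workingProcess'
import { SyncTask } from './tasks'
import logger from '../../services/logger'

class LogTask implements SyncTask {
  constructor(private message: string) {}

  description(): string {
    return `Logging: ${this.message}`
  }

  async execute(): Promise<void> {
    logger.info(this.message)
  }
}

async function runExample(): Promise<void> {
  const workingStack = new WorkingStack()
  const spawner = new TaskProcessorSpawner(workingStack, 2)

  await workingStack.add([new LogTask('first'), new LogTask('second')])

  // Resolves when the stack is drained (processors exit on an empty source by default).
  await spawner.process()
}
```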
+ 39 - 27
storage-node-v2/src/services/webApi/app.ts

@@ -3,45 +3,33 @@ import path from 'path'
 import cors from 'cors'
 import { Express, NextFunction } from 'express-serve-static-core'
 import * as OpenApiValidator from 'express-openapi-validator'
-import { HttpError, OpenAPIV3 } from 'express-openapi-validator/dist/framework/types'
+import { HttpError, OpenAPIV3, ValidateSecurityOpts } from 'express-openapi-validator/dist/framework/types'
 import { KeyringPair } from '@polkadot/keyring/types'
 import { ApiPromise } from '@polkadot/api'
 import { RequestData, verifyTokenSignature, parseUploadToken, UploadToken } from '../helpers/auth'
-import { checkRemoveNonce } from '../../services/helpers/tokenNonceKeeper'
+import { checkRemoveNonce } from '../caching/tokenNonceKeeper'
 import { httpLogger, errorLogger } from '../../services/logger'
+import { AppConfig } from './controllers/common'
 
 /**
  * Creates Express web application. Uses the OAS spec file for the API.
  *
- * @param api - runtime API promise
- * @param account - KeyringPair instance
- * @param workerId - storage provider ID (worker ID)
- * @param uploadsDir - directory for the file uploading
- * @param maxFileSize - max allowed file size
+ * @param config - web app configuration parameters
  * @returns Express promise.
  */
-export async function createApp(
-  api: ApiPromise,
-  account: KeyringPair,
-  workerId: number,
-  uploadsDir: string,
-  maxFileSize: number
-): Promise<Express> {
+export async function createApp(config: AppConfig): Promise<Express> {
   const spec = path.join(__dirname, './../../api-spec/openapi.yaml')
-
   const app = express()
 
   app.use(cors())
   app.use(express.json())
-  app.use(httpLogger())
+  app.use(httpLogger(config.logSource, config.elasticSearchEndpoint))
 
   app.use(
     // Set parameters for each request.
     (req: express.Request, res: express.Response, next: NextFunction) => {
-      res.locals.uploadsDir = uploadsDir
-      res.locals.storageProviderAccount = account
-      res.locals.workerId = workerId
-      res.locals.api = api
+      res.locals = config
+
       next()
     },
     // Setup OpenAPiValidator
@@ -55,20 +43,16 @@ export async function createApp(
         resolver: OpenApiValidator.resolvers.modulePathResolver,
       },
       fileUploader: {
-        dest: uploadsDir,
+        dest: config.tempFileUploadingDir,
         // Busboy library settings
         limits: {
           // For multipart forms, the max number of file fields (Default: Infinity)
           files: 1,
           // For multipart forms, the max file size (in bytes) (Default: Infinity)
-          fileSize: maxFileSize,
-        },
-      },
-      validateSecurity: {
-        handlers: {
-          UploadAuth: validateUpload(api, account),
+          fileSize: config.maxFileSize,
         },
       },
+      validateSecurity: setupUploadingValidation(config.enableUploadingAuth, config.api, config.storageProviderAccount),
     })
   ) // Required signature.
 
@@ -101,6 +85,34 @@ export async function createApp(
   return app
 }
 
+/**
+ * Sets up upload validation. It either disables the validation or returns the
+ * 'validation security' configuration.
+ *
+ * @param enableUploadingAuth - enables uploading auth-schema validation
+ * @param api - runtime API promise
+ * @param account - KeyringPair instance
+ *
+ * @returns false (disabled validation) or validation options.
+ */
+function setupUploadingValidation(
+  enableUploadingAuth: boolean,
+  api: ApiPromise,
+  account: KeyringPair
+): boolean | ValidateSecurityOpts {
+  if (enableUploadingAuth) {
+    const opts = {
+      handlers: {
+        UploadAuth: validateUpload(api, account),
+      },
+    }
+
+    return opts
+  }
+
+  return false
+}
+
 // Defines a signature for an upload validation function.
 type ValidateUploadFunction = (
   req: express.Request,

+ 149 - 0
storage-node-v2/src/services/webApi/controllers/common.ts

@@ -0,0 +1,149 @@
+import * as express from 'express'
+import { ExtrinsicFailedError } from '../../runtime/api'
+import { BagIdValidationError } from '../../helpers/bagTypes'
+import { ApiPromise } from '@polkadot/api'
+import { KeyringPair } from '@polkadot/keyring/types'
+
+/**
+ * Dedicated error for the web api requests.
+ */
+export class WebApiError extends Error {
+  httpStatusCode: number
+
+  constructor(err: string, httpStatusCode: number) {
+    super(err)
+
+    this.httpStatusCode = httpStatusCode
+  }
+}
+
+/**
+ * Dedicated server error for the web api requests.
+ */
+export class ServerError extends WebApiError {
+  constructor(err: string) {
+    super(err, 500)
+  }
+}
+
+/**
+ * Handles errors and sends a response.
+ *
+ * @param res - Response instance
+ * @param next - Express NextFunction, used to forward the error to the logging middleware
+ * @param err - error
+ * @param errorType - defines request type
+ * @returns void promise.
+ */
+export function sendResponseWithError(
+  res: express.Response,
+  next: express.NextFunction,
+  err: Error,
+  errorType: string
+): void {
+  const message = isNofileError(err) ? `File not found.` : err.toString()
+
+  res.status(getHttpStatusCodeByError(err)).json({
+    type: errorType,
+    message,
+  })
+
+  next(err)
+}
+
+/**
+ * Checks the error for 'no-file' error (ENOENT).
+ *
+ * @param err - error
+ * @returns true when error code contains 'ENOENT'.
+ */
+function isNofileError(err: Error): boolean {
+  return err.toString().includes('ENOENT')
+}
+
+/**
+ * Gets the HTTP status code for an error.
+ *
+ * @param err - error
+ * @returns HTTP status code
+ */
+export function getHttpStatusCodeByError(err: Error): number {
+  if (isNofileError(err)) {
+    return 404
+  }
+
+  if (err instanceof ExtrinsicFailedError) {
+    return 500
+  }
+
+  if (err instanceof WebApiError) {
+    return err.httpStatusCode
+  }
+
+  if (err instanceof BagIdValidationError) {
+    return 400
+  }
+
+  return 500
+}
+
+/**
+ * Web application parameters.
+ */
+export type AppConfig = {
+  /**
+   * Runtime API promise
+   */
+  api: ApiPromise
+
+  /**
+   * KeyringPair instance
+   */
+  storageProviderAccount: KeyringPair
+
+  /**
+   * Storage provider ID (worker ID)
+   */
+  workerId: number
+
+  /**
+   * Directory for the file uploading
+   */
+  uploadsDir: string
+  /**
+   * Directory for temporary file uploading
+   */
+  tempFileUploadingDir: string
+
+  /**
+   *  Environment configuration
+   */
+  process: {
+    version: string
+    userAgent: string
+  }
+
+  /**
+   * Query Node endpoint URL
+   */
+  queryNodeEndpoint: string
+
+  /**
+   * Enables uploading auth-schema validation
+   */
+  enableUploadingAuth: boolean
+
+  /**
+   * Source tag for log entries for ElasticSearch.
+   */
+  logSource: string
+
+  /**
+   * ElasticSearch logging endpoint URL
+   */
+  elasticSearchEndpoint?: string
+
+  /**
+   * Max file size for uploading limit.
+   */
+  maxFileSize: number
+}

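A minimal sketch (all values are hypothetical) of assembling an AppConfig for the createApp() factory from webApi/app.ts:

```ts
import { ApiPromise } from '@polkadot/api'
import { KeyringPair } from '@polkadot/keyring/types'
import { AppConfig } from './common'

function buildAppConfig(api: ApiPromise, account: KeyringPair): AppConfig {
  return {
    api,
    storageProviderAccount: account,
    workerId: 1,
    uploadsDir: './uploads',
    tempFileUploadingDir: './uploads/temp',
    process: { version: '2.0.0', userAgent: 'storage-node-v2' },
    queryNodeEndpoint: 'http://localhost:8081/graphql',
    enableUploadingAuth: false,
    logSource: 'StorageNode',
    maxFileSize: 2000000000,
  }
}

// const app = await createApp(buildAppConfig(api, account))
// app.listen(3333)
```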
+ 100 - 186
storage-node-v2/src/services/webApi/controllers/publicApi.ts → storage-node-v2/src/services/webApi/controllers/filesApi.ts

@@ -1,5 +1,4 @@
 import { acceptPendingDataObjects } from '../../runtime/extrinsics'
-import { ExtrinsicFailedError } from '../../runtime/api'
 import {
   RequestData,
   UploadTokenRequest,
@@ -7,52 +6,38 @@ import {
   createUploadToken,
   verifyTokenSignature,
 } from '../../helpers/auth'
-import { hashFile } from '../../../services/helpers/hashing'
-import { createNonce, getTokenExpirationTime } from '../../../services/helpers/tokenNonceKeeper'
-import { getFileInfo } from '../../../services/helpers/fileInfo'
-import { parseBagId } from '../../helpers/bagTypes'
+import { hashFile } from '../../helpers/hashing'
+import { registerNewDataObjectId } from '../../caching/newUploads'
+import { addDataObjectIdToCache } from '../../caching/localDataObjects'
+import { createNonce, getTokenExpirationTime } from '../../caching/tokenNonceKeeper'
+import { getFileInfo } from '../../helpers/fileInfo'
 import { BagId } from '@joystream/types/storage'
-import logger from '../../../services/logger'
-import { KeyringPair } from '@polkadot/keyring/types'
+import logger from '../../logger'
 import { ApiPromise } from '@polkadot/api'
 import * as express from 'express'
 import fs from 'fs'
 import path from 'path'
 import send from 'send'
-import { CLIError } from '@oclif/errors'
 import { hexToString } from '@polkadot/util'
+import { parseBagId } from '../../helpers/bagTypes'
+import { timeout } from 'promise-timeout'
+import { WebApiError, sendResponseWithError, getHttpStatusCodeByError, AppConfig } from './common'
+import { getStorageBucketIdsByWorkerId } from '../../sync/storageObligations'
+import { Membership } from '@joystream/types/members'
 const fsPromises = fs.promises
 
 /**
- * Dedicated error for the web api requests.
+ * A public endpoint: serves files by data object ID.
  */
-export class WebApiError extends CLIError {
-  httpStatusCode: number
-
-  constructor(err: string, httpStatusCode: number) {
-    super(err)
-
-    this.httpStatusCode = httpStatusCode
-  }
-}
-
-/**
- * Dedicated server error for the web api requests.
- */
-export class ServerError extends WebApiError {
-  constructor(err: string) {
-    super(err, 500)
-  }
-}
-
-/**
- * A public endpoint: serves files by CID.
- */
-export async function getFile(req: express.Request, res: express.Response): Promise<void> {
+export async function getFile(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
   try {
-    const cid = getCid(req)
-    const uploadsDir = getUploadsDir(res)
-    const fullPath = path.resolve(uploadsDir, cid)
+    const dataObjectId = getDataObjectId(req)
+    const uploadsDir = res.locals.uploadsDir
+    const fullPath = path.resolve(uploadsDir, dataObjectId)
 
     const fileInfo = await getFileInfo(fullPath)
     const fileStats = await fsPromises.stat(fullPath)
@@ -67,23 +52,23 @@ export async function getFile(req: express.Request, res: express.Response): Prom
     })
 
     stream.on('error', (err) => {
-      sendResponseWithError(res, err, 'files')
+      sendResponseWithError(res, next, err, 'files')
     })
 
     stream.pipe(res)
   } catch (err) {
-    sendResponseWithError(res, err, 'files')
+    sendResponseWithError(res, next, err, 'files')
   }
 }
 
 /**
- * A public endpoint: sends file headers by CID.
+ * A public endpoint: sends file headers by data object ID.
  */
-export async function getFileHeaders(req: express.Request, res: express.Response): Promise<void> {
+export async function getFileHeaders(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
   try {
-    const cid = getCid(req)
-    const uploadsDir = getUploadsDir(res)
-    const fullPath = path.resolve(uploadsDir, cid)
+    const dataObjectId = getDataObjectId(req)
+    const uploadsDir = res.locals.uploadsDir
+    const fullPath = path.resolve(uploadsDir, dataObjectId)
     const fileInfo = await getFileInfo(fullPath)
     const fileStats = await fsPromises.stat(fullPath)
 
@@ -100,7 +85,11 @@ export async function getFileHeaders(req: express.Request, res: express.Response
 /**
  * A public endpoint: receives file.
  */
-export async function uploadFile(req: express.Request, res: express.Response): Promise<void> {
+export async function uploadFile(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
   const uploadRequest: RequestData = req.body
 
   // saved filename to delete on verification or extrinsic errors
@@ -108,50 +97,67 @@ export async function uploadFile(req: express.Request, res: express.Response): P
   try {
     const fileObj = getFileObject(req)
     cleanupFileName = fileObj.path
+    const queryNodeUrl = res.locals.queryNodeEndpoint
+    const workerId = res.locals.workerId
 
-    const api = getApi(res)
-    await verifyFileMimeType(fileObj.path)
-
-    const hash = await hashFile(fileObj.path)
-    const bagId = parseBagId(api, uploadRequest.bagId)
+    const [, hash] = await Promise.all([
+      verifyBucketId(queryNodeUrl, workerId, uploadRequest.storageBucketId),
+      hashFile(fileObj.path),
+    ])
 
+    const api = res.locals.api
+    const bagId = parseBagId(uploadRequest.bagId)
     const accepted = await verifyDataObjectInfo(api, bagId, uploadRequest.dataObjectId, fileObj.size, hash)
 
     // Prepare new file name
-    const newPath = fileObj.path.replace(fileObj.filename, hash)
+    const dataObjectId = uploadRequest.dataObjectId.toString()
+    const uploadsDir = res.locals.uploadsDir
+    const newPath = path.join(uploadsDir, dataObjectId)
+
+    registerNewDataObjectId(dataObjectId)
+    await addDataObjectIdToCache(dataObjectId)
 
     // Overwrites existing file.
     await fsPromises.rename(fileObj.path, newPath)
     cleanupFileName = newPath
 
-    const workerId = getWorkerId(res)
     if (!accepted) {
-      await acceptPendingDataObjects(api, bagId, getAccount(res), workerId, uploadRequest.storageBucketId, [
-        uploadRequest.dataObjectId,
-      ])
+      await acceptPendingDataObjects(
+        api,
+        bagId,
+        res.locals.storageProviderAccount,
+        workerId,
+        uploadRequest.storageBucketId,
+        [uploadRequest.dataObjectId]
+      )
     } else {
       logger.warn(
         `Received already accepted data object. DataObjectId = ${uploadRequest.dataObjectId} WorkerId = ${workerId}`
       )
     }
+
     res.status(201).json({
       id: hash,
     })
   } catch (err) {
     await cleanupFileOnError(cleanupFileName, err.toString())
 
-    sendResponseWithError(res, err, 'upload')
+    sendResponseWithError(res, next, err, 'upload')
   }
 }
 
 /**
  * A public endpoint: creates auth token for file uploads.
  */
-export async function authTokenForUploading(req: express.Request, res: express.Response): Promise<void> {
+export async function authTokenForUploading(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
   try {
-    const account = getAccount(res)
+    const account = res.locals.storageProviderAccount
     const tokenRequest = getTokenRequest(req)
-    const api = getApi(res)
+    const api = res.locals.api
 
     await validateTokenRequest(api, tokenRequest)
 
@@ -166,7 +172,7 @@ export async function authTokenForUploading(req: express.Request, res: express.R
       token: signedToken,
     })
   } catch (err) {
-    sendResponseWithError(res, err, 'authtoken')
+    sendResponseWithError(res, next, err, 'authtoken')
   }
 }
 
@@ -175,7 +181,7 @@ export async function authTokenForUploading(req: express.Request, res: express.R
  *
  * @remarks
  * This is a helper function. It parses the request object for a variable and
- * throws an error on failier.
+ * throws an error on failure.
  */
 function getFileObject(req: express.Request): Express.Multer.File {
   if (req.file) {
@@ -191,79 +197,19 @@ function getFileObject(req: express.Request): Express.Multer.File {
 }
 
 /**
- * Returns worker ID from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-function getWorkerId(res: express.Response): number {
-  if (res.locals.workerId || res.locals.workerId === 0) {
-    return res.locals.workerId
-  }
-
-  throw new ServerError('No Joystream worker ID loaded.')
-}
-
-/**
- * Returns a directory for file uploading from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failier.
- */
-function getUploadsDir(res: express.Response): string {
-  if (res.locals.uploadsDir) {
-    return res.locals.uploadsDir
-  }
-
-  throw new ServerError('No upload directory path loaded.')
-}
-
-/**
- * Returns a KeyPair instance from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failier.
- */
-function getAccount(res: express.Response): KeyringPair {
-  if (res.locals.storageProviderAccount) {
-    return res.locals.storageProviderAccount
-  }
-
-  throw new ServerError('No Joystream account loaded.')
-}
-
-/**
- * Returns API promise from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failier.
- */
-function getApi(res: express.Response): ApiPromise {
-  if (res.locals.api) {
-    return res.locals.api
-  }
-
-  throw new ServerError('No Joystream API loaded.')
-}
-
-/**
- * Returns Content ID from the request.
+ * Returns data object ID from the request.
  *
  * @remarks
  * This is a helper function. It parses the request object for a variable and
- * throws an error on failier.
+ * throws an error on failure.
  */
-function getCid(req: express.Request): string {
-  const cid = req.params.cid || ''
-  if (cid.length > 0) {
-    return cid
+function getDataObjectId(req: express.Request): string {
+  const id = req.params.id || ''
+  if (id.length > 0) {
+    return id
   }
 
-  throw new WebApiError('No CID provided.', 400)
+  throw new WebApiError('No data object ID provided.', 400)
 }
 
 /**
@@ -271,7 +217,7 @@ function getCid(req: express.Request): string {
  *
  * @remarks
  * This is a helper function. It parses the request object for a variable and
- * throws an error on failier.
+ * throws an error on failure.
  */
 function getTokenRequest(req: express.Request): UploadTokenRequest {
   const tokenRequest = req.body as UploadTokenRequest
@@ -297,7 +243,10 @@ async function validateTokenRequest(api: ApiPromise, tokenRequest: UploadTokenRe
     throw new WebApiError('Invalid upload token request signature.', 401)
   }
 
-  const membership = await api.query.members.membershipById(tokenRequest.data.memberId)
+  const membershipPromise = api.query.members.membershipById(tokenRequest.data.memberId)
+
+  const membership = (await timeout(membershipPromise, 5000)) as Membership
+
   if (membership.controller_account.toString() !== tokenRequest.data.accountId) {
     throw new WebApiError(`Provided controller account and member id don't match.`, 401)
   }
@@ -359,73 +308,38 @@ async function cleanupFileOnError(cleanupFileName: string, error: string): Promi
 }
 
 /**
- * Verifies the mime type of the file by its content. It throws an exception
- * if the mime type differs from allowed list ('image/', 'video/', 'audio/').
- *
- * @param filePath - file path to detect mime types
- * @param error - external error
- * @returns void promise.
+ * A public endpoint: returns the server version.
  */
-async function verifyFileMimeType(filePath: string): Promise<void> {
-  const allowedMimeTypes = ['image/', 'video/', 'audio/']
-
-  const fileInfo = await getFileInfo(filePath)
-  const correctMimeType = allowedMimeTypes.some((allowedType) => fileInfo.mimeType.startsWith(allowedType))
+export async function getVersion(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
+  try {
+    const config = res.locals.process
 
-  if (!correctMimeType) {
-    throw new WebApiError(`Incorrect mime type detected: ${fileInfo.mimeType}`, 400)
+    // Copy from an object, because the actual object could contain more data.
+    res.status(200).json({
+      version: config.version,
+      userAgent: config.userAgent,
+    })
+  } catch (err) {
+    res.status(500).json({
+      type: 'version',
+      message: err.toString(),
+    })
   }
 }
 
 /**
- * Handles errors and sends a response.
+ * Validates the storage bucket ID obligations for the worker (storage provider).
+ * It throws an error when the storage bucket doesn't belong to the worker.
  *
- * @param res - Response instance
- * @param err - error
- * @param errorType - defines request type
+ * @param queryNodeUrl - Query Node URL
+ * @param workerId - worker(storage provider) ID
+ * @param bucketId - storage bucket ID
  * @returns void promise.
  */
-function sendResponseWithError(res: express.Response, err: Error, errorType: string): void {
-  const message = isNofileError(err) ? `File not found.` : err.toString()
-
-  res.status(getHttpStatusCodeByError(err)).json({
-    type: errorType,
-    message,
-  })
-}
-
-/**
- * Checks the error for 'no-file' error (ENOENT).
- *
- * @param err - error
- * @returns true when error code contains 'ENOENT'.
- */
-function isNofileError(err: Error): boolean {
-  return err.toString().includes('ENOENT')
-}
-
-/**
- * Get the status code by error.
- *
- * @param err - error
- * @returns HTTP status code
- */
-function getHttpStatusCodeByError(err: Error): number {
-  if (isNofileError(err)) {
-    return 404
-  }
+async function verifyBucketId(queryNodeUrl: string, workerId: number, bucketId: number): Promise<void> {
+  const bucketIds = await getStorageBucketIdsByWorkerId(queryNodeUrl, workerId)
 
-  if (err instanceof ExtrinsicFailedError) {
-    return 400
+  if (!bucketIds.includes(bucketId.toString())) {
+    throw new WebApiError('Incorrect storage bucket ID.', 400)
   }
-
-  if (err instanceof WebApiError) {
-    return err.httpStatusCode
-  }
-
-  if (err instanceof CLIError) {
-    return 400
-  }
-
-  return 500
 }

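A minimal sketch of how a controller relies on the shared error helpers from common.ts: a thrown WebApiError (or any other error) is converted into a JSON response with the status derived by getHttpStatusCodeByError and then forwarded to the logging middleware. The endpoint below is hypothetical:

```ts
import * as express from 'express'
import { WebApiError, sendResponseWithError, AppConfig } from './common'

export async function exampleEndpoint(
  req: express.Request,
  res: express.Response<unknown, AppConfig>,
  next: express.NextFunction
): Promise<void> {
  try {
    if (!req.params.id) {
      // Mapped to HTTP 400 by getHttpStatusCodeByError inside sendResponseWithError.
      throw new WebApiError('No data object ID provided.', 400)
    }

    res.status(200).json({ id: req.params.id })
  } catch (err) {
    sendResponseWithError(res, next, err, 'example')
  }
}
```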
+ 156 - 0
storage-node-v2/src/services/webApi/controllers/stateApi.ts

@@ -0,0 +1,156 @@
+import { getDataObjectIDs } from '../../../services/caching/localDataObjects'
+import * as express from 'express'
+import _ from 'lodash'
+import { getDataObjectIDsByBagId } from '../../sync/storageObligations'
+import { WebApiError, sendResponseWithError, AppConfig } from './common'
+import fastFolderSize from 'fast-folder-size'
+import { promisify } from 'util'
+import fs from 'fs'
+import NodeCache from 'node-cache'
+const fsPromises = fs.promises
+
+// Expiration period in seconds for the local cache.
+const ExpirationPeriod = 30
+
+// Local in-memory cache for data
+const dataCache = new NodeCache({
+  stdTTL: ExpirationPeriod,
+  deleteOnExpire: true,
+})
+
+/**
+ * A public endpoint: returns all local data object IDs.
+ */
+export async function getAllLocalDataObjects(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
+  try {
+    const ids = await getDataObjectIDs()
+
+    res.status(200).json(ids)
+  } catch (err) {
+    sendResponseWithError(res, next, err, 'all_data_objects')
+  }
+}
+
+/**
+ * A public endpoint: serves stats for the local data uploading directory.
+ *
+ * @returns total size and count of the data objects.
+ */
+export async function getLocalDataStats(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
+  try {
+    const uploadsDir = res.locals.uploadsDir
+    const tempFileDir = res.locals.tempFileUploadingDir
+    const fastFolderSizeAsync = promisify(fastFolderSize)
+
+    const tempFolderExists = fs.existsSync(tempFileDir)
+    const statsPromise = fsPromises.readdir(uploadsDir)
+    const sizePromise = fastFolderSizeAsync(uploadsDir)
+
+    const [stats, totalSize] = await Promise.all([statsPromise, sizePromise])
+
+    let objectNumber = stats.length
+    let tempDownloads = 0
+    let tempDirSize = 0
+    if (tempFolderExists) {
+      if (objectNumber > 0) {
+        objectNumber--
+      }
+
+      const tempDirStatsPromise = fsPromises.readdir(tempFileDir)
+      const tempDirSizePromise = fastFolderSizeAsync(tempFileDir)
+
+      const [tempDirStats, tempSize] = await Promise.all([tempDirStatsPromise, tempDirSizePromise])
+
+      tempDirSize = tempSize ?? 0
+      tempDownloads = tempDirStats.length
+    }
+
+    res.status(200).json({
+      objectNumber,
+      totalSize,
+      tempDownloads,
+      tempDirSize,
+    })
+  } catch (err) {
+    sendResponseWithError(res, next, err, 'local_data_stats')
+  }
+}
+
+/**
+ * A public endpoint: returns local data object IDs for the bag.
+ */
+export async function getLocalDataObjectsByBagId(
+  req: express.Request,
+  res: express.Response<unknown, AppConfig>,
+  next: express.NextFunction
+): Promise<void> {
+  try {
+    const queryNodeUrl = res.locals.queryNodeEndpoint
+    const bagId = getBagId(req)
+
+    const [ids, requiredIds] = await Promise.all([
+      getDataObjectIDs(),
+      getCachedDataObjectsObligations(queryNodeUrl, bagId),
+    ])
+
+    const localDataForBag = _.intersection(ids, requiredIds)
+
+    res.status(200).json(localDataForBag)
+  } catch (err) {
+    sendResponseWithError(res, next, err, 'data_objects_by_bag')
+  }
+}
+
+/**
+ * A public endpoint: returns the server version.
+ */
+export async function getVersion(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
+  const config = res.locals.process
+
+  // Copy from an object, because the actual object could contain more data.
+  res.status(200).json({
+    version: config.version,
+    userAgent: config.userAgent,
+  })
+}
+
+/**
+ * Returns Bag ID from the request.
+ *
+ * @remarks
+ * This is a helper function. It parses the request object for a variable and
+ * throws an error on failure.
+ */
+function getBagId(req: express.Request): string {
+  const bagId = req.params.bagId || ''
+  if (bagId.length > 0) {
+    return bagId
+  }
+
+  throw new WebApiError('No bagId provided.', 400)
+}
+
+/**
+ * Returns cached data object ID obligations for the bag, fetched from the
+ * Query Node. Data could be obsolete until the cache expires.
+ *
+ */
+async function getCachedDataObjectsObligations(queryNodeUrl: string, bagId: string): Promise<string[]> {
+  const entryName = `data_object_obligations_${bagId}`
+
+  if (!dataCache.has(entryName)) {
+    const data = await getDataObjectIDsByBagId(queryNodeUrl, bagId)
+
+    dataCache.set(entryName, data)
+  }
+
+  return dataCache.get(entryName) ?? []
+}

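The bag-obligations lookup above follows a simple cache-aside pattern backed by node-cache. A generic, stand-alone sketch of the same pattern (the loader function is hypothetical):

```ts
import NodeCache from 'node-cache'

// Entries expire after 30 seconds and are recomputed on the next request.
const cache = new NodeCache({ stdTTL: 30, deleteOnExpire: true })

async function getCached<T>(key: string, load: () => Promise<T>): Promise<T> {
  if (!cache.has(key)) {
    cache.set(key, await load())
  }

  return cache.get(key) as T
}

// Usage: memoize an expensive Query Node call for 30 seconds.
// const ids = await getCached(`data_object_obligations_${bagId}`, () => fetchIdsFromQueryNode(bagId))
```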
+ 1 - 0
storage-node-v2/tsconfig.json

@@ -6,6 +6,7 @@
     "outDir": "lib",
     "rootDir": "src",
     "strict": true,
+    "strictNullChecks": true,
     "target": "es2017",
     "skipLibCheck": true,
     "baseUrl": ".",

+ 0 - 35
storage-node/.eslintrc.js

@@ -1,35 +0,0 @@
-module.exports = {
-  env: {
-    node: true,
-    es6: true,
-    mocha: true,
-  },
-  rules: {
-    'import/no-commonjs': 'off', // remove after converting to TS.
-    // Disabling Rules because of monorepo environment:
-    // https://github.com/benmosher/eslint-plugin-import/issues/1174
-    'import/no-extraneous-dependencies': 'off',
-    'import/no-nodejs-modules': 'off', // nodejs project
-    'no-console': 'off', // we use console in the project
-    '@typescript-eslint/no-var-requires': 'warn',
-    '@typescript-eslint/naming-convention': 'off',
-  },
-  overrides: [
-    {
-      files: [
-        '**/test/ranges.js',
-        '**/test/lru.js',
-        '**/test/fs/walk.js',
-        '**/test/storage.js',
-        '**/test/identities.js',
-        '**/test/balances.js',
-        '**/test/assets.js',
-      ],
-      rules: {
-        // Disabling Rules because of used chai lib:
-        // https://stackoverflow.com/questions/45079454/no-unused-expressions-in-mocha-chai-unit-test-using-standardjs
-        'no-unused-expressions': 'off',
-      },
-    },
-  ],
-}

+ 0 - 31
storage-node/.gitignore

@@ -1,31 +0,0 @@
-build/
-coverage/
-dist/
-tmp/
-.DS_Store
-
-.env.local
-.env.development.local
-.env.test.local
-.env.production.local
-
-.npmrc
-package-lock.json
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# IDEs
-.idea
-.vscode
-.*.sw*
-
-# Node modules
-node_modules/
-
-# Ignore nvm config file
-.nvmrc
-
-yarn.lock
-
-*.tsbuildinfo

+ 0 - 1
storage-node/.prettierignore

@@ -1 +0,0 @@
-packages/cli/dist

+ 0 - 675
storage-node/LICENSE.md

@@ -1,675 +0,0 @@
-### GNU GENERAL PUBLIC LICENSE
-
-Version 3, 29 June 2007
-
-Copyright (C) 2007 Free Software Foundation, Inc.
-<https://fsf.org/>
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-### Preamble
-
-The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom
-to share and change all versions of a program--to make sure it remains
-free software for all its users. We, the Free Software Foundation, use
-the GNU General Public License for most of our software; it applies
-also to any other work released this way by its authors. You can apply
-it to your programs, too.
-
-When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you
-have certain responsibilities if you distribute copies of the
-software, or if you modify it: responsibilities to respect the freedom
-of others.
-
-For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
-Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the
-manufacturer can do so. This is fundamentally incompatible with the
-aim of protecting users' freedom to change the software. The
-systematic pattern of such abuse occurs in the area of products for
-individuals to use, which is precisely where it is most unacceptable.
-Therefore, we have designed this version of the GPL to prohibit the
-practice for those products. If such problems arise substantially in
-other domains, we stand ready to extend this provision to those
-domains in future versions of the GPL, as needed to protect the
-freedom of users.
-
-Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish
-to avoid the special danger that patents applied to a free program
-could make it effectively proprietary. To prevent this, the GPL
-assures that patents cannot be used to render the program non-free.
-
-The precise terms and conditions for copying, distribution and
-modification follow.
-
-### TERMS AND CONDITIONS
-
-#### 0. Definitions.
-
-"This License" refers to version 3 of the GNU General Public License.
-
-"Copyright" also means copyright-like laws that apply to other kinds
-of works, such as semiconductor masks.
-
-"The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
-To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of
-an exact copy. The resulting work is called a "modified version" of
-the earlier work or a work "based on" the earlier work.
-
-A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user
-through a computer network, with no transfer of a copy, is not
-conveying.
-
-An interactive user interface displays "Appropriate Legal Notices" to
-the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-#### 1. Source Code.
-
-The "source code" for a work means the preferred form of the work for
-making modifications to it. "Object code" means any non-source form of
-a work.
-
-A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-The Corresponding Source need not include anything that users can
-regenerate automatically from other parts of the Corresponding Source.
-
-The Corresponding Source for a work in source code form is that same
-work.
-
-#### 2. Basic Permissions.
-
-All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-You may make, run and propagate covered works that you do not convey,
-without conditions so long as your license otherwise remains in force.
-You may convey covered works to others for the sole purpose of having
-them make modifications exclusively for you, or provide you with
-facilities for running those works, provided that you comply with the
-terms of this License in conveying all material for which you do not
-control copyright. Those thus making or running the covered works for
-you must do so exclusively on your behalf, under your direction and
-control, on terms that prohibit them from making any copies of your
-copyrighted material outside their relationship with you.
-
-Conveying under any other circumstances is permitted solely under the
-conditions stated below. Sublicensing is not allowed; section 10 makes
-it unnecessary.
-
-#### 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such
-circumvention is effected by exercising rights under this License with
-respect to the covered work, and you disclaim any intention to limit
-operation or modification of the work as a means of enforcing, against
-the work's users, your or third parties' legal rights to forbid
-circumvention of technological measures.
-
-#### 4. Conveying Verbatim Copies.
-
-You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-#### 5. Conveying Modified Source Versions.
-
-You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these
-conditions:
-
-- a) The work must carry prominent notices stating that you modified
-  it, and giving a relevant date.
-- b) The work must carry prominent notices stating that it is
-  released under this License and any conditions added under
-  section 7. This requirement modifies the requirement in section 4
-  to "keep intact all notices".
-- c) You must license the entire work, as a whole, under this
-  License to anyone who comes into possession of a copy. This
-  License will therefore apply, along with any applicable section 7
-  additional terms, to the whole of the work, and all its parts,
-  regardless of how they are packaged. This License gives no
-  permission to license the work in any other way, but it does not
-  invalidate such permission if you have separately received it.
-- d) If the work has interactive user interfaces, each must display
-  Appropriate Legal Notices; however, if the Program has interactive
-  interfaces that do not display Appropriate Legal Notices, your
-  work need not make them do so.
-
-A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-#### 6. Conveying Non-Source Forms.
-
-You may convey a covered work in object code form under the terms of
-sections 4 and 5, provided that you also convey the machine-readable
-Corresponding Source under the terms of this License, in one of these
-ways:
-
-- a) Convey the object code in, or embodied in, a physical product
-  (including a physical distribution medium), accompanied by the
-  Corresponding Source fixed on a durable physical medium
-  customarily used for software interchange.
-- b) Convey the object code in, or embodied in, a physical product
-  (including a physical distribution medium), accompanied by a
-  written offer, valid for at least three years and valid for as
-  long as you offer spare parts or customer support for that product
-  model, to give anyone who possesses the object code either (1) a
-  copy of the Corresponding Source for all the software in the
-  product that is covered by this License, on a durable physical
-  medium customarily used for software interchange, for a price no
-  more than your reasonable cost of physically performing this
-  conveying of source, or (2) access to copy the Corresponding
-  Source from a network server at no charge.
-- c) Convey individual copies of the object code with a copy of the
-  written offer to provide the Corresponding Source. This
-  alternative is allowed only occasionally and noncommercially, and
-  only if you received the object code with such an offer, in accord
-  with subsection 6b.
-- d) Convey the object code by offering access from a designated
-  place (gratis or for a charge), and offer equivalent access to the
-  Corresponding Source in the same way through the same place at no
-  further charge. You need not require recipients to copy the
-  Corresponding Source along with the object code. If the place to
-  copy the object code is a network server, the Corresponding Source
-  may be on a different server (operated by you or a third party)
-  that supports equivalent copying facilities, provided you maintain
-  clear directions next to the object code saying where to find the
-  Corresponding Source. Regardless of what server hosts the
-  Corresponding Source, you remain obligated to ensure that it is
-  available for as long as needed to satisfy these requirements.
-- e) Convey the object code using peer-to-peer transmission,
-  provided you inform other peers where the object code and
-  Corresponding Source of the work are being offered to the general
-  public at no charge under subsection 6d.
-
-A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal,
-family, or household purposes, or (2) anything designed or sold for
-incorporation into a dwelling. In determining whether a product is a
-consumer product, doubtful cases shall be resolved in favor of
-coverage. For a particular product received by a particular user,
-"normally used" refers to a typical or common use of that class of
-product, regardless of the status of the particular user or of the way
-in which the particular user actually uses, or expects or is expected
-to use, the product. A product is a consumer product regardless of
-whether the product has substantial commercial, industrial or
-non-consumer uses, unless such uses represent the only significant
-mode of use of the product.
-
-"Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to
-install and execute modified versions of a covered work in that User
-Product from a modified version of its Corresponding Source. The
-information must suffice to ensure that the continued functioning of
-the modified object code is in no case prevented or interfered with
-solely because modification has been made.
-
-If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or
-updates for a work that has been modified or installed by the
-recipient, or for the User Product in which it has been modified or
-installed. Access to a network may be denied when the modification
-itself materially and adversely affects the operation of the network
-or violates the rules and protocols for communication across the
-network.
-
-Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-#### 7. Additional Terms.
-
-"Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders
-of that material) supplement the terms of this License with terms:
-
-- a) Disclaiming warranty or limiting liability differently from the
-  terms of sections 15 and 16 of this License; or
-- b) Requiring preservation of specified reasonable legal notices or
-  author attributions in that material or in the Appropriate Legal
-  Notices displayed by works containing it; or
-- c) Prohibiting misrepresentation of the origin of that material,
-  or requiring that modified versions of such material be marked in
-  reasonable ways as different from the original version; or
-- d) Limiting the use for publicity purposes of names of licensors
-  or authors of the material; or
-- e) Declining to grant rights under trademark law for use of some
-  trade names, trademarks, or service marks; or
-- f) Requiring indemnification of licensors and authors of that
-  material by anyone who conveys the material (or modified versions
-  of it) with contractual assumptions of liability to the recipient,
-  for any liability that these contractual assumptions directly
-  impose on those licensors and authors.
-
-All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions; the
-above requirements apply either way.
-
-#### 8. Termination.
-
-You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-However, if you cease all violation of this License, then your license
-from a particular copyright holder is reinstated (a) provisionally,
-unless and until the copyright holder explicitly and finally
-terminates your license, and (b) permanently, if the copyright holder
-fails to notify you of the violation by some reasonable means prior to
-60 days after the cessation.
-
-Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-#### 9. Acceptance Not Required for Having Copies.
-
-You are not required to accept this License in order to receive or run
-a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-#### 10. Automatic Licensing of Downstream Recipients.
-
-Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
-An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-#### 11. Patents.
-
-A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
-A contributor's "essential patent claims" are all patent claims owned
-or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-A patent license is "discriminatory" if it does not include within the
-scope of its coverage, prohibits the exercise of, or is conditioned on
-the non-exercise of one or more of the rights that are specifically
-granted under this License. You may not convey a covered work if you
-are a party to an arrangement with a third party that is in the
-business of distributing software, under which you make payment to the
-third party based on the extent of your activity of conveying the
-work, and under which the third party grants, to any of the parties
-who would receive the covered work from you, a discriminatory patent
-license (a) in connection with copies of the covered work conveyed by
-you (or copies made from those copies), or (b) primarily for and in
-connection with specific products or compilations that contain the
-covered work, unless you entered into that arrangement, or that patent
-license was granted, prior to 28 March 2007.
-
-Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-#### 12. No Surrender of Others' Freedom.
-
-If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under
-this License and any other pertinent obligations, then as a
-consequence you may not convey it at all. For example, if you agree to
-terms that obligate you to collect a royalty for further conveying
-from those to whom you convey the Program, the only way you could
-satisfy both those terms and this License would be to refrain entirely
-from conveying the Program.
-
-#### 13. Use with the GNU Affero General Public License.
-
-Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-#### 14. Revised Versions of this License.
-
-The Free Software Foundation may publish revised and/or new versions
-of the GNU General Public License from time to time. Such new versions
-will be similar in spirit to the present version, but may differ in
-detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies that a certain numbered version of the GNU General Public
-License "or any later version" applies to it, you have the option of
-following the terms and conditions either of that numbered version or
-of any later version published by the Free Software Foundation. If the
-Program does not specify a version number of the GNU General Public
-License, you may choose any version ever published by the Free
-Software Foundation.
-
-If the Program specifies that a proxy can decide which future versions
-of the GNU General Public License can be used, that proxy's public
-statement of acceptance of a version permanently authorizes you to
-choose that version for the Program.
-
-Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-#### 15. Disclaimer of Warranty.
-
-THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT
-WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
-PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
-DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR
-CORRECTION.
-
-#### 16. Limitation of Liability.
-
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR
-CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
-ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT
-NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
-LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM
-TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
-PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-#### 17. Interpretation of Sections 15 and 16.
-
-If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-END OF TERMS AND CONDITIONS
-
-### How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these
-terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively state
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-        <one line to give the program's name and a brief idea of what it does.>
-        Copyright (C) <year>  <name of author>
-
-        This program is free software: you can redistribute it and/or modify
-        it under the terms of the GNU General Public License as published by
-        the Free Software Foundation, either version 3 of the License, or
-        (at your option) any later version.
-
-        This program is distributed in the hope that it will be useful,
-        but WITHOUT ANY WARRANTY; without even the implied warranty of
-        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-        GNU General Public License for more details.
-
-        You should have received a copy of the GNU General Public License
-        along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper
-mail.
-
-If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-        <program>  Copyright (C) <year>  <name of author>
-        This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-        This is free software, and you are welcome to redistribute it
-        under certain conditions; type `show c' for details.
-
-The hypothetical commands \`show w' and \`show c' should show the
-appropriate parts of the General Public License. Of course, your
-program's commands might be different; for a GUI interface, you would
-use an "about box".
-
-You should also get your employer (if you work as a programmer) or
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. For more information on this, and how to apply and follow
-the GNU GPL, see <https://www.gnu.org/licenses/>.
-
-The GNU General Public License does not permit incorporating your
-program into proprietary programs. If your program is a subroutine
-library, you may consider it more useful to permit linking proprietary
-applications with the library. If this is what you want to do, use the
-GNU Lesser General Public License instead of this License. But first,
-please read <https://www.gnu.org/licenses/why-not-lgpl.html>.

+ 0 - 90
storage-node/README.md

@@ -1,90 +0,0 @@
-![Storage Nodes for Joystream](./storage-node_new.svg)
-
-This repository contains several Node packages, located under the `packages/`
-subdirectory. See each individual package for details:
-
-- [colossus](./packages/colossus/README.md) - the main colossus app.
-- [storage-node-backend](./packages/storage/README.md) - abstraction over the storage backend.
-- [storage-runtime-api](./packages/runtime-api/README.md) - convenience wrappers for the runtime API.
-- [storage-utils](./packages/util/README.md) - general utility functions.
-- [discovery](./packages/discovery/README.md) - service discovery using IPNS.
-- [storage-cli](./packages/cli/README.md) - cli for uploading and downloading content from the network.
-- [helios](./packages/helios/README.md) - cli tool for getting the status of the storage network.
-
-## Installation
-
-_Requirements_
-
-This project uses [yarn](https://yarnpkg.com/) as its Node package manager. It also
-uses some node packages with native components, so make sure to install your
-system's basic build tools.
-
-On Debian-based systems:
-
-```bash
-$ apt install build-essential
-```
-
-On Mac OS (using [homebrew](https://brew.sh/)):
-
-```bash
-$ brew install libtool automake autoconf
-```
-
-_Building_
-
-```bash
-$ yarn install
-$ yarn build
-```
-
-These commands install dependencies and build the packages, making a `colossus` executable available:
-
-```bash
-$ yarn colossus --help
-```
-
-_Testing_
-
-Run an IPFS node and a joystream-node development chain (in separate terminals):
-
-```sh
-ipfs daemon
-```
-
-```sh
-joystream-node --dev
-```
-
-```sh
-$ yarn workspace storage-node test
-```
-
-To run a development environment, after starting the IPFS node and the development chain:
-
-```sh
-yarn storage-cli dev-init
-```
-
-This will configure the running chain with Alice as the storage lead and with a known role key for
-the storage provider.
-
-Run colossus in development mode:
-
-```sh
-yarn colossus --dev
-```
-
-Start pioneer ui:
-
-```sh
-yarn workspace pioneer start
-
-```
-
-Browse Pioneer at http://localhost:3000/.
-You should find that the Alice account is the storage working group lead and a storage provider.
-
-## Detailed Setup and Configuration Guide
-
-For details on how to setup a storage node on the Joystream network, follow this [step by step guide](https://github.com/Joystream/helpdesk/tree/master/roles/storage-providers).

+ 0 - 54
storage-node/docs/json-signing.md

@@ -1,54 +0,0 @@
-# JSON Data Signing
-
-Because serializing and deserializing JSON is not deterministic and may depend
-on the order in which keys are added or even the system's collation method,
-signing JSON cryptographically is fraught with issues. We circumvent them
-by wrapping any JSON to be signed in another JSON object:
-
-- `version` contains the version of the wrapper JSON, currently always `1`.
-- `serialized` contains the serialized version of the data, currently this
-  will be the base64 encoded, serialized JSON payload.
-- `signature` contains the base64 encoded signature of the `serialized` field
-  value prior to its base64 encoding.
-- `payload` [optional] contains the deserialized JSON object corresponding
-  to the `serialized` payload.
-
-For signing and verification, we'll use polkadot's _ed25519_ or _sr25519_ keys
-directly; an illustrative wrapper example and a signing/verification sketch follow below.
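
For illustration, a wrapped object for the payload `{"foo":"bar"}` might look like the
following (the `signature` value is a placeholder for the base64-encoded signature over
the raw JSON string):

```json
{
  "version": 1,
  "serialized": "eyJmb28iOiJiYXIifQ==",
  "signature": "<base64-encoded signature over the raw JSON string>",
  "payload": { "foo": "bar" }
}
```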
-
-## Signing Process
-
-Given some structured data:
-
-1. Serialize the structured data into a JSON string.
-1. Create a signature over the serialized JSON string.
-1. Create a new structured data with the appropriate `version` field.
-1. Add a base64 encoded version of the serialized JSON string as the `serialized` field.
-1. Add a base64 encoded version of the signature as the `signature` field.
-1. Optionally add the original structured data as the `payload` field.
-
-## Verification Process
-
-1. Verify data contains a `version`, `serialized` and `signature` field.
-1. Currently, verify that the `version` field's value is `1`.
-1. Try to base64 decode the `serialized` and `signature` fields.
-1. Verify that the decoded `signature` is valid for the decoded `serialized`
-   field.
-1. JSON deserialize the decoded `serialized` field.
-1. Add the resulting structured data as the `payload` field, and return the
-   modified object.
-
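A minimal TypeScript sketch of both processes, assuming `@polkadot/keyring` and
`@polkadot/util-crypto` for sr25519 signing (the `signJson`/`verifyJson` helpers are
illustrative names, not an existing API):

```ts
// Sketch only: wraps arbitrary data per the scheme above and verifies it again.
import { Keyring } from '@polkadot/keyring'
import type { KeyringPair } from '@polkadot/keyring/types'
import { cryptoWaitReady, signatureVerify } from '@polkadot/util-crypto'

interface SignedJson {
  version: number
  serialized: string // base64 of the serialized JSON payload
  signature: string // base64 of the signature over the raw JSON string
  payload?: unknown
}

function signJson(data: unknown, pair: KeyringPair): SignedJson {
  const json = JSON.stringify(data)
  const signature = pair.sign(json) // signature over the raw JSON string
  return {
    version: 1,
    serialized: Buffer.from(json, 'utf8').toString('base64'),
    signature: Buffer.from(signature).toString('base64'),
    payload: data,
  }
}

function verifyJson(wrapped: SignedJson, signerAddress: string): SignedJson {
  if (wrapped.version !== 1) throw new Error(`Unsupported version: ${wrapped.version}`)
  const json = Buffer.from(wrapped.serialized, 'base64').toString('utf8')
  const signature = Buffer.from(wrapped.signature, 'base64')
  const { isValid } = signatureVerify(json, signature, signerAddress)
  if (!isValid) throw new Error('Invalid signature')
  // Attach the deserialized payload and return the modified object
  return { ...wrapped, payload: JSON.parse(json) }
}

async function main(): Promise<void> {
  await cryptoWaitReady()
  const pair = new Keyring({ type: 'sr25519' }).addFromUri('//Alice')
  const wrapped = signJson({ foo: 'bar' }, pair)
  console.log(verifyJson(wrapped, pair.address).payload) // -> { foo: 'bar' }
}

main().catch(console.error)
```

Note that the signature is computed over the raw serialized JSON string, while the
wrapper stores its base64 encoding, as described above.
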
-## Alternatives
-
-There are alternative schemes available for signing JSON objects, but they
-have specific issues we'd like to avoid.
-
-- [JOSE](https://jose.readthedocs.io/en/latest/) has no support for the _ed25519_
-  or _sr25519_ keys used in polkadot apps, and
-  [appears to be fraught with security issues](https://paragonie.com/blog/2017/03/jwt-json-web-tokens-is-bad-standard-that-everyone-should-avoid).
-  Either issue makes its use hard to justify.
-- While [PASETO](https://paseto.io/) does use _ed25519_ keys and seems to have
-  a reasonably robust JavaScript implementation, it requires its secret keys to
-  be 512 bits long, while polkadot provides 256 bit secret keys. The implication
-  is that we would have to manage 512 bit keys and their corresponding public
-  keys as linked to polkadot's keys, which is cumbersome at the very least.

+ 0 - 55
storage-node/package.json

@@ -1,55 +0,0 @@
-{
-  "private": true,
-  "name": "storage-node",
-  "version": "1.0.0",
-  "engines": {
-    "node": ">=14.0.0",
-    "yarn": "^1.22.0"
-  },
-  "homepage": "https://github.com/Joystream/joystream/",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "scripts": {
-    "test": "wsrun --serial test",
-    "lint": "eslint --ext .js,.ts --ignore-path .gitignore .",
-    "build": "yarn workspace @joystream/storage-cli run build",
-    "checks": "prettier . --check && yarn lint",
-    "format": "prettier . --write"
-  },
-  "devDependencies": {
-    "@types/chai": "^4.2.11",
-    "@types/mocha": "^7.0.2",
-    "eslint": "^7.6.0",
-    "eslint-config-esnext": "^4.1.0",
-    "eslint-config-prettier": "^6.11.0",
-    "eslint-plugin-babel": "^5.3.1",
-    "eslint-plugin-prettier": "^3.1.4",
-    "prettier": "^2.0.5",
-    "typescript": "^4.4.3",
-    "wsrun": "^3.6.5"
-  },
-  "volta": {
-    "extends": "../package.json"
-  }
-}

+ 0 - 4
storage-node/packages/cli/.eslintignore

@@ -1,4 +0,0 @@
-**/build/*
-**/dist/*
-**/coverage/*
-**/node_modules/*

+ 0 - 40
storage-node/packages/cli/README.md

@@ -1,40 +0,0 @@
-# A CLI for the Joystream Runtime & Colossus
-
-- CLI access for some functionality from other packages in the storage-node workspace
-- Colossus/storage node functionality:
-  - File uploads
-  - File downloads
-- Development
-  - Setup development environment
-
-Running the storage cli tool:
-
-```sh
-$ yarn storage-cli --help
-```
-
-```sh
-
-  Joystream tool for uploading and downloading files to the network
-
-  Usage:
-    $ storage-cli command [arguments..] [key_file] [passphrase]
-
-  Some commands require a key file as the last option holding the identity for
-  interacting with the runtime API.
-
-  Commands:
-    upload            Upload a file to a Colossus storage node. Requires a
-                      storage node URL, and a local file name to upload. As
-                      an optional third parameter, you can provide a Data
-                      Object Type ID - this defaults to "1" if not provided.
-    download          Retrieve a file. Requires a storage node URL and a content
-                      ID, as well as an output filename.
-    head              Send a HEAD request for a file, and print headers.
-                      Requires a storage node URL and a content ID.
-
-  Dev Commands:       Commands to run on a development chain.
-    dev-init          Setup chain with Alice as lead and storage provider.
-    dev-check         Check the chain is setup with Alice as lead and storage provider.
-
-```

+ 0 - 14
storage-node/packages/cli/bin/cli.js

@@ -1,14 +0,0 @@
-#!/usr/bin/env node
-
-const chalk = require('chalk')
-const { main } = require('../dist/cli')
-
-main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(`Error: ${JSON.stringify(err)}`))
-    console.error(chalk.red(`Stack: ${err.stack}`))
-    process.exit(-1)
-  })

+ 0 - 56
storage-node/packages/cli/package.json

@@ -1,56 +0,0 @@
-{
-  "name": "@joystream/storage-cli",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Joystream tool for uploading and downloading files to the network",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'dist/test/**/*.js'",
-    "lint": "eslint --ext .js,.ts . && tsc --noEmit --pretty",
-    "build": "(rm tsconfig.tsbuildinfo || :) && tsc --build"
-  },
-  "bin": {
-    "storage-cli": "./bin/cli.js"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@joystream/storage-utils": "^0.1.0",
-    "@joystream/types": "^0.16.1",
-    "axios": "^0.21.1",
-    "chalk": "^2.4.2",
-    "lodash": "^4.17.11",
-    "meow": "^5.0.0",
-    "ipfs-only-hash": "^1.0.2"
-  }
-}

+ 0 - 126
storage-node/packages/cli/src/cli.ts

@@ -1,126 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-import { RuntimeApi } from '@joystream/storage-runtime-api'
-import meow from 'meow'
-import _ from 'lodash'
-
-// Commands
-import * as dev from './commands/dev'
-import { HeadCommand } from './commands/head'
-import { DownloadCommand } from './commands/download'
-import { UploadCommand } from './commands/upload'
-
-// Parse CLI
-const FLAG_DEFINITIONS = {
-  // TODO: current version of meow doesn't support subcommands. We should consider a migration to yargs or oclif.
-}
-
-const usage = `
-  Usage:
-    $ storage-cli command [arguments..]
-
-  Commands:
-    upload            Upload a file to the Joystream Network. Requires a
-                      source file path to upload, data object type ID, member ID and account key file with
-                      pass phrase to unlock it.
-    download          Retrieve a file. Requires a content ID and an output filename.
-    head              Send a HEAD request for a file, and print headers.
-                      Requires a storage node URL and a content ID.
-
-  Dev Commands:       Commands to run on a development chain.
-    dev-init          Setup chain with Alice as lead and storage provider.
-    dev-check         Check the chain is setup with Alice as lead and storage provider.
-    sudo-create-sp    Initialize the chain with a lead storage provider.
-    
-  Type 'storage-cli command' for the exact command usage examples.
-  `
-
-const cli = meow(usage, { flags: FLAG_DEFINITIONS })
-
-// Shows a message, CLI general usage and exits.
-function showUsageAndExit(message: string) {
-  console.log(message)
-  console.log(usage)
-  process.exit(1)
-}
-
-const commands = {
-  // add Alice well known account as storage provider
-  'dev-init': async (api) => {
-    return dev.init(api)
-  },
-  // Checks that the setup done by dev-init command was successful
-  'dev-check': async (api) => {
-    return dev.check(api)
-  },
-  'sudo-create-sp': async (api) => {
-    return dev.makeMemberInitialLeadAndStorageProvider(api)
-  },
-  // Uploads the file to the system. Registers new data object in the runtime, obtains proper colossus instance URL.
-  upload: async (
-    api: any,
-    filePath: string,
-    dataObjectTypeId: string,
-    memberId: string,
-    keyFile: string,
-    passPhrase: string
-  ) => {
-    const uploadCmd = new UploadCommand(api, filePath, dataObjectTypeId, memberId, keyFile, passPhrase)
-
-    await uploadCmd.run()
-  },
-  download: async (api: any, contentId: string, filePath: string) => {
-    const downloadCmd = new DownloadCommand(api, contentId, filePath)
-
-    await downloadCmd.run()
-  },
-  // Shows asset information derived from response headers.
-  // Accepts colossus URL and content ID.
-  head: async (api: any, storageNodeUrl: string, contentId: string) => {
-    const headCmd = new HeadCommand(api, storageNodeUrl, contentId)
-
-    await headCmd.run()
-  },
-}
-
-// Entry point.
-export async function main() {
-  const api = await RuntimeApi.create({ retries: 3 })
-
-  // Simple CLI commands
-  const command = cli.input[0]
-  if (!command) {
-    showUsageAndExit('Enter the command, please.')
-  }
-
-  if (Object.prototype.hasOwnProperty.call(commands, command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    try {
-      await commands[command](api, ...args)
-    } catch (err) {
-      console.error('Command Failed:', err)
-      process.exit(-1)
-    }
-  } else {
-    showUsageAndExit(`Command "${command}" not recognized.`)
-  }
-}

+ 0 - 93
storage-node/packages/cli/src/commands/base.ts

@@ -1,93 +0,0 @@
-import chalk from 'chalk'
-import removeEndingForwardSlash from '@joystream/storage-utils/stripEndingSlash'
-import { ContentId } from '@joystream/types/storage'
-import Debug from 'debug'
-const debug = Debug('joystream:storage-cli:base')
-
-// Commands base abstract class. Contains reusable methods.
-export abstract class BaseCommand {
-  protected readonly api: any
-
-  constructor(api: any) {
-    this.api = api
-  }
-
-  // Creates the Colossus asset URL and logs it.
-  protected createAndLogAssetUrl(url: string, contentId: string | ContentId): string {
-    let normalizedContentId: string
-
-    if (typeof contentId === 'string') {
-      normalizedContentId = contentId
-    } else {
-      normalizedContentId = contentId.encode()
-    }
-
-    const normalizedUrl = removeEndingForwardSlash(url)
-    const assetUrl = `${normalizedUrl}/asset/v0/${normalizedContentId}`
-    console.log(chalk.yellow('Generated asset URL:', assetUrl))
-
-    return assetUrl
-  }
-
-  // Abstract method to provide parameter validation.
-  protected abstract validateParameters(): boolean
-
-  // Abstract method to show command usage.
-  protected abstract showUsage()
-
-  // Checks command parameters and shows the usage if necessary.
-  protected assertParameters(): boolean {
-    // Create, validate and show parameters.
-    if (!this.validateParameters()) {
-      console.log(chalk.yellow(`Invalid parameters for the command:`))
-      this.showUsage()
-
-      return false
-    }
-
-    return true
-  }
-
-  // Shows the error message and ends the process with error code.
-  protected fail(message: string): void {
-    console.log(chalk.red(message))
-    process.exit(1)
-  }
-
-  protected maxContentSize(): number {
-    // Maximum content length for the assets (files)
-    return 2000 * 1024 * 1024
-  }
-
-  // Requests the runtime and obtains the storage node endpoint URL.
-  protected async getStorageProviderEndpoint(storageProviderId: string): Promise<string> {
-    try {
-      const endpoint = await this.api.workers.getWorkerStorageValue(storageProviderId)
-
-      debug(`Resolved endpoint: ${endpoint}`)
-
-      return endpoint
-    } catch (err) {
-      this.fail(`Could not get provider endpoint: ${err}`)
-    }
-  }
-
-  protected async getAnyProviderEndpoint(): Promise<string> {
-    try {
-      const providers = await this.api.workers.getAllProviders()
-
-      debug(`Available Providers: ${providers}`)
-      // select first provider
-      do {
-        const id = providers.ids.pop()
-        const endpoint = await this.getStorageProviderEndpoint(id)
-        if (endpoint) {
-          return endpoint
-        }
-      } while (providers.ids.length)
-      throw new Error('No Providers registered endpoint')
-    } catch (err) {
-      this.fail(`Could not get provider endpoint: ${err}`)
-    }
-  }
-}

+ 0 - 265
storage-node/packages/cli/src/commands/dev.ts

@@ -1,265 +0,0 @@
-'use strict'
-
-import dbug from 'debug'
-import { KeyringPair } from '@polkadot/keyring/types'
-import { RuntimeApi } from '@joystream/storage-runtime-api'
-import { GenericJoyStreamRoleSchema as HRTJson } from '@joystream/types/hiring/schemas/role.schema.typings'
-
-const debug = dbug('joystream:storage-cli:dev')
-
-// Derivation path appended to well known development seed used on
-// development chains
-const ALICE_URI = '//Alice'
-const ROLE_ACCOUNT_URI = '//Colossus'
-
-function aliceKeyPair(api: RuntimeApi): KeyringPair {
-  return api.identities.keyring.addFromUri(ALICE_URI, null, 'sr25519')
-}
-
-function roleKeyPair(api: RuntimeApi): KeyringPair {
-  return api.identities.keyring.addFromUri(ROLE_ACCOUNT_URI, null, 'sr25519')
-}
-
-function getKeyFromAddressOrSuri(api: RuntimeApi, addressOrSuri: string) {
-  // Get key from keyring if it is an address
-  try {
-    return api.identities.keyring.getPair(addressOrSuri)
-  } catch (err) {
-    debug('supplied argument was not an address')
-  }
-
-  // Assume a SURI, add to keyring and return keypair
-  return api.identities.keyring.addFromUri(addressOrSuri, null, 'sr25519')
-}
-
-function developmentPort(): number {
-  return 3001
-}
-
-// Checks the chain state for the storage provider setup we expect
-// to have if the initialization was successfully run prior.
-// Returns the provider id if found, throws otherwise.
-const check = async (api): Promise<any> => {
-  const roleAccountId = roleKeyPair(api).address
-  const providerId = await api.workers.findProviderIdByRoleAccount(roleAccountId)
-
-  if (providerId === null) {
-    throw new Error('Dev storage provider not found on chain.')
-  }
-
-  console.log(`
-  Chain is setup with Dev storage provider:
-    providerId = ${providerId}
-    roleAccountId = ${roleAccountId}
-    roleKey = ${ROLE_ACCOUNT_URI}
-  `)
-
-  return providerId
-}
-
-// Setup Alice account on a development chain as
-// a member, storage lead, and a storage provider using a deterministic
-// development key for the role account
-const init = async (api: RuntimeApi): Promise<any> => {
-  debug('Ensuring we are on Development chain')
-  if (!(await api.system.isDevelopmentChain())) {
-    console.log('This command should only be run on a Development chain')
-    return
-  }
-
-  // check if the initialization was previously run, skip if so.
-  try {
-    await check(api)
-    return
-  } catch (err) {
-    // We didn't find a storage provider with expected role account
-  }
-
-  // Load alice keypair into keyring
-  const alice = aliceKeyPair(api).address
-  const roleAccount = roleKeyPair(api).address
-
-  debug(`Ensuring Alice ${alice} is sudo.`)
-
-  // make sure alice is sudo - indirectly checking this is a dev chain
-  const sudo = await api.identities.getSudoAccount()
-
-  if (!sudo.eq(alice)) {
-    throw new Error('Setup requires Alice to be sudo. Are you sure you are running a devchain?')
-  }
-
-  console.log('Running setup.')
-
-  debug('Ensuring Alice is a member.')
-  let aliceMemberId = await api.identities.firstMemberIdOf(alice)
-
-  if (aliceMemberId === undefined) {
-    debug('Registering Alice as member.')
-    aliceMemberId = await api.identities.registerMember(alice, {
-      handle: 'alice',
-    })
-  } else {
-    debug('Alice is already a member.')
-  }
-
-  debug('Transferring tokens to storage role account.')
-  // Give role account some tokens to work with
-  api.balances.transfer(alice, roleAccount, 100000)
-
-  // Make alice the storage lead
-  debug('Making Alice the storage Lead.')
-  const leadOpeningId = await api.workers.devAddStorageLeadOpening()
-  const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, aliceMemberId, alice, alice)
-  api.workers.devBeginLeadOpeningReview(leadOpeningId)
-  await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
-
-  const leadAccount = await api.workers.getLeadRoleAccount()
-  if (!leadAccount.eq(alice)) {
-    throw new Error('Setting alice as lead failed.')
-  }
-
-  // Create a storage opening, apply, start review, and fill opening
-  debug(`Making ${ROLE_ACCOUNT_URI} account a storage provider.`)
-
-  const openingId = await api.workers.devAddStorageOpening()
-  debug(`Created new storage opening: ${openingId}`)
-
-  const applicationId = await api.workers.devApplyOnOpening(openingId, aliceMemberId, alice, roleAccount)
-  debug(`Applied with application id: ${applicationId}`)
-
-  api.workers.devBeginStorageOpeningReview(openingId)
-
-  debug(`Filling storage opening.`)
-  const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
-
-  debug(`Assigned storage provider id: ${providerId}`)
-
-  return check(api)
-}
-
-// Using sudo to create initial storage lead and worker with given keys taken from env variables.
-// Used to quickly setup a storage provider on a new chain before a council is ready.
-const makeMemberInitialLeadAndStorageProvider = async (api: RuntimeApi): Promise<any> => {
-  if (await api.workers.getLeadRoleAccount()) {
-    throw new Error('The Storage Lead is already set!')
-  }
-
-  if (!process.env.SUDO_URI) {
-    throw new Error('required SUDO_URI env variable was not set')
-  }
-
-  if (!process.env.MEMBER_ID) {
-    throw new Error('required MEMBER_ID env variable was not set')
-  }
-
-  if (!process.env.MEMBER_CONTROLLER_URI) {
-    throw new Error('required MEMBER_CONTROLLER_URI env variable was not set')
-  }
-
-  if (!process.env.STORAGE_WORKER_ADDRESS) {
-    throw new Error('required STORAGE_WORKER_ADDRESS env variable was not set')
-  }
-
-  const sudoKey = getKeyFromAddressOrSuri(api, process.env.SUDO_URI)
-  const memberId = parseInt(process.env.MEMBER_ID)
-  const memberController = getKeyFromAddressOrSuri(api, process.env.MEMBER_CONTROLLER_URI).address
-  const leadAccount = memberController
-  const workerAccount = process.env.STORAGE_WORKER_ADDRESS
-
-  const sudo = await api.identities.getSudoAccount()
-
-  // Ensure correct sudo key was provided
-  if (!sudo.eq(sudoKey.address)) {
-    throw new Error('Provided SUDO_URI is not the chain sudo')
-  }
-
-  // Ensure MEMBER_ID and MEMBER_CONTROLLER_URI are valid
-  const memberIds = await api.identities.memberIdsOfController(memberController)
-  if (memberIds.find((id) => id.eq(memberId)) === undefined) {
-    throw new Error(
-      'MEMBER_ID and MEMBER_CONTROLLER_URI do not correspond to a registered member and their controller account'
-    )
-  }
-
-  // Ensure STORAGE_WORKER_ADDRESS is a valid Address
-  api.identities.keyring.decodeAddress(workerAccount)
-
-  debug(`Creating Leader with role key: ${leadAccount}`)
-  debug('Creating Lead Opening')
-  const leadOpeningId = await api.workers.devAddStorageLeadOpening(JSON.stringify(getLeadOpeningInfo()))
-  debug('Applying')
-  const leadApplicationId = await api.workers.devApplyOnOpening(leadOpeningId, memberId, memberController, leadAccount)
-  debug('Starting Review')
-  api.workers.devBeginLeadOpeningReview(leadOpeningId)
-  debug('Filling Opening')
-  await api.workers.devFillLeadOpening(leadOpeningId, leadApplicationId)
-
-  const setLeadAccount = await api.workers.getLeadRoleAccount()
-  if (!setLeadAccount.eq(leadAccount)) {
-    throw new Error('Setting Lead failed!')
-  }
-
-  // Create a storage opening, apply, start review, and fill opening
-  debug(`Making ${workerAccount} account a storage provider.`)
-
-  const openingId = await api.workers.devAddStorageOpening(JSON.stringify(getWorkerOpeningInfo()))
-  debug(`Created new storage opening: ${openingId}`)
-
-  const applicationId = await api.workers.devApplyOnOpening(openingId, memberId, memberController, workerAccount)
-  debug(`Applied with application id: ${applicationId}`)
-
-  api.workers.devBeginStorageOpeningReview(openingId)
-
-  debug(`Filling storage opening.`)
-  const providerId = await api.workers.devFillStorageOpening(openingId, applicationId)
-
-  debug(`Assigned storage provider id: ${providerId}`)
-}
-
-function getLeadOpeningInfo(): HRTJson {
-  return {
-    'version': 1,
-    'headline': 'Initial Storage Lead',
-    'job': {
-      'title': 'Bootstrap Lead',
-      'description': 'Starting opportunity to bootstrap the network',
-    },
-    'application': {
-      'sections': [],
-    },
-    'reward': 'None',
-    'creator': {
-      'membership': {
-        'handle': 'mokhtar',
-      },
-    },
-    'process': {
-      'details': ['automated'],
-    },
-  }
-}
-
-function getWorkerOpeningInfo(): HRTJson {
-  return {
-    'version': 1,
-    'headline': 'Initial Storage Worker',
-    'job': {
-      'title': 'Bootstrap Worker',
-      'description': 'Starting opportunity to bootstrap the network',
-    },
-    'application': {
-      'sections': [],
-    },
-    'reward': 'None',
-    'creator': {
-      'membership': {
-        'handle': 'mokhtar',
-      },
-    },
-    'process': {
-      'details': ['automated'],
-    },
-  }
-}
-
-export { init, check, aliceKeyPair, roleKeyPair, developmentPort, makeMemberInitialLeadAndStorageProvider }

+ 0 - 70
storage-node/packages/cli/src/commands/download.ts

@@ -1,70 +0,0 @@
-import axios from 'axios'
-import chalk from 'chalk'
-import fs from 'fs'
-import { BaseCommand } from './base'
-
-// Download command class. Validates input parameters and execute the logic for asset downloading.
-export class DownloadCommand extends BaseCommand {
-  private readonly contentId: string
-  private readonly outputFilePath: string
-
-  constructor(api: any, contentId: string, outputFilePath: string) {
-    super(api)
-
-    this.contentId = contentId
-    this.outputFilePath = outputFilePath
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return this.contentId && this.contentId !== '' && this.outputFilePath && this.outputFilePath !== ''
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:   storage-cli download contentID filePath
-        Example: storage-cli download 5Ec3PL3wbutqvDykhNxXJFEWSdw9rS4LBsGUXH9gSusFzc5X ./movie.mp4
-      `)
-    )
-  }
-
-  // Command executor.
-  async run(): Promise<void> {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const storageNodeUrl = await this.getAnyProviderEndpoint()
-
-    const assetUrl = this.createAndLogAssetUrl(storageNodeUrl, this.contentId)
-    console.log(chalk.yellow('File path:', this.outputFilePath))
-
-    // Create file write stream and set error handler.
-    const writer = fs.createWriteStream(this.outputFilePath).on('error', (err) => {
-      this.fail(`File write failed: ${err}`)
-    })
-
-    // Request file download.
-    try {
-      const response = await axios({
-        url: assetUrl,
-        method: 'GET',
-        responseType: 'stream',
-        // max length of response
-        maxContentLength: this.maxContentSize(),
-      })
-
-      response.data.pipe(writer)
-
-      return new Promise((resolve) => {
-        writer.on('finish', () => {
-          console.log('File downloaded.')
-          resolve()
-        })
-      })
-    } catch (err) {
-      this.fail(`Colossus request failed: ${err.message}`)
-    }
-  }
-}

+ 0 - 48
storage-node/packages/cli/src/commands/head.ts

@@ -1,48 +0,0 @@
-import axios from 'axios'
-import chalk from 'chalk'
-import { BaseCommand } from './base'
-
-// Head command class. Validates input parameters and obtains the asset headers.
-export class HeadCommand extends BaseCommand {
-  private readonly storageNodeUrl: string
-  private readonly contentId: string
-
-  constructor(api: any, storageNodeUrl: string, contentId: string) {
-    super(api)
-
-    this.storageNodeUrl = storageNodeUrl
-    this.contentId = contentId
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return this.storageNodeUrl && this.storageNodeUrl !== '' && this.contentId && this.contentId !== ''
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:   storage-cli head colossusURL contentID
-        Example: storage-cli head http://localhost:3001 0x7a6ba7e9157e5fba190dc146fe1baa8180e29728a5c76779ed99655500cff795
-      `)
-    )
-  }
-
-  // Command executor.
-  async run() {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const assetUrl = this.createAndLogAssetUrl(this.storageNodeUrl, this.contentId)
-
-    try {
-      const response = await axios.head(assetUrl)
-
-      console.log(chalk.green(`Content type: ${response.headers['content-type']}`))
-      console.log(chalk.green(`Content length: ${response.headers['content-length']}`))
-    } catch (err) {
-      this.fail(`Colossus request failed: ${err.message}`)
-    }
-  }
-}

+ 0 - 202
storage-node/packages/cli/src/commands/upload.ts

@@ -1,202 +0,0 @@
-import axios, { AxiosRequestConfig } from 'axios'
-import fs from 'fs'
-import ipfsHash from 'ipfs-only-hash'
-import { ContentId, DataObject } from '@joystream/types/storage'
-import BN from 'bn.js'
-import { BaseCommand } from './base'
-import Debug from 'debug'
-import chalk from 'chalk'
-import { aliceKeyPair } from './dev'
-const debug = Debug('joystream:storage-cli:upload')
-
-// Defines the necessary parameters for the AddContent runtime tx.
-interface AddContentParams {
-  accountId: string
-  ipfsCid: string
-  contentId: ContentId
-  fileSize: BN
-  dataObjectTypeId: number
-  memberId: number
-}
-
-// Upload command class. Validates input parameters and uploads the asset to the storage node and runtime.
-export class UploadCommand extends BaseCommand {
-  private readonly mediaSourceFilePath: string
-  private readonly dataObjectTypeId: string
-  private readonly keyFile: string
-  private readonly passPhrase: string
-  private readonly memberId: string
-
-  constructor(
-    api: any,
-    mediaSourceFilePath: string,
-    dataObjectTypeId: string,
-    memberId: string,
-    keyFile: string,
-    passPhrase: string
-  ) {
-    super(api)
-
-    this.mediaSourceFilePath = mediaSourceFilePath
-    this.dataObjectTypeId = dataObjectTypeId
-    this.memberId = memberId
-    this.keyFile = keyFile
-    this.passPhrase = passPhrase
-  }
-
-  // Provides parameter validation. Overrides the abstract method from the base class.
-  protected validateParameters(): boolean {
-    return (
-      this.mediaSourceFilePath &&
-      this.mediaSourceFilePath !== '' &&
-      this.dataObjectTypeId &&
-      this.dataObjectTypeId !== '' &&
-      this.memberId &&
-      this.memberId !== ''
-    )
-  }
-
-  // Reads the file from the filesystem and computes IPFS hash.
-  private async computeIpfsHash(): Promise<string> {
-    const file = fs.createReadStream(this.mediaSourceFilePath).on('error', (err) => {
-      this.fail(`File read failed: ${err}`)
-    })
-
-    return await ipfsHash.of(file)
-  }
-
-  // Read the file size from the file system.
-  private getFileSize(): number {
-    const stats = fs.statSync(this.mediaSourceFilePath)
-    return stats.size
-  }
-
-  // Creates parameters for the AddContent runtime tx.
-  private async getAddContentParams(): Promise<AddContentParams> {
-    const identity = await this.loadIdentity()
-    const accountId = identity.address
-
-    const dataObjectTypeId: number = parseInt(this.dataObjectTypeId)
-    if (isNaN(dataObjectTypeId)) {
-      this.fail(`Cannot parse dataObjectTypeId: ${this.dataObjectTypeId}`)
-    }
-
-    const memberId: number = parseInt(this.memberId)
-    if (isNaN(memberId)) {
-      this.fail(`Cannot parse memberId: ${this.memberId}`)
-    }
-
-    return {
-      accountId,
-      ipfsCid: await this.computeIpfsHash(),
-      contentId: ContentId.generate(this.api.api.registry),
-      fileSize: new BN(this.getFileSize()),
-      dataObjectTypeId,
-      memberId,
-    }
-  }
-
-  // Creates the DataObject in the runtime.
-  private async createContent(p: AddContentParams): Promise<DataObject> {
-    try {
-      const dataObject: DataObject = await this.api.assets.createDataObject(
-        p.accountId,
-        p.memberId,
-        p.contentId,
-        p.dataObjectTypeId,
-        p.fileSize,
-        p.ipfsCid
-      )
-
-      return dataObject
-    } catch (err) {
-      if (err.dispatchError) {
-        if (err.dispatchError.isModule) {
-          const error = err.dispatchError.asModule
-          const { name, documentation } = this.api.api.registry.findMetaError(error)
-          this.fail(`Cannot create data object: ${name} ${documentation}`)
-        } else {
-          const error = err.dispatchError.toString()
-          this.fail(`Cannot create data object: ${error}`)
-        }
-      } else {
-        this.fail(`Cannot create data object: ${err}`)
-      }
-    }
-  }
-
-  // Uploads file to given asset URL.
-  private async uploadFile(assetUrl: string) {
-    // Create file read stream and set error handler.
-    const file = fs.createReadStream(this.mediaSourceFilePath).on('error', (err) => {
-      this.fail(`File read failed: ${err}`)
-    })
-
-    // Upload file from the stream.
-    try {
-      const fileSize = this.getFileSize()
-      const config: AxiosRequestConfig = {
-        headers: {
-          'Content-Type': '', // https://github.com/Joystream/storage-node-joystream/issues/16
-          'Content-Length': fileSize.toString(),
-        },
-        // max length of body in PUT request
-        maxBodyLength: this.maxContentSize(),
-      }
-      await axios.put(assetUrl, file, config)
-
-      console.log('File uploaded.')
-    } catch (err) {
-      this.fail(err.toString())
-    }
-  }
-
-  // Loads and unlocks the runtime identity using the key file and pass phrase.
-  private async loadIdentity(): Promise<any> {
-    const noKeyFileProvided = !this.keyFile || this.keyFile === ''
-    const useAlice = noKeyFileProvided && (await this.api.system.isDevelopmentChain())
-
-    if (useAlice) {
-      debug("Discovered 'development' chain.")
-      return aliceKeyPair(this.api)
-    }
-
-    try {
-      await fs.promises.access(this.keyFile)
-    } catch (error) {
-      this.fail(`Cannot read file "${this.keyFile}".`)
-    }
-
-    return this.api.identities.loadUnlock(this.keyFile, this.passPhrase)
-  }
-
-  // Shows command usage. Overrides the abstract method from the base class.
-  protected showUsage() {
-    console.log(
-      chalk.yellow(`
-        Usage:       storage-cli upload mediaSourceFilePath dataObjectTypeId memberId [keyFilePath] [passPhrase]
-        Example:     storage-cli upload ./movie.mp4 1 1 ./keyFile.json secretPhrase
-        Development: storage-cli upload ./movie.mp4 1 0
-      `)
-    )
-  }
-
-  // Command executor.
-  async run() {
-    // Checks for input parameters, shows usage if they are invalid.
-    if (!this.assertParameters()) return
-
-    const addContentParams = await this.getAddContentParams()
-    debug(`AddContent Tx params: ${JSON.stringify(addContentParams)}`)
-    debug(`Decoded CID: ${addContentParams.contentId.toString()}`)
-
-    const dataObject = await this.createContent(addContentParams)
-    debug(`Received data object: ${dataObject.toString()}`)
-
-    const colossusEndpoint = await this.getAnyProviderEndpoint()
-    debug(`Discovered storage node endpoint: ${colossusEndpoint}`)
-
-    const assetUrl = this.createAndLogAssetUrl(colossusEndpoint, addContentParams.contentId)
-    await this.uploadFile(assetUrl)
-  }
-}

+ 0 - 1
storage-node/packages/cli/src/test/index.ts

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 9
storage-node/packages/cli/tsconfig.json

@@ -1,9 +0,0 @@
-{
-  "include": ["src"],
-  "extends": "../../tsconfig.json",
-  "compilerOptions": {
-    "outDir": "dist",
-    "rootDir": "src",
-    "baseUrl": "."
-  }
-}

+ 0 - 1
storage-node/packages/colossus/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 81
storage-node/packages/colossus/README.md

@@ -1,81 +0,0 @@
-![Storage Nodes for Joystream](../../banner.svg)
-
-## Development
-
-Run a development server (an ipfs node and development chain should be running on the local machine)
-
-```bash
-$ yarn colossus --dev
-```
-
-This will expect the chain to be configured with certain development accounts.
-The setup can be done by running the dev-init command for the storage-cli:
-
-```sh
-yarn storage-cli dev-init
-```
-
-## Command-Line
-
-```sh
-$ yarn colossus --help
-```
-
-```
-  Colossus - Joystream Storage Node
-
-  Usage:
-    $ colossus [command] [arguments]
-
-  Commands:
-    server        Runs a production server instance. (discovery and storage services)
-                  This is the default command if not specified.
-    discovery     Run the discovery service only.
-
-  Arguments (required for server. Ignored if running server with --dev option):
-    --provider-id ID, -i ID     StorageProviderId assigned to you in working group.
-    --key-file FILE             JSON key export file to use as the storage provider (role account).
-    --public-url=URL, -u URL    API Public URL to announce.
-
-  Arguments (optional):
-    --dev                   Runs server with developer settings.
-    --passphrase            Optional passphrase to use to decrypt the key-file.
-    --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
-    --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
-```
-
-To run a storage server in production you will need to enroll on the network first to
-obtain your provider-id and role account.
-
-## API Packages
-
-Since it's not entirely clear yet how APIs will develop in the future, the approach
-taken here is to package each API up individually. That is, instead of
-providing an overall API version in `api-base.yml`, it should be part of each
-API package's path.
-
-For example, for a `foo` API in its version `v1`, its definitions should live
-in `./paths/foo/v1.js` and `./paths/foo/v1/*.js` respectively.
-
-_Note:_ until a reasonably stable API is reached, this project uses a `v0`
-version prefix.
-
-## Interface/implementation
-
-For reusability across API versions, it's best to keep files in the `paths`
-subfolder very thin, and instead inject implementations via the `dependencies`
-configuration value of `express-openapi`.
-
-These implementations live in the `./lib` subfolder. Adjust `app.js` as
-needed to make them available to API packages.
-
-## Streaming Notes
-
-For streaming content, it is required that stream metadata is located at the
-start of the stream. Most software writes metadata at the end of the stream,
-because it is when the stream is committed to disk that the entirety of the
-metadata is known.
-
-To move metadata to the start of the stream, a CLI tool such as
-[qtfaststart](https://github.com/danielgtaylor/qtfaststart) for MP4 files might
-be used.
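
The README above describes two conventions: per-version path modules (e.g. `./paths/foo/v1.js`) and injecting implementations through `express-openapi`'s `dependencies` config rather than importing them directly. A minimal sketch of such a path module, assuming a hypothetical `foo` endpoint and mirroring the pattern of `paths/asset/v0/{id}.js` and `lib/app.js` further down in this diff, might look like:

```js
// Hypothetical example: paths/foo/v1.js, following the same pattern as paths/asset/v0/{id}.js.
'use strict'

// `storage` and `runtime` are injected by express-openapi via the `dependencies`
// config in lib/app.js, so the path module itself stays thin.
module.exports = function (storage, runtime) {
  const doc = {
    async get(req, res) {
      // Delegate any real work to the injected implementations in ./lib.
      res.status(200).send({ message: 'foo v1' })
    },
  }

  // OpenAPI spec for the operation.
  doc.get.apiDoc = {
    description: 'Example endpoint for the hypothetical foo API, version v1.',
    operationId: 'getFooV1',
    responses: {
      200: { description: 'OK' },
      default: {
        description: 'Unexpected error',
        content: {
          'application/json': {
            schema: { $ref: '#/components/schemas/Error' },
          },
        },
      },
    },
  }

  return doc
}
```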

+ 0 - 33
storage-node/packages/colossus/api-base.yml

@@ -1,33 +0,0 @@
-openapi: '3.0.0'
-info:
-  title: 'Colossus - Joystream Storage Node'
-  version: '1.1.0'
-paths: {} # Will be populated by express-openapi
-
-components:
-  # Re-usable parameter definitions
-  parameters: {}
-
-  # Re-usable (response) object definitions
-  schemas:
-    Error:
-      required:
-        - message
-      properties:
-        code:
-          type: integer
-          format: int32
-        message:
-          type: string
-
-    ContentDirectoryEntry: # TODO implement
-      required:
-        - name
-      properties:
-        name:
-          type: string
-
-    ContentDirectoryEntries:
-      type: array
-      items:
-        $ref: '#/components/schemas/ContentDirectoryEntry'

+ 0 - 340
storage-node/packages/colossus/bin/cli.js

@@ -1,340 +0,0 @@
-#!/usr/bin/env node
-/* es-lint disable  */
-
-'use strict'
-
-// Node requires
-const path = require('path')
-
-// npm requires
-const meow = require('meow')
-const chalk = require('chalk')
-const figlet = require('figlet')
-const _ = require('lodash')
-const { sleep } = require('@joystream/storage-utils/sleep')
-
-const debug = require('debug')('joystream:colossus')
-
-// Project root
-const PROJECT_ROOT = path.resolve(__dirname, '..')
-
-// Parse CLI
-const FLAG_DEFINITIONS = {
-  port: {
-    type: 'number',
-    alias: 'p',
-    default: 3000,
-  },
-  keyFile: {
-    type: 'string',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  publicUrl: {
-    type: 'string',
-    alias: 'u',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  passphrase: {
-    type: 'string',
-  },
-  wsProvider: {
-    type: 'string',
-    default: 'ws://localhost:9944',
-  },
-  providerId: {
-    type: 'number',
-    alias: 'i',
-    isRequired: (flags, input) => {
-      // Only required if running server command and not in dev or anonymous mode
-      if (flags.anonymous || flags.dev) {
-        return false
-      }
-      return input[0] === 'server'
-    },
-  },
-  ipfsHost: {
-    type: 'string',
-    default: 'localhost',
-  },
-  anonymous: {
-    type: 'boolean',
-    default: false,
-  },
-  maxSync: {
-    type: 'number',
-    default: 200,
-  },
-}
-
-const cli = meow(
-  `
-  Usage:
-    $ colossus [command] [arguments]
-
-  Commands:
-    server        Runs a production server instance
-
-  Arguments (required with the server command, unless --dev or --anonymous args are used):
-    --provider-id ID, -i ID     StorageProviderId assigned to you in working group.
-    --key-file FILE             JSON key export file to use as the storage provider (role account).
-    --public-url=URL, -u URL    API Public URL to announce.
-
-  Arguments (optional):
-    --dev                   Runs server with developer settings.
-    --passphrase            Optional passphrase to use to decrypt the key-file.
-    --port=PORT, -p PORT    Port number to listen on, defaults to 3000.
-    --ws-provider WS_URL    Joystream-node websocket provider, defaults to ws://localhost:9944
-    --ipfs-host   hostname  ipfs host to use, default to 'localhost'. Default port 5001 is always used
-    --anonymous             Runs server in anonymous mode. Replicates content without need to register
-                            on-chain, and can serve content. Cannot be used to upload content.
-    --maxSync               The max number of items to sync concurrently. Defaults to 200.
-  `,
-  { flags: FLAG_DEFINITIONS }
-)
-
-// All-important banner!
-function banner() {
-  console.log(chalk.blue(figlet.textSync('joystream', 'Speed')))
-}
-
-function startExpressApp(app, port) {
-  const http = require('http')
-  const server = http.createServer(app)
-
-  return new Promise((resolve, reject) => {
-    server.on('error', reject)
-    server.on('close', (...args) => {
-      console.log('Server closed, shutting down...')
-      resolve(...args)
-    })
-    server.on('listening', () => {
-      console.log('API server started.', server.address())
-    })
-    server.listen(port, '::')
-    console.log('Starting API server...')
-  })
-}
-
-// Start app
-function startAllServices({ store, api, port, ipfsHttpGatewayUrl, anonymous }) {
-  const app = require('../lib/app')(PROJECT_ROOT, store, api, ipfsHttpGatewayUrl, anonymous)
-  return startExpressApp(app, port)
-}
-
-// Get an initialized storage instance
-function getStorage(runtimeApi, { ipfsHost }) {
-  // TODO at some point, we can figure out what backend-specific connection
-  // options make sense. For now, just don't use any configuration.
-  const { Storage } = require('@joystream/storage-node-backend')
-
-  const options = {
-    resolve_content_id: async (contentId) => {
-      // Resolve accepted content from cache
-      const hash = runtimeApi.assets.resolveContentIdToIpfsHash(contentId)
-      if (hash) return hash
-
-      // Resolve via API
-      const obj = await runtimeApi.assets.getDataObject(contentId)
-      if (!obj) {
-        return
-      }
-      // if obj.liaison_judgement !== Accepted .. throw ?
-      return obj.ipfs_content_id.toString()
-    },
-    ipfsHost,
-  }
-
-  return Storage.create(options)
-}
-
-async function initApiProduction({ wsProvider, providerId, keyFile, passphrase, anonymous }) {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-  const api = await RuntimeApi.create({
-    account_file: keyFile,
-    passphrase,
-    provider_url: wsProvider,
-    storageProviderId: providerId,
-  })
-
-  if (!anonymous && !api.identities.key) {
-    throw new Error('Failed to unlock storage provider account')
-  }
-
-  await api.untilChainIsSynced()
-
-  // We allow the node to start up without a correct provider id and account, but syncing and
-  // publishing of the identity will be skipped.
-  if (!anonymous && !(await api.providerIsActiveWorker())) {
-    debug('storage provider role account and storageProviderId are not associated with a worker')
-  }
-
-  return api
-}
-
-async function initApiDevelopment({ wsProvider }) {
-  // Load key information
-  const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-  const api = await RuntimeApi.create({
-    provider_url: wsProvider,
-  })
-
-  const dev = require('../../cli/dist/commands/dev')
-
-  api.identities.useKeyPair(dev.roleKeyPair(api))
-
-  // Wait until dev provider is added to role
-  while (true) {
-    try {
-      api.storageProviderId = await dev.check(api)
-      break
-    } catch (err) {
-      debug(err)
-    }
-
-    await sleep(10000)
-  }
-
-  return api
-}
-
-// TODO: instead of recursion use while/async-await and use promise/setTimeout based sleep
-// or cleaner code with generators?
-async function announcePublicUrl(api, publicUrl) {
-  // re-announce in future
-  const reannounce = function (timeoutMs) {
-    setTimeout(announcePublicUrl, timeoutMs, api, publicUrl)
-  }
-
-  const chainIsSyncing = await api.chainIsSyncing()
-  if (chainIsSyncing) {
-    debug('Chain is syncing. Postponing announcing public url.')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  // postpone if provider not active
-  if (!(await api.providerIsActiveWorker())) {
-    debug('storage provider role account and storageProviderId are not associated with a worker')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  const sufficientBalance = await api.providerHasMinimumBalance(1)
-  if (!sufficientBalance) {
-    debug('Provider role account does not have sufficient balance. Postponing announcing public url.')
-    return reannounce(10 * 60 * 1000)
-  }
-
-  debug('announcing public url')
-
-  try {
-    await api.workers.setWorkerStorageValue(publicUrl)
-
-    debug('announcing complete.')
-  } catch (err) {
-    debug(`announcing public url failed: ${err.stack}`)
-
-    // On failure retry sooner
-    debug(`announcing failed, retrying in: 2 minutes`)
-    reannounce(120 * 1000)
-  }
-}
-
-// Simple CLI commands
-let command = cli.input[0]
-if (!command) {
-  command = 'server'
-}
-
-const commands = {
-  server: async () => {
-    banner()
-    let publicUrl, port, api
-
-    if (cli.flags.dev) {
-      const dev = require('../../cli/dist/commands/dev')
-      api = await initApiDevelopment(cli.flags)
-      port = dev.developmentPort()
-      publicUrl = `http://localhost:${port}/`
-    } else {
-      api = await initApiProduction(cli.flags)
-      publicUrl = cli.flags.publicUrl
-      port = cli.flags.port
-    }
-
-    // Get initial data objects into cache
-    while (true) {
-      try {
-        debug('Fetching data objects')
-        await api.assets.fetchDataObjects()
-        break
-      } catch (err) {
-        debug('Failed fetching data objects', err)
-        await sleep(5000)
-      }
-    }
-
-    // Regularly update data objects
-    setInterval(async () => {
-      try {
-        debug('Fetching data objects')
-        await api.assets.fetchDataObjects()
-      } catch (err) {
-        debug('Failed updating data objects from chain', err)
-      }
-    }, 60000)
-
-    // TODO: check valid url, and valid port number
-    const store = getStorage(api, cli.flags)
-
-    const ipfsHost = cli.flags.ipfsHost
-    const ipfsHttpGatewayUrl = `http://${ipfsHost}:8080/`
-
-    const { startSyncing } = require('../lib/sync')
-    startSyncing(api, { anonymous: cli.flags.anonymous, maxSync: cli.flags.maxSync }, store)
-
-    if (!cli.flags.anonymous) {
-      announcePublicUrl(api, publicUrl)
-    }
-
-    return startAllServices({ store, api, port, ipfsHttpGatewayUrl, anonymous: cli.flags.anonymous })
-  },
-}
-
-async function main() {
-  // Simple CLI commands
-  let command = cli.input[0]
-  if (!command) {
-    command = 'server'
-  }
-
-  if (Object.prototype.hasOwnProperty.call(commands, command)) {
-    // Command recognized
-    const args = _.clone(cli.input).slice(1)
-    await commands[command](...args)
-  } else {
-    throw new Error(`Command '${command}' not recognized, aborting!`)
-  }
-}
-
-main()
-  .then(() => {
-    process.exit(0)
-  })
-  .catch((err) => {
-    console.error(chalk.red(err.stack))
-    process.exit(-1)
-  })
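
The removed `bin/cli.js` above leaves a TODO on `announcePublicUrl` suggesting the setTimeout-based recursion be replaced with a while/async-await loop and a promise-based sleep. A sketch of that alternative, assuming the same runtime-api methods and the `sleep` helper the file already imports (illustrative only, not part of the original code):

```js
// Sketch only: announcePublicUrl() rewritten as a loop, per the TODO in the removed bin/cli.js.
const { sleep } = require('@joystream/storage-utils/sleep')
const debug = require('debug')('joystream:colossus')

async function announcePublicUrlLoop(api, publicUrl) {
  const TEN_MINUTES = 10 * 60 * 1000
  const TWO_MINUTES = 2 * 60 * 1000

  // eslint-disable-next-line no-constant-condition
  while (true) {
    // Postpone while the chain is syncing, the provider is not an active worker,
    // or the role account balance is insufficient.
    if (
      (await api.chainIsSyncing()) ||
      !(await api.providerIsActiveWorker()) ||
      !(await api.providerHasMinimumBalance(1))
    ) {
      await sleep(TEN_MINUTES)
      continue
    }

    try {
      await api.workers.setWorkerStorageValue(publicUrl)
      debug('announcing complete.')
      return
    } catch (err) {
      debug(`announcing public url failed: ${err.stack}`)
      // On failure, retry sooner.
      await sleep(TWO_MINUTES)
    }
  }
}
```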

+ 0 - 78
storage-node/packages/colossus/lib/app.js

@@ -1,78 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-// Node requires
-const fs = require('fs')
-const path = require('path')
-
-// npm requires
-const express = require('express')
-const openapi = require('express-openapi')
-const bodyParser = require('body-parser')
-const cors = require('cors')
-const yaml = require('js-yaml')
-
-// Project requires
-const validateResponses = require('./middleware/validate_responses')
-const fileUploads = require('./middleware/file_uploads')
-const pagination = require('@joystream/storage-utils/pagination')
-
-// Configure app
-function createApp(projectRoot, storage, runtime, ipfsHttpGatewayUrl, anonymous) {
-  const app = express()
-  app.use(cors())
-  app.use(bodyParser.json())
-  // FIXME app.use(bodyParser.urlencoded({ extended: true }));
-
-  // Load & extend/configure API docs
-  let api = yaml.safeLoad(fs.readFileSync(path.resolve(projectRoot, 'api-base.yml')))
-  api['x-express-openapi-additional-middleware'] = [validateResponses]
-  api['x-express-openapi-validation-strict'] = true
-
-  api = pagination.openapi(api)
-
-  openapi.initialize({
-    apiDoc: api,
-    app,
-    paths: path.resolve(projectRoot, 'paths'),
-    docsPath: '/swagger.json',
-    consumesMiddleware: {
-      'multipart/form-data': fileUploads,
-    },
-    dependencies: {
-      storage,
-      runtime,
-      ipfsHttpGatewayUrl,
-      anonymous,
-    },
-  })
-
-  // If no other handler gets triggered (errors), respond with the
-  // error serialized to JSON.
-  // Disable lint because we need such function signature.
-  // eslint-disable-next-line no-unused-vars
-  app.use(function (err, req, res, next) {
-    res.status(err.status).json(err)
-  })
-
-  return app
-}
-
-module.exports = createApp

+ 0 - 43
storage-node/packages/colossus/lib/middleware/file_uploads.js

@@ -1,43 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const multer = require('multer')
-
-// Taken from express-openapi examples
-module.exports = function (req, res, next) {
-  multer().any()(req, res, function (err) {
-    if (err) {
-      return next(err)
-    }
-    // Handle both single and multiple files
-    const filesMap = req.files.reduce(
-      (acc, f) =>
-        Object.assign(acc, {
-          [f.fieldname]: (acc[f.fieldname] || []).concat(f),
-        }),
-      {}
-    )
-    Object.keys(filesMap).forEach((fieldname) => {
-      const files = filesMap[fieldname]
-      req.body[fieldname] = files.length > 1 ? files.map(() => '') : ''
-    })
-    return next()
-  })
-}

+ 0 - 77
storage-node/packages/colossus/lib/middleware/ipfs_proxy.js

@@ -1,77 +0,0 @@
-const { createProxyMiddleware } = require('http-proxy-middleware')
-const debug = require('debug')('joystream:ipfs-proxy')
-const mime = require('mime-types')
-
-/* 
-For this proxying to work correctly, ensure IPFS HTTP Gateway is configured as a path gateway:
-This can be done manually with the following command:
-
-  $ ipfs config --json Gateway.PublicGateways '{"localhost": null }' 
-  
-The implicit default config is below which is not what we want!
-
-  $ ipfs config --json Gateway.PublicGateways '{
-    "localhost": {
-        "Paths": ["/ipfs", "/ipns"],
-        "UseSubdomains": true
-      }
-    }'
-
-https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gateway
-*/
-
-const pathFilter = function (path, req) {
-  // we get the full path here so it needs to match the path where
-  // it is used by the openapi initializer
-  return path.match('^/asset/v0') && (req.method === 'GET' || req.method === 'HEAD')
-}
-
-const createPathRewriter = () => {
-  return async (_path, req) => {
-    const hash = req.params.ipfs_content_id
-    return `/ipfs/${hash}`
-  }
-}
-
-const createProxy = (ipfsHttpGatewayUrl) => {
-  const pathRewrite = createPathRewriter()
-
-  return createProxyMiddleware(pathFilter, {
-    // Default path to local IPFS HTTP GATEWAY
-    target: ipfsHttpGatewayUrl || 'http://localhost:8080/',
-    pathRewrite,
-    onProxyRes: function (proxRes, req, res) {
-      /*
-        Make sure the reverse proxy used in front of colossus (nginx/caddy) does not duplicate
-        these headers, to prevent some browsers getting confused, especially
-        with duplicate access-control-allow-origin headers!
-        'accept-ranges': 'bytes',
-        'access-control-allow-headers': 'Content-Type, Range, User-Agent, X-Requested-With',
-        'access-control-allow-methods': 'GET',
-        'access-control-allow-origin': '*',
-        'access-control-expose-headers': 'Content-Range, X-Chunked-Output, X-Stream-Output',
-      */
-
-      if (proxRes.statusCode === 301) {
-        // capture redirect when IPFS HTTP Gateway is configured with 'UseSubdomains': true
-        // and treat it as an error.
-        console.error('IPFS HTTP Gateway is configured for "UseSubdomains". Killing stream')
-        res.status(500).end()
-        proxRes.destroy()
-      } else {
-        // Handle downloading as attachment /asset/v0/:id?download
-        if (req.query.download) {
-          const contentId = req.params.id
-          const contentType = proxRes.headers['content-type']
-          const ext = mime.extension(contentType) || 'bin'
-          const fileName = `${contentId}.${ext}`
-          proxRes.headers['Content-Disposition'] = `attachment; filename=${fileName}`
-        }
-      }
-    },
-  })
-}
-
-module.exports = {
-  createProxy,
-}

+ 0 - 61
storage-node/packages/colossus/lib/middleware/validate_responses.js

@@ -1,61 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:middleware:validate')
-
-// Function taken directly from https://github.com/kogosoftwarellc/open-api/tree/master/packages/express-openapi
-module.exports = function (req, res, next) {
-  const strictValidation = !!req.apiDoc['x-express-openapi-validation-strict']
-  if (typeof res.validateResponse === 'function') {
-    const send = res.send
-    res.send = function expressOpenAPISend(...args) {
-      const onlyWarn = !strictValidation
-      if (res.get('x-express-openapi-validation-error-for') !== undefined) {
-        return send.apply(res, args)
-      }
-      if (res.get('x-express-openapi-validation-for') !== undefined) {
-        return send.apply(res, args)
-      }
-
-      const body = args[0]
-      let validation = res.validateResponse(res.statusCode, body)
-      let validationMessage
-      if (validation === undefined) {
-        validation = { message: undefined, errors: undefined }
-      }
-      if (validation.errors) {
-        const errorList = Array.from(validation.errors)
-          .map((_) => _.message)
-          .join(',')
-        validationMessage = `Invalid response for status code ${res.statusCode}: ${errorList}`
-        debug(validationMessage)
-        // Set to avoid a loop, and to provide the original status code
-        res.set('x-express-openapi-validation-error-for', res.statusCode.toString())
-      }
-      if ((onlyWarn || !validation.errors) && res.statusCode) {
-        res.set('x-express-openapi-validation-for', res.statusCode.toString())
-        return send.apply(res, args)
-      }
-      res.status(500)
-      return res.json({ error: validationMessage })
-    }
-  }
-  next()
-}

+ 0 - 120
storage-node/packages/colossus/lib/sync.js

@@ -1,120 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:sync')
-const _ = require('lodash')
-const { ContentId } = require('@joystream/types/storage')
-const { nextTick } = require('@joystream/storage-utils/sleep')
-
-// Time to wait between sync runs. The lower the better chance to consume all
-// available sync sessions allowed.
-const INTERVAL_BETWEEN_SYNC_RUNS_MS = 3000
-
-async function syncRun({ api, storage, contentBeingSynced, contentCompletedSync, flags }) {
-  // The number of concurrent items to attempt to fetch.
-  const MAX_CONCURRENT_SYNC_ITEMS = Math.max(1, flags.maxSync)
-
-  const contentIds = api.assets.getAcceptedIpfsHashes()
-
-  // Select ids which may need to be synced
-  const idsNotSynced = contentIds
-    .filter((id) => !contentCompletedSync.has(id))
-    .filter((id) => !contentBeingSynced.has(id))
-
-  // We limit how many content ids can be synced concurrently. To ensure a better
-  // distribution of content across storage nodes during a potentially long sync
-  // process, we don't want all nodes to replicate items in the same order, so
-  // we simply shuffle.
-  const idsToSync = _.shuffle(idsNotSynced)
-
-  while (contentBeingSynced.size < MAX_CONCURRENT_SYNC_ITEMS && idsToSync.length) {
-    const id = idsToSync.shift()
-
-    try {
-      contentBeingSynced.set(id)
-      await storage.pin(id, (err, status) => {
-        if (err) {
-          contentBeingSynced.delete(id)
-          debug(`Error Syncing ${err}`)
-        } else if (status.synced) {
-          contentBeingSynced.delete(id)
-          contentCompletedSync.set(id)
-        }
-      })
-    } catch (err) {
-      // Most likely failed to resolve the content id
-      debug(`Failed calling synchronize ${err}`)
-      contentBeingSynced.delete(id)
-    }
-
-    // Allow pending callbacks from the call to storage.pin() to be invoked during this sync run.
-    // This will happen if content is found to be local and will speed up the overall sync process.
-    await nextTick()
-  }
-}
-
-async function syncRunner({ api, flags, storage, contentBeingSynced, contentCompletedSync }) {
-  const retry = () => {
-    setTimeout(syncRunner, INTERVAL_BETWEEN_SYNC_RUNS_MS, {
-      api,
-      flags,
-      storage,
-      contentBeingSynced,
-      contentCompletedSync,
-    })
-  }
-
-  try {
-    if (await api.chainIsSyncing()) {
-      debug('Chain is syncing. Postponing sync.')
-    } else {
-      await syncRun({
-        api,
-        storage,
-        contentBeingSynced,
-        contentCompletedSync,
-        flags,
-      })
-    }
-  } catch (err) {
-    debug(`Error during sync ${err.stack}`)
-  }
-
-  // schedule next sync run
-  retry()
-}
-
-function startSyncing(api, flags, storage) {
-  // ids of content currently being synced
-  const contentBeingSynced = new Map()
-  // ids of content that completed sync
-  const contentCompletedSync = new Map()
-
-  syncRunner({ api, flags, storage, contentBeingSynced, contentCompletedSync })
-
-  setInterval(() => {
-    debug(`objects syncing: ${contentBeingSynced.size}`)
-    debug(`objects local: ${contentCompletedSync.size}`)
-  }, 60000)
-}
-
-module.exports = {
-  startSyncing,
-}

+ 0 - 73
storage-node/packages/colossus/package.json

@@ -1,73 +0,0 @@
-{
-  "name": "@joystream/colossus",
-  "private": true,
-  "version": "0.4.0",
-  "description": "Colossus - Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'test/**/*.js'",
-    "lint": "eslint 'paths/**/*.js' 'lib/**/*.js'",
-    "dev": "nodemon --watch api-base.yml --watch bin/ --watch paths/ --watch lib/ --verbose --ext js --exec node bin/cli.js --"
-  },
-  "bin": {
-    "colossus": "bin/cli.js"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "express": "^4.16.4",
-    "mocha": "^5.2.0",
-    "node-mocks-http": "^1.7.3",
-    "nodemon": "^1.18.10",
-    "supertest": "^3.4.2",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-node-backend": "^0.1.0",
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@joystream/storage-utils": "^0.1.0",
-    "body-parser": "^1.19.0",
-    "chalk": "^2.4.2",
-    "cors": "^2.8.5",
-    "express-openapi": "^4.6.1",
-    "figlet": "^1.2.1",
-    "http-proxy-middleware": "^1.0.5",
-    "ipfs-only-hash": "^1.0.2",
-    "js-yaml": "^3.13.1",
-    "lodash": "^4.17.11",
-    "meow": "^7.0.1",
-    "mime-types": "^2.1.27",
-    "multer": "^1.4.1",
-    "si-prefix": "^0.2.0"
-  }
-}

+ 0 - 385
storage-node/packages/colossus/paths/asset/v0/{id}.js

@@ -1,385 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:colossus:api:asset')
-const filter = require('@joystream/storage-node-backend/filter')
-const ipfsProxy = require('../../../lib/middleware/ipfs_proxy')
-const assert = require('assert')
-
-function errorHandler(response, err, code) {
-  debug(err)
-  // Some err types don't have a valid http status code such as one that come from ipfs node for example
-  const statusCode = typeof err.code === 'number' ? err.code : code
-  response.status(statusCode || 500).send({ message: err.toString() })
-  response.end()
-}
-
-// The maximum total estimated balance that will be spent submitting transactions
-// by the node after processing one upload. Here we assume 3 transactions with a
-// base transaction fee = 1. In future this estimate will need to be more accurate
-// and derived from the weight-to-fee calculation.
-const PROCESS_UPLOAD_TX_COSTS = 3
-
-module.exports = function (storage, runtime, ipfsHttpGatewayUrl, anonymous) {
-  debug('created path handler')
-
-  // Create the IPFS HTTP Gateway proxy middleware
-  const proxy = ipfsProxy.createProxy(ipfsHttpGatewayUrl)
-
-  // Cache of known content mappings and local availability info
-  const ipfsContentIdMap = new Map()
-
-  // Make sure id is valid and was 'Accepted', only then proxy if content is local
-  const proxyAcceptedContentToIpfsGateway = async (req, res, next) => {
-    const content_id = req.params.id
-
-    if (!ipfsContentIdMap.has(content_id)) {
-      const hash = runtime.assets.resolveContentIdToIpfsHash(req.params.id)
-
-      if (!hash) {
-        return res.status(404).send({ message: 'Unknown content' })
-      }
-
-      ipfsContentIdMap.set(content_id, {
-        local: false,
-        ipfs_content_id: hash,
-      })
-    }
-
-    const { ipfs_content_id, local } = ipfsContentIdMap.get(content_id)
-
-    // Pass on the ipfs hash to the middleware
-    req.params.ipfs_content_id = ipfs_content_id
-
-    // Serve it if we know we have it, or it was recently synced successfully
-    if (local || storage.syncStatus(ipfs_content_id).synced) {
-      return proxy(req, res, next)
-    }
-
-    // Not yet processed by sync run, check if we have it locally
-    try {
-      const stat = await storage.ipfsStat(ipfs_content_id, 4000)
-
-      if (stat.local) {
-        ipfsContentIdMap.set(content_id, {
-          local: true,
-          ipfs_content_id,
-        })
-
-        // We know we have the full content locally, serve it
-        return proxy(req, res, next)
-      }
-    } catch (_err) {
-      // timeout trying to stat which most likely means we do not have it
-      // debug('Failed to stat', ipfs_content_id)
-    }
-
-    // Valid content but no certainty that the node has it locally yet.
-    // We avoid serving it to prevent poor performance (ipfs node will have to retrieve it on demand
-    // which might be slow and wasteful if content is not cached locally)
-    res.status(404).send({ message: 'Content not available locally' })
-  }
-
-  const doc = {
-    // parameters for all operations in this path
-    parameters: [
-      {
-        name: 'id',
-        in: 'path',
-        required: true,
-        description: 'Joystream Content ID',
-        schema: {
-          type: 'string',
-        },
-      },
-    ],
-
-    // Put for uploads
-    async put(req, res) {
-      if (anonymous) {
-        errorHandler(res, 'Uploads Not Permitted in Anonymous Mode', 400)
-        return
-      }
-
-      const id = req.params.id // content id
-
-      // Check if content exists
-      const roleAddress = runtime.identities.key.address
-      const providerId = runtime.storageProviderId
-      let dataObject
-
-      try {
-        dataObject = await runtime.assets.getDataObject(id)
-      } catch (err) {
-        errorHandler(res, err, 403)
-        return
-      }
-
-      if (!dataObject) {
-        res.status(404).send({ message: 'Content Not Found' })
-        return
-      }
-
-      // Early filtering on content_length, do not wait for fileInfo;
-      // ensure it's less than the max allowed by node policy.
-      const filterResult = filter({}, req.headers)
-
-      if (filterResult.code !== 200) {
-        errorHandler(res, new Error(filterResult.message), filterResult.code)
-        return
-      }
-
-      // Ensure content_length from request equals size in data object.
-      if (!dataObject.size_in_bytes.eq(filterResult.content_length)) {
-        errorHandler(res, new Error('Content Length does not match expected size of content'), 403)
-        return
-      }
-
-      // Ensure we have the minimum balance to successfully update state on chain after processing
-      // upload. Due to the node handling concurrent uploads this check will not always guarantee
-      // at the point when transactions are sent that the balance will still be sufficient.
-      const sufficientBalance = await runtime.providerHasMinimumBalance(PROCESS_UPLOAD_TX_COSTS)
-
-      if (!sufficientBalance) {
-        errorHandler(res, 'Server has insufficient balance to process upload.', 503)
-        return
-      }
-
-      // We'll open a write stream to the backend, but reserve the right to
-      // abort upload if the filters don't smell right.
-      let stream
-      try {
-        stream = await storage.open(id, 'w')
-
-        // Whether we are aborting early because early file detection did not pass the filter
-        let aborted = false
-
-        // Early file info detection so we can abort early on, but we do not reject the
-        // content yet because we don't yet have the ipfs hash computed
-        stream.on('fileInfo', async (info) => {
-          try {
-            debug('Early file detection info:', info)
-
-            const filterResult = filter({}, req.headers, info.mimeType)
-            if (filterResult.code !== 200) {
-              aborted = true
-              debug('Ending stream', filterResult.message)
-              stream.end()
-              stream.cleanup()
-              res.status(filterResult.code).send({ message: filterResult.message })
-            }
-          } catch (err) {
-            errorHandler(res, err)
-          }
-        })
-
-        stream.on('end', async () => {
-          if (!aborted) {
-            try {
-              // try to get file info and compute the ipfs hash before committing the stream to the ipfs node.
-              await stream.info()
-            } catch (err) {
-              errorHandler(res, err)
-            }
-          }
-        })
-
-        // At the end of the stream we should have the file info and computed ipfs hash - this event is
-        // emitted only by explicitly calling stream.info() in the stream.on('end') event handler
-        stream.once('info', async (info, hash) => {
-          if (hash === dataObject.ipfs_content_id.toString()) {
-            const filterResult = filter({}, req.headers, info.mimeType)
-            if (filterResult.code !== 200) {
-              debug('Rejecting content')
-              stream.cleanup()
-              res.status(400).send({ message: 'Rejecting content type' })
-            } else {
-              try {
-                await stream.commit()
-              } catch (err) {
-                errorHandler(res, err)
-              }
-            }
-          } else {
-            stream.cleanup()
-            res.status(400).send({ message: 'Aborting - Not expected IPFS hash for content' })
-          }
-        })
-
-        stream.on('committed', async (hash) => {
-          // they cannot be different unless we did something stupid!
-          assert(hash === dataObject.ipfs_content_id.toString())
-
-          ipfsContentIdMap.set(id, {
-            ipfs_content_id: hash,
-            local: true,
-          })
-
-          // Send ok response early, no need for client to wait for relationships to be created.
-          debug('Sending OK response.')
-          res.status(200).send({ message: 'Asset uploaded.' })
-
-          try {
-            debug('accepting Content')
-            // Only if judgement is Pending
-            if (dataObject.liaison_judgement.type === 'Pending') {
-              await runtime.assets.acceptContent(roleAddress, providerId, id)
-            }
-          } catch (err) {
-            debug(`${err.message}`)
-          }
-        })
-
-        stream.on('error', (err) => {
-          stream.end()
-          stream.cleanup()
-          errorHandler(res, err)
-        })
-        req.pipe(stream)
-      } catch (err) {
-        errorHandler(res, err)
-      }
-    },
-
-    async get(req, res, next) {
-      proxyAcceptedContentToIpfsGateway(req, res, next)
-    },
-
-    async head(req, res, next) {
-      proxyAcceptedContentToIpfsGateway(req, res, next)
-    },
-  }
-
-  // doc.get = proxy
-  // doc.head = proxy
-  // Note: Adding the middleware this way is causing problems!
-  // We are losing some information from the request, specifically the req.query.download parameter, for some reason.
-  // Does it have to do with how/when the apiDoc is being processed? binding issue?
-
-  // OpenAPI specs
-  doc.get.apiDoc = {
-    description: 'Download an asset.',
-    operationId: 'assetData',
-    tags: ['asset', 'data'],
-    parameters: [
-      {
-        name: 'download',
-        in: 'query',
-        description: 'Download instead of streaming inline.',
-        required: false,
-        allowEmptyValue: true,
-        schema: {
-          type: 'boolean',
-          default: false,
-        },
-      },
-    ],
-    responses: {
-      200: {
-        description: 'Asset download.',
-        content: {
-          default: {
-            schema: {
-              type: 'string',
-              format: 'binary',
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  doc.put.apiDoc = {
-    description: 'Asset upload.',
-    operationId: 'assetUpload',
-    tags: ['asset', 'data'],
-    requestBody: {
-      content: {
-        '*/*': {
-          schema: {
-            type: 'string',
-            format: 'binary',
-          },
-        },
-      },
-    },
-    responses: {
-      200: {
-        description: 'Asset upload.',
-        content: {
-          'application/json': {
-            schema: {
-              type: 'object',
-              required: ['message'],
-              properties: {
-                message: {
-                  type: 'string',
-                },
-              },
-            },
-          },
-        },
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  doc.head.apiDoc = {
-    description: 'Asset download information.',
-    operationId: 'assetInfo',
-    tags: ['asset', 'metadata'],
-    responses: {
-      200: {
-        description: 'Asset info.',
-      },
-      default: {
-        description: 'Unexpected error',
-        content: {
-          'application/json': {
-            schema: {
-              $ref: '#/components/schemas/Error',
-            },
-          },
-        },
-      },
-    },
-  }
-
-  return doc
-}

+ 0 - 1
storage-node/packages/colossus/test/index.js

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 3
storage-node/packages/helios/.gitignore

@@ -1,3 +0,0 @@
-node_modules/
-lib/
-

+ 0 - 9
storage-node/packages/helios/README.md

@@ -1,9 +0,0 @@
-# Joystream Helios
-
-A basic tool to scan the joystream storage network to get a bird's-eye view of the health of the storage providers and the content replication status.
-
-## Scanning
-
-```
-yarn helios
-```
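
The README above frames the scan as a set of availability checks; the removed `bin/cli.js` below implements them as HTTP HEAD requests against each provider's asset endpoint. A trimmed-down sketch of that core check (the `assetAvailable` helper is hypothetical; the real tool batches requests and encodes the content id first):

```js
// Hypothetical, trimmed-down version of the availability check performed by the CLI below.
const axios = require('axios')
const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')

async function assetAvailable(endpoint, encodedContentId) {
  try {
    // A HEAD request is enough to confirm the provider can serve the asset.
    await axios.head(`${stripEndingSlash(endpoint)}/asset/v0/${encodedContentId}`)
    return true
  } catch (err) {
    return false
  }
}
```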

+ 0 - 128
storage-node/packages/helios/bin/cli.js

@@ -1,128 +0,0 @@
-#!/usr/bin/env node
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-const { encodeAddress } = require('@polkadot/keyring')
-const axios = require('axios')
-const stripEndingSlash = require('@joystream/storage-utils/stripEndingSlash')
-
-function makeAssetUrl(contentId, source) {
-  source = stripEndingSlash(source)
-  return `${source}/asset/v0/${encodeAddress(contentId)}`
-}
-
-// HTTP HEAD with axios all known content ids on given endpoint
-async function countContentAvailability(providerId, endpoint, contentIds) {
-  let found = 0
-  let errored = 0
-  let requestsSent = 0
-  // Avoid opening too many connections, do it in chunks.. otherwise we get
-  // "Client network socket disconnected before secure TLS connection was established" errors
-  while (contentIds.length) {
-    const chunk = contentIds.splice(0, 100)
-    requestsSent += chunk.length
-    const results = await Promise.allSettled(chunk.map((id) => axios.head(makeAssetUrl(id, endpoint))))
-
-    results.forEach((result, _ix) => {
-      if (result.status === 'rejected') {
-        errored++
-      } else {
-        found++
-      }
-    })
-
-    // show some progress
-    console.error(`provider: ${providerId}:`, `total checks: ${requestsSent}`, `ok: ${found}`, `errors: ${errored}`)
-  }
-
-  return { found, errored }
-}
-
-async function testProviderHasAssets(providerId, endpoint, contentIds) {
-  const total = contentIds.length
-  const startedAt = Date.now()
-  const { found, errored } = await countContentAvailability(providerId, endpoint, contentIds)
-  const completedAt = Date.now()
-  console.log(`
-    ---------------------------------------
-    Final Result for provider ${providerId}
-    url: ${endpoint}
-    fetched: ${found}/${total}
-    failed: ${errored}
-    check took: ${(completedAt - startedAt) / 1000}s
-    ------------------------------------------
-  `)
-}
-
-async function main() {
-  const runtime = await RuntimeApi.create()
-  const { api } = runtime
-
-  // get all providers
-  const { ids: storageProviders } = await runtime.workers.getAllProviders()
-  console.log(`Found ${storageProviders.length} staked providers`)
-
-  // Resolve Endpoints of providers
-  console.log('\nResolving live provider API Endpoints...')
-  const endpoints = await Promise.all(
-    storageProviders.map(async (providerId) => {
-      try {
-        const endpoint = (await runtime.workers.getWorkerStorageValue(providerId)).toString()
-        return { providerId, endpoint }
-      } catch (err) {
-        console.log('resolve failed for id', providerId, err.message)
-        return { providerId, endpoint: null }
-      }
-    })
-  )
-
-  console.log('\nChecking API Endpoints are online')
-  await Promise.all(
-    endpoints.map(async (provider) => {
-      if (!provider.endpoint) {
-        console.log(provider.providerId, 'No url set, skipping')
-        return
-      }
-      const swaggerUrl = `${stripEndingSlash(provider.endpoint)}/swagger.json`
-      try {
-        const { data } = await axios.get(swaggerUrl)
-        console.log(
-          `${provider.providerId}:`,
-          `${provider.endpoint}`,
-          '- OK -',
-          `storage node version ${data.info.version}`
-        )
-      } catch (err) {
-        console.log(`${provider.providerId}`, `${provider.endpoint} - ${err.message}`)
-      }
-    })
-  )
-
-  // Load data objects
-  await runtime.assets.fetchDataObjects()
-
-  const allContentIds = await runtime.assets.getKnownContentIds()
-  const acceptedContentIds = runtime.assets.getAcceptedContentIds()
-  const ipfsHashes = runtime.assets.getAcceptedIpfsHashes()
-
-  console.log('\nData Directory objects:')
-  console.log(allContentIds.length, 'created')
-  console.log(acceptedContentIds.length, 'accepted')
-  console.log(ipfsHashes.length, 'unique accepted hashes')
-
-  // We no longer need a connection to the chain
-  api.disconnect()
-
-  console.log(`
-    Checking available assets on providers (this can take some time)
-    This is done by sending HEAD requests for all 'Accepted' assets.
-  `)
-
-  endpoints.forEach(async ({ providerId, endpoint }) => {
-    if (!endpoint) {
-      return
-    }
-    return testProviderHasAssets(providerId, endpoint, acceptedContentIds.slice())
-  })
-}
-
-main()

+ 0 - 21
storage-node/packages/helios/package.json

@@ -1,21 +0,0 @@
-{
-  "name": "@joystream/helios",
-  "private": true,
-  "version": "0.1.0",
-  "bin": {
-    "helios": "bin/cli.js"
-  },
-  "scripts": {
-    "test": "echo \"Error: no test specified\" && exit 0"
-  },
-  "license": "GPL-3.0-only",
-  "dependencies": {
-    "@joystream/storage-runtime-api": "^0.1.0",
-    "@types/bn.js": "^4.11.5",
-    "axios": "^0.19.0",
-    "bn.js": "^4.11.8"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  }
-}

+ 0 - 1
storage-node/packages/helios/test/index.js

@@ -1 +0,0 @@
-// Add Tests!

+ 0 - 1
storage-node/packages/runtime-api/.eslintrc.js

@@ -1 +0,0 @@
-../../.eslintrc.js

+ 0 - 3
storage-node/packages/runtime-api/.gitignore

@@ -1,3 +0,0 @@
-# Generated JS files
-types/*.js
-!types/index.js

+ 0 - 6
storage-node/packages/runtime-api/README.md

@@ -1,6 +0,0 @@
-# Summary
-
-This package contains convenience functions for the runtime API.
-
-The main entry point creates and initializes a `@polkadot/api` instance, and
-provides more workflow oriented functions than the underlying API exposes.

+ 0 - 210
storage-node/packages/runtime-api/assets.js

@@ -1,210 +0,0 @@
-'use strict'
-
-const debug = require('debug')('joystream:runtime:assets')
-const { decodeAddress } = require('@polkadot/keyring')
-const { StorageObjectOwner, DataObject } = require('@joystream/types/storage')
-
-function parseContentId(contentId) {
-  try {
-    return decodeAddress(contentId)
-  } catch (err) {
-    return contentId
-  }
-}
-
-/*
- * Add asset related functionality to the substrate API.
- */
-class AssetsApi {
-  static async create(base) {
-    const ret = new AssetsApi()
-    ret.base = base
-    await AssetsApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Create and return a data object.
-   */
-  async createDataObject(accountId, memberId, contentId, doTypeId, size, ipfsCid) {
-    contentId = parseContentId(contentId)
-    const owner = {
-      Member: memberId,
-    }
-    const content = [
-      {
-        content_id: contentId,
-        type_id: doTypeId,
-        size,
-        ipfs_content_id: ipfsCid,
-      },
-    ]
-    const tx = this.base.api.tx.dataDirectory.addContent(owner, content)
-    await this.base.signAndSend(accountId, tx)
-
-    // If the data object was constructed properly, we should now be able to return
-    // the data object from the state.
-    return this.getDataObject(contentId)
-  }
-
-  /*
-   * Returns the Data Object for a contentId.
-   * Returns null if it doesn't exist.
-   */
-  async getDataObject(contentId) {
-    contentId = parseContentId(contentId)
-    // check if contentId key exists in map
-    const storageSize = await this.base.api.query.dataDirectory.dataByContentId.size(contentId)
-    if (storageSize.eq(0)) {
-      return null
-    }
-    return this.base.api.query.dataDirectory.dataByContentId(contentId)
-  }
-
-  /*
-   * Verify the liaison state for a DataObject:
-   * - Check the content ID has a DataObject
-   * - Check the storageProviderId is the liaison
-   * - Check the liaison state is Pending
-   *
-   * Each failure errors out, success returns the data object.
-   */
-  async checkLiaisonForDataObject(storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-
-    const obj = await this.getDataObject(contentId)
-
-    if (!obj) {
-      throw new Error(`No DataObject found for content ID: ${contentId}`)
-    }
-
-    if (!obj.liaison.eq(storageProviderId)) {
-      throw new Error(`This storage node is not liaison for the content ID: ${contentId}`)
-    }
-
-    if (obj.liaison_judgement.type !== 'Pending') {
-      throw new Error(`Content upload has already been processed.`)
-    }
-
-    return obj
-  }
-
-  /*
-   * Sets the data object liaison judgement to Accepted
-   */
-  async acceptContent(providerAccountId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataDirectory.acceptContent(storageProviderId, contentId)
-    return this.base.signAndSend(providerAccountId, tx)
-  }
-
-  /*
-   * Gets storage relationship for contentId for the given provider
-   */
-  async getStorageRelationshipAndId(storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const rids = await this.base.api.query.dataObjectStorageRegistry.relationshipsByContentId(contentId)
-
-    while (rids.length) {
-      const relationshipId = rids.shift()
-      let relationship = await this.base.api.query.dataObjectStorageRegistry.relationships(relationshipId)
-      relationship = relationship.unwrap()
-      if (relationship.storage_provider.eq(storageProviderId)) {
-        return { relationship, relationshipId }
-      }
-    }
-
-    return {}
-  }
-
-  /*
-   * Creates storage relationship for a data object and provider and
-   * returns the relationship id
-   */
-  async createStorageRelationship(providerAccountId, storageProviderId, contentId) {
-    contentId = parseContentId(contentId)
-    const tx = this.base.api.tx.dataObjectStorageRegistry.addRelationship(storageProviderId, contentId)
-
-    return this.base.signAndSendThenGetEventResult(providerAccountId, tx, {
-      module: 'dataObjectStorageRegistry',
-      event: 'DataObjectStorageRelationshipAdded',
-      type: 'DataObjectStorageRelationshipId',
-      index: 0,
-    })
-  }
-
-  /*
-   * Set the ready state for a data object storage relationship to the new value
-   */
-  async toggleStorageRelationshipReady(providerAccountId, storageProviderId, dosrId, ready) {
-    const tx = ready
-      ? this.base.api.tx.dataObjectStorageRegistry.setRelationshipReady(storageProviderId, dosrId)
-      : this.base.api.tx.dataObjectStorageRegistry.unsetRelationshipReady(storageProviderId, dosrId)
-    return this.base.signAndSend(providerAccountId, tx)
-  }
-
-  /*
-   * Returns array of all the content ids in storage
-   */
-  async getKnownContentIds() {
-    const keys = await this.base.api.query.dataDirectory.dataByContentId.keys()
-    return keys.map(({ args: [contentId] }) => contentId)
-  }
-
-  /*
-   * Returns array of all content ids in storage where liaison judgement was Accepted
-   */
-  getAcceptedContentIds() {
-    if (!this._cachedEntries) {
-      return []
-    }
-
-    return this._cachedEntries
-      .filter(([, dataObject]) => dataObject.liaison_judgement.type === 'Accepted')
-      .map(
-        ([
-          {
-            args: [contentId],
-          },
-        ]) => contentId
-      )
-  }
-
-  /*
-   * Returns array of all ipfs hashes in storage where liaison judgement was Accepted
-   */
-  getAcceptedIpfsHashes() {
-    if (!this._cachedEntries) {
-      return []
-    }
-    const hashes = new Map()
-    this._cachedEntries
-      .filter(([, dataObject]) => dataObject.liaison_judgement.type === 'Accepted')
-      .forEach(([, dataObject]) => hashes.set(dataObject.ipfs_content_id.toString()))
-    return Array.from(hashes.keys())
-  }
-
-  /*
-   * Fetch and cache all data objects
-   */
-  async fetchDataObjects() {
-    this._cachedEntries = await this.base.api.query.dataDirectory.dataByContentId.entries()
-    this._idMappings = new Map()
-    this._cachedEntries.forEach(([{ args: [contentId] }, dataObject]) =>
-      this._idMappings.set(contentId.encode(), dataObject.ipfs_content_id.toString())
-    )
-  }
-
-  resolveContentIdToIpfsHash(contentId) {
-    if (!this._idMappings) return null
-    return this._idMappings.get(contentId)
-  }
-}
-
-module.exports = {
-  AssetsApi,
-}
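
For reference, the removed AssetsApi was driven from a connected RuntimeApi instance. Below is a minimal sketch of the liaison flow; the account-file path, worker id, and content id are hypothetical placeholders, not values from this commit.

const { RuntimeApi } = require('@joystream/storage-runtime-api')

async function acceptPendingUpload() {
  // Hypothetical setup: local node defaults plus an unlocked role key file
  const api = await RuntimeApi.create({ account_file: './storage-role-key.json' })
  const providerAccountId = api.identities.key.address
  const storageProviderId = 0 // placeholder worker id
  const contentId = '<content-id>' // placeholder

  // Throws unless the object exists, this node is the liaison, and the judgement is Pending
  await api.assets.checkLiaisonForDataObject(storageProviderId, contentId)

  // Sets the liaison judgement to Accepted on chain
  await api.assets.acceptContent(providerAccountId, storageProviderId, contentId)
  api.disconnect()
}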

+ 0 - 79
storage-node/packages/runtime-api/balances.js

@@ -1,79 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:runtime:balances')
-
-/*
- * Bundle API calls related to account balances.
- */
-class BalancesApi {
-  static async create(base) {
-    const ret = new BalancesApi()
-    ret.base = base
-    await BalancesApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Return true/false if the account has a minimum spendable balance.
-   */
-  async hasMinimumBalanceOf(accountId, min) {
-    const balance = await this.availableBalance(accountId)
-    if (typeof min === 'number') {
-      return balance.cmpn(min) >= 0
-    }
-    return balance.cmp(min) >= 0
-  }
-
-  /*
-   * Return the account's available balance which can be spent.
-   */
-  async availableBalance(accountId) {
-    const decoded = this.base.identities.keyring.decodeAddress(accountId, true)
-    return (await this.base.api.derive.balances.all(decoded)).availableBalance
-  }
-
-  /*
-   * Return the base transaction fee.
-   */
-  baseTransactionFee() {
-    return this.base.api.consts.transactionPayment.transactionBaseFee
-  }
-
-  /*
-   * Transfer the given amount of currency from one address to another. The sending
-   * address must be an unlocked key pair!
-   */
-  async transfer(from, to, amount) {
-    const decode = require('@polkadot/keyring').decodeAddress
-    const toDecoded = decode(to, true)
-
-    const tx = this.base.api.tx.balances.transfer(toDecoded, amount)
-    return this.base.signAndSend(from, tx)
-  }
-}
-
-module.exports = {
-  BalancesApi,
-}
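
A corresponding sketch for the removed BalancesApi, assuming an already-connected RuntimeApi instance `api` and placeholder addresses and amounts; `min` may be a plain number or a BN-style value, since both comparison paths are handled above.

async function topUpIfNeeded(api, fromAddress, toAddress) {
  // Check the spendable balance and transfer a small placeholder amount if it is too low.
  // The sending address must correspond to an unlocked key pair in the keyring.
  if (!(await api.balances.hasMinimumBalanceOf(toAddress, 100))) {
    await api.balances.transfer(fromAddress, toAddress, 100)
  }
}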

+ 0 - 246
storage-node/packages/runtime-api/identities.js

@@ -1,246 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const path = require('path')
-const fs = require('fs')
-const debug = require('debug')('joystream:runtime:identities')
-const { Keyring } = require('@polkadot/keyring')
-const utilCrypto = require('@polkadot/util-crypto')
-
-/*
- * Add identity management to the substrate API.
- *
- * This loosely groups: accounts, key management, and membership.
- */
-class IdentitiesApi {
-  static async create(base, { accountFile, passphrase, canPromptForPassphrase }) {
-    const ret = new IdentitiesApi()
-    ret.base = base
-    await ret.init(accountFile, passphrase, canPromptForPassphrase)
-    return ret
-  }
-
-  async init(accountFile, passphrase, canPromptForPassphrase) {
-    debug('Init')
-
-    // Create keyring
-    this.keyring = new Keyring()
-
-    this.canPromptForPassphrase = canPromptForPassphrase || false
-
-    // Load account file, if possible.
-    try {
-      this.key = await this.loadUnlock(accountFile, passphrase)
-    } catch (err) {
-      debug('Error loading account file:', err.message)
-    }
-  }
-
-  /*
-   * Load a key file and unlock it if necessary.
-   */
-  async loadUnlock(accountFile, passphrase) {
-    const fullname = path.resolve(accountFile)
-    debug('Initializing key from', fullname)
-    const key = this.keyring.addFromJson(require(fullname))
-    await this.tryUnlock(key, passphrase)
-    debug('Successfully initialized with address', key.address)
-    return key
-  }
-
-  /*
-   * Try to unlock a key if it isn't already unlocked.
-   * passphrase should be supplied as argument.
-   */
-  async tryUnlock(key, passphrase) {
-    if (!key.isLocked) {
-      debug('Key is not locked, not attempting to unlock')
-      return
-    }
-
-    // First try with an empty passphrase - for convenience
-    try {
-      key.decodePkcs8('')
-
-      if (passphrase) {
-        debug('Key was not encrypted, supplied passphrase was ignored')
-      }
-
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // Then with supplied passphrase
-    try {
-      debug('Decrypting with supplied passphrase')
-      key.decodePkcs8(passphrase)
-      return
-    } catch (err) {
-      // pass
-    }
-
-    // If that didn't work, ask for a passphrase if appropriate
-    if (this.canPromptForPassphrase) {
-      passphrase = await this.askForPassphrase(key.address)
-      key.decodePkcs8(passphrase)
-      return
-    }
-
-    throw new Error('invalid passphrase supplied')
-  }
-
-  /*
-   * Ask for a passphrase
-   */
-
-  /* eslint-disable class-methods-use-this */
-  // Disable lint because the method used by a mocking library.
-  askForPassphrase(address) {
-    // Query for passphrase
-    const prompt = require('password-prompt')
-    return prompt(`Enter passphrase for ${address}: `, { required: false })
-  }
-
-  /*
-   * Return true if the account is a root account of a member
-   */
-  async isMember(accountId) {
-    const memberIds = await this.memberIdsOf(accountId) // return array of member ids
-    return memberIds.length > 0 // true if at least one member id exists for the account
-  }
-
-  /*
-   * Return all the member IDs of an account by the root account id
-   */
-  async memberIdsOf(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    return this.base.api.query.members.memberIdsByRootAccountId(decoded)
-  }
-
-  /*
-   * Return all the member IDs of an account by the controller account id
-   */
-  async memberIdsOfController(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    return this.base.api.query.members.memberIdsByControllerAccountId(decoded)
-  }
-
-  /*
-   * Return the first member ID of an account, or undefined if not a member root account.
-   */
-  async firstMemberIdOf(accountId) {
-    const decoded = this.keyring.decodeAddress(accountId)
-    const ids = await this.base.api.query.members.memberIdsByRootAccountId(decoded)
-    return ids[0]
-  }
-
-  /*
-   * Export a key pair to JSON. Will ask for a passphrase.
-   */
-  async exportKeyPair(accountId) {
-    const passphrase = await this.askForPassphrase(accountId)
-
-    // Produce JSON output
-    return this.keyring.toJson(accountId, passphrase)
-  }
-
-  /*
-   * Export a key pair and write it to a JSON file with the account ID as the
-   * name.
-   */
-  async writeKeyPairExport(accountId, prefix) {
-    // Generate JSON
-    const data = await this.exportKeyPair(accountId)
-
-    // Write JSON
-    let filename = `${data.address}.json`
-
-    if (prefix) {
-      const path = require('path')
-      filename = path.resolve(prefix, filename)
-    }
-
-    fs.writeFileSync(filename, JSON.stringify(data), {
-      encoding: 'utf8',
-      mode: 0o600,
-    })
-
-    return filename
-  }
-
-  /*
-   * Register account id with userInfo as a new member
-   * using default policy 0, returns new member id
-   */
-  async registerMember(accountId, userInfo) {
-    const tx = this.base.api.tx.members.buyMembership(0, userInfo.handle, userInfo.avatarUri, userInfo.about)
-
-    return this.base.signAndSendThenGetEventResult(accountId, tx, {
-      module: 'members',
-      event: 'MemberRegistered',
-      type: 'MemberId',
-      index: 0,
-    })
-  }
-
-  /*
-   * Injects a keypair and sets it as the default identity
-   */
-  useKeyPair(keyPair) {
-    this.key = this.keyring.addPair(keyPair)
-  }
-
-  /*
-   * Create a new role key. If no name is given,
-   * default to 'storage-provider'.
-   */
-  async createNewRoleKey(name) {
-    name = name || 'storage-provider'
-
-    // Generate new key pair
-    const keyPair = utilCrypto.naclKeypairFromRandom()
-
-    // Encode to an address.
-    const addr = this.keyring.encodeAddress(keyPair.publicKey)
-    debug('Generated new key pair with address', addr)
-
-    // Add to keyring. We set the meta to identify the account as
-    // a role key.
-    const meta = {
-      name: `${name} role account`,
-    }
-
-    const createPair = require('@polkadot/keyring/pair').default
-    const pair = createPair('ed25519', keyPair, meta)
-
-    this.keyring.addPair(pair)
-
-    return pair
-  }
-
-  getSudoAccount() {
-    return this.base.api.query.sudo.key()
-  }
-}
-
-module.exports = {
-  IdentitiesApi,
-}
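
A sketch of how the removed IdentitiesApi was used to load a role key and check membership; the key file path and passphrase are placeholders, and `api` is assumed to be a connected RuntimeApi instance.

async function loadKeyAndCheckMembership(api) {
  // Loads the key file and unlocks it (an empty passphrase is tried first)
  const key = await api.identities.loadUnlock('./keys/role-account.json', 'placeholder-passphrase')

  if (!(await api.identities.isMember(key.address))) {
    throw new Error(`${key.address} is not the root account of any member`)
  }

  // Returns the first member id, or undefined if none exists
  return api.identities.firstMemberIdOf(key.address)
}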

+ 0 - 379
storage-node/packages/runtime-api/index.js

@@ -1,379 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const debug = require('debug')('joystream:runtime:base')
-const debugTx = require('debug')('joystream:runtime:base:tx')
-
-const { types } = require('@joystream/types')
-const { ApiPromise, WsProvider } = require('@polkadot/api')
-const { IdentitiesApi } = require('@joystream/storage-runtime-api/identities')
-const { BalancesApi } = require('@joystream/storage-runtime-api/balances')
-const { WorkersApi } = require('@joystream/storage-runtime-api/workers')
-const { AssetsApi } = require('@joystream/storage-runtime-api/assets')
-const { SystemApi } = require('@joystream/storage-runtime-api/system')
-const AsyncLock = require('async-lock')
-const Promise = require('bluebird')
-const { sleep } = require('@joystream/storage-utils/sleep')
-
-Promise.config({
-  cancellation: true,
-})
-
-const TX_TIMEOUT = 20 * 1000
-
-/*
- * Initialize runtime (substrate) API and keyring.
- */
-class RuntimeApi {
-  static async create(options) {
-    const runtimeApi = new RuntimeApi()
-    await runtimeApi.init(options || {})
-    return runtimeApi
-  }
-
-  async init(options) {
-    debug('Init')
-
-    options = options || {}
-
-    const provider = new WsProvider(options.provider_url || 'ws://localhost:9944')
-    let attempts = 0
-    // Create the API instance
-    while (true) {
-      attempts++
-
-      if (options.retries && attempts > options.retries) {
-        throw new Error('Timeout trying to connect to node')
-      }
-
-      try {
-        this.api = new ApiPromise({ provider, types })
-        await this.api.isReadyOrError
-        break
-      } catch (err) {
-        debug('Connecting to node failed, will retry..')
-      }
-      await sleep(5000)
-    }
-
-    this.asyncLock = new AsyncLock()
-
-    // The storage provider id to use
-    this.storageProviderId = parseInt(options.storageProviderId) // u64 instead ?
-
-    // Ok, create individual APIs
-    this.identities = await IdentitiesApi.create(this, {
-      accountFile: options.account_file,
-      passphrase: options.passphrase,
-      canPromptForPassphrase: options.canPromptForPassphrase,
-    })
-    this.balances = await BalancesApi.create(this)
-    this.workers = await WorkersApi.create(this)
-    this.assets = await AssetsApi.create(this)
-    this.system = await SystemApi.create(this)
-  }
-
-  disconnect() {
-    this.api.disconnect()
-  }
-
-  async untilChainIsSynced() {
-    debug('Waiting for chain to be synced before proceeding.')
-    while (true) {
-      const isSyncing = await this.chainIsSyncing()
-      if (isSyncing) {
-        debug('Still waiting for chain to be synced.')
-        await sleep(1 * 30 * 1000)
-      } else {
-        return
-      }
-    }
-  }
-
-  async chainIsSyncing() {
-    const { isSyncing } = await this.api.rpc.system.health()
-    return isSyncing.isTrue
-  }
-
-  async providerHasMinimumBalance(minimumBalance) {
-    const providerAccountId = this.identities.key.address
-    return this.balances.hasMinimumBalanceOf(providerAccountId, minimumBalance)
-  }
-
-  async providerIsActiveWorker() {
-    return this.workers.isRoleAccountOfStorageProvider(this.storageProviderId, this.identities.key.address)
-  }
-
-  executeWithAccountLock(func) {
-    return this.asyncLock.acquire('tx-queue', func)
-  }
-
-  static matchingEvents(subscribed = [], events = []) {
-    const filtered = events.filter((record) => {
-      const { event } = record
-
-      // Skip events we're not interested in.
-      const matching = subscribed.filter((value) => {
-        if (value[0] === '*' && value[1] === '*') {
-          return true
-        } else if (value[0] === '*') {
-          return event.method === value[1]
-        } else if (value[1] === '*') {
-          return event.section === value[0]
-        } else {
-          return event.section === value[0] && event.method === value[1]
-        }
-      })
-      return matching.length > 0
-    })
-
-    return filtered.map((record) => {
-      const { event } = record
-      const types = event.typeDef
-      const payload = new Map()
-
-      // this check may be unnecessary, but doing it just in case
-      if (event.data) {
-        event.data.forEach((data, index) => {
-          const type = types[index].type
-          payload.set(index, { type, data })
-        })
-      }
-      const fullName = `${event.section}.${event.method}`
-      debugTx(`matched event: ${fullName} =>`, event.data && event.data.join(', '))
-      return [fullName, payload]
-    })
-  }
-
-  /*
-   * signAndSend() with nonce tracking, to enable concurrent sending of transactions
-   * so that they can be included in the same block. Allows you to use the accountId instead
-   * of the key, without requiring an external Signer configured on the underlying ApiPromise
-   *
-   * If the subscribed events are given, then the matchedEvents will be returned in the resolved
-   * value.
-   * Resolves when a transaction finalizes with a successful dispatch (for both signed and root origins)
-   * Rejects in all other cases.
-   * Will also reject on timeout if the transaction doesn't finalize in time.
-   */
-  async signAndSend(accountId, tx, subscribed) {
-    // Accept both a string or AccountId as argument
-    accountId = this.identities.keyring.encodeAddress(accountId)
-
-    // Throws if keyPair is not found
-    const fromKey = this.identities.keyring.getPair(accountId)
-
-    // Key must be unlocked to use
-    if (fromKey.isLocked) {
-      throw new Error('Must unlock key before using it to sign!')
-    }
-
-    const callbacks = {
-      // Functions to be called when the submitted transaction is finalized. They are initialized,
-      // after the transaction is submitted, to the resolve and reject functions of the final
-      // promise returned by signAndSend
-      // on extrinsic success
-      onFinalizedSuccess: null,
-      // on extrinsic failure
-      onFinalizedFailed: null,
-      // Function assigned when transaction is successfully submitted. Invoking it unsubscribes from
-      // listening to tx status updates.
-      unsubscribe: null,
-    }
-
-    // object used to communicate back information from the tx updates handler
-    const out = {
-      lastResult: { status: {} },
-    }
-
-    // synchronize access to nonce
-    await this.executeWithAccountLock(async () => {
-      const nonce = await this.api.rpc.system.accountNextIndex(accountId)
-      const signed = tx.sign(fromKey, { nonce })
-      const txhash = signed.hash
-
-      try {
-        callbacks.unsubscribe = await signed.send(
-          RuntimeApi.createTxUpdateHandler(callbacks, { nonce, txhash, subscribed }, out)
-        )
-
-        const serialized = JSON.stringify({
-          nonce,
-          txhash,
-          tx: signed.toHex(),
-        })
-
-        // We are depending on the behaviour that at this point the status is Ready.
-        // Elaboration: when the tx is rejected (and therefore isn't added to the
-        // tx pool ready queue), the status is not updated and .send() throws,
-        // so we don't reach this code.
-        if (out.lastResult.status.isFuture) {
-          debugTx(`Warning: Submitted Tx with future nonce: ${serialized}`)
-        } else {
-          debugTx(`Submitted: ${serialized}`)
-        }
-      } catch (err) {
-        const errstr = err.toString()
-        debugTx(`Rejected: ${errstr} txhash: ${txhash} nonce: ${nonce}`)
-        throw err
-      }
-    })
-
-    // Here again we assume that the transaction has been accepted into the tx pool
-    // and status was updated.
-    // We cannot get tx updates for a future tx so return now to avoid blocking caller
-    if (out.lastResult.status.isFuture) {
-      return {}
-    }
-
-    // Return a promise that will resolve when the transaction finalizes.
-    // On timeout it will be rejected. Timeout is a workaround for dealing with the
-    // fact that if rpc connection is lost to node we have no way of detecting it or recovering.
-    // Timeout can also occur if a transaction that was part of batch of transactions submitted
-    // gets usurped.
-    return new Promise((resolve, reject) => {
-      callbacks.onFinalizedSuccess = resolve
-      callbacks.onFinalizedFailed = reject
-    }).timeout(TX_TIMEOUT)
-  }
-
-  /*
-   * Sign and send a transaction expect event from
-   * module and return specific(index) value from event data
-   */
-  async signAndSendThenGetEventResult(senderAccountId, tx, { module, event, index, type }) {
-    if (!module || !event || index === undefined || !type) {
-      throw new Error('MissingSubscribeEventDetails')
-    }
-
-    const subscribed = [[module, event]]
-
-    const { mappedEvents } = await this.signAndSend(senderAccountId, tx, subscribed)
-
-    if (!mappedEvents) {
-      // The tx was a future so it was not possible and will not be possible to get events
-      throw new Error('NoEventsWereCaptured')
-    }
-
-    if (!mappedEvents.length) {
-      // our expected event was not emitted
-      throw new Error('ExpectedEventNotFound')
-    }
-
-    // fix - we may not necessarily want the first event
-    // when there are multiple instances of the same event
-    const firstEvent = mappedEvents[0]
-
-    if (firstEvent[0] !== `${module}.${event}`) {
-      throw new Error('WrongEventCaptured')
-    }
-
-    const payload = firstEvent[1]
-    if (!payload.has(index)) {
-      throw new Error('DataIndexOutOfRange')
-    }
-
-    const value = payload.get(index)
-    if (value.type !== type) {
-      throw new Error('DataTypeNotExpectedType')
-    }
-
-    return value.data
-  }
-
-  static createTxUpdateHandler(callbacks, submittedTx, out = {}) {
-    const { nonce, txhash, subscribed } = submittedTx
-
-    return function handleTxUpdates(result) {
-      const { events = [], status } = result
-      const { unsubscribe, onFinalizedFailed, onFinalizedSuccess } = callbacks
-
-      if (!result || !status) {
-        return
-      }
-
-      out.lastResult = result
-
-      const txinfo = () => {
-        return JSON.stringify({
-          nonce,
-          txhash,
-        })
-      }
-
-      if (result.isError) {
-        unsubscribe()
-
-        debugTx(`Error: ${status.type}`, txinfo())
-
-        onFinalizedFailed &&
-          onFinalizedFailed({ err: status.type, result, tx: status.isUsurped ? status.asUsurped : undefined })
-      } else if (result.isCompleted) {
-        unsubscribe()
-
-        debugTx('Finalized', txinfo())
-
-        const mappedEvents = RuntimeApi.matchingEvents(subscribed, events)
-        const failed = result.findRecord('system', 'ExtrinsicFailed')
-        const success = result.findRecord('system', 'ExtrinsicSuccess')
-        const sudid = result.findRecord('sudo', 'Sudid')
-        const sudoAsDone = result.findRecord('sudo', 'SudoAsDone')
-
-        if (failed) {
-          const {
-            event: { data },
-          } = failed
-          const dispatchError = data[0]
-          onFinalizedFailed({
-            err: 'ExtrinsicFailed',
-            mappedEvents,
-            result,
-            block: status.asCompleted,
-            dispatchError, // we get module number/id and index into the Error enum
-          })
-        } else if (success) {
-          // Note: For root origin calls, the dispatch error is logged to the joystream-node
-          // console, we cannot get it in the events
-          if (sudid) {
-            const dispatchSuccess = sudid.event.data[0]
-            if (dispatchSuccess.isOk) {
-              onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-            } else {
-              onFinalizedFailed({ err: 'SudoFailed', mappedEvents, result, block: status.asCompleted })
-            }
-          } else if (sudoAsDone) {
-            const dispatchSuccess = sudoAsDone.event.data[0]
-            if (dispatchSuccess.isOk) {
-              onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-            } else {
-              onFinalizedFailed({ err: 'SudoAsFailed', mappedEvents, result, block: status.asCompleted })
-            }
-          } else {
-            onFinalizedSuccess({ mappedEvents, result, block: status.asCompleted })
-          }
-        }
-      }
-    }
-  }
-}
-
-module.exports = {
-  RuntimeApi,
-}
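
End-to-end, the removed RuntimeApi tied these pieces together. A minimal sketch of connecting, waiting for sync, and submitting a transaction through the nonce-tracked signAndSend follows; the endpoint, account file, worker id, and destination address are placeholders.

const { RuntimeApi } = require('@joystream/storage-runtime-api')

async function main() {
  const api = await RuntimeApi.create({
    provider_url: 'ws://localhost:9944', // placeholder endpoint
    account_file: './storage-role-key.json', // placeholder key file
    storageProviderId: 0, // placeholder worker id
  })

  await api.untilChainIsSynced()

  // signAndSend resolves on successful finalization and rejects on dispatch error or timeout
  const tx = api.api.tx.balances.transfer('<destination-address>', 1)
  await api.signAndSend(api.identities.key.address, tx)

  api.disconnect()
}

main().catch(console.error)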

+ 0 - 58
storage-node/packages/runtime-api/package.json

@@ -1,58 +0,0 @@
-{
-  "name": "@joystream/storage-runtime-api",
-  "private": true,
-  "version": "0.1.0",
-  "description": "Runtime API abstraction for Joystream Storage Node",
-  "author": "Joystream",
-  "homepage": "https://github.com/Joystream/joystream",
-  "bugs": {
-    "url": "https://github.com/Joystream/joystream/issues"
-  },
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/Joystream/joystream.git"
-  },
-  "license": "GPL-3.0-only",
-  "contributors": [
-    {
-      "name": "Joystream",
-      "url": "https://joystream.org/"
-    }
-  ],
-  "keywords": [
-    "joystream",
-    "storage",
-    "node",
-    "runtime"
-  ],
-  "os": [
-    "darwin",
-    "linux"
-  ],
-  "engines": {
-    "node": ">=14.0.0"
-  },
-  "volta": {
-    "extends": "../../package.json"
-  },
-  "scripts": {
-    "test": "mocha 'test/**/*.js' --exit",
-    "lint": "eslint '**/*.js' --ignore-pattern 'test/**/*.js'"
-  },
-  "devDependencies": {
-    "chai": "^4.2.0",
-    "eslint": "^7.6.0",
-    "mocha": "^5.2.0",
-    "sinon": "^7.3.2",
-    "sinon-chai": "^3.3.0",
-    "temp": "^0.9.0"
-  },
-  "dependencies": {
-    "@joystream/storage-utils": "^0.1.0",
-    "@joystream/types": "^0.16.1",
-    "@polkadot/api": "5.9.1",
-    "async-lock": "^1.2.0",
-    "lodash": "^4.17.11",
-    "password-prompt": "^1.1.2"
-  }
-}

+ 0 - 33
storage-node/packages/runtime-api/system.js

@@ -1,33 +0,0 @@
-'use strict'
-
-const debug = require('debug')('joystream:runtime:system')
-
-/*
- * Add system functionality to the substrate API.
- */
-class SystemApi {
-  static async create(base) {
-    const ret = new SystemApi()
-    ret.base = base
-    await SystemApi.init()
-    return ret
-  }
-
-  static async init() {
-    debug('Init')
-  }
-
-  /*
-   * Check the running chain for the development setup.
-   */
-  async isDevelopmentChain() {
-    const developmentChainName = 'Development'
-    const runningChainName = await this.base.api.rpc.system.chain()
-
-    return runningChainName.toString() === developmentChainName
-  }
-}
-
-module.exports = {
-  SystemApi,
-}
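
The removed SystemApi was mostly a guard for development-only commands; a sketch, assuming a connected RuntimeApi instance `api`:

async function assertDevelopmentChain(api) {
  // Compares the running chain name against 'Development'
  if (!(await api.system.isDevelopmentChain())) {
    throw new Error('This operation is only allowed on a Development chain')
  }
}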

+ 0 - 48
storage-node/packages/runtime-api/test/assets.js

@@ -1,48 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('Assets', () => {
-  let api
-  before(async () => {
-    api = await RuntimeApi.create()
-    await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-  })
-
-  it('returns DataObjects for a content ID', async () => {
-    const obj = await api.assets.getDataObject('foo')
-    expect(obj).to.be.null
-  })
-
-  it('can check the liaison for a DataObject', async () => {
-    expect(async () => {
-      await api.assets.checkLiaisonForDataObject('foo', 'bar')
-    }).to.throw
-  })
-
-  // Needs properly staked accounts
-  it('can accept content')
-  it('can reject content')
-  it('can create a storage relationship for content')
-  it('can toggle a storage relationship to ready state')
-})

+ 0 - 44
storage-node/packages/runtime-api/test/balances.js

@@ -1,44 +0,0 @@
-/*
- * This file is part of the storage node for the Joystream project.
- * Copyright (C) 2019 Joystream Contributors
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <https://www.gnu.org/licenses/>.
- */
-
-'use strict'
-
-const expect = require('chai').expect
-
-const { RuntimeApi } = require('@joystream/storage-runtime-api')
-
-describe('Balances', () => {
-  let api
-  let key
-  before(async () => {
-    api = await RuntimeApi.create()
-    key = await api.identities.loadUnlock('test/data/edwards_unlocked.json')
-  })
-
-  it('returns free balance for an account', async () => {
-    const balance = await api.balances.availableBalance(key.address)
-    // Should be exactly zero
-    expect(balance.cmpn(0)).to.equal(0)
-  })
-
-  it('checks whether a minimum balance exists', async () => {
-    // A minimum of 0 should exist, but no more.
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 0)).to.be.true
-    expect(await api.balances.hasMinimumBalanceOf(key.address, 1)).to.be.false
-  })
-})

Too many files were changed in this commit, so some files are not shown.