
Merge pull request #2830 from mnaamani/tests-giza-setup-new-chain

Tests: Giza setup new chain scripts
Mokhtar Naamani 3 years ago
commit 1468cb6c38
70 changed files with 1298 additions and 926 deletions
  1. .env (+35 -23)
  2. .github/workflows/network-tests.yml (+4 -0)
  3. .github/workflows/run-network-tests.yml (+14 -48)
  4. .github/workflows/storage-node.yml (+6 -4)
  5. README.md (+1 -4)
  6. build-apps-docker.sh (+0 -24)
  7. build-packages.sh (+1 -1)
  8. cli/package.json (+1 -1)
  9. cli/src/base/UploadCommandBase.ts (+1 -1)
  10. colossus.Dockerfile (+7 -2)
  11. distributor-node.Dockerfile (+0 -2)
  12. distributor-node/config/docker/config.docker.yml (+0 -26)
  13. distributor-node/package.json (+25 -25)
  14. distributor-node/src/commands/dev/batchUpload.ts (+1 -1)
  15. distributor-node/src/services/content/ContentService.ts (+1 -1)
  16. docker-compose.multi-storage.yml (+0 -42)
  17. docker-compose.yml (+200 -94)
  18. multi-storage.sh (+0 -37)
  19. package.json (+1 -2)
  20. query-node/bootstrap.sh (+0 -13)
  21. query-node/kill-img.sh (+0 -17)
  22. query-node/kill.sh (+6 -10)
  23. query-node/package.json (+2 -4)
  24. query-node/run-tests.sh (+0 -35)
  25. query-node/start-img.sh (+0 -23)
  26. query-node/start.sh (+9 -19)
  27. setup.sh (+4 -0)
  28. start.sh (+35 -50)
  29. storage-node-v2/package.json (+1 -1)
  30. storage-node-v2/src/services/helpers/hashing.ts (+1 -1)
  31. tests/network-tests/.env (+5 -2)
  32. tests/network-tests/.gitignore (+2 -0)
  33. tests/network-tests/.prettierignore (+1 -0)
  34. tests/network-tests/run-local-node-test.sh (+0 -64)
  35. tests/network-tests/run-migration-tests.sh (+60 -92)
  36. tests/network-tests/run-storage-node-tests.sh (+0 -50)
  37. tests/network-tests/run-test-node-docker.sh (+78 -0)
  38. tests/network-tests/run-test-node.sh (+9 -4)
  39. tests/network-tests/run-tests.sh (+26 -61)
  40. tests/network-tests/src/Api.ts (+256 -49)
  41. tests/network-tests/src/Scenario.ts (+43 -5)
  42. tests/network-tests/src/WorkingGroups.ts (+19 -0)
  43. tests/network-tests/src/fixtures/councilAssignment.ts (+25 -0)
  44. tests/network-tests/src/fixtures/membershipModule.ts (+13 -4)
  45. tests/network-tests/src/fixtures/proposalsModule.ts (+3 -2)
  46. tests/network-tests/src/fixtures/sudoHireLead.ts (+2 -1)
  47. tests/network-tests/src/fixtures/workingGroupModule.ts (+4 -3)
  48. tests/network-tests/src/flows/council/assign.ts (+48 -0)
  49. tests/network-tests/src/flows/council/setup.ts (+2 -2)
  50. tests/network-tests/src/flows/membership/creatingMemberships.ts (+2 -2)
  51. tests/network-tests/src/flows/proposals/manageLeaderRole.ts (+5 -4)
  52. tests/network-tests/src/flows/proposals/updateRuntime.ts (+1 -1)
  53. tests/network-tests/src/flows/proposals/workingGroupMintCapacityProposal.ts (+4 -3)
  54. tests/network-tests/src/flows/workingGroup/atLeastValueBug.ts (+4 -4)
  55. tests/network-tests/src/flows/workingGroup/leaderSetup.ts (+19 -10)
  56. tests/network-tests/src/flows/workingGroup/manageWorkerAsLead.ts (+33 -13)
  57. tests/network-tests/src/flows/workingGroup/manageWorkerAsWorker.ts (+5 -4)
  58. tests/network-tests/src/flows/workingGroup/workerPayout.ts (+5 -4)
  59. tests/network-tests/src/misc/createCategoriesFixture.ts (+27 -0)
  60. tests/network-tests/src/misc/createChannelsAsMemberFixture.ts (+31 -0)
  61. tests/network-tests/src/misc/createVideosAsMemberFixture.ts (+27 -0)
  62. tests/network-tests/src/misc/mockContentFlow.ts (+68 -0)
  63. tests/network-tests/src/misc/updateAllWorkerRoleAccountsFlow.ts (+15 -0)
  64. tests/network-tests/src/misc/updateWorkerAccountsFixture.ts (+14 -0)
  65. tests/network-tests/src/scenarios/content-directory.ts (+2 -1)
  66. tests/network-tests/src/scenarios/full.ts (+8 -4)
  67. tests/network-tests/src/scenarios/setup-new-chain.ts (+26 -0)
  68. tests/network-tests/src/sender.ts (+1 -1)
  69. tests/network-tests/test-setup-new-chain.sh (+29 -0)
  70. yarn.lock (+20 -25)

+ 35 - 23
.env

@@ -9,39 +9,51 @@ INDEXER_DB_NAME=query_node_indexer
 DB_NAME=query_node_processor
 DB_USER=postgres
 DB_PASS=postgres
-DB_HOST=db
+# This value will not be used by query-node docker containers.
+# When running query-node with docker these services will always use the db service
+DB_HOST=localhost
 DB_PORT=5432
 DEBUG=index-builder:*
 TYPEORM_LOGGING=error
 
-###########################
-#    Indexer options      #
-###########################
-
+## Indexer options
 # Block height to start indexing from.
 # Note, that if there are already some indexed events, this setting is ignored
 BLOCK_HEIGHT=0
 
-###############################
-#    Processor GraphQL API    #
-###############################
+# Query node GraphQL server port
+GRAPHQL_SERVER_PORT=8081
+
+# Hydra indexer gateway GraphQL server port
+HYDRA_INDEXER_GATEWAY_PORT=4000
+
+# Default GraphQL server host. It is required during "query-node config:dev"
+GRAPHQL_SERVER_HOST=localhost
+
+# Websocket RPC endpoint containers will use.
+JOYSTREAM_NODE_WS=ws://joystream-node:9944/
+
+# Query node which colossus will use
+# TODO: Colossus should take a full Url instead
+COLOSSUS_QUERY_NODE_HOST=graphql-server:${GRAPHQL_SERVER_PORT}
 
-GRAPHQL_SERVER_PORT=4002
-GRAPHQL_SERVER_HOST=graphql-server
+# Query node which distributor will use
+DISTRIBUTOR_QUERY_NODE_URL=http://graphql-server:${GRAPHQL_SERVER_PORT}/graphql
 
-WARTHOG_APP_PORT=4002
-WARTHOG_APP_HOST=hydra-indexer-gateway
+# Indexer gateway used by processor. If you don't use the local indexer set this to a remote gateway
+PROCESSOR_INDEXER_GATEWAY=http://hydra-indexer-gateway:${HYDRA_INDEXER_GATEWAY_PORT}/graphql
 
-# Default configuration is to use the docker container
-WS_PROVIDER_ENDPOINT_URI=ws://joystream-node:9944/
+# Colossus services identities
+COLOSSUS_1_WORKER_ID=0
+COLOSSUS_1_ACCOUNT_URI=//testing//worker//Storage//${COLOSSUS_1_WORKER_ID}
+COLOSSUS_2_WORKER_ID=1
+COLOSSUS_2_ACCOUNT_URI=//testing//worker//Storage//${COLOSSUS_2_WORKER_ID}
 
-# If running joystream-node on host machine you can use following address to reach it instead
-# WS_PROVIDER_ENDPOINT_URI=ws://host.docker.internal:9944/
+# Distributor node services identities
+DISTRIBUTOR_1_WORKER_ID=0
+DISTRIBUTOR_1_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_1_WORKER_ID}
+DISTRIBUTOR_2_WORKER_ID=1
+DISTRIBUTOR_2_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_2_WORKER_ID}
 
-######################
-#    Storage Node    #
-######################
-COLOSSUS_PORT=3333
-QUERY_NODE_HOST=${GRAPHQL_SERVER_HOST}:${GRAPHQL_SERVER_PORT}
-WORKER_ID=0
-ACCOUNT_URI=//Alice
+# joystream/node docker image tag
+JOYSTREAM_NODE_TAG=latest
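
For reference, a quick way to check which on-chain addresses these service SURIs resolve to. This is a hedged sketch rather than part of the change: it assumes the keys derive from the default Substrate dev phrase (SURI_MINI_SECRET stays empty) and reuses the dockerized subkey that run-test-node-docker.sh further down relies on.

```bash
# Load the .env values, then inspect the derived addresses with subkey
set -a; . .env; set +a
docker run --rm docker.io/parity/subkey:2.0.1 inspect "${COLOSSUS_1_ACCOUNT_URI}" --output-type json | jq -r .ss58Address
docker run --rm docker.io/parity/subkey:2.0.1 inspect "${DISTRIBUTOR_1_ACCOUNT_URI}" --output-type json | jq -r .ss58Address
```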

+ 4 - 0
.github/workflows/network-tests.yml

@@ -18,6 +18,8 @@ jobs:
       run: |
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace @joystream/cli build
         yarn workspace network-tests checks --quiet
 
   network_build_osx:
@@ -36,4 +38,6 @@ jobs:
       run: |
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace @joystream/cli build
         yarn workspace network-tests checks --quiet

+ 14 - 48
.github/workflows/run-network-tests.yml

@@ -104,8 +104,15 @@ jobs:
           yarn workspace @joystream/cli build
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
+      - name: Install joystream-cli @joystream/cli/0.5.1
+        run: npm -g install @joystream/cli
       - name: Execute network tests
-        run: RUNTIME=sumer tests/network-tests/run-migration-tests.sh full
+        run: |
+          export HOME=$(pwd)
+          mkdir -p ${HOME}/.local/share/joystream-cli
+          joystream-cli api:setUri ws://localhost:9944
+          export RUNTIME=sumer
+          tests/network-tests/run-migration-tests.sh
 
   basic_runtime:
     name: Integration Tests (New Chain)
@@ -128,13 +135,15 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
       - name: Execute network tests
         run: tests/network-tests/run-tests.sh full
 
-  query_node:
-    name: Query Node Integration Tests
+  new_chain_setup:
+    name: Initialize new chain
     needs: build_images
     runs-on: ubuntu-latest
     steps:
@@ -155,6 +164,7 @@ jobs:
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
           yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure query-node builds
         run: yarn workspace query-node-root build
       - name: Ensure tests are runnable
@@ -162,49 +172,5 @@ jobs:
       # Bring up hydra query-node development instance, then run content directory
       # integration tests
       - name: Execute Tests
-        run: |
-          docker-compose up -d joystream-node
-          query-node/run-tests.sh
+        run: tests/network-tests/test-setup-new-chain.sh
 
-  storage_node:
-    name: Storage Node Tests
-    needs: build_images
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '14.x'
-      - name: Get artifacts
-        uses: actions/download-artifact@v2
-        with:
-          name: ${{ needs.build_images.outputs.use_artifact }}
-      - name: Install artifacts
-        run: |
-          docker load --input joystream-node-docker-image.tar.gz
-          docker images
-      - name: Install packages and dependencies
-        run: |
-          yarn install --frozen-lockfile
-          yarn workspace @joystream/types build
-      - name: Build storage node
-        run: yarn workspace storage-node build
-      - name: Start Services
-        run: |
-          docker-compose up -d ipfs
-          docker-compose up -d joystream-node
-      - name: Configure and start development storage node
-        run: |
-          DEBUG=joystream:* yarn storage-cli dev-init
-          docker-compose up -d colossus
-      - name: Test uploading
-        run: |
-          sleep 6
-          export DEBUG=joystream:*
-          yarn storage-cli upload ./tests/network-tests/assets/joystream.MOV 1 0
-          # Wait for storage-node to set status Accepted on uploaded content
-          sleep 6
-          cd utils/api-scripts/
-          # Assume only one accepted data object was created
-          CONTENT_ID=`yarn --silent script get-first-content-id | tail -n2 | head -n1`
-          yarn storage-cli download ${CONTENT_ID} ./joystream.mov

+ 6 - 4
.github/workflows/storage-node.yml

@@ -18,8 +18,9 @@ jobs:
       run: |
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
-        yarn workspace storage-node checks --quiet
-        yarn workspace storage-node build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace storage-node-v2 lint --quiet
+        yarn workspace storage-node-v2 build
 
   storage_node_build_osx:
     name: MacOS Checks
@@ -37,5 +38,6 @@ jobs:
       run: |
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
-        yarn workspace storage-node checks --quiet
-        yarn workspace storage-node build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace storage-node-v2 lint --quiet
+        yarn workspace storage-node-v2 build

+ 1 - 4
README.md

@@ -28,12 +28,9 @@ After cloning the repo run the following initialization scripts:
 # build local npm packages
 yarn build:packages
 
-# Build joystream/node image
+# Build joystream/node docker image
 yarn build:node:docker
 
-# Build applications docker image
-yarn build:apps:docker
-
 # start a local development network
 yarn start
 ```

+ 0 - 24
build-apps-docker.sh

@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-if ! command -v docker-compose &> /dev/null
-then
-  echo "docker-compose not found. Skipping docker image builds."
-  exit 0
-fi
-
-# Build processor/graphql-server docker image
-docker-compose build graphql-server
-
-# Build colossus docker image
-echo "Building colossus docker image..."
-docker-compose build colossus
-
-# Build distributor docker image
-echo "Building distributor docker image..."
-docker-compose build distributor-node
-
-# Build the pioneer docker image
-echo "Building pioneer docker image"
-docker-compose build pioneer

+ 1 - 1
build-npm-packages.sh → build-packages.sh

@@ -9,4 +9,4 @@ yarn workspace query-node-root build
 yarn workspace @joystream/cli build
 yarn workspace storage-node-v2 build
 yarn workspace @joystream/distributor-cli build
-# yarn workspace pioneer build
+yarn workspace pioneer build

+ 1 - 1
cli/package.json

@@ -44,7 +44,7 @@
     "proper-lockfile": "^4.1.1",
     "slug": "^2.1.1",
     "tslib": "^1.11.1",
-    "blake3": "^2.1.4",
+    "blake3-wasm": "^2.1.5",
     "multihashes": "^4.0.3",
     "@apollo/client": "^3.2.5",
     "cross-fetch": "^3.0.6",

+ 1 - 1
cli/src/base/UploadCommandBase.ts

@@ -20,7 +20,7 @@ import mimeTypes from 'mime-types'
 import { Assets } from '../schemas/typings/Assets.schema'
 import chalk from 'chalk'
 import { DataObjectCreationParameters } from '@joystream/types/storage'
-import { createHash } from 'blake3'
+import { createHash } from 'blake3-wasm'
 import * as multihash from 'multihashes'
 import { u8aToHex, formatBalance } from '@polkadot/util'
 import { KeyringPair } from '@polkadot/keyring/types'

+ 7 - 2
colossus.Dockerfile

@@ -16,7 +16,7 @@ VOLUME ["/data", "/keystore"]
 ENV WS_PROVIDER_ENDPOINT_URI=ws://not-set
 ENV COLOSSUS_PORT=3333
 ENV QUERY_NODE_HOST=not-set
-ENV WORKER_ID=not-set
+ENV WORKER_ID=
 # - set external key file using the `/keystore` volume
 ENV ACCOUNT_KEYFILE=
 ENV ACCOUNT_PWD=
@@ -32,4 +32,9 @@ ENV ACCOUNT_URI=
 EXPOSE ${COLOSSUS_PORT}
 
 WORKDIR /joystream/storage-node-v2
-ENTRYPOINT yarn storage-node server --queryNodeHost ${QUERY_NODE_HOST} --port ${COLOSSUS_PORT} --uploads /data --worker ${WORKER_ID} --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} --keyFile=${ACCOUNT_KEYFILE} --elasticSearchHost=${ELASTIC_SEARCH_HOST}
+ENTRYPOINT yarn storage-node server --queryNodeHost ${QUERY_NODE_HOST} \
+    --port ${COLOSSUS_PORT} --uploads /data  \
+    --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} \
+    --elasticSearchHost=${ELASTIC_SEARCH_HOST} \
+    --accountUri=${ACCOUNT_URI} \
+    --worker ${WORKER_ID}
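
A hedged sketch of invoking the image with the environment the updated ENTRYPOINT consumes. The values mirror the .env defaults above; SYNC_INTERVAL is an assumption here, ELASTIC_SEARCH_HOST is left unset, and empty-valued flags may need pruning depending on the CLI's validation.

```bash
# Run the colossus image standalone against a local chain and query node.
# Assumes the image has been built from colossus.Dockerfile.
docker run --rm -p 3333:3333 \
  -e WS_PROVIDER_ENDPOINT_URI=ws://joystream-node:9944/ \
  -e QUERY_NODE_HOST=graphql-server:8081 \
  -e WORKER_ID=0 \
  -e ACCOUNT_URI=//testing//worker//Storage//0 \
  -e SYNC_INTERVAL=1 \
  joystream/colossus:latest
```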

+ 0 - 2
distributor-node.Dockerfile

@@ -22,7 +22,5 @@ RUN \
   yarn --frozen-lockfile --production &&\
   yarn cache clean
 
-ENV CONFIG_PATH ./distributor-node/config/docker/config.docker.yml
-
 ENTRYPOINT ["yarn", "joystream-distributor"]
 CMD ["start"]

+ 0 - 26
distributor-node/config/docker/config.docker.yml

@@ -1,26 +0,0 @@
-id: distributor-node-docker
-endpoints:
-  queryNode: http://graphql-server-mnt:4002/graphql
-  joystreamNodeWs: ws://joystream-node:9944
-  # elasticSearch: http://elasticsearch:9200
-directories:
-  assets: /data
-  cacheState: /cache
-  logs: /logs
-log:
-  console: info
-  # elastic: info
-limits:
-  storage: 100G
-  maxConcurrentStorageNodeDownloads: 100
-  maxConcurrentOutboundConnections: 300
-  outboundRequestsTimeout: 5000
-intervals:
-  saveCacheState: 60
-  checkStorageNodeResponseTimes: 60
-  cacheCleanup: 60
-port: 3334
-keys:
-  - suri: //Alice
-buckets: 'all'
-workerId: 0

+ 25 - 25
distributor-node/package.json

@@ -8,42 +8,43 @@
   },
   "bugs": "https://github.com/Joystream/joystream/issues",
   "dependencies": {
-    "@joystream/types": "^0.17.0",
-    "@joystream/metadata-protobuf": "^1.0.0",
+    "@apollo/client": "^3.2.5",
     "@elastic/ecs-winston-format": "^1.1.0",
+    "@joystream/metadata-protobuf": "^1.0.0",
+    "@joystream/types": "^0.17.0",
     "@oclif/command": "^1",
     "@oclif/config": "^1",
-    "@oclif/plugin-help": "^2",
-    "@apollo/client": "^3.2.5",
-    "graphql": "^14.7.0",
-    "winston": "^3.3.3",
-    "fast-safe-stringify": "^2.1.1",
+    "@oclif/plugin-help": "^3.2.4",
     "ajv": "^7",
     "axios": "^0.21.1",
+    "blake3-wasm": "^2.1.5",
+    "cors": "^2.8.5",
     "cross-fetch": "^3.1.4",
     "express": "^4.17.1",
-    "express-winston": "^4.1.0",
+    "express-http-proxy": "^1.6.2",
     "express-openapi-validator": "^4.12.4",
+    "express-winston": "^4.1.0",
+    "fast-safe-stringify": "^2.1.1",
     "file-type": "^16.5.1",
+    "graphql": "^14.7.0",
+    "inquirer": "^8.1.2",
+    "js-image-generator": "^1.0.3",
     "lodash": "^4.17.21",
     "lru-cache": "^6.0.0",
+    "multihashes": "^4.0.3",
+    "node-cache": "^5.1.2",
     "node-cleanup": "^2.1.2",
     "proper-lockfile": "^4.1.2",
+    "queue": "^6.0.2",
     "read-chunk": "^3.2.0",
     "send": "^0.17.1",
     "tslib": "^1",
-    "yaml": "^1.10.2",
-    "queue": "^6.0.2",
-    "express-http-proxy": "^1.6.2",
+    "winston": "^3.3.3",
     "winston-elasticsearch": "^0.15.8",
-    "node-cache": "^5.1.2",
-    "cors": "^2.8.5",
-    "inquirer": "^8.1.2",
-    "multihashes": "^4.0.3",
-    "blake3": "^2.1.4",
-    "js-image-generator": "^1.0.3"
+    "yaml": "^1.10.2"
   },
   "devDependencies": {
+    "@adobe/jsonschema2md": "https://github.com/adobe/jsonschema2md",
     "@graphql-codegen/cli": "^1.21.4",
     "@graphql-codegen/import-types-preset": "^1.18.1",
     "@graphql-codegen/typescript": "^1.22.0",
@@ -53,25 +54,24 @@
     "@oclif/test": "^1",
     "@openapitools/openapi-generator-cli": "^2.3.6",
     "@types/chai": "^4",
+    "@types/cors": "^2.8.12",
+    "@types/express-http-proxy": "^1.6.2",
+    "@types/inquirer": "^8.1.1",
     "@types/mocha": "^5",
     "@types/node": "^14",
-    "@types/node-cleanup": "^2.1.1",
-    "@types/express-http-proxy": "^1.6.2",
     "@types/node-cache": "^4.2.5",
+    "@types/node-cleanup": "^2.1.1",
     "@types/send": "^0.17.0",
-    "@types/inquirer": "^8.1.1",
-    "@types/cors": "^2.8.12",
     "chai": "^4",
     "globby": "^10",
     "json-schema-to-typescript": "^10.1.4",
+    "markdown-magic": "^2.5.2",
     "mocha": "^5",
     "nyc": "^14",
     "openapi-typescript": "^4.0.2",
     "ts-node": "^8",
     "typescript": "^3.3",
-    "@adobe/jsonschema2md": "https://github.com/adobe/jsonschema2md",
-    "widdershins": "^4.0.1",
-    "markdown-magic": "^2.5.2"
+    "widdershins": "^4.0.1"
   },
   "engines": {
     "node": ">=14.16.1"
@@ -102,7 +102,7 @@
         "description": "Commands for performing node operator (Distribution Working Group worker) on-chain duties (like accepting bucket invitations, setting node metadata)"
       },
       "dev": {
-        "description":"Developer utility commands"
+        "description": "Developer utility commands"
       }
     }
   },

+ 1 - 1
distributor-node/src/commands/dev/batchUpload.ts

@@ -1,6 +1,6 @@
 import AccountsCommandBase from '../../command-base/accounts'
 import DefaultCommandBase, { flags } from '../../command-base/default'
-import { hash } from 'blake3'
+import { hash } from 'blake3-wasm'
 import { FilesApi, Configuration, TokenRequest } from '../../services/networking/storage-node/generated'
 import { u8aToHex } from '@polkadot/util'
 import * as multihash from 'multihashes'

+ 1 - 1
distributor-node/src/services/content/ContentService.ts

@@ -7,7 +7,7 @@ import { FileContinousReadStream, FileContinousReadStreamOptions } from './FileC
 import FileType from 'file-type'
 import { Readable, pipeline } from 'stream'
 import { NetworkingService } from '../networking'
-import { createHash } from 'blake3'
+import { createHash } from 'blake3-wasm'
 import * as multihash from 'multihashes'
 
 export const DEFAULT_CONTENT_TYPE = 'application/octet-stream'

+ 0 - 42
docker-compose.multi-storage.yml

@@ -1,42 +0,0 @@
-version: '3.4'
-services:
-  colossus-2:
-    image: joystream/colossus:latest
-    restart: on-failure
-    build:
-      context: .
-      dockerfile: colossus.Dockerfile
-    depends_on:
-      - graphql-server
-    volumes:
-      - /data
-      - /keystore
-      - ${ACCOUNT_KEYFILE}:/joystream/storage-node-v2/keyfile
-    ports:
-      - '127.0.0.1:${COLOSSUS_PORT_2}:${COLOSSUS_PORT}'
-    env_file:
-      # relative to working directory where docker-compose was run from
-      - .env
-    environment:
-      WORKER_ID: ${WORKER_ID}
-      ACCOUNT_KEYFILE: /joystream/storage-node-v2/keyfile
-
-  distributor-node-2:
-    image: joystream/distributor-node
-    restart: on-failure
-    build:
-      context: .
-      dockerfile: distributor-node.Dockerfile
-    depends_on:
-      - graphql-server
-    volumes:
-      - /data
-      - /cache
-    ports:
-      - 127.0.0.1:${DISTRIBUTOR_PORT_2}:3334
-    environment:
-      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE: http://${QUERY_NODE_HOST}/graphql
-      JOYSTREAM_DISTRIBUTOR__KEYS: ${KEYS}
-      JOYSTREAM_DISTRIBUTOR__BUCKETS: ${BUCKETS}
-      JOYSTREAM_DISTRIBUTOR__WORKER_ID: ${WORKER_ID}
-    command: ['start']

+ 200 - 94
docker-compose.yml

@@ -4,7 +4,8 @@
 version: '3.4'
 services:
   joystream-node:
-    image: joystream/node:latest
+    image: joystream/node:$JOYSTREAM_NODE_TAG
+    restart: unless-stopped
     build:
       # context is relative to the compose file
       context: .
@@ -12,75 +13,146 @@ services:
       dockerfile: joystream-node.Dockerfile
     container_name: joystream-node
     volumes:
-      - /data
+      - chain-data:/data
     command: --dev --alice --validator --unsafe-ws-external --unsafe-rpc-external --rpc-methods Unsafe --rpc-cors=all --log runtime --base-path /data
     ports:
       - '127.0.0.1:9944:9944'
       - '127.0.0.1:9933:9933'
 
-  colossus:
-    image: joystream/colossus:latest
+  colossus-1:
+    image: node:14
+    container_name: colossus-1
     restart: on-failure
-    build:
-      context: .
-      dockerfile: colossus.Dockerfile
-    depends_on:
-      - graphql-server
     volumes:
-      - /data
-      - /keystore
+      - colossus-1-data:/data
+      - colossus-1-keystore:/keystore
+      - type: bind
+        source: .
+        target: /joystream
+    working_dir: /joystream/storage-node-v2
     ports:
-      - '127.0.0.1:3333:${COLOSSUS_PORT}'
+      - '127.0.0.1:3333:3333'
     env_file:
       # relative to working directory where docker-compose was run from
       - .env
     environment:
-      - COLOSSUS_PORT=3333
-      - QUERY_NODE_HOST=graphql-server-mnt:${GRAPHQL_SERVER_PORT}
-      - WORKER_ID=0
-      - ACCOUNT_URI=//Alice
-      # enable ElasticSearch server
-      # - ELASTIC_SEARCH_HOST=host.docker.internal:9200
+      # ACCOUNT_URI overrides command line arg --accountUri
+      - ACCOUNT_URI=${COLOSSUS_1_ACCOUNT_URI}
+    command: [
+      'yarn', 'storage-node', 'server', '--worker=${COLOSSUS_1_WORKER_ID}', '--port=3333', '--uploads=/data',
+      '--sync', '--syncInterval=1',
+      '--queryNodeHost=${COLOSSUS_QUERY_NODE_HOST}',
+      '--apiUrl=${JOYSTREAM_NODE_WS}'
+    ]
 
-  distributor-node:
-    image: joystream/distributor-node
+  distributor-1:
+    image: node:14
+    container_name: distributor-1
     restart: on-failure
-    build:
-      context: .
-      dockerfile: distributor-node.Dockerfile
-    depends_on:
-      - graphql-server
     volumes:
-      - /data
-      - /cache
+      - distributor-1-data:/data
+      - distributor-1-cache:/cache
+      - distributor-1-logs:/logs
+      - type: bind
+        source: .
+        target: /joystream
+    # let the working_dir be the distributor node to pick up the config.yml file
+    working_dir: /joystream/distributor-node
     ports:
       - 127.0.0.1:3334:3334
+    env_file:
+      # relative to working directory where docker-compose was run from
+      - .env
+    # Node configuration can be overridden via env, for example:
+    environment:
+      JOYSTREAM_DISTRIBUTOR__ID: distributor-1
+      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE: ${DISTRIBUTOR_QUERY_NODE_URL}
+      JOYSTREAM_DISTRIBUTOR__KEYS: "[{\"suri\":\"${DISTRIBUTOR_1_ACCOUNT_URI}\"}]"
+      JOYSTREAM_DISTRIBUTOR__WORKER_ID: ${DISTRIBUTOR_1_WORKER_ID}
+      JOYSTREAM_DISTRIBUTOR__PORT: 3334
+      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS: ${JOYSTREAM_NODE_WS}
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__ASSETS: /data
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__CACHE_STATE: /cache
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__LOGS: /logs
+    #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__ELASTIC_SEARCH: es-endpoint
+    #   JOYSTREAM_DISTRIBUTOR__LOG__CONSOLE: "off"
+    #   JOYSTREAM_DISTRIBUTOR__LOG__FILE: "off"
+    #   JOYSTREAM_DISTRIBUTOR__LOG__ELASTIC: "off"
+    #   JOYSTREAM_DISTRIBUTOR__LIMITS__STORAGE: 50G
+    #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[1,2]"
+    command: ['yarn', 'joystream-distributor', 'start']
+
+  colossus-2:
+    image: node:14
+    container_name: colossus-2
+    restart: on-failure
+    volumes:
+      - colossus-2-data:/data
+      - colossus-2-keystore:/keystore
+      - type: bind
+        source: .
+        target: /joystream
+    working_dir: /joystream/storage-node-v2
+    ports:
+      - '127.0.0.1:3335:3333'
+    env_file:
+      # relative to working directory where docker-compose was run from
+      - .env
+    environment:
+      # ACCOUNT_URI overrides command line arg --accountUri
+      - ACCOUNT_URI=${COLOSSUS_2_ACCOUNT_URI}
+    command: [
+      'yarn', 'storage-node', 'server', '--worker=${COLOSSUS_2_WORKER_ID}', '--port=3333', '--uploads=/data',
+      '--sync', '--syncInterval=1',
+      '--queryNodeHost=${COLOSSUS_QUERY_NODE_HOST}',
+      '--apiUrl=${JOYSTREAM_NODE_WS}'
+    ]
+
+  distributor-2:
+    image: node:14
+    container_name: distributor-2
+    restart: on-failure
+    volumes:
+      - distributor-2-data:/data
+      - distributor-2-cache:/cache
+      - distributor-2-logs:/logs
+      - type: bind
+        source: .
+        target: /joystream
+    # let the working_dir be the distributor node to pick up the config.yml file
+    working_dir: /joystream/distributor-node
+    ports:
+      - 127.0.0.1:3336:3334
+    env_file:
+      # relative to working directory where docker-compose was run from
+      - .env
     # Node configuration can be overridden via env, for example:
     environment:
-      # JOYSTREAM_DISTRIBUTOR__ID: node-id
-      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE: http://${GRAPHQL_SERVER_HOST}:${GRAPHQL_SERVER_PORT}/graphql
-    #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS: sn-endpoint
+      JOYSTREAM_DISTRIBUTOR__ID: distributor-2
+      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE: ${DISTRIBUTOR_QUERY_NODE_URL}
+      JOYSTREAM_DISTRIBUTOR__KEYS: "[{\"suri\":\"${DISTRIBUTOR_2_ACCOUNT_URI}\"}]"
+      JOYSTREAM_DISTRIBUTOR__WORKER_ID: ${DISTRIBUTOR_2_WORKER_ID}
+      JOYSTREAM_DISTRIBUTOR__PORT: 3334
+      JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS: ${JOYSTREAM_NODE_WS}
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__ASSETS: /data
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__CACHE_STATE: /cache
+      JOYSTREAM_DISTRIBUTOR__DIRECTORIES__LOGS: /logs
     #   JOYSTREAM_DISTRIBUTOR__ENDPOINTS__ELASTIC_SEARCH: es-endpoint
-    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__ASSETS: assets-dir
-    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__CACHE_STATE: cache-state-dir
-    #   JOYSTREAM_DISTRIBUTOR__DIRECTORIES__LOGS: logs-dir
     #   JOYSTREAM_DISTRIBUTOR__LOG__CONSOLE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOG__FILE: "off"
     #   JOYSTREAM_DISTRIBUTOR__LOG__ELASTIC: "off"
     #   JOYSTREAM_DISTRIBUTOR__LIMITS__STORAGE: 50G
-    #   JOYSTREAM_DISTRIBUTOR__PORT: 1234
-    #   JOYSTREAM_DISTRIBUTOR__KEYS="[{\"suri\":\"//Bob\"}]"
     #   JOYSTREAM_DISTRIBUTOR__BUCKETS: "[1,2]"
-    #   JOYSTREAM_DISTRIBUTOR__WORKER_ID: 0
-    command: ["start"]
+    command: ['yarn', 'joystream-distributor', 'start']
 
   db:
     image: postgres:12
-    restart: always
+    container_name: db
+    restart: unless-stopped
     ports:
-      - '127.0.0.1:${DB_PORT}:5432'
+      - '127.0.0.1:${DB_PORT}:${DB_PORT}'
     volumes:
-      - /var/lib/postgresql/data
+      - query-node-data:/var/lib/postgresql/data
     env_file:
       # relative to working directory where docker-compose was run from
       - .env
@@ -88,32 +160,20 @@ services:
       POSTGRES_USER: ${DB_USER}
       POSTGRES_PASSWORD: ${DB_PASS}
       POSTGRES_DB: ${INDEXER_DB_NAME}
+      PGPORT: ${DB_PORT}
 
   graphql-server:
-    image: joystream/apps
-    restart: unless-stopped
-    build:
-      context: .
-      dockerfile: apps.Dockerfile
-      args:
-        - WS_PROVIDER_ENDPOINT_URI=${WS_PROVIDER_ENDPOINT_URI}
-    env_file:
-      # relative to working directory where docker-compose was run from
-      - .env
-    ports:
-      - '127.0.0.1:8081:${GRAPHQL_SERVER_PORT}'
-    depends_on:
-      - db
-    command: ['workspace', 'query-node-root', 'query-node:start:prod']
-
-  graphql-server-mnt:
     image: node:14
+    container_name: graphql-server
     restart: unless-stopped
     env_file:
       # relative to working directory where docker-compose was run from
       - .env
+    environment:
+      - DB_HOST=db
+      - WARTHOG_APP_PORT=${GRAPHQL_SERVER_PORT}
     ports:
-      - '127.0.0.1:8081:${GRAPHQL_SERVER_PORT}'
+      - '127.0.0.1:${GRAPHQL_SERVER_PORT}:${GRAPHQL_SERVER_PORT}'
     depends_on:
       - db
     volumes:
@@ -124,39 +184,18 @@ services:
     command: ['yarn', 'workspace', 'query-node-root', 'query-node:start:prod']
 
   processor:
-    image: joystream/apps
-    restart: unless-stopped
-    build:
-      context: .
-      dockerfile: apps.Dockerfile
-      args:
-        - WS_PROVIDER_ENDPOINT_URI=${WS_PROVIDER_ENDPOINT_URI}
-    env_file:
-      # relative to working directory where docker-compose was run from
-      - .env
-    environment:
-      - INDEXER_ENDPOINT_URL=http://hydra-indexer-gateway:${WARTHOG_APP_PORT}/graphql
-      - TYPEORM_HOST=${DB_HOST}
-      - TYPEORM_DATABASE=${DB_NAME}
-      - WS_PROVIDER_ENDPOINT_URI=${WS_PROVIDER_ENDPOINT_URI}
-    volumes:
-      - ./types/augment/all/defs.json:/joystream/query-node/mappings/lib/generated/types/typedefs.json
-    depends_on:
-      - hydra-indexer-gateway
-    command: ['workspace', 'query-node-root', 'processor:start']
-
-  processor-mnt:
     image: node:14
+    container_name: processor
     restart: unless-stopped
     env_file:
       # relative to working directory where docker-compose was run from
       - .env
     environment:
-      - INDEXER_ENDPOINT_URL=http://hydra-indexer-gateway:${WARTHOG_APP_PORT}/graphql
-      - TYPEORM_HOST=${DB_HOST}
+      - INDEXER_ENDPOINT_URL=${PROCESSOR_INDEXER_GATEWAY}
+      - TYPEORM_HOST=db
       - TYPEORM_DATABASE=${DB_NAME}
     depends_on:
-      - hydra-indexer-gateway
+      - db
     volumes:
       - type: bind
         source: .
@@ -166,6 +205,7 @@ services:
 
   indexer:
     image: joystream/hydra-indexer:3.0.0
+    container_name: indexer
     restart: unless-stopped
     env_file:
       # relative to working directory where docker-compose was run from
@@ -175,6 +215,8 @@ services:
       - INDEXER_WORKERS=5
       - REDIS_URI=redis://redis:6379/0
       - TYPES_JSON=types.json
+      - WS_PROVIDER_ENDPOINT_URI=${JOYSTREAM_NODE_WS}
+      - DB_HOST=db
     depends_on:
       - db
       - redis
@@ -185,6 +227,7 @@ services:
 
   hydra-indexer-gateway:
     image: joystream/hydra-indexer-gateway:3.0.0
+    container_name: hydra-indexer-gateway
     restart: unless-stopped
     env_file:
       # relative to working directory where docker-compose was run from
@@ -196,26 +239,89 @@ services:
       - WARTHOG_STARTER_DB_PORT=${DB_PORT}
       - WARTHOG_STARTER_DB_USERNAME=${DB_USER}
       - WARTHOG_STARTER_REDIS_URI=redis://redis:6379/0
-      - WARTHOG_APP_PORT=${WARTHOG_APP_PORT}
-      - PORT=${WARTHOG_APP_PORT}
+      - WARTHOG_APP_PORT=${HYDRA_INDEXER_GATEWAY_PORT}
+      - PORT=${HYDRA_INDEXER_GATEWAY_PORT}
       - DEBUG=*
     ports:
-      - '127.0.0.1:4000:${WARTHOG_APP_PORT}'
+      - '127.0.0.1:${HYDRA_INDEXER_GATEWAY_PORT}:${HYDRA_INDEXER_GATEWAY_PORT}'
     depends_on:
-      - redis
       - db
-      - indexer
+      - redis
 
   redis:
     image: redis:6.0-alpine
-    restart: always
+    container_name: redis
+    restart: unless-stopped
     ports:
       - '127.0.0.1:6379:6379'
 
   pioneer:
-    image: joystream/pioneer
-    build:
-      context: .
-      dockerfile: pioneer.Dockerfile
+    image: nginx
+    container_name: pioneer
+    restart: unless-stopped
+    volumes:
+      - ./pioneer/packages/apps/build:/usr/share/nginx/html
+    ports:
+      - "127.0.0.1:3000:80"
+    environment:
+      - NGINX_PORT=80
+
+  faucet:
+    image: joystream/faucet:giza
+    restart: on-failure
+    container_name: faucet
+    environment:
+      - SCREENING_AUTHORITY_SEED=//Alice
+      - PORT=3002
+      - PROVIDER=ws://joystream-node:9944
+      - ENDOWMENT=0
+    ports:
+      - "127.0.0.1:3002:3002"
+
+  orion:
+    container_name: orion
+    image: joystream/orion
+    environment:
+      - ORION_PORT=6116
+      - ORION_MONGO_HOSTNAME=mongo
+      - ORION_FEATURED_CONTENT_SECRET=password123
     ports:
-      - '127.0.0.1:3000:80'
+      - "6116:6116"
+    depends_on:
+      - mongo
+    restart: always
+
+  mongo:
+    restart: always
+    container_name: mongo
+    image: library/mongo:4.4
+    volumes:
+      - orion-mongo-data:/data/db
+
+volumes:
+  chain-data:
+    driver: local
+  query-node-data:
+    driver: local
+  colossus-1-data:
+    driver: local
+  colossus-1-keystore:
+    driver: local
+  colossus-2-data:
+    driver: local
+  colossus-2-keystore:
+    driver: local
+  distributor-1-logs:
+    driver: local
+  distributor-1-cache:
+    driver: local
+  distributor-1-data:
+    driver: local
+  distributor-2-logs:
+    driver: local
+  distributor-2-cache:
+    driver: local
+  distributor-2-data:
+    driver: local
+  orion-mongo-data:
+    driver: local
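
The compose file now defines two storage and two distribution instances backed by named volumes; a short usage sketch with the service and volume names as defined above:

```bash
# Selectively start the second storage/distribution pair against a running chain
docker-compose up -d joystream-node colossus-2 distributor-2

# Named volumes persist chain, query-node and storage state between runs;
# wipe everything, volumes included, with:
docker-compose down -v
```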

+ 0 - 37
multi-storage.sh

@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# Script to run a second storage node and distributor node on local
-# Make sure to run yarn start prior to running this script
-
-set -a
-. .env
-set +a
-
-export COLOSSUS_PORT_2=3335
-export DISTRIBUTOR_PORT_2=3336
-export KEYS=[//Alice]
-export BUCKETS='all'
-export WORKER_ID=2
-export ACCOUNT_KEYFILE="./types/augment/all/defs.json"
-
-function down()
-{
-    # Stop containers and clear volumes
-    docker-compose -f docker-compose.yml -f docker-compose.multi-storage.yml rm -vsf distributor-node-2
-    docker-compose -f docker-compose.yml -f docker-compose.multi-storage.yml rm -vsf colossus-2
-}
-
-down
-
-trap down EXIT
-
-docker-compose -f docker-compose.yml -f docker-compose.multi-storage.yml run -d --name colossus-2 colossus-2
-
-docker-compose -f docker-compose.yml -f docker-compose.multi-storage.yml run -d --name distributor-node-2 distributor-node-2
-
-echo "use Ctrl+C to shutdown the development network."
-
-while true; do 
-  read
-done

+ 1 - 2
package.json

@@ -5,8 +5,7 @@
   "license": "GPL-3.0-only",
   "scripts": {
     "build:node:docker": "./build-node-docker.sh",
-    "build:apps:docker": "./build-apps-docker.sh",
-    "build:packages": "./build-npm-packages.sh",
+    "build:packages": "./build-packages.sh",
     "setup": "./setup.sh",
     "start": "./start.sh",
     "cargo-checks": "devops/git-hooks/pre-commit && devops/git-hooks/pre-push",

+ 0 - 13
query-node/bootstrap.sh

@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
-cd $SCRIPT_PATH
-
-# Load and export variables from root .env file into shell environment
-set -a
-. ../.env
-. ./generated/graphql-server/.env
-set +a
-
-BOOTSTRAP_DATA_FOLDER=`pwd`/mappings/bootstrap/data node ./mappings/lib/mappings/bootstrap/index.js

+ 0 - 17
query-node/kill-img.sh

@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
-cd $SCRIPT_PATH
-
-set -a
-. ../.env
-set +a
-
-# Only remove query-node related services
-docker-compose rm -vsf processor
-docker-compose rm -vsf graphql-server
-docker-compose rm -vsf indexer
-docker-compose rm -vsf hydra-indexer-gateway
-docker-compose rm -vsf redis
-docker-compose rm -vsf db

+ 6 - 10
query-node/kill.sh

@@ -4,14 +4,10 @@ set -e
 SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
 cd $SCRIPT_PATH
 
-set -a
-. ../.env
-set +a
-
 # Only remove query-node related services
-docker-compose rm -vsf processor-mnt
-docker-compose rm -vsf graphql-server-mnt
-docker-compose rm -vsf indexer
-docker-compose rm -vsf hydra-indexer-gateway
-docker-compose rm -vsf redis
-docker-compose rm -vsf db
+docker-compose -f ../docker-compose.yml rm -vsf processor
+docker-compose -f ../docker-compose.yml rm -vsf graphql-server
+docker-compose -f ../docker-compose.yml rm -vsf indexer
+docker-compose -f ../docker-compose.yml rm -vsf hydra-indexer-gateway
+docker-compose -f ../docker-compose.yml rm -vsf redis
+docker-compose -f ../docker-compose.yml rm -vsf db

+ 2 - 4
query-node/package.json

@@ -8,7 +8,7 @@
     "lint": "yarn workspace query-node-mappings lint",
     "clean": "rm -rf ./generated",
     "clean:query-node": "rm -rf ./generated/graphql-server",
-    "processor:start": "DEBUG=${DEBUG} hydra-processor run -e ../.env",
+    "processor:start": "DEBUG=${DEBUG} hydra-processor run -e generated/graphql-server/.env",
     "query-node:build": "yarn workspace query-node build",
     "query-node:start:dev": "yarn workspace query-node start:dev",
     "query-node:start:prod": "yarn workspace query-node start:prod",
@@ -31,9 +31,7 @@
     "typegen": "rm -rf ./mappings/generated && yarn hydra-typegen typegen manifest.yml --debug",
     "mappings:build": "yarn workspace query-node-mappings build",
     "start:dev": "./start.sh",
-    "start": "./start-img.sh",
-    "kill:dev": "./kill.sh",
-    "kill": "./kill-img.sh"
+    "kill:dev": "./kill.sh"
   },
   "author": "",
   "license": "ISC",

+ 0 - 35
query-node/run-tests.sh

@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
-cd $SCRIPT_PATH
-
-set -a
-. ../.env
-set +a
-
-function cleanup() {
-    # Show tail end of logs for the processor and indexer containers to
-    # see any possible errors
-    (echo "## Processor Logs ##" && docker logs joystream_processor_1 --tail 50) || :
-    (echo "## Indexer Logs ##" && docker logs joystream_indexer_1 --tail 50) || :
-    (echo "## Indexer API Gateway Logs ##" && docker logs joystream_hydra-indexer-gateway_1 --tail 50) || :
-    (echo "## Graphql Server Logs ##" && docker logs joystream_graphql-server_1 --tail 50) || :
-    docker-compose down -v
-}
-
-trap cleanup EXIT
-
-# Bring up db
-docker-compose up -d db
-
-# Migrate the databases
-yarn workspace query-node-root db:prepare
-yarn workspace query-node-root db:migrate
-
-docker-compose up -d graphql-server
-
-# Starting up processor will bring up all services it depends on
-docker-compose up -d processor
-
-time yarn workspace network-tests run-test-scenario content-directory

+ 0 - 23
query-node/start-img.sh

@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
-cd $SCRIPT_PATH
-
-set -a
-. ../.env
-set +a
-
-# Start the joystream-node first to allow fetching Olympia metadata during build (typegen)
-docker-compose up -d joystream-node
-
-# Bring up db
-docker-compose up -d db
-
-# Setup the db
-docker run --rm --env-file ../.env --network joystream_default joystream/apps workspace query-node-root db:prepare
-docker run --rm --env-file ../.env --network joystream_default joystream/apps workspace query-node-root db:migrate
-
-# Start processor and graphql server
-docker-compose up -d processor
-docker-compose up -d graphql-server

+ 9 - 19
query-node/start.sh

@@ -4,33 +4,23 @@ set -e
 SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
 cd $SCRIPT_PATH
 
-set -a
-. ../.env
-set +a
-
-# Start the joystream-node first to allow fetching Olympia metadata during build (typegen)
-docker-compose up -d joystream-node
-
 # Only run codegen if no generated files found
 [ ! -d "generated/" ] && yarn build
 
 # Bring up db
-docker-compose up -d db
-
-# Override DB_HOST for db setup
-export DB_HOST=localhost
+docker-compose -f ../docker-compose.yml up -d db
 
 # Make sure we use dev config for db migrations (prevents "Cannot create database..." and some other errors)
-yarn workspace query-node config:dev
-
+docker-compose -f ../docker-compose.yml run --rm --entrypoint sh graphql-server -c "yarn workspace query-node config:dev"
 # Migrate the databases
-yarn workspace query-node-root db:prepare
-yarn workspace query-node-root db:migrate
+docker-compose -f ../docker-compose.yml run --rm --entrypoint sh graphql-server -c "yarn workspace query-node-root db:prepare"
+docker-compose -f ../docker-compose.yml run --rm --entrypoint sh graphql-server -c "yarn workspace query-node-root db:migrate"
 
-# Set DB_HOST back to docker-service one
-export DB_HOST=db
+# Start indexer and gateway
+docker-compose -f ../docker-compose.yml up -d indexer
+docker-compose -f ../docker-compose.yml up -d hydra-indexer-gateway
 
 # Start processor and graphql server
-docker-compose up -d processor-mnt
-docker-compose up -d graphql-server-mnt
+docker-compose -f ../docker-compose.yml up -d processor
+docker-compose -f ../docker-compose.yml up -d graphql-server
 

+ 4 - 0
setup.sh

@@ -34,6 +34,10 @@ rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
 
 rustup component add rustfmt clippy
 
+# Install substrate keychain tool - install doesn't seem to work lately.
+# cargo install --force subkey --git https://github.com/paritytech/substrate --version 2.0.1 --locked
+# You can use docker instead https://github.com/paritytech/substrate/tree/master/bin/utils/subkey#run-in-a-container
+
 # Volta nodejs, npm, yarn tools manager
 curl https://get.volta.sh | bash
 

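Since the cargo install of subkey is left commented out, a hedged example of the docker alternative referenced above (the same image run-test-node-docker.sh uses):

```bash
# Inspect a dev key with the containerized subkey instead of a local install
docker run --rm -it docker.io/parity/subkey:2.0.1 inspect //Alice
```
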
+ 35 - 50
start.sh

@@ -1,67 +1,52 @@
 #!/usr/bin/env bash
 set -e
 
-# Run a complete joystream development network on your machine using docker.
-# Make sure to run build-docker-images.sh prior to running this script to use
-# the local build.
+# Run a complete joystream development network on your machine using docker
 
-set -a
-. .env
-set +a
+INIT_CHAIN_SCENARIO=${INIT_CHAIN_SCENARIO:=setup-new-chain}
 
-# Clean start!
-docker-compose down -v
+if [ "${PERSIST}" == true ]
+then
+  echo "Services startup up.."
+else
+  # Clean start!
+  docker-compose down -v
 
-function down()
-{
-    # Stop containers and clear volumes
-    docker-compose down -v
-}
+  function down()
+  {
+      # Stop containers and clear volumes
+      docker-compose down -v
+  }
 
-trap down EXIT
+  trap down EXIT
+fi
 
-# Run a local development chain
+## Run a local development chain
 docker-compose up -d joystream-node
 
-## Query Node Infrastructure
-# Initialize a new database for the query node infrastructure
-docker-compose up -d db
-
-# Override DB_HOST for db setup
-export DB_HOST=localhost
-
-# Make sure we use dev config for db migrations (prevents "Cannot create database..." and some other errors)
-yarn workspace query-node config:dev
+## Init the chain with some state
+export SKIP_MOCK_CONTENT=true
+./tests/network-tests/run-test-scenario.sh ${INIT_CHAIN_SCENARIO}
 
-# Migrate the databases
-yarn workspace query-node-root db:prepare
-yarn workspace query-node-root db:migrate
-
-# Set DB_HOST back to docker-service one
-export DB_HOST=db
+## Set sudo as the membership screening authority
+yarn workspace api-scripts set-sudo-as-screening-auth
 
-# Start processor and graphql server
-docker-compose up -d processor
-docker-compose up -d graphql-server
+## Query Node Infrastructure
+./query-node/start.sh
 
 ## Storage Infrastructure
-docker-compose run -d --name colossus --entrypoint sh colossus -c "yarn storage-node dev:init --apiUrl ${WS_PROVIDER_ENDPOINT_URI} && \
-          yarn storage-node server --queryNodeHost ${QUERY_NODE_HOST} --port ${COLOSSUS_PORT} \
-          --uploads /data --worker ${WORKER_ID} --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=1 \
-          --keyFile=${ACCOUNT_KEYFILE} --elasticSearchHost=${ELASTIC_SEARCH_HOST}"
-
-docker-compose up -d distributor-node
-
-# Create a new content directory lead
-yarn workspace api-scripts initialize-content-lead
-
-# Set sudo as the membership screening authority
-yarn workspace api-scripts set-sudo-as-screening-auth
+docker-compose up -d colossus-1
+docker-compose up -d distributor-1
 
+## Pioneer UI
 docker-compose up -d pioneer
 
-echo "use Ctrl+C to shutdown the development network."
-
-while true; do 
-  read
-done
+if [ "${PERSIST}" == true ]
+then
+  echo "All services started in the background"
+else
+  echo "use Ctrl+C to shutdown the development network."
+  while true; do
+    read
+  done
+fi
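
A short usage sketch for the reworked start script; both variables are optional, and setup-new-chain is the default scenario:

```bash
# Clean start; the network is torn down again on Ctrl+C
./start.sh

# Keep services running in the background and choose the chain-init scenario
PERSIST=true INIT_CHAIN_SCENARIO=setup-new-chain ./start.sh
```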

+ 1 - 1
storage-node-v2/package.json

@@ -32,7 +32,7 @@
     "ajv": "^7",
     "await-lock": "^2.1.0",
     "base64url": "^3.0.1",
-    "blake3": "^2.1.4",
+    "blake3-wasm": "^2.1.5",
     "cross-fetch": "^3.1.4",
     "express": "4.17.1",
     "express-openapi-validator": "4.12.4",

+ 1 - 1
storage-node-v2/src/services/helpers/hashing.ts

@@ -1,6 +1,6 @@
 import * as multihash from 'multihashes'
 import fs from 'fs'
-import { createHash } from 'blake3'
+import { createHash } from 'blake3-wasm'
 
 /**
  * Reads the file and calculates its hash. It uses the blake3 hashing algorithm

+ 5 - 2
tests/network-tests/.env

@@ -54,5 +54,8 @@ SLASH_AMOUNT = 2
 STAKE_DECREMENT = 3
 # Mint capacity increment value for working group mint capacity test
 MINT_CAPACITY_INCREMENT = 1000
-# Storage node address to download content from
-STORAGE_NODE_URL = http://localhost:3001/asset/v0
+# Mini-secret or mnemonic used in SURI for deterministic key derivation
+SURI_MINI_SECRET = ""
+# The starting key id to use when running a scenario. This will allow scenario
+# to be able to use all accounts generated in a prior scenario run against the same chain
+START_KEY_ID = 0

+ 2 - 0
tests/network-tests/.gitignore

@@ -0,0 +1,2 @@
+output.json
+data/

+ 1 - 0
tests/network-tests/.prettierignore

@@ -1,2 +1,3 @@
 .nyc_output/
 .tmp/
+data/

+ 0 - 64
tests/network-tests/run-local-node-test.sh

@@ -1,64 +0,0 @@
-# Location that will be mounted as the /data volume in containers
-# This is how we access the initial members and balances files from
-# the containers and where generated chainspec files will be located.
-DATA_PATH="test-data"
-
-# Initial account balance for Alice
-# Alice is the source of funds for all new accounts that are created in the tests.
-ALICE_INITIAL_BALANCE=100000000
-
-rm -Rf ${DATA_PATH}
-mkdir -p ${DATA_PATH}
-
-echo "{
-  \"balances\":[
-    [\"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY\", ${ALICE_INITIAL_BALANCE}]
-  ]
-}" > ${DATA_PATH}/initial-balances.json
-
-# Make Alice a member
-echo '
-  [{
-    "member_id": 0,
-    "root_account": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "controller_account": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "handle":"alice_with_a_long_handle",
-    "avatar_uri":"https://alice.com/avatar.png",
-    "about":"Alice",
-    "name": "Alice",
-    "registered_at_time": 0
-  },
-  {
-    "member_id": 1,
-    "root_account": "5FUeDYFzvvizNhhHyidsuchG7jnToKj7zfimbWBpWKzT9Fqe",
-    "controller_account": "5FUeDYFzvvizNhhHyidsuchG7jnToKj7zfimbWBpWKzT9Fqe",
-    "handle":"bob_with_a_long_handle",
-    "avatar_uri":"https://bob.com/avatar.png",
-    "about":"Bob",
-    "name": "Bob",
-    "registered_at_time": 0
-  }
-]
-' > ${DATA_PATH}/initial-members.json
-
-# Create a chain spec file
-./target/release/chain-spec-builder generate -a 2 -e 2 -k ${DATA_PATH} --chain-spec-path ${DATA_PATH}/chain-spec.json --initial-balances-path ${DATA_PATH}/initial-balances.json --initial-members-path ${DATA_PATH}/initial-members.json --deployment live > ${DATA_PATH}/seeds
-
-jq -c '.id = "js_babylon_test"' ${DATA_PATH}/chain-spec.json > tmp.$$.json && mv tmp.$$.json ${DATA_PATH}/chain-spec.json
-jq -c '.protocolId = "js/babylon/test"' ${DATA_PATH}/chain-spec.json > tmp.$$.json && mv tmp.$$.json ${DATA_PATH}/chain-spec.json
-
-timeout 3s ./target/release/joystream-node --base-path ${DATA_PATH}/alice3  --validator --chain ${DATA_PATH}/chain-spec.json
-
-
-timeout 3s ./target/release/joystream-node --base-path ${DATA_PATH}/bob4  --validator --port 30334 --ws-port 9945 --chain ${DATA_PATH}/chain-spec.json
-
-
-mv  ${DATA_PATH}/auth-0/* ${DATA_PATH}/alice3/chains/js_babylon_test/keystore
-mv  ${DATA_PATH}/auth-1/* ${DATA_PATH}/bob4/chains/js_babylon_test/keystore
-
-rm -Rf ${DATA_PATH}/alice3/chains/js_babylon_test/db
-rm -Rf ${DATA_PATH}/bob4/chains/js_babylon_test/db
-
-# RUN
-#./target/release/joystream-node --base-path test-data/alice3  --validator --chain test-data/chain-spec.json --pruning=archive --log runtime,txpool,transaction-pool
-#./target/release/joystream-node --base-path test-data/bob4 --validator --port 30334 --ws-port 9945 --chain test-data/chain-spec.json --pruning=archive --log runtime,txpool,transaction-pool

+ 60 - 92
tests/network-tests/run-migration-tests.sh

@@ -4,98 +4,70 @@ set -e
 SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
 cd $SCRIPT_PATH
 
-# Location that will be mounted as the /data volume in containers
-# This is how we access the initial members and balances files from
-# the containers and where generated chainspec files will be located.
-DATA_PATH=${DATA_PATH:=~/tmp}
-
-# Initial account balance for Alice
-# Alice is the source of funds for all new accounts that are created in the tests.
-ALICE_INITIAL_BALANCE=${ALICE_INITIAL_BALANCE:=100000000}
-
-# The docker image tag to use for joystream/node as the starting chain
-# that will be upgraded to the latest runtime.
-RUNTIME=${RUNTIME:=latest}
+# Location to store runtime WASM for runtime upgrade
+DATA_PATH=${DATA_PATH:=$(pwd)/data}
+
+# The joystream/node docker image tag to start chain
+export RUNTIME=${RUNTIME:=latest}
+
+# The joystream/node docker image tag which contains WASM runtime to upgrade chain with
 TARGET_RUNTIME=${TARGET_RUNTIME:=latest}
 
-AUTO_CONFIRM=true
-
-mkdir -p ${DATA_PATH}
-
-echo "{
-  \"balances\":[
-    [\"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY\", ${ALICE_INITIAL_BALANCE}]
-  ]
-}" > ${DATA_PATH}/initial-balances.json
-
-# Make Alice a member
-echo '
-  [{
-    "member_id":0,
-    "root_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "controller_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "handle":"alice",
-    "avatar_uri":"https://alice.com/avatar.png",
-    "about":"Alice",
-    "registered_at_time":0
-  }]
-' > ${DATA_PATH}/initial-members.json
-
-# Create a chain spec file
-docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder joystream/node:${RUNTIME} \
-  new \
-  --authority-seeds Alice \
-  --sudo-account  5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY \
-  --deployment dev \
-  --chain-spec-path /data/chain-spec.json \
-  --initial-balances-path /data/initial-balances.json \
-  --initial-members-path /data/initial-members.json
-
-# Convert the chain spec file to a raw chainspec file
-docker run --rm -v ${DATA_PATH}:/data joystream/node:${RUNTIME} build-spec \
-  --raw --disable-default-bootnode \
-  --chain /data/chain-spec.json > ~/tmp/chain-spec-raw.json
-
-NETWORK_ARG=
-if [ "$ATTACH_TO_NETWORK" != "" ]; then
-  NETWORK_ARG="--network ${ATTACH_TO_NETWORK}"
-fi
+# Prevent joystream cli from prompting
+export AUTO_CONFIRM=true
 
-# Start a chain with generated chain spec
-# Add "-l ws=trace,ws::handler=info" to get websocket trace logs
-CONTAINER_ID=`docker run -d -v ${DATA_PATH}:/data -p 9944:9944 ${NETWORK_ARG} --name joystream-node joystream/node:${RUNTIME} \
-  --validator --alice --unsafe-ws-external --rpc-cors=all -l runtime \
-  --chain /data/chain-spec-raw.json`
+# Create chainspec with Alice (sudo) as member so we can use her in joystream-cli
+CONTAINER_ID=`MAKE_SUDO_MEMBER=true ./run-test-node-docker.sh`
 
 function cleanup() {
     docker logs ${CONTAINER_ID} --tail 15
-    docker stop ${CONTAINER_ID}
-    docker rm ${CONTAINER_ID}
-    rm tests/network-tests/assets/TestChannel__rejectedContent.json
-    rm tests/network-tests/assets/TestVideo__rejectedContent.json
-    
+    docker-compose -f ../../docker-compose.yml down -v
+    rm ./assets/TestChannel__rejectedContent.json || true
+    rm ./assets/TestVideo__rejectedContent.json || true
 }
 
 function pre_migration_hook() {
-sleep 5 # needed otherwise docker image won't be ready yet
-yarn joystream-cli account:choose --address 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY # Alice
-echo "creating 1 channel"
-yarn joystream-cli content:createChannel --input=./assets/TestChannel.json --context=Member || true
-echo "adding 1 video to the above channel"
-yarn joystream-cli content:createVideo -c 1 --input=./assets/TestVideo.json || true
+  sleep 10 # needed otherwise docker image won't be ready yet
+  # Display runtime version
+  yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
+
+  # Assume an older version of joystream-cli is installed globally, so we run these commands to
+  # work against the older runtime. Assert it is version `@joystream/cli/0.5.1`?
+  joystream-cli --version
+
+  joystream-cli account:choose --address 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY # Alice
+  echo "creating 1 channel"
+  joystream-cli content:createChannel --input=./assets/TestChannel.json --context=Member || true
+  echo "adding 1 video to the above channel"
+  joystream-cli content:createVideo -c 1 --input=./assets/TestVideo.json || true
+
+  # Confirm channel and video created successfully
+  joystream-cli content:videos 1
+  joystream-cli content:channel 1
 }
 
 function post_migration_hook() {
-echo "*** verify existence of the 5 new groups ***"
-yarn joystream-cli working-groups:overview --group=operationsAlpha
-yarn joystream-cli working-groups:overview --group=operationsBeta
-yarn joystream-cli working-groups:overview --group=operationsGamma
-yarn joystream-cli working-groups:overview --group=curators
-yarn joystream-cli working-groups:overview --group=distributors
-
-echo "*** verify previously created channel and video are cleared ***"
-yarn joystream-cli content:videos 1
-yarn joystream-cli content:channel 1
+  echo "*** verify existence of the 5 new groups ***"
+  yarn joystream-cli working-groups:overview --group=operationsAlpha
+  yarn joystream-cli working-groups:overview --group=operationsBeta
+  yarn joystream-cli working-groups:overview --group=operationsGamma
+  yarn joystream-cli working-groups:overview --group=curators
+  yarn joystream-cli working-groups:overview --group=distributors
+
+  echo "*** verify previously created channel and video are cleared ***"
+  # Allow a few blocks for migration to complete
+  sleep 12
+  
+  # FIXME: How to assert these fail as expected? They should report that the video and channel do not exist
+  # Can we get json output to more easily parse result of query?
+  set +e
+  yarn joystream-cli content:channel 1
+  if [ $? -eq 0 ]; then
+    echo "Unexpected channel was found"
+    exit -1
+  fi
+  # This command doesn't give an error exit code if no videos are found in a channel
+  yarn joystream-cli content:videos 1
 }    
 
 trap cleanup EXIT
@@ -103,30 +75,26 @@ trap cleanup EXIT
 if [ "$TARGET_RUNTIME" == "$RUNTIME" ]; then
   echo "Not Performing a runtime upgrade."
 else
-    # pre migration hook
-    pre_migration_hook
-    
+  pre_migration_hook
+
   # Copy new runtime wasm file from target joystream/node image
   echo "Extracting wasm blob from target joystream/node image."
-  id=`docker create joystream/node:${TARGET_RUNTIME}`
+  id=$(docker create joystream/node:${TARGET_RUNTIME})
   docker cp $id:/joystream/runtime.compact.wasm ${DATA_PATH}
   docker rm $id
 
-  # Display runtime version before runtime upgrade
-  yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
-
   echo "Performing runtime upgrade."
   yarn workspace api-scripts tsnode-strict \
     src/dev-set-runtime-code.ts -- ${DATA_PATH}/runtime.compact.wasm
 
   echo "Runtime upgraded."
 
+  # Display runtime version
+  yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
+
   echo "Performing migration tests"
-  # post migration hook
+
   post_migration_hook
+
   echo "Done with migrations tests"
 fi
-
-# Display runtime version
-yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
-
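A minimal way to add the version assertion that pre_migration_hook leaves open, assuming `joystream-cli --version` prints the package identifier (the expected string below is an assumption taken from the comment in the hook):

# Hypothetical assertion; adjust the expected version string as needed
if ! joystream-cli --version | grep -q "@joystream/cli/0.5.1"; then
  echo "Unexpected joystream-cli version, expected @joystream/cli/0.5.1" >&2
  exit 1
fi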

+ 0 - 50
tests/network-tests/run-storage-node-tests.sh

@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
-cd $SCRIPT_PATH
-
-set -a
-. ../../.env
-set +a
-
-function cleanup() {
-    # Show tail end of logs for the processor and indexer containers to
-    # see any possible errors
-    (echo "## Processor Logs ##" && docker logs joystream_processor_1 --tail 50) || :
-    (echo "## Indexer Logs ##" && docker logs joystream_indexer_1 --tail 50) || :
-    docker-compose down -v
-}
-
-trap cleanup EXIT
-
-# clean start
-docker-compose down -v
-
-docker-compose up -d joystream-node
-
-# Storage node
-DEBUG=joystream:storage-cli:dev yarn storage-cli dev-init
-docker-compose up -d colossus
-
-# Query node is expected to have been already built
-docker-compose up -d db
-yarn workspace query-node-root db:migrate
-docker-compose up -d graphql-server
-# Starting up processor will bring up all services it depends on
-docker-compose up -d processor
-
-# Fixes Error: No active storage providers available
-echo "Wait for colossus to announce public url"
-sleep 6
-
-echo "Creating channel..."
-yarn joystream-cli media:createChannel \
-  --input ./assets/TestChannel.json --confirm
-
-echo "Uploading video..."
-yes | yarn joystream-cli media:uploadVideo ./assets/joystream.MOV \
-  --input ./assets/TestVideo.json \
-  --confirm 
-
-time DEBUG=* yarn workspace network-tests run-test-scenario storage-node

+ 78 - 0
tests/network-tests/run-test-node-docker.sh

@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+set -e
+
+SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
+cd $SCRIPT_PATH
+
+# Log only to stderr.
+# The only stdout output from this script should be the container id of the node, printed at the very end.
+
+# Location that will be mounted as the /data volume in containers
+# This is where the initial members and balances files and generated chainspec files will be located.
+DATA_PATH=${DATA_PATH:=$(pwd)/data}
+mkdir -p ${DATA_PATH}
+
+# Initial account balance for sudo account
+SUDO_INITIAL_BALANCE=${SUDO_INITIAL_BALANCE:=100000000}
+SUDO_ACCOUNT_URI=${SUDO_ACCOUNT_URI:="//Alice"}
+SUDO_ACCOUNT=$(docker run --rm --pull=always docker.io/parity/subkey:2.0.1 inspect ${SUDO_ACCOUNT_URI} --output-type json | jq .ss58Address -r)
+
+# Source of funds for all new accounts that are created in the tests.
+TREASURY_INITIAL_BALANCE=${TREASURY_INITIAL_BALANCE:=100000000}
+TREASURY_ACCOUNT_URI=${TREASURY_ACCOUNT_URI:=$SUDO_ACCOUNT_URI}
+TREASURY_ACCOUNT=$(docker run --rm --pull=always docker.io/parity/subkey:2.0.1 inspect ${TREASURY_ACCOUNT_URI} --output-type json | jq .ss58Address -r)
+
+>&2 echo "sudo account from suri: ${SUDO_ACCOUNT}"
+>&2 echo "treasury account from suri: ${TREASURY_ACCOUNT}"
+
+# The docker image tag to use for joystream/node
+RUNTIME=${RUNTIME:=latest}
+
+echo "{
+  \"balances\":[
+    [\"$SUDO_ACCOUNT\", $SUDO_INITIAL_BALANCE],
+    [\"$TREASURY_ACCOUNT\", $TREASURY_INITIAL_BALANCE]
+  ]
+}" > ${DATA_PATH}/initial-balances.json
+
+# Remember: if there are initial members at genesis, the query-node needs to be bootstrapped,
+# or any events processed for these members will cause the processor to fail.
+if [ "${MAKE_SUDO_MEMBER}" == true ]
+then
+  echo "
+    [{
+      \"member_id\":0,
+      \"root_account\":\"$SUDO_ACCOUNT\",
+      \"controller_account\":\"$SUDO_ACCOUNT\",
+      \"handle\":\"sudosudo\",
+      \"avatar_uri\":\"https://sudo.com/avatar.png\",
+      \"about\":\"Sudo\",
+      \"registered_at_time\":0
+    }]
+  " > ${DATA_PATH}/initial-members.json
+else
+  echo "[]" > ${DATA_PATH}/initial-members.json
+fi
+
+# Create a chain spec file
+docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder joystream/node:${RUNTIME} \
+  new \
+  --authority-seeds Alice \
+  --sudo-account ${SUDO_ACCOUNT} \
+  --deployment dev \
+  --chain-spec-path /data/chain-spec.json \
+  --initial-balances-path /data/initial-balances.json \
+  --initial-members-path /data/initial-members.json
+
+# Convert the chain spec file to a raw chainspec file
+docker run --rm -v ${DATA_PATH}:/data joystream/node:${RUNTIME} build-spec \
+  --raw --disable-default-bootnode \
+  --chain /data/chain-spec.json > ${DATA_PATH}/chain-spec-raw.json
+
+# Start a chain with generated chain spec
+export JOYSTREAM_NODE_TAG=${RUNTIME}
+docker-compose -f ../../docker-compose.yml run -d -v ${DATA_PATH}:/data --name joystream-node \
+  -p 9944:9944 -p 9933:9933 joystream-node \
+  --alice --validator --unsafe-ws-external --unsafe-rpc-external \
+  --rpc-methods Unsafe --rpc-cors=all -l runtime \
+  --chain /data/chain-spec-raw.json
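Example of how this script is intended to be consumed (the image tag and MAKE_SUDO_MEMBER value are illustrative); since only the container id is written to stdout it can be captured directly, as run-tests.sh does:

CONTAINER_ID=$(RUNTIME=latest MAKE_SUDO_MEMBER=true ./run-test-node-docker.sh)
docker logs ${CONTAINER_ID} --tail 15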

+ 9 - 4
tests/network-tests/run-test-node.sh

@@ -1,13 +1,18 @@
+#!/usr/bin/env bash
+set -e
+
+SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
+cd $SCRIPT_PATH
+
 # Location that will be mounted as the /data volume in containers
 # This is how we access the initial members and balances files from
 # the containers and where generated chainspec files will be located.
-DATA_PATH="test-data"
+DATA_PATH=${DATA_PATH:=./data}
 
 # Initial account balance for Alice
 # Alice is the source of funds for all new accounts that are created in the tests.
 ALICE_INITIAL_BALANCE=100000000
 
-rm -Rf ${DATA_PATH}
 mkdir -p ${DATA_PATH}
 
 echo "{
@@ -48,12 +53,12 @@ function cleanup() {
 trap cleanup EXIT
 
 # Create a chain spec file
-./target/release/chain-spec-builder new -a Alice \
+../../target/release/chain-spec-builder new -a Alice \
   --chain-spec-path ${DATA_PATH}/chain-spec.json \
   --initial-balances-path ${DATA_PATH}/initial-balances.json \
   --initial-members-path ${DATA_PATH}/initial-members.json \
   --deployment dev \
   --sudo-account 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
 
-./target/release/joystream-node --base-path ${DATA_PATH}/alice \
+../../target/release/joystream-node --base-path ${DATA_PATH}/alice \
   --validator --chain ${DATA_PATH}/chain-spec.json --alice --unsafe-ws-external --rpc-cors all

+ 26 - 61
tests/network-tests/run-tests.sh

@@ -4,75 +4,40 @@ set -e
 SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
 cd $SCRIPT_PATH
 
-# Location that will be mounted as the /data volume in containers
-# This is how we access the initial members and balances files from
-# the containers and where generated chainspec files will be located.
-DATA_PATH=${DATA_PATH:=~/tmp}
-
-# Initial account balance for Alice
-# Alice is the source of funds for all new accounts that are created in the tests.
-ALICE_INITIAL_BALANCE=${ALICE_INITIAL_BALANCE:=100000000}
-
-# The docker image tag to use for joystream/node as the starting chain
-# that will be upgraded to the latest runtime.
-RUNTIME=${RUNTIME:=latest}
-
-mkdir -p ${DATA_PATH}
-
-echo "{
-  \"balances\":[
-    [\"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY\", ${ALICE_INITIAL_BALANCE}]
-  ]
-}" > ${DATA_PATH}/initial-balances.json
-
-# Make Alice a member
-echo '
-  [{
-    "member_id":0,
-    "root_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "controller_account":"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
-    "handle":"alice",
-    "avatar_uri":"https://alice.com/avatar.png",
-    "about":"Alice",
-    "registered_at_time":0
-  }]
-' > ${DATA_PATH}/initial-members.json
-
-# Create a chain spec file
-docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder joystream/node:${RUNTIME} \
-  new \
-  --authority-seeds Alice \
-  --sudo-account  5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY \
-  --deployment dev \
-  --chain-spec-path /data/chain-spec.json \
-  --initial-balances-path /data/initial-balances.json \
-  --initial-members-path /data/initial-members.json
-
-# Convert the chain spec file to a raw chainspec file
-docker run --rm -v ${DATA_PATH}:/data joystream/node:${RUNTIME} build-spec \
-  --raw --disable-default-bootnode \
-  --chain /data/chain-spec.json > ~/tmp/chain-spec-raw.json
-
-NETWORK_ARG=
-if [ "$ATTACH_TO_NETWORK" != "" ]; then
-  NETWORK_ARG="--network ${ATTACH_TO_NETWORK}"
-fi
-
-# Start a chain with generated chain spec
-# Add "-l ws=trace,ws::handler=info" to get websocket trace logs
-CONTAINER_ID=`docker run -d -v ${DATA_PATH}:/data -p 9944:9944 ${NETWORK_ARG} --name joystream-node joystream/node:${RUNTIME} \
-  --validator --alice --unsafe-ws-external --rpc-cors=all -l runtime \
-  --chain /data/chain-spec-raw.json`
+CONTAINER_ID=`./run-test-node-docker.sh`
 
 function cleanup() {
     docker logs ${CONTAINER_ID} --tail 15
-    docker stop ${CONTAINER_ID}
-    docker rm ${CONTAINER_ID}
+    docker-compose -f ../../docker-compose.yml down -v
 }
 
 trap cleanup EXIT
 
+sleep 3
+
 # Display runtime version
 yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
 
+# Start any other services we want
+# docker-compose -f ../../docker-compose.yml up -d colossus-1
+
+# Start a query-node
+../../query-node/start.sh
+
+# Execute tests
+
+# We can load the env config used to start the docker services and pass it on to the
+# tests. This could be useful to capture the keys or URLs used.
+# We just have to watch out for clashing env var names.
+#set -a
+#. ../../.env
+#set +a
+
+# First scenario..
 ./run-test-scenario.sh $1
+
+# In between, pick up the keys generated by the first scenario, or bootstrap the scenario with all
+# well-known keys for workers and members.
+
+# Second scenario..
+# ./run-test-scenario.sh $2
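One possible way (a sketch, not wired up here) to hand the first scenario's keys to a second one, using the output.json written by Scenario.ts and the SURI_MINI_SECRET / START_KEY_ID variables it reads; this assumes output.json ends up in the current directory:

# Hypothetical: continue key derivation where the first scenario stopped
export SURI_MINI_SECRET=$(jq -r .miniSecret output.json)
export START_KEY_ID=$(jq -r .keyIds.final output.json)
./run-test-scenario.sh $2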

+ 256 - 49
tests/network-tests/src/Api.ts

@@ -29,30 +29,39 @@ import {
   OpeningId,
 } from '@joystream/types/hiring'
 import { FillOpeningParameters, ProposalId } from '@joystream/types/proposals'
-import { v4 as uuid } from 'uuid'
+// import { v4 as uuid } from 'uuid'
 import { extendDebug } from './Debugger'
 import { InvertedPromise } from './InvertedPromise'
-
-export enum WorkingGroups {
-  StorageWorkingGroup = 'storageWorkingGroup',
-  ContentWorkingGroup = 'contentWorkingGroup',
-  DistributionWorkingGroup = 'distributionWorkingGroup',
-  GatewayWorkingGroup = 'gatewayWorkingGroup',
-  OperationsWorkingGroupAlpha = 'operationsWorkingGroupAlpha',
-  OperationsWorkingGroupBeta = 'operationsWorkingGroupBeta',
-  OperationsWorkingGroupGamma = 'operationsWorkingGroupGamma',
+import { VideoId } from '@joystream/types/content'
+import { ChannelId } from '@joystream/types/common'
+import { ChannelCategoryMetadata, VideoCategoryMetadata } from '@joystream/metadata-protobuf'
+import { metadataToBytes } from '../../../cli/lib/helpers/serialization'
+import { assert } from 'chai'
+import { WorkingGroups } from './WorkingGroups'
+
+type AnyMetadata = {
+  serializeBinary(): Uint8Array
 }
 
 export class ApiFactory {
   private readonly api: ApiPromise
   private readonly keyring: Keyring
+  // number used as part of key derivation path
+  private keyId = 0
+  // mapping from account address to key id.
+  // To be able to re-derive keypair externally when mini-secret is known.
+  readonly addressesToKeyId: Map<string, number> = new Map()
+  // mini secret used in SURI key derivation path
+  private readonly miniSecret: string
+
   // source of funds for all new accounts
   private readonly treasuryAccount: string
 
   public static async create(
     provider: WsProvider,
     treasuryAccountUri: string,
-    sudoAccountUri: string
+    sudoAccountUri: string,
+    miniSecret: string
   ): Promise<ApiFactory> {
     const debug = extendDebug('api-factory')
     let connectAttempts = 0
@@ -69,7 +78,7 @@ export class ApiFactory {
         // Give it a few seconds to be ready.
         await Utils.wait(5000)
 
-        return new ApiFactory(api, treasuryAccountUri, sudoAccountUri)
+        return new ApiFactory(api, treasuryAccountUri, sudoAccountUri, miniSecret)
       } catch (err) {
         if (connectAttempts === 3) {
           throw new Error('Unable to connect to chain')
@@ -79,36 +88,69 @@ export class ApiFactory {
     }
   }
 
-  constructor(api: ApiPromise, treasuryAccountUri: string, sudoAccountUri: string) {
+  constructor(api: ApiPromise, treasuryAccountUri: string, sudoAccountUri: string, miniSecret: string) {
     this.api = api
     this.keyring = new Keyring({ type: 'sr25519' })
     this.treasuryAccount = this.keyring.addFromUri(treasuryAccountUri).address
     this.keyring.addFromUri(sudoAccountUri)
+    this.miniSecret = miniSecret
+    this.addressesToKeyId = new Map()
+    this.keyId = 0
   }
 
   public getApi(label: string): Api {
-    return new Api(this.api, this.treasuryAccount, this.keyring, label)
+    return new Api(this, this.api, this.treasuryAccount, this.keyring, label)
+  }
+
+  public createKeyPairs(n: number): { key: KeyringPair; id: number }[] {
+    const keys: { key: KeyringPair; id: number }[] = []
+    for (let i = 0; i < n; i++) {
+      const id = this.keyId++
+      const key = this.createCustomKeyPair(`${id}`)
+      keys.push({ key, id })
+      this.addressesToKeyId.set(key.address, id)
+    }
+    return keys
   }
 
-  // public close(): void {
-  //   this.api.disconnect()
-  // }
+  public createCustomKeyPair(customPath: string): KeyringPair {
+    const uri = `${this.miniSecret}//testing//${customPath}`
+    return this.keyring.addFromUri(uri)
+  }
+
+  public keyGenInfo(): { start: number; final: number } {
+    const start = 0
+    const final = this.keyId
+    return {
+      start,
+      final,
+    }
+  }
+
+  public getAllGeneratedAccounts(): { [k: string]: number } {
+    return Object.fromEntries(this.addressesToKeyId)
+  }
 }
 
 export class Api {
+  private readonly factory: ApiFactory
   private readonly api: ApiPromise
   private readonly sender: Sender
-  private readonly keyring: Keyring
   // source of funds for all new accounts
   private readonly treasuryAccount: string
 
-  constructor(api: ApiPromise, treasuryAccount: string, keyring: Keyring, label: string) {
+  constructor(factory: ApiFactory, api: ApiPromise, treasuryAccount: string, keyring: Keyring, label: string) {
+    this.factory = factory
     this.api = api
-    this.keyring = keyring
     this.treasuryAccount = treasuryAccount
     this.sender = new Sender(api, keyring, label)
   }
 
+  // Expose only the ability to query the chain directly
+  get query() {
+    return this.api.query
+  }
+
   public enableDebugTxLogs(): void {
     this.sender.setLogLevel(LogLevel.Debug)
   }
@@ -117,29 +159,39 @@ export class Api {
     this.sender.setLogLevel(LogLevel.Verbose)
   }
 
-  public createKeyPairs(n: number): KeyringPair[] {
-    const nKeyPairs: KeyringPair[] = []
-    for (let i = 0; i < n; i++) {
-      nKeyPairs.push(this.keyring.addFromUri(i + uuid().substring(0, 8)))
-    }
-    return nKeyPairs
+  public createKeyPairs(n: number): { key: KeyringPair; id: number }[] {
+    return this.factory.createKeyPairs(n)
+  }
+
+  public createCustomKeyPair(path: string): KeyringPair {
+    return this.factory.createCustomKeyPair(path)
+  }
+
+  public keyGenInfo(): { start: number; final: number } {
+    return this.factory.keyGenInfo()
+  }
+
+  public getAllGeneratedAccounts(): { [k: string]: number } {
+    return this.factory.getAllGeneratedAccounts()
   }
 
   // Well known WorkingGroup enum defined in runtime
   public getWorkingGroupString(workingGroup: WorkingGroups): string {
     switch (workingGroup) {
-      case WorkingGroups.StorageWorkingGroup:
+      case WorkingGroups.Storage:
         return 'Storage'
-      case WorkingGroups.ContentWorkingGroup:
+      case WorkingGroups.Content:
         return 'Content'
-      case WorkingGroups.GatewayWorkingGroup:
+      case WorkingGroups.Gateway:
         return 'Gateway'
-      case WorkingGroups.OperationsWorkingGroupAlpha:
+      case WorkingGroups.OperationsAlpha:
         return 'OperationsAlpha'
-      case WorkingGroups.OperationsWorkingGroupBeta:
+      case WorkingGroups.OperationsBeta:
         return 'OperationsBeta'
-      case WorkingGroups.OperationsWorkingGroupGamma:
+      case WorkingGroups.OperationsGamma:
         return 'OperationsGamma'
+      case WorkingGroups.Distribution:
+        return 'Distribution'
       default:
         throw new Error(`Invalid working group string representation: ${workingGroup}`)
     }
@@ -150,6 +202,11 @@ export class Api {
     return this.sender.signAndSend(this.api.tx.sudo.sudo(tx), sudo)
   }
 
+  public async makeSudoAsCall(who: string, tx: SubmittableExtrinsic<'promise'>): Promise<ISubmittableResult> {
+    const sudo = await this.api.query.sudo.key()
+    return this.sender.signAndSend(this.api.tx.sudo.sudoAs(who, tx), sudo)
+  }
+
   public createPaidTermId(value: BN): PaidTermId {
     return this.api.createType('PaidTermId', value)
   }
@@ -161,8 +218,16 @@ export class Api {
     )
   }
 
-  public getMemberIds(address: string): Promise<MemberId[]> {
-    return this.api.query.members.memberIdsByControllerAccountId<Vec<MemberId>>(address)
+  // Many calls in the testing framework take an account id instead of a member id when an action
+  // is intended to be in the context of the member. This function does the reverse lookup.
+  // There is an underlying assumption that each member has a unique controller account, even
+  // though the runtime does not enforce that constraint. For the purposes of the tests we throw
+  // if that condition is found to be false, so the tests fail fast with a clear error instead of
+  // proceeding with an ambiguous lookup. As long as all memberships are created through the
+  // membership fixture this should not happen.
+  public async getMemberId(address: string): Promise<MemberId> {
+    const ids = await this.api.query.members.memberIdsByControllerAccountId<Vec<MemberId>>(address)
+    assert.equal(ids.length, 1, 'Only a single member with same controller account is allowed')
+    return ids[0]
   }
 
   public async getBalance(address: string): Promise<Balance> {
@@ -643,7 +708,7 @@ export class Api {
     description: string,
     runtime: Bytes | string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createRuntimeUpgradeProposal(memberId, name, description, stake, runtime),
       account
@@ -657,7 +722,7 @@ export class Api {
     description: string,
     text: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createTextProposal(memberId, name, description, stake, text),
       account
@@ -672,7 +737,7 @@ export class Api {
     balance: BN,
     destination: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSpendingProposal(memberId, title, description, stake, balance, destination),
       account
@@ -686,7 +751,7 @@ export class Api {
     stake: BN,
     validatorCount: BN
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetValidatorCountProposal(memberId, title, description, stake, validatorCount),
       account
@@ -707,7 +772,7 @@ export class Api {
     minCouncilStake: BN,
     minVotingStake: BN
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetElectionParametersProposal(memberId, title, description, stake, {
         announcing_period: announcingPeriod,
@@ -731,7 +796,7 @@ export class Api {
     openingId: OpeningId,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createBeginReviewWorkingGroupLeaderApplicationsProposal(
         memberId,
@@ -753,7 +818,7 @@ export class Api {
     const councilAccounts = await this.getCouncilAccounts()
     return Promise.all(
       councilAccounts.map(async (account) => {
-        const memberId: MemberId = (await this.getMemberIds(account))[0]
+        const memberId: MemberId = await this.getMemberId(account)
         return this.approveProposal(account, memberId, proposal)
       })
     )
@@ -1168,7 +1233,7 @@ export class Api {
       ),
     })
 
-    const memberId: MemberId = (await this.getMemberIds(leaderOpening.account))[0]
+    const memberId: MemberId = await this.getMemberId(leaderOpening.account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createAddWorkingGroupLeaderOpeningProposal(
         memberId,
@@ -1198,7 +1263,7 @@ export class Api {
     payoutInterval: BN
     workingGroup: string
   }): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(fillOpening.account))[0]
+    const memberId: MemberId = await this.getMemberId(fillOpening.account)
 
     const fillOpeningParameters: FillOpeningParameters = this.api.createType('FillOpeningParameters', {
       opening_id: fillOpening.openingId,
@@ -1233,7 +1298,7 @@ export class Api {
     slash: boolean,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createTerminateWorkingGroupLeaderRoleProposal(
         memberId,
@@ -1260,7 +1325,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetWorkingGroupLeaderRewardProposal(
         memberId,
@@ -1284,7 +1349,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createDecreaseWorkingGroupLeaderStakeProposal(
         memberId,
@@ -1308,7 +1373,7 @@ export class Api {
     rewardAmount: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSlashWorkingGroupLeaderStakeProposal(
         memberId,
@@ -1331,7 +1396,7 @@ export class Api {
     mintCapacity: BN,
     workingGroup: string
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx.proposalsCodex.createSetWorkingGroupMintCapacityProposal(
         memberId,
@@ -1384,7 +1449,7 @@ export class Api {
     text: string,
     module: WorkingGroups
   ): Promise<ISubmittableResult> {
-    const memberId: MemberId = (await this.getMemberIds(account))[0]
+    const memberId: MemberId = await this.getMemberId(account)
     return this.sender.signAndSend(
       this.api.tx[module].applyOnOpening(memberId, openingId, roleAccountAddress, roleStake, applicantStake, text),
       account
@@ -1602,6 +1667,16 @@ export class Api {
     return await this.api.query[group].openingById<WorkingGroupOpening>(id)
   }
 
+  public async getActiveWorkerIds(module: WorkingGroups): Promise<WorkerId[]> {
+    return (await this.api.query[module].workerById.entries<Worker>()).map(
+      ([
+        {
+          args: [id],
+        },
+      ]) => id
+    )
+  }
+
   public async getWorkers(module: WorkingGroups): Promise<Worker[]> {
     return (await this.api.query[module].workerById.entries<Worker>()).map((workerWithId) => workerWithId[1])
   }
@@ -1710,4 +1785,136 @@ export class Api {
   public getMaxWorkersCount(module: WorkingGroups): BN {
     return this.api.createType('u32', this.api.consts[module].maxWorkerNumberLimit)
   }
+
+  async getMemberControllerAccount(memberId: number): Promise<string | undefined> {
+    return (await this.api.query.members.membershipById(memberId))?.controller_account.toString()
+  }
+
+  // Create a mock channel, throws on failure
+  async createMockChannel(memberId: number, memberControllerAccount?: string): Promise<ChannelId> {
+    memberControllerAccount = memberControllerAccount || (await this.getMemberControllerAccount(memberId))
+
+    if (!memberControllerAccount) {
+      throw new Error('invalid member id')
+    }
+
+    // Create a channel without any assets
+    const tx = this.api.tx.content.createChannel(
+      { Member: memberId },
+      {
+        assets: null,
+        meta: null,
+        reward_account: null,
+      }
+    )
+
+    const result = await this.sender.signAndSend(tx, memberControllerAccount)
+
+    const record = this.findEventRecord(result.events, 'content', 'ChannelCreated')
+    if (record) {
+      return record.event.data[1] as ChannelId
+    }
+
+    // TODO: get error from 'result'
+    throw new Error('Failed to create channel')
+  }
+
+  // Create a mock video, throws on failure
+  async createMockVideo(memberId: number, channelId: number, memberControllerAccount?: string): Promise<VideoId> {
+    memberControllerAccount = memberControllerAccount || (await this.getMemberControllerAccount(memberId))
+
+    if (!memberControllerAccount) {
+      throw new Error('invalid member id')
+    }
+
+    // Create a video without any assets
+    const tx = this.api.tx.content.createVideo({ Member: memberId }, channelId, {
+      assets: null,
+      meta: null,
+    })
+
+    const result = await this.sender.signAndSend(tx, memberControllerAccount)
+
+    const record = this.findEventRecord(result.events, 'content', 'VideoCreated')
+    if (record) {
+      return record.event.data[2] as VideoId
+    }
+
+    // TODO: get error from 'result'
+    throw new Error('Failed to create video')
+  }
+
+  async createChannelCategoryAsLead(name: string): Promise<ISubmittableResult> {
+    const lead = await this.getGroupLead(WorkingGroups.Content)
+
+    if (!lead) {
+      throw new Error('No Content Lead assigned, cannot create channel category')
+    }
+
+    const account = lead?.role_account_id
+    const meta = new ChannelCategoryMetadata({
+      name,
+    })
+
+    return this.sender.signAndSend(
+      this.api.tx.content.createChannelCategory(
+        { Lead: null },
+        { meta: metadataToBytes(ChannelCategoryMetadata, meta) }
+      ),
+      account?.toString()
+    )
+  }
+
+  async createVideoCategoryAsLead(name: string): Promise<ISubmittableResult> {
+    const lead = await this.getGroupLead(WorkingGroups.Content)
+
+    if (!lead) {
+      throw new Error('No Content Lead assigned, cannot create video category')
+    }
+
+    const account = lead?.role_account_id
+    const meta = new VideoCategoryMetadata({
+      name,
+    })
+
+    return this.sender.signAndSend(
+      this.api.tx.content.createVideoCategory({ Lead: null }, { meta: metadataToBytes(VideoCategoryMetadata, meta) }),
+      account?.toString()
+    )
+  }
+
+  async assignWorkerRoleAccount(
+    group: WorkingGroups,
+    workerId: WorkerId,
+    account: string
+  ): Promise<ISubmittableResult> {
+    if (!(await this.isWorker(workerId, group))) {
+      throw new Error('Worker not found')
+    }
+    const worker = await this.getWorkerById(workerId, group)
+
+    const memberController = await this.getMemberControllerAccount(worker.member_id.toNumber())
+    // There cannot be a worker associated with a member that does not exist
+    if (!memberController) {
+      throw new Error('Member controller not found')
+    }
+
+    // Expect the member controller key to already be in the keyring.
+    // It is the responsibility of the caller to ensure this is the case!
+
+    const updateRoleAccountCall = this.api.tx[group].updateRoleAccount(workerId, account)
+    return this.makeSudoAsCall(memberController, updateRoleAccountCall)
+  }
+
+  async assignWorkerWellknownAccount(group: WorkingGroups, workerId: WorkerId): Promise<ISubmittableResult> {
+    // path to append to base SURI
+    const uri = `worker//${this.getWorkingGroupString(group)}//${workerId.toNumber()}`
+    const account = this.createCustomKeyPair(uri).address
+    return this.assignWorkerRoleAccount(group, workerId, account)
+  }
+
+  async assignCouncil(accounts: string[]): Promise<ISubmittableResult> {
+    const setCouncilCall = this.api.tx.council.setCouncil(accounts)
+    return this.makeSudoCall(setCouncilCall)
+  }
 }
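Since every test key is now derived from the mini-secret with a deterministic path (`//testing//<keyId>` for generated keys, `//testing//worker//<Group>//<workerId>` for the well-known worker role accounts), keypairs can be re-derived outside the framework, for example with the same subkey image the scripts already use. A sketch, where key id 0 and Storage worker 0 are just examples:

# Hypothetical re-derivation of test account addresses from output.json
MINI_SECRET=$(jq -r .miniSecret output.json)
docker run --rm docker.io/parity/subkey:2.0.1 inspect "${MINI_SECRET}//testing//0" --output-type json | jq -r .ss58Address
docker run --rm docker.io/parity/subkey:2.0.1 inspect "${MINI_SECRET}//testing//worker//Storage//0" --output-type json | jq -r .ss58Address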

+ 43 - 5
tests/network-tests/src/Scenario.ts

@@ -1,5 +1,5 @@
 import { WsProvider } from '@polkadot/api'
-import { ApiFactory } from './Api'
+import { ApiFactory, Api } from './Api'
 import { QueryNodeApi } from './QueryNodeApi'
 import { config } from 'dotenv'
 import { ApolloClient, InMemoryCache, HttpLink } from '@apollo/client'
@@ -9,6 +9,7 @@ import { Job } from './Job'
 import { JobManager } from './JobManager'
 import { ResourceManager } from './Resources'
 import fetch from 'cross-fetch'
+import fs from 'fs'
 
 export type ScenarioProps = {
   env: NodeJS.ProcessEnv
@@ -16,6 +17,24 @@ export type ScenarioProps = {
   job: (label: string, flows: Flow[] | Flow) => Job
 }
 
+function writeOutput(api: Api, miniSecret: string) {
+  const outputFilename = 'output.json'
+  console.error('Writing generated accounts to', outputFilename)
+  // account to key ids
+  const accounts = api.getAllGeneratedAccounts()
+
+  // first and last key id used to generate keys in this scenario
+  const keyIds = api.keyGenInfo()
+
+  const output = {
+    accounts,
+    keyIds,
+    miniSecret,
+  }
+
+  fs.writeFileSync(outputFilename, JSON.stringify(output, undefined, 2))
+}
+
 export async function scenario(scene: (props: ScenarioProps) => Promise<void>): Promise<void> {
   // Load env variables
   config()
@@ -24,13 +43,22 @@ export async function scenario(scene: (props: ScenarioProps) => Promise<void>):
   // Connect api to the chain
   const nodeUrl: string = env.NODE_URL || 'ws://127.0.0.1:9944'
   const provider = new WsProvider(nodeUrl)
-
+  const miniSecret = env.SURI_MINI_SECRET || ''
   const apiFactory = await ApiFactory.create(
     provider,
     env.TREASURY_ACCOUNT_URI || '//Alice',
-    env.SUDO_ACCOUNT_URI || '//Alice'
+    env.SUDO_ACCOUNT_URI || '//Alice',
+    miniSecret
   )
 
+  const api = apiFactory.getApi('Key Generation')
+
+  // Generate all key ids before START_KEY_ID
+  const startKeyId = parseInt(env.START_KEY_ID || '0')
+  if (startKeyId) {
+    api.createKeyPairs(startKeyId)
+  }
+
   const queryNodeUrl: string = env.QUERY_NODE_URL || 'http://127.0.0.1:8081/graphql'
 
   const queryNodeProvider = new ApolloClient({
@@ -49,18 +77,28 @@ export async function scenario(scene: (props: ScenarioProps) => Promise<void>):
 
   const resources = new ResourceManager()
 
+  process.on('SIGINT', () => {
+    console.error('Aborting scenario')
+    writeOutput(api, miniSecret)
+    process.exit(0)
+  })
+
+  let exitCode = 0
+
   try {
     await jobs.run(resources)
   } catch (err) {
     console.error(err)
-    process.exit(-1)
+    exitCode = -1
   }
 
+  writeOutput(api, miniSecret)
+
   // Note: disconnecting and then reconnecting to the chain in the same process
   // doesn't seem to work!
   // Disconnecting is causing error to be thrown:
   // RPC-CORE: getStorage(key: StorageKey, at?: BlockHash): StorageData:: disconnected from ws://127.0.0.1:9944: 1000:: Normal connection closure
   // Are there subsciptions somewhere?
   // apiFactory.close()
-  process.exit()
+  process.exit(exitCode)
 }

+ 19 - 0
tests/network-tests/src/WorkingGroups.ts

@@ -0,0 +1,19 @@
+export enum WorkingGroups {
+  Storage = 'storageWorkingGroup',
+  Content = 'contentWorkingGroup',
+  Distribution = 'distributionWorkingGroup',
+  Gateway = 'gatewayWorkingGroup',
+  OperationsAlpha = 'operationsWorkingGroupAlpha',
+  OperationsBeta = 'operationsWorkingGroupBeta',
+  OperationsGamma = 'operationsWorkingGroupGamma',
+}
+
+export const AllWorkingGroups = [
+  WorkingGroups.Storage,
+  WorkingGroups.Content,
+  WorkingGroups.Distribution,
+  WorkingGroups.Gateway,
+  WorkingGroups.OperationsAlpha,
+  WorkingGroups.OperationsBeta,
+  WorkingGroups.OperationsGamma,
+]

+ 25 - 0
tests/network-tests/src/fixtures/councilAssignment.ts

@@ -0,0 +1,25 @@
+import { assert } from 'chai'
+import { Api } from '../Api'
+import { BaseFixture } from '../Fixture'
+
+export class AssignCouncilFixture extends BaseFixture {
+  private members: string[]
+
+  public constructor(api: Api, members: string[]) {
+    super(api)
+    this.members = members
+  }
+
+  public async execute(): Promise<void> {
+    // Assert no council exists
+    if ((await this.api.getCouncil()).length) {
+      return this.error(new Error('Council assignment fixture expects no council seats to be filled'))
+    }
+
+    await this.api.assignCouncil(this.members)
+
+    // Assert council was set
+    const councilSize = (await this.api.getCouncil()).length
+    assert.equal(councilSize, this.members.length, 'Unexpected council size after assignment')
+  }
+}

+ 13 - 4
tests/network-tests/src/fixtures/membershipModule.ts

@@ -33,6 +33,8 @@ export class BuyMembershipHappyCaseFixture extends BaseFixture {
 
     this.api.treasuryTransferBalanceToAccounts(this.accounts, membershipTransactionFee.add(new BN(membershipFee)))
 
+    // Note: The member handle is derived from the account, so if it is not unique the member
+    // registration will fail with a HandleAlreadyRegistered error
     this.memberIds = (
       await Promise.all(
         this.accounts.map((account) =>
@@ -46,6 +48,7 @@ export class BuyMembershipHappyCaseFixture extends BaseFixture {
     this.debug(`Registered ${this.memberIds.length} new members`)
 
     assert.equal(this.memberIds.length, this.accounts.length)
+    // log the member id and corresponding key id
   }
 }
 
@@ -60,10 +63,16 @@ export class BuyMembershipWithInsufficienFundsFixture extends BaseFixture {
   }
 
   async execute(): Promise<void> {
-    // Assertions
-    const membership = await this.api.getMemberIds(this.account)
-
-    assert(membership.length === 0, 'Account must not be associated with a member')
+    let memberId
+    try {
+      memberId = await this.api.getMemberId(this.account)
+    } catch (err) {
+      // member id not found
+    }
+
+    if (memberId) {
+      throw new Error('Account must not be associated with a member')
+    }
 
     // Fee estimation and transfer
     const membershipFee: BN = await this.api.getMembershipFee(this.paidTerms)

+ 3 - 2
tests/network-tests/src/fixtures/proposalsModule.ts

@@ -1,4 +1,5 @@
-import { Api, WorkingGroups } from '../Api'
+import { Api } from '../Api'
+import { WorkingGroups } from '../WorkingGroups'
 import { v4 as uuid } from 'uuid'
 import BN from 'bn.js'
 import { ProposalId } from '@joystream/types/proposals'
@@ -551,7 +552,7 @@ export class SpendingProposalFixture extends BaseFixture {
 
     await this.api.sudoSetCouncilMintCapacity(this.mintCapacity)
 
-    const fundingRecipient = this.api.createKeyPairs(1)[0].address
+    const fundingRecipient = this.api.createKeyPairs(1)[0].key.address
 
     // Proposal creation
     const result = await this.api.proposeSpending(

+ 2 - 1
tests/network-tests/src/fixtures/sudoHireLead.ts

@@ -6,7 +6,8 @@ import {
   SudoFillLeaderOpeningFixture,
 } from './workingGroupModule'
 import { BuyMembershipHappyCaseFixture } from './membershipModule'
-import { Api, WorkingGroups } from '../Api'
+import { Api } from '../Api'
+import { WorkingGroups } from '../WorkingGroups'
 import { OpeningId } from '@joystream/types/hiring'
 import { PaidTermId } from '@joystream/types/members'
 import BN from 'bn.js'

+ 4 - 3
tests/network-tests/src/fixtures/workingGroupModule.ts

@@ -1,6 +1,7 @@
 import BN from 'bn.js'
 import { assert } from 'chai'
-import { Api, WorkingGroups } from '../Api'
+import { Api } from '../Api'
+import { WorkingGroups } from '../WorkingGroups'
 import { KeyringPair } from '@polkadot/keyring/types'
 import { v4 as uuid } from 'uuid'
 import { RewardRelationship } from '@joystream/types/recurring-rewards'
@@ -485,7 +486,7 @@ export class UpdateRewardAccountFixture extends BaseFixture {
     this.api.treasuryTransferBalance(workerRoleAccount, updateRewardAccountFee)
 
     // Update reward account
-    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0]
+    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0].key
     await this.api.updateRewardAccount(workerRoleAccount, this.workerId, createdAccount.address, this.module)
     const newRewardAccount: string = await this.api.getWorkerRewardAccount(this.workerId, this.module)
     assert(
@@ -514,7 +515,7 @@ export class UpdateRoleAccountFixture extends BaseFixture {
     this.api.treasuryTransferBalance(workerRoleAccount, updateRoleAccountFee)
 
     // Update role account
-    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0]
+    const createdAccount: KeyringPair = this.api.createKeyPairs(1)[0].key
     await this.api.updateRoleAccount(workerRoleAccount, this.workerId, createdAccount.address, this.module)
     const newRoleAccount: string = (await this.api.getWorkerById(this.workerId, this.module)).role_account_id.toString()
     assert(

+ 48 - 0
tests/network-tests/src/flows/council/assign.ts

@@ -0,0 +1,48 @@
+import BN from 'bn.js'
+import { PaidTermId } from '@joystream/types/members'
+import { FlowProps } from '../../Flow'
+import { AssignCouncilFixture } from '../../fixtures/councilAssignment'
+import { BuyMembershipHappyCaseFixture } from '../../fixtures/membershipModule'
+import { extendDebug } from '../../Debugger'
+import { FixtureRunner } from '../../Fixture'
+import { Resource } from '../../Resources'
+
+export default function createAssignCouncil(size = 1) {
+  return async function (props: FlowProps): Promise<void> {
+    return assignCouncil(props, size)
+  }
+}
+
+async function assignCouncil({ api, env, lock }: FlowProps, size: number): Promise<void> {
+  const label = 'assignCouncil'
+  const debug = extendDebug(`flow:${label}`)
+
+  debug('Started')
+
+  await lock(Resource.Council)
+
+  // Skip creating council if already elected
+  if ((await api.getCouncil()).length) {
+    return debug('Skipping council setup. A Council is already elected')
+  }
+
+  const councilSize = size || (await api.getCouncilSize()).toNumber()
+
+  debug('Assigning new council of size', councilSize)
+
+  const council = []
+
+  for (let i = 0; i < councilSize; i++) {
+    council.push(api.createCustomKeyPair(`CouncilMember//${i}`).address)
+  }
+
+  const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
+
+  const createMembersFixture = new BuyMembershipHappyCaseFixture(api, council, paidTerms)
+  await new FixtureRunner(createMembersFixture).run()
+
+  const councilAssignment = new AssignCouncilFixture(api, council)
+  await new FixtureRunner(councilAssignment).run()
+
+  debug('Done')
+}

+ 2 - 2
tests/network-tests/src/flows/council/setup.ts

@@ -23,8 +23,8 @@ export default async function councilSetup({ api, env, lock }: FlowProps): Promi
   debug('Electing new council')
 
   const numberOfApplicants = (await api.getCouncilSize()).toNumber() * 2
-  const applicants = api.createKeyPairs(numberOfApplicants).map((key) => key.address)
-  const voters = api.createKeyPairs(5).map((key) => key.address)
+  const applicants = api.createKeyPairs(numberOfApplicants).map(({ key }) => key.address)
+  const voters = api.createKeyPairs(5).map(({ key }) => key.address)
 
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const K: number = +env.COUNCIL_ELECTION_K!

+ 2 - 2
tests/network-tests/src/flows/membership/creatingMemberships.ts

@@ -15,8 +15,8 @@ export default async function membershipCreation({ api, env }: FlowProps): Promi
 
   const N: number = +env.MEMBERSHIP_CREATION_N!
   assert(N > 0)
-  const nAccounts = api.createKeyPairs(N).map((key) => key.address)
-  const aAccount = api.createKeyPairs(1)[0].address
+  const nAccounts = api.createKeyPairs(N).map(({ key }) => key.address)
+  const aAccount = api.createKeyPairs(1)[0].key.address
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
 
   // Assert membership can be bought if sufficient funds are available

+ 5 - 4
tests/network-tests/src/flows/proposals/manageLeaderRole.ts

@@ -1,5 +1,6 @@
 import BN from 'bn.js'
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import { BuyMembershipHappyCaseFixture } from '../../fixtures/membershipModule'
 import {
@@ -24,10 +25,10 @@ import { Resource, ResourceLocker } from '../../Resources'
 
 export default {
   storage: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return manageLeaderRole(api, env, WorkingGroups.StorageWorkingGroup, lock)
+    return manageLeaderRole(api, env, WorkingGroups.Storage, lock)
   },
   content: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return manageLeaderRole(api, env, WorkingGroups.ContentWorkingGroup, lock)
+    return manageLeaderRole(api, env, WorkingGroups.Content, lock)
   },
 }
 
@@ -36,7 +37,7 @@ async function manageLeaderRole(api: Api, env: NodeJS.ProcessEnv, group: Working
   debug('Started')
   await lock(Resource.Proposals)
 
-  const leaderAccount = api.createKeyPairs(1)[0].address
+  const leaderAccount = api.createKeyPairs(1)[0].key.address
 
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const applicationStake: BN = new BN(env.WORKING_GROUP_APPLICATION_STAKE!)

+ 1 - 1
tests/network-tests/src/flows/proposals/updateRuntime.ts

@@ -28,7 +28,7 @@ export default async function updateRuntime({ api, env, lock }: FlowProps): Prom
   // Some tests after runtime update
   const createMembershipsFixture = new BuyMembershipHappyCaseFixture(
     api,
-    api.createKeyPairs(1).map((key) => key.address),
+    api.createKeyPairs(1).map(({ key }) => key.address),
     paidTerms
   )
   await new FixtureRunner(createMembershipsFixture).run()

+ 4 - 3
tests/network-tests/src/flows/proposals/workingGroupMintCapacityProposal.ts

@@ -1,5 +1,6 @@
 import BN from 'bn.js'
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import {
   VoteForProposalAndExpectExecutionFixture,
@@ -13,11 +14,11 @@ import { Resource, ResourceLocker } from '../../Resources'
 
 export default {
   storage: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return workingGroupMintCapactiy(api, env, WorkingGroups.StorageWorkingGroup, lock)
+    return workingGroupMintCapactiy(api, env, WorkingGroups.Storage, lock)
   },
 
   content: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return workingGroupMintCapactiy(api, env, WorkingGroups.ContentWorkingGroup, lock)
+    return workingGroupMintCapactiy(api, env, WorkingGroups.Content, lock)
   },
 }
 

+ 4 - 4
tests/network-tests/src/flows/workingGroup/atLeastValueBug.ts

@@ -1,4 +1,4 @@
-import { WorkingGroups } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import { AddWorkerOpeningFixture } from '../../fixtures/workingGroupModule'
 import BN from 'bn.js'
@@ -18,7 +18,7 @@ export default async function zeroAtLeastValueBug({ api, env }: FlowProps): Prom
 
   // Pre-conditions
   // A hired lead
-  const lead = await api.getGroupLead(WorkingGroups.StorageWorkingGroup)
+  const lead = await api.getGroupLead(WorkingGroups.Storage)
   assert.notEqual(lead, undefined)
 
   const addWorkerOpeningWithoutStakeFixture = new AddWorkerOpeningFixture(
@@ -27,7 +27,7 @@ export default async function zeroAtLeastValueBug({ api, env }: FlowProps): Prom
     new BN(0),
     openingActivationDelay,
     unstakingPeriod,
-    WorkingGroups.StorageWorkingGroup
+    WorkingGroups.Storage
   )
 
   // Add worker opening with 0 stake, expect failure!
@@ -45,7 +45,7 @@ export default async function zeroAtLeastValueBug({ api, env }: FlowProps): Prom
     roleStake,
     openingActivationDelay,
     new BN(0),
-    WorkingGroups.StorageWorkingGroup
+    WorkingGroups.Storage
   )
 
   // Add worker opening with 0 unstaking period, expect failure!

+ 19 - 10
tests/network-tests/src/flows/workingGroup/leaderSetup.ts

@@ -1,4 +1,5 @@
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import BN from 'bn.js'
 import { PaidTermId } from '@joystream/types/members'
@@ -8,24 +9,32 @@ import { assert } from 'chai'
 import { FixtureRunner } from '../../Fixture'
 import { extendDebug } from '../../Debugger'
 
-export default {
-  storage: async function ({ api, env }: FlowProps): Promise<void> {
-    return leaderSetup(api, env, WorkingGroups.StorageWorkingGroup)
-  },
-  content: async function ({ api, env }: FlowProps): Promise<void> {
-    return leaderSetup(api, env, WorkingGroups.ContentWorkingGroup)
-  },
+export default function (group: WorkingGroups, canSkip = false) {
+  return async function ({ api, env }: FlowProps): Promise<void> {
+    return leaderSetup(api, env, group, canSkip)
+  }
 }
 
 // Worker application happy case scenario
-async function leaderSetup(api: Api, env: NodeJS.ProcessEnv, group: WorkingGroups): Promise<void> {
+async function leaderSetup(
+  api: Api,
+  env: NodeJS.ProcessEnv,
+  group: WorkingGroups,
+  skipIfAlreadySet = false
+): Promise<void> {
   const debug = extendDebug(`flow:leaderSetup:${group}`)
   debug('Started')
 
   const existingLead = await api.getGroupLead(group)
+
+  if (skipIfAlreadySet && existingLead !== undefined) {
+    debug('Skipping create lead, already exists.')
+    return
+  }
+
   assert.equal(existingLead, undefined, 'Lead is already set')
 
-  const leadKeyPair = api.createKeyPairs(1)[0]
+  const leadKeyPair = api.createKeyPairs(1)[0].key
   const paidTerms: PaidTermId = api.createPaidTermId(new BN(+env.MEMBERSHIP_PAID_TERMS!))
   const applicationStake: BN = new BN(env.WORKING_GROUP_APPLICATION_STAKE!)
   const roleStake: BN = new BN(env.WORKING_GROUP_ROLE_STAKE!)

+ 33 - 13
tests/network-tests/src/flows/workingGroup/manageWorkerAsLead.ts

@@ -1,4 +1,5 @@
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import {
   ApplyForOpeningFixture,
@@ -15,19 +16,30 @@ import { OpeningId } from '@joystream/types/hiring'
 import { assert } from 'chai'
 import { extendDebug } from '../../Debugger'
 import { FixtureRunner } from '../../Fixture'
+import { WorkerId } from '@joystream/types/working-group'
 
-export default {
-  storage: async function ({ api, env }: FlowProps): Promise<void> {
-    return manageWorkerAsLead(api, env, WorkingGroups.StorageWorkingGroup)
-  },
-  content: async function ({ api, env }: FlowProps): Promise<void> {
-    return manageWorkerAsLead(api, env, WorkingGroups.ContentWorkingGroup)
-  },
+export function manageWorkerFlow(group: WorkingGroups) {
+  return async function ({ api, env }: FlowProps): Promise<void> {
+    await manageWorkerAsLead(api, env, group)
+  }
 }
 
-async function manageWorkerAsLead(api: Api, env: NodeJS.ProcessEnv, group: WorkingGroups): Promise<void> {
-  const debug = extendDebug(`flow:manageWorkerAsLead:${group}`)
+export function hireWorkersFlow(group: WorkingGroups, numWorkers = 1) {
+  return async function ({ api, env }: FlowProps): Promise<void> {
+    await hireWorkersAsLead(api, env, group, numWorkers, numWorkers)
+  }
+}
+
+async function hireWorkersAsLead(
+  api: Api,
+  env: NodeJS.ProcessEnv,
+  group: WorkingGroups,
+  numApplications = 1,
+  numHires = 1
+): Promise<WorkerId[]> {
+  const debug = extendDebug(`flow:hireWorkers:${group}`)
   debug('Started')
+  numHires = Math.min(numApplications, numHires)
 
   const applicationStake: BN = new BN(env.WORKING_GROUP_APPLICATION_STAKE!)
   const roleStake: BN = new BN(env.WORKING_GROUP_ROLE_STAKE!)
@@ -41,7 +53,7 @@ async function manageWorkerAsLead(api: Api, env: NodeJS.ProcessEnv, group: Worki
   const lead = await api.getGroupLead(group)
   assert(lead)
 
-  const applicants = api.createKeyPairs(5).map((key) => key.address)
+  const applicants = api.createKeyPairs(numApplications).map(({ key }) => key.address)
   const memberSetFixture = new BuyMembershipHappyCaseFixture(api, applicants, paidTerms)
   await new FixtureRunner(memberSetFixture).run()
 
@@ -70,7 +82,7 @@ async function manageWorkerAsLead(api: Api, env: NodeJS.ProcessEnv, group: Worki
   const applicationIds = applyForWorkerOpeningFixture.getApplicationIds()
   assert.equal(applicants.length, applicationIds.length)
 
-  const applicationIdsToHire = applicationIds.slice(0, 2)
+  const applicationIdsToHire = applicationIds.slice(0, numHires)
 
   // Begin application review
   const beginApplicationReviewFixture = new BeginApplicationReviewFixture(
@@ -92,7 +104,15 @@ async function manageWorkerAsLead(api: Api, env: NodeJS.ProcessEnv, group: Worki
   )
   await new FixtureRunner(fillOpeningFixture).run()
 
-  const firstWorkerId = fillOpeningFixture.getWorkerIds()[0]
+  debug('Done')
+  return fillOpeningFixture.getWorkerIds()
+}
+
+async function manageWorkerAsLead(api: Api, env: NodeJS.ProcessEnv, group: WorkingGroups): Promise<void> {
+  const debug = extendDebug(`flow:manageWorkerAsLead:${group}`)
+  debug('Started')
+
+  const firstWorkerId = (await hireWorkersAsLead(api, env, group, 3, 1))[0]
 
   const decreaseStakeFixture = new DecreaseStakeFixture(api, firstWorkerId, group)
   // Decrease worker stake

+ 5 - 4
tests/network-tests/src/flows/workingGroup/manageWorkerAsWorker.ts

@@ -1,4 +1,5 @@
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import {
   AddWorkerOpeningFixture,
@@ -17,10 +18,10 @@ import { extendDebug } from '../../Debugger'
 
 export default {
   storage: async function ({ api, env }: FlowProps): Promise<void> {
-    return manageWorkerAsWorker(api, env, WorkingGroups.StorageWorkingGroup)
+    return manageWorkerAsWorker(api, env, WorkingGroups.Storage)
   },
   content: async function ({ api, env }: FlowProps): Promise<void> {
-    return manageWorkerAsWorker(api, env, WorkingGroups.ContentWorkingGroup)
+    return manageWorkerAsWorker(api, env, WorkingGroups.Content)
   },
 }
 
@@ -41,7 +42,7 @@ async function manageWorkerAsWorker(api: Api, env: NodeJS.ProcessEnv, group: Wor
   const lead = await api.getGroupLead(group)
   assert(lead)
 
-  const newMembers = api.createKeyPairs(1).map((key) => key.address)
+  const newMembers = api.createKeyPairs(1).map(({ key }) => key.address)
 
   const memberSetFixture = new BuyMembershipHappyCaseFixture(api, newMembers, paidTerms)
   // Recreating set of members

+ 5 - 4
tests/network-tests/src/flows/workingGroup/workerPayout.ts

@@ -1,4 +1,5 @@
-import { Api, WorkingGroups } from '../../Api'
+import { Api } from '../../Api'
+import { WorkingGroups } from '../../WorkingGroups'
 import { FlowProps } from '../../Flow'
 import {
   AddWorkerOpeningFixture,
@@ -23,10 +24,10 @@ import { Resource, ResourceLocker } from '../../Resources'
 
 export default {
   storage: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return workerPayouts(api, env, WorkingGroups.StorageWorkingGroup, lock)
+    return workerPayouts(api, env, WorkingGroups.Storage, lock)
   },
   content: async function ({ api, env, lock }: FlowProps): Promise<void> {
-    return workerPayouts(api, env, WorkingGroups.ContentWorkingGroup, lock)
+    return workerPayouts(api, env, WorkingGroups.Content, lock)
   },
 }
 
@@ -48,7 +49,7 @@ async function workerPayouts(api: Api, env: NodeJS.ProcessEnv, group: WorkingGro
   const lead = await api.getGroupLead(group)
   assert(lead)
 
-  const newMembers = api.createKeyPairs(5).map((key) => key.address)
+  const newMembers = api.createKeyPairs(5).map(({ key }) => key.address)
 
   const memberSetFixture = new BuyMembershipHappyCaseFixture(api, newMembers, paidTerms)
   // Recreating set of members

+ 27 - 0
tests/network-tests/src/misc/createCategoriesFixture.ts

@@ -0,0 +1,27 @@
+import { BaseFixture } from '../Fixture'
+
+export class CreateMockCategories extends BaseFixture {
+  public async execute(): Promise<void> {
+    const categories = [
+      'Film & Animation',
+      'Autos & Vehicles',
+      'Music',
+      'Pets & Animals',
+      'Sports',
+      'Travel & Events',
+      'Gaming',
+      'People & Blogs',
+      'Comedy',
+      'Entertainment',
+      'News & Politics',
+      'Howto & Style',
+      'Education',
+      'Science & Technology',
+      'Nonprofits & Activism',
+    ]
+
+    await Promise.all(categories.map((name) => this.api.createChannelCategoryAsLead(name)))
+
+    await Promise.all(categories.map((name) => this.api.createVideoCategoryAsLead(name)))
+  }
+}

+ 31 - 0
tests/network-tests/src/misc/createChannelsAsMemberFixture.ts

@@ -0,0 +1,31 @@
+import { BaseFixture } from '../Fixture'
+import { Api } from '../Api'
+import { ChannelId } from '@joystream/types/common'
+
+export class CreateChannelsAsMemberFixture extends BaseFixture {
+  // Member that will be channel owner
+  private memberId: number
+  private numChannels: number
+  private createdChannels: ChannelId[] = []
+
+  constructor(api: Api, memberId: number, numChannels: number) {
+    super(api)
+    this.memberId = memberId
+    this.numChannels = numChannels
+  }
+
+  public getCreatedChannels(): ChannelId[] {
+    return this.createdChannels.slice()
+  }
+
+  public async execute(): Promise<void> {
+    const account = await this.api.getMemberControllerAccount(this.memberId)
+
+    const channels = []
+    for (let i = 0; i < this.numChannels; i++) {
+      channels.push(this.api.createMockChannel(this.memberId, account))
+    }
+
+    this.createdChannels = await Promise.all(channels)
+  }
+}

+ 27 - 0
tests/network-tests/src/misc/createVideosAsMemberFixture.ts

@@ -0,0 +1,27 @@
+import { BaseFixture } from '../Fixture'
+import { Api } from '../Api'
+
+export class CreateVideosAsMemberFixture extends BaseFixture {
+  // Member that will be channel owner
+  private memberId: number
+  private numVideos: number
+  private channelId: number
+
+  constructor(api: Api, memberId: number, channelId: number, numVideos: number) {
+    super(api)
+    this.memberId = memberId
+    this.numVideos = numVideos
+    this.channelId = channelId
+  }
+
+  public async execute(): Promise<void> {
+    const account = await this.api.getMemberControllerAccount(this.memberId)
+
+    const videos = []
+    for (let i = 0; i < this.numVideos; i++) {
+      videos.push(this.api.createMockVideo(this.memberId, this.channelId, account))
+    }
+
+    await Promise.all(videos)
+  }
+}

+ 68 - 0
tests/network-tests/src/misc/mockContentFlow.ts

@@ -0,0 +1,68 @@
+// import { assert } from 'chai'
+// import { registry } from '@joystream/types'
+import { CreateChannelsAsMemberFixture } from './createChannelsAsMemberFixture'
+import { CreateVideosAsMemberFixture } from './createVideosAsMemberFixture'
+import { BuyMembershipHappyCaseFixture } from '../fixtures/membershipModule'
+import { CreateMockCategories } from './createCategoriesFixture'
+
+import { FlowProps } from '../Flow'
+import { FixtureRunner } from '../Fixture'
+import { extendDebug } from '../Debugger'
+import BN from 'bn.js'
+
+export default async function mockContent({ api }: FlowProps): Promise<void> {
+  const debug = extendDebug('flow:createMockContent')
+  debug('Started')
+
+  // Check to avoid creating duplicate categories
+  const nextVideoCategoryId = await api.query.content.nextVideoCategoryId()
+  const nextChannelCategoryId = await api.query.content.nextChannelCategoryId()
+
+  if (nextChannelCategoryId.toNumber() === 1 && nextVideoCategoryId.toNumber() === 1) {
+    // create categories with lead
+    const createCategories = new CreateMockCategories(api)
+    debug('Creating Categories')
+    await new FixtureRunner(createCategories).run()
+  } else {
+    debug('Skipping Category Creation')
+  }
+
+  const memberAccount = api.createKeyPairs(1)[0].key.address
+  const createMember: BuyMembershipHappyCaseFixture = new BuyMembershipHappyCaseFixture(
+    api,
+    [memberAccount],
+    api.createPaidTermId(new BN(0))
+  )
+  await new FixtureRunner(createMember).run()
+
+  const memberId = createMember.getCreatedMembers()[0].toNumber()
+
+  // If we are too "aggressive" (submit too many transactions in parallel) we start seeing
+  // 'ExtrinsicStatus:: 1010: Invalid Transaction: Transaction is outdated' errors, hence the rounds below
+  const numberOfChannelsPerRound = 100
+  const numberOfRoundsChannel = 5
+  const numberOfVideosPerRound = 100
+  const numberOfRoundsVideo = 100
+
+  const channelIds: number[] = []
+
+  // create mock channels
+  debug('Creating Channels')
+  for (let n = 0; n < numberOfRoundsChannel; n++) {
+    const createChannels = new CreateChannelsAsMemberFixture(api, memberId, numberOfChannelsPerRound)
+    await new FixtureRunner(createChannels).run()
+    createChannels.getCreatedChannels().forEach((id) => channelIds.push(id.toNumber()))
+  }
+
+  // Create all videos in the same channel
+  const channelId = channelIds[0]
+
+  // create mock videos
+  for (let n = 0; n < numberOfRoundsVideo; n++) {
+    debug('Creating Videos round', n)
+    const createVideos = new CreateVideosAsMemberFixture(api, memberId, channelId, numberOfVideosPerRound)
+    await new FixtureRunner(createVideos).run()
+  }
+
+  debug('Done')
+}

+ 15 - 0
tests/network-tests/src/misc/updateAllWorkerRoleAccountsFlow.ts

@@ -0,0 +1,15 @@
+import { UpdateWorkerAccountsFixture } from './updateWorkerAccountsFixture'
+
+import { FlowProps } from '../Flow'
+import { FixtureRunner } from '../Fixture'
+import { extendDebug } from '../Debugger'
+
+export default async function updateAllWorkerRoleAccounts({ api }: FlowProps): Promise<void> {
+  const debug = extendDebug('flow:updateAllWorkerRoleAccounts')
+  debug('Started')
+
+  const updateAccounts = new UpdateWorkerAccountsFixture(api)
+  await new FixtureRunner(updateAccounts).run()
+
+  debug('Done')
+}

+ 14 - 0
tests/network-tests/src/misc/updateWorkerAccountsFixture.ts

@@ -0,0 +1,14 @@
+import { BaseFixture } from '../Fixture'
+import { AllWorkingGroups } from '../WorkingGroups'
+
+export class UpdateWorkerAccountsFixture extends BaseFixture {
+  public async execute(): Promise<void> {
+    await Promise.all(
+      AllWorkingGroups.map(async (group) =>
+        Promise.all(
+          (await this.api.getActiveWorkerIds(group)).map((id) => this.api.assignWorkerWellknownAccount(group, id))
+        )
+      )
+    )
+  }
+}

+ 2 - 1
tests/network-tests/src/scenarios/content-directory.ts

@@ -1,6 +1,7 @@
+import { WorkingGroups } from '../WorkingGroups'
 import leaderSetup from '../flows/workingGroup/leaderSetup'
 import { scenario } from '../Scenario'
 
 scenario(async ({ job }) => {
-  job('setup content lead', leaderSetup.content)
+  job('setup content lead', leaderSetup(WorkingGroups.Content))
 })
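Note on the call-site change above: leaderSetup is now a factory that takes the target working group (and, in the setup-new-chain scenario further down, a second boolean argument) and returns a flow, replacing the old per-group map (leaderSetup.content / leaderSetup.storage). The actual implementation in flows/workingGroup/leaderSetup.ts is not shown in this section, so the following is only a sketch of the assumed shape, inferred from the call sites in this diff; the name and semantics of the boolean are guesses.

import { FlowProps } from '../Flow'
import { WorkingGroups } from '../WorkingGroups'
import { extendDebug } from '../Debugger'

// Sketch only - inferred from the call sites in this diff, not copied from leaderSetup.ts
export default function leaderSetup(group: WorkingGroups, skipIfAlreadyHired = false) {
  return async function leaderSetupFlow({ api }: FlowProps): Promise<void> {
    const debug = extendDebug(`flow:leaderSetup:${group}`)
    debug('Started')
    // `skipIfAlreadyHired` is a guessed name/meaning for the boolean passed in setup-new-chain.ts
    const activeWorkers = await api.getActiveWorkerIds(group)
    if (skipIfAlreadyHired && activeWorkers.length > 0) {
      debug('Group already has active workers, skipping hiring')
      return
    }
    // ... hire the lead for `group` here (omitted in this sketch) ...
    debug('Done')
  }
}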

+ 8 - 4
tests/network-tests/src/scenarios/full.ts

@@ -8,10 +8,11 @@ import textProposal from '../flows/proposals/textProposal'
 import validatorCountProposal from '../flows/proposals/validatorCountProposal'
 import wgMintCapacityProposal from '../flows/proposals/workingGroupMintCapacityProposal'
 import atLeastValueBug from '../flows/workingGroup/atLeastValueBug'
-import manageWorkerAsLead from '../flows/workingGroup/manageWorkerAsLead'
+import { manageWorkerFlow } from '../flows/workingGroup/manageWorkerAsLead'
 import manageWorkerAsWorker from '../flows/workingGroup/manageWorkerAsWorker'
 import workerPayout from '../flows/workingGroup/workerPayout'
 import { scenario } from '../Scenario'
+import { WorkingGroups } from '../WorkingGroups'
 
 scenario(async ({ job }) => {
   job('creating members', creatingMemberships)
@@ -29,7 +30,10 @@ scenario(async ({ job }) => {
     manageLeaderRole.content,
   ]).requires(councilJob)
 
-  const leadSetupJob = job('setup leads', [leaderSetup.storage, leaderSetup.content]).after(proposalsJob)
+  const leadSetupJob = job('setup leads', [
+    leaderSetup(WorkingGroups.Storage),
+    leaderSetup(WorkingGroups.Content),
+  ]).after(proposalsJob)
 
   // Test bug only on one instance of working group is sufficient
   job('at least value bug', atLeastValueBug).requires(leadSetupJob)
@@ -38,9 +42,9 @@ scenario(async ({ job }) => {
   job('worker payouts', [workerPayout.storage, workerPayout.content]).requires(leadSetupJob).requires(councilJob)
 
   job('working group tests', [
-    manageWorkerAsLead.storage,
+    manageWorkerFlow(WorkingGroups.Storage),
     manageWorkerAsWorker.storage,
-    manageWorkerAsLead.content,
+    manageWorkerFlow(WorkingGroups.Content),
     manageWorkerAsWorker.content,
   ]).requires(leadSetupJob)
 })

+ 26 - 0
tests/network-tests/src/scenarios/setup-new-chain.ts

@@ -0,0 +1,26 @@
+import assignCouncil from '../flows/council/assign'
+import leaderSetup from '../flows/workingGroup/leaderSetup'
+import mockContentFlow from '../misc/mockContentFlow'
+import updateAccountsFlow from '../misc/updateAllWorkerRoleAccountsFlow'
+import { AllWorkingGroups } from '../WorkingGroups'
+import { scenario } from '../Scenario'
+
+scenario(async ({ job }) => {
+  const COUNCIL_SIZE = 1
+  job('Create Council', assignCouncil(COUNCIL_SIZE))
+
+  const leads = job(
+    'Set WorkingGroup Leads',
+    AllWorkingGroups.map((group) => leaderSetup(group, true))
+  )
+
+  const updateWorkerAccounts = job('Update worker accounts', updateAccountsFlow).after(leads)
+
+  if (!process.env.SKIP_MOCK_CONTENT) {
+    // Create some mock content in content directory - without assets or any real metadata
+    job('Create Mock Content', mockContentFlow).after(updateWorkerAccounts)
+  }
+
+  // TODO: assign well-known accounts to members?
+  // TODO: assign well-known accounts to council members?
+})

+ 1 - 1
tests/network-tests/src/sender.ts

@@ -17,7 +17,7 @@ export enum LogLevel {
 
 export class Sender {
   private readonly api: ApiPromise
-  private static readonly asyncLock: AsyncLock = new AsyncLock()
+  private static readonly asyncLock: AsyncLock = new AsyncLock({ maxPending: 2048 })
   private readonly keyring: Keyring
   private readonly debug: Debugger.Debugger
   private logs: LogLevel = LogLevel.None
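The larger maxPending above is relevant to the mock-content scenario: async-lock rejects acquire() calls once a key's pending queue is full (the library default is 1000, if the docs are remembered correctly - treat that figure as an assumption), and the rounds of 100 channels/videos queue many transactions at once. A minimal sketch of the behaviour the raised limit accommodates; the 'tx-queue' key name is illustrative only.

import AsyncLock from 'async-lock'

// With the default limit, queueing this many pending acquisitions on one key would be
// rejected by async-lock ('Too many pending tasks' - wording approximate)
const lock = new AsyncLock({ maxPending: 2048 })

async function submitMany(n: number): Promise<number[]> {
  return Promise.all(
    // Each task waits its turn on the shared key, roughly how Sender serializes signing
    Array.from({ length: n }, (_, i) => lock.acquire('tx-queue', async () => i))
  )
}

submitMany(2000).then((results) => console.log(`completed ${results.length} tasks`))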

+ 29 - 0
tests/network-tests/test-setup-new-chain.sh

@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+set -e
+
+SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
+cd "$SCRIPT_PATH"
+
+# Custom sudo and treasury accounts - export them before starting the new chain.
+# They will be used to configure the chainspec and override test framework defaults.
+export SUDO_ACCOUNT_URI=//Bob
+export SUDO_INITIAL_BALANCE=5000
+export TREASURY_ACCOUNT_URI=//Charlie
+export TREASURY_INITIAL_BALANCE=1000000000
+
+CONTAINER_ID=$(./run-test-node-docker.sh)
+
+function cleanup() {
+    docker logs ${CONTAINER_ID} --tail 15
+    docker-compose -f ../../docker-compose.yml down -v
+}
+
+trap cleanup EXIT
+
+sleep 3
+
+# Display runtime version
+yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
+
+# Init chain state
+./run-test-scenario.sh setup-new-chain

+ 20 - 25
yarn.lock

@@ -3967,20 +3967,6 @@
     fs-extra "^9.0.1"
     moment "^2.22.1"
 
-"@oclif/plugin-help@^2":
-  version "2.2.3"
-  resolved "https://registry.yarnpkg.com/@oclif/plugin-help/-/plugin-help-2.2.3.tgz#b993041e92047f0e1762668aab04d6738ac06767"
-  integrity sha512-bGHUdo5e7DjPJ0vTeRBMIrfqTRDBfyR5w0MP41u0n3r7YG5p14lvMmiCXxi6WDaP2Hw5nqx3PnkAIntCKZZN7g==
-  dependencies:
-    "@oclif/command" "^1.5.13"
-    chalk "^2.4.1"
-    indent-string "^4.0.0"
-    lodash.template "^4.4.0"
-    string-width "^3.0.0"
-    strip-ansi "^5.0.0"
-    widest-line "^2.0.1"
-    wrap-ansi "^4.0.0"
-
 "@oclif/plugin-help@^3", "@oclif/plugin-help@^3.2.0", "@oclif/plugin-help@^3.2.2":
   version "3.2.2"
   resolved "https://registry.yarnpkg.com/@oclif/plugin-help/-/plugin-help-3.2.2.tgz#063ee08cee556573a5198fbdfdaa32796deba0ed"
@@ -3997,6 +3983,22 @@
     widest-line "^3.1.0"
     wrap-ansi "^4.0.0"
 
+"@oclif/plugin-help@^3.2.4":
+  version "3.2.4"
+  resolved "https://registry.yarnpkg.com/@oclif/plugin-help/-/plugin-help-3.2.4.tgz#100e0e09d806e20595096609f2220d009ee096e2"
+  integrity sha512-kMSfFbv11S7CKFlbWTKDdAe/gC7P2zCFZEDq6BAHjJdA0htHT8FvBhnyoppR0O2jOTjX80wHjU+ItPpjanfuag==
+  dependencies:
+    "@oclif/command" "^1.5.20"
+    "@oclif/config" "^1.15.1"
+    "@oclif/errors" "^1.2.2"
+    chalk "^4.1.0"
+    indent-string "^4.0.0"
+    lodash "^4.17.21"
+    string-width "^4.2.0"
+    strip-ansi "^6.0.0"
+    widest-line "^3.1.0"
+    wrap-ansi "^4.0.0"
+
 "@oclif/plugin-not-found@^1.2.4":
   version "1.2.4"
   resolved "https://registry.yarnpkg.com/@oclif/plugin-not-found/-/plugin-not-found-1.2.4.tgz#160108c82f0aa10f4fb52cee4e0135af34b7220b"
@@ -9174,10 +9176,10 @@ bl@^4.0.0, bl@^4.0.3, bl@^4.1.0:
     inherits "^2.0.4"
     readable-stream "^3.4.0"
 
-blake3@^2.1.4:
-  version "2.1.4"
-  resolved "https://registry.yarnpkg.com/blake3/-/blake3-2.1.4.tgz#78117bc9e80941097fdf7d03e897a9ee595ecd62"
-  integrity sha512-70hmx0lPd6zmtNwxPT4/1P0pqaEUlTJ0noUBvCXPLfMpN0o8PPaK3q7ZlpRIyhrqcXxeMAJSowNm/L9oi/x1XA==
+blake3-wasm@^2.1.5:
+  version "2.1.5"
+  resolved "https://registry.yarnpkg.com/blake3-wasm/-/blake3-wasm-2.1.5.tgz#b22dbb84bc9419ed0159caa76af4b1b132e6ba52"
+  integrity sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g==
 
 blakejs@^1.1.0:
   version "1.1.0"
@@ -31303,13 +31305,6 @@ wide-align@1.1.3, wide-align@^1.1.0:
   dependencies:
     string-width "^1.0.2 || 2"
 
-widest-line@^2.0.1:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-2.0.1.tgz#7438764730ec7ef4381ce4df82fb98a53142a3fc"
-  integrity sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==
-  dependencies:
-    string-width "^2.1.1"
-
 widest-line@^3.1.0:
   version "3.1.0"
   resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca"