
Merge branch 'giza_staging' into giza-integration-tests

Leszek Wiesner 3 years ago
parent commit 27b7f5bfa6
100 changed files with 1648 additions and 846 deletions
1. +58 -47  .github/workflows/create-ami.yml
2. +13 -4  .github/workflows/joystream-node-docker.yml
3. +3 -1  .github/workflows/run-network-tests.yml
4. +2 -2  Cargo.lock
5. +0 -0  devops/aws/.gitignore
6. +8 -2  devops/aws/README.md
7. +0 -0  devops/aws/ansible.cfg
8. +45 -0  devops/aws/build-arm64-playbook.yml
9. +0 -0  devops/aws/build-code.yml
10. +0 -0  devops/aws/chain-spec-pioneer.yml
11. +13 -0  devops/aws/cloudformation/infrastructure.yml
12. +6 -0  devops/aws/cloudformation/single-instance-docker.yml
13. +3 -0  devops/aws/cloudformation/single-instance.yml
14. +0 -0  devops/aws/common.sh
15. +39 -0  devops/aws/create-joystream-node-ami-playbook.yml
16. +5 -9  devops/aws/deploy-infra.sample.cfg
17. +3 -2  devops/aws/deploy-infra.sh
18. +0 -0  devops/aws/deploy-single-node-playbook.yml
19. +18 -0  devops/aws/deploy-single-node.sample.cfg
20. +3 -3  devops/aws/deploy-single-node.sh
21. +1 -1  devops/aws/destroy-infra.sh
22. +0 -0  devops/aws/group_vars/all
23. +0 -0  devops/aws/library/json_modify.py
24. +0 -0  devops/aws/requirements.yml
25. +0 -0  devops/aws/roles/admin/tasks/deploy-pioneer.yml
26. +3 -0  devops/aws/roles/admin/tasks/main.yml
27. +96 -0  devops/aws/roles/common/tasks/chain-spec-node-keys.yml
28. +0 -0  devops/aws/roles/common/tasks/get-code-git.yml
29. +0 -0  devops/aws/roles/common/tasks/get-code-local.yml
30. +7 -4  devops/aws/roles/common/tasks/run-setup-build.yml
31. +0 -0  devops/aws/roles/node/templates/joystream-node.service.j2
32. +0 -0  devops/aws/roles/rpc/tasks/main.yml
33. +0 -0  devops/aws/roles/rpc/templates/Caddyfile.j2
34. +0 -0  devops/aws/roles/rpc/templates/joystream-node.service.j2
35. +0 -0  devops/aws/roles/validators/tasks/main.yml
36. +0 -0  devops/aws/roles/validators/templates/joystream-node.service.j2
37. +0 -0  devops/aws/setup-admin.yml
38. +0 -50  devops/infrastructure/build-arm64-playbook.yml
39. +0 -45  devops/infrastructure/github-action-playbook.yml
40. +0 -1  devops/infrastructure/pulumi-common/index.ts
41. +0 -19  devops/infrastructure/query-node/Pulumi.yaml
42. +0 -461  devops/infrastructure/query-node/index.ts
43. +0 -76  devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml
44. +1 -1  devops/kubernetes/node-network/.gitignore
45. +0 -0  devops/kubernetes/node-network/Pulumi.yaml
46. +0 -0  devops/kubernetes/node-network/README.md
47. +0 -0  devops/kubernetes/node-network/configMap.ts
48. +0 -0  devops/kubernetes/node-network/index.ts
49. +0 -0  devops/kubernetes/node-network/json_modify.py
50. +0 -0  devops/kubernetes/node-network/nfsVolume.ts
51. +0 -0  devops/kubernetes/node-network/package.json
52. +0 -0  devops/kubernetes/node-network/tsconfig.json
53. +0 -0  devops/kubernetes/node-network/utils.ts
54. +0 -0  devops/kubernetes/node-network/validator.ts
55. +0 -0  devops/kubernetes/pulumi-common/caddy.ts
56. +109 -0  devops/kubernetes/pulumi-common/database.ts
57. +2 -0  devops/kubernetes/pulumi-common/index.ts
58. +0 -0  devops/kubernetes/pulumi-common/package.json
59. +0 -0  devops/kubernetes/pulumi-common/tsconfig.json
60. +1 -1  devops/kubernetes/query-node/.gitignore
61. +27 -0  devops/kubernetes/query-node/Pulumi.yaml
62. +26 -2  devops/kubernetes/query-node/README.md
63. +0 -0  devops/kubernetes/query-node/configMap.ts
64. +5 -0  devops/kubernetes/query-node/docker_dummy/Dockerfile
65. +133 -0  devops/kubernetes/query-node/index.ts
66. +187 -0  devops/kubernetes/query-node/indexerDeployment.ts
67. +0 -0  devops/kubernetes/query-node/package.json
68. +210 -0  devops/kubernetes/query-node/processorDeployment.ts
69. +0 -0  devops/kubernetes/query-node/s3Helpers.ts
70. +0 -0  devops/kubernetes/query-node/tsconfig.json
71. +1 -1  devops/kubernetes/storage-node/.gitignore
72. +0 -0  devops/kubernetes/storage-node/Pulumi.yaml
73. +2 -2  devops/kubernetes/storage-node/README.md
74. +56 -28  devops/kubernetes/storage-node/index.ts
75. +1 -0  devops/kubernetes/storage-node/package.json
76. +0 -0  devops/kubernetes/storage-node/tsconfig.json
77. +1 -1  node/Cargo.toml
78. +8 -0  node/src/chain_spec/mod.rs
79. +6 -6  pioneer/packages/joy-proposals/src/Proposal/Body.tsx
80. +1 -0  query-node/mappings/bootstrap-data/data/channelCategories.json
81. +1 -0  query-node/mappings/bootstrap-data/data/videoCategories.json
82. +6 -2  query-node/mappings/bootstrap-data/index.ts
83. +1 -1  query-node/mappings/bootstrap-data/scripts/api.ts
84. +68 -0  query-node/mappings/bootstrap-data/scripts/fetchCategories.ts
85. +1 -1  query-node/mappings/bootstrap-data/scripts/fetchMembersData.ts
86. +16 -0  query-node/mappings/bootstrap-data/types.ts
87. +42 -2  query-node/mappings/bootstrap.ts
88. +4 -3  query-node/mappings/package.json
89. +7 -18  runtime-modules/common/src/working_group.rs
90. +4 -0  runtime-modules/content/src/errors.rs
91. +141 -16  runtime-modules/content/src/lib.rs
92. +212 -0  runtime-modules/content/src/tests/migration.rs
93. +19 -2  runtime-modules/content/src/tests/mock.rs
94. +1 -0  runtime-modules/content/src/tests/mod.rs
95. +3 -1  runtime-modules/storage/src/lib.rs
96. +1 -1  runtime/Cargo.toml
97. +5 -1  runtime/src/lib.rs
98. +4 -23  runtime/src/runtime_api.rs
99. +2 -0  setup.sh
100. +7 -6  tests/network-tests/assets/TestChannel.json

+ 58 - 47
.github/workflows/create-ami.yml

@@ -1,3 +1,5 @@
+# Creates an AWS AMI (system image) with compiled joystream-node and subkey
+# 
 name: Create AWS AMI
 
 on:
@@ -8,52 +10,61 @@ jobs:
     name: Build the code and run setup
     runs-on: ubuntu-latest
     env:
-      STACK_NAME: joystream-github-action-${{ github.run_number }}
+      STACK_NAME: create-joystream-node-ami-ga-${{ github.run_number }}
       KEY_NAME: joystream-github-action-key
     steps:
-    - name: Extract branch name
-      shell: bash
-      run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-      id: extract_branch
-
-    - name: Set AMI Name environment variable
-      shell: bash
-      run: echo "ami_name=joystream-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
-      id: ami_name
-
-    - name: Checkout
-      uses: actions/checkout@v2
-
-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v1
-      with:
-        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-        aws-region: us-east-1
-
-    - name: Deploy to AWS CloudFormation
-      uses: aws-actions/aws-cloudformation-github-deploy@v1
-      id: deploy_stack
-      with:
-        name: ${{ env.STACK_NAME }}
-        template: devops/infrastructure/single-instance.yml
-        no-fail-on-empty-changeset: "1"
-        parameter-overrides: "KeyName=${{ env.KEY_NAME }}"
-
-    - name: Install Ansible dependencies
-      run: pipx inject ansible-core boto3 botocore
-
-    - name: Run playbook
-      uses: dawidd6/action-ansible-playbook@v2
-      with:
-        playbook: github-action-playbook.yml
-        directory: devops/infrastructure
-        requirements: requirements.yml
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        inventory: |
-          [all]
-          ${{ steps.deploy_stack.outputs.PublicIp }}
-        options: |
-          --extra-vars "git_repo=https://github.com/${{ github.repository }} \
-                        branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
-                        stack_name=${{ env.STACK_NAME }} ami_name=${{ env.ami_name }}"
+      - name: Extract branch name
+        shell: bash
+        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        id: extract_branch
+
+      - name: Set AMI Name environment variable
+        shell: bash
+        run: echo "ami_name=joystream-node-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
+        id: ami_name
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Deploy to AWS CloudFormation
+        uses: aws-actions/aws-cloudformation-github-deploy@v1
+        id: deploy_stack
+        with:
+          name: ${{ env.STACK_NAME }}
+          template: devops/aws/cloudformation/single-instance.yml
+          no-fail-on-empty-changeset: '1'
+          parameter-overrides: 'KeyName=${{ env.KEY_NAME }}'
+
+      - name: Install Ansible dependencies
+        run: pipx inject ansible-core boto3 botocore
+
+      - name: Run playbook
+        uses: dawidd6/action-ansible-playbook@v2
+        with:
+          playbook: create-joystream-node-ami-playbook.yml
+          directory: devops/aws
+          requirements: requirements.yml
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          inventory: |
+            [all]
+            ${{ steps.deploy_stack.outputs.PublicIp }}
+          options: |
+            --extra-vars "git_repo=https://github.com/${{ github.repository }} \
+                          branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
+                          ami_name=${{ env.ami_name }}"
+
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}

+ 13 - 4
.github/workflows/joystream-node-docker.yml

@@ -71,7 +71,7 @@ jobs:
             platform_tag: 'arm'
             file: 'joystream-node-armv7.Dockerfile'
     env:
-      STACK_NAME: joystream-ga-docker-${{ github.run_number }}-${{ matrix.platform_tag }}
+      STACK_NAME: build-joystream-node-docker-ga-${{ github.run_number }}-${{ matrix.platform_tag }}
     steps:
       - name: Extract branch name
         shell: bash
@@ -120,7 +120,7 @@ jobs:
         id: deploy_stack
         with:
           name: ${{ env.STACK_NAME }}
-          template: devops/infrastructure/single-instance-docker.yml
+          template: devops/aws/cloudformation/single-instance-docker.yml
           no-fail-on-empty-changeset: '1'
           parameter-overrides: 'KeyName=${{ env.KEY_NAME }},EC2AMI=ami-00d1ab6b335f217cf,EC2InstanceType=t4g.xlarge'
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
@@ -129,7 +129,7 @@ jobs:
         uses: dawidd6/action-ansible-playbook@v2
         with:
           playbook: build-arm64-playbook.yml
-          directory: devops/infrastructure
+          directory: devops/aws
           requirements: requirements.yml
           key: ${{ secrets.SSH_PRIVATE_KEY }}
           inventory: |
@@ -142,9 +142,18 @@ jobs:
                           docker_password=${{ secrets.DOCKERHUB_PASSWORD }} \
                           tag_name=${{ steps.compute_shasum.outputs.shasum }}-${{ matrix.platform_tag }} \
                           repository=${{ env.REPOSITORY }} dockerfile=${{ matrix.file }} \
-                          stack_name=${{ env.STACK_NAME }} platform=${{ matrix.platform }}"
+                          platform=${{ matrix.platform }}"
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
 
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}
+
   push-manifest:
     name: Create manifest using both the arch images
     needs: [push-amd64, push-arm]

+ 3 - 1
.github/workflows/run-network-tests.yml

@@ -100,10 +100,12 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
       - name: Execute network tests
-        run: RUNTIME=sumer tests/network-tests/run-tests.sh full
+        run: RUNTIME=sumer tests/network-tests/run-migration-tests.sh full
 
   basic_runtime:
     name: Integration Tests (New Chain)

+ 2 - 2
Cargo.lock

@@ -2332,7 +2332,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node"
-version = "5.8.0"
+version = "5.9.0"
 dependencies = [
  "frame-benchmarking",
  "frame-benchmarking-cli",
@@ -2393,7 +2393,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node-runtime"
-version = "9.10.0"
+version = "9.11.0"
 dependencies = [
  "frame-benchmarking",
  "frame-executive",

+ 0 - 0
devops/infrastructure/.gitignore → devops/aws/.gitignore


+ 8 - 2
devops/infrastructure/README.md → devops/aws/README.md

@@ -26,10 +26,16 @@ On Mac run the command:
 Follow [the official installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for your system.
 
 # How to run
-Copy and edit the file `deploy-config.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
+Copy and edit the file `deploy-infra.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
 Run the `deploy-infra.sh` script to deploy the infrastructure
 
 ```
-cd devops/infrastructure
+cd devops/aws
 ./deploy-infra.sh your-deploy-config.cfg
 ```
+
+# To tear down a network
+
+```
+./destroy-infra.sh your-deploy-config.cfg
+```

+ 0 - 0
devops/infrastructure/ansible.cfg → devops/aws/ansible.cfg


+ 45 - 0
devops/aws/build-arm64-playbook.yml

@@ -0,0 +1,45 @@
+---
+# Setup joystream code, build docker image
+
+- name: Build image and push to docker hub
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Install Docker Module for Python
+      pip:
+        name: docker
+
+    - name: Log into DockerHub
+      community.docker.docker_login:
+        username: '{{ docker_username }}'
+        password: '{{ docker_password }}'
+
+    - name: Build an image and push it to a private repo
+      community.docker.docker_image:
+        build:
+          path: ./joystream
+          dockerfile: '{{ dockerfile }}'
+          platform: '{{ platform }}'
+        name: '{{ repository }}'
+        tag: '{{ tag_name }}'
+        push: yes
+        source: build
+      # Run in async fashion for max duration of 2 hours
+      async: 7200
+      poll: 0
+      register: build_result
+
+    - name: Check on build async task
+      async_status:
+        jid: '{{ build_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 72
+      # Check for the status every 100s
+      delay: 100
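
(For reference, the polling budget matches the async timeout: 72 retries × 100 s delay = 7200 s, the same two-hour ceiling given to the build task.)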

+ 0 - 0
devops/infrastructure/build-code.yml → devops/aws/build-code.yml


+ 0 - 0
devops/infrastructure/chain-spec-pioneer.yml → devops/aws/chain-spec-pioneer.yml


+ 13 - 0
devops/infrastructure/infrastructure.yml → devops/aws/cloudformation/infrastructure.yml

@@ -1,3 +1,9 @@
+# Deploy infrastructure required to run a new joystream chain.
+# This is comprised of:
+#   - N validators
+#   - One RPC node
+#   - An S3 bucket with a build of Pioneer
+
 AWSTemplateFormatVersion: 2010-09-09
 
 Parameters:
@@ -73,6 +79,10 @@ Resources:
           FromPort: 443
           ToPort: 443
           CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 80
+          ToPort: 80
+          CidrIp: 0.0.0.0/0
         - IpProtocol: tcp
           FromPort: 22
           ToPort: 22
@@ -112,6 +122,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 6 - 0
devops/infrastructure/single-instance-docker.yml → devops/aws/cloudformation/single-instance-docker.yml

@@ -1,3 +1,6 @@
+# Deploys an EC2 node with docker tools suitable for
+# building joystream node docker images
+
 AWSTemplateFormatVersion: 2010-09-09
 
 Parameters:
@@ -58,6 +61,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 3 - 0
devops/infrastructure/single-instance.yml → devops/aws/cloudformation/single-instance.yml

@@ -59,6 +59,9 @@ Resources:
             # Update all packages
             apt-get update -y
 
+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y
 

+ 0 - 0
devops/infrastructure/common.sh → devops/aws/common.sh


+ 39 - 0
devops/aws/create-joystream-node-ami-playbook.yml

@@ -0,0 +1,39 @@
+---
+# Setup joystream code, build and create AMI
+
+- name: Setup instance
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Run setup and build
+      include_role:
+        name: common
+        tasks_from: run-setup-build
+
+    - name: Install subkey
+      include_role:
+        name: admin
+        tasks_from: main
+
+    - name: Basic AMI Creation
+      amazon.aws.ec2_ami:
+        instance_id: '{{ instance_id }}'
+        wait: yes
+        # How long before wait gives up, in seconds
+        wait_timeout: 3600
+        name: '{{ ami_name }}'
+        launch_permissions:
+          group_names: ['all']
+        tags:
+          Name: '{{ ami_name }}'
+      register: ami_data
+      delegate_to: localhost
+
+    - name: Print AMI ID
+      debug:
+        msg: 'AMI ID is: {{ ami_data.image_id }}'

+ 5 - 9
devops/infrastructure/deploy-config.sample.cfg → devops/aws/deploy-infra.sample.cfg

@@ -1,6 +1,6 @@
 #### PARAMETERS USED BY AWS
 
-STACK_NAME=joystream-node
+STACK_NAME=joystream-network
 REGION=us-east-1
 CLI_PROFILE=joystream-user
 KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
@@ -23,19 +23,15 @@ INVENTORY_PATH="$DATA_PATH/inventory"
 
 NUMBER_OF_VALIDATORS=2
 
-## Used for Deploying a new node
-DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
-
-SINGLE_NODE_STACK_NAME="new-node-$DATE_TIME"
-
-BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
-CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"
-
 #### PARAMETERS USED BY ANSIBLE
 
 LOCAL_CODE_PATH="~/Joystream/joystream"
 NETWORK_SUFFIX=7891
 
+DEPLOYMENT_TYPE=live
+INITIAL_MEMBERS_PATH=""
+INITIAL_BALANCES_PATH=""
+
 GIT_REPO="https://github.com/Joystream/joystream.git"
 BRANCH_NAME=sumer
 

+ 3 - 2
devops/infrastructure/deploy-infra.sh → devops/aws/deploy-infra.sh

@@ -29,7 +29,7 @@ aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $NEW_STACK_NAME \
-  --template-file infrastructure.yml \
+  --template-file cloudformation/infrastructure.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -84,7 +84,8 @@ if [ $? -eq 0 ]; then
   echo -e "\n\n=========== Configure and start new validators, rpc node and pioneer ==========="
   ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH chain-spec-pioneer.yml \
     --extra-vars "local_dir=$LOCAL_CODE_PATH network_suffix=$NETWORK_SUFFIX
-                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS"
+                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
+                  deployment_type=$DEPLOYMENT_TYPE initial_balances_file=$INITIAL_BALANCES_PATH initial_members_file=$INITIAL_MEMBERS_PATH"
 
   echo -e "\n\n Pioneer URL: https://$DOMAIN_NAME"
 fi

+ 0 - 0
devops/infrastructure/single-node-playbook.yml → devops/aws/deploy-single-node-playbook.yml


+ 18 - 0
devops/aws/deploy-single-node.sample.cfg

@@ -0,0 +1,18 @@
+#### PARAMETERS USED BY AWS
+
+REGION=us-east-1
+CLI_PROFILE=joystream-user
+KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
+AWS_KEY_PAIR_NAME="joystream-key"
+
+DEFAULT_EC2_INSTANCE_TYPE=t2.micro
+
+ACCOUNT_ID=$(aws sts get-caller-identity --profile $CLI_PROFILE --query Account --output text)
+
+## Used for Deploying a new node
+DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
+
+SINGLE_NODE_STACK_NAME="joystream-node-$DATE_TIME"
+
+BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
+CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"

+ 3 - 3
devops/infrastructure/deploy-single-node.sh → devops/aws/deploy-single-node.sh

@@ -23,13 +23,13 @@ if [ ! -f "$KEY_PATH" ]; then
     exit 1
 fi
 
-# # Deploy the CloudFormation template
+# Deploy the CloudFormation template
 echo -e "\n\n=========== Deploying single node ==========="
 aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $SINGLE_NODE_STACK_NAME \
-  --template-file single-instance.yml \
+  --template-file cloudformation/single-instance.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -46,6 +46,6 @@ if [ $? -eq 0 ]; then
   echo -e "New Node Public IP: $SERVER_IP"
 
   echo -e "\n\n=========== Configuring node ==========="
-  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH single-node-playbook.yml \
+  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH deploy-single-node-playbook.yml \
     --extra-vars "binary_file=$BINARY_FILE chain_spec_file=$CHAIN_SPEC_FILE"
 fi
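
Like its sibling scripts in this directory, this one is presumably invoked with a config file path (a sketch, assuming the sample config above):

```bash
cd devops/aws
./deploy-single-node.sh deploy-single-node.sample.cfg
```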

+ 1 - 1
devops/infrastructure/delete-stack.sh → devops/aws/destroy-infra.sh

@@ -6,7 +6,7 @@ source common.sh
 
 if [ -z "$1" ]; then
   echo "ERROR: Configuration file not passed"
-  echo "Please use ./delete-stack.sh PATH/TO/CONFIG to run this script"
+  echo "Please use ./destroy-infra.sh PATH/TO/CONFIG to run this script"
   exit 1
 else
   echo "Using $1 file for config"

+ 0 - 0
devops/infrastructure/group_vars/all → devops/aws/group_vars/all


+ 0 - 0
devops/infrastructure/library/json_modify.py → devops/aws/library/json_modify.py


+ 0 - 0
devops/infrastructure/requirements.yml → devops/aws/requirements.yml


+ 0 - 0
devops/infrastructure/roles/admin/tasks/deploy-pioneer.yml → devops/aws/roles/admin/tasks/deploy-pioneer.yml


+ 3 - 0
devops/infrastructure/roles/admin/tasks/main.yml → devops/aws/roles/admin/tasks/main.yml

@@ -16,6 +16,7 @@
 
 - name: Install subkey
   shell: cargo install --force subkey --git https://github.com/paritytech/substrate --version 2.0.1 --locked
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: install_result
@@ -25,5 +26,7 @@
     jid: '{{ install_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 96 - 0
devops/aws/roles/common/tasks/chain-spec-node-keys.yml

@@ -0,0 +1,96 @@
+---
+# Create chain spec files and keys and copy to all the servers
+
+- name: Debug to test variable
+  debug:
+    msg: 'Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
+  run_once: true
+
+- name: Copying initial members file to the server
+  copy:
+    src: '{{ initial_members_file }}'
+    dest: '{{ admin_code_dir }}/initial-members.json'
+  when: initial_members_file is defined and initial_members_file|length > 0
+  run_once: true
+
+- name: Copying initial balances file to the server
+  copy:
+    src: '{{ initial_balances_file }}'
+    dest: '{{ admin_code_dir }}/initial-balances.json'
+  when: initial_balances_file is defined and initial_balances_file|length > 0
+  run_once: true
+
+- name: Run chain-spec-builder to generate chainspec.json file (with initial data)
+  shell: >
+    {{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }}
+    --chain-spec-path {{ chain_spec_path }}
+    --endowed 1 --keystore-path {{ data_path }}
+    {% if deployment_type is defined and deployment_type|length > 0 %}--deployment {{ deployment_type }}{% endif %}
+    {% if initial_members_file is defined and initial_members_file|length > 0 %}--initial-members-path {{ admin_code_dir }}/initial-members.json{% endif %}
+    {% if initial_balances_file is defined and initial_balances_file|length > 0 %}--initial-balances-path {{ admin_code_dir }}/initial-balances.json{% endif %}
+  register: chain_spec_output
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Run subkey to generate node keys
+  shell: subkey generate-node-key
+  delegate_to: '{{ local_or_admin }}'
+  register: subkey_output
+
+- name: Print to stdout
+  debug:
+    msg:
+      - 'Public Key: {{ subkey_output.stderr }}'
+      - 'Private Key: {{ subkey_output.stdout }}'
+
+- name: Print to stdout chain spec
+  debug: var=chain_spec_output.stdout
+  run_once: true
+
+- name: Save output of chain spec to local file
+  copy:
+    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
+    dest: '{{ data_path }}/chain_spec_output.txt'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Change chain spec name, id, protocolId
+  json_modify:
+    chain_spec_path: '{{ chain_spec_path }}'
+    prefix: '{{ network_suffix }}'
+    all_nodes: '{{ hostvars }}'
+  delegate_to: '{{ local_or_admin }}'
+  register: result
+  run_once: true
+
+- name: Print output of modified chainspec
+  debug:
+    var: result.result
+  run_once: true
+
+- name: Run build-spec to generate raw chainspec file
+  shell: '{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Copying chain spec files to localhost
+  synchronize:
+    src: '/home/ubuntu/{{ data_path }}/'
+    dest: '{{ data_path }}'
+    mode: pull
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copy joystream-node binary to localhost
+  fetch:
+    src: '{{ admin_code_dir }}/target/release/joystream-node'
+    dest: '{{ data_path }}/joystream-node'
+    flat: yes
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copying raw chain spec file to all servers
+  copy:
+    src: '{{ raw_chain_spec_path }}'
+    dest: '{{ remote_chain_spec_path }}'

+ 0 - 0
devops/infrastructure/roles/common/tasks/get-code-git.yml → devops/aws/roles/common/tasks/get-code-git.yml


+ 0 - 0
devops/infrastructure/roles/common/tasks/get-code-local.yml → devops/aws/roles/common/tasks/get-code-local.yml


+ 7 - 4
devops/infrastructure/roles/common/tasks/run-setup-build.yml → devops/aws/roles/common/tasks/run-setup-build.yml

@@ -2,25 +2,28 @@
 # Run setup and build code
 
- name: Create bash profile file
-  command: "touch /home/ubuntu/.bash_profile"
+  command: 'touch /home/ubuntu/.bash_profile'
 
 - name: Run setup script
   command: ./setup.sh
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
 
 - name: Build joystream node
   shell: . ~/.bash_profile && yarn cargo-build
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: build_result
 
 - name: Check on build async task
   async_status:
-    jid: "{{ build_result.ansible_job_id }}"
+    jid: '{{ build_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 0 - 0
devops/infrastructure/roles/node/templates/joystream-node.service.j2 → devops/aws/roles/node/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/rpc/tasks/main.yml → devops/aws/roles/rpc/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/rpc/templates/Caddyfile.j2 → devops/aws/roles/rpc/templates/Caddyfile.j2


+ 0 - 0
devops/infrastructure/roles/rpc/templates/joystream-node.service.j2 → devops/aws/roles/rpc/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/validators/tasks/main.yml → devops/aws/roles/validators/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/validators/templates/joystream-node.service.j2 → devops/aws/roles/validators/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/setup-admin.yml → devops/aws/setup-admin.yml


+ 0 - 50
devops/infrastructure/build-arm64-playbook.yml

@@ -1,50 +0,0 @@
----
-# Setup joystream code, build docker image
-
-- name: Build image and push to docker hub
-  hosts: all
-
-  tasks:
-    - block:
-        - name: Get code from git repo
-          include_role:
-            name: common
-            tasks_from: get-code-git
-
-        - name: Install Docker Module for Python
-          pip:
-            name: docker
-
-        - name: Log into DockerHub
-          community.docker.docker_login:
-            username: '{{ docker_username }}'
-            password: '{{ docker_password }}'
-
-        - name: Build an image and push it to a private repo
-          community.docker.docker_image:
-            build:
-              path: ./joystream
-              dockerfile: '{{ dockerfile }}'
-              platform: '{{ platform }}'
-            name: '{{ repository }}'
-            tag: '{{ tag_name }}'
-            push: yes
-            source: build
-          async: 7200
-          poll: 0
-          register: build_result
-
-        - name: Check on build async task
-          async_status:
-            jid: '{{ build_result.ansible_job_id }}'
-          register: job_result
-          until: job_result.finished
-          retries: 72
-          delay: 100
-
-      always:
-        - name: Delete the stack
-          amazon.aws.cloudformation:
-            stack_name: '{{ stack_name }}'
-            state: 'absent'
-          delegate_to: localhost

+ 0 - 45
devops/infrastructure/github-action-playbook.yml

@@ -1,45 +0,0 @@
----
-# Setup joystream code, build and Create AMI
-
-- name: Setup instance
-  hosts: all
-
-  tasks:
-    - block:
-      - name: Get code from git repo
-        include_role:
-          name: common
-          tasks_from: get-code-git
-
-      - name: Run setup and build
-        include_role:
-          name: common
-          tasks_from: run-setup-build
-
-      - name: Install subkey
-        include_role:
-          name: admin
-          tasks_from: main
-
-      - name: Basic AMI Creation
-        amazon.aws.ec2_ami:
-          instance_id: "{{ instance_id }}"
-          wait: yes
-          name: "{{ ami_name }}"
-          launch_permissions:
-            group_names: ['all']
-          tags:
-            Name: "{{ ami_name }}"
-        register: ami_data
-        delegate_to: localhost
-
-      - name: Print AMI ID
-        debug:
-          msg: "AMI ID is: {{ ami_data.image_id }}"
-
-      always:
-      - name: Delete the stack
-        amazon.aws.cloudformation:
-          stack_name: "{{ stack_name }}"
-          state: "absent"
-        delegate_to: localhost

+ 0 - 1
devops/infrastructure/pulumi-common/index.ts

@@ -1 +0,0 @@
-export { CaddyServiceDeployment } from './caddy'

+ 0 - 19
devops/infrastructure/query-node/Pulumi.yaml

@@ -1,19 +0,0 @@
-name: query-node
-runtime: nodejs
-description: Kubernetes IaC for Query Node
-template:
-  config:
-    aws:profile:
-      default: joystream-user
-    aws:region:
-      default: us-east-1
-    isMinikube:
-      description: Whether you are deploying to minikube
-      default: false
-    isLoadBalancerReady:
-      description: Whether the load balancer service is ready and has been assigned an IP
-      default: false
-    membersFilePath:
-      description: Path to members.json file for processor initialization
-    workersFilePath:
-      description: Path to workers.json file for processor initialization

+ 0 - 461
devops/infrastructure/query-node/index.ts

@@ -1,461 +0,0 @@
-import * as awsx from '@pulumi/awsx'
-import * as eks from '@pulumi/eks'
-import * as docker from '@pulumi/docker'
-import * as pulumi from '@pulumi/pulumi'
-import { configMapFromFile } from './configMap'
-import * as k8s from '@pulumi/kubernetes'
-import * as s3Helpers from './s3Helpers'
-import { CaddyServiceDeployment } from 'pulumi-common'
-
-require('dotenv').config()
-
-const config = new pulumi.Config()
-const awsConfig = new pulumi.Config('aws')
-const isMinikube = config.getBoolean('isMinikube')
-export let kubeconfig: pulumi.Output<any>
-export let joystreamAppsImage: pulumi.Output<string>
-let provider: k8s.Provider
-
-if (isMinikube) {
-  provider = new k8s.Provider('local', {})
-
-  // Create image from local app
-  joystreamAppsImage = new docker.Image('joystream/apps', {
-    build: {
-      context: '../../../',
-      dockerfile: '../../../apps.Dockerfile',
-    },
-    imageName: 'joystream/apps:latest',
-    skipPush: true,
-  }).baseImageName
-  // joystreamAppsImage = pulumi.interpolate`joystream/apps`
-} else {
-  // Create a VPC for our cluster.
-  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
-
-  // Create an EKS cluster with the default configuration.
-  const cluster = new eks.Cluster('eksctl-query-node', {
-    vpcId: vpc.id,
-    subnetIds: vpc.publicSubnetIds,
-    desiredCapacity: 3,
-    maxSize: 3,
-    instanceType: 't2.large',
-    providerCredentialOpts: {
-      profileName: awsConfig.get('profile'),
-    },
-  })
-  provider = cluster.provider
-
-  // Export the cluster's kubeconfig.
-  kubeconfig = cluster.kubeconfig
-
-  // Create a repository
-  const repo = new awsx.ecr.Repository('joystream/apps')
-
-  joystreamAppsImage = repo.buildAndPushImage({
-    dockerfile: '../../../apps.Dockerfile',
-    context: '../../../',
-  })
-}
-
-const resourceOptions = { provider: provider }
-
-const name = 'query-node'
-
-// Create a Kubernetes Namespace
-// const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
-const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
-
-// Export the Namespace name
-export const namespaceName = ns.metadata.name
-
-const appLabels = { appClass: name }
-
-// Create a Deployment
-const databaseLabels = { app: 'postgres-db' }
-
-const pvc = new k8s.core.v1.PersistentVolumeClaim(
-  `db-pvc`,
-  {
-    metadata: {
-      labels: databaseLabels,
-      namespace: namespaceName,
-      name: `db-pvc`,
-    },
-    spec: {
-      accessModes: ['ReadWriteOnce'],
-      resources: {
-        requests: {
-          storage: `10Gi`,
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-const databaseDeployment = new k8s.apps.v1.Deployment(
-  'postgres-db',
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: databaseLabels,
-    },
-    spec: {
-      selector: { matchLabels: databaseLabels },
-      template: {
-        metadata: { labels: databaseLabels },
-        spec: {
-          containers: [
-            {
-              name: 'postgres-db',
-              image: 'postgres:12',
-              env: [
-                { name: 'POSTGRES_USER', value: process.env.DB_USER! },
-                { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
-                { name: 'POSTGRES_DB', value: process.env.INDEXER_DB_NAME! },
-              ],
-              ports: [{ containerPort: 5432 }],
-              volumeMounts: [
-                {
-                  name: 'postgres-data',
-                  mountPath: '/var/lib/postgresql/data',
-                  subPath: 'postgres',
-                },
-              ],
-            },
-          ],
-          volumes: [
-            {
-              name: 'postgres-data',
-              persistentVolumeClaim: {
-                claimName: `db-pvc`,
-              },
-            },
-          ],
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-const databaseService = new k8s.core.v1.Service(
-  'postgres-db',
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: databaseDeployment.metadata.labels,
-      name: 'postgres-db',
-    },
-    spec: {
-      ports: [{ port: 5432 }],
-      selector: databaseDeployment.spec.template.metadata.labels,
-    },
-  },
-  resourceOptions
-)
-
-const migrationJob = new k8s.batch.v1.Job(
-  'db-migration',
-  {
-    metadata: {
-      namespace: namespaceName,
-    },
-    spec: {
-      backoffLimit: 0,
-      template: {
-        spec: {
-          containers: [
-            {
-              name: 'db-migration',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              resources: { requests: { cpu: '100m', memory: '100Mi' } },
-              env: [
-                {
-                  name: 'WARTHOG_DB_HOST',
-                  value: 'postgres-db',
-                },
-                {
-                  name: 'DB_HOST',
-                  value: 'postgres-db',
-                },
-                { name: 'DB_NAME', value: process.env.DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
-            },
-          ],
-          restartPolicy: 'Never',
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: databaseService }
-)
-
-const membersFilePath = config.get('membersFilePath')
-  ? config.get('membersFilePath')!
-  : '../../../query-node/mappings/bootstrap/data/members.json'
-const workersFilePath = config.get('workersFilePath')
-  ? config.get('workersFilePath')!
-  : '../../../query-node/mappings/bootstrap/data/workers.json'
-
-const dataBucket = new s3Helpers.FileBucket('bootstrap-data', {
-  files: [
-    { path: membersFilePath, name: 'members.json' },
-    { path: workersFilePath, name: 'workers.json' },
-  ],
-  policy: s3Helpers.publicReadPolicy,
-})
-
-const membersUrl = dataBucket.getUrlForFile('members.json')
-const workersUrl = dataBucket.getUrlForFile('workers.json')
-
-const dataPath = '/joystream/query-node/mappings/bootstrap/data'
-
-const processorJob = new k8s.batch.v1.Job(
-  'processor-migration',
-  {
-    metadata: {
-      namespace: namespaceName,
-    },
-    spec: {
-      backoffLimit: 0,
-      template: {
-        spec: {
-          initContainers: [
-            {
-              name: 'curl-init',
-              image: 'appropriate/curl',
-              command: ['/bin/sh', '-c'],
-              args: [
-                pulumi.interpolate`curl -o ${dataPath}/workers.json ${workersUrl}; curl -o ${dataPath}/members.json ${membersUrl}; ls -al ${dataPath};`,
-              ],
-              volumeMounts: [
-                {
-                  name: 'bootstrap-data',
-                  mountPath: dataPath,
-                },
-              ],
-            },
-          ],
-          containers: [
-            {
-              name: 'processor-migration',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  name: 'bootstrap-data',
-                  mountPath: dataPath,
-                },
-              ],
-              args: ['workspace', 'query-node-root', 'processor:bootstrap'],
-            },
-          ],
-          restartPolicy: 'Never',
-          volumes: [
-            {
-              name: 'bootstrap-data',
-              emptyDir: {},
-            },
-          ],
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: migrationJob }
-)
-
-const defsConfig = new configMapFromFile(
-  'defs-config',
-  {
-    filePath: '../../../types/augment/all/defs.json',
-    namespaceName: namespaceName,
-  },
-  resourceOptions
-).configName
-
-const deployment = new k8s.apps.v1.Deployment(
-  name,
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: appLabels,
-    },
-    spec: {
-      replicas: 1,
-      selector: { matchLabels: appLabels },
-      template: {
-        metadata: {
-          labels: appLabels,
-        },
-        spec: {
-          containers: [
-            {
-              name: 'redis',
-              image: 'redis:6.0-alpine',
-              ports: [{ containerPort: 6379 }],
-            },
-            {
-              name: 'indexer',
-              image: 'joystream/hydra-indexer:2.1.0-beta.9',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'INDEXER_WORKERS', value: '5' },
-                { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
-                { name: 'TYPES_JSON', value: 'types.json' },
-                { name: 'PGUSER', value: process.env.DB_USER! },
-                { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
-                  name: 'indexer-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn db:bootstrap && yarn start:prod'],
-            },
-            {
-              name: 'hydra-indexer-gateway',
-              image: 'joystream/hydra-indexer-gateway:2.1.0-beta.5',
-              env: [
-                { name: 'WARTHOG_STARTER_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
-                { name: 'WARTHOG_STARTER_DB_HOST', value: 'postgres-db' },
-                { name: 'WARTHOG_STARTER_DB_PASSWORD', value: process.env.DB_PASS! },
-                { name: 'WARTHOG_STARTER_DB_PORT', value: process.env.DB_PORT! },
-                { name: 'WARTHOG_STARTER_DB_USERNAME', value: process.env.DB_USER! },
-                { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'WARTHOG_APP_PORT', value: process.env.WARTHOG_APP_PORT! },
-                { name: 'PORT', value: process.env.WARTHOG_APP_PORT! },
-                { name: 'DEBUG', value: '*' },
-              ],
-              ports: [{ containerPort: 4002 }],
-            },
-            {
-              name: 'processor',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
-                  name: 'processor-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
-            },
-            {
-              name: 'graphql-server',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'DB_USER', value: process.env.DB_USER! },
-                { name: 'DB_PORT', value: process.env.DB_PORT! },
-                { name: 'DB_NAME', value: process.env.DB_NAME! },
-                { name: 'GRAPHQL_SERVER_HOST', value: process.env.GRAPHQL_SERVER_HOST! },
-                { name: 'GRAPHQL_SERVER_PORT', value: process.env.GRAPHQL_SERVER_PORT! },
-              ],
-              ports: [{ name: 'graph-ql-port', containerPort: Number(process.env.GRAPHQL_SERVER_PORT!) }],
-              args: ['workspace', 'query-node-root', 'query-node:start:prod'],
-            },
-          ],
-          volumes: [
-            {
-              name: 'processor-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
-            {
-              name: 'indexer-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
-          ],
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: processorJob }
-)
-
-// Export the Deployment name
-export const deploymentName = deployment.metadata.name
-
-// Create a LoadBalancer Service for the NGINX Deployment
-const service = new k8s.core.v1.Service(
-  name,
-  {
-    metadata: {
-      labels: appLabels,
-      namespace: namespaceName,
-      name: 'query-node',
-    },
-    spec: {
-      ports: [
-        { name: 'port-1', port: 8081, targetPort: 'graph-ql-port' },
-        { name: 'port-2', port: 4000, targetPort: 4002 },
-      ],
-      selector: appLabels,
-    },
-  },
-  resourceOptions
-)
-
-// Export the Service name and public LoadBalancer Endpoint
-export const serviceName = service.metadata.name
-
-const caddyEndpoints = [
-  `/indexer/* {
-    uri strip_prefix /indexer
-    reverse_proxy query-node:4000
-}`,
-  `/server/* {
-    uri strip_prefix /server
-    reverse_proxy query-node:8081
-}`,
-]
-
-const lbReady = config.get('isLoadBalancerReady') === 'true'
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
-  resourceOptions
-)
-
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint

+ 0 - 76
devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml

@@ -1,76 +0,0 @@
----
-# Create chain spec files and keys and copy to all the servers
-
-- name: Debug to test variable
-  debug:
-    msg: "Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}"
-  run_once: true
-
-- name: Run chain-spec-builder to generate chainspec.json file
-  command: "{{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }} --chain-spec-path {{ chain_spec_path }} --deployment live --endowed 1 --keystore-path {{ data_path }}"
-  register: chain_spec_output
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Run subkey to generate node keys
-  shell: subkey generate-node-key
-  delegate_to: "{{ local_or_admin }}"
-  register: subkey_output
-
-- name: Print to stdout
-  debug:
-    msg:
-    - "Public Key: {{ subkey_output.stderr }}"
-    - "Private Key: {{ subkey_output.stdout }}"
-
-- name: Print to stdout chain spec
-  debug: var=chain_spec_output.stdout
-  run_once: true
-
-- name: Save output of chain spec to local file
-  copy:
-    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
-    dest: "{{ data_path }}/chain_spec_output.txt"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Change chain spec name, id, protocolId
-  json_modify:
-    chain_spec_path: "{{ chain_spec_path }}"
-    prefix: "{{ network_suffix }}"
-    all_nodes: "{{ hostvars }}"
-  delegate_to: "{{ local_or_admin }}"
-  register: result
-  run_once: true
-
-- name: Print output of modified chainspec
-  debug:
-    var: result.result
-  run_once: true
-
-- name: Run build-spec to generate raw chainspec file
-  shell: "{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Copying chain spec files to localhost
-  synchronize:
-    src: "/home/ubuntu/{{ data_path }}/"
-    dest: "{{ data_path }}"
-    mode: pull
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copy joystream-node binary to localhost
-  fetch:
-    src: "{{ admin_code_dir }}/target/release/joystream-node"
-    dest: "{{ data_path }}/joystream-node"
-    flat: yes
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copying raw chain spec file to all servers
-  copy:
-    src: "{{ raw_chain_spec_path }}"
-    dest: "{{ remote_chain_spec_path }}"

+ 1 - 1
devops/infrastructure/node-network/.gitignore → devops/kubernetes/node-network/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 0 - 0
devops/infrastructure/node-network/Pulumi.yaml → devops/kubernetes/node-network/Pulumi.yaml


+ 0 - 0
devops/infrastructure/node-network/README.md → devops/kubernetes/node-network/README.md


+ 0 - 0
devops/infrastructure/node-network/configMap.ts → devops/kubernetes/node-network/configMap.ts


+ 0 - 0
devops/infrastructure/node-network/index.ts → devops/kubernetes/node-network/index.ts


+ 0 - 0
devops/infrastructure/node-network/json_modify.py → devops/kubernetes/node-network/json_modify.py


+ 0 - 0
devops/infrastructure/node-network/nfsVolume.ts → devops/kubernetes/node-network/nfsVolume.ts


+ 0 - 0
devops/infrastructure/node-network/package.json → devops/kubernetes/node-network/package.json


+ 0 - 0
devops/infrastructure/node-network/tsconfig.json → devops/kubernetes/node-network/tsconfig.json


+ 0 - 0
devops/infrastructure/node-network/utils.ts → devops/kubernetes/node-network/utils.ts


+ 0 - 0
devops/infrastructure/node-network/validator.ts → devops/kubernetes/node-network/validator.ts


+ 0 - 0
devops/infrastructure/pulumi-common/caddy.ts → devops/kubernetes/pulumi-common/caddy.ts


+ 109 - 0
devops/kubernetes/pulumi-common/database.ts

@@ -0,0 +1,109 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * PostgresServiceDeployment is an abstraction that folds together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class deploys a Postgres instance on a Persistent Volume
+ */
+export class PostgresServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('postgres:service:PostgresServiceDeployment', name, {}, opts)
+
+    const databaseLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    const pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: databaseLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: databaseLabels,
+        },
+        spec: {
+          selector: { matchLabels: databaseLabels },
+          template: {
+            metadata: { labels: databaseLabels },
+            spec: {
+              containers: [
+                {
+                  name: 'postgres-db',
+                  image: 'postgres:12',
+                  env: args.env,
+                  ports: [{ containerPort: 5432 }],
+                  volumeMounts: [
+                    {
+                      name: 'postgres-data',
+                      mountPath: '/var/lib/postgresql/data',
+                      subPath: 'postgres',
+                    },
+                  ],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'postgres-data',
+                  persistentVolumeClaim: {
+                    claimName: pvcName,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.service = new k8s.core.v1.Service(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: this.deployment.metadata.labels,
+          name: name,
+        },
+        spec: {
+          ports: [{ port: 5432 }],
+          selector: this.deployment.spec.template.metadata.labels,
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  env?: Environment[]
+  storage: Number
+  isMinikube?: boolean
+}
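
A minimal sketch of how this component might be consumed from a Pulumi program (the namespace and env values here are illustrative, not part of this diff):

```typescript
import * as k8s from '@pulumi/kubernetes'
import { PostgresServiceDeployment } from 'pulumi-common'

// Hypothetical consumer: a 10Gi Postgres instance in a fresh namespace
const ns = new k8s.core.v1.Namespace('query-node')
const db = new PostgresServiceDeployment('postgres-db', {
  namespaceName: ns.metadata.name,
  env: [
    { name: 'POSTGRES_USER', value: 'postgres' },
    { name: 'POSTGRES_PASSWORD', value: 'password' },
  ],
  storage: 10,
})
```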

+ 2 - 0
devops/kubernetes/pulumi-common/index.ts

@@ -0,0 +1,2 @@
+export { CaddyServiceDeployment } from './caddy'
+export { PostgresServiceDeployment } from './database'

+ 0 - 0
devops/infrastructure/pulumi-common/package.json → devops/kubernetes/pulumi-common/package.json


+ 0 - 0
devops/infrastructure/pulumi-common/tsconfig.json → devops/kubernetes/pulumi-common/tsconfig.json


+ 1 - 1
devops/infrastructure/query-node/.gitignore → devops/kubernetes/query-node/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 27 - 0
devops/kubernetes/query-node/Pulumi.yaml

@@ -0,0 +1,27 @@
+name: query-node
+runtime: nodejs
+description: Kubernetes IaC for Query Node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    externalIndexerUrl:
+      description: URL for an external indexer. If set, this will not deploy an Indexer.
+      default: 'http://query-node:4000/graphql'
+    skipProcessor:
+      description: If set to true, will not deploy a processor instance
+      default: false
+    useLocalRepo:
+      description: If set to true, will use an existing local docker image
+      default: false
+    appsImage:
+      description: The joystream image to use for running GraphQL servers
+      default: joystream/apps:latest

+ 26 - 2
devops/infrastructure/query-node/README.md → devops/kubernetes/query-node/README.md

@@ -38,15 +38,34 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+    --plaintext isMinikube=true --plaintext skipProcessor=false
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`
 
    ```bash
-   $ puluim config set isMinikube false
+   $ pulumi config set isMinikube false
    ```
 
+   If you want to use an existing Indexer and not deploy a new one set `externalIndexerUrl`
+
+   ```bash
+   $ pulumi config set externalIndexerUrl <URL>
+   ```
+
+   You must have a valid `joystream/apps` docker image, either on Docker Hub or locally, to deploy the infrastructure.
+   If the image exists locally and you are running on minikube, run
+
+   ```bash
+   $ pulumi config set-all --plaintext useLocalRepo=true --plaintext appsImage=<IMAGE_NAME>
+   ```
+
+   NOTE: The docker daemon for minikube is different from that of Docker Desktop. To connect your Docker CLI to the docker
+   daemon inside the VM you need to run: `eval $(minikube docker-env)`. To copy the image from your local daemon to minikube run
+   `minikube image load joystream/apps:latest --daemon`.
+
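+   For example, assuming the default `joystream/apps:latest` tag, the two commands are:
+
+   ```bash
+   # point the Docker CLI at minikube's daemon
+   $ eval $(minikube docker-env)
+   # or copy an image from your local daemon into minikube
+   $ minikube image load joystream/apps:latest --daemon
+   ```
+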
+   If not using minikube, just specify the `appsImage` config.
+
 1. Create a `.env` file in this directory (`cp ../../../.env ./.env`) and set the database and other variables in it
 
    Make sure to set `GRAPHQL_SERVER_PORT=4001`
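+
+   A minimal illustration (placeholder values, not defaults):
+
+   ```
+   GRAPHQL_SERVER_PORT=4001
+   DB_USER=postgres
+   DB_PASS=password
+   INDEXER_DB_NAME=query_node_indexer
+   ```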
@@ -70,6 +89,11 @@ After cloning this repo, from this working directory, run these commands:
 
    The GraphQl server is accessible at `https://<ENDPOINT>/server/graphql` and indexer at `https://<ENDPOINT>/indexer/graphql`
 
+1. If you are using Minikube, run `minikube service graphql-server -n $(pulumi stack output namespaceName)`
+
+   This will set up a proxy for your `query-node` service, which can then be accessed at
+   the URL given in the output
+
 1. Access the Kubernetes Cluster using `kubectl`
 
    To access your new Kubernetes cluster using `kubectl`, we need to set up the

+ 0 - 0
devops/infrastructure/query-node/configMap.ts → devops/kubernetes/query-node/configMap.ts


+ 5 - 0
devops/kubernetes/query-node/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build,
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}
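+
+# Example invocation (hypothetical; Pulumi supplies SOURCE_IMAGE via buildAndPushImage args):
+#   docker build --build-arg SOURCE_IMAGE=joystream/apps:latest .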

+ 133 - 0
devops/kubernetes/query-node/index.ts

@@ -0,0 +1,133 @@
+import * as awsx from '@pulumi/awsx'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as pulumi from '@pulumi/pulumi'
+import { configMapFromFile } from './configMap'
+import * as k8s from '@pulumi/kubernetes'
+import { IndexerServiceDeployment } from './indexerDeployment'
+import { ProcessorServiceDeployment } from './processorDeployment'
+import { CaddyServiceDeployment } from 'pulumi-common'
+
+require('dotenv').config()
+
+const config = new pulumi.Config()
+const awsConfig = new pulumi.Config('aws')
+const isMinikube = config.getBoolean('isMinikube')
+const externalIndexerUrl = config.get('externalIndexerUrl')
+const appsImage = config.get('appsImage') || `joystream/apps:latest`
+const skipProcessor = config.getBoolean('skipProcessor')
+const useLocalRepo = config.getBoolean('useLocalRepo')
+
+export let kubeconfig: pulumi.Output<any>
+export let joystreamAppsImage: pulumi.Output<string>
+let provider: k8s.Provider
+
+if (skipProcessor && externalIndexerUrl) {
+  pulumi.log.error('Need to deploy at least one component: Indexer or Processor')
+  throw new Error(`Please check the config settings for skipProcessor and externalIndexerUrl`)
+}
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+
+  if (useLocalRepo) {
+    // Use already existing image in minikube environment
+    joystreamAppsImage = pulumi.interpolate`${appsImage}`
+  } else {
+    // Access image from docker hub
+    joystreamAppsImage = new docker.RemoteImage('apps', {
+      name: appsImage!,
+    }).name
+  }
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-query-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 3,
+    maxSize: 3,
+    instanceType: 't2.large',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Create a repository
+  const repo = new awsx.ecr.Repository('joystream/apps')
+
+  // Build an image from an existing local/docker hub image and push to ECR
+  joystreamAppsImage = repo.buildAndPushImage({
+    context: './docker_dummy',
+    dockerfile: './docker_dummy/Dockerfile',
+    args: { SOURCE_IMAGE: appsImage! },
+  })
+}
+
+const resourceOptions = { provider: provider }
+
+const name = 'query-node'
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const defsConfig = new configMapFromFile(
+  'defs-config',
+  {
+    filePath: '../../../types/augment/all/defs.json',
+    namespaceName: namespaceName,
+  },
+  resourceOptions
+).configName
+
+if (!externalIndexerUrl) {
+  const indexer = new IndexerServiceDeployment(
+    'indexer',
+    { namespaceName, storage: 10, defsConfig, joystreamAppsImage },
+    resourceOptions
+  )
+}
+
+if (!skipProcessor) {
+  const processor = new ProcessorServiceDeployment(
+    'processor',
+    { namespaceName, storage: 10, defsConfig, joystreamAppsImage, externalIndexerUrl },
+    resourceOptions
+  )
+}
+
+const caddyEndpoints = [
+  `/indexer* {
+    uri strip_prefix /indexer
+    reverse_proxy indexer:4000
+}`,
+  `/server* {
+    uri strip_prefix /server
+    reverse_proxy graphql-server:8081
+}`,
+]
+
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+
+export let endpoint1: pulumi.Output<string>
+export let endpoint2: pulumi.Output<string>
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}

+ 187 - 0
devops/kubernetes/query-node/indexerDeployment.ts

@@ -0,0 +1,187 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { PostgresServiceDeployment } from 'pulumi-common'
+
+/**
+ * IndexerServiceDeployment is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * It deploys a database, a migration job, and the indexer deployment and service.
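+ *
+ * Example, mirroring the call in index.ts (argument values as used in this repo):
+ *   new IndexerServiceDeployment('indexer', { namespaceName, storage: 10, defsConfig, joystreamAppsImage }, resourceOptions)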
+ */
+export class IndexerServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('indexer:service:IndexerServiceDeployment', name, {}, opts)
+
+    // Name passed in the constructor will be the endpoint for accessing the service
+    const serviceName = name
+    const appLabels = { appClass: 'indexer' }
+
+    const indexerDbName = 'indexer-db'
+    const indexerDb = new PostgresServiceDeployment(
+      indexerDbName,
+      {
+        namespaceName: args.namespaceName,
+        env: [
+          { name: 'POSTGRES_USER', value: process.env.DB_USER! },
+          { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
+          { name: 'POSTGRES_DB', value: process.env.INDEXER_DB_NAME! },
+        ],
+        storage: args.storage,
+      },
+      { parent: this }
+    )
+
+    const indexerMigrationJob = new k8s.batch.v1.Job(
+      'indexer-db-migration',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        spec: {
+          backoffLimit: 0,
+          template: {
+            spec: {
+              containers: [
+                {
+                  name: 'db-migration',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  resources: { requests: { cpu: '100m', memory: '100Mi' } },
+                  env: [
+                    {
+                      name: 'WARTHOG_DB_HOST',
+                      value: indexerDbName,
+                    },
+                    {
+                      name: 'DB_HOST',
+                      value: indexerDbName,
+                    },
+                    { name: 'WARTHOG_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
+                    { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
+                    { name: 'DB_PASS', value: process.env.DB_PASS! },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
+                },
+              ],
+              restartPolicy: 'Never',
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: indexerDb.service }
+    )
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      'indexer',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'redis',
+                  image: 'redis:6.0-alpine',
+                  ports: [{ containerPort: 6379 }],
+                },
+                {
+                  name: 'indexer',
+                  image: 'joystream/hydra-indexer:3.0.0',
+                  env: [
+                    { name: 'DB_HOST', value: indexerDbName },
+                    { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
+                    { name: 'DB_PASS', value: process.env.DB_PASS! },
+                    { name: 'DB_USER', value: process.env.DB_USER! },
+                    { name: 'DB_PORT', value: process.env.DB_PORT! },
+                    { name: 'INDEXER_WORKERS', value: '5' },
+                    { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
+                    { name: 'DEBUG', value: 'index-builder:*' },
+                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+                    { name: 'TYPES_JSON', value: 'types.json' },
+                    { name: 'PGUSER', value: process.env.DB_USER! },
+                    { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
+                  ],
+                  volumeMounts: [
+                    {
+                      mountPath: '/home/hydra/packages/hydra-indexer/types.json',
+                      name: 'indexer-volume',
+                      subPath: 'fileData',
+                    },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: ['yarn db:bootstrap && yarn start:prod'],
+                },
+                {
+                  name: 'hydra-indexer-gateway',
+                  image: 'joystream/hydra-indexer-gateway:3.0.0',
+                  env: [
+                    { name: 'WARTHOG_STARTER_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
+                    { name: 'WARTHOG_STARTER_DB_HOST', value: indexerDbName },
+                    { name: 'WARTHOG_STARTER_DB_PASSWORD', value: process.env.DB_PASS! },
+                    { name: 'WARTHOG_STARTER_DB_PORT', value: process.env.DB_PORT! },
+                    { name: 'WARTHOG_STARTER_DB_USERNAME', value: process.env.DB_USER! },
+                    { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
+                    { name: 'WARTHOG_APP_PORT', value: process.env.WARTHOG_APP_PORT! },
+                    { name: 'PORT', value: process.env.WARTHOG_APP_PORT! },
+                    { name: 'DEBUG', value: '*' },
+                  ],
+                  ports: [{ name: 'hydra-port', containerPort: Number(process.env.WARTHOG_APP_PORT!) }],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'indexer-volume',
+                  configMap: {
+                    name: args.defsConfig,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: indexerMigrationJob }
+    )
+
+    // Create a Service for the Indexer
+    this.service = new k8s.core.v1.Service(
+      serviceName,
+      {
+        metadata: {
+          labels: appLabels,
+          namespace: args.namespaceName,
+          name: serviceName,
+        },
+        spec: {
+          ports: [{ name: 'port-1', port: 4000, targetPort: 'hydra-port' }],
+          selector: appLabels,
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  joystreamAppsImage: pulumi.Output<string>
+  defsConfig: pulumi.Output<string> | undefined
+  env?: Environment[]
+  storage: number
+}

+ 0 - 0
devops/infrastructure/query-node/package.json → devops/kubernetes/query-node/package.json


+ 210 - 0
devops/kubernetes/query-node/processorDeployment.ts

@@ -0,0 +1,210 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { PostgresServiceDeployment } from 'pulumi-common'
+
+/**
+ * ProcessorServiceDeployment is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * It deploys a database, a migration job, the GraphQL server and the processor.
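+ *
+ * Example, mirroring the call in index.ts (argument values as used in this repo):
+ *   new ProcessorServiceDeployment('processor', { namespaceName, storage: 10, defsConfig, joystreamAppsImage, externalIndexerUrl }, resourceOptions)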
+ */
+export class ProcessorServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+  public readonly endpoint: string
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('processor:service:ProcessorServiceDeployment', name, {}, opts)
+
+    // The fixed service name 'graphql-server' is the endpoint for accessing this service
+    this.endpoint = 'graphql-server'
+
+    const processorDbName = 'processor-db'
+    const processorDb = new PostgresServiceDeployment(
+      processorDbName,
+      {
+        namespaceName: args.namespaceName,
+        env: [
+          { name: 'POSTGRES_USER', value: process.env.DB_USER! },
+          { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
+          { name: 'POSTGRES_DB', value: process.env.DB_NAME! },
+        ],
+        storage: args.storage,
+      },
+      { parent: this }
+    )
+
+    const processorMigrationJob = new k8s.batch.v1.Job(
+      'processor-db-migration',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        spec: {
+          backoffLimit: 0,
+          template: {
+            spec: {
+              containers: [
+                {
+                  name: 'db-migration',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  resources: { requests: { cpu: '100m', memory: '100Mi' } },
+                  env: [
+                    {
+                      name: 'WARTHOG_DB_HOST',
+                      value: processorDbName,
+                    },
+                    {
+                      name: 'DB_HOST',
+                      value: processorDbName,
+                    },
+                    { name: 'WARTHOG_DB_DATABASE', value: process.env.DB_NAME! },
+                    { name: 'DB_NAME', value: process.env.DB_NAME! },
+                    { name: 'DB_PASS', value: process.env.DB_PASS! },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
+                },
+              ],
+              restartPolicy: 'Never',
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: processorDb.service }
+    )
+
+    let appLabels = { appClass: 'graphql-server' }
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      'graphql-server',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'graphql-server',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  env: [
+                    { name: 'DB_HOST', value: processorDbName },
+                    { name: 'DB_PASS', value: process.env.DB_PASS! },
+                    { name: 'DB_USER', value: process.env.DB_USER! },
+                    { name: 'DB_PORT', value: process.env.DB_PORT! },
+                    { name: 'DB_NAME', value: process.env.DB_NAME! },
+                    { name: 'GRAPHQL_SERVER_HOST', value: process.env.GRAPHQL_SERVER_HOST! },
+                    { name: 'GRAPHQL_SERVER_PORT', value: process.env.GRAPHQL_SERVER_PORT! },
+                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+                  ],
+                  ports: [{ name: 'graph-ql-port', containerPort: Number(process.env.GRAPHQL_SERVER_PORT!) }],
+                  args: ['workspace', 'query-node-root', 'query-node:start:prod'],
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: processorMigrationJob }
+    )
+
+    // Create a Service for the GraphQL Server
+    this.service = new k8s.core.v1.Service(
+      'graphql-server',
+      {
+        metadata: {
+          labels: appLabels,
+          namespace: args.namespaceName,
+          name: this.endpoint,
+        },
+        spec: {
+          ports: [{ name: 'port-1', port: 8081, targetPort: 'graph-ql-port' }],
+          selector: appLabels,
+        },
+      },
+      { parent: this }
+    )
+
+    const indexerURL = args.externalIndexerUrl || `http://indexer:4000/graphql`
+    appLabels = { appClass: 'processor' }
+
+    const processorDeployment = new k8s.apps.v1.Deployment(
+      `processor`,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'processor',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  env: [
+                    {
+                      name: 'INDEXER_ENDPOINT_URL',
+                      value: indexerURL,
+                    },
+                    { name: 'TYPEORM_HOST', value: processorDbName },
+                    { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                    { name: 'DEBUG', value: 'index-builder:*' },
+                    { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+                  ],
+                  volumeMounts: [
+                    {
+                      mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
+                      name: 'processor-volume',
+                      subPath: 'fileData',
+                    },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: ['cd query-node && yarn hydra-processor run -e ../.env'],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'processor-volume',
+                  configMap: {
+                    name: args.defsConfig,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: this.service }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  joystreamAppsImage: pulumi.Output<string>
+  defsConfig: pulumi.Output<string> | undefined
+  externalIndexerUrl: string | undefined
+  env?: Environment[]
+  storage: Number
+}

+ 0 - 0
devops/infrastructure/query-node/s3Helpers.ts → devops/kubernetes/query-node/s3Helpers.ts


+ 0 - 0
devops/infrastructure/query-node/tsconfig.json → devops/kubernetes/query-node/tsconfig.json


+ 1 - 1
devops/infrastructure/storage-node/.gitignore → devops/kubernetes/storage-node/.gitignore

@@ -1,5 +1,5 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 Pulumi.*.yaml

+ 0 - 0
devops/infrastructure/storage-node/Pulumi.yaml → devops/kubernetes/storage-node/Pulumi.yaml


+ 2 - 2
devops/infrastructure/storage-node/README.md → devops/kubernetes/storage-node/README.md

@@ -39,14 +39,14 @@ After cloning this repo, from this working directory, run these commands:
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
     --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
-    --plaintext isAnonymous=true
+    --plaintext isMinikube=true --plaintext isAnonymous=true
    ```
 
    If running for production use the below mentioned config
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false \
+    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false --plaintext isMinikube=false \
     --plaintext providerId=<ID> --plaintext keyFile=<PATH> --plaintext publicURL=<DOMAIN> --secret passphrase=<PASSPHRASE>
    ```
 

+ 56 - 28
devops/infrastructure/storage-node/index.ts → devops/kubernetes/storage-node/index.ts

@@ -1,6 +1,7 @@
 import * as awsx from '@pulumi/awsx'
 import * as aws from '@pulumi/aws'
 import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
 import * as k8s from '@pulumi/kubernetes'
 import * as pulumi from '@pulumi/pulumi'
 import { CaddyServiceDeployment } from 'pulumi-common'
@@ -15,37 +16,57 @@ const lbReady = config.get('isLoadBalancerReady') === 'true'
 const name = 'storage-node'
 const colossusPort = parseInt(config.get('colossusPort') || '3000')
 const storage = parseInt(config.get('storage') || '40')
+const isMinikube = config.getBoolean('isMinikube')
 
 let additionalParams: string[] | pulumi.Input<string>[] = []
 let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
 let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
 
-// Create a VPC for our cluster.
-const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+export let kubeconfig: pulumi.Output<any>
+export let colossusImage: pulumi.Output<string>
+let provider: k8s.Provider
 
-// Create an EKS cluster with the default configuration.
-const cluster = new eks.Cluster('eksctl-storage-node', {
-  vpcId: vpc.id,
-  subnetIds: vpc.publicSubnetIds,
-  instanceType: 't2.medium',
-  providerCredentialOpts: {
-    profileName: awsConfig.get('profile'),
-  },
-})
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+  // Create image from local app
+  colossusImage = new docker.Image('joystream/colossus', {
+    build: {
+      context: '../../../',
+      dockerfile: '../../../colossus.Dockerfile',
+    },
+    imageName: 'joystream/colossus:latest',
+    skipPush: true,
+  }).baseImageName
+  // colossusImage = pulumi.interpolate`joystream/colossus:latest`
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
 
-// Export the cluster's kubeconfig.
-export const kubeconfig = cluster.kubeconfig
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-storage-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
 
-// Create a repository
-const repo = new awsx.ecr.Repository('colossus-image')
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
 
-// Build an image and publish it to our ECR repository.
-export const colossusImage = repo.buildAndPushImage({
-  dockerfile: '../../../colossus.Dockerfile',
-  context: '../../../',
-})
+  // Create a repository
+  const repo = new awsx.ecr.Repository('colossus-image')
 
-const resourceOptions = { provider: cluster.provider }
+  // Build an image and publish it to our ECR repository.
+  colossusImage = repo.buildAndPushImage({
+    dockerfile: '../../../colossus.Dockerfile',
+    context: '../../../',
+  })
+}
+
+const resourceOptions = { provider: provider }
 
 // Create a Kubernetes Namespace
 const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
@@ -88,14 +109,19 @@ const caddyEndpoints = [
 }`,
 ]
 
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, caddyEndpoints },
-  resourceOptions
-)
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
 
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}
 
 export let appLink: pulumi.Output<string>
 
@@ -180,6 +206,7 @@ const deployment = new k8s.apps.v1.Deployment(
             {
               name: 'colossus',
               image: colossusImage,
+              imagePullPolicy: 'IfNotPresent',
               env: [
                 {
                   name: 'WS_PROVIDER_ENDPOINT_URI',
@@ -222,6 +249,7 @@ const service = new k8s.core.v1.Service(
       name: 'storage-node',
     },
     spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
       ports: [{ name: 'port-1', port: colossusPort }],
       selector: appLabels,
     },

+ 1 - 0
devops/infrastructure/storage-node/package.json → devops/kubernetes/storage-node/package.json

@@ -9,6 +9,7 @@
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
     "pulumi-common": "file:../pulumi-common"
   }
 }

+ 0 - 0
devops/infrastructure/storage-node/tsconfig.json → devops/kubernetes/storage-node/tsconfig.json


+ 1 - 1
node/Cargo.toml

@@ -3,7 +3,7 @@ authors = ['Joystream contributors']
 build = 'build.rs'
 edition = '2018'
 name = 'joystream-node'
-version = '5.8.0'
+version = '5.9.0'
 default-run = "joystream-node"
 
 [[bin]]

+ 8 - 0
node/src/chain_spec/mod.rs

@@ -366,6 +366,14 @@ pub fn testnet_genesis(
                 next_series_id: 1,
                 next_person_id: 1,
                 next_channel_transfer_request_id: 1,
+                video_migration: node_runtime::content::MigrationConfigRecord {
+                    current_id: 1,
+                    final_id: 1,
+                },
+                channel_migration: node_runtime::content::MigrationConfigRecord {
+                    current_id: 1,
+                    final_id: 1,
+                },
             }
         }),
         proposals_codex: Some(ProposalsCodexConfig {

+ 6 - 6
pioneer/packages/joy-proposals/src/Proposal/Body.tsx

@@ -16,7 +16,7 @@ import { formatBalance } from '@polkadot/util';
 import PromiseComponent from '@polkadot/joy-utils/react/components/PromiseComponent';
 import ReactMarkdown from 'react-markdown';
 import { StakingPolicy } from '@joystream/types/hiring';
-import { WorkingGroup, WorkingGroupKey } from '@joystream/types/common';
+import { WorkingGroup } from '@joystream/types/common';
 import { ApplicationsDetailsByOpening } from '@polkadot/joy-utils/react/components/working-groups/ApplicationDetails';
 import { LeadInfoFromId } from '@polkadot/joy-utils/react/components/working-groups/LeadInfo';
 import { formatReward } from '@polkadot/joy-utils/functions/format';
@@ -269,7 +269,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
         : <ApplicationsDetailsByOpening
           openingId={openingId.toNumber()}
           acceptedIds={[succesfulApplicationId.toNumber()]}
-          group={workingGroup.type as WorkingGroupKey}/>,
+          group={workingGroup.type}/>,
       true
     )
   ],
@@ -280,7 +280,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -291,7 +291,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -302,7 +302,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -321,7 +321,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
         'Lead',
         historical
           ? `#${leadId.toNumber()}`
-          : <LeadInfoFromId group={workingGroup.type as WorkingGroupKey} leadId={leadId.toNumber()}/>,
+          : <LeadInfoFromId group={workingGroup.type} leadId={leadId.toNumber()}/>,
         true
       )
     ];

+ 1 - 0
query-node/mappings/bootstrap-data/data/channelCategories.json

@@ -0,0 +1 @@
+[]

+ 1 - 0
query-node/mappings/bootstrap-data/data/videoCategories.json

@@ -0,0 +1 @@
+[]

+ 6 - 2
query-node/mappings/bootstrap-data/index.ts

@@ -1,10 +1,14 @@
-import { MemberJson, StorageSystemJson, WorkingGroupsJson } from './types'
+import { MemberJson, StorageSystemJson, WorkingGroupsJson, VideoCategoryJson, ChannelCategoryJson } from './types'
 import storageSystemJson from './data/storageSystem.json'
 import membersJson from './data/members.json'
 import workingGroupsJson from './data/workingGroups.json'
+import channelCategoriesJson from './data/channelCategories.json'
+import videoCategoriesJson from './data/videoCategories.json'
 
 const storageSystemData: StorageSystemJson = storageSystemJson
 const membersData: MemberJson[] = membersJson
 const workingGroupsData: WorkingGroupsJson = workingGroupsJson
+const channelCategoriesData: ChannelCategoryJson[] = channelCategoriesJson
+const videoCategoriesData: VideoCategoryJson[] = videoCategoriesJson
 
-export { storageSystemData, membersData, workingGroupsData }
+export { storageSystemData, membersData, workingGroupsData, channelCategoriesData, videoCategoriesData }

+ 1 - 1
query-node/mappings/bootstrap-data/scripts/api.ts

@@ -1,5 +1,5 @@
 import { ApiPromise, WsProvider } from '@polkadot/api'
-import types from '@joystream/sumer-types/augment/all/defs.json'
+import types from '@joystream/types/augment/all/defs.json'
 
 export default async function createApi(): Promise<ApiPromise> {
   // Get URL to websocket endpoint from environment or connect to local node by default

+ 68 - 0
query-node/mappings/bootstrap-data/scripts/fetchCategories.ts

@@ -0,0 +1,68 @@
+import fs from 'fs'
+import path from 'path'
+import { ApolloClient, InMemoryCache, HttpLink, gql } from '@apollo/client'
+import fetch from 'cross-fetch'
+
+type Category = {
+  id: string
+  name: string
+  createdInBlock: number
+  createdAt: Date
+  updatedAt: Date
+}
+
+async function main() {
+  const env = process.env
+  const queryNodeUrl: string = env.QUERY_NODE_URL || 'http://127.0.0.1:8081/graphql'
+
+  console.log(`Connecting to Query Node at: ${queryNodeUrl}`)
+  const queryNodeProvider = new ApolloClient({
+    link: new HttpLink({ uri: queryNodeUrl, fetch }),
+    cache: new InMemoryCache(),
+  })
+
+  const videoCategories = await getCategories(queryNodeProvider, 'videoCategories')
+
+  const channelCategories = await getCategories(queryNodeProvider, 'channelCategories')
+
+  fs.writeFileSync(
+    path.resolve(__dirname, '../data/videoCategories.json'),
+    JSON.stringify(videoCategories, undefined, 4)
+  )
+  fs.writeFileSync(
+    path.resolve(__dirname, '../data/channelCategories.json'),
+    JSON.stringify(channelCategories, undefined, 4)
+  )
+
+  console.log(`${videoCategories.length} video categories exported & saved!`)
+  console.log(`${channelCategories.length} channel categories exported & saved!`)
+}
+
+async function getCategories(queryNodeProvider, categoryType): Promise<Array<Category>> {
+  const GET_ALL_CATEGORY_ITEMS = gql`
+    query {
+      ${categoryType} {
+        id
+        name
+        createdInBlock
+        createdAt
+        updatedAt
+      }
+    }
+  `
+  const queryResult = await queryNodeProvider.query({ query: GET_ALL_CATEGORY_ITEMS })
+  const categories = queryResult.data[categoryType].map(({ id, name, createdInBlock, createdAt, updatedAt }) => {
+    return {
+      id,
+      name,
+      createdInBlock,
+      createdAt,
+      updatedAt,
+    }
+  })
+  return categories
+}
+
+main()
+  .then(() => process.exit())
+  .catch(console.error)

+ 1 - 1
query-node/mappings/bootstrap-data/scripts/fetchMembersData.ts

@@ -1,6 +1,6 @@
 import createApi from './api'
 import { ApiPromise } from '@polkadot/api'
-import { MemberId, Membership } from '@joystream/sumer-types/augment/all'
+import { MemberId, Membership } from '@joystream/types/augment/all'
 import { BlockHash } from '@polkadot/types/interfaces'
 import { MemberJson } from '../types'
 import fs from 'fs'

+ 16 - 0
query-node/mappings/bootstrap-data/types.ts

@@ -33,3 +33,19 @@ export type WorkingGroupJson = {
 export type WorkingGroupsJson = {
   [group in 'GATEWAY' | 'STORAGE']?: WorkingGroupJson
 }
+
+export type VideoCategoryJson = {
+  id: string
+  name: string
+  createdInBlock: number
+  createdAt: string
+  updatedAt: string
+}
+
+export type ChannelCategoryJson = {
+  id: string
+  name: string
+  createdInBlock: number
+  createdAt: string
+  updatedAt: string
+}

+ 42 - 2
query-node/mappings/bootstrap.ts

@@ -1,8 +1,22 @@
 import { StoreContext } from '@joystream/hydra-common'
 import BN from 'bn.js'
-import { Membership, MembershipEntryMethod, StorageSystemParameters, Worker, WorkerType } from 'query-node/dist/model'
+import {
+  Membership,
+  MembershipEntryMethod,
+  StorageSystemParameters,
+  Worker,
+  WorkerType,
+  ChannelCategory,
+  VideoCategory,
+} from 'query-node/dist/model'
 import { workerEntityId } from './workingGroup'
-import { storageSystemData, membersData, workingGroupsData } from './bootstrap-data'
+import {
+  storageSystemData,
+  membersData,
+  workingGroupsData,
+  videoCategoriesData,
+  channelCategoriesData,
+} from './bootstrap-data'
 
 export async function bootstrapData({ store }: StoreContext): Promise<void> {
   // Storage system
@@ -55,4 +69,30 @@ export async function bootstrapData({ store }: StoreContext): Promise<void> {
     )
   })
   await Promise.all(workers.map((w) => store.save<Worker>(w)))
+
+  const channelCategories = channelCategoriesData.map(
+    (m) =>
+      new ChannelCategory({
+        id: m.id,
+        name: m.name,
+        channels: [],
+        createdInBlock: m.createdInBlock,
+        createdAt: new Date(m.createdAt),
+        updatedAt: new Date(m.updatedAt),
+      })
+  )
+  await Promise.all(channelCategories.map((m) => store.save<ChannelCategory>(m)))
+
+  const videoCategories = videoCategoriesData.map(
+    (m) =>
+      new VideoCategory({
+        id: m.id,
+        name: m.name,
+        videos: [],
+        createdInBlock: m.createdInBlock,
+        createdAt: new Date(m.createdAt),
+        updatedAt: new Date(m.updatedAt),
+      })
+  )
+  await Promise.all(videoCategories.map((m) => store.save<VideoCategory>(m)))
 }

+ 4 - 3
query-node/mappings/package.json

@@ -12,17 +12,18 @@
     "checks": "prettier ./ --check && yarn lint",
     "format": "prettier ./ --write ",
     "bootstrap-data:fetch:members": "yarn ts-node ./bootstrap-data/scripts/fetchMembersData.ts",
+    "bootstrap-data:fetch:categories": "yarn ts-node ./bootstrap-data/scripts/fetchCategories.ts",
     "bootstrap-data:fetch:workingGroups": "yarn ts-node ./bootstrap-data/scripts/fetchWorkingGroupsData.ts",
-    "bootstrap-data:fetch": "yarn bootstrap-data:fetch:members && yarn bootstrap-data:fetch:workingGroups"
+    "bootstrap-data:fetch": "yarn bootstrap-data:fetch:members && yarn bootstrap-data:fetch:workingGroups && yarn bootstrap-data:fetch:categories"
   },
   "dependencies": {
     "@polkadot/types": "5.9.1",
     "@joystream/hydra-common": "3.1.0-alpha.1",
     "@joystream/hydra-db-utils": "3.1.0-alpha.1",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/sumer-types": "npm:@joystream/types@^0.16.0",
     "@joystream/types": "^0.17.0",
-    "@joystream/warthog": "2.35.0"
+    "@joystream/warthog": "2.35.0",
+    "@apollo/client": "^3.2.5"
   },
   "devDependencies": {
     "prettier": "^2.2.1",

+ 7 - 18
runtime-modules/common/src/working_group.rs

@@ -5,38 +5,27 @@ use serde::{Deserialize, Serialize};
 use strum_macros::EnumIter;
 
 /// Defines well-known working groups.
-/// Additional integer values are set to maintain the index of the enum variants after its
-/// modifying. The 'isize' suffix is required by the 'clippy' linter. We should revisit it after we
-/// upgrade the rust compiler version (current version is "nightly-2021-02-20-x86_64").
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize, EnumIter))]
 #[derive(Encode, Decode, Clone, PartialEq, Eq, Copy, Debug, PartialOrd, Ord)]
 pub enum WorkingGroup {
-    /* Reserved
-        // working_group::Instance0.
-        Reserved,
-    */
-    /* Reserved
-        /// Forum working group: working_group::Instance1.
-        Forum,
-    */
     /// Storage working group: working_group::Instance2.
-    Storage = 2isize,
+    Storage,
 
     /// Content working group: working_group::Instance3.
-    Content = 3isize,
+    Content,
 
     /// Operations working group: working_group::Instance4.
-    OperationsAlpha = 4isize,
+    OperationsAlpha,
 
     /// Gateway working group: working_group::Instance5.
-    Gateway = 5isize,
+    Gateway,
 
     /// Distribution working group: working_group::Instance6.
-    Distribution = 6isize,
+    Distribution,
 
     /// Operations working group: working_group::Instance7.
-    OperationsBeta = 7isize,
+    OperationsBeta,
 
     /// Operations working group: working_group::Instance8.
-    OperationsGamma = 8isize,
+    OperationsGamma,
 }

+ 4 - 0
runtime-modules/content/src/errors.rs

@@ -79,6 +79,10 @@ decl_error! {
         /// Bag Size specified is not valid
         InvalidBagSizeSpecified,
 
+        /// Migration not done yet
+        MigrationNotFinished,
 
     }
 }

+ 141 - 16
runtime-modules/content/src/lib.rs

@@ -60,6 +60,8 @@ pub trait NumericIdentifier:
     + PartialEq
     + Ord
     + Zero
+    + From<u64>
+    + Into<u64>
 {
 }
 
@@ -101,8 +103,29 @@ pub trait Trait:
 
     /// The storage type used
     type DataObjectStorage: storage::DataObjectStorage<Self>;
+
+    /// Number of videos migrated in each block during migration
+    type VideosMigrationsEachBlock: Get<u64>;
+
+    /// Number of channels migrated in each block during migration
+    type ChannelsMigrationsEachBlock: Get<u64>;
 }
 
+/// Data structure used to keep track of the migration
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
+pub struct MigrationConfigRecord<NumericId> {
+    // At each block, the videos/channels removed are those with ids in the
+    // half-open range [current_id, final_id).
+    // When migration is triggered, final_id is updated;
+    // when migration is performed, current_id is updated.
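+    // Illustration (hypothetical numbers): with current_id = 1, final_id = 21 and
+    // 10 migrations per block, the first block removes ids 1..11 and the second
+    // removes ids 11..21, after which current_id == final_id and migration is done.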
+    pub current_id: NumericId,
+    pub final_id: NumericId,
+}
+
+type VideoMigrationConfig<T> = MigrationConfigRecord<<T as Trait>::VideoId>;
+type ChannelMigrationConfig<T> = MigrationConfigRecord<<T as storage::Trait>::ChannelId>;
+
 /// The owner of a channel, is the authorized "actor" that can update
 /// or delete or transfer a channel and its contents.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
@@ -490,6 +513,13 @@ decl_storage! {
 
         /// Map, representing  CuratorGroupId -> CuratorGroup relation
         pub CuratorGroupById get(fn curator_group_by_id): map hasher(blake2_128_concat) T::CuratorGroupId => CuratorGroup<T>;
+
+        /// Migration config for channels
+        pub ChannelMigration get(fn channel_migration) config(): ChannelMigrationConfig<T>;
+
+        /// Migration config for videos:
+        pub VideoMigration get(fn video_migration) config(): VideoMigrationConfig<T>;
+
     }
 }
 
@@ -633,6 +663,9 @@ decl_module! {
             actor: ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
             params: ChannelCreationParameters<T>,
         ) {
+            // ensure migration is done
+            ensure!(Self::is_migration_done(), Error::<T>::MigrationNotFinished);
+
             ensure_actor_authorized_to_create_channel::<T>(
                 origin.clone(),
                 &actor,
@@ -689,7 +722,7 @@ decl_module! {
             params: ChannelUpdateParameters<T>,
         ) {
             // check that channel exists
-            let channel = Self::ensure_channel_exists(&channel_id)?;
+            let channel = Self::ensure_channel_validity(&channel_id)?;
 
             ensure_actor_authorized_to_update_channel::<T>(
                 origin,
@@ -733,8 +766,9 @@ decl_module! {
             channel_id: T::ChannelId,
             num_objects_to_delete: u64,
         ) -> DispatchResult {
+
             // check that channel exists
-            let channel = Self::ensure_channel_exists(&channel_id)?;
+            let channel = Self::ensure_channel_validity(&channel_id)?;
 
             // ensure permissions
             ensure_actor_authorized_to_update_channel::<T>(
@@ -797,7 +831,7 @@ decl_module! {
             rationale: Vec<u8>,
         ) {
             // check that channel exists
-            let channel = Self::ensure_channel_exists(&channel_id)?;
+            let channel = Self::ensure_channel_validity(&channel_id)?;
 
             if channel.is_censored == is_censored {
                 return Ok(())
@@ -919,7 +953,7 @@ decl_module! {
         ) {
 
             // check that channel exists
-            let channel = Self::ensure_channel_exists(&channel_id)?;
+            let channel = Self::ensure_channel_validity(&channel_id)?;
 
             ensure_actor_authorized_to_update_channel::<T>(
                 origin,
@@ -956,10 +990,11 @@ decl_module! {
             // add it to the onchain state
             VideoById::<T>::insert(video_id, video);
 
-            // Only increment next video id if adding content was successful
+            // Only increment next video id
             NextVideoId::<T>::mutate(|id| *id += T::VideoId::one());
 
             // Add recently added video id to the channel
+
             ChannelById::<T>::mutate(channel_id, |channel| {
                 channel.num_videos = channel.num_videos.saturating_add(1);
             });
@@ -975,8 +1010,9 @@ decl_module! {
             video_id: T::VideoId,
             params: VideoUpdateParameters<T>,
         ) {
+
             // check that video exists, retrieve corresponding channel id.
-            let video = Self::ensure_video_exists(&video_id)?;
+            let video = Self::ensure_video_validity(&video_id)?;
 
             let channel_id = video.in_channel;
             let channel = ChannelById::<T>::get(&channel_id);
@@ -1013,15 +1049,13 @@ decl_module! {
             video_id: T::VideoId,
             assets_to_remove: BTreeSet<DataObjectId<T>>,
         ) {
-
             // check that video exists
-            let video = Self::ensure_video_exists(&video_id)?;
+            let video = Self::ensure_video_validity(&video_id)?;
 
             // get information regarding channel
             let channel_id = video.in_channel;
             let channel = ChannelById::<T>::get(channel_id);
 
-
             ensure_actor_authorized_to_update_channel::<T>(
                 origin,
                 &actor,
@@ -1214,7 +1248,7 @@ decl_module! {
             rationale: Vec<u8>,
         ) {
             // check that video exists
-            let video = Self::ensure_video_exists(&video_id)?;
+            let video = Self::ensure_video_validity(&video_id)?;
 
             if video.is_censored == is_censored {
                 return Ok(())
@@ -1267,10 +1301,94 @@ decl_module! {
         ) {
             Self::not_implemented()?;
         }
+
+        fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight {
+            Self::perform_video_migration();
+            Self::perform_channel_migration();
+
+            10_000_000 // TODO: adjust Weight
+        }
     }
 }
 
 impl<T: Trait> Module<T> {
+    /// Migrate Videos
+    fn perform_video_migration() {
+        let MigrationConfigRecord {
+            current_id,
+            final_id,
+        } = <VideoMigration<T>>::get();
+
+        if current_id < final_id {
+            // perform migration procedure
+            let next_id = sp_std::cmp::min(
+                current_id + T::VideosMigrationsEachBlock::get().into(),
+                final_id,
+            );
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            // remove all videos with ids in [current_id, next_id)
+            for id in current_id.into()..next_id.into() {
+                <VideoById<T>>::remove(T::VideoId::from(id));
+            }
+
+            // edit the current id
+            <VideoMigration<T>>::mutate(|value| value.current_id = next_id);
+        }
+    }
+
+    /// Migrate Channels
+    fn perform_channel_migration() {
+        let MigrationConfigRecord {
+            current_id,
+            final_id,
+        } = <ChannelMigration<T>>::get();
+
+        if current_id < final_id {
+            // perform migration procedure
+            let next_id = sp_std::cmp::min(
+                current_id + T::ChannelsMigrationsEachBlock::get().into(),
+                final_id,
+            );
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            // remove all channels with ids in [current_id, next_id)
+            for id in current_id.into()..next_id.into() {
+                <ChannelById<T>>::remove(T::ChannelId::from(id));
+            }
+
+            // edit the current id
+            <ChannelMigration<T>>::mutate(|value| value.current_id = next_id);
+        }
+    }
+
+    /// Returns true only when both the video migration and
+    /// the channel migration have run to completion
+    fn is_migration_done() -> bool {
+        let MigrationConfigRecord {
+            current_id,
+            final_id,
+        } = <VideoMigration<T>>::get();
+
+        let video_migration_done = current_id == final_id;
+
+        let MigrationConfigRecord {
+            current_id,
+            final_id,
+        } = <ChannelMigration<T>>::get();
+
+        let channel_migration_done = current_id == final_id;
+
+        video_migration_done && channel_migration_done
+    }
+
     /// Ensure `CuratorGroup` under given id exists
     fn ensure_curator_group_under_given_id_exists(
         curator_group_id: &T::CuratorGroupId,
@@ -1290,7 +1408,11 @@ impl<T: Trait> Module<T> {
         Ok(Self::curator_group_by_id(curator_group_id))
     }
 
-    fn ensure_channel_exists(channel_id: &T::ChannelId) -> Result<Channel<T>, Error<T>> {
+    fn ensure_channel_validity(channel_id: &T::ChannelId) -> Result<Channel<T>, Error<T>> {
+        // ensure migration is done
+        ensure!(Self::is_migration_done(), Error::<T>::MigrationNotFinished);
+
+        // ensure channel exists
         ensure!(
             ChannelById::<T>::contains_key(channel_id),
             Error::<T>::ChannelDoesNotExist
@@ -1298,7 +1420,11 @@ impl<T: Trait> Module<T> {
         Ok(ChannelById::<T>::get(channel_id))
     }
 
-    fn ensure_video_exists(video_id: &T::VideoId) -> Result<Video<T>, Error<T>> {
+    fn ensure_video_validity(video_id: &T::VideoId) -> Result<Video<T>, Error<T>> {
+        // ensure migration is done
+        ensure!(Self::is_migration_done(), Error::<T>::MigrationNotFinished);
+
+        // ensure video exists
         ensure!(
             VideoById::<T>::contains_key(video_id),
             Error::<T>::VideoDoesNotExist
@@ -1413,10 +1539,9 @@ impl<T: Trait> Module<T> {
 // Reset Videos and Channels on runtime upgrade while preserving next ids and categories.
 impl<T: Trait> Module<T> {
     pub fn on_runtime_upgrade() {
-        // Clear VideoById map
-        <VideoById<T>>::remove_all();
-        // Clear ChannelById map
-        <ChannelById<T>>::remove_all();
+        // setting final index triggers migration
+        <VideoMigration<T>>::mutate(|config| config.final_id = <NextVideoId<T>>::get());
+        <ChannelMigration<T>>::mutate(|config| config.final_id = <NextChannelId<T>>::get());
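+        // e.g. (illustrative) if NextVideoId is 101 and current_id is 1, ids in [1, 101)
+        // are then cleared by perform_video_migration over subsequent blocks,
+        // VideosMigrationsEachBlock at a time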
     }
 }
 

+ 212 - 0
runtime-modules/content/src/tests/migration.rs

@@ -0,0 +1,212 @@
+#![cfg(test)]
+
+use super::mock::*;
+use crate::sp_api_hidden_includes_decl_storage::hidden_include::traits::Currency;
+use crate::*;
+use std::ops::Rem;
+
+fn assert_video_and_channel_extrinsics_with(result: DispatchResult) {
+    let params = VideoCreationParametersRecord {
+        assets: None,
+        meta: None,
+    };
+
+    // attempt to create valid channel if result is ok, otherwise id does not matter
+    let channel_id = if result.is_ok() {
+        Content::next_channel_id()
+    } else {
+        <Test as storage::Trait>::ChannelId::one()
+    };
+
+    // attempt to create valid video if result is ok, otherwise id does not matter
+    let video_id = if result.is_ok() {
+        Content::next_video_id()
+    } else {
+        <Test as Trait>::VideoId::one()
+    };
+
+    assert_eq!(
+        Content::create_channel(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: None,
+                meta: Some(vec![]),
+                reward_account: None,
+            },
+        ),
+        result
+    );
+
+    assert_eq!(
+        Content::create_video(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            channel_id.clone(),
+            params.clone()
+        ),
+        result
+    );
+    assert_eq!(
+        Content::update_channel(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            channel_id.clone(),
+            ChannelUpdateParametersRecord {
+                assets_to_upload: None,
+                new_meta: Some(vec![]),
+                reward_account: None,
+                assets_to_remove: BTreeSet::new(),
+            },
+        ),
+        result
+    );
+    assert_eq!(
+        Content::update_video(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            video_id.clone(),
+            VideoUpdateParametersRecord {
+                assets_to_upload: None,
+                new_meta: Some(vec![]),
+                assets_to_remove: BTreeSet::new(),
+            },
+        ),
+        result
+    );
+
+    assert_eq!(
+        Content::update_channel_censorship_status(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            channel_id.clone(),
+            false,
+            b"test".to_vec()
+        ),
+        result
+    );
+
+    assert_eq!(
+        Content::update_video_censorship_status(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            video_id.clone(),
+            false,
+            b"test".to_vec()
+        ),
+        result
+    );
+
+    assert_eq!(
+        Content::delete_video(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            video_id.clone(),
+            BTreeSet::new(),
+        ),
+        result
+    );
+    assert_eq!(
+        Content::delete_channel(
+            Origin::signed(FIRST_MEMBER_ORIGIN),
+            ContentActor::Member(FIRST_MEMBER_ID),
+            channel_id.clone(),
+            0u64,
+        ),
+        result
+    );
+}
+
+fn setup_scenario_with(n_videos: u64, n_channels: u64) -> (u64, u64) {
+    let _ = balances::Module::<Test>::deposit_creating(
+        &FIRST_MEMBER_ORIGIN,
+        <Test as balances::Trait>::Balance::from(10_000u32),
+    );
+
+    // create n_channels channels
+    for _ in 0..n_channels {
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: None,
+                meta: Some(vec![]),
+                reward_account: None,
+            },
+            Ok(()),
+        );
+    }
+
+    let params = VideoCreationParametersRecord {
+        assets: None,
+        meta: None,
+    };
+
+    // create n_videos videos
+    for i in 0..n_videos {
+        create_video_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            i.rem(n_channels) + 1,
+            params.clone(),
+            Ok(()),
+        );
+    }
+
+    // assert that the specified channels have been created
+    assert_eq!(VideoById::<Test>::iter().count() as u64, n_videos);
+    assert_eq!(ChannelById::<Test>::iter().count() as u64, n_channels);
+
+    let channels_migrations_per_block = <Test as Trait>::ChannelsMigrationsEachBlock::get();
+    let videos_migrations_per_block = <Test as Trait>::VideosMigrationsEachBlock::get();
+
+    // return the number of blocks required for migration
+    let divide_with_ceiling =
+        |x: u64, y: u64| (x / y) + ((x.checked_rem(y).unwrap_or_default() > 0u64) as u64);
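+    // e.g. with the mock constants (10 channel / 20 video migrations per block),
+    // setup_scenario_with(100, 100) returns (10, 5) blocks respectively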
+    (
+        divide_with_ceiling(n_channels, channels_migrations_per_block),
+        divide_with_ceiling(n_videos, videos_migrations_per_block),
+    )
+}
+
+#[test]
+fn migration_test() {
+    with_default_mock_builder(|| {
+        const START_MIGRATION_AT_BLOCK: u64 = 1;
+        run_to_block(START_MIGRATION_AT_BLOCK);
+
+        // setup scenario
+        let (blocks_channels, blocks_videos) = setup_scenario_with(100u64, 100u64);
+
+        // block at which all migrations should be completed
+        let last_migration_block = std::cmp::max(blocks_channels, blocks_videos);
+
+        // ensure we have setup scenario to properly test migration over multiple blocks
+        assert!(last_migration_block > START_MIGRATION_AT_BLOCK);
+
+        // triggering migration
+        Content::on_runtime_upgrade();
+
+        // migration should have started
+        assert!(!Content::is_migration_done());
+
+        // migration is not complete, so all extrinsics should fail
+        assert_video_and_channel_existrinsics_with(Err(Error::<Test>::MigrationNotFinished.into()));
+
+        // make progress with the migration; it should not be complete yet
+        run_to_block(last_migration_block);
+        assert!(!Content::is_migration_done());
+        assert_video_and_channel_existrinsics_with(Err(Error::<Test>::MigrationNotFinished.into()));
+
+        // run the migration up to its expected completion block
+        run_to_block(last_migration_block + 1);
+
+        // assert that maps are cleared & migration is done
+        assert!(Content::is_migration_done());
+        assert_eq!(VideoById::<Test>::iter().count(), 0);
+        assert_eq!(ChannelById::<Test>::iter().count(), 0);
+
+        // video and channel extrinsics now succeed
+        assert_video_and_channel_existrinsics_with(Ok(()));
+    })
+}

+ 19 - 2
runtime-modules/content/src/tests/mock.rs

@@ -325,6 +325,8 @@ impl common::origin::ActorOriginValidator<Origin, u64, u64> for () {
 parameter_types! {
     pub const MaxNumberOfCuratorsPerGroup: u32 = 10;
     pub const ChannelOwnershipPaymentEscrowId: [u8; 8] = *b"12345678";
+    pub const VideosMigrationsEachBlock: u64 = 20;
+    pub const ChannelsMigrationsEachBlock: u64 = 10;
 }
 
 impl Trait for Test {
@@ -360,6 +362,9 @@ impl Trait for Test {
 
     /// The data object used in storage
     type DataObjectStorage = storage::Module<Self>;
+
+    type VideosMigrationsEachBlock = VideosMigrationsEachBlock;
+    type ChannelsMigrationsEachBlock = ChannelsMigrationsEachBlock;
 }
 
 pub type System = frame_system::Module<Test>;
@@ -375,6 +380,8 @@ pub struct ExtBuilder {
     next_series_id: u64,
     next_channel_transfer_request_id: u64,
     next_curator_group_id: u64,
+    video_migration: VideoMigrationConfig<Test>,
+    channel_migration: ChannelMigrationConfig<Test>,
 }
 
 impl Default for ExtBuilder {
@@ -389,6 +396,14 @@ impl Default for ExtBuilder {
             next_series_id: 1,
             next_channel_transfer_request_id: 1,
             next_curator_group_id: 1,
+            video_migration: MigrationConfigRecord {
+                current_id: 1,
+                final_id: 1,
+            },
+            channel_migration: MigrationConfigRecord {
+                current_id: 1,
+                final_id: 1,
+            },
         }
     }
 }
@@ -409,6 +424,8 @@ impl ExtBuilder {
             next_series_id: self.next_series_id,
             next_channel_transfer_request_id: self.next_channel_transfer_request_id,
             next_curator_group_id: self.next_curator_group_id,
+            video_migration: self.video_migration,
+            channel_migration: self.channel_migration,
         }
         .assimilate_storage(&mut t)
         .unwrap();
@@ -425,9 +442,9 @@ pub fn with_default_mock_builder<R, F: FnOnce() -> R>(f: F) -> R {
 // https://substrate.dev/docs/en/next/development/module/tests
 pub fn run_to_block(n: u64) {
     while System::block_number() < n {
-        <System as OnFinalize<u64>>::on_finalize(System::block_number());
+        <Content as OnFinalize<u64>>::on_finalize(System::block_number());
         System::set_block_number(System::block_number() + 1);
-        <System as OnInitialize<u64>>::on_initialize(System::block_number());
+        <Content as OnInitialize<u64>>::on_initialize(System::block_number());
     }
 }
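
The run_to_block change above is what lets these tests exercise the migration at all: the per-block migration steps run from the content pallet's hooks, so stepping only System's OnInitialize/OnFinalize would never advance the migration cursor. A rough sketch of the pattern, using hypothetical helper names (the pallet's actual hook body is not part of this diff):

// hypothetical shape of the content pallet's per-block migration hook
fn on_initialize(_now: T::BlockNumber) -> Weight {
    // advance current_id towards final_id, touching at most
    // ChannelsMigrationsEachBlock / VideosMigrationsEachBlock entries
    Self::migrate_channels_step(); // hypothetical helper
    Self::migrate_videos_step(); // hypothetical helper
    0
}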
 

+ 1 - 0
runtime-modules/content/src/tests/mod.rs

@@ -2,5 +2,6 @@
 
 mod channels;
 mod curators;
+mod migration;
 mod mock;
 mod videos;

+ 3 - 1
runtime-modules/storage/src/lib.rs

@@ -275,7 +275,9 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
         + Default
         + Copy
         + MaybeSerialize
-        + PartialEq;
+        + PartialEq
+        + From<u64>
+        + Into<u64>;
 
     /// Distribution bucket operator ID type (relationship between distribution bucket and
     /// distribution operator).
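
Presumably the new From<u64> + Into<u64> bounds let migration and test code convert between plain counters and the typed ID (the bounded associated type is declared above this hunk, so this reading is an assumption). A minimal self-contained illustration with a hypothetical newtype:

// hypothetical ID newtype satisfying the new bounds
#[derive(Clone, Copy, PartialEq, Debug)]
struct ObjectId(u64);

impl From<u64> for ObjectId {
    fn from(n: u64) -> Self {
        Self(n)
    }
}

impl From<ObjectId> for u64 {
    fn from(id: ObjectId) -> u64 {
        id.0
    }
}

fn main() {
    let id: ObjectId = 5u64.into(); // counter -> typed ID
    let raw: u64 = id.into(); // typed ID -> counter
    assert_eq!(raw, 5);
}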

+ 1 - 1
runtime/Cargo.toml

@@ -4,7 +4,7 @@ edition = '2018'
 name = 'joystream-node-runtime'
 # Follow convention: https://github.com/Joystream/substrate-runtime-joystream/issues/1
 # {Authoring}.{Spec}.{Impl} of the RuntimeVersion
-version = '9.10.0'
+version = '9.11.0'
 
 [dependencies]
 # Third-party dependencies

+ 5 - 1
runtime/src/lib.rs

@@ -83,7 +83,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("joystream-node"),
     impl_name: create_runtime_str!("joystream-node"),
     authoring_version: 9,
-    spec_version: 10,
+    spec_version: 11,
     impl_version: 0,
     apis: crate::runtime_api::EXPORTED_RUNTIME_API_VERSIONS,
     transaction_version: 1,
@@ -429,6 +429,8 @@ impl pallet_finality_tracker::Trait for Runtime {
 parameter_types! {
     pub const MaxNumberOfCuratorsPerGroup: MaxNumber = 50;
     pub const ChannelOwnershipPaymentEscrowId: [u8; 8] = *b"chescrow";
+    pub const VideosMigrationsEachBlock: u64 = 100;
+    pub const ChannelsMigrationsEachBlock: u64 = 25;
 }
 
 impl content::Trait for Runtime {
@@ -443,6 +445,8 @@ impl content::Trait for Runtime {
     type ChannelOwnershipTransferRequestId = ChannelOwnershipTransferRequestId;
     type MaxNumberOfCuratorsPerGroup = MaxNumberOfCuratorsPerGroup;
     type DataObjectStorage = Storage;
+    type VideosMigrationsEachBlock = VideosMigrationsEachBlock;
+    type ChannelsMigrationsEachBlock = ChannelsMigrationsEachBlock;
 }
 
 impl hiring::Trait for Runtime {
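
For a sense of scale, the production pace set here is 100 videos and 25 channels per block; migrating, say, 50,000 videos and 10,000 channels (illustrative counts, not taken from the live chain) would need max(ceil(50000 / 100), ceil(10000 / 25)) = max(500, 400) = 500 blocks, roughly 50 minutes at a typical 6-second block time.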

+ 4 - 23
runtime/src/runtime_api.rs

@@ -22,8 +22,7 @@ use crate::{
 };
 
 use crate::{
-    ContentWorkingGroupInstance, DistributionWorkingGroupInstance,
-    OperationsWorkingGroupInstanceAlpha, OperationsWorkingGroupInstanceBeta,
+    DistributionWorkingGroupInstance, OperationsWorkingGroupInstanceBeta,
     OperationsWorkingGroupInstanceGamma,
 };
 use frame_support::weights::Weight;
@@ -60,10 +59,6 @@ pub type BlockId = generic::BlockId<Block>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<AccountId, Call, Signature, SignedExtra>;
 
-// Alias for the alpha operationsworking group
-pub(crate) type OperationsWorkingGroupAlpha<T> =
-    working_group::Module<T, OperationsWorkingGroupInstanceAlpha>;
-
 // Alias for the beta operations working group
 pub(crate) type OperationsWorkingGroupBeta<T> =
     working_group::Module<T, OperationsWorkingGroupInstanceBeta>;
@@ -72,8 +67,6 @@ pub(crate) type OperationsWorkingGroupBeta<T> =
 pub(crate) type OperationsWorkingGroupGamma<T> =
     working_group::Module<T, OperationsWorkingGroupInstanceGamma>;
 
-pub(crate) type ContentWorkingGroup<T> = working_group::Module<T, ContentWorkingGroupInstance>;
-
 pub(crate) type DistributionWorkingGroup<T> =
     working_group::Module<T, DistributionWorkingGroupInstance>;
 
@@ -92,13 +85,9 @@ impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade {
 
         let default_content_working_group_mint_capacity = 0;
 
-        OperationsWorkingGroupAlpha::<Runtime>::initialize_working_group(
-            default_text_constraint,
-            default_text_constraint,
-            default_text_constraint,
-            default_storage_size_constraint,
-            default_content_working_group_mint_capacity,
-        );
+        // Do not initialize working group module instances whose state
+        // persists across this upgrade:
+        // - OperationsWorkingGroupAlpha (previously OperationsWorkingGroup)
+        // - ContentWorkingGroup (previously ContentDirectoryWorkingGroup)
 
         OperationsWorkingGroupBeta::<Runtime>::initialize_working_group(
             default_text_constraint,
@@ -116,14 +105,6 @@ impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade {
             default_content_working_group_mint_capacity,
         );
 
-        ContentWorkingGroup::<Runtime>::initialize_working_group(
-            default_text_constraint,
-            default_text_constraint,
-            default_text_constraint,
-            default_storage_size_constraint,
-            default_content_working_group_mint_capacity,
-        );
-
         DistributionWorkingGroup::<Runtime>::initialize_working_group(
             default_text_constraint,
             default_text_constraint,

+ 2 - 0
setup.sh

@@ -3,6 +3,8 @@
 set -e
 
 if [[ "$OSTYPE" == "linux-gnu" ]]; then
+    # Prevent interactive prompts that would interrupt the installation
+    export DEBIAN_FRONTEND=noninteractive
     # code build tools
     sudo apt-get update
     sudo apt-get install -y coreutils clang llvm jq curl gcc xz-utils sudo pkg-config unzip libc6-dev make libssl-dev python
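
For context on this change: DEBIAN_FRONTEND=noninteractive makes apt-get's package-configuration step accept defaults instead of blocking on terminal prompts (tzdata is a common offender in unattended installs), which keeps the script non-interactive on fresh Linux hosts.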

+ 7 - 6
tests/network-tests/assets/TestChannel.json

@@ -1,9 +1,10 @@
 {
-  "handle": "Storage node channel",
-  "description": "Storage node channel",
-  "language": { "existing": { "code": "EN" } },
-  "coverPhotoUrl": "",
-  "avatarPhotoUrl": "",
+  "title": "Example Joystream Channel",
+  "description": "This is an awesome example channel!",
   "isPublic": true,
-  "isCensored": false
+  "language": "en",
+  "category": 1,
+  "avatarPhotoPath": "./joystream.png",
+  "coverPhotoPath": "./joystream.png",
+  "rewardAccount": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"
 }

Some files were not shown because too many files changed in this diff