Merge branch 'giza_staging' into distributor-node-staging

Leszek Wiesner 3 years ago
commit 2d8324c18d
100 changed files with 1975 additions and 1149 deletions. Each entry below lists additions, deletions, and the file path.
  1. 41 16
      .env
  2. 58 47
      .github/workflows/create-ami.yml
  3. 68 0
      .github/workflows/deploy-playground.yml
  4. 32 32
      .github/workflows/joystream-cli.yml
  5. 13 4
      .github/workflows/joystream-node-docker.yml
  6. 4 0
      .github/workflows/network-tests.yml
  7. 16 48
      .github/workflows/run-network-tests.yml
  8. 4 2
      .github/workflows/storage-node.yml
  9. 2 2
      Cargo.lock
  10. 6 3
      README.md
  11. 0 37
      build-docker-images.sh
  12. 24 0
      build-node-docker.sh
  13. 2 2
      build-packages.sh
  14. 0 0
      chain-metadata.json
  15. 1 1
      cli/package.json
  16. 1 1
      cli/src/base/UploadCommandBase.ts
  17. 7 1
      colossus.Dockerfile
  18. 0 0
      devops/aws/.gitignore
  19. 8 2
      devops/aws/README.md
  20. 0 0
      devops/aws/ansible.cfg
  21. 45 0
      devops/aws/build-arm64-playbook.yml
  22. 0 0
      devops/aws/build-code.yml
  23. 0 0
      devops/aws/chain-spec-pioneer.yml
  24. 13 0
      devops/aws/cloudformation/infrastructure.yml
  25. 20 1
      devops/aws/cloudformation/single-instance-docker.yml
  26. 3 0
      devops/aws/cloudformation/single-instance.yml
  27. 0 0
      devops/aws/common.sh
  28. 39 0
      devops/aws/create-joystream-node-ami-playbook.yml
  29. 5 9
      devops/aws/deploy-infra.sample.cfg
  30. 3 2
      devops/aws/deploy-infra.sh
  31. 108 0
      devops/aws/deploy-playground-playbook.yml
  32. 46 0
      devops/aws/deploy-playground.sh
  33. 0 0
      devops/aws/deploy-single-node-playbook.yml
  34. 19 0
      devops/aws/deploy-single-node.sample.cfg
  35. 3 8
      devops/aws/deploy-single-node.sh
  36. 1 1
      devops/aws/destroy-infra.sh
  37. 0 0
      devops/aws/group_vars/all
  38. 0 0
      devops/aws/library/json_modify.py
  39. 0 0
      devops/aws/requirements.yml
  40. 0 0
      devops/aws/roles/admin/tasks/deploy-pioneer.yml
  41. 3 0
      devops/aws/roles/admin/tasks/main.yml
  42. 96 0
      devops/aws/roles/common/tasks/chain-spec-node-keys.yml
  43. 1 0
      devops/aws/roles/common/tasks/get-code-git.yml
  44. 0 0
      devops/aws/roles/common/tasks/get-code-local.yml
  45. 7 4
      devops/aws/roles/common/tasks/run-setup-build.yml
  46. 0 0
      devops/aws/roles/node/templates/joystream-node.service.j2
  47. 0 0
      devops/aws/roles/rpc/tasks/main.yml
  48. 0 0
      devops/aws/roles/rpc/templates/Caddyfile.j2
  49. 0 0
      devops/aws/roles/rpc/templates/joystream-node.service.j2
  50. 0 0
      devops/aws/roles/validators/tasks/main.yml
  51. 0 0
      devops/aws/roles/validators/templates/joystream-node.service.j2
  52. 0 0
      devops/aws/setup-admin.yml
  53. 49 0
      devops/aws/templates/Playground-Caddyfile.j2
  54. 0 50
      devops/infrastructure/build-arm64-playbook.yml
  55. 0 45
      devops/infrastructure/github-action-playbook.yml
  56. 0 1
      devops/infrastructure/pulumi-common/index.ts
  57. 0 19
      devops/infrastructure/query-node/Pulumi.yaml
  58. 0 461
      devops/infrastructure/query-node/index.ts
  59. 0 76
      devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml
  60. 0 236
      devops/infrastructure/storage-node/index.ts
  61. 1 1
      devops/kubernetes/argus/.gitignore
  62. 35 0
      devops/kubernetes/argus/Pulumi.yaml
  63. 12 9
      devops/kubernetes/argus/README.md
  64. 5 0
      devops/kubernetes/argus/docker_dummy/Dockerfile
  65. 229 0
      devops/kubernetes/argus/index.ts
  66. 1 0
      devops/kubernetes/argus/package.json
  67. 0 0
      devops/kubernetes/argus/tsconfig.json
  68. 1 1
      devops/kubernetes/node-network/.gitignore
  69. 0 0
      devops/kubernetes/node-network/Pulumi.yaml
  70. 0 0
      devops/kubernetes/node-network/README.md
  71. 0 0
      devops/kubernetes/node-network/configMap.ts
  72. 0 0
      devops/kubernetes/node-network/index.ts
  73. 0 0
      devops/kubernetes/node-network/json_modify.py
  74. 0 0
      devops/kubernetes/node-network/nfsVolume.ts
  75. 0 0
      devops/kubernetes/node-network/package.json
  76. 0 0
      devops/kubernetes/node-network/tsconfig.json
  77. 0 0
      devops/kubernetes/node-network/utils.ts
  78. 0 0
      devops/kubernetes/node-network/validator.ts
  79. 0 0
      devops/kubernetes/pulumi-common/caddy.ts
  80. 0 0
      devops/kubernetes/pulumi-common/configMap.ts
  81. 109 0
      devops/kubernetes/pulumi-common/database.ts
  82. 4 0
      devops/kubernetes/pulumi-common/index.ts
  83. 0 0
      devops/kubernetes/pulumi-common/package.json
  84. 0 0
      devops/kubernetes/pulumi-common/tsconfig.json
  85. 43 0
      devops/kubernetes/pulumi-common/volume.ts
  86. 1 1
      devops/kubernetes/query-node/.gitignore
  87. 36 0
      devops/kubernetes/query-node/Pulumi.yaml
  88. 139 0
      devops/kubernetes/query-node/README.md
  89. 29 0
      devops/kubernetes/query-node/configMap.ts
  90. 5 0
      devops/kubernetes/query-node/docker_dummy/Dockerfile
  91. 137 0
      devops/kubernetes/query-node/index.ts
  92. 158 0
      devops/kubernetes/query-node/indexerDeployment.ts
  93. 0 0
      devops/kubernetes/query-node/package.json
  94. 237 0
      devops/kubernetes/query-node/processorDeployment.ts
  95. 0 0
      devops/kubernetes/query-node/s3Helpers.ts
  96. 0 0
      devops/kubernetes/query-node/tsconfig.json
  97. 5 0
      devops/kubernetes/storage-node/.gitignore
  98. 16 12
      devops/kubernetes/storage-node/Pulumi.yaml
  99. 19 14
      devops/kubernetes/storage-node/README.md
  100. 5 0
      devops/kubernetes/storage-node/docker_dummy/Dockerfile

+ 41 - 16
.env

@@ -9,32 +9,57 @@ INDEXER_DB_NAME=query_node_indexer
 DB_NAME=query_node_processor
 DB_USER=postgres
 DB_PASS=postgres
-DB_HOST=db
+# This value will not be used by query-node docker containers.
+# When running query-node with docker, these services will always use the db service.
+DB_HOST=localhost
 DB_PORT=5432
 DEBUG=index-builder:*
 TYPEORM_LOGGING=error

-###########################
-#    Indexer options      #
-###########################
-
+## Indexer options
 # Block height to start indexing from.
 # Note, that if there are already some indexed events, this setting is ignored
 BLOCK_HEIGHT=0

-###############################
-#    Processor GraphQL API    #
-###############################
+# Query node GraphQL server port
+GRAPHQL_SERVER_PORT=8081

-GRAPHQL_SERVER_PORT=4002
-GRAPHQL_SERVER_HOST=graphql-server
+# Query node playground subscription endpoint
 GRAPHQL_PLAYGROUND_SUBSCRIPTION_ENDPOINT=ws://localhost:8081/graphql

-WARTHOG_APP_PORT=4002
-WARTHOG_APP_HOST=hydra-indexer-gateway
+# Hydra indexer gateway GraphQL server port
+HYDRA_INDEXER_GATEWAY_PORT=4000
+
+# Default GraphQL server host. It is required during "query-node config:dev"
+GRAPHQL_SERVER_HOST=localhost
+
+# Websocket RPC endpoint containers will use.
+JOYSTREAM_NODE_WS=ws://joystream-node:9944/
+
+# Query node which colossus will use
+COLOSSUS_QUERY_NODE_URL=http://graphql-server:${GRAPHQL_SERVER_PORT}/graphql
+
+# Query node which distributor will use
+DISTRIBUTOR_QUERY_NODE_URL=http://graphql-server:${GRAPHQL_SERVER_PORT}/graphql
+
+# Indexer gateway used by processor. If you don't use the local indexer set this to a remote gateway
+PROCESSOR_INDEXER_GATEWAY=http://hydra-indexer-gateway:${HYDRA_INDEXER_GATEWAY_PORT}/graphql
+
+# Colossus services identities
+COLOSSUS_1_WORKER_ID=0
+COLOSSUS_1_WORKER_URI=//testing//worker//Storage//${COLOSSUS_1_WORKER_ID}
+COLOSSUS_1_TRANSACTOR_URI=//Colossus1
+
+COLOSSUS_2_WORKER_ID=1
+COLOSSUS_2_WORKER_URI=//testing//worker//Storage//${COLOSSUS_2_WORKER_ID}
+COLOSSUS_2_TRANSACTOR_URI=//Colossus2
+
+# Distributor node services identities
+DISTRIBUTOR_1_WORKER_ID=0
+DISTRIBUTOR_1_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_1_WORKER_ID}

-# Default configuration is to use the docker container
-WS_PROVIDER_ENDPOINT_URI=ws://joystream-node:9944/
+DISTRIBUTOR_2_WORKER_ID=1
+DISTRIBUTOR_2_ACCOUNT_URI=//testing//worker//Distribution//${DISTRIBUTOR_2_WORKER_ID}

-# If running joystream-node on host machine you can use following address to reach it instead
-# WS_PROVIDER_ENDPOINT_URI=ws://host.docker.internal:9944/
+# joystream/node docker image tag
+JOYSTREAM_NODE_TAG=latest
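
A quick way to sanity-check how these values interpolate into the compose stack; a minimal sketch, assuming docker-compose v1.25+ run from the repo root where this .env lives:

```
docker-compose config | grep QUERY_NODE_URL
# COLOSSUS_QUERY_NODE_URL and DISTRIBUTOR_QUERY_NODE_URL should expand to
# http://graphql-server:8081/graphql once GRAPHQL_SERVER_PORT is substituted
```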

+ 58 - 47
.github/workflows/create-ami.yml

@@ -1,3 +1,5 @@
+# Creates an AWS AMI (system image) with compiled joystream-node and subkey
+# 
 name: Create AWS AMI

 on:
@@ -8,52 +10,61 @@ jobs:
     name: Build the code and run setup
     runs-on: ubuntu-latest
     env:
-      STACK_NAME: joystream-github-action-${{ github.run_number }}
+      STACK_NAME: create-joystream-node-ami-ga-${{ github.run_number }}
       KEY_NAME: joystream-github-action-key
     steps:
-    - name: Extract branch name
-      shell: bash
-      run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-      id: extract_branch
-
-    - name: Set AMI Name environment variable
-      shell: bash
-      run: echo "ami_name=joystream-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
-      id: ami_name
-
-    - name: Checkout
-      uses: actions/checkout@v2
-
-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v1
-      with:
-        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-        aws-region: us-east-1
-
-    - name: Deploy to AWS CloudFormation
-      uses: aws-actions/aws-cloudformation-github-deploy@v1
-      id: deploy_stack
-      with:
-        name: ${{ env.STACK_NAME }}
-        template: devops/infrastructure/single-instance.yml
-        no-fail-on-empty-changeset: "1"
-        parameter-overrides: "KeyName=${{ env.KEY_NAME }}"
-
-    - name: Install Ansible dependencies
-      run: pipx inject ansible-core boto3 botocore
-
-    - name: Run playbook
-      uses: dawidd6/action-ansible-playbook@v2
-      with:
-        playbook: github-action-playbook.yml
-        directory: devops/infrastructure
-        requirements: requirements.yml
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        inventory: |
-          [all]
-          ${{ steps.deploy_stack.outputs.PublicIp }}
-        options: |
-          --extra-vars "git_repo=https://github.com/${{ github.repository }} \
-                        branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
-                        stack_name=${{ env.STACK_NAME }} ami_name=${{ env.ami_name }}"
+      - name: Extract branch name
+        shell: bash
+        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        id: extract_branch
+
+      - name: Set AMI Name environment variable
+        shell: bash
+        run: echo "ami_name=joystream-node-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
+        id: ami_name
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Deploy to AWS CloudFormation
+        uses: aws-actions/aws-cloudformation-github-deploy@v1
+        id: deploy_stack
+        with:
+          name: ${{ env.STACK_NAME }}
+          template: devops/aws/cloudformation/single-instance.yml
+          no-fail-on-empty-changeset: '1'
+          parameter-overrides: 'KeyName=${{ env.KEY_NAME }}'
+
+      - name: Install Ansible dependencies
+        run: pipx inject ansible-core boto3 botocore
+
+      - name: Run playbook
+        uses: dawidd6/action-ansible-playbook@v2
+        with:
+          playbook: create-joystream-node-ami-playbook.yml
+          directory: devops/aws
+          requirements: requirements.yml
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          inventory: |
+            [all]
+            ${{ steps.deploy_stack.outputs.PublicIp }}
+          options: |
+            --extra-vars "git_repo=https://github.com/${{ github.repository }} \
+                          branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
+                          ami_name=${{ env.ami_name }}"
+
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}
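
Once the workflow finishes, the resulting AMI can be located by the name pattern set above; a sketch, assuming AWS credentials configured for us-east-1:

```
aws ec2 describe-images --owners self \
  --filters "Name=name,Values=joystream-node-*" \
  --query 'Images[].{Id:ImageId,Name:Name}'
```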

+ 68 - 0
.github/workflows/deploy-playground.yml

@@ -0,0 +1,68 @@
+name: Deploy Playground
+
+on:
+  workflow_dispatch:
+    inputs:
+      gitRepo:
+        description: 'Code repository'
+        required: false
+        default: 'https://github.com/Joystream/joystream.git'
+      branchName:
+        description: 'Branch to deploy'
+        required: false
+        default: 'master'
+      keyName:
+        description: 'SSH key pair on AWS'
+        required: false
+        default: 'joystream-github-action-key'
+      instanceType:
+        description: 'AWS EC2 instance type (t2.micro, t2.large)'
+        required: false
+        default: 't2.micro'
+
+defaults:
+  run:
+    working-directory: devops/aws
+
+jobs:
+  deploy-playground:
+    name: Create an EC2 instance and configure docker-compose stack
+    runs-on: ubuntu-latest
+    env:
+      STACK_NAME: joystream-playground-${{ github.event.inputs.branchName }}-${{ github.run_number }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Install Ansible dependencies
+        run: pipx inject ansible-core boto3 botocore
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Deploy to AWS CloudFormation
+        uses: aws-actions/aws-cloudformation-github-deploy@v1
+        id: deploy_stack
+        with:
+          name: ${{ env.STACK_NAME }}
+          template: devops/aws/cloudformation/single-instance-docker.yml
+          no-fail-on-empty-changeset: '1'
+          parameter-overrides: 'KeyName=${{ github.event.inputs.keyName }},EC2InstanceType=${{ github.event.inputs.instanceType }}'
+
+      - name: Run playbook
+        uses: dawidd6/action-ansible-playbook@v2
+        with:
+          playbook: deploy-playground-playbook.yml
+          directory: devops/aws
+          requirements: requirements.yml
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          inventory: |
+            [all]
+            ${{ steps.deploy_stack.outputs.PublicIp }}
+          options: |
+            --extra-vars "git_repo=${{ github.event.inputs.gitRepo }} \
+                          branch_name=${{ github.event.inputs.branchName }}"

+ 32 - 32
.github/workflows/joystream-cli.yml

@@ -9,22 +9,22 @@ jobs:
       matrix:
         node-version: [14.x]
     steps:
-    - uses: actions/checkout@v1
-    - name: Use Node.js ${{ matrix.node-version }}
-      uses: actions/setup-node@v1
-      with:
-        node-version: ${{ matrix.node-version }}
-    - name: checks
-      run: |
-        yarn install --frozen-lockfile
-        yarn workspace @joystream/types build
-        yarn workspace @joystream/metadata-protobuf build
-        yarn workspace @joystream/cli checks --quiet
-    - name: yarn pack test
-      run: |
-        yarn workspace @joystream/cli pack --filename cli-pack-test.tgz
-        tar zxvf ./cli/cli-pack-test.tgz -C cli
-        cd ./cli/package && yarn link
+      - uses: actions/checkout@v1
+      - name: Use Node.js ${{ matrix.node-version }}
+        uses: actions/setup-node@v1
+        with:
+          node-version: ${{ matrix.node-version }}
+      - name: checks
+        run: |
+          yarn install --frozen-lockfile
+          yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli checks --quiet
+      - name: yarn pack test
+        run: |
+          yarn workspace @joystream/cli pack --filename cli-pack-test.tgz
+          tar zxvf ./cli/cli-pack-test.tgz -C cli
+          cd ./cli/package && yarn link

   cli_build_osx:
     name: MacOS Checks
@@ -33,19 +33,19 @@ jobs:
       matrix:
         node-version: [14.x]
     steps:
-    - uses: actions/checkout@v1
-    - name: Use Node.js ${{ matrix.node-version }}
-      uses: actions/setup-node@v1
-      with:
-        node-version: ${{ matrix.node-version }}
-    - name: checks
-      run: |
-        yarn install --frozen-lockfile --network-timeout 120000
-        yarn workspace @joystream/types build
-        yarn workspace @joystream/metadata-protobuf build
-        yarn workspace @joystream/cli checks --quiet
-    - name: yarn pack test
-      run: |
-        yarn workspace @joystream/cli pack --filename cli-pack-test.tgz
-        tar zxvf ./cli/cli-pack-test.tgz -C cli
-        cd ./cli/package && yarn link
+      - uses: actions/checkout@v1
+      - name: Use Node.js ${{ matrix.node-version }}
+        uses: actions/setup-node@v1
+        with:
+          node-version: ${{ matrix.node-version }}
+      - name: checks
+        run: |
+          yarn install --frozen-lockfile --network-timeout 120000
+          yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli checks --quiet
+      - name: yarn pack test
+        run: |
+          yarn workspace @joystream/cli pack --filename cli-pack-test.tgz
+          tar zxvf ./cli/cli-pack-test.tgz -C cli
+          cd ./cli/package && yarn link

+ 13 - 4
.github/workflows/joystream-node-docker.yml

@@ -71,7 +71,7 @@ jobs:
             platform_tag: 'arm'
             file: 'joystream-node-armv7.Dockerfile'
     env:
-      STACK_NAME: joystream-ga-docker-${{ github.run_number }}-${{ matrix.platform_tag }}
+      STACK_NAME: build-joystream-node-docker-ga-${{ github.run_number }}-${{ matrix.platform_tag }}
     steps:
       - name: Extract branch name
         shell: bash
@@ -120,7 +120,7 @@ jobs:
         id: deploy_stack
         with:
           name: ${{ env.STACK_NAME }}
-          template: devops/infrastructure/single-instance-docker.yml
+          template: devops/aws/cloudformation/single-instance-docker.yml
           no-fail-on-empty-changeset: '1'
           parameter-overrides: 'KeyName=${{ env.KEY_NAME }},EC2AMI=ami-00d1ab6b335f217cf,EC2InstanceType=t4g.xlarge'
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
@@ -129,7 +129,7 @@ jobs:
         uses: dawidd6/action-ansible-playbook@v2
         with:
           playbook: build-arm64-playbook.yml
-          directory: devops/infrastructure
+          directory: devops/aws
           requirements: requirements.yml
           key: ${{ secrets.SSH_PRIVATE_KEY }}
           inventory: |
@@ -142,9 +142,18 @@ jobs:
                           docker_password=${{ secrets.DOCKERHUB_PASSWORD }} \
                           tag_name=${{ steps.compute_shasum.outputs.shasum }}-${{ matrix.platform_tag }} \
                           repository=${{ env.REPOSITORY }} dockerfile=${{ matrix.file }} \
-                          stack_name=${{ env.STACK_NAME }} platform=${{ matrix.platform }}"
+                          platform=${{ matrix.platform }}"
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}

+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}
+
   push-manifest:
     name: Create manifest using both the arch images
     needs: [push-amd64, push-arm]

+ 4 - 0
.github/workflows/network-tests.yml

@@ -18,6 +18,8 @@ jobs:
       run: |
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace @joystream/cli build
         yarn workspace network-tests checks --quiet

   network_build_osx:
@@ -36,4 +38,6 @@ jobs:
       run: |
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace @joystream/cli build
         yarn workspace network-tests checks --quiet

+ 16 - 48
.github/workflows/run-network-tests.yml

@@ -100,10 +100,19 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
+      - name: Install joystream-cli @joystream/cli/0.5.1
+        run: npm -g install @joystream/cli
       - name: Execute network tests
-        run: RUNTIME=sumer tests/network-tests/run-tests.sh full
+        run: |
+          export HOME=${PWD}
+          mkdir -p ${HOME}/.local/share/joystream-cli
+          joystream-cli api:setUri ws://localhost:9944
+          export RUNTIME=sumer
+          tests/network-tests/run-migration-tests.sh

   basic_runtime:
     name: Integration Tests (New Chain)
@@ -126,13 +135,15 @@ jobs:
         run: |
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
+          yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure tests are runnable
         run: yarn workspace network-tests build
       - name: Execute network tests
         run: tests/network-tests/run-tests.sh full

-  query_node:
-    name: Query Node Integration Tests
+  new_chain_setup:
+    name: Initialize new chain
     needs: build_images
     runs-on: ubuntu-latest
     steps:
@@ -153,6 +164,7 @@ jobs:
           yarn install --frozen-lockfile
           yarn workspace @joystream/types build
           yarn workspace @joystream/metadata-protobuf build
+          yarn workspace @joystream/cli build
       - name: Ensure query-node builds
         run: yarn workspace query-node-root build
       - name: Ensure tests are runnable
@@ -160,49 +172,5 @@ jobs:
       # Bring up hydra query-node development instance, then run content directory
       # integration tests
       - name: Execute Tests
-        run: |
-          docker-compose up -d joystream-node
-          query-node/run-tests.sh
+        run: tests/network-tests/test-setup-new-chain.sh

-  storage_node:
-    name: Storage Node Tests
-    needs: build_images
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '14.x'
-      - name: Get artifacts
-        uses: actions/download-artifact@v2
-        with:
-          name: ${{ needs.build_images.outputs.use_artifact }}
-      - name: Install artifacts
-        run: |
-          docker load --input joystream-node-docker-image.tar.gz
-          docker images
-      - name: Install packages and dependencies
-        run: |
-          yarn install --frozen-lockfile
-          yarn workspace @joystream/types build
-      - name: Build storage node
-        run: yarn workspace storage-node build
-      - name: Start Services
-        run: |
-          docker-compose up -d ipfs
-          docker-compose up -d joystream-node
-      - name: Configure and start development storage node
-        run: |
-          DEBUG=joystream:* yarn storage-cli dev-init
-          docker-compose up -d colossus
-      - name: Test uploading
-        run: |
-          sleep 6
-          export DEBUG=joystream:*
-          yarn storage-cli upload ./tests/network-tests/assets/joystream.MOV 1 0
-          # Wait for storage-node to set status Accepted on uploaded content
-          sleep 6
-          cd utils/api-scripts/
-          # Assume only one accepted data object was created
-          CONTENT_ID=`yarn --silent script get-first-content-id | tail -n2 | head -n1`
-          yarn storage-cli download ${CONTENT_ID} ./joystream.mov

+ 4 - 2
.github/workflows/storage-node.yml

@@ -18,7 +18,8 @@ jobs:
       run: |
         yarn install --frozen-lockfile
         yarn workspace @joystream/types build
-        yarn workspace storage-node checks --quiet
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace storage-node lint --quiet
         yarn workspace storage-node build

   storage_node_build_osx:
@@ -37,5 +38,6 @@ jobs:
       run: |
         yarn install --frozen-lockfile --network-timeout 120000
         yarn workspace @joystream/types build
-        yarn workspace storage-node checks --quiet
+        yarn workspace @joystream/metadata-protobuf build
+        yarn workspace storage-node lint --quiet
         yarn workspace storage-node build

+ 2 - 2
Cargo.lock

@@ -2332,7 +2332,7 @@ dependencies = [

 [[package]]
 name = "joystream-node"
-version = "5.10.0"
+version = "5.12.0"
 dependencies = [
  "frame-benchmarking",
  "frame-benchmarking-cli",
@@ -2393,7 +2393,7 @@ dependencies = [

 [[package]]
 name = "joystream-node-runtime"
-version = "9.10.0"
+version = "9.12.0"
 dependencies = [
  "frame-benchmarking",
  "frame-executive",

+ 6 - 3
README.md

@@ -14,7 +14,7 @@ The following tools are required for building, testing and contributing to this
 - [Rust](https://www.rust-lang.org/tools/install) toolchain - _required_
 - [nodejs](https://nodejs.org/) v14.x - _required_
 - [yarn classic](https://classic.yarnpkg.com/en/docs/install) package manager v1.22.x - _required_
-- [docker](https://www.docker.com/get-started) and docker-compose - _optional_
+- [docker](https://www.docker.com/get-started) and docker-compose - _required_
 - [ansible](https://www.ansible.com/) - _optional_

 If you use VSCode as your code editor we recommend using the workspace [settings](devops/vscode/settings.json) for the recommended eslint plugin to function properly.
@@ -25,8 +25,11 @@ After cloning the repo run the following initialization scripts:
 # Install rust toolchain
 ./setup.sh

-# Install npm package dependencies, build packages and docker images
-yarn build
+# build local npm packages
+yarn build:packages
+
+# Build joystream/node docker image
+yarn build:node:docker

 # start a local development network
 yarn start

+ 0 - 37
build-docker-images.sh

@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-if ! command -v docker-compose &> /dev/null
-then
-  echo "docker-compose not found. Skipping docker image builds."
-  exit 0
-fi
-
-# Build or fetch cached joystream/node docker image
-if [[ "$SKIP_JOYSTREAM_NODE" = 1 || "$SKIP_JOYSTREAM_NODE" = "true" ]]; then
-  echo "Skipping build of joystream/node docker image."
-else
-  # Fetch a cached joystream/node image if one is found matching code shasum instead of building
-  CODE_HASH=`scripts/runtime-code-shasum.sh`
-  IMAGE=joystream/node:${CODE_HASH}
-  echo "Trying to fetch cached ${IMAGE} image"
-  docker pull ${IMAGE} || :
-
-  if ! docker inspect ${IMAGE} > /dev/null;
-  then
-    echo "Fetch failed, building image locally"
-    docker-compose build joystream-node
-  else
-    echo "Tagging cached image as 'latest'"
-    docker image tag ${IMAGE} joystream/node:latest
-  fi
-fi
-
-# Build joystream/apps docker image
-echo "Building 'joystream/apps' docker image..."
-docker-compose build colossus
-
-# Build the pioneer docker image
-echo "Building pioneer docker image"
-docker-compose build pioneer

+ 24 - 0
build-node-docker.sh

@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+
+if ! command -v docker-compose &> /dev/null
+then
+  echo "docker-compose not found. Skipping docker image builds."
+  exit 0
+fi
+
+# Fetch a cached joystream/node image if one is found matching code shasum instead of building
+CODE_HASH=`scripts/runtime-code-shasum.sh`
+IMAGE=joystream/node:${CODE_HASH}
+echo "Trying to fetch cached ${IMAGE} image"
+docker pull ${IMAGE} || :
+
+if ! docker inspect ${IMAGE} > /dev/null;
+then
+  echo "Fetch failed, building image locally"
+  docker-compose build joystream-node
+else
+  echo "Tagging cached image as 'latest'"
+  docker image tag ${IMAGE} joystream/node:latest
+fi
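
Usage is a single invocation from the repo root; whether it pulls a cached image or builds locally depends on whether an image matching the runtime code hash already exists. A sketch:

```
./build-node-docker.sh
# afterwards, the code-hash tag and 'latest' should reference the same image:
docker images joystream/node
```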

+ 2 - 2
build-npm-packages.sh → build-packages.sh

@@ -2,11 +2,11 @@

 set -e

-yarn
+yarn --frozen-lockfile
 yarn workspace @joystream/types build
 yarn workspace @joystream/metadata-protobuf build
 yarn workspace query-node-root build
 yarn workspace @joystream/cli build
 yarn workspace storage-node build
-yarn workspace storage-node build
+yarn workspace @joystream/distributor-cli build
 yarn workspace pioneer build

File diff suppressed because it is too large
+ 0 - 0
chain-metadata.json


+ 1 - 1
cli/package.json

@@ -44,7 +44,7 @@
     "proper-lockfile": "^4.1.1",
     "proper-lockfile": "^4.1.1",
     "slug": "^2.1.1",
     "slug": "^2.1.1",
     "tslib": "^1.11.1",
     "tslib": "^1.11.1",
-    "blake3": "^2.1.4",
+    "blake3-wasm": "^2.1.5",
     "multihashes": "^4.0.3",
     "multihashes": "^4.0.3",
     "@apollo/client": "^3.2.5",
     "@apollo/client": "^3.2.5",
     "cross-fetch": "^3.0.6",
     "cross-fetch": "^3.0.6",

+ 1 - 1
cli/src/base/UploadCommandBase.ts

@@ -20,7 +20,7 @@ import mimeTypes from 'mime-types'
 import { Assets } from '../schemas/typings/Assets.schema'
 import chalk from 'chalk'
 import { DataObjectCreationParameters } from '@joystream/types/storage'
-import { createHash } from 'blake3'
+import { createHash } from 'blake3-wasm'
 import * as multihash from 'multihashes'
 import { u8aToHex, formatBalance } from '@polkadot/util'
 import { KeyringPair } from '@polkadot/keyring/types'

+ 7 - 1
colossus.Dockerfile

@@ -32,4 +32,10 @@ ENV ACCOUNT_URI=
 EXPOSE ${COLOSSUS_PORT}

 WORKDIR /joystream/storage-node
-ENTRYPOINT yarn storage-node server --queryNodeEndpoint ${QUERY_NODE_ENDPOINT} --port ${COLOSSUS_PORT} --uploads /data --worker ${WORKER_ID} --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} --keyFile=${ACCOUNT_KEYFILE} --elasticSearchEndpoint=${ELASTIC_SEARCH_ENDPOINT} --logFileName=/logs/log.txt
+ENTRYPOINT yarn storage-node server --queryNodeEndpoint ${QUERY_NODE_ENDPOINT} \
+    --port ${COLOSSUS_PORT} --uploads /data  \
+    --apiUrl ${WS_PROVIDER_ENDPOINT_URI} --sync --syncInterval=${SYNC_INTERVAL} \
+    --elasticSearchEndpoint=${ELASTIC_SEARCH_ENDPOINT} \
+    --accountUri=${ACCOUNT_URI} \
+    --worker ${WORKER_ID} \
+    --logFilePath=/logs
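
For reference, a hedged sketch of running the image with the variables the new ENTRYPOINT consumes; the image name and every value below are illustrative assumptions, not defaults from this commit:

```
docker run -d --name colossus-1 \
  -e QUERY_NODE_ENDPOINT=http://graphql-server:8081/graphql \
  -e WS_PROVIDER_ENDPOINT_URI=ws://joystream-node:9944/ \
  -e COLOSSUS_PORT=3333 \
  -e SYNC_INTERVAL=1 \
  -e ACCOUNT_URI=//Colossus1 \
  -e WORKER_ID=0 \
  joystream/colossus:latest
```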

+ 0 - 0
devops/infrastructure/.gitignore → devops/aws/.gitignore


+ 8 - 2
devops/infrastructure/README.md → devops/aws/README.md

@@ -26,10 +26,16 @@ On Mac run the command:
 Follow [the official installation guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for your system.

 # How to run
-Copy and edit the file `deploy-config.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
+Copy and edit the file `deploy-infra.sample.cfg` and update parameters like AWS_KEY_PAIR_NAME, KEY_PATH
 Run the `deploy-infra.sh` script to deploy the infrastructure

 ```
-cd devops/infrastructure
+cd devops/aws
 ./deploy-infra.sh your-deploy-config.cfg
 ```
+
+# To tear down a network
+
+```
+./destroy-infra.sh your-deploy-config.cfg
+```

+ 0 - 0
devops/infrastructure/ansible.cfg → devops/aws/ansible.cfg


+ 45 - 0
devops/aws/build-arm64-playbook.yml

@@ -0,0 +1,45 @@
+---
+# Setup joystream code, build docker image
+
+- name: Build image and push to docker hub
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Install Docker Module for Python
+      pip:
+        name: docker
+
+    - name: Log into DockerHub
+      community.docker.docker_login:
+        username: '{{ docker_username }}'
+        password: '{{ docker_password }}'
+
+    - name: Build an image and push it to a private repo
+      community.docker.docker_image:
+        build:
+          path: ./joystream
+          dockerfile: '{{ dockerfile }}'
+          platform: '{{ platform }}'
+        name: '{{ repository }}'
+        tag: '{{ tag_name }}'
+        push: yes
+        source: build
+      # Run in async fashion for max duration of 2 hours
+      async: 7200
+      poll: 0
+      register: build_result
+
+    - name: Check on build async task
+      async_status:
+        jid: '{{ build_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 72
+      # Check for the status every 100s
+      delay: 100

+ 0 - 0
devops/infrastructure/build-code.yml → devops/aws/build-code.yml


+ 0 - 0
devops/infrastructure/chain-spec-pioneer.yml → devops/aws/chain-spec-pioneer.yml


+ 13 - 0
devops/infrastructure/infrastructure.yml → devops/aws/cloudformation/infrastructure.yml

@@ -1,3 +1,9 @@
+# Deploy infrastructure required to run a new joystream chain.
+# This is comprised of:
+#   - N validators
+#   - One RPC node
+#   - s3 bucket with a build of Pioneer
+
 AWSTemplateFormatVersion: 2010-09-09

 Parameters:
@@ -73,6 +79,10 @@ Resources:
           FromPort: 443
           ToPort: 443
           CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 80
+          ToPort: 80
+          CidrIp: 0.0.0.0/0
         - IpProtocol: tcp
           FromPort: 22
           ToPort: 22
@@ -112,6 +122,9 @@ Resources:
             # Update all packages
             apt-get update -y

+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y


+ 20 - 1
devops/infrastructure/single-instance-docker.yml → devops/aws/cloudformation/single-instance-docker.yml

@@ -1,3 +1,6 @@
+# Deploys an EC2 node with docker tools suitable for
+# building joystream node docker images
+
 AWSTemplateFormatVersion: 2010-09-09

 Parameters:
@@ -23,6 +26,14 @@ Resources:
           FromPort: 22
           ToPort: 22
           CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 443
+          ToPort: 443
+          CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 80
+          ToPort: 80
+          CidrIp: 0.0.0.0/0
       Tags:
         - Key: Name
           Value: !Sub '${AWS::StackName}_validator'
@@ -58,6 +69,9 @@ Resources:
             # Update all packages
             apt-get update -y

+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y

@@ -65,7 +79,7 @@ Resources:

             curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg

-            echo "deb [arch=arm64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
 
 
             apt-get update -y
             apt-get update -y
 
 
@@ -73,6 +87,11 @@ Resources:

             usermod -aG docker ubuntu

+            # Update docker-compose to 1.28+
+            curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+            chmod +x /usr/local/bin/docker-compose
+            ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+
             # Get latest cfn scripts and install them;
             apt-get install -y python3-setuptools
             mkdir -p /opt/aws/bin

+ 3 - 0
devops/infrastructure/single-instance.yml → devops/aws/cloudformation/single-instance.yml

@@ -59,6 +59,9 @@ Resources:
             # Update all packages
             apt-get update -y

+            # Prevent interactive prompts that would interrupt the installation
+            export DEBIAN_FRONTEND=noninteractive
+
             # Install the updates
             apt-get upgrade -y


+ 0 - 0
devops/infrastructure/common.sh → devops/aws/common.sh


+ 39 - 0
devops/aws/create-joystream-node-ami-playbook.yml

@@ -0,0 +1,39 @@
+---
+# Setup joystream code, build and Create AMI
+
+- name: Setup instance
+  hosts: all
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Run setup and build
+      include_role:
+        name: common
+        tasks_from: run-setup-build
+
+    - name: Install subkey
+      include_role:
+        name: admin
+        tasks_from: main
+
+    - name: Basic AMI Creation
+      amazon.aws.ec2_ami:
+        instance_id: '{{ instance_id }}'
+        wait: yes
+        # How long before wait gives up, in seconds
+        wait_timeout: 3600
+        name: '{{ ami_name }}'
+        launch_permissions:
+          group_names: ['all']
+        tags:
+          Name: '{{ ami_name }}'
+      register: ami_data
+      delegate_to: localhost
+
+    - name: Print AMI ID
+      debug:
+        msg: 'AMI ID is: {{ ami_data.image_id }}'

+ 5 - 9
devops/infrastructure/deploy-config.sample.cfg → devops/aws/deploy-infra.sample.cfg

@@ -1,6 +1,6 @@
 #### PARAMETERS USED BY AWS

-STACK_NAME=joystream-node
+STACK_NAME=joystream-network
 REGION=us-east-1
 CLI_PROFILE=joystream-user
 KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
@@ -23,19 +23,15 @@ INVENTORY_PATH="$DATA_PATH/inventory"

 NUMBER_OF_VALIDATORS=2

-## Used for Deploying a new node
-DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
-
-SINGLE_NODE_STACK_NAME="new-node-$DATE_TIME"
-
-BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
-CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"
-
 #### PARAMETERS USED BY ANSIBLE

 LOCAL_CODE_PATH="~/Joystream/joystream"
 NETWORK_SUFFIX=7891

+DEPLOYMENT_TYPE=live
+INITIAL_MEMBERS_PATH=""
+INITIAL_BALANCES_PATH=""
+
 GIT_REPO="https://github.com/Joystream/joystream.git"
 BRANCH_NAME=sumer


+ 3 - 2
devops/infrastructure/deploy-infra.sh → devops/aws/deploy-infra.sh

@@ -29,7 +29,7 @@ aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $NEW_STACK_NAME \
-  --template-file infrastructure.yml \
+  --template-file cloudformation/infrastructure.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -84,7 +84,8 @@ if [ $? -eq 0 ]; then
   echo -e "\n\n=========== Configure and start new validators, rpc node and pioneer ==========="
   ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH chain-spec-pioneer.yml \
     --extra-vars "local_dir=$LOCAL_CODE_PATH network_suffix=$NETWORK_SUFFIX
-                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS"
+                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
+                  deployment_type=$DEPLOYMENT_TYPE initial_balances_file=$INITIAL_BALANCES_PATH initial_members_file=$INITIAL_MEMBERS_PATH"

   echo -e "\n\n Pioneer URL: https://$DOMAIN_NAME"
 fi

+ 108 - 0
devops/aws/deploy-playground-playbook.yml

@@ -0,0 +1,108 @@
+---
+# Run the docker-compose setup on a new EC2 instance
+
+- name: Setup EC2 instance and start docker-compose services
+  hosts: all
+  gather_facts: yes
+
+  tasks:
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
+
+    - name: Create bash profile file
+      command: 'touch /home/ubuntu/.bash_profile'
+
+    - name: Run setup script
+      command: ./setup.sh
+      args:
+        chdir: '{{ remote_code_path }}'
+
+    - name: Copy bash_profile content
+      shell: cat ~/.bash_profile
+      register: bash_data
+
+    - name: Copy bash_profile content to bashrc for non-interactive sessions
+      blockinfile:
+        block: '{{ bash_data.stdout }}'
+        path: ~/.bashrc
+        insertbefore: BOF
+
+    - name: Make sure docker is running
+      command: systemctl start docker
+      become: yes
+
+    - name: Build packages
+      command: yarn build:packages
+      args:
+        chdir: '{{ remote_code_path }}'
+      async: 3600
+      poll: 0
+      register: build_result
+
+    - name: Check on build async task
+      async_status:
+        jid: '{{ build_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 36
+      # Check for the status every 100s
+      delay: 100
+
+    - name: Build Node image
+      command: yarn build:node:docker
+      args:
+        chdir: '{{ remote_code_path }}'
+
+    - name: Run docker-compose
+      command: yarn start
+      args:
+        chdir: '{{ remote_code_path }}'
+      environment:
+        PERSIST: 'true'
+        COLOSSUS_1_NODE_URI: 'https://{{ inventory_hostname }}.nip.io/colossus-1/'
+        DISTRIBUTOR_1_NODE_URI: 'https://{{ inventory_hostname }}.nip.io/distributor-1/'
+      async: 1800
+      poll: 0
+      register: compose_result
+
+    - name: Check on yarn start task
+      async_status:
+        jid: '{{ compose_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 18
+      # Check for the status every 100s
+      delay: 100
+
+    - name: Set nip.io domain with IP
+      set_fact:
+        nip_domain: '{{ inventory_hostname }}.nip.io'
+      run_once: yes
+
+    - name: Install and configure Caddy
+      include_role:
+        name: caddy_ansible.caddy_ansible
+        apply:
+          become: yes
+      vars:
+        caddy_config: "{{ lookup('template', 'templates/Playground-Caddyfile.j2') }}"
+        caddy_systemd_capabilities_enabled: true
+        caddy_update: false
+
+    - name: Print endpoints
+      debug:
+        msg:
+          - 'The services should now be accessible at:'
+          - 'Pioneer: https://{{ nip_domain }}/pioneer/'
+          - 'WebSocket RPC: wss://{{ nip_domain }}/ws-rpc'
+          - 'HTTP RPC: https://{{ nip_domain }}/http-rpc'
+          - 'Colossus: https://{{ nip_domain }}/colossus-1'
+          - 'Distributor: https://{{ nip_domain }}/distributor-1'
+          - 'GraphQL server: https://{{ nip_domain }}/query-node/server/graphql'
+          - 'Indexer: https://{{ nip_domain }}/query-node/indexer/graphql'
+          - 'Member Faucet: https://{{ nip_domain }}/member-faucet/register'
+          - 'Orion: https://{{ nip_domain }}/orion/graphql'

+ 46 - 0
devops/aws/deploy-playground.sh

@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+source common.sh
+
+if [ -z "$1" ]; then
+  echo "ERROR: Configuration file not passed"
+  echo "Please use ./deploy-playground.sh PATH/TO/CONFIG to run this script"
+  exit 1
+else
+  echo "Using $1 file for config"
+  source $1
+fi
+
+if [ ! -f "$KEY_PATH" ]; then
+    echo "Key file not found at $KEY_PATH"
+    exit 1
+fi
+
+# Deploy the CloudFormation template
+echo -e "\n\n=========== Deploying single node ==========="
+aws cloudformation deploy \
+  --region $REGION \
+  --profile $CLI_PROFILE \
+  --stack-name $SINGLE_NODE_STACK_NAME \
+  --template-file cloudformation/single-instance-docker.yml \
+  --no-fail-on-empty-changeset \
+  --capabilities CAPABILITY_NAMED_IAM \
+  --parameter-overrides \
+    EC2InstanceType=$DEFAULT_EC2_INSTANCE_TYPE \
+    KeyName=$AWS_KEY_PAIR_NAME
+
+# If the deploy succeeded, get the IP and configure the created instance
+if [ $? -eq 0 ]; then
+  # Install additional Ansible roles from requirements
+  ansible-galaxy install -r requirements.yml
+
+  SERVER_IP=$(get_aws_export $SINGLE_NODE_STACK_NAME "PublicIp")
+
+  echo -e "New Node Public IP: $SERVER_IP"
+
+  echo -e "\n\n=========== Configuring node ==========="
+  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH deploy-playground-playbook.yml \
+    --extra-vars "branch_name=$BRANCH_NAME git_repo=$GIT_REPO"
+fi
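
A usage sketch; the config file name here is an example, and any file defining the variables the script references (REGION, CLI_PROFILE, KEY_PATH, SINGLE_NODE_STACK_NAME, DEFAULT_EC2_INSTANCE_TYPE, AWS_KEY_PAIR_NAME, GIT_REPO, BRANCH_NAME) will do:

```
cp deploy-single-node.sample.cfg my-playground.cfg   # then edit as needed
./deploy-playground.sh my-playground.cfg
```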

+ 0 - 0
devops/infrastructure/single-node-playbook.yml → devops/aws/deploy-single-node-playbook.yml


+ 19 - 0
devops/aws/deploy-single-node.sample.cfg

@@ -0,0 +1,19 @@
+#### PARAMETERS USED BY AWS
+
+REGION=us-east-1
+CLI_PROFILE=joystream-user
+KEY_PATH="/Users/joystream/Joystream/joystream-key.pem"
+AWS_KEY_PAIR_NAME="joystream-key"
+
+DEFAULT_EC2_INSTANCE_TYPE=t2.micro
+
+## Used for Deploying a new node
+DATE_TIME=$(date +"%d-%b-%Y-%H-%M-%S")
+
+SINGLE_NODE_STACK_NAME="joystream-node-$DATE_TIME"
+
+BINARY_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joystream-node-5.1.0-9d9e77751-x86_64-linux-gnu.tar.gz"
+CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0/joy-testnet-5.json"
+
+GIT_REPO="https://github.com/Joystream/joystream.git"
+BRANCH_NAME="master"

+ 3 - 8
devops/infrastructure/deploy-single-node.sh → devops/aws/deploy-single-node.sh

@@ -13,23 +13,18 @@ else
   source $1
 fi

-if [ $ACCOUNT_ID == None ]; then
-    echo "Couldn't find Account ID, please check if AWS Profile $CLI_PROFILE is set"
-    exit 1
-fi
-
 if [ ! -f "$KEY_PATH" ]; then
     echo "Key file not found at $KEY_PATH"
     exit 1
 fi

-# # Deploy the CloudFormation template
+# Deploy the CloudFormation template
 echo -e "\n\n=========== Deploying single node ==========="
 aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $SINGLE_NODE_STACK_NAME \
-  --template-file single-instance.yml \
+  --template-file cloudformation/single-instance.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -46,6 +41,6 @@ if [ $? -eq 0 ]; then
   echo -e "New Node Public IP: $SERVER_IP"

   echo -e "\n\n=========== Configuring node ==========="
-  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH single-node-playbook.yml \
+  ansible-playbook -i $SERVER_IP, --private-key $KEY_PATH deploy-single-node-playbook.yml \
     --extra-vars "binary_file=$BINARY_FILE chain_spec_file=$CHAIN_SPEC_FILE"
 fi

+ 1 - 1
devops/infrastructure/delete-stack.sh → devops/aws/destroy-infra.sh

@@ -6,7 +6,7 @@ source common.sh

 if [ -z "$1" ]; then
   echo "ERROR: Configuration file not passed"
-  echo "Please use ./delete-stack.sh PATH/TO/CONFIG to run this script"
+  echo "Please use ./destroy-infra.sh PATH/TO/CONFIG to run this script"
   exit 1
 else
   echo "Using $1 file for config"

+ 0 - 0
devops/infrastructure/group_vars/all → devops/aws/group_vars/all


+ 0 - 0
devops/infrastructure/library/json_modify.py → devops/aws/library/json_modify.py


+ 0 - 0
devops/infrastructure/requirements.yml → devops/aws/requirements.yml


+ 0 - 0
devops/infrastructure/roles/admin/tasks/deploy-pioneer.yml → devops/aws/roles/admin/tasks/deploy-pioneer.yml


+ 3 - 0
devops/infrastructure/roles/admin/tasks/main.yml → devops/aws/roles/admin/tasks/main.yml

@@ -16,6 +16,7 @@

 - name: Install subkey
   shell: cargo install --force subkey --git https://github.com/paritytech/substrate --version 2.0.1 --locked
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: install_result
@@ -25,5 +26,7 @@
     jid: '{{ install_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 96 - 0
devops/aws/roles/common/tasks/chain-spec-node-keys.yml

@@ -0,0 +1,96 @@
+---
+# Create chain spec files and keys and copy to all the servers
+
+- name: Debug to test variable
+  debug:
+    msg: 'Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
+  run_once: true
+
+- name: Copying initial members file to the server
+  copy:
+    src: '{{ initial_members_file }}'
+    dest: '{{ admin_code_dir }}/initial-members.json'
+  when: initial_members_file is defined and initial_members_file|length > 0
+  run_once: true
+
+- name: Copying initial balances file to the server
+  copy:
+    src: '{{ initial_balances_file }}'
+    dest: '{{ admin_code_dir }}/initial-balances.json'
+  when: initial_balances_file is defined and initial_balances_file|length > 0
+  run_once: true
+
+- name: Run chain-spec-builder to generate chainspec.json file (with initial data)
+  shell: >
+    {{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }}
+    --chain-spec-path {{ chain_spec_path }}
+    --endowed 1 --keystore-path {{ data_path }}
+    {% if deployment_type is defined and deployment_type|length > 0 %}--deployment {{ deployment_type }}{% endif %}
+    {% if initial_balances_file is defined and initial_balances_file|length > 0 %}--initial-balances-path {{ admin_code_dir }}/initial-balances.json{% endif %}
+    {% if initial_members_file is defined and initial_members_file|length > 0 %}--initial-members-path {{ admin_code_dir }}/initial-members.json{% endif %}
+  register: chain_spec_output
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Run subkey to generate node keys
+  shell: subkey generate-node-key
+  delegate_to: '{{ local_or_admin }}'
+  register: subkey_output
+
+- name: Print to stdout
+  debug:
+    msg:
+      - 'Public Key: {{ subkey_output.stderr }}'
+      - 'Private Key: {{ subkey_output.stdout }}'
+
+- name: Print to stdout chain spec
+  debug: var=chain_spec_output.stdout
+  run_once: true
+
+- name: Save output of chain spec to local file
+  copy:
+    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
+    dest: '{{ data_path }}/chain_spec_output.txt'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Change chain spec name, id, protocolId
+  json_modify:
+    chain_spec_path: '{{ chain_spec_path }}'
+    prefix: '{{ network_suffix }}'
+    all_nodes: '{{ hostvars }}'
+  delegate_to: '{{ local_or_admin }}'
+  register: result
+  run_once: true
+
+- name: Print output of modified chainspec
+  debug:
+    var: result.result
+  run_once: true
+
+- name: Run build-spec to generate raw chainspec file
+  shell: '{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}'
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+
+- name: Copying chain spec files to localhost
+  synchronize:
+    src: '/home/ubuntu/{{ data_path }}/'
+    dest: '{{ data_path }}'
+    mode: pull
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copy joystream-node binary to localhost
+  fetch:
+    src: '{{ admin_code_dir }}/target/release/joystream-node'
+    dest: '{{ data_path }}/joystream-node'
+    flat: yes
+  delegate_to: '{{ local_or_admin }}'
+  run_once: true
+  when: run_on_admin_server|bool
+
+- name: Copying raw chain spec file to all servers
+  copy:
+    src: '{{ raw_chain_spec_path }}'
+    dest: '{{ remote_chain_spec_path }}'
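
To make the Jinja templating above concrete, here is a hypothetical rendering of the chain-spec-builder task with all optional inputs set (the paths and validator count are illustrative, not values from the playbook):

```bash
# Sketch: the templated shell task expanded for 2 validators, with
# deployment_type, initial balances and initial members all supplied
~/joystream/target/release/chain-spec-builder generate -a 2 \
  --chain-spec-path ~/data/chainspec.json \
  --endowed 1 --keystore-path ~/data \
  --deployment live \
  --initial-balances-path ~/joystream/initial-balances.json \
  --initial-members-path ~/joystream/initial-members.json
```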

+ 1 - 0
devops/infrastructure/roles/common/tasks/get-code-git.yml → devops/aws/roles/common/tasks/get-code-git.yml

@@ -5,6 +5,7 @@
   file:
     state: absent
     path: "{{ remote_code_path }}"
+  become: yes
 
 - name: Git checkout
   git:

+ 0 - 0
devops/infrastructure/roles/common/tasks/get-code-local.yml → devops/aws/roles/common/tasks/get-code-local.yml


+ 7 - 4
devops/infrastructure/roles/common/tasks/run-setup-build.yml → devops/aws/roles/common/tasks/run-setup-build.yml

@@ -2,25 +2,28 @@
 # Run setup and build code
 
 - name: Creat bash profile file
-  command: "touch /home/ubuntu/.bash_profile"
+  command: 'touch /home/ubuntu/.bash_profile'
 
 - name: Run setup script
   command: ./setup.sh
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
 
 - name: Build joystream node
   shell: . ~/.bash_profile && yarn cargo-build
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: build_result
 
 - name: Check on build async task
   async_status:
-    jid: "{{ build_result.ansible_job_id }}"
+    jid: '{{ build_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100
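
The retry parameters in both async tasks are sized to match the async budget: polling every 100 seconds for up to 36 attempts gives the watcher the same one-hour window (36 × 100 s = 3600 s) that the task itself is allowed.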

+ 0 - 0
devops/infrastructure/roles/node/templates/joystream-node.service.j2 → devops/aws/roles/node/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/rpc/tasks/main.yml → devops/aws/roles/rpc/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/rpc/templates/Caddyfile.j2 → devops/aws/roles/rpc/templates/Caddyfile.j2


+ 0 - 0
devops/infrastructure/roles/rpc/templates/joystream-node.service.j2 → devops/aws/roles/rpc/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/roles/validators/tasks/main.yml → devops/aws/roles/validators/tasks/main.yml


+ 0 - 0
devops/infrastructure/roles/validators/templates/joystream-node.service.j2 → devops/aws/roles/validators/templates/joystream-node.service.j2


+ 0 - 0
devops/infrastructure/setup-admin.yml → devops/aws/setup-admin.yml


+ 49 - 0
devops/aws/templates/Playground-Caddyfile.j2

@@ -0,0 +1,49 @@
+{{ nip_domain }}/ws-rpc* {
+    uri strip_prefix /ws-rpc
+    reverse_proxy localhost:9944
+}
+
+{{ nip_domain }}/http-rpc* {
+    uri strip_prefix /http-rpc
+    reverse_proxy localhost:9933
+}
+
+{{ nip_domain }}/pioneer* {
+    uri strip_prefix /pioneer
+    reverse_proxy localhost:3000
+}
+
+{{ nip_domain }}/colossus-1* {
+    uri strip_prefix /colossus-1
+    reverse_proxy localhost:3333
+}
+
+{{ nip_domain }}/distributor-1* {
+    uri strip_prefix /distributor-1
+    reverse_proxy localhost:3334
+}
+
+# newer versions of graphql-server seem to expect this URL as well
+{{ nip_domain }}/@apollographql/* {
+    reverse_proxy localhost:8081
+}
+
+{{ nip_domain }}/query-node/server* {
+    uri strip_prefix /query-node/server
+    reverse_proxy localhost:8081
+}
+
+{{ nip_domain }}/query-node/indexer* {
+    uri strip_prefix /query-node/indexer
+    reverse_proxy localhost:4000
+}
+
+{{ nip_domain }}/orion* {
+    uri strip_prefix /orion
+    reverse_proxy localhost:6116
+}
+
+{{ nip_domain }}/member-faucet* {
+    uri strip_prefix /member-faucet
+    reverse_proxy localhost:3002
+}
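
Each site block above strips its route prefix before proxying, so a request to the playground domain lands on the matching local service with the prefix removed. A quick smoke test against the query-node route might look like this (the nip.io domain is a placeholder):

```bash
# /query-node/server is stripped, so this reaches localhost:8081 as /graphql
curl "https://<IP>.nip.io/query-node/server/graphql" \
  -H 'Content-Type: application/json' \
  --data '{"query":"{ __typename }"}'
```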

+ 0 - 50
devops/infrastructure/build-arm64-playbook.yml

@@ -1,50 +0,0 @@
----
-# Setup joystream code, build docker image
-
-- name: Build image and push to docker hub
-  hosts: all
-
-  tasks:
-    - block:
-        - name: Get code from git repo
-          include_role:
-            name: common
-            tasks_from: get-code-git
-
-        - name: Install Docker Module for Python
-          pip:
-            name: docker
-
-        - name: Log into DockerHub
-          community.docker.docker_login:
-            username: '{{ docker_username }}'
-            password: '{{ docker_password }}'
-
-        - name: Build an image and push it to a private repo
-          community.docker.docker_image:
-            build:
-              path: ./joystream
-              dockerfile: '{{ dockerfile }}'
-              platform: '{{ platform }}'
-            name: '{{ repository }}'
-            tag: '{{ tag_name }}'
-            push: yes
-            source: build
-          async: 7200
-          poll: 0
-          register: build_result
-
-        - name: Check on build async task
-          async_status:
-            jid: '{{ build_result.ansible_job_id }}'
-          register: job_result
-          until: job_result.finished
-          retries: 72
-          delay: 100
-
-      always:
-        - name: Delete the stack
-          amazon.aws.cloudformation:
-            stack_name: '{{ stack_name }}'
-            state: 'absent'
-          delegate_to: localhost

+ 0 - 45
devops/infrastructure/github-action-playbook.yml

@@ -1,45 +0,0 @@
----
-# Setup joystream code, build and Create AMI
-
-- name: Setup instance
-  hosts: all
-
-  tasks:
-    - block:
-      - name: Get code from git repo
-        include_role:
-          name: common
-          tasks_from: get-code-git
-
-      - name: Run setup and build
-        include_role:
-          name: common
-          tasks_from: run-setup-build
-
-      - name: Install subkey
-        include_role:
-          name: admin
-          tasks_from: main
-
-      - name: Basic AMI Creation
-        amazon.aws.ec2_ami:
-          instance_id: "{{ instance_id }}"
-          wait: yes
-          name: "{{ ami_name }}"
-          launch_permissions:
-            group_names: ['all']
-          tags:
-            Name: "{{ ami_name }}"
-        register: ami_data
-        delegate_to: localhost
-
-      - name: Print AMI ID
-        debug:
-          msg: "AMI ID is: {{ ami_data.image_id }}"
-
-      always:
-      - name: Delete the stack
-        amazon.aws.cloudformation:
-          stack_name: "{{ stack_name }}"
-          state: "absent"
-        delegate_to: localhost

+ 0 - 1
devops/infrastructure/pulumi-common/index.ts

@@ -1 +0,0 @@
-export { CaddyServiceDeployment } from './caddy'

+ 0 - 19
devops/infrastructure/query-node/Pulumi.yaml

@@ -1,19 +0,0 @@
-name: query-node
-runtime: nodejs
-description: Kubernetes IaC for Query Node
-template:
-  config:
-    aws:profile:
-      default: joystream-user
-    aws:region:
-      default: us-east-1
-    isMinikube:
-      description: Whether you are deploying to minikube
-      default: false
-    isLoadBalancerReady:
-      description: Whether the load balancer service is ready and has been assigned an IP
-      default: false
-    membersFilePath:
-      description: Path to members.json file for processor initialization
-    workersFilePath:
-      description: Path to workers.json file for processor initialization

+ 0 - 461
devops/infrastructure/query-node/index.ts

@@ -1,461 +0,0 @@
-import * as awsx from '@pulumi/awsx'
-import * as eks from '@pulumi/eks'
-import * as docker from '@pulumi/docker'
-import * as pulumi from '@pulumi/pulumi'
-import { configMapFromFile } from './configMap'
-import * as k8s from '@pulumi/kubernetes'
-import * as s3Helpers from './s3Helpers'
-import { CaddyServiceDeployment } from 'pulumi-common'
-
-require('dotenv').config()
-
-const config = new pulumi.Config()
-const awsConfig = new pulumi.Config('aws')
-const isMinikube = config.getBoolean('isMinikube')
-export let kubeconfig: pulumi.Output<any>
-export let joystreamAppsImage: pulumi.Output<string>
-let provider: k8s.Provider
-
-if (isMinikube) {
-  provider = new k8s.Provider('local', {})
-
-  // Create image from local app
-  joystreamAppsImage = new docker.Image('joystream/apps', {
-    build: {
-      context: '../../../',
-      dockerfile: '../../../apps.Dockerfile',
-    },
-    imageName: 'joystream/apps:latest',
-    skipPush: true,
-  }).baseImageName
-  // joystreamAppsImage = pulumi.interpolate`joystream/apps`
-} else {
-  // Create a VPC for our cluster.
-  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
-
-  // Create an EKS cluster with the default configuration.
-  const cluster = new eks.Cluster('eksctl-query-node', {
-    vpcId: vpc.id,
-    subnetIds: vpc.publicSubnetIds,
-    desiredCapacity: 3,
-    maxSize: 3,
-    instanceType: 't2.large',
-    providerCredentialOpts: {
-      profileName: awsConfig.get('profile'),
-    },
-  })
-  provider = cluster.provider
-
-  // Export the cluster's kubeconfig.
-  kubeconfig = cluster.kubeconfig
-
-  // Create a repository
-  const repo = new awsx.ecr.Repository('joystream/apps')
-
-  joystreamAppsImage = repo.buildAndPushImage({
-    dockerfile: '../../../apps.Dockerfile',
-    context: '../../../',
-  })
-}
-
-const resourceOptions = { provider: provider }
-
-const name = 'query-node'
-
-// Create a Kubernetes Namespace
-// const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
-const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
-
-// Export the Namespace name
-export const namespaceName = ns.metadata.name
-
-const appLabels = { appClass: name }
-
-// Create a Deployment
-const databaseLabels = { app: 'postgres-db' }
-
-const pvc = new k8s.core.v1.PersistentVolumeClaim(
-  `db-pvc`,
-  {
-    metadata: {
-      labels: databaseLabels,
-      namespace: namespaceName,
-      name: `db-pvc`,
-    },
-    spec: {
-      accessModes: ['ReadWriteOnce'],
-      resources: {
-        requests: {
-          storage: `10Gi`,
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-const databaseDeployment = new k8s.apps.v1.Deployment(
-  'postgres-db',
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: databaseLabels,
-    },
-    spec: {
-      selector: { matchLabels: databaseLabels },
-      template: {
-        metadata: { labels: databaseLabels },
-        spec: {
-          containers: [
-            {
-              name: 'postgres-db',
-              image: 'postgres:12',
-              env: [
-                { name: 'POSTGRES_USER', value: process.env.DB_USER! },
-                { name: 'POSTGRES_PASSWORD', value: process.env.DB_PASS! },
-                { name: 'POSTGRES_DB', value: process.env.INDEXER_DB_NAME! },
-              ],
-              ports: [{ containerPort: 5432 }],
-              volumeMounts: [
-                {
-                  name: 'postgres-data',
-                  mountPath: '/var/lib/postgresql/data',
-                  subPath: 'postgres',
-                },
-              ],
-            },
-          ],
-          volumes: [
-            {
-              name: 'postgres-data',
-              persistentVolumeClaim: {
-                claimName: `db-pvc`,
-              },
-            },
-          ],
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-const databaseService = new k8s.core.v1.Service(
-  'postgres-db',
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: databaseDeployment.metadata.labels,
-      name: 'postgres-db',
-    },
-    spec: {
-      ports: [{ port: 5432 }],
-      selector: databaseDeployment.spec.template.metadata.labels,
-    },
-  },
-  resourceOptions
-)
-
-const migrationJob = new k8s.batch.v1.Job(
-  'db-migration',
-  {
-    metadata: {
-      namespace: namespaceName,
-    },
-    spec: {
-      backoffLimit: 0,
-      template: {
-        spec: {
-          containers: [
-            {
-              name: 'db-migration',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              resources: { requests: { cpu: '100m', memory: '100Mi' } },
-              env: [
-                {
-                  name: 'WARTHOG_DB_HOST',
-                  value: 'postgres-db',
-                },
-                {
-                  name: 'DB_HOST',
-                  value: 'postgres-db',
-                },
-                { name: 'DB_NAME', value: process.env.DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate'],
-            },
-          ],
-          restartPolicy: 'Never',
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: databaseService }
-)
-
-const membersFilePath = config.get('membersFilePath')
-  ? config.get('membersFilePath')!
-  : '../../../query-node/mappings/bootstrap/data/members.json'
-const workersFilePath = config.get('workersFilePath')
-  ? config.get('workersFilePath')!
-  : '../../../query-node/mappings/bootstrap/data/workers.json'
-
-const dataBucket = new s3Helpers.FileBucket('bootstrap-data', {
-  files: [
-    { path: membersFilePath, name: 'members.json' },
-    { path: workersFilePath, name: 'workers.json' },
-  ],
-  policy: s3Helpers.publicReadPolicy,
-})
-
-const membersUrl = dataBucket.getUrlForFile('members.json')
-const workersUrl = dataBucket.getUrlForFile('workers.json')
-
-const dataPath = '/joystream/query-node/mappings/bootstrap/data'
-
-const processorJob = new k8s.batch.v1.Job(
-  'processor-migration',
-  {
-    metadata: {
-      namespace: namespaceName,
-    },
-    spec: {
-      backoffLimit: 0,
-      template: {
-        spec: {
-          initContainers: [
-            {
-              name: 'curl-init',
-              image: 'appropriate/curl',
-              command: ['/bin/sh', '-c'],
-              args: [
-                pulumi.interpolate`curl -o ${dataPath}/workers.json ${workersUrl}; curl -o ${dataPath}/members.json ${membersUrl}; ls -al ${dataPath};`,
-              ],
-              volumeMounts: [
-                {
-                  name: 'bootstrap-data',
-                  mountPath: dataPath,
-                },
-              ],
-            },
-          ],
-          containers: [
-            {
-              name: 'processor-migration',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  name: 'bootstrap-data',
-                  mountPath: dataPath,
-                },
-              ],
-              args: ['workspace', 'query-node-root', 'processor:bootstrap'],
-            },
-          ],
-          restartPolicy: 'Never',
-          volumes: [
-            {
-              name: 'bootstrap-data',
-              emptyDir: {},
-            },
-          ],
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: migrationJob }
-)
-
-const defsConfig = new configMapFromFile(
-  'defs-config',
-  {
-    filePath: '../../../types/augment/all/defs.json',
-    namespaceName: namespaceName,
-  },
-  resourceOptions
-).configName
-
-const deployment = new k8s.apps.v1.Deployment(
-  name,
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: appLabels,
-    },
-    spec: {
-      replicas: 1,
-      selector: { matchLabels: appLabels },
-      template: {
-        metadata: {
-          labels: appLabels,
-        },
-        spec: {
-          containers: [
-            {
-              name: 'redis',
-              image: 'redis:6.0-alpine',
-              ports: [{ containerPort: 6379 }],
-            },
-            {
-              name: 'indexer',
-              image: 'joystream/hydra-indexer:2.1.0-beta.9',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'INDEXER_WORKERS', value: '5' },
-                { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
-                { name: 'TYPES_JSON', value: 'types.json' },
-                { name: 'PGUSER', value: process.env.DB_USER! },
-                { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
-                  name: 'indexer-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn db:bootstrap && yarn start:prod'],
-            },
-            {
-              name: 'hydra-indexer-gateway',
-              image: 'joystream/hydra-indexer-gateway:2.1.0-beta.5',
-              env: [
-                { name: 'WARTHOG_STARTER_DB_DATABASE', value: process.env.INDEXER_DB_NAME! },
-                { name: 'WARTHOG_STARTER_DB_HOST', value: 'postgres-db' },
-                { name: 'WARTHOG_STARTER_DB_PASSWORD', value: process.env.DB_PASS! },
-                { name: 'WARTHOG_STARTER_DB_PORT', value: process.env.DB_PORT! },
-                { name: 'WARTHOG_STARTER_DB_USERNAME', value: process.env.DB_USER! },
-                { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'WARTHOG_APP_PORT', value: process.env.WARTHOG_APP_PORT! },
-                { name: 'PORT', value: process.env.WARTHOG_APP_PORT! },
-                { name: 'DEBUG', value: '*' },
-              ],
-              ports: [{ containerPort: 4002 }],
-            },
-            {
-              name: 'processor',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
-                  name: 'processor-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
-            },
-            {
-              name: 'graphql-server',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'DB_USER', value: process.env.DB_USER! },
-                { name: 'DB_PORT', value: process.env.DB_PORT! },
-                { name: 'DB_NAME', value: process.env.DB_NAME! },
-                { name: 'GRAPHQL_SERVER_HOST', value: process.env.GRAPHQL_SERVER_HOST! },
-                { name: 'GRAPHQL_SERVER_PORT', value: process.env.GRAPHQL_SERVER_PORT! },
-              ],
-              ports: [{ name: 'graph-ql-port', containerPort: Number(process.env.GRAPHQL_SERVER_PORT!) }],
-              args: ['workspace', 'query-node-root', 'query-node:start:prod'],
-            },
-          ],
-          volumes: [
-            {
-              name: 'processor-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
-            {
-              name: 'indexer-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
-          ],
-        },
-      },
-    },
-  },
-  { ...resourceOptions, dependsOn: processorJob }
-)
-
-// Export the Deployment name
-export const deploymentName = deployment.metadata.name
-
-// Create a LoadBalancer Service for the NGINX Deployment
-const service = new k8s.core.v1.Service(
-  name,
-  {
-    metadata: {
-      labels: appLabels,
-      namespace: namespaceName,
-      name: 'query-node',
-    },
-    spec: {
-      ports: [
-        { name: 'port-1', port: 8081, targetPort: 'graph-ql-port' },
-        { name: 'port-2', port: 4000, targetPort: 4002 },
-      ],
-      selector: appLabels,
-    },
-  },
-  resourceOptions
-)
-
-// Export the Service name and public LoadBalancer Endpoint
-export const serviceName = service.metadata.name
-
-const caddyEndpoints = [
-  `/indexer/* {
-    uri strip_prefix /indexer
-    reverse_proxy query-node:4000
-}`,
-  `/server/* {
-    uri strip_prefix /server
-    reverse_proxy query-node:8081
-}`,
-]
-
-const lbReady = config.get('isLoadBalancerReady') === 'true'
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
-  resourceOptions
-)
-
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint

+ 0 - 76
devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml

@@ -1,76 +0,0 @@
----
-# Create chain spec files and keys and copy to all the servers
-
-- name: Debug to test variable
-  debug:
-    msg: "Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}"
-  run_once: true
-
-- name: Run chain-spec-builder to generate chainspec.json file
-  command: "{{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }} --chain-spec-path {{ chain_spec_path }} --deployment live --endowed 1 --keystore-path {{ data_path }}"
-  register: chain_spec_output
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Run subkey to generate node keys
-  shell: subkey generate-node-key
-  delegate_to: "{{ local_or_admin }}"
-  register: subkey_output
-
-- name: Print to stdout
-  debug:
-    msg:
-    - "Public Key: {{ subkey_output.stderr }}"
-    - "Private Key: {{ subkey_output.stdout }}"
-
-- name: Print to stdout chain spec
-  debug: var=chain_spec_output.stdout
-  run_once: true
-
-- name: Save output of chain spec to local file
-  copy:
-    content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
-    dest: "{{ data_path }}/chain_spec_output.txt"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Change chain spec name, id, protocolId
-  json_modify:
-    chain_spec_path: "{{ chain_spec_path }}"
-    prefix: "{{ network_suffix }}"
-    all_nodes: "{{ hostvars }}"
-  delegate_to: "{{ local_or_admin }}"
-  register: result
-  run_once: true
-
-- name: Print output of modified chainspec
-  debug:
-    var: result.result
-  run_once: true
-
-- name: Run build-spec to generate raw chainspec file
-  shell: "{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}"
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-
-- name: Copying chain spec files to localhost
-  synchronize:
-    src: "/home/ubuntu/{{ data_path }}/"
-    dest: "{{ data_path }}"
-    mode: pull
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copy joystream-node binary to localhost
-  fetch:
-    src: "{{ admin_code_dir }}/target/release/joystream-node"
-    dest: "{{ data_path }}/joystream-node"
-    flat: yes
-  delegate_to: "{{ local_or_admin }}"
-  run_once: true
-  when: run_on_admin_server|bool
-
-- name: Copying raw chain spec file to all servers
-  copy:
-    src: "{{ raw_chain_spec_path }}"
-    dest: "{{ remote_chain_spec_path }}"

+ 0 - 236
devops/infrastructure/storage-node/index.ts

@@ -1,236 +0,0 @@
-import * as awsx from '@pulumi/awsx'
-import * as aws from '@pulumi/aws'
-import * as eks from '@pulumi/eks'
-import * as k8s from '@pulumi/kubernetes'
-import * as pulumi from '@pulumi/pulumi'
-import { CaddyServiceDeployment } from 'pulumi-common'
-import * as fs from 'fs'
-
-const awsConfig = new pulumi.Config('aws')
-const config = new pulumi.Config()
-
-const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
-const isAnonymous = config.require('isAnonymous') === 'true'
-const lbReady = config.get('isLoadBalancerReady') === 'true'
-const name = 'storage-node'
-const colossusPort = parseInt(config.get('colossusPort') || '3000')
-const storage = parseInt(config.get('storage') || '40')
-
-let additionalParams: string[] | pulumi.Input<string>[] = []
-let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
-let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
-
-// Create a VPC for our cluster.
-const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
-
-// Create an EKS cluster with the default configuration.
-const cluster = new eks.Cluster('eksctl-storage-node', {
-  vpcId: vpc.id,
-  subnetIds: vpc.publicSubnetIds,
-  instanceType: 't2.medium',
-  providerCredentialOpts: {
-    profileName: awsConfig.get('profile'),
-  },
-})
-
-// Export the cluster's kubeconfig.
-export const kubeconfig = cluster.kubeconfig
-
-// Create a repository
-const repo = new awsx.ecr.Repository('colossus-image')
-
-// Build an image and publish it to our ECR repository.
-export const colossusImage = repo.buildAndPushImage({
-  dockerfile: '../../../colossus.Dockerfile',
-  context: '../../../',
-})
-
-const resourceOptions = { provider: cluster.provider }
-
-// Create a Kubernetes Namespace
-const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
-
-// Export the Namespace name
-export const namespaceName = ns.metadata.name
-
-const appLabels = { appClass: name }
-
-const pvc = new k8s.core.v1.PersistentVolumeClaim(
-  `${name}-pvc`,
-  {
-    metadata: {
-      labels: appLabels,
-      namespace: namespaceName,
-      name: `${name}-pvc`,
-    },
-    spec: {
-      accessModes: ['ReadWriteOnce'],
-      resources: {
-        requests: {
-          storage: `${storage}Gi`,
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-volumes.push({
-  name: 'ipfs-data',
-  persistentVolumeClaim: {
-    claimName: `${name}-pvc`,
-  },
-})
-
-const caddyEndpoints = [
-  ` {
-    reverse_proxy storage-node:${colossusPort}
-}`,
-]
-
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, caddyEndpoints },
-  resourceOptions
-)
-
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint
-
-export let appLink: pulumi.Output<string>
-
-if (lbReady) {
-  appLink = pulumi.interpolate`https://${endpoint1}`
-
-  if (!isAnonymous) {
-    const remoteKeyFilePath = '/joystream/key-file.json'
-    const providerId = config.require('providerId')
-    const keyFile = config.require('keyFile')
-    const publicUrl = config.get('publicURL') ? config.get('publicURL')! : appLink
-
-    const keyConfig = new k8s.core.v1.ConfigMap('key-config', {
-      metadata: { namespace: namespaceName, labels: appLabels },
-      data: { 'fileData': fs.readFileSync(keyFile).toString() },
-    })
-    const keyConfigName = keyConfig.metadata.apply((m) => m.name)
-
-    additionalParams = ['--provider-id', providerId, '--key-file', remoteKeyFilePath, '--public-url', publicUrl]
-
-    volumeMounts.push({
-      mountPath: remoteKeyFilePath,
-      name: 'keyfile-volume',
-      subPath: 'fileData',
-    })
-
-    volumes.push({
-      name: 'keyfile-volume',
-      configMap: {
-        name: keyConfigName,
-      },
-    })
-
-    const passphrase = config.get('passphrase')
-    if (passphrase) {
-      additionalParams.push('--passphrase', passphrase)
-    }
-  }
-}
-
-if (isAnonymous) {
-  additionalParams.push('--anonymous')
-}
-
-// Create a Deployment
-const deployment = new k8s.apps.v1.Deployment(
-  name,
-  {
-    metadata: {
-      namespace: namespaceName,
-      labels: appLabels,
-    },
-    spec: {
-      replicas: 1,
-      selector: { matchLabels: appLabels },
-      template: {
-        metadata: {
-          labels: appLabels,
-        },
-        spec: {
-          hostname: 'ipfs',
-          containers: [
-            {
-              name: 'ipfs',
-              image: 'ipfs/go-ipfs:latest',
-              ports: [{ containerPort: 5001 }, { containerPort: 8080 }],
-              command: ['/bin/sh', '-c'],
-              args: [
-                'set -e; \
-                /usr/local/bin/start_ipfs config profile apply lowpower; \
-                /usr/local/bin/start_ipfs config --json Gateway.PublicGateways \'{"localhost": null }\'; \
-                /usr/local/bin/start_ipfs config Datastore.StorageMax 200GB; \
-                /sbin/tini -- /usr/local/bin/start_ipfs daemon --migrate=true',
-              ],
-              volumeMounts: [
-                {
-                  name: 'ipfs-data',
-                  mountPath: '/data/ipfs',
-                },
-              ],
-            },
-            {
-              name: 'colossus',
-              image: colossusImage,
-              env: [
-                {
-                  name: 'WS_PROVIDER_ENDPOINT_URI',
-                  // example 'wss://18.209.241.63.nip.io/'
-                  value: wsProviderEndpointURI,
-                },
-                {
-                  name: 'DEBUG',
-                  value: 'joystream:*',
-                },
-              ],
-              volumeMounts,
-              command: [
-                'yarn',
-                'colossus',
-                '--ws-provider',
-                wsProviderEndpointURI,
-                '--ipfs-host',
-                'ipfs',
-                ...additionalParams,
-              ],
-              ports: [{ containerPort: colossusPort }],
-            },
-          ],
-          volumes,
-        },
-      },
-    },
-  },
-  resourceOptions
-)
-
-// Create a LoadBalancer Service for the Deployment
-const service = new k8s.core.v1.Service(
-  name,
-  {
-    metadata: {
-      labels: appLabels,
-      namespace: namespaceName,
-      name: 'storage-node',
-    },
-    spec: {
-      ports: [{ name: 'port-1', port: colossusPort }],
-      selector: appLabels,
-    },
-  },
-  resourceOptions
-)
-
-// Export the Service name
-export const serviceName = service.metadata.name
-
-// Export the Deployment name
-export const deploymentName = deployment.metadata.name

+ 1 - 1
devops/infrastructure/storage-node/.gitignore → devops/kubernetes/argus/.gitignore

@@ -1,5 +1,5 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 Pulumi.*.yaml

+ 35 - 0
devops/kubernetes/argus/Pulumi.yaml

@@ -0,0 +1,35 @@
+name: argus
+runtime: nodejs
+description: A Pulumi program to deploy an Argus node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    queryNodeHost:
+      description: Query node GraphQL endpoint
+      default: 'https://hydra.joystream.org/graphql'
+    wsProviderEndpointURI:
+      description: Chain RPC endpoint
+      default: 'wss://rome-rpc-endpoint.joystream.org:9944/'
+    argusImage:
+      description: The distributor node image to use for running the node
+    keys:
+      description: Specifies the keys available within distributor node CLI
+    buckets:
+      description: Specifies the buckets distributed by the node
+    workerId:
+      description: ID of the node operator (distribution working group worker)
+    dataStorage:
+      description: Amount of storage (in Gi) assigned for the data directory
+      default: 10
+    logStorage:
+      description: Amount of storage (in Gi) assigned for the logs directory
+      default: 2
+    cacheStorage:
+      description: Amount of storage (in Gi) assigned for the cache directory
+      default: 10

+ 12 - 9
devops/infrastructure/storage-node/README.md → devops/kubernetes/argus/README.md

@@ -1,6 +1,6 @@
-# Amazon EKS Cluster: Hello World!
+# Argus deployment on Minikube or EKS
 
-This example deploys an EKS Kubernetes cluster with custom ipfs image
+This project deploys an Argus node on an EKS or a minikube cluster
 
 ## Deploying the App
 
@@ -38,25 +38,28 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext queryNodeHost='https://34.197.252.42.nip.io/server/graphql' --plaintext isMinikube=true \
     --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
-    --plaintext isAnonymous=true
+    --plaintext argusImage='joystream/distributor-node:latest' \
+    --plaintext keys='[{ "suri": "//Alice" }]' --plaintext buckets='["1:0","1:1"]' --plaintext workerId=0
    ```
 
-   If running for production use the below mentioned config
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
 
    ```bash
-   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false \
-    --plaintext providerId=<ID> --plaintext keyFile=<PATH> --plaintext publicURL=<DOMAIN> --secret passphrase=<PASSPHRASE>
+   $ pulumi config set isMinikube false
    ```
 
-   You can also set the `storage` and the `colossusPort` config parameters if required
-
 1. Stand up the EKS cluster:
 
    Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a
   new EKS cluster takes between 10-15 minutes.
 
+1. If you are using Minikube, run `minikube service argus-node -n $(pulumi stack output namespaceName)`
+
+   This will set up a proxy for your `argus-node` service, which can then be accessed at
+   the URL given in the output
+
 1. Once the stack is up and running, we will modify the Caddy config to get an SSL certificate for the load balancer
 
    Modify the config variable `isLoadBalancerReady`

+ 5 - 0
devops/kubernetes/argus/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}
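
The same re-tag-via-build trick that Pulumi's `buildAndPushImage` performs with this Dockerfile can be reproduced manually (image names and registry below are placeholders):

```bash
# Build a pass-through image from an existing local image and tag it for a registry
docker build ./docker_dummy \
  --build-arg SOURCE_IMAGE=joystream/distributor-node:latest \
  -t <ACCOUNT_ID>.dkr.ecr.us-east-1.amazonaws.com/distributor-node:latest
```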

+ 229 - 0
devops/kubernetes/argus/index.ts

@@ -0,0 +1,229 @@
+import * as awsx from '@pulumi/awsx'
+import * as aws from '@pulumi/aws'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { CaddyServiceDeployment, CustomPersistentVolume } from 'pulumi-common'
+
+const awsConfig = new pulumi.Config('aws')
+const config = new pulumi.Config()
+
+const queryNodeHost = config.require('queryNodeHost')
+const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
+const configArgusImage = config.require('argusImage')
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const keys = config.require('keys')
+const buckets = config.require('buckets')
+const workerId = config.require('workerId')
+const name = 'argus-node'
+const isMinikube = config.getBoolean('isMinikube')
+const dataStorage = config.getNumber('dataStorage') || 10
+const logStorage = config.getNumber('logStorage') || 2
+const cacheStorage = config.getNumber('cacheStorage') || 10
+
+export let kubeconfig: pulumi.Output<any>
+export let argusImage: pulumi.Output<string> = pulumi.interpolate`${configArgusImage}`
+let provider: k8s.Provider
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('argus-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-argus-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 2,
+    maxSize: 2,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Create a repository
+  const repo = new awsx.ecr.Repository('distributor-node')
+
+  // Build an image and publish it to our ECR repository.
+  argusImage = repo.buildAndPushImage({
+    context: './docker_dummy',
+    dockerfile: './docker_dummy/Dockerfile',
+    args: { SOURCE_IMAGE: argusImage! },
+  })
+
+  // Uncomment the below line to use an existing image
+  // argusImage = pulumi.interpolate`ahhda/distributor-node:latest`
+}
+
+const resourceOptions = { provider: provider }
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const appLabels = { appClass: name }
+
+const dataPVC = new CustomPersistentVolume(
+  'data',
+  { namespaceName: namespaceName, storage: dataStorage },
+  resourceOptions
+)
+const logsPVC = new CustomPersistentVolume(
+  'logs',
+  { namespaceName: namespaceName, storage: logStorage },
+  resourceOptions
+)
+const cachePVC = new CustomPersistentVolume(
+  'cache',
+  { namespaceName: namespaceName, storage: cacheStorage },
+  resourceOptions
+)
+
+// Create a Deployment
+const deployment = new k8s.apps.v1.Deployment(
+  name,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'argus',
+              image: argusImage,
+              imagePullPolicy: 'IfNotPresent',
+              workingDir: '/joystream/distributor-node',
+              env: [
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE',
+                  value: queryNodeHost,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS',
+                  value: wsProviderEndpointURI,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__KEYS',
+                  value: keys,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__BUCKETS',
+                  value: buckets,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__WORKER_ID',
+                  value: workerId,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__PORT',
+                  value: '3334',
+                },
+              ],
+              args: ['start'],
+              ports: [{ containerPort: 3334 }],
+              volumeMounts: [
+                {
+                  name: 'data',
+                  mountPath: '/data',
+                  subPath: 'data',
+                },
+                {
+                  name: 'logs',
+                  mountPath: '/logs',
+                  subPath: 'logs',
+                },
+                {
+                  name: 'cache',
+                  mountPath: '/cache',
+                  subPath: 'cache',
+                },
+              ],
+            },
+          ],
+          volumes: [
+            {
+              name: 'data',
+              persistentVolumeClaim: {
+                claimName: dataPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'logs',
+              persistentVolumeClaim: {
+                claimName: logsPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'cache',
+              persistentVolumeClaim: {
+                claimName: cachePVC.pvc.metadata.name,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+// Create a LoadBalancer Service for the Deployment
+const service = new k8s.core.v1.Service(
+  name,
+  {
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: name,
+    },
+    spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
+      ports: [{ name: 'port-1', port: 3334 }],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
+)
+
+// Export the Service name
+export const serviceName = service.metadata.name
+
+// Export the Deployment name
+export const deploymentName = deployment.metadata.name
+
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
+
+const caddyEndpoints = [
+  ` {
+    reverse_proxy ${name}:3334
+}`,
+]
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}
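
Once the stack is up, the exported outputs above can be used to inspect the deployment. A minimal sketch, assuming the non-minikube path where `kubeconfig` is exported:

```bash
# Fetch cluster credentials and check on the argus-node service
pulumi stack output kubeconfig --show-secrets > kubeconfig
export KUBECONFIG=$PWD/kubeconfig
kubectl get service $(pulumi stack output serviceName) -n $(pulumi stack output namespaceName)
```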

+ 1 - 0
devops/infrastructure/storage-node/package.json → devops/kubernetes/argus/package.json

@@ -9,6 +9,7 @@
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
     "pulumi-common": "file:../pulumi-common"
   }
 }

+ 0 - 0
devops/infrastructure/node-network/tsconfig.json → devops/kubernetes/argus/tsconfig.json


+ 1 - 1
devops/infrastructure/node-network/.gitignore → devops/kubernetes/node-network/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 0 - 0
devops/infrastructure/node-network/Pulumi.yaml → devops/kubernetes/node-network/Pulumi.yaml


+ 0 - 0
devops/infrastructure/node-network/README.md → devops/kubernetes/node-network/README.md


+ 0 - 0
devops/infrastructure/node-network/configMap.ts → devops/kubernetes/node-network/configMap.ts


+ 0 - 0
devops/infrastructure/node-network/index.ts → devops/kubernetes/node-network/index.ts


+ 0 - 0
devops/infrastructure/node-network/json_modify.py → devops/kubernetes/node-network/json_modify.py


+ 0 - 0
devops/infrastructure/node-network/nfsVolume.ts → devops/kubernetes/node-network/nfsVolume.ts


+ 0 - 0
devops/infrastructure/node-network/package.json → devops/kubernetes/node-network/package.json


+ 0 - 0
devops/infrastructure/query-node/tsconfig.json → devops/kubernetes/node-network/tsconfig.json


+ 0 - 0
devops/infrastructure/node-network/utils.ts → devops/kubernetes/node-network/utils.ts


+ 0 - 0
devops/infrastructure/node-network/validator.ts → devops/kubernetes/node-network/validator.ts


+ 0 - 0
devops/infrastructure/pulumi-common/caddy.ts → devops/kubernetes/pulumi-common/caddy.ts


+ 0 - 0
devops/infrastructure/query-node/configMap.ts → devops/kubernetes/pulumi-common/configMap.ts


+ 109 - 0
devops/kubernetes/pulumi-common/database.ts

@@ -0,0 +1,109 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * PostgresServiceDeployment is an example abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class deploys a Postgres instance on a Persistent Volume
+ */
+export class PostgresServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('postgres:service:PostgresServiceDeployment', name, {}, opts)
+
+    const databaseLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    const pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: databaseLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: databaseLabels,
+        },
+        spec: {
+          selector: { matchLabels: databaseLabels },
+          template: {
+            metadata: { labels: databaseLabels },
+            spec: {
+              containers: [
+                {
+                  name: 'postgres-db',
+                  image: 'postgres:12',
+                  env: args.env,
+                  ports: [{ containerPort: 5432 }],
+                  volumeMounts: [
+                    {
+                      name: 'postgres-data',
+                      mountPath: '/var/lib/postgresql/data',
+                      subPath: 'postgres',
+                    },
+                  ],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'postgres-data',
+                  persistentVolumeClaim: {
+                    claimName: pvcName,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.service = new k8s.core.v1.Service(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: this.deployment.metadata.labels,
+          name: name,
+        },
+        spec: {
+          ports: [{ port: 5432 }],
+          selector: this.deployment.spec.template.metadata.labels,
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  env?: Environment[]
+  storage: Number
+  isMinikube?: boolean
+}

+ 4 - 0
devops/kubernetes/pulumi-common/index.ts

@@ -0,0 +1,4 @@
+export { CaddyServiceDeployment } from './caddy'
+export { PostgresServiceDeployment } from './database'
+export { configMapFromFile } from './configMap'
+export { CustomPersistentVolume } from './volume'

+ 0 - 0
devops/infrastructure/pulumi-common/package.json → devops/kubernetes/pulumi-common/package.json


+ 0 - 0
devops/infrastructure/pulumi-common/tsconfig.json → devops/kubernetes/pulumi-common/tsconfig.json


+ 43 - 0
devops/kubernetes/pulumi-common/volume.ts

@@ -0,0 +1,43 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * This is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class creates a Persistent Volume
+ */
+export class CustomPersistentVolume extends pulumi.ComponentResource {
+  public readonly pvc: k8s.core.v1.PersistentVolumeClaim
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('volume:service:CustomPersistentVolume', name, {}, opts)
+
+    const volumeLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    this.pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: volumeLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  storage: Number
+}

+ 1 - 1
devops/infrastructure/query-node/.gitignore → devops/kubernetes/query-node/.gitignore

@@ -1,6 +1,6 @@
 /bin/
 /node_modules/
-kubeconfig.yml
+kubeconfig*
 package-lock.json
 .env
 Pulumi.*.yaml

+ 36 - 0
devops/kubernetes/query-node/Pulumi.yaml

@@ -0,0 +1,36 @@
+name: query-node
+runtime: nodejs
+description: Kubernetes IaC for Query Node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    externalIndexerUrl:
+      description: URL of an external indexer. If set, an indexer will not be deployed.
+      default: 'http://query-node:4000/graphql'
+    skipProcessor:
+      description: If set to true, will not deploy a processor instance
+      default: false
+    useLocalRepo:
+      description: If set to true, will use an existing local Docker image
+      default: false
+    appsImage:
+      description: The joystream image to use for running GraphQL servers
+      default: joystream/apps:latest
+    dbPassword:
+      description: Database password for the indexer and processor databases
+      required: true
+    blockHeight:
+      description: Block height to start indexing at
+      default: 0
+    joystreamWsEndpoint:
+      description: Joystream-node websocket endpoint used by indexer
+      required: true

+ 139 - 0
devops/kubernetes/query-node/README.md

@@ -0,0 +1,139 @@
+# Query Node automated deployment
+
+Deploys an EKS Kubernetes cluster with query node
+
+## Deploying the App
+
+To deploy your infrastructure, follow the below steps.
+
+### Prerequisites
+
+1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/)
+1. [Install Node.js](https://nodejs.org/en/download/)
+1. Install a package manager for Node.js, such as [npm](https://www.npmjs.com/get-npm) or [Yarn](https://yarnpkg.com/en/docs/install).
+1. [Configure AWS Credentials](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
+1. Optional (for debugging): [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
+
+### Steps
+
+After cloning this repo, from this working directory, run these commands:
+
+1. Install the required Node.js packages:
+
+   This installs the dependent packages [needed](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) for our Pulumi program.
+
+   ```bash
+   $ npm install
+   ```
+
+1. Create a new stack, which is an isolated deployment target for this example:
+
+   This will initialize the Pulumi program in TypeScript.
+
+   ```bash
+   $ pulumi stack init
+   ```
+
+1. Set the required configuration variables in `Pulumi.<stack>.yaml`
+
+   ```bash
+   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext dbPassword=password --plaintext blockHeight=0 \
+    --plaintext joystreamWsEndpoint=ws://endpoint.somewhere.net:9944 \
+    --plaintext isMinikube=true --plaintext skipProcessor=false
+   ```
+
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
+
+   ```bash
+   $ pulumi config set isMinikube false
+   ```
+
+   If you want to use an existing Indexer and not deploy a new one set `externalIndexerUrl`
+
+   ```bash
+   $ pulumi config set externalIndexerUrl <URL>
+   ```
+
+   You must have a valid Docker image of `joystream/apps`, either on Docker Hub or locally, to deploy the infrastructure.
+   If the image exists locally and you are running on minikube, run
+
+   ```bash
+   $ pulumi config set-all --plaintext useLocalRepo=true --plaintext appsImage=<IMAGE_NAME>
+   ```
+
+   NOTE: The docker daemon for minikube is different from that of Docker Desktop. To connect your Docker CLI to the docker
+   daemon inside the VM you need to run: `eval $(minikube docker-env)`. To copy the image from your local daemon to minikube run
+   `minikube image load joystream/apps:latest --daemon`.
+
+   If not using minikube, just specify the `appsImage` config.
+
+1. Stand up the Kubernetes cluster:
+
+   Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a
+   new EKS cluster takes between 10-15 minutes.
+
+1. Once the stack is up and running, we will modify the Caddy config to get an SSL certificate for the load balancer
+
+   Modify the config variable `isLoadBalancerReady`
+
+   ```bash
+   $ pulumi config set isLoadBalancerReady true
+   ```
+
+   Run `pulumi up -y` to update the Caddy config
+
+1. You can now access the endpoints using `pulumi stack output endpoint1` or `pulumi stack output endpoint2`
+
+   The GraphQL server is accessible at `https://<ENDPOINT>/server/graphql` and the indexer at `https://<ENDPOINT>/indexer/graphql`
+
+1. If you are using Minikube, run `minikube service graphql-server -n $(pulumi stack output namespaceName)`
+
+   This will set up a proxy for your `query-node` service, which can then be accessed at
+   the URL given in the output
+
+1. Access the Kubernetes Cluster using `kubectl`
+
+   To access your new Kubernetes cluster using `kubectl`, we need to set up the
+   `kubeconfig` file and download `kubectl`. We can leverage the Pulumi
+   stack output in the CLI, as Pulumi facilitates exporting these objects for us.
+
+   ```bash
+   $ pulumi stack output kubeconfig --show-secrets > kubeconfig
+   $ export KUBECONFIG=$PWD/kubeconfig
+   $ kubectl get nodes
+   ```
+
+   We can also use the stack output to query the cluster for our newly created Deployment:
+
+   ```bash
+   $ kubectl get deployment $(pulumi stack output deploymentName) --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
+   ```
+
+   To get logs
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To see complete pulumi stack output
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To execute a command
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c <CONTAINER> -- /bin/bash
+   ```
+
+1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
+
+   ```bash
+   $ pulumi destroy --yes
+   $ pulumi stack rm --yes
+   ```

+ 29 - 0
devops/kubernetes/query-node/configMap.ts

@@ -0,0 +1,29 @@
+import * as pulumi from '@pulumi/pulumi'
+import * as k8s from '@pulumi/kubernetes'
+import * as fs from 'fs'
+
+export class ConfigMapFromFile extends pulumi.ComponentResource {
+  public readonly configName?: pulumi.Output<string>
+
+  constructor(name: string, args: ConfigMapArgs, opts: pulumi.ComponentResourceOptions = {}) {
+    super('pkg:query-node:configMap', name, {}, opts)
+
+    this.configName = new k8s.core.v1.ConfigMap(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        data: {
+          'fileData': fs.readFileSync(args.filePath).toString(),
+        },
+      },
+      opts
+    ).metadata.apply((m) => m.name)
+  }
+}
+
+export interface ConfigMapArgs {
+  filePath: string
+  namespaceName: pulumi.Output<string>
+}

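A minimal usage sketch for `ConfigMapFromFile`, showing how the `configName` output feeds a pod volume; the file path and mount path are the ones used later in this diff, and the data always lands under the fixed `fileData` key:

```typescript
import * as k8s from '@pulumi/kubernetes'
import { ConfigMapFromFile } from './configMap'

const ns = new k8s.core.v1.Namespace('query-node', {})

// Wrap a local file into a ConfigMap in the namespace
const defsConfig = new ConfigMapFromFile('defs-config', {
  filePath: '../../../types/augment/all/defs.json',
  namespaceName: ns.metadata.name,
}).configName

// Mount it into a container; subPath must be 'fileData', the key the
// class stores the file contents under
const volume = { name: 'defs-volume', configMap: { name: defsConfig } }
const volumeMount = {
  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
  name: 'defs-volume',
  subPath: 'fileData',
}
```
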
+ 5 - 0
devops/kubernetes/query-node/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}

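This dummy Dockerfile pairs with `awsx`'s `buildAndPushImage`, which always performs a build before pushing; reducing the build to a single `FROM` on `SOURCE_IMAGE` effectively re-tags and pushes a pre-built image. The companion call, as it appears in `index.ts` below (with the image literal standing in for the `appsImage` config value):

```typescript
import * as awsx from '@pulumi/awsx'

const repo = new awsx.ecr.Repository('joystream/apps')

// "Build" from the dummy Dockerfile, i.e. re-tag the existing image,
// then push the result to the ECR repository
const joystreamAppsImage = repo.buildAndPushImage({
  context: './docker_dummy',
  dockerfile: './docker_dummy/Dockerfile',
  args: { SOURCE_IMAGE: 'joystream/apps:latest' },
})
```
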
+ 137 - 0
devops/kubernetes/query-node/index.ts

@@ -0,0 +1,137 @@
+import * as awsx from '@pulumi/awsx'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as pulumi from '@pulumi/pulumi'
+import { ConfigMapFromFile } from './configMap'
+import * as k8s from '@pulumi/kubernetes'
+import { IndexerServiceDeployment } from './indexerDeployment'
+import { ProcessorServiceDeployment } from './processorDeployment'
+import { CaddyServiceDeployment } from 'pulumi-common'
+
+const config = new pulumi.Config()
+const awsConfig = new pulumi.Config('aws')
+const isMinikube = config.getBoolean('isMinikube')
+const externalIndexerUrl = config.get('externalIndexerUrl')
+const appsImage = config.get('appsImage') || `joystream/apps:latest`
+const skipProcessor = config.getBoolean('skipProcessor')
+const useLocalRepo = config.getBoolean('useLocalRepo')
+
+export let kubeconfig: pulumi.Output<any>
+export let joystreamAppsImage: pulumi.Output<string> = pulumi.interpolate`${appsImage}`
+let provider: k8s.Provider
+
+if (skipProcessor && externalIndexerUrl) {
+  pulumi.log.info('No Indexer or Processor will be deployed, only the cluster')
+}
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+
+  if (useLocalRepo) {
+    // Use already existing image in minikube environment
+    joystreamAppsImage = pulumi.interpolate`${appsImage}`
+  } else {
+    // Access image from docker hub
+    joystreamAppsImage = new docker.RemoteImage('apps', {
+      name: appsImage!,
+    }).name
+  }
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('query-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-query-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 3,
+    maxSize: 3,
+    instanceType: 't2.large',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Only deploy ECR and push image if we need to deploy processor from
+  // local image build.
+  if (!skipProcessor && useLocalRepo) {
+    // Create a repository
+    const repo = new awsx.ecr.Repository('joystream/apps')
+
+    // Build an image from an existing local/docker hub image and push to ECR
+    joystreamAppsImage = repo.buildAndPushImage({
+      context: './docker_dummy',
+      dockerfile: './docker_dummy/Dockerfile',
+      args: { SOURCE_IMAGE: appsImage },
+    })
+  }
+}
+
+const resourceOptions = { provider: provider }
+
+const name = 'query-node'
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const defsConfig = new ConfigMapFromFile(
+  'defs-config',
+  {
+    filePath: '../../../types/augment/all/defs.json',
+    namespaceName: namespaceName,
+  },
+  resourceOptions
+).configName
+
+if (!externalIndexerUrl) {
+  const indexer = new IndexerServiceDeployment(
+    'indexer',
+    { namespaceName, storage: 10, defsConfig, joystreamAppsImage },
+    resourceOptions
+  )
+}
+
+if (!skipProcessor) {
+  const processor = new ProcessorServiceDeployment(
+    'processor',
+    { namespaceName, storage: 10, defsConfig, joystreamAppsImage, externalIndexerUrl },
+    resourceOptions
+  )
+}
+
+const caddyEndpoints = [
+  `/indexer* {
+    uri strip_prefix /indexer
+    reverse_proxy indexer:4000
+  }`,
+  `/server* {
+    uri strip_prefix /server
+    reverse_proxy graphql-server:8081
+  }`,
+  `/@apollographql/* {
+    reverse_proxy graphql-server:8081
+  }`,
+]
+
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+
+export let endpoint1: pulumi.Output<string>
+export let endpoint2: pulumi.Output<string>
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, isMinikube, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}

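Besides `pulumi stack output`, the exports above (`kubeconfig`, `namespaceName`, `endpoint1`, `endpoint2`) can be consumed from another Pulumi program via a stack reference; a sketch, with the stack path as a placeholder:

```typescript
import * as pulumi from '@pulumi/pulumi'

// '<org>/query-node/<stack>' is a placeholder for the actual stack path
const queryNode = new pulumi.StackReference('<org>/query-node/<stack>')

const endpoint = queryNode.getOutput('endpoint1')
export const graphqlUrl = pulumi.interpolate`https://${endpoint}/server/graphql`
```
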
+ 158 - 0
devops/kubernetes/query-node/indexerDeployment.ts

@@ -0,0 +1,158 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { PostgresServiceDeployment } from 'pulumi-common'
+
+/**
+ * IndexerServiceDeployment is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class deploys a database and the indexer Deployment and Service.
+ */
+export class IndexerServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('indexer:service:IndexerServiceDeployment', name, {}, opts)
+
+    const config = new pulumi.Config()
+    const DB_PASS = config.require('dbPassword')
+    const BLOCK_HEIGHT = config.get('blockHeight') || '0' // get(), not require(): the '0' fallback handles the unset case
+    const WS_PROVIDER_ENDPOINT_URI = config.require('joystreamWsEndpoint')
+
+    const DB_USERNAME = 'postgres'
+    const INDEXER_DATABASE_NAME = 'indexer'
+    const DB_PORT = '5432'
+
+    // Name passed in the constructor will be the endpoint for accessing the service
+    const serviceName = name
+    const appLabels = { appClass: 'indexer' }
+
+    const indexerDbName = 'indexer-db'
+    const indexerDb = new PostgresServiceDeployment(
+      indexerDbName,
+      {
+        namespaceName: args.namespaceName,
+        env: [
+          { name: 'POSTGRES_USER', value: DB_USERNAME },
+          { name: 'POSTGRES_PASSWORD', value: DB_PASS },
+          { name: 'POSTGRES_DB', value: INDEXER_DATABASE_NAME },
+          { name: 'PGPORT', value: DB_PORT },
+        ],
+        storage: args.storage,
+      },
+      { parent: this }
+    )
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      'indexer',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'redis',
+                  image: 'redis:6.0-alpine',
+                  ports: [{ containerPort: 6379 }],
+                },
+                {
+                  name: 'indexer',
+                  image: 'joystream/hydra-indexer:3.0.0',
+                  env: [
+                    { name: 'DB_HOST', value: indexerDbName },
+                    { name: 'DB_NAME', value: INDEXER_DATABASE_NAME },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                    { name: 'INDEXER_WORKERS', value: '5' },
+                    // localhost for redis should work since it is in the same deployment
+                    { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
+                    { name: 'DEBUG', value: 'index-builder:*' },
+                    { name: 'WS_PROVIDER_ENDPOINT_URI', value: WS_PROVIDER_ENDPOINT_URI },
+                    { name: 'TYPES_JSON', value: 'types.json' },
+                    { name: 'PGUSER', value: DB_USERNAME },
+                    { name: 'BLOCK_HEIGHT', value: BLOCK_HEIGHT },
+                  ],
+                  volumeMounts: [
+                    {
+                      mountPath: '/home/hydra/packages/hydra-indexer/types.json',
+                      name: 'indexer-volume',
+                      subPath: 'fileData',
+                    },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: ['yarn db:bootstrap && yarn start:prod'],
+                },
+                {
+                  name: 'hydra-indexer-gateway',
+                  image: 'joystream/hydra-indexer-gateway:3.0.0',
+                  env: [
+                    { name: 'WARTHOG_STARTER_DB_DATABASE', value: INDEXER_DATABASE_NAME },
+                    { name: 'WARTHOG_STARTER_DB_HOST', value: indexerDbName },
+                    { name: 'WARTHOG_STARTER_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_STARTER_DB_PORT', value: DB_PORT },
+                    { name: 'WARTHOG_STARTER_DB_USERNAME', value: DB_USERNAME },
+                    // localhost for redis should work since it is in the same deployment
+                    { name: 'WARTHOG_STARTER_REDIS_URI', value: 'redis://localhost:6379/0' },
+                    { name: 'WARTHOG_APP_PORT', value: '4001' },
+                    { name: 'PORT', value: '4001' },
+                    { name: 'DEBUG', value: '*' },
+                  ],
+                  ports: [{ name: 'hydra-port', containerPort: 4001 }],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'indexer-volume',
+                  configMap: {
+                    name: args.defsConfig,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: indexerDb.service }
+    )
+
+    // Create a Service for the Indexer
+    this.service = new k8s.core.v1.Service(
+      serviceName,
+      {
+        metadata: {
+          labels: appLabels,
+          namespace: args.namespaceName,
+          name: serviceName,
+        },
+        spec: {
+          ports: [{ name: 'port-1', port: 4000, targetPort: 'hydra-port' }],
+          selector: appLabels,
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  joystreamAppsImage: pulumi.Output<string>
+  defsConfig: pulumi.Output<string> | undefined
+  env?: Environment[]
+  storage: number
+}

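One subtlety in the wiring above: nothing in the pod listens on port 4000. The Service exposes 4000 and forwards to the named `hydra-port`, which resolves to 4001 on the `hydra-indexer-gateway` container, so consumers (including the processor's default indexer URL below) address `indexer:4000`. A stripped-down sketch of just that mapping:

```typescript
import * as k8s from '@pulumi/kubernetes'

// Service side: clients hit indexer:4000; the string targetPort
// 'hydra-port' is resolved against the pod's *named* container port
// (name: 'hydra-port', containerPort: 4001) on the selected pods
const indexerService = new k8s.core.v1.Service('indexer', {
  metadata: { name: 'indexer' },
  spec: {
    ports: [{ name: 'port-1', port: 4000, targetPort: 'hydra-port' }],
    selector: { appClass: 'indexer' },
  },
})
```
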
+ 0 - 0
devops/infrastructure/query-node/package.json → devops/kubernetes/query-node/package.json


+ 237 - 0
devops/kubernetes/query-node/processorDeployment.ts

@@ -0,0 +1,237 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { PostgresServiceDeployment } from 'pulumi-common'
+
+/**
+ * ProcessorServiceDeployment is an abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class deploys a database, a migration job, a GraphQL server and a processor.
+ */
+export class ProcessorServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+  public readonly endpoint: string
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('processor:service:ProcessorServiceDeployment', name, {}, opts)
+
+    const config = new pulumi.Config()
+    const DB_PASS = config.require('dbPassword')
+    const DB_USERNAME = 'postgres'
+    const PROCESSOR_DATABASE_NAME = 'processor'
+    const DB_PORT = '5432'
+
+    // Name passed in the constructor will be the endpoint for accessing the service
+    this.endpoint = 'graphql-server'
+
+    const processorDbName = 'processor-db'
+    const processorDb = new PostgresServiceDeployment(
+      processorDbName,
+      {
+        namespaceName: args.namespaceName,
+        env: [
+          { name: 'POSTGRES_USER', value: DB_USERNAME },
+          { name: 'POSTGRES_PASSWORD', value: DB_PASS },
+          { name: 'POSTGRES_DB', value: PROCESSOR_DATABASE_NAME },
+          { name: 'PGPORT', value: DB_PORT },
+        ],
+        storage: args.storage,
+      },
+      { parent: this }
+    )
+
+    const processorMigrationJob = new k8s.batch.v1.Job(
+      'processor-db-migration',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+        },
+        spec: {
+          backoffLimit: 0,
+          template: {
+            spec: {
+              containers: [
+                {
+                  name: 'db-migration',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  resources: { requests: { cpu: '100m', memory: '100Mi' } },
+                  env: [
+                    {
+                      name: 'WARTHOG_DB_HOST',
+                      value: processorDbName,
+                    },
+                    {
+                      name: 'DB_HOST',
+                      value: processorDbName,
+                    },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_DB_PORT', value: DB_PORT },
+                    { name: 'DB_NAME', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                  ],
+                  command: ['/bin/sh', '-c'],
+                  args: [
+                    // 'yarn workspace query-node config:dev;',
+                    'yarn workspace query-node-root db:prepare; yarn workspace query-node-root db:migrate',
+                  ],
+                },
+              ],
+              restartPolicy: 'Never',
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: processorDb.service }
+    )
+
+    let appLabels = { appClass: 'graphql-server' }
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      'graphql-server',
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'graphql-server',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  env: [
+                    { name: 'DB_HOST', value: processorDbName },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                    { name: 'DB_NAME', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_APP_PORT', value: '4002' },
+                    // Why do we need this anyway?
+                    { name: 'GRAPHQL_SERVER_HOST', value: 'graphql-server' },
+                  ],
+                  ports: [{ name: 'graph-ql-port', containerPort: 4002 }],
+                  args: ['workspace', 'query-node-root', 'query-node:start:prod'],
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: processorMigrationJob }
+    )
+
+    // Create a Service for the GraphQL Server
+    this.service = new k8s.core.v1.Service(
+      'graphql-server',
+      {
+        metadata: {
+          labels: appLabels,
+          namespace: args.namespaceName,
+          name: this.endpoint,
+        },
+        spec: {
+          ports: [{ name: 'port-1', port: 8081, targetPort: 'graph-ql-port' }],
+          selector: appLabels,
+        },
+      },
+      { parent: this }
+    )
+
+    const indexerURL = args.externalIndexerUrl || `http://indexer:4000/graphql`
+    appLabels = { appClass: 'processor' }
+
+    const processorDeployment = new k8s.apps.v1.Deployment(
+      `processor`,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: appLabels,
+        },
+        spec: {
+          replicas: 1,
+          selector: { matchLabels: appLabels },
+          template: {
+            metadata: {
+              labels: appLabels,
+            },
+            spec: {
+              containers: [
+                {
+                  name: 'processor',
+                  image: args.joystreamAppsImage,
+                  imagePullPolicy: 'IfNotPresent',
+                  env: [
+                    {
+                      name: 'INDEXER_ENDPOINT_URL',
+                      value: indexerURL,
+                    },
+                    { name: 'TYPEORM_HOST', value: processorDbName },
+                    { name: 'TYPEORM_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'DEBUG', value: 'index-builder:*' },
+                    { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+                    { name: 'DB_PASS', value: DB_PASS },
+                    { name: 'DB_USER', value: DB_USERNAME },
+                    { name: 'DB_PORT', value: DB_PORT },
+                    { name: 'WARTHOG_DB_DATABASE', value: PROCESSOR_DATABASE_NAME },
+                    { name: 'WARTHOG_DB_USERNAME', value: DB_USERNAME },
+                    { name: 'WARTHOG_DB_PASSWORD', value: DB_PASS },
+                    { name: 'WARTHOG_DB_PORT', value: DB_PORT },
+                    // These are not required but must be defined or the processor will not start up
+                    { name: 'WARTHOG_APP_HOST', value: 'graphql-server' },
+                    { name: 'WARTHOG_APP_PORT', value: '4002' },
+                  ],
+                  volumeMounts: [
+                    {
+                      mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
+                      name: 'processor-volume',
+                      subPath: 'fileData',
+                    },
+                  ],
+                  args: ['workspace', 'query-node-root', 'processor:start'],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'processor-volume',
+                  configMap: {
+                    name: args.defsConfig,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this, dependsOn: this.service }
+    )
+  }
+}
+
+interface Environment {
+  name: string
+  value: string
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  joystreamAppsImage: pulumi.Output<string>
+  defsConfig: pulumi.Output<string> | undefined
+  externalIndexerUrl: string | undefined
+  env?: Environment[]
+  storage: number
+}

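The same DB/Warthog connection settings are repeated across the migration job, the GraphQL server and the processor above. If this is ever refactored, a shared helper could build the env list once; a sketch (the helper is ours, not from the codebase):

```typescript
interface EnvVar {
  name: string
  value: string
}

// Hypothetical helper: derive the repeated DB/Warthog env entries
// from one set of connection parameters
function dbEnv(host: string, db: string, user: string, pass: string, port: string): EnvVar[] {
  return [
    { name: 'DB_HOST', value: host },
    { name: 'DB_NAME', value: db },
    { name: 'DB_USER', value: user },
    { name: 'DB_PASS', value: pass },
    { name: 'DB_PORT', value: port },
    { name: 'WARTHOG_DB_DATABASE', value: db },
    { name: 'WARTHOG_DB_USERNAME', value: user },
    { name: 'WARTHOG_DB_PASSWORD', value: pass },
    { name: 'WARTHOG_DB_PORT', value: port },
  ]
}

// Usage inside a container spec:
// env: [...dbEnv('processor-db', 'processor', 'postgres', DB_PASS, '5432'),
//       { name: 'DEBUG', value: 'index-builder:*' }]
```
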
+ 0 - 0
devops/infrastructure/query-node/s3Helpers.ts → devops/kubernetes/query-node/s3Helpers.ts


+ 0 - 0
devops/infrastructure/storage-node/tsconfig.json → devops/kubernetes/query-node/tsconfig.json


+ 5 - 0
devops/kubernetes/storage-node/.gitignore

@@ -0,0 +1,5 @@
+/bin/
+/node_modules/
+kubeconfig*
+package-lock.json
+Pulumi.*.yaml

+ 16 - 12
devops/infrastructure/storage-node/Pulumi.yaml → devops/kubernetes/storage-node/Pulumi.yaml

@@ -1,33 +1,37 @@
-name: eks-cluster
+name: storage-node
 runtime: nodejs
-description: A Pulumi program to deploy storage node to cloud environment
+description: A Pulumi program to deploy storage node to Kubernetes
 template:
   config:
     aws:profile:
       default: joystream-user
     aws:region:
       default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
     wsProviderEndpointURI:
       description: Chain RPC endpoint
-      default: 'wss://rome-rpc-endpoint.joystream.org:9944/'
-    isAnonymous:
-      description: Whether you are deploying an anonymous storage node
-      default: true
     isLoadBalancerReady:
       description: Whether the load balancer service is ready and has been assigned an IP
       default: false
     colossusPort:
       description: Port that is exposed for the colossus container
-      default: 3000
+      default: 3333
     storage:
       description: Amount of storage in gigabytes for ipfs volume
       default: 40
-    providerId:
-      description: StorageProviderId assigned to you in working group
     keyFile:
-      description: Path to JSON key export file to use as the storage provider (role account)
-    publicURL:
-      description: API Public URL to announce
+      description: Key file for the account
     passphrase:
       description: Optional passphrase to use to decrypt the key-file
       secret: true
+    colossusImage:
+      description: The colossus image to use for running the storage node
+      default: joystream/colossus:latest
+    queryNodeEndpoint:
+      description: Full URL for Query node endpoint
+    workerId:
+      description: ID of the node operator (storage working group worker)
+    accountURI:
+      description: Account URI

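The storage-node program (its `index.ts` is not shown in this diff) presumably reads these along the same lines as the query-node program; a sketch with assumed variable names, noting that `secret: true` values come back as secret outputs:

```typescript
import * as pulumi from '@pulumi/pulumi'

const config = new pulumi.Config()

// `secret: true` above: the value is encrypted in state and is read
// back as a secret Output rather than a plain string
const passphrase = config.getSecret('passphrase')

const colossusPort = config.getNumber('colossusPort') || 3333
const colossusImage = config.get('colossusImage') || 'joystream/colossus:latest'
const queryNodeEndpoint = config.get('queryNodeEndpoint')
const workerId = config.get('workerId')
```
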
+ 19 - 14
devops/infrastructure/query-node/README.md → devops/kubernetes/storage-node/README.md

@@ -1,6 +1,6 @@
-# Query Node automated deployment
+# Storage Node automated deployment
 
-Deploys an EKS Kubernetes cluster with query node
+Deploy storage-node to a Kubernetes cluster
 
 ## Deploying the App
 
@@ -37,26 +37,29 @@ After cloning this repo, from this working directory, run these commands:
 1. Set the required configuration variables in `Pulumi.<stack>.yaml`
 
    ```bash
-   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+   $ pulumi config set-all --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
+    --plaintext queryNodeEndpoint='http://graphql-server.query-node-yszsbs2i:8081' \
+    --plaintext keyFile='../../../keyfile.json' --secret passphrase='' \
+    --plaintext accountURI='//Alice' --plaintext workerId=0 \
+    --plaintext isMinikube=true --plaintext colossusImage='joystream/colossus:latest' \
+    --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`
 
    ```bash
-   $ puluim config set isMinikube false
+   $ pulumi config set isMinikube false
    ```
 
-1. Create a `.env` file in this directory (`cp ../../../.env ./.env`) and set the database and other variables in it
+   You can also set the `storage` and the `colossusPort` config parameters if required. Check the `Pulumi.yaml` file
+   for additional parameters.
 
-   Make sure to set `GRAPHQL_SERVER_PORT=4001`
-
-1. Stand up the Kubernetes cluster:
+1. Stand up the EKS cluster:
 
    Running `pulumi up -y` will deploy the EKS cluster. Note, provisioning a
    new EKS cluster takes between 10-15 minutes.
 
-1. Once the stack is up and running, we will modify the Caddy config to get SSL certificate for the load balancer
+1. Once the stack is up and running, we will modify the Caddy config to get an SSL certificate for the load balancer
 
    Modify the config variable `isLoadBalancerReady`
 
@@ -66,10 +69,6 @@ After cloning this repo, from this working directory, run these commands:
 
    Run `pulumi up -y` to update the Caddy config
 
-1. You can now access the endpoints using `pulumi stack output endpoint1` or `pulumi stack output endpoint2`
-
-   The GraphQl server is accessible at `https://<ENDPOINT>/server/graphql` and indexer at `https://<ENDPOINT>/indexer/graphql`
-
 1. Access the Kubernetes Cluster using `kubectl`
 
    To access your new Kubernetes cluster using `kubectl`, we need to set up the
@@ -97,6 +96,12 @@ After cloning this repo, from this working directory, run these commands:
    $ kubectl logs <PODNAME> --all-containers
    ```
 
+   To run a command on a pod
+
+   ```bash
+   $ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1}
+   ```
+
    To see complete pulumi stack output
 
    ```bash
    ```bash

+ 5 - 0
devops/kubernetes/storage-node/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}

Some files were not shown because too many files changed in this diff