Parcourir la source

Merge pull request #2787 from mnaamani/giza_staging_update_from_master

Giza staging update from master
Mokhtar Naamani il y a 3 ans
Parent
commit
a95c6bbaf6

+ 55 - 46
.github/workflows/create-ami.yml

@@ -11,49 +11,58 @@ jobs:
       STACK_NAME: joystream-github-action-${{ github.run_number }}
       KEY_NAME: joystream-github-action-key
     steps:
-    - name: Extract branch name
-      shell: bash
-      run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-      id: extract_branch
-
-    - name: Set AMI Name environment variable
-      shell: bash
-      run: echo "ami_name=joystream-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
-      id: ami_name
-
-    - name: Checkout
-      uses: actions/checkout@v2
-
-    - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v1
-      with:
-        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-        aws-region: us-east-1
-
-    - name: Deploy to AWS CloudFormation
-      uses: aws-actions/aws-cloudformation-github-deploy@v1
-      id: deploy_stack
-      with:
-        name: ${{ env.STACK_NAME }}
-        template: devops/infrastructure/single-instance.yml
-        no-fail-on-empty-changeset: "1"
-        parameter-overrides: "KeyName=${{ env.KEY_NAME }}"
-
-    - name: Install Ansible dependencies
-      run: pipx inject ansible-core boto3 botocore
-
-    - name: Run playbook
-      uses: dawidd6/action-ansible-playbook@v2
-      with:
-        playbook: github-action-playbook.yml
-        directory: devops/infrastructure
-        requirements: requirements.yml
-        key: ${{ secrets.SSH_PRIVATE_KEY }}
-        inventory: |
-          [all]
-          ${{ steps.deploy_stack.outputs.PublicIp }}
-        options: |
-          --extra-vars "git_repo=https://github.com/${{ github.repository }} \
-                        branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
-                        stack_name=${{ env.STACK_NAME }} ami_name=${{ env.ami_name }}"
+      - name: Extract branch name
+        shell: bash
+        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        id: extract_branch
+
+      - name: Set AMI Name environment variable
+        shell: bash
+        run: echo "ami_name=joystream-${{ steps.extract_branch.outputs.branch }}-${{ github.run_number }}" >> $GITHUB_ENV
+        id: ami_name
+
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-east-1
+
+      - name: Deploy to AWS CloudFormation
+        uses: aws-actions/aws-cloudformation-github-deploy@v1
+        id: deploy_stack
+        with:
+          name: ${{ env.STACK_NAME }}
+          template: devops/infrastructure/cloudformation/single-instance.yml
+          no-fail-on-empty-changeset: '1'
+          parameter-overrides: 'KeyName=${{ env.KEY_NAME }}'
+
+      - name: Install Ansible dependencies
+        run: pipx inject ansible-core boto3 botocore
+
+      - name: Run playbook
+        uses: dawidd6/action-ansible-playbook@v2
+        with:
+          playbook: github-action-playbook.yml
+          directory: devops/infrastructure
+          requirements: requirements.yml
+          key: ${{ secrets.SSH_PRIVATE_KEY }}
+          inventory: |
+            [all]
+            ${{ steps.deploy_stack.outputs.PublicIp }}
+          options: |
+            --extra-vars "git_repo=https://github.com/${{ github.repository }} \
+                          branch_name=${{ steps.extract_branch.outputs.branch }} instance_id=${{ steps.deploy_stack.outputs.InstanceId }}
+                          ami_name=${{ env.ami_name }}"
+
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}

+ 11 - 2
.github/workflows/joystream-node-docker.yml

@@ -120,7 +120,7 @@ jobs:
         id: deploy_stack
         with:
           name: ${{ env.STACK_NAME }}
-          template: devops/infrastructure/single-instance-docker.yml
+          template: devops/infrastructure/cloudformation/single-instance-docker.yml
           no-fail-on-empty-changeset: '1'
           parameter-overrides: 'KeyName=${{ env.KEY_NAME }},EC2AMI=ami-00d1ab6b335f217cf,EC2InstanceType=t4g.xlarge'
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
@@ -142,9 +142,18 @@ jobs:
                           docker_password=${{ secrets.DOCKERHUB_PASSWORD }} \
                           tag_name=${{ steps.compute_shasum.outputs.shasum }}-${{ matrix.platform_tag }} \
                           repository=${{ env.REPOSITORY }} dockerfile=${{ matrix.file }} \
-                          stack_name=${{ env.STACK_NAME }} platform=${{ matrix.platform }}"
+                          platform=${{ matrix.platform }}"
         if: ${{ steps.compute_image_exists.outputs.image_exists == 1 }}
 
+      - name: Delete CloudFormation Stack
+        if: always()
+        continue-on-error: true
+        run: |
+          echo "Deleting ${{ env.STACK_NAME }} stack"
+          aws cloudformation delete-stack --stack-name ${{ env.STACK_NAME }}
+          echo "Waiting for ${{ env.STACK_NAME }} to be deleted..."
+          aws cloudformation wait stack-delete-complete --stack-name ${{ env.STACK_NAME }}
+
   push-manifest:
     name: Create manifest using both the arch images
     needs: [push-amd64, push-arm]

+ 34 - 39
devops/infrastructure/build-arm64-playbook.yml

@@ -5,46 +5,41 @@
   hosts: all
 
   tasks:
-    - block:
-        - name: Get code from git repo
-          include_role:
-            name: common
-            tasks_from: get-code-git
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
 
-        - name: Install Docker Module for Python
-          pip:
-            name: docker
+    - name: Install Docker Module for Python
+      pip:
+        name: docker
 
-        - name: Log into DockerHub
-          community.docker.docker_login:
-            username: '{{ docker_username }}'
-            password: '{{ docker_password }}'
+    - name: Log into DockerHub
+      community.docker.docker_login:
+        username: '{{ docker_username }}'
+        password: '{{ docker_password }}'
 
-        - name: Build an image and push it to a private repo
-          community.docker.docker_image:
-            build:
-              path: ./joystream
-              dockerfile: '{{ dockerfile }}'
-              platform: '{{ platform }}'
-            name: '{{ repository }}'
-            tag: '{{ tag_name }}'
-            push: yes
-            source: build
-          async: 7200
-          poll: 0
-          register: build_result
+    - name: Build an image and push it to a private repo
+      community.docker.docker_image:
+        build:
+          path: ./joystream
+          dockerfile: '{{ dockerfile }}'
+          platform: '{{ platform }}'
+        name: '{{ repository }}'
+        tag: '{{ tag_name }}'
+        push: yes
+        source: build
+      # Run in async fashion for max duration of 2 hours
+      async: 7200
+      poll: 0
+      register: build_result
 
-        - name: Check on build async task
-          async_status:
-            jid: '{{ build_result.ansible_job_id }}'
-          register: job_result
-          until: job_result.finished
-          retries: 72
-          delay: 100
-
-      always:
-        - name: Delete the stack
-          amazon.aws.cloudformation:
-            stack_name: '{{ stack_name }}'
-            state: 'absent'
-          delegate_to: localhost
+    - name: Check on build async task
+      async_status:
+        jid: '{{ build_result.ansible_job_id }}'
+      register: job_result
+      until: job_result.finished
+      # Max number of times to check for status
+      retries: 72
+      # Check for the status every 100s
+      delay: 100

+ 6 - 1
devops/infrastructure/infrastructure.yml → devops/infrastructure/cloudformation/infrastructure.yml

@@ -73,6 +73,10 @@ Resources:
           FromPort: 443
           ToPort: 443
           CidrIp: 0.0.0.0/0
+        - IpProtocol: tcp
+          FromPort: 80
+          ToPort: 80
+          CidrIp: 0.0.0.0/0
         - IpProtocol: tcp
           FromPort: 22
           ToPort: 22
@@ -112,7 +116,8 @@ Resources:
             # Update all packages
             apt-get update -y
 
-            # Install the updates
+            # Install the updates except docker, to avoid interactive prompt which blocks the flow of the script
+            apt-mark hold docker.io
             apt-get upgrade -y
 
             # Get latest cfn scripts and install them;

+ 0 - 0
devops/infrastructure/single-instance-docker.yml → devops/infrastructure/cloudformation/single-instance-docker.yml


+ 0 - 0
devops/infrastructure/single-instance.yml → devops/infrastructure/cloudformation/single-instance.yml


+ 4 - 0
devops/infrastructure/deploy-config.sample.cfg

@@ -36,6 +36,10 @@ CHAIN_SPEC_FILE="https://github.com/Joystream/joystream/releases/download/v9.3.0
 LOCAL_CODE_PATH="~/Joystream/joystream"
 NETWORK_SUFFIX=7891
 
+DEPLOYMENT_TYPE=live
+INITIAL_MEMBERS_PATH=""
+INITIAL_BALANCES_PATH=""
+
 GIT_REPO="https://github.com/Joystream/joystream.git"
 BRANCH_NAME=sumer
 

+ 3 - 2
devops/infrastructure/deploy-infra.sh

@@ -29,7 +29,7 @@ aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $NEW_STACK_NAME \
-  --template-file infrastructure.yml \
+  --template-file cloudformation/infrastructure.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \
@@ -84,7 +84,8 @@ if [ $? -eq 0 ]; then
   echo -e "\n\n=========== Configure and start new validators, rpc node and pioneer ==========="
   ansible-playbook -i $INVENTORY_PATH --private-key $KEY_PATH chain-spec-pioneer.yml \
     --extra-vars "local_dir=$LOCAL_CODE_PATH network_suffix=$NETWORK_SUFFIX
-                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS"
+                  data_path=data-$NEW_STACK_NAME bucket_name=$BUCKET_NAME number_of_validators=$NUMBER_OF_VALIDATORS
+                  deployment_type=$DEPLOYMENT_TYPE initial_balances_file=$INITIAL_BALANCES_PATH initial_members_file=$INITIAL_MEMBERS_PATH"
 
   echo -e "\n\n Pioneer URL: https://$DOMAIN_NAME"
 fi

+ 2 - 2
devops/infrastructure/deploy-single-node.sh

@@ -23,13 +23,13 @@ if [ ! -f "$KEY_PATH" ]; then
     exit 1
 fi
 
-# # Deploy the CloudFormation template
+# Deploy the CloudFormation template
 echo -e "\n\n=========== Deploying single node ==========="
 aws cloudformation deploy \
   --region $REGION \
   --profile $CLI_PROFILE \
   --stack-name $SINGLE_NODE_STACK_NAME \
-  --template-file single-instance.yml \
+  --template-file cloudformation/single-instance.yml \
   --no-fail-on-empty-changeset \
   --capabilities CAPABILITY_NAMED_IAM \
   --parameter-overrides \

+ 28 - 34
devops/infrastructure/github-action-playbook.yml

@@ -5,41 +5,35 @@
   hosts: all
 
   tasks:
-    - block:
-      - name: Get code from git repo
-        include_role:
-          name: common
-          tasks_from: get-code-git
+    - name: Get code from git repo
+      include_role:
+        name: common
+        tasks_from: get-code-git
 
-      - name: Run setup and build
-        include_role:
-          name: common
-          tasks_from: run-setup-build
+    - name: Run setup and build
+      include_role:
+        name: common
+        tasks_from: run-setup-build
 
-      - name: Install subkey
-        include_role:
-          name: admin
-          tasks_from: main
+    - name: Install subkey
+      include_role:
+        name: admin
+        tasks_from: main
 
-      - name: Basic AMI Creation
-        amazon.aws.ec2_ami:
-          instance_id: "{{ instance_id }}"
-          wait: yes
-          name: "{{ ami_name }}"
-          launch_permissions:
-            group_names: ['all']
-          tags:
-            Name: "{{ ami_name }}"
-        register: ami_data
-        delegate_to: localhost
+    - name: Basic AMI Creation
+      amazon.aws.ec2_ami:
+        instance_id: '{{ instance_id }}'
+        wait: yes
+        # How long before wait gives up, in seconds
+        wait_timeout: 3600
+        name: '{{ ami_name }}'
+        launch_permissions:
+          group_names: ['all']
+        tags:
+          Name: '{{ ami_name }}'
+      register: ami_data
+      delegate_to: localhost
 
-      - name: Print AMI ID
-        debug:
-          msg: "AMI ID is: {{ ami_data.image_id }}"
-
-      always:
-      - name: Delete the stack
-        amazon.aws.cloudformation:
-          stack_name: "{{ stack_name }}"
-          state: "absent"
-        delegate_to: localhost
+    - name: Print AMI ID
+      debug:
+        msg: 'AMI ID is: {{ ami_data.image_id }}'

+ 3 - 0
devops/infrastructure/query-node/Pulumi.yaml

@@ -17,3 +17,6 @@ template:
       description: Path to members.json file for processor initialization
     workersFilePath:
       description: Path to workers.json file for processor initialization
+    indexerURL:
+      description: URL for the indexer endpoint
+      default: 'http://query-node:4000/graphql'

+ 2 - 1
devops/infrastructure/query-node/README.md

@@ -38,7 +38,8 @@ After cloning this repo, from this working directory, run these commands:
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true
+    --plaintext workersFilePath=<PATH> --plaintext membersFilePath=<PATH> --plaintext isMinikube=true \
+    --plaintext indexerURL=<URL>
    ```
 
    If you want to build the stack on AWS set the `isMinikube` config to `false`

+ 92 - 57
devops/infrastructure/query-node/index.ts

@@ -63,7 +63,6 @@ const resourceOptions = { provider: provider }
 const name = 'query-node'
 
 // Create a Kubernetes Namespace
-// const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
 const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
 
 // Export the Namespace name
@@ -289,6 +288,38 @@ const defsConfig = new configMapFromFile(
   resourceOptions
 ).configName
 
+const indexerContainer = []
+
+const existingIndexer = config.get('indexerURL')
+
+if (!existingIndexer) {
+  indexerContainer.push({
+    name: 'indexer',
+    image: 'joystream/hydra-indexer:2.1.0-beta.9',
+    env: [
+      { name: 'DB_HOST', value: 'postgres-db' },
+      { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
+      { name: 'DB_PASS', value: process.env.DB_PASS! },
+      { name: 'INDEXER_WORKERS', value: '5' },
+      { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
+      { name: 'DEBUG', value: 'index-builder:*' },
+      { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
+      { name: 'TYPES_JSON', value: 'types.json' },
+      { name: 'PGUSER', value: process.env.DB_USER! },
+      { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
+    ],
+    volumeMounts: [
+      {
+        mountPath: '/home/hydra/packages/hydra-indexer/types.json',
+        name: 'indexer-volume',
+        subPath: 'fileData',
+      },
+    ],
+    command: ['/bin/sh', '-c'],
+    args: ['yarn db:bootstrap && yarn start:prod'],
+  })
+}
+
 const deployment = new k8s.apps.v1.Deployment(
   name,
   {
@@ -310,31 +341,7 @@ const deployment = new k8s.apps.v1.Deployment(
               image: 'redis:6.0-alpine',
               ports: [{ containerPort: 6379 }],
             },
-            {
-              name: 'indexer',
-              image: 'joystream/hydra-indexer:2.1.0-beta.9',
-              env: [
-                { name: 'DB_HOST', value: 'postgres-db' },
-                { name: 'DB_NAME', value: process.env.INDEXER_DB_NAME! },
-                { name: 'DB_PASS', value: process.env.DB_PASS! },
-                { name: 'INDEXER_WORKERS', value: '5' },
-                { name: 'REDIS_URI', value: 'redis://localhost:6379/0' },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'WS_PROVIDER_ENDPOINT_URI', value: process.env.WS_PROVIDER_ENDPOINT_URI! },
-                { name: 'TYPES_JSON', value: 'types.json' },
-                { name: 'PGUSER', value: process.env.DB_USER! },
-                { name: 'BLOCK_HEIGHT', value: process.env.BLOCK_HEIGHT! },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/home/hydra/packages/hydra-indexer/types.json',
-                  name: 'indexer-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['yarn db:bootstrap && yarn start:prod'],
-            },
+            ...indexerContainer,
             {
               name: 'hydra-indexer-gateway',
               image: 'joystream/hydra-indexer-gateway:2.1.0-beta.5',
@@ -351,30 +358,6 @@ const deployment = new k8s.apps.v1.Deployment(
               ],
               ports: [{ containerPort: 4002 }],
             },
-            {
-              name: 'processor',
-              image: joystreamAppsImage,
-              imagePullPolicy: 'IfNotPresent',
-              env: [
-                {
-                  name: 'INDEXER_ENDPOINT_URL',
-                  value: `http://localhost:${process.env.WARTHOG_APP_PORT}/graphql`,
-                },
-                { name: 'TYPEORM_HOST', value: 'postgres-db' },
-                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
-                { name: 'DEBUG', value: 'index-builder:*' },
-                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
-              ],
-              volumeMounts: [
-                {
-                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
-                  name: 'processor-volume',
-                  subPath: 'fileData',
-                },
-              ],
-              command: ['/bin/sh', '-c'],
-              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
-            },
             {
               name: 'graphql-server',
               image: joystreamAppsImage,
@@ -393,12 +376,6 @@ const deployment = new k8s.apps.v1.Deployment(
             },
           ],
           volumes: [
-            {
-              name: 'processor-volume',
-              configMap: {
-                name: defsConfig,
-              },
-            },
             {
               name: 'indexer-volume',
               configMap: {
@@ -436,9 +413,67 @@ const service = new k8s.core.v1.Service(
   resourceOptions
 )
 
-// Export the Service name and public LoadBalancer Endpoint
+// Export the Service name
 export const serviceName = service.metadata.name
 
+const indexerURL = config.get('indexerURL') || `http://query-node:4000/graphql`
+
+const processorDeployment = new k8s.apps.v1.Deployment(
+  `processor`,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'processor',
+              image: joystreamAppsImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                {
+                  name: 'INDEXER_ENDPOINT_URL',
+                  value: indexerURL,
+                },
+                { name: 'TYPEORM_HOST', value: 'postgres-db' },
+                { name: 'TYPEORM_DATABASE', value: process.env.DB_NAME! },
+                { name: 'DEBUG', value: 'index-builder:*' },
+                { name: 'PROCESSOR_POLL_INTERVAL', value: '1000' },
+              ],
+              volumeMounts: [
+                {
+                  mountPath: '/joystream/query-node/mappings/lib/generated/types/typedefs.json',
+                  name: 'processor-volume',
+                  subPath: 'fileData',
+                },
+              ],
+              command: ['/bin/sh', '-c'],
+              args: ['cd query-node && yarn hydra-processor run -e ../.env'],
+            },
+          ],
+          volumes: [
+            {
+              name: 'processor-volume',
+              configMap: {
+                name: defsConfig,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  { ...resourceOptions, dependsOn: deployment }
+)
+
 const caddyEndpoints = [
   `/indexer/* {
     uri strip_prefix /indexer

+ 3 - 0
devops/infrastructure/roles/admin/tasks/main.yml

@@ -16,6 +16,7 @@
 
 - name: Install subkey
   shell: cargo install --force subkey --git https://github.com/paritytech/substrate --version 2.0.1 --locked
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: install_result
@@ -25,5 +26,7 @@
     jid: '{{ install_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 42 - 22
devops/infrastructure/roles/common/tasks/chain-spec-node-keys.yml

@@ -3,25 +3,45 @@
 
 - name: Debug to test variable
   debug:
-    msg: "Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}"
+    msg: 'Data path: {{ data_path }}, Chain Spec path: {{ chain_spec_path }}'
   run_once: true
 
-- name: Run chain-spec-builder to generate chainspec.json file
-  command: "{{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }} --chain-spec-path {{ chain_spec_path }} --deployment live --endowed 1 --keystore-path {{ data_path }}"
+- name: Copying initial members file to the server
+  copy:
+    src: '{{ initial_members_file }}'
+    dest: '{{ admin_code_dir }}/initial-members.json'
+  when: initial_members_file is defined and initial_members_file|length > 0
+  run_once: true
+
+- name: Copying initial balances file to the server
+  copy:
+    src: '{{ initial_balances_file }}'
+    dest: '{{ admin_code_dir }}/initial-balances.json'
+  when: initial_balances_file is defined and initial_balances_file|length > 0
+  run_once: true
+
+- name: Run chain-spec-builder to generate chainspec.json file (with initial data)
+  shell: >
+    {{ admin_code_dir }}/target/release/chain-spec-builder generate -a {{ number_of_validators }}
+    --chain-spec-path {{ chain_spec_path }}
+    --endowed 1 --keystore-path {{ data_path }}
+    {% if deployment_type is defined and deployment_type|length > 0 %}--deployment {{ deployment_type }}{% endif %}
+    {% if initial_balances_file is defined and initial_balances_file|length > 0 %}--initial-balances-path {{ admin_code_dir }}/initial-balances.json{% endif %}
+    {% if initial_members_file is defined and initial_members_file|length > 0 %}--initial-members-path {{ admin_code_dir }}/initial-members.json{% endif %}
   register: chain_spec_output
-  delegate_to: "{{ local_or_admin }}"
+  delegate_to: '{{ local_or_admin }}'
   run_once: true
 
 - name: Run subkey to generate node keys
   shell: subkey generate-node-key
-  delegate_to: "{{ local_or_admin }}"
+  delegate_to: '{{ local_or_admin }}'
   register: subkey_output
 
 - name: Print to stdout
   debug:
     msg:
-    - "Public Key: {{ subkey_output.stderr }}"
-    - "Private Key: {{ subkey_output.stdout }}"
+      - 'Public Key: {{ subkey_output.stderr }}'
+      - 'Private Key: {{ subkey_output.stdout }}'
 
 - name: Print to stdout chain spec
   debug: var=chain_spec_output.stdout
@@ -30,16 +50,16 @@
 - name: Save output of chain spec to local file
   copy:
     content: '{{ chain_spec_output.stdout | regex_replace("\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]", "") }}'
-    dest: "{{ data_path }}/chain_spec_output.txt"
-  delegate_to: "{{ local_or_admin }}"
+    dest: '{{ data_path }}/chain_spec_output.txt'
+  delegate_to: '{{ local_or_admin }}'
   run_once: true
 
 - name: Change chain spec name, id, protocolId
   json_modify:
-    chain_spec_path: "{{ chain_spec_path }}"
-    prefix: "{{ network_suffix }}"
-    all_nodes: "{{ hostvars }}"
-  delegate_to: "{{ local_or_admin }}"
+    chain_spec_path: '{{ chain_spec_path }}'
+    prefix: '{{ network_suffix }}'
+    all_nodes: '{{ hostvars }}'
+  delegate_to: '{{ local_or_admin }}'
   register: result
   run_once: true
 
@@ -49,28 +69,28 @@
   run_once: true
 
 - name: Run build-spec to generate raw chainspec file
-  shell: "{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}"
-  delegate_to: "{{ local_or_admin }}"
+  shell: '{{ admin_code_dir }}/target/release/joystream-node build-spec --chain {{ chain_spec_path }} --raw > {{ raw_chain_spec_path }}'
+  delegate_to: '{{ local_or_admin }}'
   run_once: true
 
 - name: Copying chain spec files to localhost
   synchronize:
-    src: "/home/ubuntu/{{ data_path }}/"
-    dest: "{{ data_path }}"
+    src: '/home/ubuntu/{{ data_path }}/'
+    dest: '{{ data_path }}'
     mode: pull
   run_once: true
   when: run_on_admin_server|bool
 
 - name: Copy joystream-node binary to localhost
   fetch:
-    src: "{{ admin_code_dir }}/target/release/joystream-node"
-    dest: "{{ data_path }}/joystream-node"
+    src: '{{ admin_code_dir }}/target/release/joystream-node'
+    dest: '{{ data_path }}/joystream-node'
     flat: yes
-  delegate_to: "{{ local_or_admin }}"
+  delegate_to: '{{ local_or_admin }}'
   run_once: true
   when: run_on_admin_server|bool
 
 - name: Copying raw chain spec file to all servers
   copy:
-    src: "{{ raw_chain_spec_path }}"
-    dest: "{{ remote_chain_spec_path }}"
+    src: '{{ raw_chain_spec_path }}'
+    dest: '{{ remote_chain_spec_path }}'

+ 7 - 4
devops/infrastructure/roles/common/tasks/run-setup-build.yml

@@ -2,25 +2,28 @@
 # Run setup and build code
 
 - name: Create bash profile file
-  command: "touch /home/ubuntu/.bash_profile"
+  command: 'touch /home/ubuntu/.bash_profile'
 
 - name: Run setup script
   command: ./setup.sh
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
 
 - name: Build joystream node
   shell: . ~/.bash_profile && yarn cargo-build
   args:
-    chdir: "{{ remote_code_path }}"
+    chdir: '{{ remote_code_path }}'
+  # Run in async fashion for max duration of 1 hr
   async: 3600
   poll: 0
   register: build_result
 
 - name: Check on build async task
   async_status:
-    jid: "{{ build_result.ansible_job_id }}"
+    jid: '{{ build_result.ansible_job_id }}'
   register: job_result
   until: job_result.finished
+  # Max number of times to check for status
   retries: 36
+  # Check for the status every 100s
   delay: 100

+ 2 - 2
devops/infrastructure/storage-node/README.md

@@ -39,14 +39,14 @@ After cloning this repo, from this working directory, run these commands:
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
     --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
-    --plaintext isAnonymous=true
+    --plaintext isMinikube=true --plaintext isAnonymous=true
    ```
 
    If running for production use the below mentioned config
 
    ```bash
    $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
-    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false \
+    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' --plaintext isAnonymous=false --plaintext isMinikube=false \
     --plaintext providerId=<ID> --plaintext keyFile=<PATH> --plaintext publicURL=<DOMAIN> --secret passphrase=<PASSPHRASE>
    ```
 

+ 56 - 28
devops/infrastructure/storage-node/index.ts

@@ -1,6 +1,7 @@
 import * as awsx from '@pulumi/awsx'
 import * as aws from '@pulumi/aws'
 import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
 import * as k8s from '@pulumi/kubernetes'
 import * as pulumi from '@pulumi/pulumi'
 import { CaddyServiceDeployment } from 'pulumi-common'
@@ -15,37 +16,57 @@ const lbReady = config.get('isLoadBalancerReady') === 'true'
 const name = 'storage-node'
 const colossusPort = parseInt(config.get('colossusPort') || '3000')
 const storage = parseInt(config.get('storage') || '40')
+const isMinikube = config.getBoolean('isMinikube')
 
 let additionalParams: string[] | pulumi.Input<string>[] = []
 let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
 let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
 
-// Create a VPC for our cluster.
-const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+export let kubeconfig: pulumi.Output<any>
+export let colossusImage: pulumi.Output<string>
+let provider: k8s.Provider
 
-// Create an EKS cluster with the default configuration.
-const cluster = new eks.Cluster('eksctl-storage-node', {
-  vpcId: vpc.id,
-  subnetIds: vpc.publicSubnetIds,
-  instanceType: 't2.medium',
-  providerCredentialOpts: {
-    profileName: awsConfig.get('profile'),
-  },
-})
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+  // Create image from local app
+  colossusImage = new docker.Image('joystream/colossus', {
+    build: {
+      context: '../../../',
+      dockerfile: '../../../colossus.Dockerfile',
+    },
+    imageName: 'joystream/colossus:latest',
+    skipPush: true,
+  }).baseImageName
+  // colossusImage = pulumi.interpolate`joystream/colossus:latest`
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('storage-node-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
 
-// Export the cluster's kubeconfig.
-export const kubeconfig = cluster.kubeconfig
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-storage-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
 
-// Create a repository
-const repo = new awsx.ecr.Repository('colossus-image')
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
 
-// Build an image and publish it to our ECR repository.
-export const colossusImage = repo.buildAndPushImage({
-  dockerfile: '../../../colossus.Dockerfile',
-  context: '../../../',
-})
+  // Create a repository
+  const repo = new awsx.ecr.Repository('colossus-image')
 
-const resourceOptions = { provider: cluster.provider }
+  // Build an image and publish it to our ECR repository.
+  colossusImage = repo.buildAndPushImage({
+    dockerfile: '../../../colossus.Dockerfile',
+    context: '../../../',
+  })
+}
+
+const resourceOptions = { provider: provider }
 
 // Create a Kubernetes Namespace
 const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
@@ -88,14 +109,19 @@ const caddyEndpoints = [
 }`,
 ]
 
-const caddy = new CaddyServiceDeployment(
-  'caddy-proxy',
-  { lbReady, namespaceName: namespaceName, caddyEndpoints },
-  resourceOptions
-)
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
 
-export const endpoint1 = caddy.primaryEndpoint
-export const endpoint2 = caddy.secondaryEndpoint
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}
 
 export let appLink: pulumi.Output<string>
 
@@ -180,6 +206,7 @@ const deployment = new k8s.apps.v1.Deployment(
             {
               name: 'colossus',
               image: colossusImage,
+              imagePullPolicy: 'IfNotPresent',
               env: [
                 {
                   name: 'WS_PROVIDER_ENDPOINT_URI',
@@ -222,6 +249,7 @@ const service = new k8s.core.v1.Service(
       name: 'storage-node',
     },
     spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
       ports: [{ name: 'port-1', port: colossusPort }],
       selector: appLabels,
     },

+ 1 - 0
devops/infrastructure/storage-node/package.json

@@ -9,6 +9,7 @@
     "@pulumi/eks": "^0.31.0",
     "@pulumi/kubernetes": "^3.0.0",
     "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
     "pulumi-common": "file:../pulumi-common"
   }
 }