
Update README, remove custom containers

Anuj Bansal committed 3 years ago
commit 67d258b573

+ 0 - 2
colossus.Dockerfile

@@ -11,8 +11,6 @@ RUN yarn --frozen-lockfile
 RUN yarn workspace @joystream/types build
 RUN yarn workspace storage-node build
 
-# ENV WS_PROVIDER_ENDPOINT_URI=ws://host.docker.internal:9944/
-
 RUN yarn
 
 ENTRYPOINT yarn colossus --dev --ws-provider $WS_PROVIDER_ENDPOINT_URI
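
With the commented-out `WS_PROVIDER_ENDPOINT_URI` default removed, the endpoint is supplied to the container at run time, as the `ENTRYPOINT` expects. A minimal sketch, assuming the image is built and tagged locally as `colossus` (a hypothetical tag):

```bash
# Build the image from the repository root using the colossus Dockerfile
$ docker build -f colossus.Dockerfile -t colossus .

# Supply the websocket endpoint that the ENTRYPOINT reads from the environment
$ docker run -e WS_PROVIDER_ENDPOINT_URI='ws://host.docker.internal:9944/' colossus
```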

+ 24 - 149
devops/infrastructure/kubernetes/README.md

@@ -36,160 +36,17 @@ After cloning this repo, from this working directory, run these commands:
 
 1. Set the required AWS configuration variables in `Pulumi.<stack>.yaml`
 
-1. Set `WS_PROVIDER_ENDPOINT_URI` environment variable. Example `export WS_PROVIDER_ENDPOINT_URI='wss://18.209.241.63.nip.io/'`
+1. Set the `WS_PROVIDER_ENDPOINT_URI` environment variable:
+
+   ```bash
+   $ export WS_PROVIDER_ENDPOINT_URI='wss://18.209.241.63.nip.io/'
+   ```
 
 1. Stand up the EKS cluster:
 
    Running `pulumi up -y` will deploy the EKS cluster (see the sketch below).
    Note that provisioning a new EKS cluster takes between 10 and 15 minutes.
 
-   ```bash
-   $ pulumi update
-   Previewing update (eks-demo):
-
-       Type                                          Name                              	Plan
-   +   pulumi:pulumi:Stack                           eks-hello-world-eks-demo     			create
-   +   ├─ eks:index:Cluster                          helloworld                          	create
-   +   │  ├─ eks:index:ServiceRole                   helloworld-eksRole                  	create
-   +   │  │  ├─ aws:iam:Role                         helloworld-eksRole-role             	create
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-eksRole-90eb1c99         	create
-   +   │  │  └─ aws:iam:RolePolicyAttachment         helloworld-eksRole-4b490823         	create
-   +   │  ├─ eks:index:ServiceRole                   helloworld-instanceRole             	create
-   +   │  │  ├─ aws:iam:Role                         helloworld-instanceRole-role        	create
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-03516f97    	create
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-e1b295bd    	create
-   +   │  │  └─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-3eb088f2    	create
-   +   │  ├─ pulumi-nodejs:dynamic:Resource          helloworld-cfnStackName             	create
-   +   │  ├─ aws:ec2:SecurityGroup                   helloworld-eksClusterSecurityGroup  	create
-   +   │  ├─ aws:iam:InstanceProfile                 helloworld-instanceProfile          	create
-   +   │  ├─ aws:eks:Cluster                         helloworld-eksCluster               	create
-   +   │  ├─ pulumi-nodejs:dynamic:Resource          helloworld-vpc-cni                  	create
-   +   │  ├─ pulumi:providers:kubernetes             helloworld-eks-k8s                  	create
-   +   │  ├─ aws:ec2:SecurityGroup                   helloworld-nodeSecurityGroup        	create
-   +   │  ├─ kubernetes:core:ConfigMap               helloworld-nodeAccess               	create
-   +   │  ├─ kubernetes:storage.k8s.io:StorageClass  helloworld-gp2                      	create
-   +   │  ├─ aws:ec2:SecurityGroupRule               helloworld-eksClusterIngressRule    	create
-   +   │  ├─ aws:ec2:LaunchConfiguration             helloworld-nodeLaunchConfiguration  	create
-   +   │  ├─ aws:cloudformation:Stack                helloworld-nodes                    	create
-   +   │  └─ pulumi:providers:kubernetes             helloworld-provider                 	create
-   +   └─ aws-infra:network:Network                  vpc                               	create
-   +      ├─ aws:ec2:Vpc                             vpc                               	create
-   +      ├─ aws:ec2:Eip                             vpc-nat-0                         	create
-   +      ├─ aws:ec2:Eip                             vpc-nat-1                         	create
-   +      ├─ aws:ec2:InternetGateway                 vpc                               	create
-   +      ├─ aws:ec2:Subnet                          vpc-nat-1                         	create
-   +      ├─ aws:ec2:Subnet                          vpc-0                             	create
-   +      ├─ aws:ec2:Subnet                          vpc-nat-0                         	create
-   +      ├─ aws:ec2:Subnet                          vpc-1                             	create
-   +      ├─ aws:ec2:RouteTable                      vpc                               	create
-   +      ├─ aws:ec2:NatGateway                      vpc-nat-1                         	create
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-nat-1                         	create
-   +      ├─ aws:ec2:NatGateway                      vpc-nat-0                         	create
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-nat-0                         	create
-   +      ├─ aws:ec2:RouteTable                      vpc-nat-1                         	create
-   +      ├─ aws:ec2:RouteTable                      vpc-nat-0                         	create
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-1                             	create
-   +      └─ aws:ec2:RouteTableAssociation           vpc-0                             	create
-
-   Resources:
-       + 42 to create
-
-   clusterng (eks-demo):
-
-       Type                                          Name                              	Status      Info
-   +   pulumi:pulumi:Stack                           eks-hello-world-eks-demo     			created
-   +   ├─ eks:index:Cluster                          helloworld                          	created
-   +   │  ├─ eks:index:ServiceRole                   helloworld-eksRole                  	created
-   +   │  │  ├─ aws:iam:Role                         helloworld-eksRole-role             	created
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-eksRole-90eb1c99         	created
-   +   │  │  └─ aws:iam:RolePolicyAttachment         helloworld-eksRole-4b490823         	created
-   +   │  ├─ eks:index:ServiceRole                   helloworld-instanceRole             	created
-   +   │  │  ├─ aws:iam:Role                         helloworld-instanceRole-role        	created
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-3eb088f2    	created
-   +   │  │  ├─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-03516f97    	created
-   +   │  │  └─ aws:iam:RolePolicyAttachment         helloworld-instanceRole-e1b295bd    	created
-   +   │  ├─ pulumi-nodejs:dynamic:Resource          helloworld-cfnStackName             	created
-   +   │  ├─ aws:iam:InstanceProfile                 helloworld-instanceProfile          	created
-   +   │  ├─ aws:ec2:SecurityGroup                   helloworld-eksClusterSecurityGroup  	created
-   +   │  ├─ aws:eks:Cluster                         helloworld-eksCluster               	created
-   +   │  ├─ pulumi:providers:kubernetes             helloworld-eks-k8s                  	created
-   +   │  ├─ pulumi-nodejs:dynamic:Resource          helloworld-vpc-cni                  	created
-   +   │  ├─ aws:ec2:SecurityGroup                   helloworld-nodeSecurityGroup        	created
-   +   │  ├─ kubernetes:core:ConfigMap               helloworld-nodeAccess               	created
-   +   │  ├─ kubernetes:storage.k8s.io:StorageClass  helloworld-gp2                      	created
-   +   │  ├─ aws:ec2:SecurityGroupRule               helloworld-eksClusterIngressRule    	created
-   +   │  ├─ aws:ec2:LaunchConfiguration             helloworld-nodeLaunchConfiguration  	created
-   +   │  ├─ aws:cloudformation:Stack                helloworld-nodes                    	created
-   +   │  └─ pulumi:providers:kubernetes             helloworld-provider                 	created
-   +   └─ aws-infra:network:Network                  vpc                               	created
-   +      ├─ aws:ec2:Vpc                             vpc                               	created
-   +      ├─ aws:ec2:Eip                             vpc-nat-0                         	created
-   +      ├─ aws:ec2:Eip                             vpc-nat-1                         	created
-   +      ├─ aws:ec2:InternetGateway                 vpc                               	created
-   +      ├─ aws:ec2:Subnet                          vpc-nat-1                         	created
-   +      ├─ aws:ec2:Subnet                          vpc-0                             	created
-   +      ├─ aws:ec2:Subnet                          vpc-nat-0                         	created
-   +      ├─ aws:ec2:Subnet                          vpc-1                             	created
-   +      ├─ aws:ec2:RouteTable                      vpc                               	created
-   +      ├─ aws:ec2:NatGateway                      vpc-nat-1                         	created
-   +      ├─ aws:ec2:NatGateway                      vpc-nat-0                         	created
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-nat-0                         	created
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-nat-1                         	created
-   +      ├─ aws:ec2:RouteTable                      vpc-nat-1                         	created
-   +      ├─ aws:ec2:RouteTableAssociation           vpc-1                             	created
-   +      ├─ aws:ec2:RouteTable                      vpc-nat-0                         	created
-   +      └─ aws:ec2:RouteTableAssociation           vpc-0                             	created
-
-   Diagnostics:
-   pulumi:pulumi:Stack (eks-hello-world-eks-demo):
-
-   Outputs:
-       kubeconfig: {
-           apiVersion     : "v1"
-           clusters       : [
-               [0]: {
-                   cluster: {
-                       certificate-authority-data: "<CERT_DATA>"
-                       server                    : "https://<SERVER_ADDR>.us-west-2.eks.amazonaws.com"
-                   }
-                   name   : "kubernetes"
-               }
-           ]
-           contexts       : [
-               [0]: {
-                   context: {
-                       cluster: "kubernetes"
-                       user   : "aws"
-                   }
-                   name   : "aws"
-               }
-           ]
-           current-context: "aws"
-           kind           : "Config"
-           users          : [
-               [0]: {
-                   name: "aws"
-                   user: {
-                       exec: {
-                           apiVersion: "client.authentication.k8s.io/v1alpha1"
-                           args      : [
-                               [0]: "token"
-                               [1]: "-i"
-                               [2]: "helloworld-eksCluster-e9e1711"
-                           ]
-                           command   : "aws-iam-authenticator"
-                       }
-                   }
-               }
-           ]
-       }
-
-   Resources:
-       + 42 created
-
-   Duration: 13m7s
-   ```
-
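
   A minimal sketch of this step, using only the command named above (`-y`
   auto-approves the preview, so no interactive confirmation is needed):

   ```bash
   $ pulumi up -y
   ```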
 1. Access the Kubernetes Cluster using `kubectl`
 
    To access your new Kubernetes cluster using `kubectl`, we need to set up the
@@ -209,7 +66,25 @@ After cloning this repo, from this working directory, run these commands:
    $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
    ```
 
-   By deploying the NGINX image in this way, it is outside of Pulumi's control. But this is simply to show that we can control our cluster via the CLI as well.
+   To get the logs of a pod:
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To see the complete Pulumi stack output:
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To execute a command inside the colossus container:
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c colossus -- /bin/bash
+   ```
 
 1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
 

+ 0 - 7
devops/infrastructure/kubernetes/app/Dockerfile

@@ -1,7 +0,0 @@
-FROM ipfs/go-ipfs:latest
-
-EXPOSE 8080 5001
-
-# New startup script
-ADD container_daemon /usr/local/bin/start_ipfs
-RUN chmod 0755 /usr/local/bin/start_ipfs

+ 0 - 58
devops/infrastructure/kubernetes/app/container_daemon

@@ -1,58 +0,0 @@
-#!/bin/sh
-set -e
-user=ipfs
-repo="$IPFS_PATH"
-
-if [ `id -u` -eq 0 ]; then
-  echo "Changing user to $user"
-  # ensure folder is writable
-  su-exec "$user" test -w "$repo" || chown -R -- "$user" "$repo"
-  # restart script with new privileges
-  exec su-exec "$user" "$0" "$@"
-fi
-
-# 2nd invocation with regular user
-ipfs version
-
-if [ -e "$repo/config" ]; then
-  echo "Found IPFS fs-repo at $repo"
-else
-  case "$IPFS_PROFILE" in
-    "") INIT_ARGS="" ;;
-    *) INIT_ARGS="--profile=$IPFS_PROFILE" ;;
-  esac
-  ipfs init $INIT_ARGS
-  ipfs config Addresses.API /ip4/0.0.0.0/tcp/5001
-  ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080
-  ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin '["*"]'
-  ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods '["PUT", "POST"]'
-
-  # Set up the swarm key, if provided
-
-  SWARM_KEY_FILE="$repo/swarm.key"
-  SWARM_KEY_PERM=0400
-
-  # Create a swarm key from a given environment variable
-  if [ ! -z "$IPFS_SWARM_KEY" ] ; then
-    echo "Copying swarm key from variable..."
-    echo -e "$IPFS_SWARM_KEY" >"$SWARM_KEY_FILE" || exit 1
-    chmod $SWARM_KEY_PERM "$SWARM_KEY_FILE"
-  fi
-
-  # Unset the swarm key variable
-  unset IPFS_SWARM_KEY
-
-  # Check during initialization if a swarm key was provided and
-  # copy it to the ipfs directory with the right permissions
-  # WARNING: This will replace the swarm key if it exists
-  if [ ! -z "$IPFS_SWARM_KEY_FILE" ] ; then
-    echo "Copying swarm key from file..."
-    install -m $SWARM_KEY_PERM "$IPFS_SWARM_KEY_FILE" "$SWARM_KEY_FILE" || exit 1
-  fi
-
-  # Unset the swarm key file variable
-  unset IPFS_SWARM_KEY_FILE
-
-fi
-
-exec ipfs "$@"

+ 2 - 6
devops/infrastructure/kubernetes/index.ts

@@ -24,9 +24,7 @@ export const kubeconfig = cluster.kubeconfig
 // Create a repository
 const repo = new awsx.ecr.Repository('my-repo')
 
-// Build an image from the "./app" directory
-// and publish it to our ECR repository.
-export const ipfsImage = repo.buildAndPushImage('./app')
+// Build the colossus image and publish it to our ECR repository.
 
 export const colossusImage = repo.buildAndPushImage({
   dockerfile: '../../../colossus.Dockerfile',
@@ -61,7 +59,7 @@ const deployment = new k8s.apps.v1.Deployment(
           containers: [
             {
               name: 'ipfs',
-              image: ipfsImage,
+              image: 'ipfs/go-ipfs:latest',
               ports: [{ containerPort: 5001 }, { containerPort: 8080 }],
             },
             {
@@ -128,5 +126,3 @@ const service = new k8s.core.v1.Service(
 // Export the Service name and public LoadBalancer Endpoint
 export const serviceName = service.metadata.name
 export const serviceHostname = service.status.loadBalancer.ingress[0].hostname
-
-console.log(serviceHostname)
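
With the `console.log` removed, the load balancer hostname remains available as a stack output and can be read with the Pulumi CLI; a usage sketch:

```bash
$ pulumi stack output serviceHostname
```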