Merge pull request #2812 from ahhda/argus_pulumi

DevOps - Deploy Argus node using Pulumi
Mokhtar Naamani 3 years ago
Parent
Commit
6e0cd9d242

+ 5 - 0
devops/kubernetes/argus/.gitignore

@@ -0,0 +1,5 @@
+/bin/
+/node_modules/
+kubeconfig*
+package-lock.json
+Pulumi.*.yaml

+ 35 - 0
devops/kubernetes/argus/Pulumi.yaml

@@ -0,0 +1,35 @@
+name: argus
+runtime: nodejs
+description: A Pulumi program to deploy an Argus node
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    queryNodeHost:
+      description: Query node GraphQL endpoint
+      default: 'https://hydra.joystream.org/graphql'
+    wsProviderEndpointURI:
+      description: Chain RPC endpoint
+      default: 'wss://rome-rpc-endpoint.joystream.org:9944/'
+    argusImage:
+      description: The distributor node image to use for running the node
+    keys:
+      description: Specifies the keys available within distributor node CLI
+    buckets:
+      description: Specifies the buckets distributed by the node
+    workerId:
+      description: ID of the node operator (distribution working group worker)
+    dataStorage:
+      description: Amount of storage (in Gi) assigned for the data directory
+      default: 10
+    logStorage:
+      description: Amount of storage (in Gi) assigned for the logs directory
+      default: 2
+    cacheStorage:
+      description: Amount of storage (in Gi) assigned for the cache directory
+      default: 10
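
Note that the defaults above live in the project template; the program itself reads each key through `pulumi.Config` and applies the numeric fallbacks in code (as `index.ts` later in this diff shows). A minimal sketch of that pattern:

```typescript
import * as pulumi from '@pulumi/pulumi'

const config = new pulumi.Config()
// require() fails fast if a mandatory key such as `workerId` was never set
const workerId = config.require('workerId')
// getNumber() returns undefined when unset, so the fallback lives in code
const dataStorage = config.getNumber('dataStorage') || 10
```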

+ 123 - 0
devops/kubernetes/argus/README.md

@@ -0,0 +1,123 @@
+# Argus deployment on Minikube or EKS
+
+This project deploys an Argus node on an EKS or a Minikube cluster.
+
+## Deploying the App
+
+To deploy your infrastructure, follow the steps below.
+
+### Prerequisites
+
+1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/)
+1. [Install Node.js](https://nodejs.org/en/download/)
+1. Install a package manager for Node.js, such as [npm](https://www.npmjs.com/get-npm) or [Yarn](https://yarnpkg.com/en/docs/install).
+1. [Configure AWS Credentials](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
+1. Optional (for debugging): [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
+
+### Steps
+
+After cloning this repo, from this working directory, run these commands:
+
+1. Install the required Node.js packages:
+
+   This installs the dependent packages [needed](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) for our Pulumi program.
+
+   ```bash
+   $ npm install
+   ```
+
+1. Create a new stack, which is an isolated deployment target for this example:
+
+   This will initialize the Pulumi program in TypeScript.
+
+   ```bash
+   $ pulumi stack init
+   ```
+
+1. Set the required configuration variables in `Pulumi.<stack>.yaml`
+
+   ```bash
+   $ pulumi config set-all --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user \
+    --plaintext queryNodeHost='https://34.197.252.42.nip.io/server/graphql' --plaintext isMinikube=true \
+    --plaintext wsProviderEndpointURI='wss://rome-rpc-endpoint.joystream.org:9944/' \
+    --plaintext argusImage='joystream/distributor-node:latest' \
+    --plaintext keys='[{ "suri": "//Alice" }]' --plaintext buckets='["1:0","1:1"]' --plaintext workerId=0
+   ```
+
+   If you want to build the stack on AWS set the `isMinikube` config to `false`
+
+   ```bash
+   $ pulumi config set isMinikube false
+   ```
+
+1. Stand up the EKS cluster:
+
+   Running `pulumi up -y` will deploy the EKS cluster. Note that provisioning a
+   new EKS cluster takes 10 to 15 minutes.
+
+1. If you are using Minikube, run `minikube service argus-node -n $(pulumi stack output namespaceName)`
+
+   This will set up a proxy for your `argus-node` service, which can then be accessed at
+   the URL given in the output.
+
+1. Once the stack is up and running, we will modify the Caddy config to obtain an SSL certificate for the load balancer
+
+   Modify the config variable `isLoadBalancerReady`:
+
+   ```bash
+   $ pulumi config set isLoadBalancerReady true
+   ```
+
+   Run `pulumi up -y` to update the Caddy config.
+
+1. Access the Kubernetes Cluster using `kubectl`
+
+   To access your new Kubernetes cluster using `kubectl`, we need to set up the
+   `kubeconfig` file. We can leverage the Pulumi stack output in the CLI,
+   as Pulumi facilitates exporting these objects for us.
+
+   ```bash
+   $ pulumi stack output kubeconfig --show-secrets > kubeconfig
+   $ export KUBECONFIG=$PWD/kubeconfig
+   $ kubectl get nodes
+   ```
+
+   We can also use the stack output to query the cluster for our newly created Deployment:
+
+   ```bash
+   $ kubectl get deployment $(pulumi stack output deploymentName) --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
+   ```
+
+   To get logs
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To run a command on a pod
+
+   ```bash
+   $ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1}
+   ```
+
+   To see the complete Pulumi stack output
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To open an interactive shell in a pod container
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c argus -- /bin/bash
+   ```
+
+1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
+
+   ```bash
+   $ pulumi destroy --yes
+   $ pulumi stack rm --yes
+   ```

+ 5 - 0
devops/kubernetes/argus/docker_dummy/Dockerfile

@@ -0,0 +1,5 @@
+# Since Pulumi does not support push without a build
+# we build an image from an existing local image
+ARG SOURCE_IMAGE
+
+FROM --platform=linux/amd64 ${SOURCE_IMAGE}
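
This dummy Dockerfile gives `awsx.ecr.Repository.buildAndPushImage` something to build, so an already-published image can be re-tagged and pushed to ECR. A sketch of how the build arg is wired up, mirroring the `index.ts` change below (the image tag here is just an example):

```typescript
import * as awsx from '@pulumi/awsx'

const repo = new awsx.ecr.Repository('distributor-node')
// SOURCE_IMAGE is consumed by the Dockerfile's ARG/FROM pair above
const image = repo.buildAndPushImage({
  context: './docker_dummy',
  dockerfile: './docker_dummy/Dockerfile',
  args: { SOURCE_IMAGE: 'joystream/distributor-node:latest' },
})
```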

+ 229 - 0
devops/kubernetes/argus/index.ts

@@ -0,0 +1,229 @@
+import * as awsx from '@pulumi/awsx'
+import * as aws from '@pulumi/aws'
+import * as eks from '@pulumi/eks'
+import * as docker from '@pulumi/docker'
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { CaddyServiceDeployment, CustomPersistentVolume } from 'pulumi-common'
+
+const awsConfig = new pulumi.Config('aws')
+const config = new pulumi.Config()
+
+const queryNodeHost = config.require('queryNodeHost')
+const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
+const configArgusImage = config.require('argusImage')
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const keys = config.require('keys')
+const buckets = config.require('buckets')
+const workerId = config.require('workerId')
+const name = 'argus-node'
+const isMinikube = config.getBoolean('isMinikube')
+const dataStorage = config.getNumber('dataStorage') || 10
+const logStorage = config.getNumber('logStorage') || 2
+const cacheStorage = config.getNumber('cacheStorage') || 10
+
+export let kubeconfig: pulumi.Output<any>
+export let argusImage: pulumi.Output<string> = pulumi.interpolate`${configArgusImage}`
+let provider: k8s.Provider
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('argus-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-argus-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    desiredCapacity: 2,
+    maxSize: 2,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+
+  // Create a repository
+  const repo = new awsx.ecr.Repository('distributor-node')
+
+  // Build an image and publish it to our ECR repository.
+  argusImage = repo.buildAndPushImage({
+    context: './docker_dummy',
+    dockerfile: './docker_dummy/Dockerfile',
+    args: { SOURCE_IMAGE: argusImage! },
+  })
+
+  // Uncomment the below line to use an existing image
+  // argusImage = pulumi.interpolate`ahhda/distributor-node:latest`
+}
+
+const resourceOptions = { provider: provider }
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const appLabels = { appClass: name }
+
+const dataPVC = new CustomPersistentVolume(
+  'data',
+  { namespaceName: namespaceName, storage: dataStorage },
+  resourceOptions
+)
+const logsPVC = new CustomPersistentVolume(
+  'logs',
+  { namespaceName: namespaceName, storage: logStorage },
+  resourceOptions
+)
+const cachePVC = new CustomPersistentVolume(
+  'cache',
+  { namespaceName: namespaceName, storage: cacheStorage },
+  resourceOptions
+)
+
+// Create a Deployment
+const deployment = new k8s.apps.v1.Deployment(
+  name,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'argus',
+              image: argusImage,
+              imagePullPolicy: 'IfNotPresent',
+              workingDir: '/joystream/distributor-node',
+              env: [
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__QUERY_NODE',
+                  value: queryNodeHost,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__ENDPOINTS__JOYSTREAM_NODE_WS',
+                  value: wsProviderEndpointURI,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__KEYS',
+                  value: keys,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__BUCKETS',
+                  value: buckets,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__WORKER_ID',
+                  value: workerId,
+                },
+                {
+                  name: 'JOYSTREAM_DISTRIBUTOR__PORT',
+                  value: '3334',
+                },
+              ],
+              args: ['start'],
+              ports: [{ containerPort: 3334 }],
+              volumeMounts: [
+                {
+                  name: 'data',
+                  mountPath: '/data',
+                  subPath: 'data',
+                },
+                {
+                  name: 'logs',
+                  mountPath: '/logs',
+                  subPath: 'logs',
+                },
+                {
+                  name: 'cache',
+                  mountPath: '/cache',
+                  subPath: 'cache',
+                },
+              ],
+            },
+          ],
+          volumes: [
+            {
+              name: 'data',
+              persistentVolumeClaim: {
+                claimName: dataPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'logs',
+              persistentVolumeClaim: {
+                claimName: logsPVC.pvc.metadata.name,
+              },
+            },
+            {
+              name: 'cache',
+              persistentVolumeClaim: {
+                claimName: cachePVC.pvc.metadata.name,
+              },
+            },
+          ],
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+// Create a LoadBalancer Service for the Deployment
+const service = new k8s.core.v1.Service(
+  name,
+  {
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: name,
+    },
+    spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
+      ports: [{ name: 'port-1', port: 3334 }],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
+)
+
+// Export the Service name
+export const serviceName = service.metadata.name
+
+// Export the Deployment name
+export const deploymentName = deployment.metadata.name
+
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
+
+const caddyEndpoints = [
+  ` {
+    reverse_proxy ${name}:3334
+}`,
+]
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}
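
The exports (`kubeconfig`, `namespaceName`, `deploymentName`, `serviceName`, `endpoint1`, `endpoint2`) back the `pulumi stack output` commands in the README. They can also be consumed from another Pulumi program through a stack reference; a hypothetical sketch (the stack name `myorg/argus/dev` is an assumption, substitute your own `<org>/<project>/<stack>`):

```typescript
import * as pulumi from '@pulumi/pulumi'

// Hypothetical fully-qualified stack name
const argus = new pulumi.StackReference('myorg/argus/dev')

// Outputs resolve asynchronously, like any other pulumi.Output
const endpoint = argus.getOutput('endpoint1')
export const argusUrl = pulumi.interpolate`https://${endpoint}`
```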

+ 15 - 0
devops/kubernetes/argus/package.json

@@ -0,0 +1,15 @@
+{
+  "name": "eks-cluster",
+  "devDependencies": {
+    "@types/node": "^10.0.0"
+  },
+  "dependencies": {
+    "@pulumi/aws": "^4.0.0",
+    "@pulumi/awsx": "^0.30.0",
+    "@pulumi/eks": "^0.31.0",
+    "@pulumi/kubernetes": "^3.0.0",
+    "@pulumi/pulumi": "^3.0.0",
+    "@pulumi/docker": "^3.0.0",
+    "pulumi-common": "file:../pulumi-common"
+  }
+}

+ 18 - 0
devops/kubernetes/argus/tsconfig.json

@@ -0,0 +1,18 @@
+{
+    "compilerOptions": {
+        "strict": true,
+        "outDir": "bin",
+        "target": "es2016",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "sourceMap": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}

+ 1 - 0
devops/kubernetes/pulumi-common/index.ts

@@ -1,3 +1,4 @@
 export { CaddyServiceDeployment } from './caddy'
 export { PostgresServiceDeployment } from './database'
 export { configMapFromFile } from './configMap'
+export { CustomPersistentVolume } from './volume'

+ 43 - 0
devops/kubernetes/pulumi-common/volume.ts

@@ -0,0 +1,43 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * A Pulumi ComponentResource that wraps a Kubernetes PersistentVolumeClaim,
+ * so a sized volume claim can be created with a single declaration.
+ */
+export class CustomPersistentVolume extends pulumi.ComponentResource {
+  public readonly pvc: k8s.core.v1.PersistentVolumeClaim
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('volume:service:CustomPersistentVolume', name, {}, opts)
+
+    const volumeLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    this.pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: volumeLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  storage: number
+}
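
Usage mirrors the three claims created in `index.ts` earlier in this diff; for example, a 10 Gi data volume inside the app namespace:

```typescript
import * as k8s from '@pulumi/kubernetes'
import { CustomPersistentVolume } from 'pulumi-common'

const provider = new k8s.Provider('local', {})
const ns = new k8s.core.v1.Namespace('argus-node', {}, { provider })

// Creates a 10 Gi claim, named `data-pvc` by the component
const dataPVC = new CustomPersistentVolume(
  'data',
  { namespaceName: ns.metadata.name, storage: 10 },
  { provider }
)

// A pod then mounts it by claim name:
// persistentVolumeClaim: { claimName: dataPVC.pvc.metadata.name }
```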