index.ts 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250
  1. import * as awsx from '@pulumi/awsx'
  2. import * as aws from '@pulumi/aws'
  3. import * as eks from '@pulumi/eks'
  4. import * as k8s from '@pulumi/kubernetes'
  5. import * as pulumi from '@pulumi/pulumi'
  6. import * as fs from 'fs'
  7. const dns = require('dns')
// Stack configuration: provider-level ('aws' namespace) and project-level.
const awsConfig = new pulumi.Config('aws')
const config = new pulumi.Config()
// Joystream node websocket endpoint the storage node connects to,
// e.g. 'wss://18.209.241.63.nip.io/' (see the container env below).
const wsProviderEndpointURI = config.require('wsProviderEndpointURI')
// Boolean flags arrive as config strings; anything other than the exact
// string 'true' is treated as false.
const isProduction = config.require('isProduction') === 'true'
// Presumably set on a second `pulumi up` once the LoadBalancer hostname
// exists, so the Caddy config below can resolve it — confirm with deploy docs.
const lbReady = config.get('isLoadBalancerReady') === 'true'
// Create a VPC for our cluster.
const vpc = new awsx.ec2.Vpc('vpc', { numberOfAvailabilityZones: 2 })
// Create an EKS cluster with the default configuration.
const cluster = new eks.Cluster('eksctl-my-cluster', {
  vpcId: vpc.id,
  subnetIds: vpc.publicSubnetIds,
  instanceType: 't2.micro',
  providerCredentialOpts: {
    // AWS profile (if configured) used when generating the kubeconfig.
    profileName: awsConfig.get('profile'),
  },
})
// Export the cluster's kubeconfig.
export const kubeconfig = cluster.kubeconfig
// Create a repository
const repo = new awsx.ecr.Repository('colossus-image')
// Build an image and publish it to our ECR repository.
// Paths are relative to this Pulumi project; the Dockerfile and build
// context live at the monorepo root, three levels up.
export const colossusImage = repo.buildAndPushImage({
  dockerfile: '../../../colossus.Dockerfile',
  context: '../../../',
})
// Base name reused for the Namespace, Service, ConfigMaps and Deployment.
const name = 'storage-node'
// Port the colossus container listens on (proxied by Caddy below).
const colossusPort = 3001
// Create a Kubernetes Namespace
const ns = new k8s.core.v1.Namespace(name, {}, { provider: cluster.provider })
// Export the Namespace name
export const namespaceName = ns.metadata.name
// Label selector shared by the Service and the Deployment's pod template.
const appLabels = { appClass: name }
  41. const service = new k8s.core.v1.Service(
  42. name,
  43. {
  44. metadata: {
  45. labels: appLabels,
  46. namespace: namespaceName,
  47. },
  48. spec: {
  49. type: 'LoadBalancer',
  50. ports: [
  51. { name: 'http', port: 80 },
  52. { name: 'https', port: 443 },
  53. ],
  54. selector: appLabels,
  55. },
  56. },
  57. {
  58. provider: cluster.provider,
  59. }
  60. )
  61. // Export the Service name and public LoadBalancer Endpoint
  62. export const serviceName = service.metadata.name
  63. // When "done", this will print the hostname
  64. export let serviceHostname: pulumi.Output<string>
  65. serviceHostname = service.status.loadBalancer.ingress[0].hostname
  66. export let appLink: pulumi.Output<string>
  67. const publicUrlInput: pulumi.Input<string> = pulumi.interpolate`http://${serviceHostname}/`
  68. let additionalParams: string[] | pulumi.Input<string>[] = []
  69. let volumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
  70. let caddyVolumeMounts: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.VolumeMount>[]> = []
  71. let volumes: pulumi.Input<pulumi.Input<k8s.types.input.core.v1.Volume>[]> = []
  72. if (isProduction) {
  73. const remoteKeyFilePath = '/joystream/key-file.json'
  74. const providerId = config.require('providerId')
  75. const keyFile = config.require('keyFile')
  76. const publicUrl = config.get('publicURL') ? config.get('publicURL')! : publicUrlInput
  77. const keyConfig = new k8s.core.v1.ConfigMap(name, {
  78. metadata: { namespace: namespaceName, labels: appLabels },
  79. data: { 'fileData': fs.readFileSync(keyFile).toString() },
  80. })
  81. const keyConfigName = keyConfig.metadata.apply((m) => m.name)
  82. additionalParams = ['--provider-id', providerId, '--key-file', remoteKeyFilePath, '--public-url', publicUrl]
  83. volumeMounts.push({
  84. mountPath: remoteKeyFilePath,
  85. name: 'keyfile-volume',
  86. subPath: 'fileData',
  87. })
  88. volumes.push({
  89. name: 'keyfile-volume',
  90. configMap: {
  91. name: keyConfigName,
  92. },
  93. })
  94. const passphrase = config.get('passphrase')
  95. if (passphrase) {
  96. additionalParams.push('--passphrase', passphrase)
  97. }
  98. } else {
  99. additionalParams.push('--anonymous')
  100. }
  101. if (lbReady) {
  102. async function lookupPromise(url: string) {
  103. return new Promise((resolve, reject) => {
  104. dns.lookup(url, (err: any, address: any) => {
  105. if (err) reject(err)
  106. resolve(address)
  107. })
  108. })
  109. }
  110. const lbIp = serviceHostname.apply((dnsName) => {
  111. return lookupPromise(dnsName)
  112. })
  113. const caddyConfig = pulumi.interpolate`${lbIp}.nip.io {
  114. reverse_proxy localhost:${colossusPort}
  115. }`
  116. const keyConfig = new k8s.core.v1.ConfigMap(name, {
  117. metadata: { namespace: namespaceName, labels: appLabels },
  118. data: { 'fileData': caddyConfig },
  119. })
  120. const keyConfigName = keyConfig.metadata.apply((m) => m.name)
  121. caddyVolumeMounts.push({
  122. mountPath: '/etc/caddy/Caddyfile',
  123. name: 'caddy-volume',
  124. subPath: 'fileData',
  125. })
  126. volumes.push({
  127. name: 'caddy-volume',
  128. configMap: {
  129. name: keyConfigName,
  130. },
  131. })
  132. appLink = pulumi.interpolate`${lbIp}.nip.io`
  133. lbIp.apply((value) => console.log(`You can now access the app at: ${value}.nip.io`))
  134. }
// Create a Deployment
const deployment = new k8s.apps.v1.Deployment(
  name,
  {
    metadata: {
      namespace: namespaceName,
      labels: appLabels,
    },
    spec: {
      replicas: 1,
      selector: { matchLabels: appLabels },
      template: {
        metadata: {
          labels: appLabels,
        },
        spec: {
          // Pod hostname is 'ipfs' so the colossus container can reach the
          // co-located ipfs daemon via the `--ipfs-host ipfs` flag below.
          hostname: 'ipfs',
          containers: [
            {
              // go-ipfs sidecar: API on 5001, gateway on 8080.
              name: 'ipfs',
              image: 'ipfs/go-ipfs:latest',
              ports: [{ containerPort: 5001 }, { containerPort: 8080 }],
              command: ['/bin/sh', '-c'],
              // One shell string (backslash line continuations): apply the
              // lowpower profile, disable the localhost public gateway, cap
              // the datastore at 200GB, then start the daemon under tini.
              args: [
                'set -e; \
                /usr/local/bin/start_ipfs config profile apply lowpower; \
                /usr/local/bin/start_ipfs config --json Gateway.PublicGateways \'{"localhost": null }\'; \
                /usr/local/bin/start_ipfs config Datastore.StorageMax 200GB; \
                /sbin/tini -- /usr/local/bin/start_ipfs daemon --migrate=true',
              ],
            },
            // {
            //   name: 'httpd',
            //   image: 'crccheck/hello-world',
            //   ports: [{ name: 'hello-world', containerPort: 8000 }],
            // },
            {
              // Caddy terminates HTTP/HTTPS for the LoadBalancer Service and
              // reverse-proxies to colossus (Caddyfile mounted via
              // caddyVolumeMounts when lbReady is set).
              name: 'caddy',
              image: 'caddy',
              ports: [
                { name: 'caddy-http', containerPort: 80 },
                { name: 'caddy-https', containerPort: 443 },
              ],
              volumeMounts: caddyVolumeMounts,
            },
            {
              // The storage node itself, built from the ECR image above.
              name: 'colossus',
              image: colossusImage,
              env: [
                {
                  name: 'WS_PROVIDER_ENDPOINT_URI',
                  // example 'wss://18.209.241.63.nip.io/'
                  value: wsProviderEndpointURI,
                },
                {
                  name: 'DEBUG',
                  value: 'joystream:*',
                },
              ],
              volumeMounts,
              command: [
                'yarn',
                'colossus',
                '--ws-provider',
                wsProviderEndpointURI,
                '--ipfs-host',
                'ipfs',
                // Environment-specific flags assembled earlier in this file.
                ...additionalParams,
              ],
              ports: [{ containerPort: colossusPort }],
            },
          ],
          volumes,
        },
      },
    },
  },
  {
    provider: cluster.provider,
  }
)
// Export the Deployment name
export const deploymentName = deployment.metadata.name