
Merge pull request #3101 from Joystream/giza

Update Giza Staging from Giza
Mokhtar Naamani, 3 years ago
Parent commit 3ac2f0fff4

+ 4 - 4
Cargo.lock

@@ -731,7 +731,7 @@ dependencies = [
 
 [[package]]
 name = "chain-spec-builder"
-version = "3.3.0"
+version = "3.3.1"
 dependencies = [
  "ansi_term 0.12.1",
  "enum-utils",
@@ -2332,7 +2332,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node"
-version = "5.13.0"
+version = "5.14.0"
 dependencies = [
  "frame-benchmarking",
  "frame-benchmarking-cli",
@@ -2393,7 +2393,7 @@ dependencies = [
 
 [[package]]
 name = "joystream-node-runtime"
-version = "9.13.0"
+version = "9.14.0"
 dependencies = [
  "frame-benchmarking",
  "frame-executive",
@@ -4206,7 +4206,7 @@ dependencies = [
 
 [[package]]
 name = "pallet-storage"
-version = "4.0.0"
+version = "4.0.1"
 dependencies = [
  "frame-benchmarking",
  "frame-support",

File diff suppressed because it is too large
+ 0 - 0
chain-metadata.json


+ 1 - 1
cli/package.json

@@ -11,7 +11,7 @@
     "@apidevtools/json-schema-ref-parser": "^9.0.6",
     "@ffprobe-installer/ffprobe": "^1.1.0",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@oclif/command": "^1.5.19",
     "@oclif/config": "^1.14.0",
     "@oclif/plugin-autocomplete": "^0.2.0",

+ 5 - 0
devops/kubernetes/orion/.gitignore

@@ -0,0 +1,5 @@
+/bin/
+/node_modules/
+kubeconfig*
+package-lock.json
+Pulumi.*.yaml

+ 25 - 0
devops/kubernetes/orion/Pulumi.yaml

@@ -0,0 +1,25 @@
+name: orion
+runtime: nodejs
+description: A Pulumi program to deploy Orion service to Kubernetes
+template:
+  config:
+    aws:profile:
+      default: joystream-user
+    aws:region:
+      default: us-east-1
+    isMinikube:
+      description: Whether you are deploying to minikube
+      default: false
+    queryNodeEndpoint:
+      description: Full URL for Query node endpoint
+    isLoadBalancerReady:
+      description: Whether the load balancer service is ready and has been assigned an IP
+      default: false
+    storage:
+      description: Amount of storage in gigabytes for the MongoDB volume
+      default: 40
+    orionImage:
+      description: The Orion image to use for running the orion node
+      default: joystream/orion:latest
+    contentSecret:
+      description: Orion featured content secret

+ 119 - 0
devops/kubernetes/orion/README.md

@@ -0,0 +1,119 @@
+# Amazon Kubernetes Cluster: Orion
+
+Deploy Orion to a Kubernetes cluster
+
+## Deploying the App
+
+To deploy your infrastructure, follow the below steps.
+
+### Prerequisites
+
+1. [Install Pulumi](https://www.pulumi.com/docs/get-started/install/)
+1. [Install Node.js](https://nodejs.org/en/download/)
+1. Install a package manager for Node.js, such as [npm](https://www.npmjs.com/get-npm) or [Yarn](https://yarnpkg.com/en/docs/install).
+1. [Configure AWS Credentials](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
+1. Optional (for debugging): [Install kubectl](https://kubernetes.io/docs/tasks/tools/)
+
+### Steps
+
+After cloning this repo, from this working directory, run these commands:
+
+1. Install the required Node.js packages:
+
+   This installs the dependent packages [needed](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) for our Pulumi program.
+
+   ```bash
+   $ npm install
+   ```
+
+1. Create a new stack, which is an isolated deployment target for this example:
+
+   This will initialize the Pulumi program in TypeScript.
+
+   ```bash
+   $ pulumi stack init
+   ```
+
+1. Set the required configuration variables in `Pulumi.<stack>.yaml`
+
+   ```bash
+   $ pulumi config set-all --plaintext queryNodeEndpoint='http://host.minikube.internal:8081/graphql' \
+    --plaintext isMinikube=true --plaintext orionImage='joystream/orion:latest' \
+    --plaintext contentSecret='password123' \
+    --plaintext aws:region=us-east-1 --plaintext aws:profile=joystream-user
+   ```
+
+   If you want to build the stack on AWS, set the `isMinikube` config to `false`
+
+   ```bash
+   $ pulumi config set isMinikube false
+   ```
+
+   You can also set the `storage` config parameter if required. Check `Pulumi.yaml` file for additional parameters.
+
+1. Stand up the EKS cluster:
+
+   Running `pulumi up -y` will deploy the EKS cluster. Note that provisioning a
+   new EKS cluster takes 10-15 minutes.
+
+1. Once the stack is up and running, we will modify the Caddy config to get an SSL certificate for the load balancer
+
+   Modify the config variable `isLoadBalancerReady`
+
+   ```bash
+   $ pulumi config set isLoadBalancerReady true
+   ```
+
+   Run `pulumi up -y` to update the Caddy config
+
+1. Access the Kubernetes Cluster using `kubectl`
+
+   To access your new Kubernetes cluster using `kubectl`, we need to set up the
+   `kubeconfig` file and download `kubectl`. We can leverage the Pulumi
+   stack output in the CLI, as Pulumi facilitates exporting these objects for us.
+
+   ```bash
+   $ pulumi stack output kubeconfig --show-secrets > kubeconfig
+   $ export KUBECONFIG=$PWD/kubeconfig
+   $ kubectl get nodes
+   ```
+
+   We can also use the stack output to query the cluster for our newly created Deployment:
+
+   ```bash
+   $ kubectl get deployment $(pulumi stack output deploymentName) --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get service $(pulumi stack output serviceName) --namespace=$(pulumi stack output namespaceName)
+   ```
+
+   To get logs
+
+   ```bash
+   $ kubectl config set-context --current --namespace=$(pulumi stack output namespaceName)
+   $ kubectl get pods
+   $ kubectl logs <PODNAME> --all-containers
+   ```
+
+   To run a command on a pod
+
+   ```bash
+   $ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1}
+   ```
+
+   To see the complete Pulumi stack output
+
+   ```bash
+   $ pulumi stack output
+   ```
+
+   To open an interactive shell in a pod
+
+   ```bash
+   $ kubectl exec --stdin --tty <PODNAME> -c orion -- /bin/bash
+   ```
+
+1. Once you've finished experimenting, tear down your stack's resources by destroying and removing it:
+
+   ```bash
+   $ pulumi destroy --yes
+   $ pulumi stack rm --yes
+   ```

+ 154 - 0
devops/kubernetes/orion/index.ts

@@ -0,0 +1,154 @@
+import * as awsx from '@pulumi/awsx'
+import * as eks from '@pulumi/eks'
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+import { CaddyServiceDeployment } from 'pulumi-common'
+import { MongoDBServiceDeployment } from './mongo'
+
+const awsConfig = new pulumi.Config('aws')
+const config = new pulumi.Config()
+
+const name = 'orion'
+
+const queryNodeHost = config.require('queryNodeEndpoint')
+const lbReady = config.get('isLoadBalancerReady') === 'true'
+const orionImage = config.get('orionImage') || `joystream/orion:latest`
+const contentSecret = config.require('contentSecret')
+const storage = parseInt(config.get('storage') || '40')
+const isMinikube = config.getBoolean('isMinikube')
+
+export let kubeconfig: pulumi.Output<any>
+let provider: k8s.Provider
+
+if (isMinikube) {
+  provider = new k8s.Provider('local', {})
+} else {
+  // Create a VPC for our cluster.
+  const vpc = new awsx.ec2.Vpc('orion-vpc', { numberOfAvailabilityZones: 2, numberOfNatGateways: 1 })
+
+  // Create an EKS cluster with the default configuration.
+  const cluster = new eks.Cluster('eksctl-orion-node', {
+    vpcId: vpc.id,
+    subnetIds: vpc.publicSubnetIds,
+    instanceType: 't2.medium',
+    providerCredentialOpts: {
+      profileName: awsConfig.get('profile'),
+    },
+  })
+  provider = cluster.provider
+
+  // Export the cluster's kubeconfig.
+  kubeconfig = cluster.kubeconfig
+}
+
+const resourceOptions = { provider: provider }
+
+// Create a Kubernetes Namespace
+const ns = new k8s.core.v1.Namespace(name, {}, resourceOptions)
+
+// Export the Namespace name
+export const namespaceName = ns.metadata.name
+
+const appLabels = { appClass: name }
+
+const mongoDb = new MongoDBServiceDeployment(
+  'mongo-db',
+  {
+    namespaceName: namespaceName,
+    storage: storage,
+  },
+  resourceOptions
+)
+
+// Create a Deployment
+const deployment = new k8s.apps.v1.Deployment(
+  name,
+  {
+    metadata: {
+      namespace: namespaceName,
+      labels: appLabels,
+    },
+    spec: {
+      replicas: 1,
+      selector: { matchLabels: appLabels },
+      template: {
+        metadata: {
+          labels: appLabels,
+        },
+        spec: {
+          containers: [
+            {
+              name: 'orion',
+              image: orionImage,
+              imagePullPolicy: 'IfNotPresent',
+              env: [
+                {
+                  name: 'ORION_PORT',
+                  value: '6116',
+                },
+                {
+                  name: 'ORION_MONGO_HOSTNAME',
+                  value: mongoDb.service.metadata.name,
+                },
+                {
+                  name: 'ORION_FEATURED_CONTENT_SECRET',
+                  value: contentSecret,
+                },
+                {
+                  name: 'ORION_QUERY_NODE_URL',
+                  value: queryNodeHost,
+                },
+              ],
+              ports: [{ containerPort: 6116 }],
+            },
+          ],
+        },
+      },
+    },
+  },
+  resourceOptions
+)
+
+// Create a LoadBalancer Service for the Deployment
+const service = new k8s.core.v1.Service(
+  name,
+  {
+    metadata: {
+      labels: appLabels,
+      namespace: namespaceName,
+      name: 'orion-node',
+    },
+    spec: {
+      type: isMinikube ? 'NodePort' : 'ClusterIP',
+      ports: [{ name: 'port-1', port: 6116 }],
+      selector: appLabels,
+    },
+  },
+  resourceOptions
+)
+
+// Export the Service name
+export const serviceName = service.metadata.name
+
+// Export the Deployment name
+export const deploymentName = deployment.metadata.name
+
+const caddyEndpoints = [
+  ` {
+    reverse_proxy orion-node:6116
+}`,
+]
+
+export let endpoint1: pulumi.Output<string> = pulumi.interpolate``
+export let endpoint2: pulumi.Output<string> = pulumi.interpolate``
+
+if (!isMinikube) {
+  const caddy = new CaddyServiceDeployment(
+    'caddy-proxy',
+    { lbReady, namespaceName: namespaceName, caddyEndpoints },
+    resourceOptions
+  )
+
+  endpoint1 = pulumi.interpolate`${caddy.primaryEndpoint}`
+  endpoint2 = pulumi.interpolate`${caddy.secondaryEndpoint}`
+}

+ 100 - 0
devops/kubernetes/orion/mongo.ts

@@ -0,0 +1,100 @@
+import * as k8s from '@pulumi/kubernetes'
+import * as pulumi from '@pulumi/pulumi'
+
+/**
+ * ServiceDeployment is an example abstraction that uses a class to fold together the common pattern of a
+ * Kubernetes Deployment and its associated Service object.
+ * This class deploys a MongoDB instance on a Persistent Volume.
+ */
+export class MongoDBServiceDeployment extends pulumi.ComponentResource {
+  public readonly deployment: k8s.apps.v1.Deployment
+  public readonly service: k8s.core.v1.Service
+
+  constructor(name: string, args: ServiceDeploymentArgs, opts?: pulumi.ComponentResourceOptions) {
+    super('mongodb:service:PostgresServiceDeployment', name, {}, opts)
+
+    const databaseLabels = { app: name }
+    const pvcName = `${name}-pvc`
+
+    const pvc = new k8s.core.v1.PersistentVolumeClaim(
+      pvcName,
+      {
+        metadata: {
+          labels: databaseLabels,
+          namespace: args.namespaceName,
+          name: pvcName,
+        },
+        spec: {
+          accessModes: ['ReadWriteOnce'],
+          resources: {
+            requests: {
+              storage: `${args.storage}Gi`,
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.deployment = new k8s.apps.v1.Deployment(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: databaseLabels,
+        },
+        spec: {
+          selector: { matchLabels: databaseLabels },
+          template: {
+            metadata: { labels: databaseLabels },
+            spec: {
+              containers: [
+                {
+                  name: 'mongo-db',
+                  image: 'library/mongo:4.4',
+                  volumeMounts: [
+                    {
+                      name: 'mongo-data',
+                      mountPath: '/data/db',
+                      subPath: 'mongo',
+                    },
+                  ],
+                },
+              ],
+              volumes: [
+                {
+                  name: 'mongo-data',
+                  persistentVolumeClaim: {
+                    claimName: pvcName,
+                  },
+                },
+              ],
+            },
+          },
+        },
+      },
+      { parent: this }
+    )
+
+    this.service = new k8s.core.v1.Service(
+      name,
+      {
+        metadata: {
+          namespace: args.namespaceName,
+          labels: this.deployment.metadata.labels,
+          name: name,
+        },
+        spec: {
+          ports: [{ port: 27017 }],
+          selector: this.deployment.spec.template.metadata.labels,
+        },
+      },
+      { parent: this }
+    )
+  }
+}
+
+export interface ServiceDeploymentArgs {
+  namespaceName: pulumi.Output<string>
+  storage: Number
+}

+ 13 - 0
devops/kubernetes/orion/package.json

@@ -0,0 +1,13 @@
+{
+  "name": "eks-cluster",
+  "devDependencies": {
+    "@types/node": "^10.0.0"
+  },
+  "dependencies": {
+    "@pulumi/awsx": "^0.30.0",
+    "@pulumi/eks": "^0.31.0",
+    "@pulumi/kubernetes": "^3.0.0",
+    "@pulumi/pulumi": "^3.0.0",
+    "pulumi-common": "file:../pulumi-common"
+  }
+}

+ 18 - 0
devops/kubernetes/orion/tsconfig.json

@@ -0,0 +1,18 @@
+{
+    "compilerOptions": {
+        "strict": true,
+        "outDir": "bin",
+        "target": "es2016",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "sourceMap": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}

+ 1 - 0
distributor-node/config.yml

@@ -34,6 +34,7 @@ operatorApi:
   hmacSecret: this-is-not-so-secret
 keys:
   - suri: //Alice
+  - suri: //testing//worker//Distribution//0
   # - mnemonic: "escape naive annual throw tragic achieve grunt verify cram note harvest problem"
   #   type: ed25519
   # - keyfile: "/path/to/keyfile.json"

+ 1 - 1
distributor-node/package.json

@@ -11,7 +11,7 @@
     "@apollo/client": "^3.2.5",
     "@elastic/ecs-winston-format": "^1.1.0",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@oclif/command": "^1",
     "@oclif/config": "^1",
     "@oclif/plugin-help": "^3",

+ 1 - 1
node/Cargo.toml

@@ -3,7 +3,7 @@ authors = ['Joystream contributors']
 build = 'build.rs'
 edition = '2018'
 name = 'joystream-node'
-version = '5.13.0'
+version = '5.14.0'
 default-run = "joystream-node"
 
 [[bin]]

+ 8 - 0
query-node/mappings/common.ts

@@ -6,6 +6,7 @@ import { metaToObject } from '@joystream/metadata-protobuf/utils'
 import { AnyMetadataClass, DecodedMetadataObject } from '@joystream/metadata-protobuf/types'
 
 export const CURRENT_NETWORK = Network.GIZA
+
 /*
   Simple logger enabling error and informational reporting.
 
@@ -223,3 +224,10 @@ export async function getById<T extends BaseModel>(
 
   return result
 }
+
+export function deterministicEntityId(createdInEvent: SubstrateEvent, additionalIdentifier?: string | number): string {
+  return (
+    `${createdInEvent.blockNumber}-${createdInEvent.indexInBlock}` +
+    (additionalIdentifier ? `-${additionalIdentifier}` : '')
+  )
+}
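
The new `deterministicEntityId` helper derives entity IDs purely from an event's on-chain position. Below is a minimal standalone sketch of the IDs it produces, assuming a mock event object that carries only the two fields the helper actually reads:

```ts
// Mock of the SubstrateEvent fields used by deterministicEntityId
// (assumption: only blockNumber and indexInBlock matter for this illustration).
type MockEvent = { blockNumber: number; indexInBlock: number }

function deterministicEntityId(createdInEvent: MockEvent, additionalIdentifier?: string | number): string {
  return (
    `${createdInEvent.blockNumber}-${createdInEvent.indexInBlock}` +
    (additionalIdentifier ? `-${additionalIdentifier}` : '')
  )
}

const event: MockEvent = { blockNumber: 1200, indexInBlock: 4 }

console.log(deterministicEntityId(event)) // "1200-4"
console.log(deterministicEntityId(event, 'en')) // "1200-4-en" (e.g. a per-language sub-entity)
```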

+ 6 - 11
query-node/mappings/content/utils.ts

@@ -1,5 +1,5 @@
 import { DatabaseManager, EventContext, StoreContext } from '@joystream/hydra-common'
-import { FindConditions, Raw } from 'typeorm'
+import { FindConditions } from 'typeorm'
 import {
   IVideoMetadata,
   IPublishedBeforeJoystream,
@@ -8,7 +8,7 @@ import {
   IChannelMetadata,
 } from '@joystream/metadata-protobuf'
 import { integrateMeta, isSet, isValidLanguageCode } from '@joystream/metadata-protobuf/utils'
-import { invalidMetadata, inconsistentState, logger } from '../common'
+import { invalidMetadata, inconsistentState, logger, deterministicEntityId } from '../common'
 import {
   // primary entities
   CuratorGroup,
@@ -209,9 +209,8 @@ async function processVideoMediaEncoding(
   const encoding =
     existingVideoMediaEncoding ||
     new VideoMediaEncoding({
+      id: deterministicEntityId(event),
       createdAt: new Date(event.blockTimestamp),
-      createdById: '1',
-      updatedById: '1',
     })
   // integrate media encoding-related data
   integrateMeta(encoding, metadata, ['codecName', 'container', 'mimeMediaType'])
@@ -231,10 +230,9 @@ async function processVideoMediaMetadata(
   const videoMedia =
     existingVideoMedia ||
     new VideoMediaMetadata({
+      id: deterministicEntityId(event),
       createdInBlock: event.blockNumber,
       createdAt: new Date(event.blockTimestamp),
-      createdById: '1',
-      updatedById: '1',
     })
 
   // integrate media-related data
@@ -364,13 +362,11 @@ async function processLanguage(
 
   // create new language
   const newLanguage = new Language({
+    id: deterministicEntityId(event),
     iso: languageIso,
     createdInBlock: event.blockNumber,
     createdAt: new Date(event.blockTimestamp),
     updatedAt: new Date(event.blockTimestamp),
-    // TODO: remove these lines after Hydra auto-fills the values when cascading save (remove them on all places)
-    createdById: '1',
-    updatedById: '1',
   })
 
   await store.save<Language>(newLanguage)
@@ -397,9 +393,8 @@ async function updateVideoLicense(
     license =
       previousLicense ||
       new License({
+        id: deterministicEntityId(event),
         createdAt: new Date(event.blockTimestamp),
-        createdById: '1',
-        updatedById: '1',
       })
     license.updatedAt = new Date(event.blockTimestamp)
     integrateMeta(license, licenseMetadata, ['attribution', 'code', 'customText'])

+ 1 - 1
query-node/mappings/package.json

@@ -21,7 +21,7 @@
     "@joystream/hydra-common": "3.1.0-alpha.16",
     "@joystream/hydra-db-utils": "3.1.0-alpha.16",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@joystream/warthog": "2.41.2",
     "@apollo/client": "^3.2.5"
   },

+ 3 - 2
query-node/mappings/storage/index.ts

@@ -76,6 +76,7 @@ export async function storage_StorageOperatorMetadataSet({ event, store }: Event
   const [bucketId, , metadataBytes] = new Storage.StorageOperatorMetadataSetEvent(event).params
   const storageBucket = await getStorageBucketWithOperatorMetadata(store, bucketId.toString())
   storageBucket.operatorMetadata = await processStorageOperatorMetadata(
+    event,
     store,
     storageBucket.operatorMetadata,
     metadataBytes
@@ -276,7 +277,7 @@ export async function storage_DistributionBucketFamilyMetadataSet({
   const [familyId, metadataBytes] = new Storage.DistributionBucketFamilyMetadataSetEvent(event).params
 
   const family = await getDistributionBucketFamilyWithMetadata(store, familyId.toString())
-  family.metadata = await processDistributionBucketFamilyMetadata(store, family.metadata, metadataBytes)
+  family.metadata = await processDistributionBucketFamilyMetadata(event, store, family.metadata, metadataBytes)
 
   await store.save<DistributionBucketFamily>(family)
 }
@@ -426,7 +427,7 @@ export async function storage_DistributionBucketMetadataSet({
   const [workerId, bucketId, metadataBytes] = new Storage.DistributionBucketMetadataSetEvent(event).params
 
   const operator = await getDistributionBucketOperatorWithMetadata(store, distributionOperatorId(bucketId, workerId))
-  operator.metadata = await processDistributionOperatorMetadata(store, operator.metadata, metadataBytes)
+  operator.metadata = await processDistributionOperatorMetadata(event, store, operator.metadata, metadataBytes)
 
   await store.save<DistributionBucketOperator>(operator)
 }

+ 47 - 45
query-node/mappings/storage/metadata.ts

@@ -1,4 +1,4 @@
-import { DatabaseManager } from '@joystream/hydra-common'
+import { DatabaseManager, SubstrateEvent } from '@joystream/hydra-common'
 import {
   DistributionBucketFamilyMetadata,
   DistributionBucketOperatorMetadata,
@@ -11,7 +11,7 @@ import {
   GeographicalAreaSubdivistion,
   DistributionBucketFamilyGeographicArea,
 } from 'query-node/dist/model'
-import { deserializeMetadata, invalidMetadata } from '../common'
+import { deserializeMetadata, deterministicEntityId, invalidMetadata } from '../common'
 import { Bytes } from '@polkadot/types'
 import {
   DistributionBucketOperatorMetadata as DistributionBucketOperatorMetadataProto,
@@ -21,6 +21,7 @@ import {
   GeographicalArea as GeographicalAreaProto,
 } from '@joystream/metadata-protobuf'
 import { isSet, isEmptyObject, isValidCountryCode, isValidSubdivisionCode } from '@joystream/metadata-protobuf/utils'
+import _ from 'lodash'
 
 const protobufContinentToGraphlContinent: { [key in GeographicalAreaProto.Continent]: Continent } = {
   [GeographicalAreaProto.Continent.AF]: Continent.AF,
@@ -33,11 +34,12 @@ const protobufContinentToGraphlContinent: { [key in GeographicalAreaProto.Contin
 }
 
 async function processNodeLocationMetadata(
+  event: SubstrateEvent,
   store: DatabaseManager,
   current: NodeLocationMetadata | undefined,
   meta: INodeLocationMetadata
 ): Promise<NodeLocationMetadata> {
-  const nodeLocation = current || new NodeLocationMetadata()
+  const nodeLocation = current || new NodeLocationMetadata({ id: deterministicEntityId(event) })
   if (isSet(meta.city)) {
     nodeLocation.city = meta.city
   }
@@ -45,7 +47,7 @@ async function processNodeLocationMetadata(
     if (isEmptyObject(meta.coordinates)) {
       nodeLocation.coordinates = null as any
     } else {
-      const coordinates = current?.coordinates || new GeoCoordinates()
+      const coordinates = current?.coordinates || new GeoCoordinates({ id: deterministicEntityId(event) })
       coordinates.latitude = meta.coordinates.latitude || coordinates.latitude || 0
       coordinates.longitude = meta.coordinates.longitude || coordinates.longitude || 0
       await store.save<GeoCoordinates>(coordinates)
@@ -65,6 +67,7 @@ async function processNodeLocationMetadata(
 }
 
 export async function processDistributionOperatorMetadata(
+  event: SubstrateEvent,
   store: DatabaseManager,
   current: DistributionBucketOperatorMetadata | undefined,
   metadataBytes: Bytes
@@ -73,14 +76,14 @@ export async function processDistributionOperatorMetadata(
   if (!meta) {
     return current
   }
-  const metadataEntity = current || new DistributionBucketOperatorMetadata()
+  const metadataEntity = current || new DistributionBucketOperatorMetadata({ id: deterministicEntityId(event) })
   if (isSet(meta.endpoint)) {
     metadataEntity.nodeEndpoint = meta.endpoint
   }
   if (isSet(meta.location)) {
     metadataEntity.nodeLocation = isEmptyObject(meta.location)
       ? (null as any)
-      : await processNodeLocationMetadata(store, metadataEntity.nodeLocation, meta.location)
+      : await processNodeLocationMetadata(event, store, metadataEntity.nodeLocation, meta.location)
   }
   if (isSet(meta.extra)) {
     metadataEntity.extra = meta.extra
@@ -92,6 +95,7 @@ export async function processDistributionOperatorMetadata(
 }
 
 export async function processStorageOperatorMetadata(
+  event: SubstrateEvent,
   store: DatabaseManager,
   current: StorageBucketOperatorMetadata | undefined,
   metadataBytes: Bytes
@@ -100,14 +104,14 @@ export async function processStorageOperatorMetadata(
   if (!meta) {
     return current
   }
-  const metadataEntity = current || new StorageBucketOperatorMetadata()
+  const metadataEntity = current || new StorageBucketOperatorMetadata({ id: deterministicEntityId(event) })
   if (isSet(meta.endpoint)) {
     metadataEntity.nodeEndpoint = meta.endpoint || (null as any)
   }
   if (isSet(meta.location)) {
     metadataEntity.nodeLocation = isEmptyObject(meta.location)
       ? (null as any)
-      : await processNodeLocationMetadata(store, metadataEntity.nodeLocation, meta.location)
+      : await processNodeLocationMetadata(event, store, metadataEntity.nodeLocation, meta.location)
   }
   if (isSet(meta.extra)) {
     metadataEntity.extra = meta.extra || (null as any)
@@ -119,6 +123,7 @@ export async function processStorageOperatorMetadata(
 }
 
 export async function processDistributionBucketFamilyMetadata(
+  event: SubstrateEvent,
   store: DatabaseManager,
   current: DistributionBucketFamilyMetadata | undefined,
   metadataBytes: Bytes
@@ -127,7 +132,7 @@ export async function processDistributionBucketFamilyMetadata(
   if (!meta) {
     return current
   }
-  const metadataEntity = current || new DistributionBucketFamilyMetadata()
+  const metadataEntity = current || new DistributionBucketFamilyMetadata({ id: deterministicEntityId(event) })
   if (isSet(meta.region)) {
     metadataEntity.region = meta.region || (null as any)
   }
@@ -138,7 +143,7 @@ export async function processDistributionBucketFamilyMetadata(
     metadataEntity.latencyTestTargets = meta.latencyTestTargets.filter((t) => t)
   }
 
-  await store.save<DistributionBucketOperatorMetadata>(metadataEntity)
+  await store.save<DistributionBucketFamilyMetadata>(metadataEntity)
 
   // Update areas after metadata is saved (since we need an id to reference)
   if (isSet(meta.areas)) {
@@ -146,45 +151,42 @@ export async function processDistributionBucketFamilyMetadata(
     await Promise.all(metadataEntity.areas?.map((a) => store.remove<DistributionBucketFamilyGeographicArea>(a)) || [])
     // Save new areas
     await Promise.all(
-      meta.areas
-        .filter((a) => !isEmptyObject(a))
-        .map(async (a) => {
-          const area = new DistributionBucketFamilyGeographicArea({
-            distributionBucketFamilyMetadata: metadataEntity,
-          })
+      _.uniqWith(
+        meta.areas.filter((a) => !isEmptyObject(a)),
+        _.isEqual
+      ).map(async (a, i) => {
+        const area = new DistributionBucketFamilyGeographicArea({
+          id: `${metadataEntity.id}-${i}`,
+          distributionBucketFamilyMetadata: metadataEntity,
+        })
 
-          if (a.continent) {
-            const continent = new GeographicalAreaContinent()
-            continent.code = protobufContinentToGraphlContinent[a.continent]
-            if (!continent.code) {
-              return invalidMetadata(`Unrecognized continent enum variant: ${a.continent}`)
-            }
-            area.id = `${metadataEntity.id}-C-${continent.code}`
-            area.area = continent
+        if (a.continent) {
+          const continent = new GeographicalAreaContinent()
+          continent.code = protobufContinentToGraphlContinent[a.continent]
+          if (!continent.code) {
+            return invalidMetadata(`Unrecognized continent enum variant: ${a.continent}`)
           }
-
-          if (a.countryCode) {
-            if (!isValidCountryCode(a.countryCode)) {
-              return invalidMetadata(`Invalid country code: ${a.countryCode}`)
-            }
-            const country = new GeographicalAreaCountry()
-            country.code = a.countryCode
-            area.id = `${metadataEntity.id}-c-${country.code}`
-            area.area = country
+          area.area = continent
+        } else if (a.countryCode) {
+          if (!isValidCountryCode(a.countryCode)) {
+            return invalidMetadata(`Invalid country code: ${a.countryCode}`)
           }
-
-          if (a.subdivisionCode) {
-            if (!isValidSubdivisionCode(a.subdivisionCode)) {
-              return invalidMetadata(`Invalid subdivision code: ${a.subdivisionCode}`)
-            }
-            const subdivision = new GeographicalAreaSubdivistion()
-            subdivision.code = a.subdivisionCode
-            area.id = `${metadataEntity.id}-s-${subdivision.code}`
-            area.area = subdivision
+          const country = new GeographicalAreaCountry()
+          country.code = a.countryCode
+          area.area = country
+        } else if (a.subdivisionCode) {
+          if (!isValidSubdivisionCode(a.subdivisionCode)) {
+            return invalidMetadata(`Invalid subdivision code: ${a.subdivisionCode}`)
           }
-
-          await store.save<DistributionBucketFamilyGeographicArea>(area)
-        })
+          const subdivision = new GeographicalAreaSubdivistion()
+          subdivision.code = a.subdivisionCode
+          area.area = subdivision
+        } else {
+          return
+        }
+
+        await store.save<DistributionBucketFamilyGeographicArea>(area)
+      })
     )
   }
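
The reworked block above first deduplicates the incoming areas with lodash and then keys each `DistributionBucketFamilyGeographicArea` by its index within the metadata entity. A small standalone sketch of that dedup and ID scheme, using illustrative area objects and a hypothetical metadata entity id:

```ts
import _ from 'lodash'

// Illustrative area inputs: two deep-equal country entries and one continent entry.
const areas = [{ countryCode: 'DE' }, { countryCode: 'DE' }, { continent: 1 }]

// _.uniqWith with _.isEqual keeps only the first of each group of deep-equal objects.
const uniqueAreas = _.uniqWith(areas, _.isEqual)
// => [{ countryCode: 'DE' }, { continent: 1 }]

// Each remaining area then gets an id of the form `${metadataEntity.id}-${i}`.
const metadataEntityId = '1200-4' // hypothetical metadata entity id
const areaIds = uniqueAreas.map((_a, i) => `${metadataEntityId}-${i}`)
// => ['1200-4-0', '1200-4-1']

console.log(uniqueAreas, areaIds)
```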
 

+ 1 - 1
runtime-modules/storage/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = 'pallet-storage'
-version = '4.0.0'
+version = '4.0.1'
 authors = ['Joystream contributors']
 edition = '2018'
 

+ 74 - 6
runtime-modules/storage/src/lib.rs

@@ -824,6 +824,26 @@ pub struct StorageBucketRecord<WorkerId, AccountId> {
 
     /// Defines limits for a bucket.
     pub voucher: Voucher,
+
+    /// Number of assigned bags.
+    pub assigned_bags: u64,
+}
+
+impl<WorkerId, AccountId> StorageBucketRecord<WorkerId, AccountId> {
+    // Increment the assigned bags number.
+    fn register_bag_assignment(&mut self) {
+        self.assigned_bags = self.assigned_bags.saturating_add(1);
+    }
+
+    // Decrement the assigned bags number.
+    fn unregister_bag_assignment(&mut self) {
+        self.assigned_bags = self.assigned_bags.saturating_sub(1);
+    }
+
+    // Checks the bag assignment number. Returns true if it equals zero.
+    fn no_bags_assigned(&self) -> bool {
+        self.assigned_bags == 0
+    }
 }
 
 // Helper-struct for the data object uploading.
@@ -1537,6 +1557,9 @@ decl_module! {
                 Error::<T>::CannotDeleteNonEmptyStorageBucket
             );
 
+            // Check that no assigned bags are left.
+            ensure!(bucket.no_bags_assigned(), Error::<T>::StorageBucketIsBoundToBag);
+
             //
             // == MUTATION SAFE ==
             //
@@ -1716,6 +1739,7 @@ decl_module! {
                 operator_status,
                 accepting_new_bags,
                 voucher,
+                assigned_bags: 0,
             };
 
             let storage_bucket_id = Self::next_storage_bucket_id();
@@ -1773,6 +1797,9 @@ decl_module! {
                 );
             }
 
+            // Update bag counters.
+            Self::change_bag_assignments_for_storage_buckets(&add_buckets, &remove_buckets);
+
             Bags::<T>::mutate(&bag_id, |bag| {
                 bag.update_storage_buckets(&mut add_buckets.clone(), &remove_buckets);
             });
@@ -2223,7 +2250,10 @@ decl_module! {
                 bag.update_distribution_buckets(&mut add_buckets_ids.clone(), &remove_buckets_ids);
             });
 
-            Self::change_bag_assignments(&add_buckets_ids, &remove_buckets_ids);
+            Self::change_bag_assignments_for_distribution_buckets(
+                &add_buckets_ids,
+                &remove_buckets_ids
+            );
 
             Self::deposit_event(
                 RawEvent::DistributionBucketsUpdatedForBag(
@@ -2645,6 +2675,8 @@ impl<T: Trait> DataObjectStorage<T> for Module<T> {
 
         let bag_id: BagId<T> = dynamic_bag_id.clone().into();
 
+        let deleted_dynamic_bag = Self::dynamic_bag(&dynamic_bag_id);
+
         //
         // == MUTATION SAFE ==
         //
@@ -2655,6 +2687,16 @@ impl<T: Trait> DataObjectStorage<T> for Module<T> {
 
         <Bags<T>>::remove(&bag_id);
 
+        Self::change_bag_assignments_for_distribution_buckets(
+            &BTreeSet::new(),
+            &deleted_dynamic_bag.distributed_by,
+        );
+
+        Self::change_bag_assignments_for_storage_buckets(
+            &BTreeSet::new(),
+            &deleted_dynamic_bag.stored_by,
+        );
+
         Self::deposit_event(RawEvent::DynamicBagDeleted(
             deletion_prize_account_id,
             dynamic_bag_id,
@@ -2704,6 +2746,7 @@ impl<T: Trait> DataObjectStorage<T> for Module<T> {
         //
         // == MUTATION SAFE ==
         //
+
         Self::create_dynamic_bag_inner(
             &dynamic_bag_id,
             &deletion_prize,
@@ -2753,10 +2796,6 @@ impl<T: Trait> Module<T> {
         storage_buckets: &BTreeSet<T::StorageBucketId>,
         distribution_buckets: &BTreeSet<DistributionBucketId<T>>,
     ) -> DispatchResult {
-        //
-        // = MUTATION SAFE =
-        //
-
         if let Some(deletion_prize) = deletion_prize.clone() {
             <StorageTreasury<T>>::deposit(&deletion_prize.account_id, deletion_prize.prize)?;
         }
@@ -2772,6 +2811,13 @@ impl<T: Trait> Module<T> {
 
         <Bags<T>>::insert(&bag_id, bag);
 
+        Self::change_bag_assignments_for_distribution_buckets(
+            &distribution_buckets,
+            &BTreeSet::new(),
+        );
+
+        Self::change_bag_assignments_for_storage_buckets(&storage_buckets, &BTreeSet::new());
+
         Self::deposit_event(RawEvent::DynamicBagCreated(
             dynamic_bag_id.clone(),
             deletion_prize.clone(),
@@ -3826,7 +3872,7 @@ impl<T: Trait> Module<T> {
     }
 
     // Add and/or remove distribution buckets assignments to bags.
-    fn change_bag_assignments(
+    fn change_bag_assignments_for_distribution_buckets(
         add_buckets: &BTreeSet<DistributionBucketId<T>>,
         remove_buckets: &BTreeSet<DistributionBucketId<T>>,
     ) {
@@ -3861,6 +3907,28 @@ impl<T: Trait> Module<T> {
         }
     }
 
+    // Add and/or remove storage buckets assignments to bags.
+    fn change_bag_assignments_for_storage_buckets(
+        add_buckets: &BTreeSet<T::StorageBucketId>,
+        remove_buckets: &BTreeSet<T::StorageBucketId>,
+    ) {
+        for bucket_id in add_buckets.iter() {
+            if StorageBucketById::<T>::contains_key(bucket_id) {
+                StorageBucketById::<T>::mutate(bucket_id, |bucket| {
+                    bucket.register_bag_assignment();
+                })
+            }
+        }
+
+        for bucket_id in remove_buckets.iter() {
+            if StorageBucketById::<T>::contains_key(bucket_id) {
+                StorageBucketById::<T>::mutate(bucket_id, |bucket| {
+                    bucket.unregister_bag_assignment();
+                })
+            }
+        }
+    }
+
     // Checks distribution buckets for bag assignment number. Returns true only if all 'assigned_bags' are
     // zero.
     fn no_bags_assigned(family_id: &T::DistributionBucketFamilyId) -> bool {

+ 202 - 0
runtime-modules/storage/src/tests/mod.rs

@@ -329,6 +329,49 @@ fn update_storage_buckets_for_bags_succeeded() {
     });
 }
 
+#[test]
+fn update_storage_buckets_for_bags_succeeded_with_additioonal_checks_on_adding_and_removing() {
+    build_test_externalities().execute_with(|| {
+        set_default_update_storage_buckets_per_bag_limit();
+
+        let static_bag_id = StaticBagId::Council;
+        let bag_id: BagId<Test> = static_bag_id.into();
+
+        let bucket_id = CreateStorageBucketFixture::default()
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
+            .call_and_assert(Ok(()))
+            .unwrap();
+
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
+
+        UpdateStorageBucketForBagsFixture::default()
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
+            .with_bag_id(bag_id.clone())
+            .with_add_bucket_ids(add_buckets_ids.clone())
+            .call_and_assert(Ok(()));
+
+        // Add check
+        let bag = Storage::bag(&bag_id);
+        assert_eq!(bag.stored_by, add_buckets_ids);
+
+        let bucket = Storage::storage_bucket_by_id(&bucket_id);
+        assert_eq!(bucket.assigned_bags, 1);
+
+        // ******
+        UpdateStorageBucketForBagsFixture::default()
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
+            .with_bag_id(bag_id.clone())
+            .with_remove_bucket_ids(add_buckets_ids.clone())
+            .call_and_assert(Ok(()));
+
+        let bag = Storage::bag(&bag_id);
+        assert_eq!(bag.stored_by.len(), 0);
+
+        let bucket = Storage::storage_bucket_by_id(&bucket_id);
+        assert_eq!(bucket.assigned_bags, 0);
+    });
+}
+
 #[test]
 fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
     build_test_externalities().execute_with(|| {
@@ -2620,6 +2663,119 @@ fn delete_dynamic_bags_succeeded() {
     });
 }
 
+#[test]
+fn delete_dynamic_bags_succeeded_with_assigned_distribution_buckets() {
+    build_test_externalities().execute_with(|| {
+        let initial_balance = 1000;
+        let deletion_prize_value = 77;
+        increase_account_balance(&DEFAULT_MEMBER_ACCOUNT_ID, initial_balance);
+
+        let distribution_buckets_number = 10;
+        let family_policy_number1 = 2;
+        let family_policy_number2 = 3;
+
+        create_storage_buckets(DEFAULT_STORAGE_BUCKETS_NUMBER);
+        let (family1, _) =
+            create_distribution_bucket_family_with_buckets(distribution_buckets_number);
+        let (family2, _) =
+            create_distribution_bucket_family_with_buckets(distribution_buckets_number);
+
+        let family_policy = BTreeMap::from_iter(vec![
+            (family1, family_policy_number1),
+            (family2, family_policy_number2),
+        ]);
+
+        UpdateFamiliesInDynamicBagCreationPolicyFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_families(family_policy)
+            .call_and_assert(Ok(()));
+
+        let dynamic_bag_id = DynamicBagId::<Test>::Member(DEFAULT_MEMBER_ID);
+        CreateDynamicBagFixture::default()
+            .with_bag_id(dynamic_bag_id.clone())
+            .with_deletion_prize(DynamicBagDeletionPrize::<Test> {
+                account_id: DEFAULT_MEMBER_ACCOUNT_ID,
+                prize: deletion_prize_value,
+            })
+            .call_and_assert(Ok(()));
+
+        let bag = Storage::dynamic_bag(&dynamic_bag_id);
+
+        let total_distributed_buckets_number = family_policy_number1 + family_policy_number2;
+        assert_eq!(
+            bag.distributed_by.len(),
+            total_distributed_buckets_number as usize
+        );
+
+        let distributed_by_bag = bag.distributed_by.clone();
+        for distribution_bucket_id in &distributed_by_bag {
+            let bucket = Storage::distribution_bucket_by_family_id_by_index(
+                distribution_bucket_id.distribution_bucket_family_id,
+                distribution_bucket_id.distribution_bucket_index,
+            );
+
+            assert_eq!(bucket.assigned_bags, 1);
+        }
+
+        DeleteDynamicBagFixture::default()
+            .with_bag_id(dynamic_bag_id.clone())
+            .with_deletion_account_id(DEFAULT_MEMBER_ACCOUNT_ID)
+            .call_and_assert(Ok(()));
+
+        for distribution_bucket_id in &distributed_by_bag {
+            let bucket = Storage::distribution_bucket_by_family_id_by_index(
+                distribution_bucket_id.distribution_bucket_family_id,
+                distribution_bucket_id.distribution_bucket_index,
+            );
+
+            assert_eq!(bucket.assigned_bags, 0);
+        }
+    });
+}
+
+#[test]
+fn delete_dynamic_bags_succeeded_with_assigned_storage_buckets() {
+    build_test_externalities().execute_with(|| {
+        let initial_balance = 1000;
+        let deletion_prize_value = 77;
+        increase_account_balance(&DEFAULT_MEMBER_ACCOUNT_ID, initial_balance);
+
+        let storage_buckets_number = DefaultMemberDynamicBagNumberOfStorageBuckets::get();
+        create_storage_buckets(storage_buckets_number);
+
+        let dynamic_bag_id = DynamicBagId::<Test>::Member(DEFAULT_MEMBER_ID);
+        CreateDynamicBagFixture::default()
+            .with_bag_id(dynamic_bag_id.clone())
+            .with_deletion_prize(DynamicBagDeletionPrize::<Test> {
+                account_id: DEFAULT_MEMBER_ACCOUNT_ID,
+                prize: deletion_prize_value,
+            })
+            .call_and_assert(Ok(()));
+
+        let bag = Storage::dynamic_bag(&dynamic_bag_id);
+
+        assert_eq!(bag.stored_by.len(), storage_buckets_number as usize);
+
+        let stored_by_bag = bag.stored_by.clone();
+        for bucket_id in &stored_by_bag {
+            let bucket = Storage::storage_bucket_by_id(bucket_id);
+
+            assert_eq!(bucket.assigned_bags, 1);
+        }
+
+        DeleteDynamicBagFixture::default()
+            .with_bag_id(dynamic_bag_id.clone())
+            .with_deletion_account_id(DEFAULT_MEMBER_ACCOUNT_ID)
+            .call_and_assert(Ok(()));
+
+        for bucket_id in &stored_by_bag {
+            let bucket = Storage::storage_bucket_by_id(bucket_id);
+
+            assert_eq!(bucket.assigned_bags, 0);
+        }
+    });
+}
+
 #[test]
 fn delete_dynamic_bags_fails_with_non_existent_dynamic_bag() {
     build_test_externalities().execute_with(|| {
@@ -2731,6 +2887,20 @@ fn delete_storage_bucket_fails_with_non_empty_bucket() {
     });
 }
 
+#[test]
+fn delete_storage_bucket_fails_with_assigned_bag() {
+    build_test_externalities().execute_with(|| {
+        let bag_id = BagId::<Test>::Static(StaticBagId::Council);
+
+        let bucket_id = create_default_storage_bucket_and_assign_to_bag(bag_id.clone());
+
+        DeleteStorageBucketFixture::default()
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
+            .with_storage_bucket_id(bucket_id)
+            .call_and_assert(Err(Error::<Test>::StorageBucketIsBoundToBag.into()));
+    });
+}
+
 #[test]
 fn remove_storage_bucket_operator_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3131,8 +3301,25 @@ fn create_dynamic_bag_succeeded() {
         run_to_block(starting_block);
 
         let dynamic_bag_id = DynamicBagId::<Test>::Member(DEFAULT_MEMBER_ID);
+        let distribution_buckets_number = 10;
+        let family_policy_number1 = 2;
+        let family_policy_number2 = 3;
 
         create_storage_buckets(DEFAULT_STORAGE_BUCKETS_NUMBER);
+        let (family1, _) =
+            create_distribution_bucket_family_with_buckets(distribution_buckets_number);
+        let (family2, _) =
+            create_distribution_bucket_family_with_buckets(distribution_buckets_number);
+
+        let family_policy = BTreeMap::from_iter(vec![
+            (family1, family_policy_number1),
+            (family2, family_policy_number2),
+        ]);
+
+        UpdateFamiliesInDynamicBagCreationPolicyFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_families(family_policy)
+            .call_and_assert(Ok(()));
 
         let deletion_prize_value = 100;
         let deletion_prize_account_id = DEFAULT_MEMBER_ACCOUNT_ID;
@@ -3174,6 +3361,21 @@ fn create_dynamic_bag_succeeded() {
             creation_policy.number_of_storage_buckets as usize
         );
 
+        let total_distributed_buckets_number = family_policy_number1 + family_policy_number2;
+        assert_eq!(
+            bag.distributed_by.len(),
+            total_distributed_buckets_number as usize
+        );
+
+        for distribution_bucket_id in &bag.distributed_by {
+            let bucket = Storage::distribution_bucket_by_family_id_by_index(
+                distribution_bucket_id.distribution_bucket_family_id,
+                distribution_bucket_id.distribution_bucket_index,
+            );
+
+            assert_eq!(bucket.assigned_bags, 1);
+        }
+
         assert_eq!(bag.deletion_prize.unwrap(), deletion_prize_value);
 
         // post-check balances

+ 1 - 1
runtime/Cargo.toml

@@ -4,7 +4,7 @@ edition = '2018'
 name = 'joystream-node-runtime'
 # Follow convention: https://github.com/Joystream/substrate-runtime-joystream/issues/1
 # {Authoring}.{Spec}.{Impl} of the RuntimeVersion
-version = '9.13.0'
+version = '9.14.0'
 
 [dependencies]
 # Third-party dependencies

+ 1 - 1
runtime/src/lib.rs

@@ -83,7 +83,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     spec_name: create_runtime_str!("joystream-node"),
     impl_name: create_runtime_str!("joystream-node"),
     authoring_version: 9,
-    spec_version: 13,
+    spec_version: 14,
     impl_version: 0,
     apis: crate::runtime_api::EXPORTED_RUNTIME_API_VERSIONS,
     transaction_version: 1,

+ 1 - 1
storage-node/package.json

@@ -11,7 +11,7 @@
     "@apollo/client": "^3.3.21",
     "@elastic/ecs-winston-format": "^1.3.1",
     "@joystream/metadata-protobuf": "^1.0.0",
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@oclif/command": "^1",
     "@oclif/config": "^1",
     "@oclif/plugin-help": "^3",

+ 2 - 1
types/augment/all/defs.json

@@ -469,7 +469,8 @@
     "StorageBucket": {
         "operator_status": "StorageBucketOperatorStatus",
         "accepting_new_bags": "bool",
-        "voucher": "Voucher"
+        "voucher": "Voucher",
+        "assigned_bags": "u64"
     },
     "StaticBagId": {
         "_enum": {

+ 1 - 0
types/augment/all/types.ts

@@ -1059,6 +1059,7 @@ export interface StorageBucket extends Struct {
   readonly operator_status: StorageBucketOperatorStatus;
   readonly accepting_new_bags: bool;
   readonly voucher: Voucher;
+  readonly assigned_bags: u64;
 }
 
 /** @name StorageBucketId */

+ 1 - 1
types/package.json

@@ -1,6 +1,6 @@
 {
   "name": "@joystream/types",
-  "version": "0.17.1",
+  "version": "0.17.2",
   "description": "Types for Joystream Substrate Runtime - Giza release",
   "main": "index.js",
   "types": "index.d.ts",

+ 2 - 0
types/src/storage.ts

@@ -172,6 +172,7 @@ export type IStorageBucket = {
   operator_status: StorageBucketOperatorStatus
   accepting_new_bags: bool
   voucher: Voucher
+  assigned_bags: u64
 }
 
 export class StorageBucket
@@ -179,6 +180,7 @@ export class StorageBucket
     operator_status: StorageBucketOperatorStatus,
     accepting_new_bags: bool,
     voucher: Voucher,
+    assigned_bags: u64,
   })
   implements IStorageBucket {}
 

+ 1 - 1
utils/api-scripts/package.json

@@ -11,7 +11,7 @@
     "tsnode-strict": "node -r ts-node/register --unhandled-rejections=strict"
   },
   "dependencies": {
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@polkadot/api": "5.9.1",
     "@polkadot/types": "5.9.1",
     "@polkadot/keyring": "7.3.1",

+ 1 - 1
utils/chain-spec-builder/Cargo.toml

@@ -3,7 +3,7 @@ authors = ['Joystream contributors']
 build = 'build.rs'
 edition = '2018'
 name = 'chain-spec-builder'
-version = '3.3.0'
+version = '3.3.1'
 
 [dependencies]
 ansi_term = "0.12.1"

+ 1 - 1
utils/migration-scripts/package.json

@@ -12,7 +12,7 @@
     "@oclif/config": "^1",
     "@oclif/plugin-help": "^3.2.3",
     "tslib": "^1",
-    "@joystream/types": "^0.17.1",
+    "@joystream/types": "^0.17.2",
     "@polkadot/api": "5.9.1",
     "@polkadot/types": "5.9.1",
     "@polkadot/keyring": "7.3.1",

+ 1 - 1
yarn.lock

@@ -3131,7 +3131,7 @@
     yaml-validator "^3.0.0"
 
 "@joystream/types@link:types":
-  version "0.17.0"
+  version "0.17.1"
   dependencies:
     "@polkadot/api" "5.9.1"
     "@polkadot/keyring" "7.3.1"

Some files were not shown because too many files changed in this diff