Browse Source

Merge branch 'giza' into vnft_schema_mappings_second

ondratra 3 years ago
parent
commit
9e823278ff
59 changed files with 3026 additions and 1440 deletions
  1. 2 2
      Cargo.lock
  2. 1 1
      README.md
  3. 0 0
      chain-metadata.json
  4. 3 3
      devops/git-hooks/pre-push
  5. 5 5
      joystream-node-armv7.Dockerfile
  6. 5 5
      joystream-node.Dockerfile
  7. 3 3
      node/README.md
  8. 2 1
      package.json
  9. 6 6
      pioneer/packages/joy-proposals/src/Proposal/Body.tsx
  10. 3 0
      query-node/build.sh
  11. 5 2
      query-node/codegen/package.json
  12. 618 110
      query-node/codegen/yarn.lock
  13. 4 4
      query-node/mappings/bootstrap-data/types.ts
  14. 1 1
      query-node/mappings/common.ts
  15. 10 6
      query-node/mappings/content/channel.ts
  16. 5 3
      query-node/mappings/package.json
  17. 23 0
      query-node/mappings/scripts/postHydraCLIInstall.ts
  18. 45 0
      query-node/mappings/scripts/postInstall.ts
  19. 19 0
      query-node/mappings/scripts/utils.ts
  20. 49 71
      query-node/mappings/storage/index.ts
  21. 8 4
      query-node/mappings/storage/utils.ts
  22. 1 1
      query-node/package.json
  23. 5 3
      query-node/schemas/content.graphql
  24. 4 0
      query-node/schemas/membership.graphql
  25. 8 38
      query-node/schemas/storage.graphql
  26. 8 7
      runtime-modules/common/src/working_group.rs
  27. 6 0
      runtime-modules/content/src/errors.rs
  28. 117 73
      runtime-modules/content/src/lib.rs
  29. 1 1
      runtime-modules/content/src/nft/mod.rs
  30. 200 44
      runtime-modules/content/src/permissions/mod.rs
  31. 196 118
      runtime-modules/content/src/tests/channels.rs
  32. 81 12
      runtime-modules/content/src/tests/mock.rs
  33. 1 0
      runtime-modules/content/src/tests/nft/accept_incoming_offer.rs
  34. 1 0
      runtime-modules/content/src/tests/nft/buy_nft.rs
  35. 275 4
      runtime-modules/content/src/tests/videos.rs
  36. 14 6
      runtime-modules/content/src/types.rs
  37. 0 94
      runtime-modules/storage/src/distribution_bucket_picker.rs
  38. 216 219
      runtime-modules/storage/src/lib.rs
  39. 78 0
      runtime-modules/storage/src/random_buckets/distribution_bucket_picker.rs
  40. 131 0
      runtime-modules/storage/src/random_buckets/mod.rs
  41. 65 0
      runtime-modules/storage/src/random_buckets/storage_bucket_picker.rs
  42. 0 168
      runtime-modules/storage/src/storage_bucket_picker.rs
  43. 109 96
      runtime-modules/storage/src/tests/fixtures.rs
  44. 2 4
      runtime-modules/storage/src/tests/mocks.rs
  45. 144 223
      runtime-modules/storage/src/tests/mod.rs
  46. 3 0
      runtime/src/integration/content_directory.rs
  47. 1 3
      runtime/src/lib.rs
  48. 3 2
      runtime/src/primitives.rs
  49. 2 2
      scripts/cargo-build.sh
  50. 2 2
      scripts/cargo-tests-with-networking.sh
  51. 1 1
      scripts/raspberry-cross-build.sh
  52. 4 4
      scripts/run-dev-chain.sh
  53. 2 2
      setup.sh
  54. 15 11
      types/augment/all/defs.json
  55. 15 8
      types/augment/all/types.ts
  56. 1 8
      types/src/common.ts
  57. 5 1
      types/src/content/index.ts
  58. 18 7
      types/src/storage.ts
  59. 474 51
      yarn.lock

+ 2 - 2
Cargo.lock

@@ -1242,9 +1242,9 @@ dependencies = [
 
 [[package]]
 name = "environmental"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e"
+checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797"
 
 [[package]]
 name = "erased-serde"

+ 1 - 1
README.md

@@ -95,7 +95,7 @@ You can also run your own joystream-node:
 
 ```sh
 git checkout master
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ./target/release/joystream-node -- --pruning archive --chain testnets/joy-testnet-5.json
 ```
 

File diff suppressed because it is too large
+ 0 - 0
chain-metadata.json


+ 3 - 3
devops/git-hooks/pre-push

@@ -1,13 +1,13 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running clippy (rust linter)'
 # When custom build.rs triggers wasm-build-runner-impl to build we get error:
 # "Rust WASM toolchain not installed, please install it!"
 # So we skip building the WASM binary by setting BUILD_DUMMY_WASM_BINARY=1
-BUILD_DUMMY_WASM_BINARY=1 cargo +nightly-2021-03-24 clippy --release --all -- -D warnings
+BUILD_DUMMY_WASM_BINARY=1 cargo +nightly-2021-02-20 clippy --release --all -- -D warnings
 
 echo 'running cargo unit tests'
-cargo +nightly-2021-03-24 test --release --all
+cargo +nightly-2021-02-20 test --release --all

+ 5 - 5
joystream-node-armv7.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN apt-get install -y libprotobuf-dev protobuf-compiler
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \

+ 5 - 5
joystream-node.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \
     cargo build --release

+ 3 - 3
node/README.md

@@ -26,7 +26,7 @@ cd joystream/
 Compile the node and runtime:
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ```
 
 This produces the binary in `./target/release/joystream-node`
@@ -57,7 +57,7 @@ Use the `--chain` argument, and specify the path to the genesis `chain.json` fil
 Running unit tests:
 
 ```bash
-cargo test --release --all
+cargo +nightly-2021-02-20 test --release --all
 ```
 
 Running full suite of checks, tests, formatting and linting:
@@ -79,7 +79,7 @@ If you are building a tagged release from `master` branch and want to install th
 This will install the executable `joystream-node` to your `~/.cargo/bin` folder, which you would normally have in your `$PATH` environment.
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo install joystream-node --path node/ --locked
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 install joystream-node --path node/ --locked
 ```
 
 Now you can run and connect to the testnet:

+ 2 - 1
package.json

@@ -49,7 +49,8 @@
     "rxjs": "^7.4.0",
     "typeorm": "0.2.34",
     "pg": "^8.4.0",
-    "chalk": "^4.0.0"
+    "chalk": "^4.0.0",
+    "@joystream/warthog": "2.39.0"
   },
   "devDependencies": {
     "eslint": "^7.25.0",

+ 6 - 6
pioneer/packages/joy-proposals/src/Proposal/Body.tsx

@@ -16,7 +16,7 @@ import { formatBalance } from '@polkadot/util';
 import PromiseComponent from '@polkadot/joy-utils/react/components/PromiseComponent';
 import ReactMarkdown from 'react-markdown';
 import { StakingPolicy } from '@joystream/types/hiring';
-import { WorkingGroup, WorkingGroupKey } from '@joystream/types/common';
+import { WorkingGroup } from '@joystream/types/common';
 import { ApplicationsDetailsByOpening } from '@polkadot/joy-utils/react/components/working-groups/ApplicationDetails';
 import { LeadInfoFromId } from '@polkadot/joy-utils/react/components/working-groups/LeadInfo';
 import { formatReward } from '@polkadot/joy-utils/functions/format';
@@ -269,7 +269,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
         : <ApplicationsDetailsByOpening
           openingId={openingId.toNumber()}
           acceptedIds={[succesfulApplicationId.toNumber()]}
-          group={workingGroup.type as WorkingGroupKey}/>,
+          group={workingGroup.type}/>,
       true
     )
   ],
@@ -280,7 +280,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -291,7 +291,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -302,7 +302,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
       'Lead',
       historical
         ? `#${(leadId as WorkerId).toNumber()}`
-        : <LeadInfoFromId group={(group as WorkingGroup).type as WorkingGroupKey} leadId={(leadId as WorkerId).toNumber()}/>,
+        : <LeadInfoFromId group={(group as WorkingGroup).type} leadId={(leadId as WorkerId).toNumber()}/>,
       true
     )
   ],
@@ -321,7 +321,7 @@ const paramParsers: { [k in ProposalType]: (params: SpecificProposalDetails<k>,
         'Lead',
         historical
           ? `#${leadId.toNumber()}`
-          : <LeadInfoFromId group={workingGroup.type as WorkingGroupKey} leadId={leadId.toNumber()}/>,
+          : <LeadInfoFromId group={workingGroup.type} leadId={leadId.toNumber()}/>,
         true
       )
     ];

+ 3 - 0
query-node/build.sh

@@ -23,6 +23,9 @@ yarn ts-node --project ./mappings/tsconfig.json ./mappings/scripts/postCodegen.t
 # and are in line with root workspace resolutions
 yarn
 
+# Add missing typeorm binary symlink
+ln -s ../../../../../node_modules/typeorm/cli.js ./generated/graphql-server/node_modules/.bin/typeorm
+
 yarn workspace query-node codegen
 yarn workspace query-node build
 

+ 5 - 2
query-node/codegen/package.json

@@ -4,8 +4,11 @@
   "description": "Hydra codegen tools for Joystream Query Node",
   "author": "",
   "license": "ISC",
+  "scripts": {
+    "postinstall": "cd .. && yarn workspace query-node-mappings postHydraCLIInstall"
+  },
   "dependencies": {
-    "@joystream/hydra-cli": "3.1.0-alpha.1",
-    "@joystream/hydra-typegen": "3.1.0-alpha.1"
+    "@joystream/hydra-cli": "3.1.0-alpha.13",
+    "@joystream/hydra-typegen": "3.1.0-alpha.13"
   }
 }

File diff suppressed because it is too large
+ 618 - 110
query-node/codegen/yarn.lock


+ 4 - 4
query-node/mappings/bootstrap-data/types.ts

@@ -38,14 +38,14 @@ export type VideoCategoryJson = {
   id: string
   name: string
   createdInBlock: number
-  createdAt: Date
-  updatedAt: Date
+  createdAt: string
+  updatedAt: string
 }
 
 export type ChannelCategoryJson = {
   id: string
   name: string
   createdInBlock: number
-  createdAt: Date
-  updatedAt: Date
+  createdAt: string
+  updatedAt: string
 }

+ 1 - 1
query-node/mappings/common.ts

@@ -205,7 +205,7 @@ type EntityClass<T extends BaseModel> = {
   name: string
 }
 
-type RelationsArr<T extends BaseModel> = Exclude<
+export type RelationsArr<T extends BaseModel> = Exclude<
   keyof T & string,
   { [K in keyof T]: T[K] extends BaseModel | undefined ? '' : T[K] extends BaseModel[] | undefined ? '' : K }[keyof T]
 >[]

+ 10 - 6
query-node/mappings/content/channel.ts

@@ -4,7 +4,7 @@ eslint-disable @typescript-eslint/naming-convention
 import { EventContext, StoreContext } from '@joystream/hydra-common'
 import { Content } from '../generated/types'
 import { convertContentActorToChannelOwner, processChannelMetadata } from './utils'
-import { Channel, ChannelCategory, StorageDataObject } from 'query-node/dist/model'
+import { Channel, ChannelCategory, StorageDataObject, Membership } from 'query-node/dist/model'
 import { deserializeMetadata, inconsistentState, logger } from '../common'
 import { ChannelCategoryMetadata, ChannelMetadata } from '@joystream/metadata-protobuf'
 import { integrateMeta } from '@joystream/metadata-protobuf/utils'
@@ -14,9 +14,7 @@ import { removeDataObject } from '../storage/utils'
 export async function content_ChannelCreated(ctx: EventContext & StoreContext): Promise<void> {
   const { store, event } = ctx
   // read event data
-  const [contentActor, channelId, runtimeChannel, channelCreationParameters] = new Content.ChannelCreatedEvent(
-    event
-  ).params
+  const [contentActor, channelId, , channelCreationParameters] = new Content.ChannelCreatedEvent(event).params
 
   // create entity
   const channel = new Channel({
@@ -26,12 +24,14 @@ export async function content_ChannelCreated(ctx: EventContext & StoreContext):
     videos: [],
     createdInBlock: event.blockNumber,
     rewardAccount: channelCreationParameters.reward_account.unwrapOr(undefined)?.toString(),
-    deletionPrizeDestAccount: runtimeChannel.deletion_prize_source_account_id.toString(),
     // fill in auto-generated fields
     createdAt: new Date(event.blockTimestamp),
     updatedAt: new Date(event.blockTimestamp),
     // prepare channel owner (handles fields `ownerMember` and `ownerCuratorGroup`)
     ...(await convertContentActorToChannelOwner(store, contentActor)),
+    collaborators: Array.from(channelCreationParameters.collaborators).map(
+      (id) => new Membership({ id: id.toString() })
+    ),
   })
 
   // deserialize & process metadata
@@ -76,13 +76,17 @@ export async function content_ChannelUpdated(ctx: EventContext & StoreContext):
 
   // prepare changed reward account
   const newRewardAccount = channelUpdateParameters.reward_account.unwrapOr(null)
-
   // reward account change happened?
   if (newRewardAccount) {
     // this will change the `channel`!
     channel.rewardAccount = newRewardAccount.unwrapOr(undefined)?.toString()
   }
 
+  const newCollaborators = channelUpdateParameters.collaborators.unwrapOr(undefined)
+  if (newCollaborators) {
+    channel.collaborators = Array.from(newCollaborators).map((id) => new Membership({ id: id.toString() }))
+  }
+
   // set last update time
   channel.updatedAt = new Date(event.blockTimestamp)
 

+ 5 - 3
query-node/mappings/package.json

@@ -11,6 +11,8 @@
     "lint": "eslint . --quiet --ext .ts",
     "checks": "prettier ./ --check && yarn lint",
     "format": "prettier ./ --write ",
+    "postinstall": "yarn ts-node ./scripts/postInstall.ts",
+    "postHydraCLIInstall": "yarn ts-node ./scripts/postHydraCLIInstall.ts",
     "bootstrap-data:fetch:members": "yarn ts-node ./bootstrap-data/scripts/fetchMembersData.ts",
     "bootstrap-data:fetch:categories": "yarn ts-node ./bootstrap-data/scripts/fetchCategories.ts",
     "bootstrap-data:fetch:workingGroups": "yarn ts-node ./bootstrap-data/scripts/fetchWorkingGroupsData.ts",
@@ -18,12 +20,12 @@
   },
   "dependencies": {
     "@polkadot/types": "5.9.1",
-    "@joystream/hydra-common": "3.1.0-alpha.1",
-    "@joystream/hydra-db-utils": "3.1.0-alpha.1",
+    "@joystream/hydra-common": "3.1.0-alpha.13",
+    "@joystream/hydra-db-utils": "3.1.0-alpha.13",
     "@joystream/metadata-protobuf": "^1.0.0",
     "@joystream/sumer-types": "npm:@joystream/types@^0.16.0",
     "@joystream/types": "^0.17.0",
-    "@joystream/warthog": "2.35.0",
+    "@joystream/warthog": "2.39.0",
     "@apollo/client": "^3.2.5"
   },
   "devDependencies": {

+ 23 - 0
query-node/mappings/scripts/postHydraCLIInstall.ts

@@ -0,0 +1,23 @@
+// A script to be executed post hydra-cli install, that may include patches for Hydra CLI
+import path from 'path'
+import { replaceInFile } from './utils'
+
+// FIXME: Temporary fix for missing JOIN and HAVING conditions in search queries (Hydra)
+const searchServiceTemplatePath = path.resolve(
+  __dirname,
+  '../../codegen/node_modules/@joystream/hydra-cli/lib/src/templates/textsearch/service.ts.mst'
+)
+
+replaceInFile({
+  filePath: searchServiceTemplatePath,
+  regex: /queries = queries\.concat\(generateSqlQuery\(repositories\[index\]\.metadata\.tableName, WHERE\)\);/,
+  newContent:
+    'queries = queries.concat(generateSqlQuery(repositories[index].metadata.tableName, qb.createJoinExpression(), WHERE, qb.createHavingExpression()));',
+})
+
+replaceInFile({
+  filePath: searchServiceTemplatePath,
+  regex: /const generateSqlQuery =[\s\S]+\+ where;/,
+  newContent: `const generateSqlQuery = (table: string, joins: string, where: string, having: string) =>
+  \`SELECT '\${table}_' || "\${table}"."id" AS unique_id FROM "\${table}" \` + joins + ' ' + where + ' ' + having;`,
+})

+ 45 - 0
query-node/mappings/scripts/postInstall.ts

@@ -0,0 +1,45 @@
+// A script to be executed post query-node install, that may include workarounds in Hydra node_modules
+import path from 'path'
+import { replaceInFile } from './utils'
+
+// FIXME: Temporarily remove broken sanitizeNullCharacter call
+const subscribersJsPath = path.resolve(
+  __dirname,
+  '../../../node_modules/@joystream/hydra-processor/lib/db/subscribers.js'
+)
+replaceInFile({
+  filePath: subscribersJsPath,
+  regex: /sanitizeNullCharacter\(entity, field\);/g,
+  newContent: '//sanitizeNullCharacter(entity, field)',
+})
+
+// FIXME: Temporarily replace broken relations resolution in @joystream/warthog
+const dataLoaderJsPath = path.resolve(
+  __dirname,
+  '../../../node_modules/@joystream/warthog/dist/middleware/DataLoaderMiddleware.js'
+)
+replaceInFile({
+  filePath: dataLoaderJsPath,
+  regex: /return context\.connection\.relationIdLoader[\s\S]+return group\.related;\s+\}\);\s+\}\)/,
+  newContent: `return Promise.all(
+    entities.map(entity => context.connection.relationLoader.load(relation, entity))
+  ).then(function (results) {
+    return results.map(function (related) {
+      return (relation.isManyToOne || relation.isOneToOne) ? related[0] : related
+    })
+  })`,
+})
+
+// FIXME: Temporary fix for "table name x specified more than once"
+const baseServiceJsPath = path.resolve(__dirname, '../../../node_modules/@joystream/warthog/dist/core/BaseService.js')
+replaceInFile({
+  filePath: baseServiceJsPath,
+  regex: /function common\(parameters, localIdColumn, foreignTableName, foreignColumnMap, foreignColumnName\) \{[^}]+\}/,
+  newContent: `function common(parameters, localIdColumn, foreignTableName, foreignColumnMap, foreignColumnName) {
+    const uuid = require('uuid/v4')
+    const foreignTableAlias = uuid().replace('-', '')
+    var foreingIdColumn = "\\"" + foreignTableAlias + "\\".\\"" + foreignColumnMap[foreignColumnName] + "\\"";
+    parameters.topLevelQb.leftJoin(foreignTableName, foreignTableAlias, localIdColumn + " = " + foreingIdColumn);
+    addWhereCondition(parameters, foreignTableAlias, foreignColumnMap);
+  }`,
+})

+ 19 - 0
query-node/mappings/scripts/utils.ts

@@ -0,0 +1,19 @@
+import fs from 'fs'
+import { blake2AsHex } from '@polkadot/util-crypto'
+
+type ReplaceLinesInFileParams = {
+  filePath: string
+  regex: RegExp
+  newContent: string
+}
+
+export function replaceInFile({ filePath, regex, newContent }: ReplaceLinesInFileParams): void {
+  const paramsHash = blake2AsHex(filePath + '|' + regex.source + '|' + newContent)
+  const startMark = `/* BEGIN REPLACED CONTENT ${paramsHash} */`
+  const endMark = `/* END REPLACED CONTENT ${paramsHash} */`
+  const fileContent = fs.readFileSync(filePath).toString()
+  if (fileContent.includes(startMark)) {
+    return
+  }
+  fs.writeFileSync(filePath, fileContent.replace(regex, `${startMark}\n${newContent}\n${endMark}`))
+}

+ 49 - 71
query-node/mappings/storage/index.ts

@@ -18,12 +18,9 @@ import {
   StorageDataObject,
   StorageSystemParameters,
   GeoCoordinates,
-  StorageBagDistributionAssignment,
-  StorageBagStorageAssignment,
 } from 'query-node/dist/model'
 import BN from 'bn.js'
-import { getById } from '../common'
-import { In } from 'typeorm'
+import { getById, inconsistentState } from '../common'
 import {
   processDistributionBucketFamilyMetadata,
   processDistributionOperatorMetadata,
@@ -141,25 +138,15 @@ export async function storage_StorageBucketsUpdatedForBag({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [bagId, addedBucketsIds, removedBucketsIds] = new Storage.StorageBucketsUpdatedForBagEvent(event).params
+  const [bagId, addedBucketsSet, removedBucketsSet] = new Storage.StorageBucketsUpdatedForBagEvent(event).params
   // Get or create bag
-  const storageBag = await getBag(store, bagId)
-  const assignmentsToRemove = await store.getMany(StorageBagStorageAssignment, {
-    where: {
-      storageBag,
-      storageBucket: { id: In(Array.from(removedBucketsIds).map((bucketId) => bucketId.toString())) },
-    },
-  })
-  const assignmentsToAdd = Array.from(addedBucketsIds).map(
-    (bucketId) =>
-      new StorageBagStorageAssignment({
-        id: `${storageBag.id}-${bucketId.toString()}`,
-        storageBag,
-        storageBucket: new StorageBucket({ id: bucketId.toString() }),
-      })
-  )
-  await Promise.all(assignmentsToRemove.map((a) => store.remove<StorageBagStorageAssignment>(a)))
-  await Promise.all(assignmentsToAdd.map((a) => store.save<StorageBagStorageAssignment>(a)))
+  const storageBag = await getBag(store, bagId, ['storageBuckets'])
+  const removedBucketsIds = Array.from(removedBucketsSet).map((id) => id.toString())
+  const addedBucketsIds = Array.from(addedBucketsSet).map((id) => id.toString())
+  storageBag.storageBuckets = (storageBag.storageBuckets || [])
+    .filter((bucket) => !removedBucketsIds.includes(bucket.id))
+    .concat(addedBucketsIds.map((id) => new StorageBucket({ id })))
+  await store.save<StorageBag>(storageBag)
 }
 
 export async function storage_VoucherChanged({ event, store }: EventContext & StoreContext): Promise<void> {
@@ -189,11 +176,21 @@ export async function storage_StorageBucketVoucherLimitsSet({
 export async function storage_StorageBucketDeleted({ event, store }: EventContext & StoreContext): Promise<void> {
   const [bucketId] = new Storage.StorageBucketDeletedEvent(event).params
  // TODO: Cascade remove on db level (would require changes in Hydra / committing autogenerated files)
-  const assignments = await store.getMany(StorageBagStorageAssignment, {
-    where: { storageBucket: { id: bucketId.toString() } },
+  const storageBucket = await store.get(StorageBucket, {
+    where: { id: bucketId.toString() },
+    relations: ['bags', 'bags.storageBuckets'],
   })
-  await Promise.all(assignments.map((a) => store.remove<StorageBagStorageAssignment>(a)))
-  await store.remove<StorageBucket>(new StorageBucket({ id: bucketId.toString() }))
+  if (!storageBucket) {
+    inconsistentState(`Storage bucket by id ${bucketId.toString()} not found!`)
+  }
+  // Remove relations
+  await Promise.all(
+    (storageBucket.bags || []).map((bag) => {
+      bag.storageBuckets = (bag.storageBuckets || []).filter((bucket) => bucket.id !== bucketId.toString())
+      return store.save<StorageBag>(bag)
+    })
+  )
+  await store.remove<StorageBucket>(storageBucket)
 }
 
 // DYNAMIC BAGS
@@ -202,36 +199,17 @@ export async function storage_DynamicBagCreated({ event, store }: EventContext &
   const storageBag = new StorageBag({
     id: getDynamicBagId(bagId),
     owner: getDynamicBagOwner(bagId),
+    storageBuckets: Array.from(storageBucketIdsSet).map((id) => new StorageBucket({ id: id.toString() })),
+    distributionBuckets: Array.from(distributionBucketIdsSet).map(
+      (id) => new DistributionBucket({ id: id.toString() })
+    ),
   })
-  const storageAssignments = Array.from(storageBucketIdsSet).map(
-    (bucketId) =>
-      new StorageBagStorageAssignment({
-        id: `${storageBag.id}-${bucketId.toString()}`,
-        storageBag,
-        storageBucket: new StorageBucket({ id: bucketId.toString() }),
-      })
-  )
-  const distributionAssignments = Array.from(distributionBucketIdsSet).map(
-    (bucketId) =>
-      new StorageBagDistributionAssignment({
-        id: `${storageBag.id}-${bucketId.toString()}`,
-        storageBag,
-        distributionBucket: new DistributionBucket({ id: bucketId.toString() }),
-      })
-  )
   await store.save<StorageBag>(storageBag)
-  await Promise.all(storageAssignments.map((a) => store.save<StorageBagStorageAssignment>(a)))
-  await Promise.all(distributionAssignments.map((a) => store.save<StorageBagDistributionAssignment>(a)))
 }
 
 export async function storage_DynamicBagDeleted({ event, store }: EventContext & StoreContext): Promise<void> {
   const [, bagId] = new Storage.DynamicBagDeletedEvent(event).params
   const storageBag = await getDynamicBag(store, bagId, ['objects'])
-  // TODO: Cascade remove on db level (would require changes in Hydra / comitting autogenerated files)
-  const storageAssignments = await store.getMany(StorageBagStorageAssignment, { where: { storageBag } })
-  const distributionAssignments = await store.getMany(StorageBagDistributionAssignment, { where: { storageBag } })
-  await Promise.all(storageAssignments.map((a) => store.remove<StorageBagStorageAssignment>(a)))
-  await Promise.all(distributionAssignments.map((a) => store.remove<StorageBagDistributionAssignment>(a)))
   await store.remove<StorageBag>(storageBag)
 }
 
@@ -341,36 +319,36 @@ export async function storage_DistributionBucketStatusUpdated({
 export async function storage_DistributionBucketDeleted({ event, store }: EventContext & StoreContext): Promise<void> {
   const [, bucketId] = new Storage.DistributionBucketDeletedEvent(event).params
  // TODO: Cascade remove on db level (would require changes in Hydra / committing autogenerated files)
-  const assignments = await store.getMany(StorageBagDistributionAssignment, {
-    where: { distributionBucket: { id: bucketId.toString() } },
+  const distributionBucket = await store.get(DistributionBucket, {
+    where: { id: bucketId.toString() },
+    relations: ['bags', 'bags.distributionBuckets'],
   })
-  await Promise.all(assignments.map((a) => store.remove<StorageBagDistributionAssignment>(a)))
-  await store.remove<DistributionBucket>(new DistributionBucket({ id: bucketId.toString() }))
+  if (!distributionBucket) {
+    inconsistentState(`Distribution bucket by id ${bucketId.toString()} not found!`)
+  }
+  // Remove relations
+  await Promise.all(
+    (distributionBucket.bags || []).map((bag) => {
+      bag.distributionBuckets = (bag.distributionBuckets || []).filter((bucket) => bucket.id !== bucketId.toString())
+      return store.save<StorageBag>(bag)
+    })
+  )
+  await store.remove<DistributionBucket>(distributionBucket)
 }
 
 export async function storage_DistributionBucketsUpdatedForBag({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [bagId, , addedBucketsIds, removedBucketsIds] = new Storage.DistributionBucketsUpdatedForBagEvent(event).params
+  const [bagId, , addedBucketsSet, removedBucketsSet] = new Storage.DistributionBucketsUpdatedForBagEvent(event).params
   // Get or create bag
-  const storageBag = await getBag(store, bagId)
-  const assignmentsToRemove = await store.getMany(StorageBagDistributionAssignment, {
-    where: {
-      storageBag,
-      distributionBucket: { id: In(Array.from(removedBucketsIds).map((bucketId) => bucketId.toString())) },
-    },
-  })
-  const assignmentsToAdd = Array.from(addedBucketsIds).map(
-    (bucketId) =>
-      new StorageBagDistributionAssignment({
-        id: `${storageBag.id}-${bucketId.toString()}`,
-        storageBag,
-        distributionBucket: new DistributionBucket({ id: bucketId.toString() }),
-      })
-  )
-  await Promise.all(assignmentsToRemove.map((a) => store.remove<StorageBagDistributionAssignment>(a)))
-  await Promise.all(assignmentsToAdd.map((a) => store.save<StorageBagDistributionAssignment>(a)))
+  const storageBag = await getBag(store, bagId, ['distributionBuckets'])
+  const removedBucketsIds = Array.from(removedBucketsSet).map((id) => id.toString())
+  const addedBucketsIds = Array.from(addedBucketsSet).map((id) => id.toString())
+  storageBag.distributionBuckets = (storageBag.distributionBuckets || [])
+    .filter((bucket) => !removedBucketsIds.includes(bucket.id))
+    .concat(addedBucketsIds.map((id) => new DistributionBucket({ id })))
+  await store.save<StorageBag>(storageBag)
 }
 
 export async function storage_DistributionBucketModeUpdated({

+ 8 - 4
query-node/mappings/storage/utils.ts

@@ -17,7 +17,7 @@ import {
   DistributionBucketFamily,
 } from 'query-node/dist/model'
 import BN from 'bn.js'
-import { bytesToString, inconsistentState, getById } from '../common'
+import { bytesToString, inconsistentState, getById, RelationsArr } from '../common'
 import { In } from 'typeorm'
 import { unsetAssetRelations } from '../content/utils'
 
@@ -101,7 +101,7 @@ export function getBagId(bagId: BagId) {
 export async function getDynamicBag(
   store: DatabaseManager,
   bagId: DynamicBagId,
-  relations?: 'objects'[]
+  relations?: RelationsArr<StorageBag>
 ): Promise<StorageBag> {
   return getById(store, StorageBag, getDynamicBagId(bagId), relations)
 }
@@ -109,7 +109,7 @@ export async function getDynamicBag(
 export async function getStaticBag(
   store: DatabaseManager,
   bagId: StaticBagId,
-  relations?: 'objects'[]
+  relations?: RelationsArr<StorageBag>
 ): Promise<StorageBag> {
   const id = getStaticBagId(bagId)
   const bag = await store.get(StorageBag, { where: { id }, relations })
@@ -125,7 +125,11 @@ export async function getStaticBag(
   return bag
 }
 
-export async function getBag(store: DatabaseManager, bagId: BagId, relations?: 'objects'[]): Promise<StorageBag> {
+export async function getBag(
+  store: DatabaseManager,
+  bagId: BagId,
+  relations?: RelationsArr<StorageBag>
+): Promise<StorageBag> {
   return bagId.isStatic
     ? getStaticBag(store, bagId.asStatic, relations)
     : getDynamicBag(store, bagId.asDynamic, relations)

+ 1 - 1
query-node/package.json

@@ -41,7 +41,7 @@
     "tslib": "^2.0.0",
     "@types/bn.js": "^4.11.6",
     "bn.js": "^5.1.2",
-    "@joystream/hydra-processor": "3.1.0-alpha.1"
+    "@joystream/hydra-processor": "3.1.0-alpha.13"
   },
   "volta": {
 		"extends": "../package.json"

+ 5 - 3
query-node/schemas/content.graphql

@@ -35,9 +35,6 @@ type Channel @entity {
   "Reward account where revenue is sent if set."
   rewardAccount: String
 
-  "Destination account for the prize associated with channel deletion"
-  deletionPrizeDestAccount: String!
-
   "The title of the Channel"
   title: String @fulltext(query: "search")
 
@@ -61,9 +58,14 @@ type Channel @entity {
   "The primary langauge of the channel's content"
   language: Language
 
+  "List of videos that belong to the channel"
   videos: [Video!]! @derivedFrom(field: "channel")
 
+  "Number of the block the channel was created in"
   createdInBlock: Int!
+
+  "List of channel collaborators (members)"
+  collaborators: [Membership!]
 }
 
 type CuratorGroup @entity {

+ 4 - 0
query-node/schemas/membership.graphql

@@ -33,5 +33,9 @@ type Membership @entity {
   "The type of subscription the member has purchased if any."
   subscription: Int
 
+  "List of channels the member owns"
   channels: [Channel!]! @derivedFrom(field: "ownerMember")
+
+  "List of channels the member has collaborator access to"
+  collaboratorInChannels: [Channel!] @derivedFrom(field: "collaborators")
 }

+ 8 - 38
query-node/schemas/storage.graphql

@@ -105,8 +105,8 @@ type StorageBucket @entity {
   "Whether the bucket is accepting any new storage bags"
   acceptingNewBags: Boolean!
 
-  "Assignments to store a bag"
-  bagAssignments: [StorageBagStorageAssignment!] @derivedFrom(field: "storageBucket")
+  "Storage bags assigned to the bucket"
+  bags: [StorageBag!] @derivedFrom(field: "storageBuckets")
 
   "Bucket's data object size limit in bytes"
   dataObjectsSizeLimit: BigInt!
@@ -151,46 +151,16 @@ type StorageBag @entity {
   "Data objects in the bag"
   objects: [StorageDataObject!] @derivedFrom(field: "storageBag")
 
-  "Assignments to a storage bucket"
-  storageAssignments: [StorageBagStorageAssignment!] @derivedFrom(field: "storageBag")
+  "Storage buckets assigned to the bag"
+  storageBuckets: [StorageBucket!]
 
-  "Assignments to a distribution bucket"
-  distirbutionAssignments: [StorageBagDistributionAssignment!] @derivedFrom(field: "storageBag")
+  "Distribution buckets assigned to the bag"
+  distributionBuckets: [DistributionBucket!]
 
   "Owner of the storage bag"
   owner: StorageBagOwner!
 }
 
-type StorageBagStorageAssignment @entity {
-  "{storageBagId-storageBucketId}"
-  id: ID!
-
-  "Storage bag to be stored"
-  storageBag: StorageBag!
-
-  "Storage bucket that should store the bag"
-  storageBucket: StorageBucket!
-
-  # Relationship filtering workaround
-  storageBagId: ID
-  storageBucketId: ID
-}
-
-type StorageBagDistributionAssignment @entity {
-  "{storageBagId-distributionBucketId}"
-  id: ID!
-
-  "Storage bag to be distributed"
-  storageBag: StorageBag!
-
-  "Distribution bucket that should distribute the bag"
-  distributionBucket: DistributionBucket!
-
-  # Relationship filtering workaround
-  storageBagId: ID
-  distributionBucketId: ID
-}
-
 type DataObjectTypeChannelAvatar @variant {
   "Related channel entity"
   channel: Channel!
@@ -318,8 +288,8 @@ type DistributionBucket @entity {
   "Whether the bucket is currently distributing content"
   distributing: Boolean!
 
-  "Assignments to distribute a bag"
-  bagAssignments: [StorageBagDistributionAssignment!] @derivedFrom(field: "distributionBucket")
+  "Storage bags assigned to the bucket"
+  bags: [StorageBag!] @derivedFrom(field: "distributionBuckets")
 }
 
 type DistributionBucketFamily @entity {

+ 8 - 7
runtime-modules/common/src/working_group.rs

@@ -19,24 +19,25 @@ pub enum WorkingGroup {
         /// Forum working group: working_group::Instance1.
         Forum,
     */
+
     /// Storage working group: working_group::Instance2.
-    Storage = 2isize,
+    Storage,
 
     /// Storage working group: working_group::Instance3.
-    Content = 3isize,
+    Content,
 
     /// Operations working group: working_group::Instance4.
-    OperationsAlpha = 4isize,
+    OperationsAlpha,
 
     /// Gateway working group: working_group::Instance5.
-    Gateway = 5isize,
+    Gateway,
 
     /// Distribution working group: working_group::Instance6.
-    Distribution = 6isize,
+    Distribution,
 
     /// Operations working group: working_group::Instance7.
-    OperationsBeta = 7isize,
+    OperationsBeta,
 
     /// Operations working group: working_group::Instance8.
-    OperationsGamma = 8isize,
+    OperationsGamma,
 }

+ 6 - 0
runtime-modules/content/src/errors.rs

@@ -37,6 +37,9 @@ decl_error! {
         /// Member authentication failed
         MemberAuthFailed,
 
+        /// A provided collaborator member id does not correspond to a valid member
+        CollaboratorIsNotValidMember,
+
         /// Curator authentication failed
         CuratorAuthFailed,
 
@@ -46,6 +49,9 @@ decl_error! {
         /// Operation cannot be perfomed with this Actor
         ActorNotAuthorized,
 
+        /// This content actor cannot own a channel
+        ActorCannotOwnChannel,
+
         /// A Channel or Video Category does not exist.
         CategoryDoesNotExist,
 

+ 117 - 73
runtime-modules/content/src/lib.rs

@@ -109,6 +109,8 @@ pub trait Trait:
     type DataObjectStorage: storage::DataObjectStorage<Self>;
 }
 
+type DataObjectId<T> = <T as storage::Trait>::DataObjectId;
+
 decl_storage! {
     trait Store for Module<T: Trait> as Content {
         pub ChannelById get(fn channel_by_id): map hasher(blake2_128_concat) T::ChannelId => Channel<T>;
@@ -217,8 +219,9 @@ decl_module! {
             origin,
         ) {
 
+        let sender = ensure_signed(origin)?;
             // Ensure given origin is lead
-            ensure_is_lead::<T>(origin)?;
+            ensure_lead_auth_success::<T>(&sender)?;
 
             //
             // == MUTATION SAFE ==
@@ -245,7 +248,10 @@ decl_module! {
         ) {
 
             // Ensure given origin is lead
-            ensure_is_lead::<T>(origin)?;
+        let sender = ensure_signed(origin)?;
+            // Ensure given origin is lead
+            ensure_lead_auth_success::<T>(&sender)?;
+
 
             // Ensure curator group under provided curator_group_id already exist
             Self::ensure_curator_group_under_given_id_exists(&curator_group_id)?;
@@ -272,7 +278,10 @@ decl_module! {
         ) {
 
             // Ensure given origin is lead
-            ensure_is_lead::<T>(origin)?;
+        let sender = ensure_signed(origin)?;
+            // Ensure given origin is lead
+            ensure_lead_auth_success::<T>(&sender)?;
+
 
             // Ensure curator group under provided curator_group_id already exist, retrieve corresponding one
             let curator_group = Self::ensure_curator_group_exists(&curator_group_id)?;
@@ -308,7 +317,9 @@ decl_module! {
         ) {
 
             // Ensure given origin is lead
-            ensure_is_lead::<T>(origin)?;
+        let sender = ensure_signed(origin)?;
+            // Ensure given origin is lead
+            ensure_lead_auth_success::<T>(&sender)?;
 
             // Ensure curator group under provided curator_group_id already exist, retrieve corresponding one
             let curator_group = Self::ensure_curator_group_exists(&curator_group_id)?;
@@ -336,21 +347,28 @@ decl_module! {
             actor: ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
             params: ChannelCreationParameters<T>,
         ) {
+            // channel creator account
+            let sender = ensure_signed(origin)?;
+
             ensure_actor_authorized_to_create_channel::<T>(
-                origin.clone(),
+                &sender,
                 &actor,
             )?;
 
-            // channel creator account
-            let sender = ensure_signed(origin)?;
-
             // The channel owner will be..
             let channel_owner = Self::actor_to_channel_owner(&actor)?;
 
             // next channel id
             let channel_id = NextChannelId::<T>::get();
 
-            // atomically upload to storage and return the # of uploaded assets
+            // ensure collaborator member ids are valid
+            Self::validate_collaborator_set(&params.collaborators)?;
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            // upload to storage
             if let Some(upload_assets) = params.assets.as_ref() {
                 Self::upload_assets_to_storage(
                     upload_assets,
@@ -359,13 +377,10 @@ decl_module! {
                 )?;
             }
 
-            //
-            // == MUTATION SAFE ==
-            //
-
             // Only increment next channel id if adding content was successful
             NextChannelId::<T>::mutate(|id| *id += T::ChannelId::one());
 
+
             // channel creation
             let channel: Channel<T> = ChannelRecord {
                 owner: channel_owner,
@@ -373,6 +388,7 @@ decl_module! {
                 num_videos: 0u64,
                 is_censored: false,
                 reward_account: params.reward_account.clone(),
+                collaborators: params.collaborators.clone(),
                 // setting the channel owner account as the prize funds account
                 deletion_prize_source_account_id: sender,
             };
@@ -391,42 +407,53 @@ decl_module! {
             channel_id: T::ChannelId,
             params: ChannelUpdateParameters<T>,
         ) {
+            let sender = ensure_signed(origin)?;
+
             // check that channel exists
-            let channel = Self::ensure_channel_exists(&channel_id)?;
+            let mut channel = Self::ensure_channel_exists(&channel_id)?;
 
-            ensure_actor_authorized_to_update_channel::<T>(
-                origin,
+            ensure_actor_authorized_to_update_channel_assets::<T>(
+                &sender,
                 &actor,
-                &channel.owner,
+                &channel,
             )?;
 
-            Self::remove_assets_from_storage(&params.assets_to_remove, &channel_id, &channel.deletion_prize_source_account_id)?;
+            // maybe update the reward account; errors out if the actor is not authorized (e.g. is a collaborator)
+            if let Some(reward_account) = params.reward_account.as_ref() {
+                ensure_actor_can_manage_reward_account::<T>(&sender, &channel.owner, &actor)?;
+                channel.reward_account = reward_account.clone();
+            }
 
-            // atomically upload to storage and return the # of uploaded assets
-            if let Some(upload_assets) = params.assets_to_upload.as_ref() {
-                Self::upload_assets_to_storage(
-                    upload_assets,
-                    &channel_id,
-                    &channel.deletion_prize_source_account_id
-                )?;
+            // maybe update the collaborator set; errors out if the actor is not authorized (e.g. is a collaborator)
+            if let Some(new_collabs) = params.collaborators.as_ref() {
+                ensure_actor_can_manage_collaborators::<T>(&sender, &channel.owner, &actor)?;
+                // ensure collaborator member ids are valid
+                Self::validate_collaborator_set(new_collabs)?;
+
+                channel.collaborators = new_collabs.clone();
             }
 
             //
             // == MUTATION SAFE ==
             //
 
-            let mut channel = channel;
-
-            // Maybe update the reward account
-            if let Some(reward_account) = &params.reward_account {
-                channel.reward_account = reward_account.clone();
+            // upload assets to storage
+            if let Some(upload_assets) = params.assets_to_upload.as_ref() {
+                Self::upload_assets_to_storage(
+                    upload_assets,
+                    &channel_id,
+                    &sender,
+                )?;
             }
 
+            // remove assets from storage
+            Self::remove_assets_from_storage(&params.assets_to_remove, &channel_id, &sender)?;
+
             // Update the channel
             ChannelById::<T>::insert(channel_id, channel.clone());
 
             Self::deposit_event(RawEvent::ChannelUpdated(actor, channel_id, channel, params));
-        }
+}
 
         // extrinsics for channel deletion
         #[weight = 10_000_000] // TODO: adjust weight
@@ -436,12 +463,13 @@ decl_module! {
             channel_id: T::ChannelId,
             num_objects_to_delete: u64,
         ) -> DispatchResult {
+
+            let sender = ensure_signed(origin)?;
             // check that channel exists
             let channel = Self::ensure_channel_exists(&channel_id)?;
 
-            // ensure permissions
-            ensure_actor_authorized_to_update_channel::<T>(
-                origin,
+            ensure_actor_authorized_to_delete_channel::<T>(
+                &sender,
                 &actor,
                 &channel.owner,
             )?;
@@ -461,6 +489,10 @@ decl_module! {
                     Error::<T>::InvalidBagSizeSpecified
                 );
 
+                //
+                // == MUTATION SAFE ==
+                //
+
                 // construct collection of assets to be removed
                 let assets_to_remove = T::DataObjectStorage::get_data_objects_id(&bag_id);
 
@@ -468,20 +500,16 @@ decl_module! {
                 Self::remove_assets_from_storage(
                     &assets_to_remove,
                     &channel_id,
-                    &channel.deletion_prize_source_account_id
+                    &sender,
                 )?;
 
                 // delete channel dynamic bag
                 Storage::<T>::delete_dynamic_bag(
-                    channel.deletion_prize_source_account_id,
-                    dyn_bag
+                    sender,
+                    dyn_bag,
                 )?;
             }
 
-            //
-            // == MUTATION SAFE ==
-            //
-
             // remove channel from on chain state
             ChannelById::<T>::remove(channel_id);
 
@@ -619,32 +647,33 @@ decl_module! {
             channel_id: T::ChannelId,
             params: VideoCreationParameters<T>,
         ) {
+            let sender = ensure_signed(origin.clone())?;
 
             // check that channel exists
             let channel = Self::ensure_channel_exists(&channel_id)?;
 
-            ensure_actor_authorized_to_update_channel::<T>(
-                origin,
+            ensure_actor_authorized_to_update_channel_assets::<T>(
+                &sender,
                 &actor,
-                &channel.owner,
+                &channel,
             )?;
 
             // next video id
             let video_id = NextVideoId::<T>::get();
 
-            // atomically upload to storage and return the # of uploaded assets
+            //
+            // == MUTATION SAFE ==
+            //
+
+            // upload to storage
             if let Some(upload_assets) = params.assets.as_ref() {
                 Self::upload_assets_to_storage(
                     upload_assets,
                     &channel_id,
-                    &channel.deletion_prize_source_account_id
+                    &sender,
                 )?;
             }
 
-            //
-            // == MUTATION SAFE ==
-            //
-
             // create the video struct
             let video: Video<T> = VideoRecord {
                 in_channel: channel_id,
@@ -679,34 +708,37 @@ decl_module! {
             video_id: T::VideoId,
             params: VideoUpdateParameters<T>,
         ) {
+
+            let sender = ensure_signed(origin.clone())?;
             // check that video exists, retrieve corresponding channel id.
             let video = Self::ensure_video_exists(&video_id)?;
 
             let channel_id = video.in_channel;
             let channel = ChannelById::<T>::get(&channel_id);
 
-            ensure_actor_authorized_to_update_channel::<T>(
-                origin,
+            // Check for permission to update channel assets
+            ensure_actor_authorized_to_update_channel_assets::<T>(
+                &sender,
                 &actor,
-                &channel.owner,
+                &channel,
             )?;
 
+            //
+            // == MUTATION SAFE ==
+            //
+
             // remove specified assets from channel bag in storage
-            Self::remove_assets_from_storage(&params.assets_to_remove, &channel_id, &channel.deletion_prize_source_account_id)?;
+            Self::remove_assets_from_storage(&params.assets_to_remove, &channel_id, &sender)?;
 
             // atomically upload to storage and return the # of uploaded assets
             if let Some(upload_assets) = params.assets_to_upload.as_ref() {
                 Self::upload_assets_to_storage(
                     upload_assets,
                     &channel_id,
-                    &channel.deletion_prize_source_account_id
+                    &sender,
                 )?;
             }
 
-            //
-            // == MUTATION SAFE ==
-            //
-
             Self::deposit_event(RawEvent::VideoUpdated(actor, video_id, params));
         }
 
@@ -718,6 +750,8 @@ decl_module! {
             assets_to_remove: BTreeSet<DataObjectId<T>>,
         ) {
 
+           let sender = ensure_signed(origin.clone())?;
+
             // check that video exists
             let video = Self::ensure_video_exists(&video_id)?;
 
@@ -725,12 +759,10 @@ decl_module! {
             let channel_id = video.in_channel;
             let channel = ChannelById::<T>::get(channel_id);
 
-
-            ensure_actor_authorized_to_update_channel::<T>(
-                origin,
+            ensure_actor_authorized_to_update_channel_assets::<T>(
+                &sender,
                 &actor,
-                // The channel owner will be..
-                &channel.owner,
+                &channel,
             )?;
 
             // ensure video can be removed
@@ -739,13 +771,13 @@ decl_module! {
             // Ensure nft for this video have not been issued
             video.ensure_nft_is_not_issued::<T>()?;
 
-            // remove specified assets from channel bag in storage
-            Self::remove_assets_from_storage(&assets_to_remove, &channel_id, &channel.deletion_prize_source_account_id)?;
-
             //
             // == MUTATION SAFE ==
             //
 
+            // remove specified assets from channel bag in storage
+            Self::remove_assets_from_storage(&assets_to_remove, &channel_id, &channel.deletion_prize_source_account_id)?;
+
             // Remove video
             VideoById::<T>::remove(video_id);
 
@@ -984,6 +1016,7 @@ decl_module! {
             metadata: Metadata,
             to: Option<T::MemberId>,
         ) {
+            let sender = ensure_signed(origin.clone())?;
 
             // Ensure given video exists
             let video = Self::ensure_video_exists(&video_id)?;
@@ -994,9 +1027,9 @@ decl_module! {
             let channel_id = video.in_channel;
 
             // Ensure channel exists, retrieve channel owner
-            let channel_owner = Self::ensure_channel_exists(&channel_id)?.owner;
+            let channel = Self::ensure_channel_exists(&channel_id)?;
 
-            ensure_actor_authorized_to_update_channel::<T>(origin, &actor, &channel_owner)?;
+            ensure_actor_authorized_to_update_channel_assets::<T>(&sender, &actor, &channel)?;
 
             // The content owner will be..
             let nft_owner = if let Some(to) = to {
@@ -1188,7 +1221,7 @@ decl_module! {
 
             // Authorize participant under given member id
             let participant_account_id = ensure_signed(origin)?;
-            ensure_member_auth_success::<T>(&participant_id, &participant_account_id)?;
+            ensure_member_auth_success::<T>(&participant_account_id, &participant_id)?;
 
             // Ensure bidder have sufficient balance amount to reserve for bid
             Self::ensure_has_sufficient_balance(&participant_account_id, bid)?;
@@ -1271,7 +1304,7 @@ decl_module! {
 
             // Authorize participant under given member id
             let participant_account_id = ensure_signed(origin)?;
-            ensure_member_auth_success::<T>(&participant_id, &participant_account_id)?;
+            ensure_member_auth_success::<T>(&participant_account_id, &participant_id)?;
 
             // Ensure given video exists
             let video = Self::ensure_video_exists(&video_id)?;
@@ -1312,7 +1345,7 @@ decl_module! {
         ) {
             // Authorize member under given member id
             let account_id = ensure_signed(origin)?;
-            ensure_member_auth_success::<T>(&member_id, &account_id)?;
+            ensure_member_auth_success::<T>(&account_id, &member_id)?;
 
             // Ensure given video exists
             let video = Self::ensure_video_exists(&video_id)?;
@@ -1505,7 +1538,7 @@ decl_module! {
 
             // Authorize participant under given member id
             let participant_account_id = ensure_signed(origin)?;
-            ensure_member_auth_success::<T>(&participant_id, &participant_account_id)?;
+            ensure_member_auth_success::<T>(&participant_account_id, &participant_id)?;
 
             // Ensure given video exists
             let video = Self::ensure_video_exists(&video_id)?;
@@ -1628,6 +1661,8 @@ impl<T: Trait> Module<T> {
                 Ok(ChannelOwner::CuratorGroup(*curator_group_id))
             }
             ContentActor::Member(member_id) => Ok(ChannelOwner::Member(*member_id)),
+            // Lead & collaborators should use their member or curator role to create channels
+            _ => Err(Error::<T>::ActorCannotOwnChannel),
         }
     }
 
@@ -1687,6 +1722,15 @@ impl<T: Trait> Module<T> {
         }
         Ok(())
     }
+
+    fn validate_collaborator_set(collaborators: &BTreeSet<T::MemberId>) -> DispatchResult {
+        // check if all members are valid
+        let res = collaborators
+            .iter()
+            .all(|member_id| <T as ContentActorAuthenticator>::validate_member_id(member_id));
+        ensure!(res, Error::<T>::CollaboratorIsNotValidMember);
+        Ok(())
+    }
 }
 
 // Giza:

+ 1 - 1
runtime-modules/content/src/nft/mod.rs

@@ -237,7 +237,7 @@ impl<T: Trait> Module<T> {
             &nft.transactional_status
         {
             // Authorize participant under given member id
-            ensure_member_auth_success::<T>(&member_id, &participant_account_id)?;
+            ensure_member_auth_success::<T>(participant_account_id, &member_id)?;
 
             if let Some(price) = price {
                 Self::ensure_sufficient_free_balance(participant_account_id, *price)?;

+ 200 - 44
runtime-modules/content/src/permissions/mod.rs

@@ -1,3 +1,19 @@
+// The following table summarizes the permissions in the content subsystem.
+// - Actor role as columns, controller account is Tx sender.
+// - operations on a given channel (=channel=) are rows, which are basically the guards to be
+//   implemented
+// - Entries are conditions to be verified / assertions
+//
+// |                       | *Lead*                   | *Curator*                | *Member*                | *Collaborator*            |
+// |-----------------------+--------------------------+--------------------------+-------------------------+---------------------------|
+// | *assets mgmt*         | channel.owner is curator | curator is channel.owner | member is channel.owner | collab in channel.collabs |
+// | *censorship mgmt*     | true                     | channel.owner is member  | false                   | false                     |
+// | *category mgmt*       | true                     | true                     | false                   | false                     |
+// | *collab. set mgmt*    | channel.owner is curator | curator is channel.owner | member is channel.owner | false                     |
+// | *reward account mgmt* | channel.owner is curator | curator is channel.owner | member is channel.owner | false                     |
+// | *create channel*      | false                    | true                     | true                    | false                     |
+// | *delete channel*      | channel.owner is curator | curator is channel.owner | member is channel.owner | false                     |
+
 mod curator_group;
 
 pub use curator_group::*;
@@ -51,6 +67,9 @@ pub trait ContentActorAuthenticator: frame_system::Trait + membership::Trait {
 
     /// Authorize actor as member
     fn is_member(member_id: &Self::MemberId, account_id: &Self::AccountId) -> bool;
+
+    /// Ensure member id is valid
+    fn validate_member_id(member_id: &Self::MemberId) -> bool;
 }
 
 pub fn ensure_is_valid_curator_id<T: Trait>(curator_id: &T::CuratorId) -> DispatchResult {
@@ -75,8 +94,8 @@ pub fn ensure_curator_auth_success<T: Trait>(
 
 /// Ensure member authorization performed succesfully
 pub fn ensure_member_auth_success<T: Trait>(
-    member_id: &T::MemberId,
     account_id: &T::AccountId,
+    member_id: &T::MemberId,
 ) -> DispatchResult {
     ensure!(
         T::is_member(member_id, account_id),
@@ -91,76 +110,171 @@ pub fn ensure_lead_auth_success<T: Trait>(account_id: &T::AccountId) -> Dispatch
     Ok(())
 }
 
-/// Ensure given `Origin` is lead
-pub fn ensure_is_lead<T: Trait>(origin: T::Origin) -> DispatchResult {
-    let account_id = ensure_signed(origin)?;
-    ensure_lead_auth_success::<T>(&account_id)
-}
-
+/// Ensure actor is authorized to create a channel
 pub fn ensure_actor_authorized_to_create_channel<T: Trait>(
-    origin: T::Origin,
+    sender: &T::AccountId,
     actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
 ) -> DispatchResult {
     match actor {
         // Lead should use their member or curator role to create or update channel assets.
         ContentActor::Lead => Err(Error::<T>::ActorNotAuthorized.into()),
-        ContentActor::Curator(curator_group_id, curator_id) => {
-            let sender = ensure_signed(origin)?;
 
+        ContentActor::Curator(curator_group_id, curator_id) => {
             // Authorize curator, performing all checks to ensure curator can act
             CuratorGroup::<T>::perform_curator_in_group_auth(curator_id, curator_group_id, &sender)
         }
-        ContentActor::Member(member_id) => {
-            let sender = ensure_signed(origin)?;
+        ContentActor::Member(member_id) => ensure_member_auth_success::<T>(sender, member_id),
+        // Collaborators should use their member or curator role to create channels (Lead is handled above).
+        _ => Err(Error::<T>::ActorCannotOwnChannel.into()),
+    }
+}
 
-            ensure_member_auth_success::<T>(member_id, &sender)
+/// Ensure actor is authorized to delete channel
+pub fn ensure_actor_authorized_to_delete_channel<T: Trait>(
+    sender: &T::AccountId,
+    actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
+) -> DispatchResult {
+    match actor {
+        ContentActor::Lead => {
+            // ensure lead is valid
+            ensure_lead_auth_success::<T>(sender)?;
+            // ensure curator
+            ensure_channel_is_owned_by_curators::<T>(channel_owner)?;
+            Ok(())
+        }
+        ContentActor::Curator(curator_group_id, curator_id) => {
+            // ensure curator group is valid
+            CuratorGroup::<T>::perform_curator_in_group_auth(
+                curator_id,
+                curator_group_id,
+                &sender,
+            )?;
+            // ensure group is channel owner
+            ensure_curator_group_is_channel_owner::<T>(channel_owner, curator_group_id)?;
+            Ok(())
         }
+        ContentActor::Member(member_id) => {
+            // ensure valid member
+            ensure_member_auth_success::<T>(&sender, member_id)?;
+            // ensure member is channel owner
+            ensure_member_is_channel_owner::<T>(channel_owner, member_id)?;
+            Ok(())
+        }
+        // collaborators should use their member or curator role to delete channel
+        _ => Err(Error::<T>::ActorNotAuthorized.into()),
     }
 }
 
-// Enure actor can update channels and videos in the channel
-pub fn ensure_actor_authorized_to_update_channel<T: Trait>(
-    origin: T::Origin,
+/// Ensure actor is authorized to manage collaborator set for a channel
+pub fn ensure_actor_can_manage_collaborators<T: Trait>(
+    sender: &T::AccountId,
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
     actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
-    owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
 ) -> DispatchResult {
-    let sender = ensure_signed(origin)?;
-    // Only owner of a channel can update and delete channel assets.
-    // Lead can update and delete curator group owned channel assets.
     match actor {
         ContentActor::Lead => {
-            ensure_lead_auth_success::<T>(&sender)?;
-            if let ChannelOwner::CuratorGroup(_) = owner {
-                Ok(())
-            } else {
-                Err(Error::<T>::ActorNotAuthorized.into())
-            }
+            // ensure lead is valid
+            ensure_lead_auth_success::<T>(sender)?;
+            // ensure curator
+            ensure_channel_is_owned_by_curators::<T>(channel_owner)?;
+            Ok(())
         }
         ContentActor::Curator(curator_group_id, curator_id) => {
-            // Authorize curator, performing all checks to ensure curator can act
+            // ensure curator group is valid
             CuratorGroup::<T>::perform_curator_in_group_auth(
                 curator_id,
                 curator_group_id,
                 &sender,
             )?;
+            // ensure group is channel owner
+            ensure_curator_group_is_channel_owner::<T>(channel_owner, curator_group_id)?;
+            Ok(())
+        }
+        ContentActor::Member(member_id) => {
+            // ensure valid member
+            ensure_member_auth_success::<T>(&sender, member_id)?;
+            // ensure member is channel owner
+            ensure_member_is_channel_owner::<T>(channel_owner, member_id)?;
+            Ok(())
+        }
+        // Collaborators should use their member or curator role in order to manage the collaborator set.
+        ContentActor::Collaborator(_) => Err(Error::<T>::ActorNotAuthorized.into()),
+    }
+}
 
-            // Ensure curator group is the channel owner.
-            ensure!(
-                *owner == ChannelOwner::CuratorGroup(*curator_group_id),
-                Error::<T>::ActorNotAuthorized
-            );
-
+/// Ensure actor is authorized to manage reward account for a channel
+pub fn ensure_actor_can_manage_reward_account<T: Trait>(
+    sender: &T::AccountId,
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
+    actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
+) -> DispatchResult {
+    match actor {
+        ContentActor::Lead => {
+            // ensure lead is valid
+            ensure_lead_auth_success::<T>(sender)?;
+            // ensure curator
+            ensure_channel_is_owned_by_curators::<T>(channel_owner)?;
+            Ok(())
+        }
+        ContentActor::Curator(curator_group_id, curator_id) => {
+            // ensure curator group is valid
+            CuratorGroup::<T>::perform_curator_in_group_auth(
+                curator_id,
+                curator_group_id,
+                &sender,
+            )?;
+            // ensure group is channel owner
+            ensure_curator_group_is_channel_owner::<T>(channel_owner, curator_group_id)?;
             Ok(())
         }
         ContentActor::Member(member_id) => {
-            ensure_member_auth_success::<T>(member_id, &sender)?;
+            // ensure valid member
+            ensure_member_auth_success::<T>(&sender, member_id)?;
+            // ensure member is channel owner
+            ensure_member_is_channel_owner::<T>(channel_owner, member_id)?;
+            Ok(())
+        }
+        // collaborators should use their member or curator role in order to update reward account.
+        _ => Err(Error::<T>::ActorNotAuthorized.into()),
+    }
+}
 
-            // Ensure the member is the channel owner.
+/// Ensure actor is authorized to manage channel assets, video also qualify as assets
+pub fn ensure_actor_authorized_to_update_channel_assets<T: Trait>(
+    sender: &T::AccountId,
+    actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
+    channel: &Channel<T>,
+) -> DispatchResult {
+    // Only owner of a channel can update and delete channel assets.
+    // Lead can update and delete curator group owned channel assets.
+    match actor {
+        ContentActor::Lead => {
+            // ensure lead is valid
+            ensure_lead_auth_success::<T>(sender)?;
+            // ensure curator
+            ensure_channel_is_owned_by_curators::<T>(&channel.owner)?;
+            Ok(())
+        }
+        ContentActor::Curator(curator_group_id, curator_id) => {
+            // ensure curator group is valid
+            CuratorGroup::<T>::perform_curator_in_group_auth(curator_id, curator_group_id, sender)?;
+            // ensure group is channel owner
+            ensure_curator_group_is_channel_owner::<T>(&channel.owner, curator_group_id)?;
+            Ok(())
+        }
+        ContentActor::Member(member_id) => {
+            // ensure valid member
+            ensure_member_auth_success::<T>(sender, member_id)?;
+            // ensure member is channel owner
+            ensure_member_is_channel_owner::<T>(&channel.owner, member_id)?;
+            Ok(())
+        }
+        ContentActor::Collaborator(member_id) => {
             ensure!(
-                *owner == ChannelOwner::Member(*member_id),
+                channel.collaborators.contains(member_id),
                 Error::<T>::ActorNotAuthorized
             );
-
             Ok(())
         }
     }
@@ -176,7 +290,7 @@ pub fn ensure_actor_authorized_to_manage_nft<T: Trait>(
     let sender = ensure_signed(origin)?;
 
     if let NFTOwner::Member(member_id) = nft_owner {
-        ensure_member_auth_success::<T>(member_id, &sender)?;
+        ensure_member_auth_success::<T>(&sender, member_id)?;
 
         ensure!(
             *actor == ContentActor::Member(*member_id),
@@ -210,7 +324,7 @@ pub fn ensure_actor_authorized_to_manage_nft<T: Trait>(
                 );
             }
             ContentActor::Member(member_id) => {
-                ensure_member_auth_success::<T>(member_id, &sender)?;
+                ensure_member_auth_success::<T>(&sender, member_id)?;
 
                 // Ensure the member is the channel owner.
                 ensure!(
@@ -218,12 +332,51 @@ pub fn ensure_actor_authorized_to_manage_nft<T: Trait>(
                     Error::<T>::ActorNotAuthorized
                 );
             }
+            // TODO: is this right? Collaborator case was copied from other `ensure*` check during Giza->NFT branch merge
+            // Collaborators should use their member or curator role in order to manage NFTs.
+            ContentActor::Collaborator(_) => return Err(Error::<T>::ActorNotAuthorized.into()),
         }
     }
     Ok(())
 }
 
-// Enure actor can update or delete channels and videos
+/// Ensure channel is owned by some curators
+pub fn ensure_channel_is_owned_by_curators<T: Trait>(
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
+) -> DispatchResult {
+    match channel_owner {
+        ChannelOwner::CuratorGroup(_) => Ok(()),
+        _ => Err(Error::<T>::ActorNotAuthorized.into()),
+    }
+}
+
+/// Ensure specified valid curator group is channel owner
+pub fn ensure_curator_group_is_channel_owner<T: Trait>(
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
+    group_id: &T::CuratorGroupId,
+) -> DispatchResult {
+    // Ensure curator group is channel owner
+    ensure!(
+        *channel_owner == ChannelOwner::CuratorGroup(*group_id),
+        Error::<T>::ActorNotAuthorized
+    );
+    Ok(())
+}
+
+/// Ensure specified valid member is channel owner
+pub fn ensure_member_is_channel_owner<T: Trait>(
+    channel_owner: &ChannelOwner<T::MemberId, T::CuratorGroupId>,
+    member_id: &T::MemberId,
+) -> DispatchResult {
+    // Ensure member is channel owner.
+    ensure!(
+        *channel_owner == ChannelOwner::Member(*member_id),
+        Error::<T>::ActorNotAuthorized
+    );
+    Ok(())
+}
+
+/// Ensure actor can set featured videos
 pub fn ensure_actor_authorized_to_set_featured_videos<T: Trait>(
     origin: T::Origin,
     actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
@@ -237,6 +390,7 @@ pub fn ensure_actor_authorized_to_set_featured_videos<T: Trait>(
     }
 }
 
+/// Ensure actor can censor
 pub fn ensure_actor_authorized_to_censor<T: Trait>(
     origin: T::Origin,
     actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
@@ -266,13 +420,14 @@ pub fn ensure_actor_authorized_to_censor<T: Trait>(
                 Ok(())
             }
         }
-        ContentActor::Member(_) => {
-            // Members cannot censore channels!
+        _ => {
+            // Members & collaborators cannot censore channels!
             Err(Error::<T>::ActorNotAuthorized.into())
         }
     }
 }
 
+/// Ensure actor can manage categories
 pub fn ensure_actor_authorized_to_manage_categories<T: Trait>(
     origin: T::Origin,
     actor: &ContentActor<T::CuratorGroupId, T::CuratorId, T::MemberId>,
@@ -289,8 +444,8 @@ pub fn ensure_actor_authorized_to_manage_categories<T: Trait>(
             // Authorize curator, performing all checks to ensure curator can act
             CuratorGroup::<T>::perform_curator_in_group_auth(curator_id, curator_group_id, &sender)
         }
-        ContentActor::Member(_) => {
-            // Members cannot censore channels!
+        _ => {
+            // Members & collaborators cannot manage categories!
             Err(Error::<T>::ActorNotAuthorized.into())
         }
     }
@@ -307,6 +462,7 @@ pub enum ContentActor<
     Curator(CuratorGroupId, CuratorId),
     Member(MemberId),
     Lead,
+    Collaborator(MemberId),
 }
 
 impl<

+ 196 - 118
runtime-modules/content/src/tests/channels.rs

@@ -2,8 +2,8 @@
 
 use super::curators;
 use super::mock::*;
-use crate::sp_api_hidden_includes_decl_storage::hidden_include::traits::Currency;
 use crate::*;
+use frame_support::traits::Currency;
 use frame_support::{assert_err, assert_ok};
 
 #[test]
@@ -46,6 +46,7 @@ fn successful_channel_deletion() {
                 assets: Some(assets),
                 meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -78,6 +79,7 @@ fn successful_channel_deletion() {
                 assets: None,
                 meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -129,8 +131,9 @@ fn successful_channel_assets_deletion() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: Some(assets),
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -148,6 +151,7 @@ fn successful_channel_assets_deletion() {
                 new_meta: None,
                 reward_account: None,
                 assets_to_remove: assets_to_remove,
+                collaborators: None,
             },
         ));
     })
@@ -205,8 +209,9 @@ fn succesful_channel_update() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: Some(first_batch),
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -218,9 +223,10 @@ fn succesful_channel_update() {
             channel_id,
             ChannelUpdateParametersRecord {
                 assets_to_upload: Some(second_batch),
-                new_meta: Some(vec![]),
+                new_meta: None,
                 reward_account: None,
                 assets_to_remove: BTreeSet::new(),
+                collaborators: None,
             },
             Ok(()),
         );
@@ -235,6 +241,7 @@ fn succesful_channel_update() {
                 new_meta: None,
                 reward_account: None,
                 assets_to_remove: first_batch_ids,
+                collaborators: None,
             },
             Ok(()),
         );
@@ -278,8 +285,9 @@ fn succesful_channel_creation() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: Some(assets),
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -295,8 +303,9 @@ fn lead_cannot_create_channel() {
                 ContentActor::Lead,
                 ChannelCreationParametersRecord {
                     assets: None,
-                    meta: Some(vec![]),
+                    meta: None,
                     reward_account: None,
+                    collaborators: BTreeSet::new(),
                 }
             ),
             Error::<Test>::ActorNotAuthorized
@@ -317,8 +326,9 @@ fn curator_owned_channels() {
                 ContentActor::Curator(FIRST_CURATOR_GROUP_ID, FIRST_CURATOR_ID),
                 ChannelCreationParametersRecord {
                     assets: None,
-                    meta: Some(vec![]),
+                    meta: None,
                     reward_account: None,
+                    collaborators: BTreeSet::new(),
                 }
             ),
             Error::<Test>::CuratorGroupIsNotActive
@@ -334,8 +344,9 @@ fn curator_owned_channels() {
                 ContentActor::Curator(FIRST_CURATOR_GROUP_ID, SECOND_CURATOR_ID),
                 ChannelCreationParametersRecord {
                     assets: None,
-                    meta: Some(vec![]),
+                    meta: None,
                     reward_account: None,
+                    collaborators: BTreeSet::new(),
                 }
             ),
             Error::<Test>::CuratorIsNotAMemberOfGivenCuratorGroup
@@ -348,8 +359,9 @@ fn curator_owned_channels() {
                 ContentActor::Curator(FIRST_CURATOR_GROUP_ID, FIRST_CURATOR_ID),
                 ChannelCreationParametersRecord {
                     assets: None,
-                    meta: Some(vec![]),
+                    meta: None,
                     reward_account: None,
+                    collaborators: BTreeSet::new(),
                 }
             ),
             Error::<Test>::CuratorAuthFailed
@@ -363,8 +375,9 @@ fn curator_owned_channels() {
             ContentActor::Curator(FIRST_CURATOR_GROUP_ID, FIRST_CURATOR_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             }
         ));
 
@@ -377,13 +390,15 @@ fn curator_owned_channels() {
                     owner: ChannelOwner::CuratorGroup(FIRST_CURATOR_GROUP_ID),
                     is_censored: false,
                     reward_account: None,
-                    deletion_prize_source_account_id: FIRST_CURATOR_ORIGIN,
                     num_videos: 0,
+                    collaborators: BTreeSet::new(),
+                    deletion_prize_source_account_id: FIRST_CURATOR_ORIGIN,
                 },
                 ChannelCreationParametersRecord {
                     assets: None,
-                    meta: Some(vec![]),
+                    meta: None,
                     reward_account: None,
+                    collaborators: BTreeSet::new(),
                 }
             ))
         );
@@ -398,7 +413,8 @@ fn curator_owned_channels() {
                 new_meta: None,
                 reward_account: None,
                 assets_to_remove: BTreeSet::new(),
-            },
+                collaborators: None,
+            }
         ));
 
         // Lead can update curator owned channels
@@ -411,148 +427,208 @@ fn curator_owned_channels() {
                 new_meta: None,
                 reward_account: None,
                 assets_to_remove: BTreeSet::new(),
-            },
+                collaborators: None,
+            }
         ));
     })
 }
 
 #[test]
-fn member_owned_channels() {
+fn invalid_member_cannot_create_channel() {
     with_default_mock_builder(|| {
         // Run to block one to see emitted events
         run_to_block(1);
 
         // Not a member
-        assert_err!(
-            Content::create_channel(
-                Origin::signed(UNKNOWN_ORIGIN),
-                ContentActor::Member(MEMBERS_COUNT + 1),
-                ChannelCreationParametersRecord {
-                    assets: None,
-                    meta: Some(vec![]),
-                    reward_account: None,
-                }
-            ),
-            Error::<Test>::MemberAuthFailed
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(UNKNOWN_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: None,
+                meta: None,
+                reward_account: None,
+                collaborators: BTreeSet::new(),
+            },
+            Err(Error::<Test>::MemberAuthFailed.into()),
         );
+    })
+}
 
-        let channel_id_1 = Content::next_channel_id();
+#[test]
+fn invalid_member_cannot_update_channel() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
 
-        // Member can create the channel
-        assert_ok!(Content::create_channel(
-            Origin::signed(FIRST_MEMBER_ORIGIN),
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
-            }
-        ));
-
-        assert_eq!(
-            System::events().last().unwrap().event,
-            MetaEvent::content(RawEvent::ChannelCreated(
-                ContentActor::Member(FIRST_MEMBER_ID),
-                channel_id_1,
-                ChannelRecord {
-                    owner: ChannelOwner::Member(FIRST_MEMBER_ID),
-                    is_censored: false,
-                    reward_account: None,
-                    deletion_prize_source_account_id: FIRST_MEMBER_ORIGIN,
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
+        );
 
-                    num_videos: 0,
-                },
-                ChannelCreationParametersRecord {
-                    assets: None,
-                    meta: Some(vec![]),
-                    reward_account: None,
-                }
-            ))
+        update_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(UNKNOWN_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            ChannelUpdateParametersRecord {
+                assets_to_upload: None,
+                new_meta: None,
+                reward_account: None,
+                collaborators: None,
+                assets_to_remove: BTreeSet::new(),
+            },
+            Err(Error::<Test>::MemberAuthFailed.into()),
         );
+    })
+}
 
-        let channel_id_2 = Content::next_channel_id();
+#[test]
+fn invalid_member_cannot_delete_channel() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
 
-        // Member can create the channel
-        assert_ok!(Content::create_channel(
-            Origin::signed(SECOND_MEMBER_ORIGIN),
-            ContentActor::Member(SECOND_MEMBER_ID),
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
-            }
-        ));
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
+        );
 
-        assert_eq!(
-            System::events().last().unwrap().event,
-            MetaEvent::content(RawEvent::ChannelCreated(
-                ContentActor::Member(SECOND_MEMBER_ID),
-                channel_id_2,
-                ChannelRecord {
-                    owner: ChannelOwner::Member(SECOND_MEMBER_ID),
-                    is_censored: false,
-                    reward_account: None,
-                    deletion_prize_source_account_id: SECOND_MEMBER_ORIGIN,
+        delete_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(UNKNOWN_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            0u64,
+            Err(Error::<Test>::MemberAuthFailed.into()),
+        );
+    })
+}
 
-                    num_videos: 0,
-                },
-                ChannelCreationParametersRecord {
-                    assets: None,
-                    meta: Some(vec![]),
-                    reward_account: None,
-                }
-            ))
+#[test]
+fn non_authorized_collaborators_cannot_update_channel() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
         );
 
-        // Update channel
-        assert_ok!(Content::update_channel(
-            Origin::signed(FIRST_MEMBER_ORIGIN),
+        // attempt for an non auth. collaborator to update channel assets
+        update_channel_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            ChannelUpdateParametersRecord {
+                assets_to_upload: Some(helper_generate_storage_assets(vec![5])),
+                new_meta: None,
+                reward_account: None,
+                assets_to_remove: vec![DataObjectId::<Test>::one()]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+                collaborators: None,
+            },
+            Err(Error::<Test>::ActorNotAuthorized.into()),
+        );
+
+        // add collaborators
+        update_channel_mock(
+            FIRST_MEMBER_ORIGIN,
             ContentActor::Member(FIRST_MEMBER_ID),
-            channel_id_1,
+            <Test as storage::Trait>::ChannelId::one(),
             ChannelUpdateParametersRecord {
                 assets_to_upload: None,
                 new_meta: None,
                 reward_account: None,
                 assets_to_remove: BTreeSet::new(),
+                collaborators: Some(
+                    vec![COLLABORATOR_MEMBER_ID]
+                        .into_iter()
+                        .collect::<BTreeSet<_>>(),
+                ),
             },
-        ));
+            Ok(()),
+        );
 
-        assert_eq!(
-            System::events().last().unwrap().event,
-            MetaEvent::content(RawEvent::ChannelUpdated(
-                ContentActor::Member(FIRST_MEMBER_ID),
-                channel_id_1,
-                ChannelRecord {
-                    owner: ChannelOwner::Member(FIRST_MEMBER_ID),
-                    is_censored: false,
-                    reward_account: None,
-                    deletion_prize_source_account_id: FIRST_MEMBER_ORIGIN,
+        // attempt for a valid collaborator to update channel fields outside
+        // of his scope
+        update_channel_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            ChannelUpdateParametersRecord {
+                assets_to_upload: None,
+                new_meta: None,
+                reward_account: Some(Some(COLLABORATOR_MEMBER_ORIGIN)),
+                assets_to_remove: BTreeSet::new(),
+                collaborators: None,
+            },
+            Err(Error::<Test>::ActorNotAuthorized.into()),
+        );
+    })
+}
 
-                    num_videos: 0,
-                },
-                ChannelUpdateParametersRecord {
-                    assets_to_upload: None,
-                    new_meta: None,
-                    reward_account: None,
-                    assets_to_remove: BTreeSet::new(),
-                }
-            ))
+#[test]
+fn authorized_collaborators_can_update_channel() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: vec![COLLABORATOR_MEMBER_ID]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Ok(()),
         );
 
-        // Member cannot update a channel they do not own
-        assert_err!(
-            Content::update_channel(
-                Origin::signed(FIRST_MEMBER_ORIGIN),
-                ContentActor::Member(FIRST_MEMBER_ID),
-                channel_id_2,
-                ChannelUpdateParametersRecord {
-                    assets_to_upload: None,
-                    new_meta: None,
-                    reward_account: None,
-                    assets_to_remove: BTreeSet::new(),
-                },
-            ),
-            Error::<Test>::ActorNotAuthorized
+        // attempt for an auth. collaborator to update channel assets
+        update_channel_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            ChannelUpdateParametersRecord {
+                assets_to_upload: Some(helper_generate_storage_assets(vec![5])),
+                new_meta: None,
+                reward_account: None,
+                assets_to_remove: vec![DataObjectId::<Test>::one()]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+                collaborators: None,
+            },
+            Ok(()),
         );
     })
 }
@@ -569,8 +645,9 @@ fn channel_censoring() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             }
         ));
 
@@ -645,8 +722,9 @@ fn channel_censoring() {
             ContentActor::Curator(group_id, FIRST_CURATOR_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             }
         ));
 

+ 81 - 12
runtime-modules/content/src/tests/mock.rs

@@ -1,9 +1,8 @@
 #![cfg(test)]
 
 use crate::*;
-
 use frame_support::dispatch::{DispatchError, DispatchResult};
-use frame_support::traits::{OnFinalize, OnInitialize};
+use frame_support::traits::{Currency, OnFinalize, OnInitialize};
 use frame_support::{impl_outer_event, impl_outer_origin, parameter_types};
 use sp_core::H256;
 use sp_runtime::{
@@ -37,6 +36,7 @@ pub const SECOND_CURATOR_ORIGIN: u64 = 3;
 pub const FIRST_MEMBER_ORIGIN: u64 = 4;
 pub const SECOND_MEMBER_ORIGIN: u64 = 5;
 pub const UNKNOWN_ORIGIN: u64 = 7777;
+pub const UNKNOWN_MEMBER_ID: u64 = 7777;
 
 // Members range from MemberId 1 to 10
 pub const MEMBERS_COUNT: MemberId = 10;
@@ -56,6 +56,14 @@ pub const SECOND_MEMBER_ID: MemberId = 2;
 pub const THIRD_MEMBER_ID: MemberId = 7;
 pub const FOURTH_MEMBER_ID: MemberId = 8;
 
+// members that act as collaborators
+pub const COLLABORATOR_MEMBER_ORIGIN: MemberId = 8;
+pub const COLLABORATOR_MEMBER_ID: MemberId = 9;
+
+/// Constants
+// initial balancer for an account
+pub const INIT_BALANCE: u32 = 500;
+
 impl_outer_origin! {
     pub enum Origin for Test {}
 }
@@ -172,6 +180,10 @@ impl ContentActorAuthenticator for Test {
     type CuratorId = u64;
     type CuratorGroupId = u64;
 
+    fn validate_member_id(member_id: &Self::MemberId) -> bool {
+        *member_id < MEMBERS_COUNT
+    }
+
     fn is_lead(account_id: &Self::AccountId) -> bool {
         let lead_account_id = ensure_signed(Origin::signed(LEAD_ORIGIN)).unwrap();
         *account_id == lead_account_id
@@ -198,7 +210,6 @@ impl ContentActorAuthenticator for Test {
 parameter_types! {
     pub const MaxNumberOfDataObjectsPerBag: u64 = 4;
     pub const MaxDistributionBucketFamilyNumber: u64 = 4;
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 10;
     pub const DataObjectDeletionPrize: u64 = 10;
     pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage
     pub const BlacklistSizeLimit: u64 = 1;
@@ -227,7 +238,7 @@ impl storage::Trait for Test {
     type Event = MetaEvent;
     type DataObjectId = u64;
     type StorageBucketId = u64;
-    type DistributionBucketId = u64;
+    type DistributionBucketIndex = u64;
     type DistributionBucketFamilyId = u64;
     type DistributionBucketOperatorId = u64;
     type ChannelId = u64;
@@ -243,7 +254,6 @@ impl storage::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =
         MaxNumberOfPendingInvitationsPerDistributionBucket;
@@ -564,6 +574,7 @@ pub fn create_member_channel() -> ChannelId {
             assets: Some(assets),
             meta: Some(vec![]),
             reward_account: None,
+            collaborators: BTreeSet::<MemberId>::new(),
         }
     ));
 
@@ -619,7 +630,7 @@ pub fn create_channel_mock(
     let channel_id = Content::next_channel_id();
 
     assert_eq!(
-        Content::create_channel(Origin::signed(sender), actor.clone(), params.clone()),
+        Content::create_channel(Origin::signed(sender.clone()), actor.clone(), params.clone()),
         result.clone(),
     );
 
@@ -634,11 +645,13 @@ pub fn create_channel_mock(
                 ChannelRecord {
                     owner: owner,
                     is_censored: false,
-                    reward_account: params.reward_account,
-                    deletion_prize_source_account_id: sender,
+                    reward_account: params.reward_account.clone(),
+
+                    collaborators: params.collaborators.clone(),
                     num_videos: 0,
+                    deletion_prize_source_account_id: sender.clone(),
                 },
-                params.clone(),
+                params,
             ))
         );
     }
@@ -672,11 +685,17 @@ pub fn update_channel_mock(
                 ChannelRecord {
                     owner: channel_pre.owner.clone(),
                     is_censored: channel_pre.is_censored,
-                    reward_account: channel_pre.reward_account.clone(),
-                    deletion_prize_source_account_id: sender,
+                    reward_account: params
+                        .reward_account
+                        .map_or_else(|| channel_pre.reward_account.clone(), |account| account),
+                    collaborators: params
+                        .collaborators
+                        .clone()
+                        .unwrap_or(channel_pre.collaborators),
                     num_videos: channel_pre.num_videos,
+                    deletion_prize_source_account_id: channel_pre.deletion_prize_source_account_id,
                 },
-                params.clone(),
+                params,
             ))
         );
     }
@@ -723,6 +742,7 @@ pub fn create_simple_channel_and_video(sender: u64, member_id: u64) {
             assets: None,
             meta: Some(vec![]),
             reward_account: Some(REWARD_ACCOUNT_ID),
+            collaborators: BTreeSet::<MemberId>::new(),
         },
         Ok(()),
     );
@@ -807,3 +827,52 @@ pub fn update_video_mock(
         );
     }
 }
+
+pub fn delete_video_mock(
+    sender: u64,
+    actor: ContentActor<CuratorGroupId, CuratorId, MemberId>,
+    video_id: <Test as Trait>::VideoId,
+    assets_to_remove: BTreeSet<DataObjectId<Test>>,
+    result: DispatchResult,
+) {
+    assert_eq!(
+        Content::delete_video(
+            Origin::signed(sender),
+            actor.clone(),
+            video_id.clone(),
+            assets_to_remove.clone(),
+        ),
+        result.clone(),
+    );
+
+    if result.is_ok() {
+        assert_eq!(
+            System::events().last().unwrap().event,
+            MetaEvent::content(RawEvent::VideoDeleted(actor.clone(), video_id))
+        );
+    }
+}
+
+// helper functions
+pub fn helper_generate_storage_assets(sizes: Vec<u64>) -> StorageAssets<Test> {
+    StorageAssetsRecord {
+        object_creation_list: sizes
+            .into_iter()
+            .map(|s| DataObjectCreationParameters {
+                size: s,
+                ipfs_content_id: s.encode(),
+            })
+            .collect::<Vec<_>>(),
+        expected_data_size_fee: storage::DataObjectPerMegabyteFee::<Test>::get(),
+    }
+}
+
+pub fn helper_init_accounts(accounts: Vec<u64>) {
+    // give channel owner funds to permit collaborators to update assets
+    for acc in accounts.iter() {
+        let _ = balances::Module::<Test>::deposit_creating(
+            acc,
+            <Test as balances::Trait>::Balance::from(INIT_BALANCE),
+        );
+    }
+}

+ 1 - 0
runtime-modules/content/src/tests/nft/accept_incoming_offer.rs

@@ -197,6 +197,7 @@ fn accept_incoming_offer_reward_account_is_not_set() {
                 assets: None,
                 meta: Some(vec![]),
                 reward_account: None,
+                collaborators: BTreeSet::<tests::mock::MemberId>::new(),
             },
             Ok(()),
         );

+ 1 - 0
runtime-modules/content/src/tests/nft/buy_nft.rs

@@ -290,6 +290,7 @@ fn buy_nft_reward_account_is_not_set() {
                 assets: None,
                 meta: Some(vec![]),
                 reward_account: None,
+                collaborators: BTreeSet::<tests::mock::MemberId>::new(),
             },
             Ok(()),
         );

+ 275 - 4
runtime-modules/content/src/tests/videos.rs

@@ -1,9 +1,8 @@
 #![cfg(test)]
-
 use super::curators;
 use super::mock::*;
-use crate::sp_api_hidden_includes_decl_storage::hidden_include::traits::Currency;
 use crate::*;
+use frame_support::traits::Currency;
 use frame_support::{assert_err, assert_ok};
 
 #[test]
@@ -24,8 +23,9 @@ fn video_creation_successful() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -59,8 +59,9 @@ fn video_update_successful() {
             ContentActor::Member(FIRST_MEMBER_ID),
             ChannelCreationParametersRecord {
                 assets: None,
-                meta: Some(vec![]),
+                meta: None,
                 reward_account: None,
+                collaborators: BTreeSet::new(),
             },
             Ok(()),
         );
@@ -391,3 +392,273 @@ fn featured_videos() {
         );
     })
 }
+
+#[test]
+fn non_authorized_collaborators_cannot_add_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
+        );
+
+        create_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Err(Error::<Test>::ActorNotAuthorized.into()),
+        );
+    })
+}
+
+#[test]
+fn non_authorized_collaborators_cannot_update_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
+        );
+
+        // create video
+        create_video_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Ok(()),
+        );
+
+        update_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as Trait>::VideoId::one(),
+            VideoUpdateParametersRecord {
+                assets_to_upload: Some(helper_generate_storage_assets(vec![5])),
+                new_meta: None,
+                assets_to_remove: vec![DataObjectId::<Test>::one()]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Err(Error::<Test>::ActorNotAuthorized.into()),
+        );
+    })
+}
+
+#[test]
+fn non_authorized_collaborators_cannot_delete_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: BTreeSet::new(),
+            },
+            Ok(()),
+        );
+
+        // create video
+        create_video_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Ok(()),
+        );
+
+        delete_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as Trait>::VideoId::one(),
+            vec![
+                DataObjectId::<Test>::one(),
+                DataObjectId::<Test>::from(2u64),
+            ]
+            .into_iter()
+            .collect::<BTreeSet<_>>(),
+            Err(Error::<Test>::ActorNotAuthorized.into()),
+        );
+    })
+}
+
+#[test]
+fn authorized_collaborators_can_add_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: vec![COLLABORATOR_MEMBER_ID]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Ok(()),
+        );
+
+        create_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Ok(()),
+        );
+    })
+}
+
+#[test]
+fn authorized_collaborators_can_update_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: vec![COLLABORATOR_MEMBER_ID]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Ok(()),
+        );
+
+        // create video
+        create_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Ok(()),
+        );
+
+        update_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as Trait>::VideoId::one(),
+            VideoUpdateParametersRecord {
+                assets_to_upload: Some(helper_generate_storage_assets(vec![5])),
+                new_meta: None,
+                assets_to_remove: vec![DataObjectId::<Test>::one()]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Ok(()),
+        );
+    })
+}
+
+#[test]
+fn authorized_collaborators_can_delete_video() {
+    with_default_mock_builder(|| {
+        // Run to block one to see emitted events
+        run_to_block(1);
+
+        helper_init_accounts(vec![FIRST_MEMBER_ORIGIN, COLLABORATOR_MEMBER_ORIGIN]);
+
+        // create channel
+        create_channel_mock(
+            FIRST_MEMBER_ORIGIN,
+            ContentActor::Member(FIRST_MEMBER_ID),
+            ChannelCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![2, 3])),
+                meta: None,
+                reward_account: None,
+                collaborators: vec![COLLABORATOR_MEMBER_ID]
+                    .into_iter()
+                    .collect::<BTreeSet<_>>(),
+            },
+            Ok(()),
+        );
+
+        // create video
+        create_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as storage::Trait>::ChannelId::one(),
+            VideoCreationParametersRecord {
+                assets: Some(helper_generate_storage_assets(vec![1, 2])),
+                meta: None,
+            },
+            Ok(()),
+        );
+
+        println!("videoevent {:?}", System::events().last().unwrap().event);
+
+        delete_video_mock(
+            COLLABORATOR_MEMBER_ORIGIN,
+            ContentActor::Collaborator(COLLABORATOR_MEMBER_ID),
+            <Test as Trait>::VideoId::one(),
+            vec![
+                DataObjectId::<Test>::one(),
+                DataObjectId::<Test>::from(2u64),
+            ]
+            .into_iter()
+            .collect::<BTreeSet<_>>(),
+            Ok(()),
+        );
+    })
+}

+ 14 - 6
runtime-modules/content/src/types.rs

@@ -1,4 +1,5 @@
 use crate::*;
+use sp_std::borrow::ToOwned;
 
 pub type DataObjectId<T> = <T as storage::Trait>::DataObjectId;
 
@@ -57,7 +58,7 @@ pub struct ChannelCategoryUpdateParameters {
 /// Type representing an owned channel which videos, playlists, and series can belong to.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
-pub struct ChannelRecord<MemberId, CuratorGroupId, AccountId> {
+pub struct ChannelRecord<MemberId: Ord, CuratorGroupId, AccountId> {
     /// The owner of a channel
     pub owner: ChannelOwner<MemberId, CuratorGroupId>,
     /// The videos under this channel
@@ -68,9 +69,11 @@ pub struct ChannelRecord<MemberId, CuratorGroupId, AccountId> {
     pub reward_account: Option<AccountId>,
     /// Account for withdrawing deletion prize funds
     pub deletion_prize_source_account_id: AccountId,
+    /// collaborator set
+    pub collaborators: BTreeSet<MemberId>,
 }
 
-impl<MemberId, CuratorGroupId, AccountId> ChannelRecord<MemberId, CuratorGroupId, AccountId> {
+impl<MemberId: Ord, CuratorGroupId, AccountId> ChannelRecord<MemberId, CuratorGroupId, AccountId> {
     /// Ensure censorship status have been changed
     pub fn ensure_censorship_status_changed<T: Trait>(&self, is_censored: bool) -> DispatchResult {
         ensure!(
@@ -116,23 +119,25 @@ pub type ChannelOwnershipTransferRequest<T> = ChannelOwnershipTransferRequestRec
 /// Information about channel being created.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)]
-pub struct ChannelCreationParametersRecord<StorageAssets, AccountId> {
+pub struct ChannelCreationParametersRecord<StorageAssets, AccountId, MemberId: Ord> {
     /// Asset collection for the channel, referenced by metadata
     pub assets: Option<StorageAssets>,
     /// Metadata about the channel.
     pub meta: Option<Vec<u8>>,
     /// optional reward account
     pub reward_account: Option<AccountId>,
+    /// initial collaborator set
+    pub collaborators: BTreeSet<MemberId>,
 }
 
 pub type ChannelCreationParameters<T> =
-    ChannelCreationParametersRecord<StorageAssets<T>, <T as frame_system::Trait>::AccountId>;
+    ChannelCreationParametersRecord<StorageAssets<T>, <T as frame_system::Trait>::AccountId, <T as common::MembershipTypes>::MemberId>;
 
 /// Information about channel being updated.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
-pub struct ChannelUpdateParametersRecord<StorageAssets, AccountId, DataObjectId: Ord> {
-    /// Asset collection for the channel, referenced by metadata    
+pub struct ChannelUpdateParametersRecord<StorageAssets, AccountId, DataObjectId: Ord, MemberId: Ord> {
+    /// Asset collection for the channel, referenced by metadata
     pub assets_to_upload: Option<StorageAssets>,
     /// If set, metadata update for the channel.
     pub new_meta: Option<Vec<u8>>,
@@ -140,12 +145,15 @@ pub struct ChannelUpdateParametersRecord<StorageAssets, AccountId, DataObjectId:
     pub reward_account: Option<Option<AccountId>>,
     /// assets to be removed from channel
     pub assets_to_remove: BTreeSet<DataObjectId>,
+    /// collaborator set
+    pub collaborators: Option<BTreeSet<MemberId>>,
 }
 
 pub type ChannelUpdateParameters<T> = ChannelUpdateParametersRecord<
     StorageAssets<T>,
     <T as frame_system::Trait>::AccountId,
     DataObjectId<T>,
+    <T as common::MembershipTypes>::MemberId,
 >;
 
 /// Information about the video category being updated.

+ 0 - 94
runtime-modules/storage/src/distribution_bucket_picker.rs

@@ -1,94 +0,0 @@
-#![warn(missing_docs)]
-
-use frame_support::traits::Randomness;
-use sp_arithmetic::traits::Zero;
-use sp_runtime::SaturatedConversion;
-use sp_std::cell::RefCell;
-use sp_std::collections::btree_set::BTreeSet;
-use sp_std::marker::PhantomData;
-use sp_std::rc::Rc;
-use sp_std::vec::Vec;
-
-use crate::{DynamicBagType, Module, Trait};
-
-// Generates distribution bucket IDs to assign to a new dynamic bag.
-pub(crate) struct DistributionBucketPicker<T> {
-    trait_marker: PhantomData<T>,
-}
-
-impl<T: Trait> DistributionBucketPicker<T> {
-    // Get random distribution buckets from distribution bucket families using the dynamic bag
-    // creation policy.
-    pub(crate) fn pick_distribution_buckets(
-        bag_type: DynamicBagType,
-    ) -> BTreeSet<T::DistributionBucketId> {
-        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
-
-        if creation_policy.no_distribution_buckets_required() {
-            return BTreeSet::new();
-        }
-
-        // Randomness for all bucket family.
-        // let random_seed = RefCell::new(Module::<T>::get_initial_random_seed());
-        let random_seed = Rc::new(RefCell::new(Module::<T>::get_initial_random_seed()));
-
-        creation_policy
-            .families
-            .iter()
-            .filter_map(|(family_id, bucket_num)| {
-                Module::<T>::ensure_distribution_bucket_family_exists(family_id)
-                    .ok()
-                    .map(|fam| (fam, bucket_num))
-            })
-            .map(|(family, bucket_num)| {
-                let filtered_ids = family
-                    .distribution_buckets
-                    .iter()
-                    .filter_map(|(id, bucket)| bucket.accepting_new_bags.then(|| *id))
-                    .collect::<Vec<_>>();
-
-                (filtered_ids, bucket_num)
-            })
-            .map(|(bucket_ids, bucket_num)| {
-                Self::get_random_distribution_buckets(bucket_ids, *bucket_num, random_seed.clone())
-            })
-            .flatten()
-            .collect::<BTreeSet<_>>()
-    }
-
-    // Get random bucket IDs from the ID collection.
-    pub fn get_random_distribution_buckets(
-        ids: Vec<T::DistributionBucketId>,
-        bucket_number: u32,
-        seed: Rc<RefCell<T::Hash>>, //     seed: RefCell<T::Hash>
-    ) -> BTreeSet<T::DistributionBucketId> {
-        let mut working_ids = ids;
-        let mut result_ids = BTreeSet::default();
-
-        for _ in 0..bucket_number {
-            if working_ids.is_empty() {
-                break;
-            }
-
-            let current_seed = Self::advance_random_seed(seed.clone());
-
-            let upper_bound = working_ids.len() as u64 - 1;
-            let index =
-                Module::<T>::random_index(current_seed.as_ref(), upper_bound).saturated_into();
-            result_ids.insert(working_ids.remove(index));
-        }
-
-        result_ids
-    }
-
-    // Changes the internal seed value of the container and returns new random seed.
-    fn advance_random_seed(seed: Rc<RefCell<T::Hash>>) -> T::Hash {
-        // Cannot create randomness in the initial block (Substrate error).
-        if <frame_system::Module<T>>::block_number() == Zero::zero() {
-            return Module::<T>::get_initial_random_seed();
-        }
-
-        let current_seed = *seed.borrow();
-        seed.replace(T::Randomness::random(current_seed.as_ref()))
-    }
-}

+ 216 - 219
runtime-modules/storage/src/lib.rs

@@ -105,7 +105,6 @@
 //! - DefaultMemberDynamicBagNumberOfStorageBuckets
 //! - DefaultChannelDynamicBagNumberOfStorageBuckets
 //! - MaxDistributionBucketFamilyNumber
-//! - MaxDistributionBucketNumberPerFamily
 //! - DistributionBucketsPerBagValueConstraint
 //! - MaxNumberOfPendingInvitationsPerDistributionBucket
 
@@ -126,13 +125,15 @@ mod tests;
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 
-pub(crate) mod distribution_bucket_picker;
-pub(crate) mod storage_bucket_picker;
+//pub(crate) mod distribution_bucket_picker;
+pub(crate) mod random_buckets;
 
 use codec::{Codec, Decode, Encode};
 use frame_support::dispatch::{DispatchError, DispatchResult};
 use frame_support::traits::{Currency, ExistenceRequirement, Get, Randomness};
-use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter};
+use frame_support::{
+    decl_error, decl_event, decl_module, decl_storage, ensure, IterableStorageDoubleMap, Parameter,
+};
 use frame_system::ensure_root;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
@@ -149,8 +150,8 @@ use common::constraints::BoundedValueConstraint;
 use common::origin::ActorOriginValidator;
 use common::working_group::WorkingGroup;
 
-use distribution_bucket_picker::DistributionBucketPicker;
-use storage_bucket_picker::StorageBucketPicker;
+use random_buckets::DistributionBucketPicker;
+use random_buckets::StorageBucketPicker;
 
 /// Public interface for the storage module.
 pub trait DataObjectStorage<T: Trait> {
@@ -245,17 +246,21 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
         + Default
         + Copy
         + MaybeSerialize
-        + PartialEq;
+        + PartialEq
+        + Into<u64>
+        + From<u64>;
 
-    /// Distribution bucket ID type.
-    type DistributionBucketId: Parameter
+    /// Distribution bucket index within a distribution bucket family type.
+    type DistributionBucketIndex: Parameter
         + Member
         + BaseArithmetic
         + Codec
         + Default
         + Copy
         + MaybeSerialize
-        + PartialEq;
+        + PartialEq
+        + Into<u64>
+        + From<u64>;
 
     /// Distribution bucket family ID type.
     type DistributionBucketFamilyId: Parameter
@@ -321,9 +326,6 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
     /// Defines max allowed distribution bucket family number.
     type MaxDistributionBucketFamilyNumber: Get<u64>;
 
-    /// Defines max allowed distribution bucket number per family.
-    type MaxDistributionBucketNumberPerFamily: Get<u64>;
-
     /// Max number of pending invitations per distribution bucket.
     type MaxNumberOfPendingInvitationsPerDistributionBucket: Get<u64>;
 
@@ -484,8 +486,7 @@ pub struct DataObject<Balance> {
 }
 
 /// Type alias for the BagRecord.
-pub type Bag<T> =
-    BagRecord<<T as Trait>::StorageBucketId, <T as Trait>::DistributionBucketId, BalanceOf<T>>;
+pub type Bag<T> = BagRecord<<T as Trait>::StorageBucketId, DistributionBucketId<T>, BalanceOf<T>>;
 
 /// Bag container.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
@@ -585,6 +586,7 @@ pub enum DynamicBagType {
 
     /// Channel dynamic bag type.
     Channel,
+    // Modify 'delete_distribution_bucket_family' on adding the new type!
 }
 
 impl Default for DynamicBagType {
@@ -826,45 +828,42 @@ impl<Balance: Saturating + Copy> BagUpdate<Balance> {
 
 /// Type alias for the DistributionBucketFamilyRecord.
 pub type DistributionBucketFamily<T> =
-    DistributionBucketFamilyRecord<<T as Trait>::DistributionBucketId, WorkerId<T>>;
+    DistributionBucketFamilyRecord<<T as Trait>::DistributionBucketIndex>;
 
 /// Distribution bucket family.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
-pub struct DistributionBucketFamilyRecord<DistributionBucketId: Ord, WorkerId: Ord> {
-    /// Distribution bucket map.
-    pub distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucketRecord<WorkerId>>,
+pub struct DistributionBucketFamilyRecord<DistributionBucketIndex> {
+    /// Next distribution bucket index.
+    pub next_distribution_bucket_index: DistributionBucketIndex,
 }
 
-impl<DistributionBucketId: Ord, WorkerId: Ord>
-    DistributionBucketFamilyRecord<DistributionBucketId, WorkerId>
+impl<DistributionBucketIndex: BaseArithmetic>
+    DistributionBucketFamilyRecord<DistributionBucketIndex>
 {
-    // Add and/or remove distribution buckets assignments to bags.
-    fn change_bag_assignments(
-        &mut self,
-        add_buckets: &BTreeSet<DistributionBucketId>,
-        remove_buckets: &BTreeSet<DistributionBucketId>,
-    ) {
-        for bucket_id in add_buckets.iter() {
-            if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) {
-                bucket.register_bag_assignment();
-            }
-        }
-
-        for bucket_id in remove_buckets.iter() {
-            if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) {
-                bucket.unregister_bag_assignment();
-            }
-        }
+    // Increments the next distribution bucket index variable.
+    fn increment_next_distribution_bucket_index_counter(&mut self) {
+        self.next_distribution_bucket_index += One::one()
     }
+}
 
-    // Checks inner buckets for bag assignment number. Returns true only if all 'assigned_bags' are
-    // zero.
-    fn no_bags_assigned(&self) -> bool {
-        self.distribution_buckets
-            .values()
-            .all(|b| b.no_bags_assigned())
-    }
+/// Type alias for the DistributionBucketIdRecord.
+pub type DistributionBucketId<T> = DistributionBucketIdRecord<
+    <T as Trait>::DistributionBucketFamilyId,
+    <T as Trait>::DistributionBucketIndex,
+>;
+
+/// Complex distribution bucket ID type.
+/// Joins a distribution bucket family ID and a distribution bucket index within the family.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug, PartialOrd, Ord)]
+pub struct DistributionBucketIdRecord<DistributionBucketFamilyId: Ord, DistributionBucketIndex: Ord>
+{
+    /// Distribution bucket family ID.
+    pub distribution_bucket_family_id: DistributionBucketFamilyId,
+
+    /// Distribution bucket ID.
+    pub distribution_bucket_index: DistributionBucketIndex,
 }
 
 /// Type alias for the DistributionBucketRecord.
@@ -962,12 +961,15 @@ decl_storage! {
             map hasher(blake2_128_concat) T::DistributionBucketFamilyId =>
             DistributionBucketFamily<T>;
 
+        /// 'Distribution bucket' storage double map.
+        pub DistributionBucketByFamilyIdById get (fn distribution_bucket_by_family_id_by_index):
+            double_map
+            hasher(blake2_128_concat) T::DistributionBucketFamilyId,
+            hasher(blake2_128_concat) T::DistributionBucketIndex => DistributionBucket<T>;
+
         /// Total number of distribution bucket families in the system.
         pub DistributionBucketFamilyNumber get(fn distribution_bucket_family_number): u64;
 
-        /// Distribution bucket id counter. Starts at zero.
-        pub NextDistributionBucketId get(fn next_distribution_bucket_id): T::DistributionBucketId;
-
         /// "Distribution buckets per bag" number limit.
         pub DistributionBucketsPerBagLimit get (fn distribution_buckets_per_bag_limit): u64;
     }
@@ -986,7 +988,8 @@ decl_event! {
         <T as frame_system::Trait>::AccountId,
         Balance = BalanceOf<T>,
         <T as Trait>::DistributionBucketFamilyId,
-        <T as Trait>::DistributionBucketId,
+        DistributionBucketId = DistributionBucketId<T>,
+        <T as Trait>::DistributionBucketIndex,
     {
         /// Emits on creating the storage bucket.
         /// Params
@@ -1164,16 +1167,14 @@ decl_event! {
 
        /// Emits on distribution bucket status update (accepting new bags).
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - new status (accepting new bags)
-        DistributionBucketStatusUpdated(DistributionBucketFamilyId, DistributionBucketId, bool),
+        DistributionBucketStatusUpdated(DistributionBucketId, bool),
 
         /// Emits on deleting distribution bucket.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
-        DistributionBucketDeleted(DistributionBucketFamilyId, DistributionBucketId),
+        DistributionBucketDeleted(DistributionBucketId),
 
         /// Emits on updating distribution buckets for bag.
         /// Params
@@ -1183,8 +1184,8 @@ decl_event! {
         DistributionBucketsUpdatedForBag(
             BagId,
             DistributionBucketFamilyId,
-            BTreeSet<DistributionBucketId>,
-            BTreeSet<DistributionBucketId>
+            BTreeSet<DistributionBucketIndex>,
+            BTreeSet<DistributionBucketIndex>
         ),
 
         /// Emits on changing the "Distribution buckets per bag" number limit.
@@ -1194,10 +1195,9 @@ decl_event! {
 
        /// Emits on distribution bucket mode update (distributing flag).
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - distributing
-        DistributionBucketModeUpdated(DistributionBucketFamilyId, DistributionBucketId, bool),
+        DistributionBucketModeUpdated(DistributionBucketId, bool),
 
         /// Emits on dynamic bag creation policy update (distribution bucket families).
         /// Params
@@ -1210,22 +1210,18 @@ decl_event! {
 
         /// Emits on creating a distribution bucket invitation for the operator.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - worker ID
         DistributionBucketOperatorInvited(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId,
         ),
 
         /// Emits on canceling a distribution bucket invitation for the operator.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - operator worker ID
         DistributionBucketInvitationCancelled(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId,
         ),
@@ -1233,34 +1229,28 @@ decl_event! {
         /// Emits on accepting a distribution bucket invitation for the operator.
         /// Params
         /// - worker ID
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         DistributionBucketInvitationAccepted(
             WorkerId,
-            DistributionBucketFamilyId,
             DistributionBucketId,
         ),
 
         /// Emits on setting the metadata by a distribution bucket operator.
         /// Params
         /// - worker ID
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - metadata
         DistributionBucketMetadataSet(
             WorkerId,
-            DistributionBucketFamilyId,
             DistributionBucketId,
             Vec<u8>
         ),
 
         /// Emits on the distribution bucket operator removal.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - distribution bucket operator ID
         DistributionBucketOperatorRemoved(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId
         ),
@@ -1396,9 +1386,6 @@ decl_error! {
         /// Distribution bucket family doesn't exist.
         DistributionBucketFamilyDoesntExist,
 
-        /// Max distribution bucket number per family limit exceeded.
-        MaxDistributionBucketNumberPerFamilyLimitExceeded,
-
         /// Distribution bucket doesn't exist.
         DistributionBucketDoesntExist,
 
@@ -1481,10 +1468,6 @@ decl_module! {
         /// Exports const - max allowed distribution bucket family number.
         const MaxDistributionBucketFamilyNumber: u64 = T::MaxDistributionBucketFamilyNumber::get();
 
-        /// Exports const - max allowed distribution bucket number per family.
-        const MaxDistributionBucketNumberPerFamily: u64 =
-            T::MaxDistributionBucketNumberPerFamily::get();
-
         /// Exports const - "Distribution buckets per bag" value constraint.
         const DistributionBucketsPerBagValueConstraint: StorageBucketsPerBagValueConstraint =
             T::DistributionBucketsPerBagValueConstraint::get();
@@ -2032,10 +2015,10 @@ decl_module! {
         pub fn delete_distribution_bucket_family(origin, family_id: T::DistributionBucketFamilyId) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
+            Self::ensure_distribution_bucket_family_exists(&family_id)?;
 
             // Check that no assigned bags left.
-            ensure!(family.no_bags_assigned(), Error::<T>::DistributionBucketIsBoundToBag);
+            ensure!(Self::no_bags_assigned(&family_id), Error::<T>::DistributionBucketIsBoundToBag);
 
             Self::check_dynamic_bag_creation_policy_for_dependencies(
                 &family_id,
@@ -2068,12 +2051,6 @@ decl_module! {
 
             let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
 
-            ensure!(
-                family.distribution_buckets.len().saturated_into::<u64>() <
-                    T::MaxDistributionBucketNumberPerFamily::get(),
-                Error::<T>::MaxDistributionBucketNumberPerFamilyLimitExceeded
-            );
-
             //
             // == MUTATION SAFE ==
             //
@@ -2086,13 +2063,14 @@ decl_module! {
                 assigned_bags: 0,
             };
 
-            let bucket_id = Self::next_distribution_bucket_id();
+            let bucket_index = family.next_distribution_bucket_index;
+            let bucket_id = Self::create_distribution_bucket_id(family_id, bucket_index);
 
             <DistributionBucketFamilyById<T>>::mutate(family_id, |family|{
-                family.distribution_buckets.insert(bucket_id, bucket);
+                family.increment_next_distribution_bucket_index_counter();
             });
 
-            <NextDistributionBucketId<T>>::put(bucket_id + One::one());
+            <DistributionBucketByFamilyIdById<T>>::insert(family_id, bucket_index, bucket);
 
             Self::deposit_event(
                 RawEvent::DistributionBucketCreated(family_id, accepting_new_bags, bucket_id)
@@ -2103,34 +2081,27 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_distribution_bucket_status(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             accepting_new_bags: bool
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             //
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.accepting_new_bags = accepting_new_bags;
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketStatusUpdated(
-                    family_id,
-                    distribution_bucket_id,
-                    accepting_new_bags
-                )
+                RawEvent::DistributionBucketStatusUpdated(bucket_id, accepting_new_bags)
             );
         }
 
@@ -2138,13 +2109,11 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn delete_distribution_bucket(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
         ){
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(&family, &distribution_bucket_id)?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             // Check that no assigned bags left.
             ensure!(bucket.no_bags_assigned(), Error::<T>::DistributionBucketIsBoundToBag);
@@ -2156,12 +2125,13 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                family.distribution_buckets.remove(&distribution_bucket_id);
-            });
+            <DistributionBucketByFamilyIdById<T>>::remove(
+                &bucket_id.distribution_bucket_family_id,
+                &bucket_id.distribution_bucket_index
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketDeleted(family_id, distribution_bucket_id)
+                RawEvent::DistributionBucketDeleted(bucket_id)
             );
         }
 
@@ -2171,36 +2141,44 @@ decl_module! {
             origin,
             bag_id: BagId<T>,
             family_id: T::DistributionBucketFamilyId,
-            add_buckets: BTreeSet<T::DistributionBucketId>,
-            remove_buckets: BTreeSet<T::DistributionBucketId>,
+            add_buckets_indices: BTreeSet<T::DistributionBucketIndex>,
+            remove_buckets_indices: BTreeSet<T::DistributionBucketIndex>,
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
             Self::validate_update_distribution_buckets_for_bag_params(
                 &bag_id,
                 &family_id,
-                &add_buckets,
-                &remove_buckets,
+                &add_buckets_indices,
+                &remove_buckets_indices,
             )?;
 
             //
             // == MUTATION SAFE ==
             //
 
+            let add_buckets_ids = add_buckets_indices
+                .iter()
+                .map(|idx| Self::create_distribution_bucket_id(family_id, *idx))
+                .collect::<BTreeSet<_>>();
+
+            let remove_buckets_ids = remove_buckets_indices
+                .iter()
+                .map(|idx| Self::create_distribution_bucket_id(family_id, *idx))
+                .collect::<BTreeSet<_>>();
+
             Bags::<T>::mutate(&bag_id, |bag| {
-                bag.update_distribution_buckets(&mut add_buckets.clone(), &remove_buckets);
+                bag.update_distribution_buckets(&mut add_buckets_ids.clone(), &remove_buckets_ids);
             });
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                family.change_bag_assignments(&add_buckets, &remove_buckets);
-            });
+            Self::change_bag_assignments(&add_buckets_ids, &remove_buckets_ids);
 
             Self::deposit_event(
                 RawEvent::DistributionBucketsUpdatedForBag(
                     bag_id,
                     family_id,
-                    add_buckets,
-                    remove_buckets
+                    add_buckets_indices,
+                    remove_buckets_indices
                 )
             );
         }
@@ -2229,34 +2207,27 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_distribution_bucket_mode(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             distributing: bool
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             //
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.distributing = distributing;
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketModeUpdated(
-                    family_id,
-                    distribution_bucket_id,
-                    distributing
-                )
+                RawEvent::DistributionBucketModeUpdated(bucket_id, distributing)
             );
         }
 
@@ -2291,17 +2262,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn invite_distribution_bucket_operator(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             Self::ensure_distribution_provider_can_be_invited(&bucket, &operator_worker_id)?;
 
@@ -2309,18 +2275,16 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.insert(operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketOperatorInvited(
-                    family_id,
-                    distribution_bucket_id,
-                    operator_worker_id,
-                )
+                RawEvent::DistributionBucketOperatorInvited(bucket_id, operator_worker_id)
             );
         }
 
@@ -2328,17 +2292,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn cancel_distribution_bucket_operator_invite(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.pending_invitations.contains(&operator_worker_id),
@@ -2349,16 +2308,17 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.remove(&operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
                 RawEvent::DistributionBucketInvitationCancelled(
-                    family_id,
-                    distribution_bucket_id,
+                    bucket_id,
                     operator_worker_id
                 )
             );
@@ -2368,17 +2328,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn remove_distribution_bucket_operator(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>,
         ){
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.operators.contains(&operator_worker_id),
@@ -2390,18 +2345,16 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.operators.remove(&operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketOperatorRemoved(
-                    family_id,
-                    distribution_bucket_id,
-                    operator_worker_id
-                )
+                RawEvent::DistributionBucketOperatorRemoved(bucket_id, operator_worker_id)
             );
         }
 
@@ -2436,17 +2389,11 @@ decl_module! {
         pub fn accept_distribution_bucket_invitation(
             origin,
             worker_id: WorkerId<T>,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
-
+            bucket_id: DistributionBucketId<T>,
         ) {
             T::ensure_distribution_worker_origin(origin, worker_id)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.pending_invitations.contains(&worker_id),
@@ -2457,19 +2404,17 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.remove(&worker_id);
                     bucket.operators.insert(worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketInvitationAccepted(
-                    worker_id,
-                    family_id,
-                    distribution_bucket_id,
-                )
+                RawEvent::DistributionBucketInvitationAccepted(worker_id, bucket_id)
             );
         }
 
@@ -2478,17 +2423,12 @@ decl_module! {
         pub fn set_distribution_operator_metadata(
             origin,
             worker_id: WorkerId<T>,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             metadata: Vec<u8>,
         ) {
             T::ensure_distribution_worker_origin(origin, worker_id)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.operators.contains(&worker_id),
@@ -2500,12 +2440,7 @@ decl_module! {
             //
 
             Self::deposit_event(
-                RawEvent::DistributionBucketMetadataSet(
-                    worker_id,
-                    family_id,
-                    distribution_bucket_id,
-                    metadata
-                )
+                RawEvent::DistributionBucketMetadataSet(worker_id, bucket_id, metadata)
             );
         }
 
@@ -3351,7 +3286,7 @@ impl<T: Trait> Module<T> {
     // Selects distributed bucket ID sets to assign to the dynamic bag.
     pub(crate) fn pick_distribution_buckets_for_dynamic_bag(
         bag_type: DynamicBagType,
-    ) -> BTreeSet<T::DistributionBucketId> {
+    ) -> BTreeSet<DistributionBucketId<T>> {
         DistributionBucketPicker::<T>::pick_distribution_buckets(bag_type)
     }
 
@@ -3469,22 +3404,28 @@ impl<T: Trait> Module<T> {
     // Ensures the existence of the distribution bucket.
     // Returns the DistributionBucket object or error.
     fn ensure_distribution_bucket_exists(
-        family: &DistributionBucketFamily<T>,
-        distribution_bucket_id: &T::DistributionBucketId,
+        bucket_id: &DistributionBucketId<T>,
     ) -> Result<DistributionBucket<T>, Error<T>> {
-        family
-            .distribution_buckets
-            .get(distribution_bucket_id)
-            .cloned()
-            .ok_or(Error::<T>::DistributionBucketDoesntExist)
+        ensure!(
+            <DistributionBucketByFamilyIdById<T>>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index
+            ),
+            Error::<T>::DistributionBucketDoesntExist
+        );
+
+        Ok(Self::distribution_bucket_by_family_id_by_index(
+            bucket_id.distribution_bucket_family_id,
+            bucket_id.distribution_bucket_index,
+        ))
     }
 
     // Ensures validity of the `update_distribution_buckets_for_bag` extrinsic parameters
     fn validate_update_distribution_buckets_for_bag_params(
         bag_id: &BagId<T>,
         family_id: &T::DistributionBucketFamilyId,
-        add_buckets: &BTreeSet<T::DistributionBucketId>,
-        remove_buckets: &BTreeSet<T::DistributionBucketId>,
+        add_buckets: &BTreeSet<T::DistributionBucketIndex>,
+        remove_buckets: &BTreeSet<T::DistributionBucketIndex>,
     ) -> DispatchResult {
         ensure!(
             !add_buckets.is_empty() || !remove_buckets.is_empty(),
@@ -3493,7 +3434,7 @@ impl<T: Trait> Module<T> {
 
         let bag = Self::ensure_bag_exists(bag_id)?;
 
-        let family = Self::ensure_distribution_bucket_family_exists(family_id)?;
+        Self::ensure_distribution_bucket_family_exists(family_id)?;
 
         let new_bucket_number = bag
             .distributed_by
@@ -3507,8 +3448,9 @@ impl<T: Trait> Module<T> {
             Error::<T>::MaxDistributionBucketNumberPerBagLimitExceeded
         );
 
-        for bucket_id in remove_buckets.iter() {
-            Self::ensure_distribution_bucket_exists(&family, bucket_id)?;
+        for bucket_index in remove_buckets.iter() {
+            let bucket_id = Self::create_distribution_bucket_id(*family_id, *bucket_index);
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bag.distributed_by.contains(&bucket_id),
@@ -3516,8 +3458,9 @@ impl<T: Trait> Module<T> {
             );
         }
 
-        for bucket_id in add_buckets.iter() {
-            let bucket = Self::ensure_distribution_bucket_exists(&family, bucket_id)?;
+        for bucket_index in add_buckets.iter() {
+            let bucket_id = Self::create_distribution_bucket_id(*family_id, *bucket_index);
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.accepting_new_bags,
@@ -3611,4 +3554,58 @@ impl<T: Trait> Module<T> {
 
         Ok(())
     }
+
+    // Add and/or remove distribution buckets assignments to bags.
+    fn change_bag_assignments(
+        add_buckets: &BTreeSet<DistributionBucketId<T>>,
+        remove_buckets: &BTreeSet<DistributionBucketId<T>>,
+    ) {
+        for bucket_id in add_buckets.iter() {
+            if DistributionBucketByFamilyIdById::<T>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+            ) {
+                DistributionBucketByFamilyIdById::<T>::mutate(
+                    bucket_id.distribution_bucket_family_id,
+                    bucket_id.distribution_bucket_index,
+                    |bucket| {
+                        bucket.register_bag_assignment();
+                    },
+                )
+            }
+        }
+
+        for bucket_id in remove_buckets.iter() {
+            if DistributionBucketByFamilyIdById::<T>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+            ) {
+                DistributionBucketByFamilyIdById::<T>::mutate(
+                    bucket_id.distribution_bucket_family_id,
+                    bucket_id.distribution_bucket_index,
+                    |bucket| {
+                        bucket.unregister_bag_assignment();
+                    },
+                )
+            }
+        }
+    }
+
+    // Checks distribution buckets for bag assignment number. Returns true only if all 'assigned_bags' are
+    // zero.
+    fn no_bags_assigned(family_id: &T::DistributionBucketFamilyId) -> bool {
+        DistributionBucketByFamilyIdById::<T>::iter_prefix_values(family_id)
+            .all(|b| b.no_bags_assigned())
+    }
+
+    // Creates distribution bucket ID from family ID and bucket index.
+    pub(crate) fn create_distribution_bucket_id(
+        distribution_bucket_family_id: T::DistributionBucketFamilyId,
+        distribution_bucket_index: T::DistributionBucketIndex,
+    ) -> DistributionBucketId<T> {
+        DistributionBucketId::<T> {
+            distribution_bucket_family_id,
+            distribution_bucket_index,
+        }
+    }
 }

+ 78 - 0
runtime-modules/storage/src/random_buckets/distribution_bucket_picker.rs

@@ -0,0 +1,78 @@
+#![warn(missing_docs)]
+
+use sp_std::cell::RefCell;
+use sp_std::collections::btree_set::BTreeSet;
+use sp_std::marker::PhantomData;
+use sp_std::vec::Vec;
+
+use crate::{DistributionBucketId, DynamicBagType, Module, Trait};
+
+pub(crate) use super::{RandomBucketIdIterator, SequentialBucketIdIterator};
+
+// Generates distribution bucket IDs to assign to a new dynamic bag.
+pub(crate) struct DistributionBucketPicker<T> {
+    trait_marker: PhantomData<T>,
+}
+
+impl<T: Trait> DistributionBucketPicker<T> {
+    // Get random distribution buckets from distribution bucket families using the dynamic bag
+    // creation policy.
+    pub(crate) fn pick_distribution_buckets(
+        bag_type: DynamicBagType,
+    ) -> BTreeSet<DistributionBucketId<T>> {
+        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
+
+        if creation_policy.no_distribution_buckets_required() {
+            return BTreeSet::new();
+        }
+
+        // Distribution bucket IDs accumulator.
+        let bucket_ids_cell = RefCell::new(BTreeSet::<T::DistributionBucketIndex>::new());
+
+        creation_policy
+            .families
+            .iter()
+            .filter_map(|(family_id, bucket_num)| {
+                Module::<T>::ensure_distribution_bucket_family_exists(family_id)
+                    .ok()
+                    .map(|fam| (family_id, fam, bucket_num))
+            })
+            .map(|(family_id, family, bucket_num)| {
+                RandomBucketIdIterator::<T, T::DistributionBucketIndex>::new(
+                    family.next_distribution_bucket_index,
+                )
+                .chain(
+                    SequentialBucketIdIterator::<T, T::DistributionBucketIndex>::new(
+                        family.next_distribution_bucket_index,
+                    ),
+                )
+                .filter(|bucket_idx| {
+                    let bucket_id = DistributionBucketId::<T> {
+                        distribution_bucket_family_id: *family_id,
+                        distribution_bucket_index: *bucket_idx,
+                    };
+
+                    Module::<T>::ensure_distribution_bucket_exists(&bucket_id)
+                        .ok()
+                        .map(|bucket| bucket.accepting_new_bags)
+                        .unwrap_or(false)
+                })
+                .filter(|bucket_idx| {
+                    let bucket_ids = bucket_ids_cell.borrow();
+
+                    // Skips the iteration on existing ID.
+                    !bucket_ids.contains(bucket_idx)
+                })
+                .map(|bucket_idx| DistributionBucketId::<T> {
+                    distribution_bucket_family_id: *family_id,
+                    distribution_bucket_index: bucket_idx,
+                })
+                .take(*bucket_num as usize)
+                .collect::<Vec<_>>()
+
+                // rename buckets
+            })
+            .flatten()
+            .collect::<BTreeSet<_>>()
+    }
+}

+ 131 - 0
runtime-modules/storage/src/random_buckets/mod.rs

@@ -0,0 +1,131 @@
+use frame_support::traits::{Get, Randomness};
+use sp_arithmetic::traits::{BaseArithmetic, One, Zero};
+use sp_runtime::traits::Bounded;
+use sp_runtime::SaturatedConversion;
+use sp_std::marker::PhantomData;
+
+use crate::{Module, Trait};
+
+pub(crate) mod distribution_bucket_picker;
+pub(crate) mod storage_bucket_picker;
+
+pub(crate) use distribution_bucket_picker::DistributionBucketPicker;
+pub(crate) use storage_bucket_picker::StorageBucketPicker;
+
+// A meta trait for defining generic bucket ID.
+pub(crate) trait BucketId:
+    Bounded + BaseArithmetic + From<u64> + Into<u64> + Clone + PartialOrd
+{
+}
+impl<T: Bounded + BaseArithmetic + From<u64> + Into<u64> + Clone + PartialOrd> BucketId for T {}
+
+// Iterator for random storage or distribution bucket IDs. It uses Substrate Randomness trait
+// (and possibly randomness_collective_flip pallet for implementation).
+// Its maximum iterations number is bounded.
+pub(crate) struct RandomBucketIdIterator<T: Trait, Id: BucketId> {
+    // Trait marker.
+    trait_marker: PhantomData<T>,
+
+    // Current Iterator step number.
+    current_iteration: u64,
+
+    // Maximum allowed iteration number.
+    max_iteration_number: u64,
+
+    // Current seed for the randomness generator.
+    current_seed: T::Hash,
+
+    // Next possible id for the buckets.
+    next_id: Id,
+}
+
+impl<T: Trait, Id: BucketId> Iterator for RandomBucketIdIterator<T, Id> {
+    type Item = Id;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Cannot create randomness in the initial block (Substrate error).
+        if <frame_system::Module<T>>::block_number() == Zero::zero() {
+            return None;
+        }
+
+        if self.current_iteration >= self.max_iteration_number {
+            return None;
+        }
+
+        let random_bucket_id = self.random_bucket_id();
+
+        self.current_iteration += 1;
+        self.current_seed = T::Randomness::random(self.current_seed.as_ref());
+
+        Some(random_bucket_id)
+    }
+}
+
+impl<T: Trait, Id: BucketId> RandomBucketIdIterator<T, Id> {
+    // Generate random storage or distribution bucket ID using next_id as an upper_bound.
+    // Deleted bucket IDs are included.
+    fn random_bucket_id(&self) -> Id {
+        let total_buckets_number: u64 = self.next_id.clone().into();
+
+        let random_bucket_id: Id = Module::<T>::random_index(
+            self.current_seed.as_ref(),
+            total_buckets_number.saturated_into(),
+        )
+        .saturated_into();
+
+        random_bucket_id
+    }
+
+    // Creates new iterator.
+    pub(crate) fn new(next_id: Id) -> Self {
+        let seed = Module::<T>::get_initial_random_seed();
+
+        Self {
+            current_iteration: 0,
+            max_iteration_number: T::MaxRandomIterationNumber::get(),
+            trait_marker: PhantomData,
+            current_seed: seed,
+            next_id,
+        }
+    }
+}
+
+// Iterator for sequential storage or distribution bucket IDs. It starts from the first possible storage bucket ID
+// (zero) and goes up to the last storage bucket IDs (next_storage_bucket_id - excluding).
+pub(crate) struct SequentialBucketIdIterator<T: Trait, Id: BucketId> {
+    // Trait marker.
+    trait_marker: PhantomData<T>,
+
+    // Bucket ID for the current iteration.
+    current_bucket_id: Id,
+
+    // Next possible id for the buckets.
+    next_id: Id,
+}
+
+impl<T: Trait, Id: BucketId> Iterator for SequentialBucketIdIterator<T, Id> {
+    type Item = Id;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.current_bucket_id >= self.next_id {
+            return None;
+        }
+
+        let result = self.current_bucket_id.clone();
+
+        self.current_bucket_id += One::one();
+
+        Some(result)
+    }
+}
+
+impl<T: Trait, Id: BucketId> SequentialBucketIdIterator<T, Id> {
+    // Creates new iterator.
+    pub(crate) fn new(next_id: Id) -> Self {
+        Self {
+            current_bucket_id: Zero::zero(),
+            trait_marker: PhantomData,
+            next_id,
+        }
+    }
+}

+ 65 - 0
runtime-modules/storage/src/random_buckets/storage_bucket_picker.rs

@@ -0,0 +1,65 @@
+#![warn(missing_docs)]
+
+use sp_std::cell::RefCell;
+use sp_std::collections::btree_set::BTreeSet;
+use sp_std::marker::PhantomData;
+
+pub(crate) use super::{RandomBucketIdIterator, SequentialBucketIdIterator};
+use crate::{DynamicBagType, Module, Trait};
+
+// Generates storage bucket IDs to assign to a new dynamic bag.
+pub(crate) struct StorageBucketPicker<T> {
+    trait_marker: PhantomData<T>,
+}
+
+impl<T: Trait> StorageBucketPicker<T> {
+    // Selects storage bucket ID sets to assign to the dynamic bag.
+    // At first, it tries to generate random bucket IDs. If acquired random IDs number is not enough
+    // it tries to get additional IDs starting from zero up to the total number of the possible IDs.
+    // The function filters deleted buckets and disabled buckets (accepting_new_bags == false)
+    // Total number of possible IDs is limited by the dynamic bag settings.
+    // Returns an accumulated bucket ID set or an empty set.
+    pub(crate) fn pick_storage_buckets(bag_type: DynamicBagType) -> BTreeSet<T::StorageBucketId> {
+        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
+
+        if creation_policy.no_storage_buckets_required() {
+            return BTreeSet::new();
+        }
+
+        let required_bucket_num = creation_policy.number_of_storage_buckets as usize;
+
+        // Storage bucket IDs accumulator.
+        let bucket_ids_cell = RefCell::new(BTreeSet::new());
+        let next_storage_bucket_id = Module::<T>::next_storage_bucket_id();
+        RandomBucketIdIterator::<T, T::StorageBucketId>::new(next_storage_bucket_id)
+            .chain(SequentialBucketIdIterator::<T, T::StorageBucketId>::new(
+                next_storage_bucket_id,
+            ))
+            .filter(Self::check_storage_bucket_is_valid_for_bag_assigning)
+            .filter(|bucket_id| {
+                let bucket_ids = bucket_ids_cell.borrow();
+
+                // Skips the iteration on existing ID.
+                !bucket_ids.contains(bucket_id)
+            })
+            .take(required_bucket_num)
+            .for_each(|bucket_id| {
+                let mut bucket_ids = bucket_ids_cell.borrow_mut();
+
+                bucket_ids.insert(bucket_id);
+            });
+
+        bucket_ids_cell.into_inner()
+    }
+
+    // Verifies storage bucket ID (non-deleted and accepting new bags).
+    pub(crate) fn check_storage_bucket_is_valid_for_bag_assigning(
+        bucket_id: &T::StorageBucketId,
+    ) -> bool {
+        // Check bucket for existence (return false if not). Check `accepting_new_bags`.
+        Module::<T>::ensure_storage_bucket_exists(bucket_id)
+            .ok()
+            .map(|bucket| bucket.accepting_new_bags)
+            .unwrap_or(false)
+    }
+}

+ 0 - 168
runtime-modules/storage/src/storage_bucket_picker.rs

@@ -1,168 +0,0 @@
-#![warn(missing_docs)]
-
-use frame_support::traits::{Get, Randomness};
-use sp_arithmetic::traits::{One, Zero};
-use sp_runtime::SaturatedConversion;
-use sp_std::cell::RefCell;
-use sp_std::collections::btree_set::BTreeSet;
-use sp_std::marker::PhantomData;
-
-use crate::{DynamicBagType, Module, Trait};
-
-// Generates storage bucket IDs to assign to a new dynamic bag.
-pub(crate) struct StorageBucketPicker<T> {
-    trait_marker: PhantomData<T>,
-}
-
-impl<T: Trait> StorageBucketPicker<T> {
-    // Selects storage bucket ID sets to assign to the storage bucket.
-    // At first, it tries to generate random bucket IDs. If acquired random IDs number is not enough
-    // it tries to get additional IDs starting from zero up to the total number of the possible IDs.
-    // The function filters deleted buckets and disabled buckets (accepting_new_bags == false)
-    // Total number of possible IDs is limited by the dynamic bag settings.
-    // Returns an accumulated bucket ID set or an empty set.
-    pub(crate) fn pick_storage_buckets(bag_type: DynamicBagType) -> BTreeSet<T::StorageBucketId> {
-        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
-
-        if creation_policy.no_storage_buckets_required() {
-            return BTreeSet::new();
-        }
-
-        let required_bucket_num = creation_policy.number_of_storage_buckets as usize;
-
-        // Storage bucket IDs accumulator.
-        let bucket_ids_cell = RefCell::new(BTreeSet::new());
-
-        RandomStorageBucketIdIterator::<T>::new()
-            .chain(SequentialStorageBucketIdIterator::<T>::new())
-            .filter(Self::check_storage_bucket_is_valid_for_bag_assigning)
-            .filter(|bucket_id| {
-                let bucket_ids = bucket_ids_cell.borrow();
-
-                // Skips the iteration on existing ID.
-                !bucket_ids.contains(bucket_id)
-            })
-            .take(required_bucket_num)
-            .for_each(|bucket_id| {
-                let mut bucket_ids = bucket_ids_cell.borrow_mut();
-
-                bucket_ids.insert(bucket_id);
-            });
-
-        bucket_ids_cell.into_inner()
-    }
-
-    // Verifies storage bucket ID (non-deleted and accepting new bags).
-    pub(crate) fn check_storage_bucket_is_valid_for_bag_assigning(
-        bucket_id: &T::StorageBucketId,
-    ) -> bool {
-        // Check bucket for existence (return false if not). Check `accepting_new_bags`.
-        Module::<T>::ensure_storage_bucket_exists(bucket_id)
-            .ok()
-            .map(|bucket| bucket.accepting_new_bags)
-            .unwrap_or(false)
-    }
-}
-
-// Iterator for random storage bucket IDs. It uses Substrate Randomness trait
-// (and possibly randomness_collective_flip pallet for implementation).
-// Its maximum iterations are bounded.
-pub(crate) struct RandomStorageBucketIdIterator<T: Trait> {
-    // Trait marker.
-    trait_marker: PhantomData<T>,
-
-    // Current Iterator step number.
-    current_iteration: u64,
-
-    // Maximum allowed iteration number.
-    max_iteration_number: u64,
-
-    // Current seed for the randomness generator.
-    current_seed: T::Hash,
-}
-
-impl<T: Trait> Iterator for RandomStorageBucketIdIterator<T> {
-    type Item = T::StorageBucketId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // Cannot create randomness in the initial block (Substrate error).
-        if <frame_system::Module<T>>::block_number() == Zero::zero() {
-            return None;
-        }
-
-        if self.current_iteration >= self.max_iteration_number {
-            return None;
-        }
-
-        let random_storage_bucket_id = self.random_storage_bucket_id();
-
-        self.current_iteration += 1;
-        self.current_seed = T::Randomness::random(self.current_seed.as_ref());
-
-        Some(random_storage_bucket_id)
-    }
-}
-
-impl<T: Trait> RandomStorageBucketIdIterator<T> {
-    // Generate random storage bucket ID using next_storage_bucket_id() as upper_bound.
-    // Deleted storage bucket ID are included.
-    fn random_storage_bucket_id(&self) -> T::StorageBucketId {
-        let total_buckets_number = Module::<T>::next_storage_bucket_id();
-
-        let random_bucket_id: T::StorageBucketId = Module::<T>::random_index(
-            self.current_seed.as_ref(),
-            total_buckets_number.saturated_into(),
-        )
-        .saturated_into();
-
-        random_bucket_id
-    }
-
-    // Creates new iterator.
-    pub(crate) fn new() -> Self {
-        let seed = Module::<T>::get_initial_random_seed();
-
-        Self {
-            current_iteration: 0,
-            max_iteration_number: T::MaxRandomIterationNumber::get(),
-            trait_marker: PhantomData,
-            current_seed: seed,
-        }
-    }
-}
-
-// Iterator for sequential storage bucket IDs. It starts from the first possible storage bucket ID
-// (zero) and goes up to the last storage bucket IDs (next_storage_bucket_id - excluding).
-pub(crate) struct SequentialStorageBucketIdIterator<T: Trait> {
-    // Trait marker.
-    trait_marker: PhantomData<T>,
-
-    // Storage bucket ID for the current iteration.
-    current_bucket_id: T::StorageBucketId,
-}
-
-impl<T: Trait> Iterator for SequentialStorageBucketIdIterator<T> {
-    type Item = T::StorageBucketId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.current_bucket_id >= Module::<T>::next_storage_bucket_id() {
-            return None;
-        }
-
-        let result = self.current_bucket_id;
-
-        self.current_bucket_id += One::one();
-
-        Some(result)
-    }
-}
-
-impl<T: Trait> SequentialStorageBucketIdIterator<T> {
-    // Creates new iterator.
-    pub(crate) fn new() -> Self {
-        Self {
-            current_bucket_id: Zero::zero(),
-            trait_marker: PhantomData,
-        }
-    }
-}

+ 109 - 96
runtime-modules/storage/src/tests/fixtures.rs

@@ -14,9 +14,9 @@ use crate::tests::mocks::{
     DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID,
 };
 use crate::{
-    BagId, Cid, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
-    DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, RawEvent, StaticBagId,
-    StorageBucketOperatorStatus, UploadParameters,
+    BagId, Cid, DataObjectCreationParameters, DataObjectStorage, DistributionBucket,
+    DistributionBucketId, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, RawEvent,
+    StaticBagId, StorageBucketOperatorStatus, UploadParameters,
 };
 
 // Recommendation from Parity on testing on_finalize
@@ -50,6 +50,7 @@ impl EventFixture {
             u64,
             u64,
             u64,
+            DistributionBucketId<Test>,
             u64,
         >,
     ) {
@@ -69,6 +70,7 @@ impl EventFixture {
             u64,
             u64,
             u64,
+            DistributionBucketId<Test>,
             u64,
         >,
     ) {
@@ -1295,7 +1297,8 @@ impl CreateDistributionBucketFixture {
     }
 
     pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option<u64> {
-        let next_bucket_id = Storage::next_distribution_bucket_id();
+        let next_bucket_index = Storage::distribution_bucket_family_by_id(self.family_id)
+            .next_distribution_bucket_index;
         let actual_result = Storage::create_distribution_bucket(
             self.origin.clone().into(),
             self.family_id,
@@ -1305,24 +1308,27 @@ impl CreateDistributionBucketFixture {
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            assert_eq!(next_bucket_id + 1, Storage::next_distribution_bucket_id());
-
-            let family: DistributionBucketFamily<Test> =
-                Storage::distribution_bucket_family_by_id(self.family_id);
-
-            assert!(family.distribution_buckets.contains_key(&next_bucket_id));
             assert_eq!(
-                family
-                    .distribution_buckets
-                    .get(&next_bucket_id)
-                    .unwrap()
-                    .accepting_new_bags,
-                self.accept_new_bags
+                next_bucket_index + 1,
+                Storage::distribution_bucket_family_by_id(self.family_id)
+                    .next_distribution_bucket_index
             );
 
-            Some(next_bucket_id)
+            let bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    next_bucket_index,
+                );
+
+            assert_eq!(bucket.accepting_new_bags, self.accept_new_bags);
+
+            Some(next_bucket_index)
         } else {
-            assert_eq!(next_bucket_id, Storage::next_distribution_bucket_id());
+            assert_eq!(
+                next_bucket_index,
+                Storage::distribution_bucket_family_by_id(self.family_id)
+                    .next_distribution_bucket_index
+            );
 
             None
         }
@@ -1332,7 +1338,7 @@ impl CreateDistributionBucketFixture {
 pub struct UpdateDistributionBucketStatusFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
     new_status: bool,
 }
 
@@ -1341,13 +1347,13 @@ impl UpdateDistributionBucketStatusFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
             new_status: false,
         }
     }
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1367,8 +1373,7 @@ impl UpdateDistributionBucketStatusFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::update_distribution_bucket_status(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
             self.new_status,
         );
 
@@ -1379,7 +1384,7 @@ impl UpdateDistributionBucketStatusFixture {
 pub struct DeleteDistributionBucketFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
 }
 
 impl DeleteDistributionBucketFixture {
@@ -1387,13 +1392,13 @@ impl DeleteDistributionBucketFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
         }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1409,8 +1414,7 @@ impl DeleteDistributionBucketFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::delete_distribution_bucket(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
         );
 
         assert_eq!(actual_result, expected_result);
@@ -1421,8 +1425,8 @@ pub struct UpdateDistributionBucketForBagsFixture {
     origin: RawOrigin<u64>,
     bag_id: BagId<Test>,
     family_id: u64,
-    add_bucket_ids: BTreeSet<u64>,
-    remove_bucket_ids: BTreeSet<u64>,
+    add_bucket_indices: BTreeSet<u64>,
+    remove_bucket_indices: BTreeSet<u64>,
 }
 
 impl UpdateDistributionBucketForBagsFixture {
@@ -1431,8 +1435,8 @@ impl UpdateDistributionBucketForBagsFixture {
             origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
             bag_id: Default::default(),
             family_id: Default::default(),
-            add_bucket_ids: Default::default(),
-            remove_bucket_ids: Default::default(),
+            add_bucket_indices: Default::default(),
+            remove_bucket_indices: Default::default(),
         }
     }
 
@@ -1440,16 +1444,16 @@ impl UpdateDistributionBucketForBagsFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_add_bucket_ids(self, add_bucket_ids: BTreeSet<u64>) -> Self {
+    pub fn with_add_bucket_indices(self, add_bucket_indices: BTreeSet<u64>) -> Self {
         Self {
-            add_bucket_ids,
+            add_bucket_indices,
             ..self
         }
     }
 
-    pub fn with_remove_bucket_ids(self, remove_bucket_ids: BTreeSet<u64>) -> Self {
+    pub fn with_remove_bucket_indices(self, remove_bucket_indices: BTreeSet<u64>) -> Self {
         Self {
-            remove_bucket_ids,
+            remove_bucket_indices,
             ..self
         }
     }
@@ -1467,8 +1471,8 @@ impl UpdateDistributionBucketForBagsFixture {
             self.origin.clone().into(),
             self.bag_id.clone(),
             self.family_id,
-            self.add_bucket_ids.clone(),
-            self.remove_bucket_ids.clone(),
+            self.add_bucket_indices.clone(),
+            self.remove_bucket_indices.clone(),
         );
 
         assert_eq!(actual_result, expected_result);
@@ -1520,7 +1524,7 @@ impl UpdateDistributionBucketsPerBagLimitFixture {
 pub struct UpdateDistributionBucketModeFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
     distributing: bool,
 }
 
@@ -1529,13 +1533,13 @@ impl UpdateDistributionBucketModeFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
             distributing: true,
         }
     }
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1558,8 +1562,7 @@ impl UpdateDistributionBucketModeFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::update_distribution_bucket_mode(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
             self.distributing,
         );
 
@@ -1621,7 +1624,7 @@ pub struct InviteDistributionBucketOperatorFixture {
     origin: RawOrigin<u64>,
     operator_worker_id: u64,
     family_id: u64,
-    bucket_id: u64,
+    bucket_index: u64,
 }
 
 impl InviteDistributionBucketOperatorFixture {
@@ -1629,7 +1632,7 @@ impl InviteDistributionBucketOperatorFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
             operator_worker_id: DEFAULT_WORKER_ID,
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
         }
     }
@@ -1645,8 +1648,11 @@ impl InviteDistributionBucketOperatorFixture {
         }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1656,19 +1662,18 @@ impl InviteDistributionBucketOperatorFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::invite_distribution_bucket_operator(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(new_bucket
                 .pending_invitations
@@ -1679,7 +1684,7 @@ impl InviteDistributionBucketOperatorFixture {
 
 pub struct CancelDistributionBucketInvitationFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     operator_worker_id: u64,
 }
@@ -1688,7 +1693,7 @@ impl CancelDistributionBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             operator_worker_id: Default::default(),
         }
@@ -1698,8 +1703,11 @@ impl CancelDistributionBucketInvitationFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1716,19 +1724,18 @@ impl CancelDistributionBucketInvitationFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::cancel_distribution_bucket_operator_invite(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket
                 .pending_invitations
@@ -1739,7 +1746,7 @@ impl CancelDistributionBucketInvitationFixture {
 
 pub struct AcceptDistributionBucketInvitationFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     worker_id: u64,
 }
@@ -1748,7 +1755,7 @@ impl AcceptDistributionBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             worker_id: Default::default(),
         }
@@ -1758,8 +1765,11 @@ impl AcceptDistributionBucketInvitationFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1774,18 +1784,17 @@ impl AcceptDistributionBucketInvitationFixture {
         let actual_result = Storage::accept_distribution_bucket_invitation(
             self.origin.clone().into(),
             self.worker_id,
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket.pending_invitations.contains(&self.worker_id));
 
@@ -1796,7 +1805,7 @@ impl AcceptDistributionBucketInvitationFixture {
 
 pub struct SetDistributionBucketMetadataFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     worker_id: u64,
     metadata: Vec<u8>,
@@ -1806,7 +1815,7 @@ impl SetDistributionBucketMetadataFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             worker_id: Default::default(),
             metadata: Default::default(),
@@ -1821,8 +1830,11 @@ impl SetDistributionBucketMetadataFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1837,8 +1849,7 @@ impl SetDistributionBucketMetadataFixture {
         let actual_result = Storage::set_distribution_operator_metadata(
             self.origin.clone().into(),
             self.worker_id,
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.metadata.clone(),
         );
 
@@ -1848,7 +1859,7 @@ impl SetDistributionBucketMetadataFixture {
 
 pub struct RemoveDistributionBucketOperatorFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     operator_worker_id: u64,
 }
@@ -1857,7 +1868,7 @@ impl RemoveDistributionBucketOperatorFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             operator_worker_id: Default::default(),
         }
@@ -1867,8 +1878,11 @@ impl RemoveDistributionBucketOperatorFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1885,18 +1899,17 @@ impl RemoveDistributionBucketOperatorFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::remove_distribution_bucket_operator(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket.operators.contains(&self.operator_worker_id));
         }

+ 2 - 4
runtime-modules/storage/src/tests/mocks.rs

@@ -50,8 +50,7 @@ impl balances::Trait for Test {
 }
 
 parameter_types! {
-    pub const MaxDistributionBucketFamilyNumber: u64 = 4;
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 10;
+    pub const MaxDistributionBucketFamilyNumber: u64 = 6;
     pub const DataObjectDeletionPrize: u64 = 10;
     pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage
     pub const BlacklistSizeLimit: u64 = 1;
@@ -80,7 +79,7 @@ impl crate::Trait for Test {
     type Event = TestEvent;
     type DataObjectId = u64;
     type StorageBucketId = u64;
-    type DistributionBucketId = u64;
+    type DistributionBucketIndex = u64;
     type DistributionBucketFamilyId = u64;
     type DistributionBucketOperatorId = u64;
     type ChannelId = u64;
@@ -96,7 +95,6 @@ impl crate::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =
         MaxNumberOfPendingInvitationsPerDistributionBucket;

+ 144 - 223
runtime-modules/storage/src/tests/mod.rs

@@ -15,20 +15,20 @@ use common::working_group::WorkingGroup;
 
 use crate::{
     BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
-    DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error,
-    ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus, StorageTreasury,
-    UploadParameters, Voucher,
+    DistributionBucketId, DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId,
+    DynamicBagType, Error, ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus,
+    StorageTreasury, UploadParameters, Voucher,
 };
 
 use mocks::{
     build_test_externalities, Balances, DataObjectDeletionPrize,
     DefaultChannelDynamicBagNumberOfStorageBuckets, DefaultMemberDynamicBagNumberOfStorageBuckets,
     InitialStorageBucketsNumberForDynamicBag, MaxDataObjectSize, MaxDistributionBucketFamilyNumber,
-    MaxDistributionBucketNumberPerFamily, MaxRandomIterationNumber, Storage, Test,
-    ANOTHER_DISTRIBUTION_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID,
-    DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DEFAULT_DISTRIBUTION_PROVIDER_ID,
-    DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID, DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
+    MaxRandomIterationNumber, Storage, Test, ANOTHER_DISTRIBUTION_PROVIDER_ID,
+    ANOTHER_STORAGE_PROVIDER_ID, DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID,
+    DEFAULT_DISTRIBUTION_PROVIDER_ID, DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID,
+    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, DEFAULT_STORAGE_PROVIDER_ID,
+    DISTRIBUTION_WG_LEADER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
 use fixtures::*;
@@ -3312,12 +3312,12 @@ fn test_storage_bucket_iterators() {
         let buckets_number = 5;
         create_storage_buckets(buckets_number);
 
-        use crate::storage_bucket_picker::{
-            RandomStorageBucketIdIterator as Rand, SequentialStorageBucketIdIterator as Seq,
+        use crate::random_buckets::storage_bucket_picker::{
+            RandomBucketIdIterator as Rand, SequentialBucketIdIterator as Seq,
         };
 
-        let ids = Rand::<Test>::new()
-            .chain(Seq::<Test>::new())
+        let ids = Rand::<Test, u64>::new(Storage::next_storage_bucket_id())
+            .chain(Seq::<Test, u64>::new(Storage::next_storage_bucket_id()))
             .collect::<Vec<_>>();
 
         // Check combined iterator length.
@@ -3521,15 +3521,19 @@ fn delete_distribution_bucket_family_fails_with_assgined_bags() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
@@ -3620,7 +3624,7 @@ fn create_distribution_bucket_succeeded() {
         let starting_block = 1;
         run_to_block(starting_block);
 
-        let accept_new_bags = false;
+        let accept_new_bags = true;
 
         let family_id = CreateDistributionBucketFamilyFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
@@ -3634,14 +3638,14 @@ fn create_distribution_bucket_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        assert!(Storage::distribution_bucket_family_by_id(family_id)
-            .distribution_buckets
-            .contains_key(&bucket_id));
+        assert!(
+            crate::DistributionBucketByFamilyIdById::<Test>::contains_key(&family_id, &bucket_id)
+        );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketCreated(
             family_id,
             accept_new_bags,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_id),
         ));
     });
 }
@@ -3666,22 +3670,6 @@ fn create_distribution_bucket_fails_with_non_existing_family() {
     });
 }
 
-#[test]
-fn create_distribution_bucket_fails_with_exceeding_max_bucket_number() {
-    build_test_externalities().execute_with(|| {
-        let (family_id, _) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-
-        CreateDistributionBucketFixture::default()
-            .with_family_id(family_id)
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::MaxDistributionBucketNumberPerFamilyLimitExceeded.into(),
-            ));
-    });
-}
-
 #[test]
 fn update_distribution_bucket_status_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3693,7 +3681,7 @@ fn update_distribution_bucket_status_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -3702,22 +3690,20 @@ fn update_distribution_bucket_status_succeeded() {
         let new_status = true;
         UpdateDistributionBucketStatusFixture::default()
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_status)
             .call_and_assert(Ok(()));
 
         assert_eq!(
-            Storage::distribution_bucket_family_by_id(family_id)
-                .distribution_buckets
-                .get(&bucket_id)
-                .unwrap()
+            Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_index)
                 .accepting_new_bags,
             new_status
         );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketStatusUpdated(
-            family_id, bucket_id, new_status,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
+            new_status,
         ));
     });
 }
@@ -3746,17 +3732,6 @@ fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket() {
     });
 }
 
-#[test]
-fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        UpdateDistributionBucketStatusFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn delete_distribution_bucket_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3768,20 +3743,20 @@ fn delete_distribution_bucket_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketDeleted(
-            family_id, bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
         ));
     });
 }
@@ -3799,27 +3774,31 @@ fn delete_distribution_bucket_fails_with_assgined_bags() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_accept_new_bags(true)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_indices = BTreeSet::from_iter(vec![bucket_index]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_indices.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_indices
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::DistributionBucketIsBoundToBag.into()));
@@ -3834,7 +3813,7 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -3842,7 +3821,7 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID)
             .call_and_assert(Ok(()));
@@ -3850,12 +3829,12 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID)
             .call_and_assert(Ok(()));
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::DistributionProviderOperatorSet.into()));
@@ -3888,17 +3867,6 @@ fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket() {
     });
 }
 
-#[test]
-fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        DeleteDistributionBucketFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn update_distribution_buckets_for_bags_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3922,22 +3890,26 @@ fn update_distribution_buckets_for_bags_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketsUpdatedForBag(
             bag_id,
             family_id,
-            add_buckets,
+            add_buckets_ids,
             BTreeSet::new(),
         ));
     });
@@ -3963,21 +3935,24 @@ fn update_distribution_buckets_for_bags_succeeded_with_additioonal_checks_on_add
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
         // Add check
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
-        let family = Storage::distribution_bucket_family_by_id(family_id);
-        let bucket = family.distribution_buckets.get(&bucket_id).unwrap();
+        let bucket = Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_id);
         assert_eq!(bucket.assigned_bags, 1);
 
         // ******
@@ -3986,14 +3961,13 @@ fn update_distribution_buckets_for_bags_succeeded_with_additioonal_checks_on_add
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_remove_bucket_ids(add_buckets.clone())
+            .with_remove_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by.len(), 0);
 
-        let family = Storage::distribution_bucket_family_by_id(family_id);
-        let bucket = family.distribution_buckets.get(&bucket_id).unwrap();
+        let bucket = Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_id);
         assert_eq!(bucket.assigned_bags, 0);
     });
 }
@@ -4021,7 +3995,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
             .with_bag_id(bag_id.clone())
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::DynamicBagDoesntExist.into()));
     });
 }
@@ -4052,7 +4026,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_accepting_new_bags_bucket
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
             .with_bag_id(bag_id.clone())
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets.clone())
             .call_and_assert(Err(
                 Error::<Test>::DistributionBucketDoesntAcceptNewBags.into()
             ));
@@ -4100,7 +4074,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_distribution_buc
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(buckets.clone())
+            .with_add_bucket_indices(buckets.clone())
             .call_and_assert(Err(Error::<Test>::DistributionBucketDoesntExist.into()));
 
         // Invalid removed bucket ID.
@@ -4108,7 +4082,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_distribution_buc
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_remove_bucket_ids(buckets.clone())
+            .with_remove_bucket_indices(buckets.clone())
             .call_and_assert(Err(Error::<Test>::DistributionBucketDoesntExist.into()));
     });
 }
@@ -4181,7 +4155,7 @@ fn update_distribution_bucket_mode_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4190,23 +4164,19 @@ fn update_distribution_bucket_mode_succeeded() {
         let distributing = false;
         UpdateDistributionBucketModeFixture::default()
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_distributing(distributing)
             .call_and_assert(Ok(()));
 
         assert_eq!(
-            Storage::distribution_bucket_family_by_id(family_id)
-                .distribution_buckets
-                .get(&bucket_id)
-                .unwrap()
+            Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_index)
                 .accepting_new_bags,
             distributing
         );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketModeUpdated(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             distributing,
         ));
     });
@@ -4236,17 +4206,6 @@ fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket() {
     });
 }
 
-#[test]
-fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        UpdateDistributionBucketModeFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn update_families_in_dynamic_bag_creation_policy_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4306,7 +4265,9 @@ fn update_families_in_dynamic_bag_creation_policy_fails_with_invalid_family_id()
     });
 }
 
-fn create_distribution_bucket_family_with_buckets(bucket_number: u64) -> (u64, Vec<u64>) {
+fn create_distribution_bucket_family_with_buckets(
+    bucket_number: u64,
+) -> (u64, Vec<DistributionBucketId<Test>>) {
     let family_id = CreateDistributionBucketFamilyFixture::default()
         .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
         .call_and_assert(Ok(()))
@@ -4315,12 +4276,14 @@ fn create_distribution_bucket_family_with_buckets(bucket_number: u64) -> (u64, V
     let bucket_ids = repeat(family_id)
         .take(bucket_number as usize)
         .map(|fam_id| {
-            CreateDistributionBucketFixture::default()
+            let bucket_index = CreateDistributionBucketFixture::default()
                 .with_family_id(fam_id)
                 .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
                 .with_accept_new_bags(true)
                 .call_and_assert(Ok(()))
-                .unwrap()
+                .unwrap();
+
+            Storage::create_distribution_bucket_id(fam_id, bucket_index)
         })
         .collect::<Vec<_>>();
 
@@ -4335,24 +4298,40 @@ fn distribution_bucket_family_pick_during_dynamic_bag_creation_succeeded() {
         run_to_block(starting_block);
 
         let dynamic_bag_type = DynamicBagType::Channel;
+        let buckets_number = 10;
         let new_bucket_number = 5;
 
-        let (family_id1, bucket_ids1) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-        let (family_id2, bucket_ids2) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-        let (family_id3, _) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
+        let (family_id1, bucket_ids1) =
+            create_distribution_bucket_family_with_buckets(buckets_number);
+        let (family_id2, bucket_ids2) =
+            create_distribution_bucket_family_with_buckets(buckets_number);
+        let (family_id3, _) = create_distribution_bucket_family_with_buckets(buckets_number);
         let (family_id4, _) = create_distribution_bucket_family_with_buckets(0);
+        let (family_id5, bucket_id5) = create_distribution_bucket_family_with_buckets(1);
+        let (family_id6, bucket_id6) = create_distribution_bucket_family_with_buckets(1);
+
+        let deleted_bucket_id = bucket_id5[0].clone();
+        DeleteDistributionBucketFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_family_id(deleted_bucket_id.distribution_bucket_family_id)
+            .with_bucket_index(deleted_bucket_id.distribution_bucket_index)
+            .call_and_assert(Ok(()));
+
+        let disabled_bucket_id = bucket_id6[0].clone();
+        UpdateDistributionBucketStatusFixture::default()
+            .with_new_status(false)
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_family_id(disabled_bucket_id.distribution_bucket_family_id)
+            .with_bucket_index(disabled_bucket_id.distribution_bucket_index)
+            .call_and_assert(Ok(()));
 
         let families = BTreeMap::from_iter(vec![
             (family_id1, new_bucket_number),
             (family_id2, new_bucket_number),
             (family_id3, 0),
             (family_id4, new_bucket_number),
+            (family_id5, new_bucket_number),
+            (family_id6, new_bucket_number),
         ]);
 
         UpdateFamiliesInDynamicBagCreationPolicyFixture::default()
@@ -4364,6 +4343,8 @@ fn distribution_bucket_family_pick_during_dynamic_bag_creation_succeeded() {
         let picked_bucket_ids =
             Storage::pick_distribution_buckets_for_dynamic_bag(dynamic_bag_type);
 
+        println!("{:?}", picked_bucket_ids);
+
         assert_eq!(picked_bucket_ids.len(), (new_bucket_number * 2) as usize); // buckets from two families
 
         let total_ids1 = BTreeSet::from_iter(
@@ -4396,7 +4377,7 @@ fn invite_distribution_bucket_operator_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4404,14 +4385,13 @@ fn invite_distribution_bucket_operator_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorInvited(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             provider_id,
         ));
     });
@@ -4453,7 +4433,7 @@ fn invite_distribution_bucket_operator_fails_with_non_missing_invitation() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4461,14 +4441,14 @@ fn invite_distribution_bucket_operator_fails_with_non_missing_invitation() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Err(
@@ -4488,7 +4468,7 @@ fn invite_distribution_bucket_operator_fails_with_exceeding_the_limit_of_pending
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4496,14 +4476,14 @@ fn invite_distribution_bucket_operator_fails_with_exceeding_the_limit_of_pending
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(another_worker_id)
             .call_and_assert(Err(
@@ -4523,7 +4503,7 @@ fn invite_distribution_bucket_operator_fails_with_already_set_operator() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4531,21 +4511,21 @@ fn invite_distribution_bucket_operator_fails_with_already_set_operator() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Err(Error::<Test>::DistributionProviderOperatorSet.into()));
@@ -4562,7 +4542,7 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4570,7 +4550,7 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invalid_provider_id)
             .call_and_assert(Err(
@@ -4579,17 +4559,6 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
     });
 }
 
-#[test]
-fn invite_distribution_bucket_operator_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        CancelDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn cancel_distribution_bucket_operator_invite_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4603,7 +4572,7 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4611,7 +4580,7 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4619,13 +4588,12 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
         CancelDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationCancelled(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             provider_id,
         ));
     });
@@ -4665,7 +4633,7 @@ fn cancel_distribution_bucket_operator_invite_fails_with_non_invited_distributio
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4674,22 +4642,11 @@ fn cancel_distribution_bucket_operator_invite_fails_with_non_invited_distributio
         CancelDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(Error::<Test>::NoDistributionBucketInvitation.into()));
     });
 }
 
-#[test]
-fn cancel_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        CancelDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn accept_distribution_bucket_operator_invite_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4703,7 +4660,7 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4711,7 +4668,7 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4719,14 +4676,13 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationAccepted(
             provider_id,
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
         ));
     });
 }
@@ -4765,7 +4721,7 @@ fn accept_distribution_bucket_operator_invite_fails_with_non_invited_distributio
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4774,22 +4730,11 @@ fn accept_distribution_bucket_operator_invite_fails_with_non_invited_distributio
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(Error::<Test>::NoDistributionBucketInvitation.into()));
     });
 }
 
-#[test]
-fn accept_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        AcceptDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn set_distribution_operator_metadata_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4804,7 +4749,7 @@ fn set_distribution_operator_metadata_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4812,7 +4757,7 @@ fn set_distribution_operator_metadata_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4820,22 +4765,21 @@ fn set_distribution_operator_metadata_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         SetDistributionBucketMetadataFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .with_metadata(metadata.clone())
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketMetadataSet(
             provider_id,
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             metadata,
         ));
     });
@@ -4875,7 +4819,7 @@ fn set_distribution_operator_metadata_fails_with_non_distribution_provider() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4884,24 +4828,13 @@ fn set_distribution_operator_metadata_fails_with_non_distribution_provider() {
         SetDistributionBucketMetadataFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),
             ));
     });
 }
 
-#[test]
-fn set_distribution_operator_metadata_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        SetDistributionBucketMetadataFixture::default()
-            .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn remove_distribution_bucket_operator_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4915,7 +4848,7 @@ fn remove_distribution_bucket_operator_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4923,7 +4856,7 @@ fn remove_distribution_bucket_operator_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
@@ -4931,20 +4864,19 @@ fn remove_distribution_bucket_operator_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(operator_id)
             .call_and_assert(Ok(()));
 
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorRemoved(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             operator_id,
         ));
     });
@@ -4959,17 +4891,6 @@ fn remove_distribution_bucket_operator_fails_with_non_leader_origin() {
     });
 }
 
-#[test]
-fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        RemoveDistributionBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket() {
     build_test_externalities().execute_with(|| {
@@ -4995,7 +4916,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -5004,7 +4925,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),
@@ -5012,7 +4933,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
@@ -5020,7 +4941,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),

+ 3 - 0
runtime/src/integration/content_directory.rs

@@ -36,4 +36,7 @@ impl content::ContentActorAuthenticator for Runtime {
     fn is_valid_curator_id(curator_id: &Self::CuratorId) -> bool {
         ContentWorkingGroup::ensure_worker_exists(curator_id).is_ok()
     }
+    fn validate_member_id(member_id: &Self::MemberId) -> bool {
+        membership::Module::<Runtime>::ensure_membership(*member_id).is_ok()
+    }
 }

+ 1 - 3
runtime/src/lib.rs

@@ -670,7 +670,6 @@ parameter_types! {
 }
 
 parameter_types! {
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 500;
     pub const MaxDistributionBucketFamilyNumber: u64 = 200;
     pub const DataObjectDeletionPrize: Balance = 1; //TODO: Change during Olympia release
     pub const BlacklistSizeLimit: u64 = 10000; //TODO: adjust value
@@ -690,7 +689,7 @@ impl storage::Trait for Runtime {
     type Event = Event;
     type DataObjectId = DataObjectId;
     type StorageBucketId = StorageBucketId;
-    type DistributionBucketId = DistributionBucketId;
+    type DistributionBucketIndex = DistributionBucketIndex;
     type DistributionBucketFamilyId = DistributionBucketFamilyId;
     type ChannelId = ChannelId;
     type DataObjectDeletionPrize = DataObjectDeletionPrize;
@@ -705,7 +704,6 @@ impl storage::Trait for Runtime {
     type Randomness = RandomnessCollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type DistributionBucketOperatorId = DistributionBucketOperatorId;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =

+ 3 - 2
runtime/src/primitives.rs

@@ -95,8 +95,9 @@ pub type DataObjectId = u64;
 /// Represent a storage bucket from the storage pallet.
 pub type StorageBucketId = u64;
 
-/// Represent a distribution bucket from the storage pallet.
-pub type DistributionBucketId = u64;
+/// Represent a distribution bucket index within the distribution bucket family from the
+/// storage pallet.
+pub type DistributionBucketIndex = u64;
 
 /// Represent a distribution bucket family from the storage pallet.
 pub type DistributionBucketFamilyId = u64;

+ 2 - 2
scripts/cargo-build.sh

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
-cargo +nightly-2021-03-24 build --release
+cargo +nightly-2021-02-20 build --release

+ 2 - 2
scripts/cargo-tests-with-networking.sh

@@ -1,7 +1,7 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running all cargo tests'
-cargo +nightly-2021-03-24 test --release --all -- --ignored
+cargo +nightly-2021-02-20 test --release --all -- --ignored

+ 1 - 1
scripts/raspberry-cross-build.sh

@@ -9,7 +9,7 @@
 export WORKSPACE_ROOT=`cargo metadata --offline --no-deps --format-version 1 | jq .workspace_root -r`
 
 docker run \
-    -e WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 \
+    -e WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 \
     --volume ${WORKSPACE_ROOT}/:/home/cross/project \
     --volume ${HOME}/.cargo/registry:/home/cross/.cargo/registry \
     joystream/rust-raspberry \

+ 4 - 4
scripts/run-dev-chain.sh

@@ -1,13 +1,13 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 # Build release binary
-cargo +nightly-2021-03-24 build --release
+cargo +nightly-2021-02-20 build --release
 
 # Purge existing local chain
-yes | cargo +nightly-2021-03-24 run --release -- purge-chain --dev
+yes | cargo +nightly-2021-02-20 run --release -- purge-chain --dev
 
 # Run local development chain -
 # No need to specify `-p joystream-node` it is the default bin crate in the cargo workspace
-cargo +nightly-2021-03-24 run --release -- --dev
+cargo +nightly-2021-02-20 run --release -- --dev

+ 2 - 2
setup.sh

@@ -27,8 +27,8 @@ curl https://getsubstrate.io -sSf | bash -s -- --fast
 
 source ~/.cargo/env
 
-rustup install nightly-2021-03-24
-rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
+rustup install nightly-2021-02-20
+rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
 
 rustup component add rustfmt clippy
 

+ 15 - 11
types/augment/all/defs.json

@@ -93,8 +93,6 @@
     },
     "WorkingGroup": {
         "_enum": [
-            "_Reserved0",
-            "_Reserved1",
             "Storage",
             "Content",
             "OperationsAlpha",
@@ -542,8 +540,7 @@
     "StorageBucket": {
         "operator_status": "StorageBucketOperatorStatus",
         "accepting_new_bags": "bool",
-        "voucher": "Voucher",
-        "metadata": "Bytes"
+        "voucher": "Voucher"
     },
     "StaticBagId": {
         "_enum": {
@@ -602,7 +599,11 @@
         "size": "u64",
         "ipfsContentId": "Bytes"
     },
-    "DistributionBucketId": "u64",
+    "DistributionBucketId": {
+        "distribution_bucket_family_id": "DistributionBucketFamilyId",
+        "distribution_bucket_index": "DistributionBucketIndex"
+    },
+    "DistributionBucketIndex": "u64",
     "DistributionBucketFamilyId": "u64",
     "DistributionBucket": {
         "accepting_new_bags": "bool",
@@ -612,10 +613,10 @@
         "assigned_bags": "u64"
     },
     "DistributionBucketFamily": {
-        "distribution_buckets": "BTreeMap<DistributionBucketId,DistributionBucket>"
+        "next_distribution_bucket_index": "DistributionBucketIndex"
     },
     "DataObjectIdMap": "BTreeMap<DataObjectId,DataObject>",
-    "DistributionBucketIdSet": "BTreeSet<DistributionBucketId>",
+    "DistributionBucketIndexSet": "BTreeSet<DistributionBucketIndex>",
     "DynamicBagCreationPolicyDistributorFamiliesMap": "BTreeMap<DistributionBucketFamilyId,u32>",
     "ProposalId": "u32",
     "ProposalStatus": {
@@ -774,7 +775,8 @@
         "_enum": {
             "Curator": "(CuratorGroupId,CuratorId)",
             "Member": "MemberId",
-            "Lead": "Null"
+            "Lead": "Null",
+            "Collaborator": "MemberId"
         }
     },
     "StorageAssets": {
@@ -786,7 +788,7 @@
         "num_videos": "u64",
         "is_censored": "bool",
         "reward_account": "Option<GenericAccountId>",
-        "deletion_prize_source_account_id": "GenericAccountId"
+        "collaborators": "BTreeSet<MemberId>"
     },
     "ChannelOwner": {
         "_enum": {
@@ -805,13 +807,15 @@
     "ChannelCreationParameters": {
         "assets": "Option<StorageAssets>",
         "meta": "Option<Bytes>",
-        "reward_account": "Option<GenericAccountId>"
+        "reward_account": "Option<GenericAccountId>",
+        "collaborators": "BTreeSet<MemberId>"
     },
     "ChannelUpdateParameters": {
         "assets_to_upload": "Option<StorageAssets>",
         "new_meta": "Option<Bytes>",
         "reward_account": "Option<Option<GenericAccountId>>",
-        "assets_to_remove": "BTreeSet<DataObjectId>"
+        "assets_to_remove": "BTreeSet<DataObjectId>",
+        "collaborators": "Option<BTreeSet<MemberId>>"
     },
     "ChannelOwnershipTransferRequestId": "u64",
     "ChannelOwnershipTransferRequest": {

+ 15 - 8
types/augment/all/types.ts

@@ -210,7 +210,7 @@ export interface Channel extends Struct {
   readonly num_videos: u64;
   readonly is_censored: bool;
   readonly reward_account: Option<GenericAccountId>;
-  readonly deletion_prize_source_account_id: GenericAccountId;
+  readonly collaborators: BTreeSet<MemberId>;
 }
 
 /** @name ChannelCategory */
@@ -237,6 +237,7 @@ export interface ChannelCreationParameters extends Struct {
   readonly assets: Option<StorageAssets>;
   readonly meta: Option<Bytes>;
   readonly reward_account: Option<GenericAccountId>;
+  readonly collaborators: BTreeSet<MemberId>;
 }
 
 /** @name ChannelCurationStatus */
@@ -273,6 +274,7 @@ export interface ChannelUpdateParameters extends Struct {
   readonly new_meta: Option<Bytes>;
   readonly reward_account: Option<Option<GenericAccountId>>;
   readonly assets_to_remove: BTreeSet<DataObjectId>;
+  readonly collaborators: Option<BTreeSet<MemberId>>;
 }
 
 /** @name ChildPositionInParentCategory */
@@ -309,6 +311,8 @@ export interface ContentActor extends Enum {
   readonly isMember: boolean;
   readonly asMember: MemberId;
   readonly isLead: boolean;
+  readonly isCollaborator: boolean;
+  readonly asCollaborator: MemberId;
 }
 
 /** @name ContentId */
@@ -434,17 +438,23 @@ export interface DistributionBucket extends Struct {
 
 /** @name DistributionBucketFamily */
 export interface DistributionBucketFamily extends Struct {
-  readonly distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>;
+  readonly next_distribution_bucket_index: DistributionBucketIndex;
 }
 
 /** @name DistributionBucketFamilyId */
 export interface DistributionBucketFamilyId extends u64 {}
 
 /** @name DistributionBucketId */
-export interface DistributionBucketId extends u64 {}
+export interface DistributionBucketId extends Struct {
+  readonly distribution_bucket_family_id: DistributionBucketFamilyId;
+  readonly distribution_bucket_index: DistributionBucketIndex;
+}
+
+/** @name DistributionBucketIndex */
+export interface DistributionBucketIndex extends u64 {}
 
-/** @name DistributionBucketIdSet */
-export interface DistributionBucketIdSet extends BTreeSet<DistributionBucketId> {}
+/** @name DistributionBucketIndexSet */
+export interface DistributionBucketIndexSet extends BTreeSet<DistributionBucketIndex> {}
 
 /** @name Dynamic */
 export interface Dynamic extends Enum {
@@ -1229,7 +1239,6 @@ export interface StorageBucket extends Struct {
   readonly operator_status: StorageBucketOperatorStatus;
   readonly accepting_new_bags: bool;
   readonly voucher: Voucher;
-  readonly metadata: Bytes;
 }
 
 /** @name StorageBucketId */
@@ -1433,8 +1442,6 @@ export interface WorkerOf extends Struct {
 
 /** @name WorkingGroup */
 export interface WorkingGroup extends Enum {
-  readonly isReserved0: boolean;
-  readonly isReserved1: boolean;
   readonly isStorage: boolean;
   readonly isContent: boolean;
   readonly isOperationsAlpha: boolean;

+ 1 - 8
types/src/common.ts

@@ -70,10 +70,7 @@ export class InputValidationLengthConstraint
   }
 }
 
-// Reserved keys are not part of the exported definition const, since they are not intented to be used
 export const WorkingGroupDef = {
-  // _Reserved0
-  // _Reserved1
   Storage: Null,
   Content: Null,
   OperationsAlpha: Null,
@@ -83,11 +80,7 @@ export const WorkingGroupDef = {
   OperationsGamma: Null,
 } as const
 export type WorkingGroupKey = keyof typeof WorkingGroupDef
-export class WorkingGroup extends JoyEnum({
-  _Reserved0: Null,
-  _Reserved1: Null,
-  ...WorkingGroupDef,
-}) {}
+export class WorkingGroup extends JoyEnum(WorkingGroupDef) {}
 
 // Temporarly in "common", because used both by /working-group and /content-working-group:
 export type ISlashableTerms = {

+ 5 - 1
types/src/content/index.ts

@@ -2,6 +2,7 @@ import { Vec, Option, Tuple, BTreeSet } from '@polkadot/types'
 import { bool, u64, u32, u128, Null, Bytes } from '@polkadot/types/primitive'
 import { MemberId } from '../members'
 import { JoyStructDecorated, JoyEnum, ChannelId } from '../common'
+
 import { GenericAccountId as AccountId } from '@polkadot/types/generic/AccountId'
 import { DataObjectId, DataObjectCreationParameters } from '../storage'
 
@@ -31,6 +32,7 @@ export class ContentActor extends JoyEnum({
   Curator: Tuple.with([CuratorGroupId, CuratorId]),
   Member: MemberId,
   Lead: Null,
+  Collaborator: MemberId,
 }) {}
 
 export class ChannelOwner extends JoyEnum({
@@ -43,13 +45,14 @@ export class Channel extends JoyStructDecorated({
   num_videos: u64,
   is_censored: bool,
   reward_account: Option.with(AccountId),
-  deletion_prize_source_account_id: AccountId,
+  collaborators: BTreeSet.with(MemberId),
 }) {}
 
 export class ChannelCreationParameters extends JoyStructDecorated({
   assets: Option.with(StorageAssets),
   meta: Option.with(Bytes),
   reward_account: Option.with(AccountId),
+  collaborators: BTreeSet.with(MemberId),
 }) {}
 
 export class ChannelUpdateParameters extends JoyStructDecorated({
@@ -57,6 +60,7 @@ export class ChannelUpdateParameters extends JoyStructDecorated({
   new_meta: Option.with(Bytes),
   reward_account: Option.with(Option.with(AccountId)),
   assets_to_remove: BTreeSet.with(DataObjectId),
+  collaborators: Option.with(BTreeSet.with(MemberId)),
 }) {}
 
 export class ChannelOwnershipTransferRequest extends JoyStructDecorated({

+ 18 - 7
types/src/storage.ts

@@ -50,10 +50,22 @@ export class DataObject
 
 export class DataObjectIdSet extends BTreeSet.with(DataObjectId) {}
 export class DataObjectIdMap extends BTreeMap.with(DataObjectId, DataObject) {}
-export class DistributionBucketId extends u64 {}
+export class DistributionBucketIndex extends u64 {}
 export class DistributionBucketFamilyId extends u64 {}
 export class StorageBucketIdSet extends BTreeSet.with(StorageBucketId) {}
-export class DistributionBucketIdSet extends BTreeSet.with(DistributionBucketId) {}
+export class DistributionBucketIndexSet extends BTreeSet.with(DistributionBucketIndex) {}
+
+export type IDistributionBucketId = {
+  distribution_bucket_family_id: DistributionBucketFamilyId
+  distribution_bucket_index: DistributionBucketIndex
+}
+
+export class DistributionBucketId
+  extends JoyStructDecorated({
+    distribution_bucket_family_id: DistributionBucketFamilyId,
+    distribution_bucket_index: DistributionBucketIndex,
+  })
+  implements IDistributionBucketId {}
 
 export type IDynamicBagDeletionPrize = {
   account_id: AccountId
@@ -159,7 +171,6 @@ export type IStorageBucket = {
   operator_status: StorageBucketOperatorStatus
   accepting_new_bags: bool
   voucher: Voucher
-  metadata: Bytes
 }
 
 export class StorageBucket
@@ -167,7 +178,6 @@ export class StorageBucket
     operator_status: StorageBucketOperatorStatus,
     accepting_new_bags: bool,
     voucher: Voucher,
-    metadata: Bytes,
   })
   implements IStorageBucket {}
 
@@ -221,12 +231,12 @@ export class DistributionBucket
   implements IDistributionBucket {}
 
 export type IDistributionBucketFamily = {
-  distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>
+  next_distribution_bucket_index: DistributionBucketIndex
 }
 
 export class DistributionBucketFamily
   extends JoyStructDecorated({
-    distribution_buckets: BTreeMap.with(DistributionBucketId, DistributionBucket),
+    next_distribution_bucket_index: DistributionBucketIndex,
   })
   implements IDistributionBucketFamily {}
 
@@ -258,12 +268,13 @@ export const storageTypes: RegistryTypes = {
   StorageBucketOperatorStatus,
   DataObject,
   DistributionBucketId,
+  DistributionBucketIndex,
   DistributionBucketFamilyId,
   DistributionBucket,
   DistributionBucketFamily,
   // Utility types:
   DataObjectIdMap,
-  DistributionBucketIdSet,
+  DistributionBucketIndexSet,
   DynamicBagCreationPolicyDistributorFamiliesMap,
 }
 export default storageTypes

File diff suppressed because it is too large
+ 474 - 51
yarn.lock


Some files were not shown because too many files changed in this diff