
Merge branch 'giza' into giza-cli

Leszek Wiesner 3 years ago
Parent
Commit
6c05049c25

+ 1 - 1
README.md

@@ -89,7 +89,7 @@ You can also run your own joystream-node:
 
 ```sh
 git checkout master
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ./target/release/joystream-node -- --pruning archive --chain testnets/joy-testnet-5.json
 ```
 

The diff for this file is not shown because it is too large
+ 0 - 0
chain-metadata.json


+ 3 - 3
devops/git-hooks/pre-push

@@ -1,13 +1,13 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running clippy (rust linter)'
 # When custom build.rs triggers wasm-build-runner-impl to build we get error:
 # "Rust WASM toolchain not installed, please install it!"
 # So we skip building the WASM binary by setting BUILD_DUMMY_WASM_BINARY=1
-BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings
+BUILD_DUMMY_WASM_BINARY=1 cargo +nightly-2021-02-20 clippy --release --all -- -D warnings
 
 echo 'running cargo unit tests'
-cargo test --release --all
+cargo +nightly-2021-02-20 test --release --all

+ 5 - 5
joystream-node-armv7.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN apt-get install -y libprotobuf-dev protobuf-compiler
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \

+ 5 - 5
joystream-node.Dockerfile

@@ -1,9 +1,9 @@
 FROM rust:1.52.1-buster AS rust
 RUN rustup self update
-RUN rustup install nightly-2021-03-24 --force
-RUN rustup default nightly-2021-03-24
-RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-RUN rustup component add --toolchain nightly-2021-03-24 clippy
+RUN rustup install nightly-2021-02-20 --force
+RUN rustup default nightly-2021-02-20
+RUN rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
+RUN rustup component add --toolchain nightly-2021-02-20 clippy
 RUN apt-get update && \
   apt-get install -y curl git gcc xz-utils sudo pkg-config unzip clang llvm libc6-dev
 
@@ -14,7 +14,7 @@ COPY . /joystream
 
 # Build all cargo crates
 # Ensure our tests and linter pass before actual build
-ENV WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+ENV WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 RUN BUILD_DUMMY_WASM_BINARY=1 cargo clippy --release --all -- -D warnings && \
     cargo test --release --all && \
     cargo build --release

+ 3 - 3
node/README.md

@@ -26,7 +26,7 @@ cd joystream/
 Compile the node and runtime:
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo build --release
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 build --release
 ```
 
 This produces the binary in `./target/release/joystream-node`
@@ -57,7 +57,7 @@ Use the `--chain` argument, and specify the path to the genesis `chain.json` fil
 Running unit tests:
 
 ```bash
-cargo test --release --all
+cargo +nightly-2021-02-20 test --release --all
 ```
 
 Running full suite of checks, tests, formatting and linting:
@@ -79,7 +79,7 @@ If you are building a tagged release from `master` branch and want to install th
 This will install the executable `joystream-node` to your `~/.cargo/bin` folder, which you would normally have in your `$PATH` environment.
 
 ```bash
-WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 cargo install joystream-node --path node/ --locked
+WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 cargo +nightly-2021-02-20 install joystream-node --path node/ --locked
 ```
 
 Now you can run and connect to the testnet:

+ 10 - 6
query-node/mappings/content/channel.ts

@@ -4,7 +4,7 @@ eslint-disable @typescript-eslint/naming-convention
 import { EventContext, StoreContext } from '@joystream/hydra-common'
 import { Content } from '../generated/types'
 import { convertContentActorToChannelOwner, processChannelMetadata } from './utils'
-import { Channel, ChannelCategory, StorageDataObject } from 'query-node/dist/model'
+import { Channel, ChannelCategory, StorageDataObject, Membership } from 'query-node/dist/model'
 import { deserializeMetadata, inconsistentState, logger } from '../common'
 import { ChannelCategoryMetadata, ChannelMetadata } from '@joystream/metadata-protobuf'
 import { integrateMeta } from '@joystream/metadata-protobuf/utils'
@@ -14,9 +14,7 @@ import { removeDataObject } from '../storage/utils'
 export async function content_ChannelCreated(ctx: EventContext & StoreContext): Promise<void> {
   const { store, event } = ctx
   // read event data
-  const [contentActor, channelId, runtimeChannel, channelCreationParameters] = new Content.ChannelCreatedEvent(
-    event
-  ).params
+  const [contentActor, channelId, , channelCreationParameters] = new Content.ChannelCreatedEvent(event).params
 
   // create entity
   const channel = new Channel({
@@ -26,12 +24,14 @@ export async function content_ChannelCreated(ctx: EventContext & StoreContext):
     videos: [],
     createdInBlock: event.blockNumber,
     rewardAccount: channelCreationParameters.reward_account.unwrapOr(undefined)?.toString(),
-    deletionPrizeDestAccount: runtimeChannel.deletion_prize_source_account_id.toString(),
     // fill in auto-generated fields
     createdAt: new Date(event.blockTimestamp),
     updatedAt: new Date(event.blockTimestamp),
     // prepare channel owner (handles fields `ownerMember` and `ownerCuratorGroup`)
     ...(await convertContentActorToChannelOwner(store, contentActor)),
+    collaborators: Array.from(channelCreationParameters.collaborators).map(
+      (id) => new Membership({ id: id.toString() })
+    ),
   })
 
   // deserialize & process metadata
@@ -76,13 +76,17 @@ export async function content_ChannelUpdated(ctx: EventContext & StoreContext):
 
   // prepare changed reward account
   const newRewardAccount = channelUpdateParameters.reward_account.unwrapOr(null)
-
   // reward account change happened?
   if (newRewardAccount) {
     // this will change the `channel`!
     channel.rewardAccount = newRewardAccount.unwrapOr(undefined)?.toString()
   }
 
+  const newCollaborators = channelUpdateParameters.collaborators.unwrapOr(undefined)
+  if (newCollaborators) {
+    channel.collaborators = Array.from(newCollaborators).map((id) => new Membership({ id: id.toString() }))
+  }
+
   // set last update time
   channel.updatedAt = new Date(event.blockTimestamp)
 

+ 5 - 3
query-node/schemas/content.graphql

@@ -35,9 +35,6 @@ type Channel @entity {
   "Reward account where revenue is sent if set."
   rewardAccount: String
 
-  "Destination account for the prize associated with channel deletion"
-  deletionPrizeDestAccount: String!
-
   "The title of the Channel"
   title: String @fulltext(query: "search")
 
@@ -61,9 +58,14 @@ type Channel @entity {
   "The primary langauge of the channel's content"
   language: Language
 
+  "List of videos that belong to the channel"
   videos: [Video!]! @derivedFrom(field: "channel")
 
+  "Number of the block the channel was created in"
   createdInBlock: Int!
+
+  "List of channel collaborators (members)"
+  collaborators: [Membership!]
 }
 
 type CuratorGroup @entity {

+ 4 - 0
query-node/schemas/membership.graphql

@@ -33,5 +33,9 @@ type Membership @entity {
   "The type of subscription the member has purchased if any."
   subscription: Int
 
+  "List of channels the member owns"
   channels: [Channel!]! @derivedFrom(field: "ownerMember")
+
+  "List of channels the member has collaborator access to"
+  collaboratorInChannels: [Channel!] @derivedFrom(field: "collaborators")
 }

+ 1 - 3
runtime-modules/content/src/tests/mock.rs

@@ -200,7 +200,6 @@ impl ContentActorAuthenticator for Test {
 parameter_types! {
     pub const MaxNumberOfDataObjectsPerBag: u64 = 4;
     pub const MaxDistributionBucketFamilyNumber: u64 = 4;
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 10;
     pub const DataObjectDeletionPrize: u64 = 10;
     pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage
     pub const BlacklistSizeLimit: u64 = 1;
@@ -229,7 +228,7 @@ impl storage::Trait for Test {
     type Event = MetaEvent;
     type DataObjectId = u64;
     type StorageBucketId = u64;
-    type DistributionBucketId = u64;
+    type DistributionBucketIndex = u64;
     type DistributionBucketFamilyId = u64;
     type DistributionBucketOperatorId = u64;
     type ChannelId = u64;
@@ -245,7 +244,6 @@ impl storage::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =
         MaxNumberOfPendingInvitationsPerDistributionBucket;

+ 0 - 94
runtime-modules/storage/src/distribution_bucket_picker.rs

@@ -1,94 +0,0 @@
-#![warn(missing_docs)]
-
-use frame_support::traits::Randomness;
-use sp_arithmetic::traits::Zero;
-use sp_runtime::SaturatedConversion;
-use sp_std::cell::RefCell;
-use sp_std::collections::btree_set::BTreeSet;
-use sp_std::marker::PhantomData;
-use sp_std::rc::Rc;
-use sp_std::vec::Vec;
-
-use crate::{DynamicBagType, Module, Trait};
-
-// Generates distribution bucket IDs to assign to a new dynamic bag.
-pub(crate) struct DistributionBucketPicker<T> {
-    trait_marker: PhantomData<T>,
-}
-
-impl<T: Trait> DistributionBucketPicker<T> {
-    // Get random distribution buckets from distribution bucket families using the dynamic bag
-    // creation policy.
-    pub(crate) fn pick_distribution_buckets(
-        bag_type: DynamicBagType,
-    ) -> BTreeSet<T::DistributionBucketId> {
-        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
-
-        if creation_policy.no_distribution_buckets_required() {
-            return BTreeSet::new();
-        }
-
-        // Randomness for all bucket family.
-        // let random_seed = RefCell::new(Module::<T>::get_initial_random_seed());
-        let random_seed = Rc::new(RefCell::new(Module::<T>::get_initial_random_seed()));
-
-        creation_policy
-            .families
-            .iter()
-            .filter_map(|(family_id, bucket_num)| {
-                Module::<T>::ensure_distribution_bucket_family_exists(family_id)
-                    .ok()
-                    .map(|fam| (fam, bucket_num))
-            })
-            .map(|(family, bucket_num)| {
-                let filtered_ids = family
-                    .distribution_buckets
-                    .iter()
-                    .filter_map(|(id, bucket)| bucket.accepting_new_bags.then(|| *id))
-                    .collect::<Vec<_>>();
-
-                (filtered_ids, bucket_num)
-            })
-            .map(|(bucket_ids, bucket_num)| {
-                Self::get_random_distribution_buckets(bucket_ids, *bucket_num, random_seed.clone())
-            })
-            .flatten()
-            .collect::<BTreeSet<_>>()
-    }
-
-    // Get random bucket IDs from the ID collection.
-    pub fn get_random_distribution_buckets(
-        ids: Vec<T::DistributionBucketId>,
-        bucket_number: u32,
-        seed: Rc<RefCell<T::Hash>>, //     seed: RefCell<T::Hash>
-    ) -> BTreeSet<T::DistributionBucketId> {
-        let mut working_ids = ids;
-        let mut result_ids = BTreeSet::default();
-
-        for _ in 0..bucket_number {
-            if working_ids.is_empty() {
-                break;
-            }
-
-            let current_seed = Self::advance_random_seed(seed.clone());
-
-            let upper_bound = working_ids.len() as u64 - 1;
-            let index =
-                Module::<T>::random_index(current_seed.as_ref(), upper_bound).saturated_into();
-            result_ids.insert(working_ids.remove(index));
-        }
-
-        result_ids
-    }
-
-    // Changes the internal seed value of the container and returns new random seed.
-    fn advance_random_seed(seed: Rc<RefCell<T::Hash>>) -> T::Hash {
-        // Cannot create randomness in the initial block (Substrate error).
-        if <frame_system::Module<T>>::block_number() == Zero::zero() {
-            return Module::<T>::get_initial_random_seed();
-        }
-
-        let current_seed = *seed.borrow();
-        seed.replace(T::Randomness::random(current_seed.as_ref()))
-    }
-}

+ 216 - 219
runtime-modules/storage/src/lib.rs

@@ -105,7 +105,6 @@
 //! - DefaultMemberDynamicBagNumberOfStorageBuckets
 //! - DefaultChannelDynamicBagNumberOfStorageBuckets
 //! - MaxDistributionBucketFamilyNumber
-//! - MaxDistributionBucketNumberPerFamily
 //! - DistributionBucketsPerBagValueConstraint
 //! - MaxNumberOfPendingInvitationsPerDistributionBucket
 
@@ -126,13 +125,15 @@ mod tests;
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 
-pub(crate) mod distribution_bucket_picker;
-pub(crate) mod storage_bucket_picker;
+//pub(crate) mod distribution_bucket_picker;
+pub(crate) mod random_buckets;
 
 use codec::{Codec, Decode, Encode};
 use frame_support::dispatch::{DispatchError, DispatchResult};
 use frame_support::traits::{Currency, ExistenceRequirement, Get, Randomness};
-use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter};
+use frame_support::{
+    decl_error, decl_event, decl_module, decl_storage, ensure, IterableStorageDoubleMap, Parameter,
+};
 use frame_system::ensure_root;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
@@ -149,8 +150,8 @@ use common::constraints::BoundedValueConstraint;
 use common::origin::ActorOriginValidator;
 use common::working_group::WorkingGroup;
 
-use distribution_bucket_picker::DistributionBucketPicker;
-use storage_bucket_picker::StorageBucketPicker;
+use random_buckets::DistributionBucketPicker;
+use random_buckets::StorageBucketPicker;
 
 /// Public interface for the storage module.
 pub trait DataObjectStorage<T: Trait> {
@@ -245,17 +246,21 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
         + Default
         + Copy
         + MaybeSerialize
-        + PartialEq;
+        + PartialEq
+        + Into<u64>
+        + From<u64>;
 
-    /// Distribution bucket ID type.
-    type DistributionBucketId: Parameter
+    /// Distribution bucket index within a distribution bucket family type.
+    type DistributionBucketIndex: Parameter
         + Member
         + BaseArithmetic
         + Codec
         + Default
         + Copy
         + MaybeSerialize
-        + PartialEq;
+        + PartialEq
+        + Into<u64>
+        + From<u64>;
 
     /// Distribution bucket family ID type.
     type DistributionBucketFamilyId: Parameter
@@ -321,9 +326,6 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
     /// Defines max allowed distribution bucket family number.
     type MaxDistributionBucketFamilyNumber: Get<u64>;
 
-    /// Defines max allowed distribution bucket number per family.
-    type MaxDistributionBucketNumberPerFamily: Get<u64>;
-
     /// Max number of pending invitations per distribution bucket.
     type MaxNumberOfPendingInvitationsPerDistributionBucket: Get<u64>;
 
@@ -484,8 +486,7 @@ pub struct DataObject<Balance> {
 }
 
 /// Type alias for the BagRecord.
-pub type Bag<T> =
-    BagRecord<<T as Trait>::StorageBucketId, <T as Trait>::DistributionBucketId, BalanceOf<T>>;
+pub type Bag<T> = BagRecord<<T as Trait>::StorageBucketId, DistributionBucketId<T>, BalanceOf<T>>;
 
 /// Bag container.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
@@ -585,6 +586,7 @@ pub enum DynamicBagType {
 
     /// Channel dynamic bag type.
     Channel,
+    // Update 'delete_distribution_bucket_family' when adding a new type!
 }
 
 impl Default for DynamicBagType {
@@ -826,45 +828,42 @@ impl<Balance: Saturating + Copy> BagUpdate<Balance> {
 
 /// Type alias for the DistributionBucketFamilyRecord.
 pub type DistributionBucketFamily<T> =
-    DistributionBucketFamilyRecord<<T as Trait>::DistributionBucketId, WorkerId<T>>;
+    DistributionBucketFamilyRecord<<T as Trait>::DistributionBucketIndex>;
 
 /// Distribution bucket family.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
-pub struct DistributionBucketFamilyRecord<DistributionBucketId: Ord, WorkerId: Ord> {
-    /// Distribution bucket map.
-    pub distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucketRecord<WorkerId>>,
+pub struct DistributionBucketFamilyRecord<DistributionBucketIndex> {
+    /// Next distribution bucket index.
+    pub next_distribution_bucket_index: DistributionBucketIndex,
 }
 
-impl<DistributionBucketId: Ord, WorkerId: Ord>
-    DistributionBucketFamilyRecord<DistributionBucketId, WorkerId>
+impl<DistributionBucketIndex: BaseArithmetic>
+    DistributionBucketFamilyRecord<DistributionBucketIndex>
 {
-    // Add and/or remove distribution buckets assignments to bags.
-    fn change_bag_assignments(
-        &mut self,
-        add_buckets: &BTreeSet<DistributionBucketId>,
-        remove_buckets: &BTreeSet<DistributionBucketId>,
-    ) {
-        for bucket_id in add_buckets.iter() {
-            if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) {
-                bucket.register_bag_assignment();
-            }
-        }
-
-        for bucket_id in remove_buckets.iter() {
-            if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) {
-                bucket.unregister_bag_assignment();
-            }
-        }
+    // Increments the next distribution bucket index variable.
+    fn increment_next_distribution_bucket_index_counter(&mut self) {
+        self.next_distribution_bucket_index += One::one()
     }
+}
 
-    // Checks inner buckets for bag assignment number. Returns true only if all 'assigned_bags' are
-    // zero.
-    fn no_bags_assigned(&self) -> bool {
-        self.distribution_buckets
-            .values()
-            .all(|b| b.no_bags_assigned())
-    }
+/// Type alias for the DistributionBucketIdRecord.
+pub type DistributionBucketId<T> = DistributionBucketIdRecord<
+    <T as Trait>::DistributionBucketFamilyId,
+    <T as Trait>::DistributionBucketIndex,
+>;
+
+/// Complex distribution bucket ID type.
+/// Joins a distribution bucket family ID and a distribution bucket index within the family.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug, PartialOrd, Ord)]
+pub struct DistributionBucketIdRecord<DistributionBucketFamilyId: Ord, DistributionBucketIndex: Ord>
+{
+    /// Distribution bucket family ID.
+    pub distribution_bucket_family_id: DistributionBucketFamilyId,
+
+    /// Distribution bucket index within the family.
+    pub distribution_bucket_index: DistributionBucketIndex,
 }
 
 /// Type alias for the DistributionBucketRecord.
@@ -962,12 +961,15 @@ decl_storage! {
             map hasher(blake2_128_concat) T::DistributionBucketFamilyId =>
             DistributionBucketFamily<T>;
 
+        /// 'Distribution bucket' storage double map.
+        pub DistributionBucketByFamilyIdById get (fn distribution_bucket_by_family_id_by_index):
+            double_map
+            hasher(blake2_128_concat) T::DistributionBucketFamilyId,
+            hasher(blake2_128_concat) T::DistributionBucketIndex => DistributionBucket<T>;
+
         /// Total number of distribution bucket families in the system.
         pub DistributionBucketFamilyNumber get(fn distribution_bucket_family_number): u64;
 
-        /// Distribution bucket id counter. Starts at zero.
-        pub NextDistributionBucketId get(fn next_distribution_bucket_id): T::DistributionBucketId;
-
         /// "Distribution buckets per bag" number limit.
         pub DistributionBucketsPerBagLimit get (fn distribution_buckets_per_bag_limit): u64;
     }
@@ -986,7 +988,8 @@ decl_event! {
         <T as frame_system::Trait>::AccountId,
         Balance = BalanceOf<T>,
         <T as Trait>::DistributionBucketFamilyId,
-        <T as Trait>::DistributionBucketId,
+        DistributionBucketId = DistributionBucketId<T>,
+        <T as Trait>::DistributionBucketIndex,
     {
         /// Emits on creating the storage bucket.
         /// Params
@@ -1164,16 +1167,14 @@ decl_event! {
 
         /// Emits on distribution bucket status update (accepting new bags).
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - new status (accepting new bags)
-        DistributionBucketStatusUpdated(DistributionBucketFamilyId, DistributionBucketId, bool),
+        DistributionBucketStatusUpdated(DistributionBucketId, bool),
 
         /// Emits on deleting distribution bucket.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
-        DistributionBucketDeleted(DistributionBucketFamilyId, DistributionBucketId),
+        DistributionBucketDeleted(DistributionBucketId),
 
         /// Emits on updating distribution buckets for bag.
         /// Params
@@ -1183,8 +1184,8 @@ decl_event! {
         DistributionBucketsUpdatedForBag(
             BagId,
             DistributionBucketFamilyId,
-            BTreeSet<DistributionBucketId>,
-            BTreeSet<DistributionBucketId>
+            BTreeSet<DistributionBucketIndex>,
+            BTreeSet<DistributionBucketIndex>
         ),
 
         /// Emits on changing the "Distribution buckets per bag" number limit.
@@ -1194,10 +1195,9 @@ decl_event! {
 
         /// Emits on distribution bucket mode update (distributing flag).
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - distributing
-        DistributionBucketModeUpdated(DistributionBucketFamilyId, DistributionBucketId, bool),
+        DistributionBucketModeUpdated(DistributionBucketId, bool),
 
         /// Emits on dynamic bag creation policy update (distribution bucket families).
         /// Params
@@ -1210,22 +1210,18 @@ decl_event! {
 
         /// Emits on creating a distribution bucket invitation for the operator.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - worker ID
         DistributionBucketOperatorInvited(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId,
         ),
 
         /// Emits on canceling a distribution bucket invitation for the operator.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - operator worker ID
         DistributionBucketInvitationCancelled(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId,
         ),
@@ -1233,34 +1229,28 @@ decl_event! {
         /// Emits on accepting a distribution bucket invitation for the operator.
         /// Params
         /// - worker ID
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         DistributionBucketInvitationAccepted(
             WorkerId,
-            DistributionBucketFamilyId,
             DistributionBucketId,
         ),
 
         /// Emits on setting the metadata by a distribution bucket operator.
         /// Params
         /// - worker ID
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - metadata
         DistributionBucketMetadataSet(
             WorkerId,
-            DistributionBucketFamilyId,
             DistributionBucketId,
             Vec<u8>
         ),
 
         /// Emits on the distribution bucket operator removal.
         /// Params
-        /// - distribution bucket family ID
         /// - distribution bucket ID
         /// - distribution bucket operator ID
         DistributionBucketOperatorRemoved(
-            DistributionBucketFamilyId,
             DistributionBucketId,
             WorkerId
         ),
@@ -1396,9 +1386,6 @@ decl_error! {
         /// Distribution bucket family doesn't exist.
         DistributionBucketFamilyDoesntExist,
 
-        /// Max distribution bucket number per family limit exceeded.
-        MaxDistributionBucketNumberPerFamilyLimitExceeded,
-
         /// Distribution bucket doesn't exist.
         DistributionBucketDoesntExist,
 
@@ -1481,10 +1468,6 @@ decl_module! {
         /// Exports const - max allowed distribution bucket family number.
         const MaxDistributionBucketFamilyNumber: u64 = T::MaxDistributionBucketFamilyNumber::get();
 
-        /// Exports const - max allowed distribution bucket number per family.
-        const MaxDistributionBucketNumberPerFamily: u64 =
-            T::MaxDistributionBucketNumberPerFamily::get();
-
         /// Exports const - "Distribution buckets per bag" value constraint.
         const DistributionBucketsPerBagValueConstraint: StorageBucketsPerBagValueConstraint =
             T::DistributionBucketsPerBagValueConstraint::get();
@@ -2032,10 +2015,10 @@ decl_module! {
         pub fn delete_distribution_bucket_family(origin, family_id: T::DistributionBucketFamilyId) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
+            Self::ensure_distribution_bucket_family_exists(&family_id)?;
 
             // Check that no assigned bags left.
-            ensure!(family.no_bags_assigned(), Error::<T>::DistributionBucketIsBoundToBag);
+            ensure!(Self::no_bags_assigned(&family_id), Error::<T>::DistributionBucketIsBoundToBag);
 
             Self::check_dynamic_bag_creation_policy_for_dependencies(
                 &family_id,
@@ -2068,12 +2051,6 @@ decl_module! {
 
             let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
 
-            ensure!(
-                family.distribution_buckets.len().saturated_into::<u64>() <
-                    T::MaxDistributionBucketNumberPerFamily::get(),
-                Error::<T>::MaxDistributionBucketNumberPerFamilyLimitExceeded
-            );
-
             //
             // == MUTATION SAFE ==
             //
@@ -2086,13 +2063,14 @@ decl_module! {
                 assigned_bags: 0,
             };
 
-            let bucket_id = Self::next_distribution_bucket_id();
+            let bucket_index = family.next_distribution_bucket_index;
+            let bucket_id = Self::create_distribution_bucket_id(family_id, bucket_index);
 
             <DistributionBucketFamilyById<T>>::mutate(family_id, |family|{
-                family.distribution_buckets.insert(bucket_id, bucket);
+                family.increment_next_distribution_bucket_index_counter();
             });
 
-            <NextDistributionBucketId<T>>::put(bucket_id + One::one());
+            <DistributionBucketByFamilyIdById<T>>::insert(family_id, bucket_index, bucket);
 
             Self::deposit_event(
                 RawEvent::DistributionBucketCreated(family_id, accepting_new_bags, bucket_id)
@@ -2103,34 +2081,27 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_distribution_bucket_status(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             accepting_new_bags: bool
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             //
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.accepting_new_bags = accepting_new_bags;
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketStatusUpdated(
-                    family_id,
-                    distribution_bucket_id,
-                    accepting_new_bags
-                )
+                RawEvent::DistributionBucketStatusUpdated(bucket_id, accepting_new_bags)
             );
         }
 
@@ -2138,13 +2109,11 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn delete_distribution_bucket(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
         ){
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(&family, &distribution_bucket_id)?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             // Check that no assigned bags left.
             ensure!(bucket.no_bags_assigned(), Error::<T>::DistributionBucketIsBoundToBag);
@@ -2156,12 +2125,13 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                family.distribution_buckets.remove(&distribution_bucket_id);
-            });
+            <DistributionBucketByFamilyIdById<T>>::remove(
+                &bucket_id.distribution_bucket_family_id,
+                &bucket_id.distribution_bucket_index
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketDeleted(family_id, distribution_bucket_id)
+                RawEvent::DistributionBucketDeleted(bucket_id)
             );
         }
 
@@ -2171,36 +2141,44 @@ decl_module! {
             origin,
             bag_id: BagId<T>,
             family_id: T::DistributionBucketFamilyId,
-            add_buckets: BTreeSet<T::DistributionBucketId>,
-            remove_buckets: BTreeSet<T::DistributionBucketId>,
+            add_buckets_indices: BTreeSet<T::DistributionBucketIndex>,
+            remove_buckets_indices: BTreeSet<T::DistributionBucketIndex>,
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
             Self::validate_update_distribution_buckets_for_bag_params(
                 &bag_id,
                 &family_id,
-                &add_buckets,
-                &remove_buckets,
+                &add_buckets_indices,
+                &remove_buckets_indices,
             )?;
 
             //
             // == MUTATION SAFE ==
             //
 
+            let add_buckets_ids = add_buckets_indices
+                .iter()
+                .map(|idx| Self::create_distribution_bucket_id(family_id, *idx))
+                .collect::<BTreeSet<_>>();
+
+            let remove_buckets_ids = remove_buckets_indices
+                .iter()
+                .map(|idx| Self::create_distribution_bucket_id(family_id, *idx))
+                .collect::<BTreeSet<_>>();
+
             Bags::<T>::mutate(&bag_id, |bag| {
-                bag.update_distribution_buckets(&mut add_buckets.clone(), &remove_buckets);
+                bag.update_distribution_buckets(&mut add_buckets_ids.clone(), &remove_buckets_ids);
             });
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                family.change_bag_assignments(&add_buckets, &remove_buckets);
-            });
+            Self::change_bag_assignments(&add_buckets_ids, &remove_buckets_ids);
 
             Self::deposit_event(
                 RawEvent::DistributionBucketsUpdatedForBag(
                     bag_id,
                     family_id,
-                    add_buckets,
-                    remove_buckets
+                    add_buckets_indices,
+                    remove_buckets_indices
                 )
             );
         }
@@ -2229,34 +2207,27 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_distribution_bucket_mode(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             distributing: bool
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             //
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.distributing = distributing;
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketModeUpdated(
-                    family_id,
-                    distribution_bucket_id,
-                    distributing
-                )
+                RawEvent::DistributionBucketModeUpdated(bucket_id, distributing)
             );
         }
 
@@ -2291,17 +2262,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn invite_distribution_bucket_operator(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             Self::ensure_distribution_provider_can_be_invited(&bucket, &operator_worker_id)?;
 
@@ -2309,18 +2275,16 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.insert(operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketOperatorInvited(
-                    family_id,
-                    distribution_bucket_id,
-                    operator_worker_id,
-                )
+                RawEvent::DistributionBucketOperatorInvited(bucket_id, operator_worker_id)
             );
         }
 
@@ -2328,17 +2292,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn cancel_distribution_bucket_operator_invite(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>
         ) {
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.pending_invitations.contains(&operator_worker_id),
@@ -2349,16 +2308,17 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.remove(&operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
                 RawEvent::DistributionBucketInvitationCancelled(
-                    family_id,
-                    distribution_bucket_id,
+                    bucket_id,
                     operator_worker_id
                 )
             );
@@ -2368,17 +2328,12 @@ decl_module! {
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn remove_distribution_bucket_operator(
             origin,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             operator_worker_id: WorkerId<T>,
         ){
             T::ensure_distribution_working_group_leader_origin(origin)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.operators.contains(&operator_worker_id),
@@ -2390,18 +2345,16 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.operators.remove(&operator_worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketOperatorRemoved(
-                    family_id,
-                    distribution_bucket_id,
-                    operator_worker_id
-                )
+                RawEvent::DistributionBucketOperatorRemoved(bucket_id, operator_worker_id)
             );
         }
 
@@ -2436,17 +2389,11 @@ decl_module! {
         pub fn accept_distribution_bucket_invitation(
             origin,
             worker_id: WorkerId<T>,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
-
+            bucket_id: DistributionBucketId<T>,
         ) {
             T::ensure_distribution_worker_origin(origin, worker_id)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.pending_invitations.contains(&worker_id),
@@ -2457,19 +2404,17 @@ decl_module! {
             // == MUTATION SAFE ==
             //
 
-            <DistributionBucketFamilyById<T>>::mutate(family_id, |family| {
-                if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) {
+            <DistributionBucketByFamilyIdById<T>>::mutate(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+                |bucket| {
                     bucket.pending_invitations.remove(&worker_id);
                     bucket.operators.insert(worker_id);
                 }
-            });
+            );
 
             Self::deposit_event(
-                RawEvent::DistributionBucketInvitationAccepted(
-                    worker_id,
-                    family_id,
-                    distribution_bucket_id,
-                )
+                RawEvent::DistributionBucketInvitationAccepted(worker_id, bucket_id)
             );
         }
 
@@ -2478,17 +2423,12 @@ decl_module! {
         pub fn set_distribution_operator_metadata(
             origin,
             worker_id: WorkerId<T>,
-            family_id: T::DistributionBucketFamilyId,
-            distribution_bucket_id: T::DistributionBucketId,
+            bucket_id: DistributionBucketId<T>,
             metadata: Vec<u8>,
         ) {
             T::ensure_distribution_worker_origin(origin, worker_id)?;
 
-            let family = Self::ensure_distribution_bucket_family_exists(&family_id)?;
-            let bucket = Self::ensure_distribution_bucket_exists(
-                &family,
-                &distribution_bucket_id
-            )?;
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.operators.contains(&worker_id),
@@ -2500,12 +2440,7 @@ decl_module! {
             //
 
             Self::deposit_event(
-                RawEvent::DistributionBucketMetadataSet(
-                    worker_id,
-                    family_id,
-                    distribution_bucket_id,
-                    metadata
-                )
+                RawEvent::DistributionBucketMetadataSet(worker_id, bucket_id, metadata)
             );
         }
 
@@ -3351,7 +3286,7 @@ impl<T: Trait> Module<T> {
     // Selects distribution bucket ID sets to assign to the dynamic bag.
     pub(crate) fn pick_distribution_buckets_for_dynamic_bag(
         bag_type: DynamicBagType,
-    ) -> BTreeSet<T::DistributionBucketId> {
+    ) -> BTreeSet<DistributionBucketId<T>> {
         DistributionBucketPicker::<T>::pick_distribution_buckets(bag_type)
     }
 
@@ -3469,22 +3404,28 @@ impl<T: Trait> Module<T> {
     // Ensures the existence of the distribution bucket.
     // Returns the DistributionBucket object or error.
     fn ensure_distribution_bucket_exists(
-        family: &DistributionBucketFamily<T>,
-        distribution_bucket_id: &T::DistributionBucketId,
+        bucket_id: &DistributionBucketId<T>,
     ) -> Result<DistributionBucket<T>, Error<T>> {
-        family
-            .distribution_buckets
-            .get(distribution_bucket_id)
-            .cloned()
-            .ok_or(Error::<T>::DistributionBucketDoesntExist)
+        ensure!(
+            <DistributionBucketByFamilyIdById<T>>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index
+            ),
+            Error::<T>::DistributionBucketDoesntExist
+        );
+
+        Ok(Self::distribution_bucket_by_family_id_by_index(
+            bucket_id.distribution_bucket_family_id,
+            bucket_id.distribution_bucket_index,
+        ))
     }
 
     // Ensures validity of the `update_distribution_buckets_for_bag` extrinsic parameters
     fn validate_update_distribution_buckets_for_bag_params(
         bag_id: &BagId<T>,
         family_id: &T::DistributionBucketFamilyId,
-        add_buckets: &BTreeSet<T::DistributionBucketId>,
-        remove_buckets: &BTreeSet<T::DistributionBucketId>,
+        add_buckets: &BTreeSet<T::DistributionBucketIndex>,
+        remove_buckets: &BTreeSet<T::DistributionBucketIndex>,
     ) -> DispatchResult {
         ensure!(
             !add_buckets.is_empty() || !remove_buckets.is_empty(),
@@ -3493,7 +3434,7 @@ impl<T: Trait> Module<T> {
 
         let bag = Self::ensure_bag_exists(bag_id)?;
 
-        let family = Self::ensure_distribution_bucket_family_exists(family_id)?;
+        Self::ensure_distribution_bucket_family_exists(family_id)?;
 
         let new_bucket_number = bag
             .distributed_by
@@ -3507,8 +3448,9 @@ impl<T: Trait> Module<T> {
             Error::<T>::MaxDistributionBucketNumberPerBagLimitExceeded
         );
 
-        for bucket_id in remove_buckets.iter() {
-            Self::ensure_distribution_bucket_exists(&family, bucket_id)?;
+        for bucket_index in remove_buckets.iter() {
+            let bucket_id = Self::create_distribution_bucket_id(*family_id, *bucket_index);
+            Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bag.distributed_by.contains(&bucket_id),
@@ -3516,8 +3458,9 @@ impl<T: Trait> Module<T> {
             );
         }
 
-        for bucket_id in add_buckets.iter() {
-            let bucket = Self::ensure_distribution_bucket_exists(&family, bucket_id)?;
+        for bucket_index in add_buckets.iter() {
+            let bucket_id = Self::create_distribution_bucket_id(*family_id, *bucket_index);
+            let bucket = Self::ensure_distribution_bucket_exists(&bucket_id)?;
 
             ensure!(
                 bucket.accepting_new_bags,
@@ -3611,4 +3554,58 @@ impl<T: Trait> Module<T> {
 
         Ok(())
     }
+
+    // Add and/or remove distribution buckets assignments to bags.
+    fn change_bag_assignments(
+        add_buckets: &BTreeSet<DistributionBucketId<T>>,
+        remove_buckets: &BTreeSet<DistributionBucketId<T>>,
+    ) {
+        for bucket_id in add_buckets.iter() {
+            if DistributionBucketByFamilyIdById::<T>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+            ) {
+                DistributionBucketByFamilyIdById::<T>::mutate(
+                    bucket_id.distribution_bucket_family_id,
+                    bucket_id.distribution_bucket_index,
+                    |bucket| {
+                        bucket.register_bag_assignment();
+                    },
+                )
+            }
+        }
+
+        for bucket_id in remove_buckets.iter() {
+            if DistributionBucketByFamilyIdById::<T>::contains_key(
+                bucket_id.distribution_bucket_family_id,
+                bucket_id.distribution_bucket_index,
+            ) {
+                DistributionBucketByFamilyIdById::<T>::mutate(
+                    bucket_id.distribution_bucket_family_id,
+                    bucket_id.distribution_bucket_index,
+                    |bucket| {
+                        bucket.unregister_bag_assignment();
+                    },
+                )
+            }
+        }
+    }
+
+    // Checks the family's distribution buckets for bag assignments. Returns true only if all
+    // 'assigned_bags' counters are zero.
+    fn no_bags_assigned(family_id: &T::DistributionBucketFamilyId) -> bool {
+        DistributionBucketByFamilyIdById::<T>::iter_prefix_values(family_id)
+            .all(|b| b.no_bags_assigned())
+    }
+
+    // Creates distribution bucket ID from family ID and bucket index.
+    pub(crate) fn create_distribution_bucket_id(
+        distribution_bucket_family_id: T::DistributionBucketFamilyId,
+        distribution_bucket_index: T::DistributionBucketIndex,
+    ) -> DistributionBucketId<T> {
+        DistributionBucketId::<T> {
+            distribution_bucket_family_id,
+            distribution_bucket_index,
+        }
+    }
 }

+ 78 - 0
runtime-modules/storage/src/random_buckets/distribution_bucket_picker.rs

@@ -0,0 +1,78 @@
+#![warn(missing_docs)]
+
+use sp_std::cell::RefCell;
+use sp_std::collections::btree_set::BTreeSet;
+use sp_std::marker::PhantomData;
+use sp_std::vec::Vec;
+
+use crate::{DistributionBucketId, DynamicBagType, Module, Trait};
+
+pub(crate) use super::{RandomBucketIdIterator, SequentialBucketIdIterator};
+
+// Generates distribution bucket IDs to assign to a new dynamic bag.
+pub(crate) struct DistributionBucketPicker<T> {
+    trait_marker: PhantomData<T>,
+}
+
+impl<T: Trait> DistributionBucketPicker<T> {
+    // Get random distribution buckets from distribution bucket families using the dynamic bag
+    // creation policy.
+    pub(crate) fn pick_distribution_buckets(
+        bag_type: DynamicBagType,
+    ) -> BTreeSet<DistributionBucketId<T>> {
+        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
+
+        if creation_policy.no_distribution_buckets_required() {
+            return BTreeSet::new();
+        }
+
+        // Distribution bucket IDs accumulator.
+        let bucket_ids_cell = RefCell::new(BTreeSet::<T::DistributionBucketIndex>::new());
+
+        creation_policy
+            .families
+            .iter()
+            .filter_map(|(family_id, bucket_num)| {
+                Module::<T>::ensure_distribution_bucket_family_exists(family_id)
+                    .ok()
+                    .map(|fam| (family_id, fam, bucket_num))
+            })
+            .map(|(family_id, family, bucket_num)| {
+                RandomBucketIdIterator::<T, T::DistributionBucketIndex>::new(
+                    family.next_distribution_bucket_index,
+                )
+                .chain(
+                    SequentialBucketIdIterator::<T, T::DistributionBucketIndex>::new(
+                        family.next_distribution_bucket_index,
+                    ),
+                )
+                .filter(|bucket_idx| {
+                    let bucket_id = DistributionBucketId::<T> {
+                        distribution_bucket_family_id: *family_id,
+                        distribution_bucket_index: *bucket_idx,
+                    };
+
+                    Module::<T>::ensure_distribution_bucket_exists(&bucket_id)
+                        .ok()
+                        .map(|bucket| bucket.accepting_new_bags)
+                        .unwrap_or(false)
+                })
+                .filter(|bucket_idx| {
+                    let bucket_ids = bucket_ids_cell.borrow();
+
+                    // Skips the iteration on existing ID.
+                    !bucket_ids.contains(bucket_idx)
+                })
+                .map(|bucket_idx| DistributionBucketId::<T> {
+                    distribution_bucket_family_id: *family_id,
+                    distribution_bucket_index: bucket_idx,
+                })
+                .take(*bucket_num as usize)
+                .collect::<Vec<_>>()
+
+                // rename buckets
+            })
+            .flatten()
+            .collect::<BTreeSet<_>>()
+    }
+}

+ 131 - 0
runtime-modules/storage/src/random_buckets/mod.rs

@@ -0,0 +1,131 @@
+use frame_support::traits::{Get, Randomness};
+use sp_arithmetic::traits::{BaseArithmetic, One, Zero};
+use sp_runtime::traits::Bounded;
+use sp_runtime::SaturatedConversion;
+use sp_std::marker::PhantomData;
+
+use crate::{Module, Trait};
+
+pub(crate) mod distribution_bucket_picker;
+pub(crate) mod storage_bucket_picker;
+
+pub(crate) use distribution_bucket_picker::DistributionBucketPicker;
+pub(crate) use storage_bucket_picker::StorageBucketPicker;
+
+// A meta trait for defining generic bucket ID.
+pub(crate) trait BucketId:
+    Bounded + BaseArithmetic + From<u64> + Into<u64> + Clone + PartialOrd
+{
+}
+impl<T: Bounded + BaseArithmetic + From<u64> + Into<u64> + Clone + PartialOrd> BucketId for T {}
+
+// Iterator for random storage or distribution bucket IDs. It uses the Substrate Randomness trait
+// (possibly backed by the randomness_collective_flip pallet).
+// Its maximum number of iterations is bounded.
+pub(crate) struct RandomBucketIdIterator<T: Trait, Id: BucketId> {
+    // Trait marker.
+    trait_marker: PhantomData<T>,
+
+    // Current Iterator step number.
+    current_iteration: u64,
+
+    // Maximum allowed iteration number.
+    max_iteration_number: u64,
+
+    // Current seed for the randomness generator.
+    current_seed: T::Hash,
+
+    // Next possible id for the buckets.
+    next_id: Id,
+}
+
+impl<T: Trait, Id: BucketId> Iterator for RandomBucketIdIterator<T, Id> {
+    type Item = Id;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Cannot create randomness in the initial block (Substrate error).
+        if <frame_system::Module<T>>::block_number() == Zero::zero() {
+            return None;
+        }
+
+        if self.current_iteration >= self.max_iteration_number {
+            return None;
+        }
+
+        let random_bucket_id = self.random_bucket_id();
+
+        self.current_iteration += 1;
+        self.current_seed = T::Randomness::random(self.current_seed.as_ref());
+
+        Some(random_bucket_id)
+    }
+}
+
+impl<T: Trait, Id: BucketId> RandomBucketIdIterator<T, Id> {
+    // Generate random storage or distribution bucket ID using next_id as an upper_bound.
+    // Deleted bucket IDs are included.
+    fn random_bucket_id(&self) -> Id {
+        let total_buckets_number: u64 = self.next_id.clone().into();
+
+        let random_bucket_id: Id = Module::<T>::random_index(
+            self.current_seed.as_ref(),
+            total_buckets_number.saturated_into(),
+        )
+        .saturated_into();
+
+        random_bucket_id
+    }
+
+    // Creates new iterator.
+    pub(crate) fn new(next_id: Id) -> Self {
+        let seed = Module::<T>::get_initial_random_seed();
+
+        Self {
+            current_iteration: 0,
+            max_iteration_number: T::MaxRandomIterationNumber::get(),
+            trait_marker: PhantomData,
+            current_seed: seed,
+            next_id,
+        }
+    }
+}
+
+// Iterator for sequential storage or distribution bucket IDs. It starts from the first possible
+// bucket ID (zero) and iterates up to, but not including, `next_id`.
+pub(crate) struct SequentialBucketIdIterator<T: Trait, Id: BucketId> {
+    // Trait marker.
+    trait_marker: PhantomData<T>,
+
+    // Bucket ID for the current iteration.
+    current_bucket_id: Id,
+
+    // Next possible id for the buckets.
+    next_id: Id,
+}
+
+impl<T: Trait, Id: BucketId> Iterator for SequentialBucketIdIterator<T, Id> {
+    type Item = Id;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.current_bucket_id >= self.next_id {
+            return None;
+        }
+
+        let result = self.current_bucket_id.clone();
+
+        self.current_bucket_id += One::one();
+
+        Some(result)
+    }
+}
+
+impl<T: Trait, Id: BucketId> SequentialBucketIdIterator<T, Id> {
+    // Creates new iterator.
+    pub(crate) fn new(next_id: Id) -> Self {
+        Self {
+            current_bucket_id: Zero::zero(),
+            trait_marker: PhantomData,
+            next_id,
+        }
+    }
+}
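
The two iterators above implement the bucket-selection strategy shared by both pickers: try a bounded number of pseudo-random IDs below `next_id` first, then fall back to a sequential sweep from zero, filtering out invalid and already-picked IDs until enough buckets are collected. The following is a self-contained sketch of that strategy with plain `u64` IDs; the linear-congruential generator and the `is_valid` closure are stand-ins (assumptions for illustration only) for the runtime's `Randomness` source and the existence / `accepting_new_bags` checks:

```rust
use std::collections::BTreeSet;

// Pick up to `wanted` distinct valid bucket IDs below `next_id`: random phase first
// (bounded by `max_random_iterations`), then a sequential fallback sweep 0..next_id.
fn pick_buckets(
    next_id: u64,
    wanted: usize,
    max_random_iterations: u64,
    is_valid: impl Fn(u64) -> bool,
) -> BTreeSet<u64> {
    let mut seed = 42u64; // stand-in for the on-chain randomness seed
    let random_phase = (0..max_random_iterations).map(move |_| {
        // Simple LCG as a placeholder for T::Randomness::random(..).
        seed = seed
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        seed % next_id.max(1)
    });
    let sequential_phase = 0..next_id;

    let mut picked = BTreeSet::new();
    for id in random_phase.chain(sequential_phase) {
        if picked.len() == wanted {
            break;
        }
        if is_valid(id) {
            // BTreeSet::insert ignores duplicates left over from the random phase.
            picked.insert(id);
        }
    }
    picked
}

fn main() {
    // Suppose buckets 0..10 exist and only even-indexed ones accept new bags.
    let picked = pick_buckets(10, 3, 5, |id| id % 2 == 0);
    println!("picked buckets: {:?}", picked);
}
```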

+ 65 - 0
runtime-modules/storage/src/random_buckets/storage_bucket_picker.rs

@@ -0,0 +1,65 @@
+#![warn(missing_docs)]
+
+use sp_std::cell::RefCell;
+use sp_std::collections::btree_set::BTreeSet;
+use sp_std::marker::PhantomData;
+
+pub(crate) use super::{RandomBucketIdIterator, SequentialBucketIdIterator};
+use crate::{DynamicBagType, Module, Trait};
+
+// Generates storage bucket IDs to assign to a new dynamic bag.
+pub(crate) struct StorageBucketPicker<T> {
+    trait_marker: PhantomData<T>,
+}
+
+impl<T: Trait> StorageBucketPicker<T> {
+    // Selects a set of storage bucket IDs to assign to the dynamic bag.
+    // It first tries to generate random bucket IDs. If the number of acquired random IDs is not
+    // sufficient, it collects additional IDs sequentially, starting from zero and going up to the
+    // total number of possible IDs.
+    // The function filters out deleted buckets and disabled buckets (accepting_new_bags == false).
+    // The number of IDs to select is defined by the dynamic bag creation policy.
+    // Returns the accumulated bucket ID set, which may be empty.
+    pub(crate) fn pick_storage_buckets(bag_type: DynamicBagType) -> BTreeSet<T::StorageBucketId> {
+        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
+
+        if creation_policy.no_storage_buckets_required() {
+            return BTreeSet::new();
+        }
+
+        let required_bucket_num = creation_policy.number_of_storage_buckets as usize;
+
+        // Storage bucket IDs accumulator.
+        let bucket_ids_cell = RefCell::new(BTreeSet::new());
+        let next_storage_bucket_id = Module::<T>::next_storage_bucket_id();
+        RandomBucketIdIterator::<T, T::StorageBucketId>::new(next_storage_bucket_id)
+            .chain(SequentialBucketIdIterator::<T, T::StorageBucketId>::new(
+                next_storage_bucket_id,
+            ))
+            .filter(Self::check_storage_bucket_is_valid_for_bag_assigning)
+            .filter(|bucket_id| {
+                let bucket_ids = bucket_ids_cell.borrow();
+
+                // Skip IDs that have already been selected.
+                !bucket_ids.contains(bucket_id)
+            })
+            .take(required_bucket_num)
+            .for_each(|bucket_id| {
+                let mut bucket_ids = bucket_ids_cell.borrow_mut();
+
+                bucket_ids.insert(bucket_id);
+            });
+
+        bucket_ids_cell.into_inner()
+    }
+
+    // Verifies storage bucket ID (non-deleted and accepting new bags).
+    pub(crate) fn check_storage_bucket_is_valid_for_bag_assigning(
+        bucket_id: &T::StorageBucketId,
+    ) -> bool {
+        // Check that the bucket exists (returns false otherwise) and that it accepts new bags.
+        Module::<T>::ensure_storage_bucket_exists(bucket_id)
+            .ok()
+            .map(|bucket| bucket.accepting_new_bags)
+            .unwrap_or(false)
+    }
+}
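For context, here is a hedged sketch of how such a picker is typically consumed when a dynamic bag is created. The helper name and the surrounding extrinsic logic are assumptions for illustration; only the `pick_storage_buckets` signature comes from this diff:

```rust
use sp_std::collections::btree_set::BTreeSet;

use crate::random_buckets::storage_bucket_picker::StorageBucketPicker;
use crate::{DynamicBagType, Trait};

// Hypothetical call site (not part of this change): resolve the storage
// buckets to assign to a new dynamic bag of the given type.
pub(crate) fn storage_buckets_for_new_bag<T: Trait>(
    bag_type: DynamicBagType,
) -> BTreeSet<T::StorageBucketId> {
    // An empty set is a valid outcome, e.g. when the creation policy requires
    // zero storage buckets or no bucket currently accepts new bags.
    StorageBucketPicker::<T>::pick_storage_buckets(bag_type)
}
```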

+ 0 - 168
runtime-modules/storage/src/storage_bucket_picker.rs

@@ -1,168 +0,0 @@
-#![warn(missing_docs)]
-
-use frame_support::traits::{Get, Randomness};
-use sp_arithmetic::traits::{One, Zero};
-use sp_runtime::SaturatedConversion;
-use sp_std::cell::RefCell;
-use sp_std::collections::btree_set::BTreeSet;
-use sp_std::marker::PhantomData;
-
-use crate::{DynamicBagType, Module, Trait};
-
-// Generates storage bucket IDs to assign to a new dynamic bag.
-pub(crate) struct StorageBucketPicker<T> {
-    trait_marker: PhantomData<T>,
-}
-
-impl<T: Trait> StorageBucketPicker<T> {
-    // Selects storage bucket ID sets to assign to the storage bucket.
-    // At first, it tries to generate random bucket IDs. If acquired random IDs number is not enough
-    // it tries to get additional IDs starting from zero up to the total number of the possible IDs.
-    // The function filters deleted buckets and disabled buckets (accepting_new_bags == false)
-    // Total number of possible IDs is limited by the dynamic bag settings.
-    // Returns an accumulated bucket ID set or an empty set.
-    pub(crate) fn pick_storage_buckets(bag_type: DynamicBagType) -> BTreeSet<T::StorageBucketId> {
-        let creation_policy = Module::<T>::get_dynamic_bag_creation_policy(bag_type);
-
-        if creation_policy.no_storage_buckets_required() {
-            return BTreeSet::new();
-        }
-
-        let required_bucket_num = creation_policy.number_of_storage_buckets as usize;
-
-        // Storage bucket IDs accumulator.
-        let bucket_ids_cell = RefCell::new(BTreeSet::new());
-
-        RandomStorageBucketIdIterator::<T>::new()
-            .chain(SequentialStorageBucketIdIterator::<T>::new())
-            .filter(Self::check_storage_bucket_is_valid_for_bag_assigning)
-            .filter(|bucket_id| {
-                let bucket_ids = bucket_ids_cell.borrow();
-
-                // Skips the iteration on existing ID.
-                !bucket_ids.contains(bucket_id)
-            })
-            .take(required_bucket_num)
-            .for_each(|bucket_id| {
-                let mut bucket_ids = bucket_ids_cell.borrow_mut();
-
-                bucket_ids.insert(bucket_id);
-            });
-
-        bucket_ids_cell.into_inner()
-    }
-
-    // Verifies storage bucket ID (non-deleted and accepting new bags).
-    pub(crate) fn check_storage_bucket_is_valid_for_bag_assigning(
-        bucket_id: &T::StorageBucketId,
-    ) -> bool {
-        // Check bucket for existence (return false if not). Check `accepting_new_bags`.
-        Module::<T>::ensure_storage_bucket_exists(bucket_id)
-            .ok()
-            .map(|bucket| bucket.accepting_new_bags)
-            .unwrap_or(false)
-    }
-}
-
-// Iterator for random storage bucket IDs. It uses Substrate Randomness trait
-// (and possibly randomness_collective_flip pallet for implementation).
-// Its maximum iterations are bounded.
-pub(crate) struct RandomStorageBucketIdIterator<T: Trait> {
-    // Trait marker.
-    trait_marker: PhantomData<T>,
-
-    // Current Iterator step number.
-    current_iteration: u64,
-
-    // Maximum allowed iteration number.
-    max_iteration_number: u64,
-
-    // Current seed for the randomness generator.
-    current_seed: T::Hash,
-}
-
-impl<T: Trait> Iterator for RandomStorageBucketIdIterator<T> {
-    type Item = T::StorageBucketId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // Cannot create randomness in the initial block (Substrate error).
-        if <frame_system::Module<T>>::block_number() == Zero::zero() {
-            return None;
-        }
-
-        if self.current_iteration >= self.max_iteration_number {
-            return None;
-        }
-
-        let random_storage_bucket_id = self.random_storage_bucket_id();
-
-        self.current_iteration += 1;
-        self.current_seed = T::Randomness::random(self.current_seed.as_ref());
-
-        Some(random_storage_bucket_id)
-    }
-}
-
-impl<T: Trait> RandomStorageBucketIdIterator<T> {
-    // Generate random storage bucket ID using next_storage_bucket_id() as upper_bound.
-    // Deleted storage bucket ID are included.
-    fn random_storage_bucket_id(&self) -> T::StorageBucketId {
-        let total_buckets_number = Module::<T>::next_storage_bucket_id();
-
-        let random_bucket_id: T::StorageBucketId = Module::<T>::random_index(
-            self.current_seed.as_ref(),
-            total_buckets_number.saturated_into(),
-        )
-        .saturated_into();
-
-        random_bucket_id
-    }
-
-    // Creates new iterator.
-    pub(crate) fn new() -> Self {
-        let seed = Module::<T>::get_initial_random_seed();
-
-        Self {
-            current_iteration: 0,
-            max_iteration_number: T::MaxRandomIterationNumber::get(),
-            trait_marker: PhantomData,
-            current_seed: seed,
-        }
-    }
-}
-
-// Iterator for sequential storage bucket IDs. It starts from the first possible storage bucket ID
-// (zero) and goes up to the last storage bucket IDs (next_storage_bucket_id - excluding).
-pub(crate) struct SequentialStorageBucketIdIterator<T: Trait> {
-    // Trait marker.
-    trait_marker: PhantomData<T>,
-
-    // Storage bucket ID for the current iteration.
-    current_bucket_id: T::StorageBucketId,
-}
-
-impl<T: Trait> Iterator for SequentialStorageBucketIdIterator<T> {
-    type Item = T::StorageBucketId;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.current_bucket_id >= Module::<T>::next_storage_bucket_id() {
-            return None;
-        }
-
-        let result = self.current_bucket_id;
-
-        self.current_bucket_id += One::one();
-
-        Some(result)
-    }
-}
-
-impl<T: Trait> SequentialStorageBucketIdIterator<T> {
-    // Creates new iterator.
-    pub(crate) fn new() -> Self {
-        Self {
-            current_bucket_id: Zero::zero(),
-            trait_marker: PhantomData,
-        }
-    }
-}

+ 109 - 96
runtime-modules/storage/src/tests/fixtures.rs

@@ -14,9 +14,9 @@ use crate::tests::mocks::{
     DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID,
 };
 use crate::{
-    BagId, Cid, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
-    DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, RawEvent, StaticBagId,
-    StorageBucketOperatorStatus, UploadParameters,
+    BagId, Cid, DataObjectCreationParameters, DataObjectStorage, DistributionBucket,
+    DistributionBucketId, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, RawEvent,
+    StaticBagId, StorageBucketOperatorStatus, UploadParameters,
 };
 
 // Recommendation from Parity on testing on_finalize
@@ -50,6 +50,7 @@ impl EventFixture {
             u64,
             u64,
             u64,
+            DistributionBucketId<Test>,
             u64,
         >,
     ) {
@@ -69,6 +70,7 @@ impl EventFixture {
             u64,
             u64,
             u64,
+            DistributionBucketId<Test>,
             u64,
         >,
     ) {
@@ -1295,7 +1297,8 @@ impl CreateDistributionBucketFixture {
     }
 
     pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option<u64> {
-        let next_bucket_id = Storage::next_distribution_bucket_id();
+        let next_bucket_index = Storage::distribution_bucket_family_by_id(self.family_id)
+            .next_distribution_bucket_index;
         let actual_result = Storage::create_distribution_bucket(
             self.origin.clone().into(),
             self.family_id,
@@ -1305,24 +1308,27 @@ impl CreateDistributionBucketFixture {
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            assert_eq!(next_bucket_id + 1, Storage::next_distribution_bucket_id());
-
-            let family: DistributionBucketFamily<Test> =
-                Storage::distribution_bucket_family_by_id(self.family_id);
-
-            assert!(family.distribution_buckets.contains_key(&next_bucket_id));
             assert_eq!(
-                family
-                    .distribution_buckets
-                    .get(&next_bucket_id)
-                    .unwrap()
-                    .accepting_new_bags,
-                self.accept_new_bags
+                next_bucket_index + 1,
+                Storage::distribution_bucket_family_by_id(self.family_id)
+                    .next_distribution_bucket_index
             );
 
-            Some(next_bucket_id)
+            let bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    next_bucket_index,
+                );
+
+            assert_eq!(bucket.accepting_new_bags, self.accept_new_bags);
+
+            Some(next_bucket_index)
         } else {
-            assert_eq!(next_bucket_id, Storage::next_distribution_bucket_id());
+            assert_eq!(
+                next_bucket_index,
+                Storage::distribution_bucket_family_by_id(self.family_id)
+                    .next_distribution_bucket_index
+            );
 
             None
         }
@@ -1332,7 +1338,7 @@ impl CreateDistributionBucketFixture {
 pub struct UpdateDistributionBucketStatusFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
     new_status: bool,
 }
 
@@ -1341,13 +1347,13 @@ impl UpdateDistributionBucketStatusFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
             new_status: false,
         }
     }
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1367,8 +1373,7 @@ impl UpdateDistributionBucketStatusFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::update_distribution_bucket_status(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
             self.new_status,
         );
 
@@ -1379,7 +1384,7 @@ impl UpdateDistributionBucketStatusFixture {
 pub struct DeleteDistributionBucketFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
 }
 
 impl DeleteDistributionBucketFixture {
@@ -1387,13 +1392,13 @@ impl DeleteDistributionBucketFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
         }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1409,8 +1414,7 @@ impl DeleteDistributionBucketFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::delete_distribution_bucket(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
         );
 
         assert_eq!(actual_result, expected_result);
@@ -1421,8 +1425,8 @@ pub struct UpdateDistributionBucketForBagsFixture {
     origin: RawOrigin<u64>,
     bag_id: BagId<Test>,
     family_id: u64,
-    add_bucket_ids: BTreeSet<u64>,
-    remove_bucket_ids: BTreeSet<u64>,
+    add_bucket_indices: BTreeSet<u64>,
+    remove_bucket_indices: BTreeSet<u64>,
 }
 
 impl UpdateDistributionBucketForBagsFixture {
@@ -1431,8 +1435,8 @@ impl UpdateDistributionBucketForBagsFixture {
             origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
             bag_id: Default::default(),
             family_id: Default::default(),
-            add_bucket_ids: Default::default(),
-            remove_bucket_ids: Default::default(),
+            add_bucket_indices: Default::default(),
+            remove_bucket_indices: Default::default(),
         }
     }
 
@@ -1440,16 +1444,16 @@ impl UpdateDistributionBucketForBagsFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_add_bucket_ids(self, add_bucket_ids: BTreeSet<u64>) -> Self {
+    pub fn with_add_bucket_indices(self, add_bucket_indices: BTreeSet<u64>) -> Self {
         Self {
-            add_bucket_ids,
+            add_bucket_indices,
             ..self
         }
     }
 
-    pub fn with_remove_bucket_ids(self, remove_bucket_ids: BTreeSet<u64>) -> Self {
+    pub fn with_remove_bucket_indices(self, remove_bucket_indices: BTreeSet<u64>) -> Self {
         Self {
-            remove_bucket_ids,
+            remove_bucket_indices,
             ..self
         }
     }
@@ -1467,8 +1471,8 @@ impl UpdateDistributionBucketForBagsFixture {
             self.origin.clone().into(),
             self.bag_id.clone(),
             self.family_id,
-            self.add_bucket_ids.clone(),
-            self.remove_bucket_ids.clone(),
+            self.add_bucket_indices.clone(),
+            self.remove_bucket_indices.clone(),
         );
 
         assert_eq!(actual_result, expected_result);
@@ -1520,7 +1524,7 @@ impl UpdateDistributionBucketsPerBagLimitFixture {
 pub struct UpdateDistributionBucketModeFixture {
     origin: RawOrigin<u64>,
     family_id: u64,
-    distribution_bucket_id: u64,
+    distribution_bucket_index: u64,
     distributing: bool,
 }
 
@@ -1529,13 +1533,13 @@ impl UpdateDistributionBucketModeFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID),
             family_id: Default::default(),
-            distribution_bucket_id: Default::default(),
+            distribution_bucket_index: Default::default(),
             distributing: true,
         }
     }
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
         Self {
-            distribution_bucket_id: bucket_id,
+            distribution_bucket_index: bucket_index,
             ..self
         }
     }
@@ -1558,8 +1562,7 @@ impl UpdateDistributionBucketModeFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::update_distribution_bucket_mode(
             self.origin.clone().into(),
-            self.family_id,
-            self.distribution_bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.distribution_bucket_index),
             self.distributing,
         );
 
@@ -1621,7 +1624,7 @@ pub struct InviteDistributionBucketOperatorFixture {
     origin: RawOrigin<u64>,
     operator_worker_id: u64,
     family_id: u64,
-    bucket_id: u64,
+    bucket_index: u64,
 }
 
 impl InviteDistributionBucketOperatorFixture {
@@ -1629,7 +1632,7 @@ impl InviteDistributionBucketOperatorFixture {
         Self {
             origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
             operator_worker_id: DEFAULT_WORKER_ID,
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
         }
     }
@@ -1645,8 +1648,11 @@ impl InviteDistributionBucketOperatorFixture {
         }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1656,19 +1662,18 @@ impl InviteDistributionBucketOperatorFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::invite_distribution_bucket_operator(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(new_bucket
                 .pending_invitations
@@ -1679,7 +1684,7 @@ impl InviteDistributionBucketOperatorFixture {
 
 pub struct CancelDistributionBucketInvitationFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     operator_worker_id: u64,
 }
@@ -1688,7 +1693,7 @@ impl CancelDistributionBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             operator_worker_id: Default::default(),
         }
@@ -1698,8 +1703,11 @@ impl CancelDistributionBucketInvitationFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1716,19 +1724,18 @@ impl CancelDistributionBucketInvitationFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::cancel_distribution_bucket_operator_invite(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket
                 .pending_invitations
@@ -1739,7 +1746,7 @@ impl CancelDistributionBucketInvitationFixture {
 
 pub struct AcceptDistributionBucketInvitationFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     worker_id: u64,
 }
@@ -1748,7 +1755,7 @@ impl AcceptDistributionBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             worker_id: Default::default(),
         }
@@ -1758,8 +1765,11 @@ impl AcceptDistributionBucketInvitationFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1774,18 +1784,17 @@ impl AcceptDistributionBucketInvitationFixture {
         let actual_result = Storage::accept_distribution_bucket_invitation(
             self.origin.clone().into(),
             self.worker_id,
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
         );
 
         assert_eq!(actual_result, expected_result);
 
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket.pending_invitations.contains(&self.worker_id));
 
@@ -1796,7 +1805,7 @@ impl AcceptDistributionBucketInvitationFixture {
 
 pub struct SetDistributionBucketMetadataFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     worker_id: u64,
     metadata: Vec<u8>,
@@ -1806,7 +1815,7 @@ impl SetDistributionBucketMetadataFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             worker_id: Default::default(),
             metadata: Default::default(),
@@ -1821,8 +1830,11 @@ impl SetDistributionBucketMetadataFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1837,8 +1849,7 @@ impl SetDistributionBucketMetadataFixture {
         let actual_result = Storage::set_distribution_operator_metadata(
             self.origin.clone().into(),
             self.worker_id,
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.metadata.clone(),
         );
 
@@ -1848,7 +1859,7 @@ impl SetDistributionBucketMetadataFixture {
 
 pub struct RemoveDistributionBucketOperatorFixture {
     origin: RawOrigin<u64>,
-    bucket_id: u64,
+    bucket_index: u64,
     family_id: u64,
     operator_worker_id: u64,
 }
@@ -1857,7 +1868,7 @@ impl RemoveDistributionBucketOperatorFixture {
     pub fn default() -> Self {
         Self {
             origin: RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID),
-            bucket_id: Default::default(),
+            bucket_index: Default::default(),
             family_id: Default::default(),
             operator_worker_id: Default::default(),
         }
@@ -1867,8 +1878,11 @@ impl RemoveDistributionBucketOperatorFixture {
         Self { origin, ..self }
     }
 
-    pub fn with_bucket_id(self, bucket_id: u64) -> Self {
-        Self { bucket_id, ..self }
+    pub fn with_bucket_index(self, bucket_index: u64) -> Self {
+        Self {
+            bucket_index,
+            ..self
+        }
     }
 
     pub fn with_family_id(self, family_id: u64) -> Self {
@@ -1885,18 +1899,17 @@ impl RemoveDistributionBucketOperatorFixture {
     pub fn call_and_assert(&self, expected_result: DispatchResult) {
         let actual_result = Storage::remove_distribution_bucket_operator(
             self.origin.clone().into(),
-            self.family_id,
-            self.bucket_id,
+            Storage::create_distribution_bucket_id(self.family_id, self.bucket_index),
             self.operator_worker_id,
         );
 
         assert_eq!(actual_result, expected_result);
         if actual_result.is_ok() {
-            let new_family = Storage::distribution_bucket_family_by_id(self.family_id);
-            let new_bucket = new_family
-                .distribution_buckets
-                .get(&self.bucket_id)
-                .unwrap();
+            let new_bucket: DistributionBucket<Test> =
+                Storage::distribution_bucket_by_family_id_by_index(
+                    self.family_id,
+                    self.bucket_index,
+                );
 
             assert!(!new_bucket.operators.contains(&self.operator_worker_id));
         }

+ 2 - 4
runtime-modules/storage/src/tests/mocks.rs

@@ -50,8 +50,7 @@ impl balances::Trait for Test {
 }
 
 parameter_types! {
-    pub const MaxDistributionBucketFamilyNumber: u64 = 4;
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 10;
+    pub const MaxDistributionBucketFamilyNumber: u64 = 6;
     pub const DataObjectDeletionPrize: u64 = 10;
     pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage
     pub const BlacklistSizeLimit: u64 = 1;
@@ -80,7 +79,7 @@ impl crate::Trait for Test {
     type Event = TestEvent;
     type DataObjectId = u64;
     type StorageBucketId = u64;
-    type DistributionBucketId = u64;
+    type DistributionBucketIndex = u64;
     type DistributionBucketFamilyId = u64;
     type DistributionBucketOperatorId = u64;
     type ChannelId = u64;
@@ -96,7 +95,6 @@ impl crate::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =
         MaxNumberOfPendingInvitationsPerDistributionBucket;
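The mock change above reflects the new addressing scheme: instead of a global `DistributionBucketId` counter, the runtime exposes a per-family `DistributionBucketIndex`, and the full bucket ID is assembled from the family ID plus that index (via `Storage::create_distribution_bucket_id` in the tests below). A simplified standalone sketch of the composite key, with field names taken from the tests and the ID types narrowed to `u64`:

```rust
use std::collections::BTreeSet;

// Simplified model of the composite distribution bucket ID: the family ID plus
// the bucket's index within that family.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct DistributionBucketId {
    distribution_bucket_family_id: u64,
    distribution_bucket_index: u64,
}

// Mirrors the role of `Storage::create_distribution_bucket_id(family_id, index)`.
fn create_distribution_bucket_id(family_id: u64, index: u64) -> DistributionBucketId {
    DistributionBucketId {
        distribution_bucket_family_id: family_id,
        distribution_bucket_index: index,
    }
}

fn main() {
    // Indices start at zero within each family, so the same index may appear in
    // different families while the (family, index) pair stays unique.
    let ids: BTreeSet<_> = vec![
        create_distribution_bucket_id(1, 0),
        create_distribution_bucket_id(2, 0),
    ]
    .into_iter()
    .collect();

    assert_eq!(ids.len(), 2);
    println!("{:?}", ids);
}
```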

+ 144 - 223
runtime-modules/storage/src/tests/mod.rs

@@ -15,20 +15,20 @@ use common::working_group::WorkingGroup;
 
 use crate::{
     BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
-    DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error,
-    ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus, StorageTreasury,
-    UploadParameters, Voucher,
+    DistributionBucketId, DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId,
+    DynamicBagType, Error, ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus,
+    StorageTreasury, UploadParameters, Voucher,
 };
 
 use mocks::{
     build_test_externalities, Balances, DataObjectDeletionPrize,
     DefaultChannelDynamicBagNumberOfStorageBuckets, DefaultMemberDynamicBagNumberOfStorageBuckets,
     InitialStorageBucketsNumberForDynamicBag, MaxDataObjectSize, MaxDistributionBucketFamilyNumber,
-    MaxDistributionBucketNumberPerFamily, MaxRandomIterationNumber, Storage, Test,
-    ANOTHER_DISTRIBUTION_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID,
-    DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DEFAULT_DISTRIBUTION_PROVIDER_ID,
-    DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID, DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
+    MaxRandomIterationNumber, Storage, Test, ANOTHER_DISTRIBUTION_PROVIDER_ID,
+    ANOTHER_STORAGE_PROVIDER_ID, DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID,
+    DEFAULT_DISTRIBUTION_PROVIDER_ID, DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID,
+    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, DEFAULT_STORAGE_PROVIDER_ID,
+    DISTRIBUTION_WG_LEADER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
 use fixtures::*;
@@ -3312,12 +3312,12 @@ fn test_storage_bucket_iterators() {
         let buckets_number = 5;
         create_storage_buckets(buckets_number);
 
-        use crate::storage_bucket_picker::{
-            RandomStorageBucketIdIterator as Rand, SequentialStorageBucketIdIterator as Seq,
+        use crate::random_buckets::storage_bucket_picker::{
+            RandomBucketIdIterator as Rand, SequentialBucketIdIterator as Seq,
         };
 
-        let ids = Rand::<Test>::new()
-            .chain(Seq::<Test>::new())
+        let ids = Rand::<Test, u64>::new(Storage::next_storage_bucket_id())
+            .chain(Seq::<Test, u64>::new(Storage::next_storage_bucket_id()))
             .collect::<Vec<_>>();
 
         // Check combined iterator length.
@@ -3521,15 +3521,19 @@ fn delete_distribution_bucket_family_fails_with_assgined_bags() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
@@ -3620,7 +3624,7 @@ fn create_distribution_bucket_succeeded() {
         let starting_block = 1;
         run_to_block(starting_block);
 
-        let accept_new_bags = false;
+        let accept_new_bags = true;
 
         let family_id = CreateDistributionBucketFamilyFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
@@ -3634,14 +3638,14 @@ fn create_distribution_bucket_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        assert!(Storage::distribution_bucket_family_by_id(family_id)
-            .distribution_buckets
-            .contains_key(&bucket_id));
+        assert!(
+            crate::DistributionBucketByFamilyIdById::<Test>::contains_key(&family_id, &bucket_id)
+        );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketCreated(
             family_id,
             accept_new_bags,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_id),
         ));
     });
 }
@@ -3666,22 +3670,6 @@ fn create_distribution_bucket_fails_with_non_existing_family() {
     });
 }
 
-#[test]
-fn create_distribution_bucket_fails_with_exceeding_max_bucket_number() {
-    build_test_externalities().execute_with(|| {
-        let (family_id, _) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-
-        CreateDistributionBucketFixture::default()
-            .with_family_id(family_id)
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::MaxDistributionBucketNumberPerFamilyLimitExceeded.into(),
-            ));
-    });
-}
-
 #[test]
 fn update_distribution_bucket_status_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3693,7 +3681,7 @@ fn update_distribution_bucket_status_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -3702,22 +3690,20 @@ fn update_distribution_bucket_status_succeeded() {
         let new_status = true;
         UpdateDistributionBucketStatusFixture::default()
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_status)
             .call_and_assert(Ok(()));
 
         assert_eq!(
-            Storage::distribution_bucket_family_by_id(family_id)
-                .distribution_buckets
-                .get(&bucket_id)
-                .unwrap()
+            Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_index)
                 .accepting_new_bags,
             new_status
         );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketStatusUpdated(
-            family_id, bucket_id, new_status,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
+            new_status,
         ));
     });
 }
@@ -3746,17 +3732,6 @@ fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket() {
     });
 }
 
-#[test]
-fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        UpdateDistributionBucketStatusFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn delete_distribution_bucket_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3768,20 +3743,20 @@ fn delete_distribution_bucket_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketDeleted(
-            family_id, bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
         ));
     });
 }
@@ -3799,27 +3774,31 @@ fn delete_distribution_bucket_fails_with_assgined_bags() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_accept_new_bags(true)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_indices = BTreeSet::from_iter(vec![bucket_index]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_indices.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_indices
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::DistributionBucketIsBoundToBag.into()));
@@ -3834,7 +3813,7 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -3842,7 +3821,7 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID)
             .call_and_assert(Ok(()));
@@ -3850,12 +3829,12 @@ fn delete_distribution_bucket_failed_with_existing_operators() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID)
             .call_and_assert(Ok(()));
 
         DeleteDistributionBucketFixture::default()
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::DistributionProviderOperatorSet.into()));
@@ -3888,17 +3867,6 @@ fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket() {
     });
 }
 
-#[test]
-fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        DeleteDistributionBucketFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn update_distribution_buckets_for_bags_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -3922,22 +3890,26 @@ fn update_distribution_buckets_for_bags_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketsUpdatedForBag(
             bag_id,
             family_id,
-            add_buckets,
+            add_buckets_ids,
             BTreeSet::new(),
         ));
     });
@@ -3963,21 +3935,24 @@ fn update_distribution_buckets_for_bags_succeeded_with_additioonal_checks_on_add
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
+        let add_buckets_ids = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateDistributionBucketForBagsFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
         // Add check
+        let add_buckets = add_buckets_ids
+            .iter()
+            .map(|idx| Storage::create_distribution_bucket_id(family_id, *idx))
+            .collect::<BTreeSet<_>>();
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by, add_buckets);
 
-        let family = Storage::distribution_bucket_family_by_id(family_id);
-        let bucket = family.distribution_buckets.get(&bucket_id).unwrap();
+        let bucket = Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_id);
         assert_eq!(bucket.assigned_bags, 1);
 
         // ******
@@ -3986,14 +3961,13 @@ fn update_distribution_buckets_for_bags_succeeded_with_additioonal_checks_on_add
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_remove_bucket_ids(add_buckets.clone())
+            .with_remove_bucket_indices(add_buckets_ids.clone())
             .call_and_assert(Ok(()));
 
         let bag = Storage::bag(&bag_id);
         assert_eq!(bag.distributed_by.len(), 0);
 
-        let family = Storage::distribution_bucket_family_by_id(family_id);
-        let bucket = family.distribution_buckets.get(&bucket_id).unwrap();
+        let bucket = Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_id);
         assert_eq!(bucket.assigned_bags, 0);
     });
 }
@@ -4021,7 +3995,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
             .with_bag_id(bag_id.clone())
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::DynamicBagDoesntExist.into()));
     });
 }
@@ -4052,7 +4026,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_accepting_new_bags_bucket
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
             .with_bag_id(bag_id.clone())
-            .with_add_bucket_ids(add_buckets.clone())
+            .with_add_bucket_indices(add_buckets.clone())
             .call_and_assert(Err(
                 Error::<Test>::DistributionBucketDoesntAcceptNewBags.into()
             ));
@@ -4100,7 +4074,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_distribution_buc
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_add_bucket_ids(buckets.clone())
+            .with_add_bucket_indices(buckets.clone())
             .call_and_assert(Err(Error::<Test>::DistributionBucketDoesntExist.into()));
 
         // Invalid removed bucket ID.
@@ -4108,7 +4082,7 @@ fn update_distribution_buckets_for_bags_fails_with_non_existing_distribution_buc
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_family_id(family_id)
-            .with_remove_bucket_ids(buckets.clone())
+            .with_remove_bucket_indices(buckets.clone())
             .call_and_assert(Err(Error::<Test>::DistributionBucketDoesntExist.into()));
     });
 }
@@ -4181,7 +4155,7 @@ fn update_distribution_bucket_mode_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4190,23 +4164,19 @@ fn update_distribution_bucket_mode_succeeded() {
         let distributing = false;
         UpdateDistributionBucketModeFixture::default()
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_distributing(distributing)
             .call_and_assert(Ok(()));
 
         assert_eq!(
-            Storage::distribution_bucket_family_by_id(family_id)
-                .distribution_buckets
-                .get(&bucket_id)
-                .unwrap()
+            Storage::distribution_bucket_by_family_id_by_index(family_id, &bucket_index)
                 .accepting_new_bags,
             distributing
         );
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketModeUpdated(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             distributing,
         ));
     });
@@ -4236,17 +4206,6 @@ fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket() {
     });
 }
 
-#[test]
-fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        UpdateDistributionBucketModeFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn update_families_in_dynamic_bag_creation_policy_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4306,7 +4265,9 @@ fn update_families_in_dynamic_bag_creation_policy_fails_with_invalid_family_id()
     });
 }
 
-fn create_distribution_bucket_family_with_buckets(bucket_number: u64) -> (u64, Vec<u64>) {
+fn create_distribution_bucket_family_with_buckets(
+    bucket_number: u64,
+) -> (u64, Vec<DistributionBucketId<Test>>) {
     let family_id = CreateDistributionBucketFamilyFixture::default()
         .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
         .call_and_assert(Ok(()))
@@ -4315,12 +4276,14 @@ fn create_distribution_bucket_family_with_buckets(bucket_number: u64) -> (u64, V
     let bucket_ids = repeat(family_id)
         .take(bucket_number as usize)
         .map(|fam_id| {
-            CreateDistributionBucketFixture::default()
+            let bucket_index = CreateDistributionBucketFixture::default()
                 .with_family_id(fam_id)
                 .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
                 .with_accept_new_bags(true)
                 .call_and_assert(Ok(()))
-                .unwrap()
+                .unwrap();
+
+            Storage::create_distribution_bucket_id(fam_id, bucket_index)
         })
         .collect::<Vec<_>>();
 
@@ -4335,24 +4298,40 @@ fn distribution_bucket_family_pick_during_dynamic_bag_creation_succeeded() {
         run_to_block(starting_block);
 
         let dynamic_bag_type = DynamicBagType::Channel;
+        let buckets_number = 10;
         let new_bucket_number = 5;
 
-        let (family_id1, bucket_ids1) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-        let (family_id2, bucket_ids2) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
-        let (family_id3, _) = create_distribution_bucket_family_with_buckets(
-            MaxDistributionBucketNumberPerFamily::get(),
-        );
+        let (family_id1, bucket_ids1) =
+            create_distribution_bucket_family_with_buckets(buckets_number);
+        let (family_id2, bucket_ids2) =
+            create_distribution_bucket_family_with_buckets(buckets_number);
+        let (family_id3, _) = create_distribution_bucket_family_with_buckets(buckets_number);
         let (family_id4, _) = create_distribution_bucket_family_with_buckets(0);
+        let (family_id5, bucket_id5) = create_distribution_bucket_family_with_buckets(1);
+        let (family_id6, bucket_id6) = create_distribution_bucket_family_with_buckets(1);
+
+        let deleted_bucket_id = bucket_id5[0].clone();
+        DeleteDistributionBucketFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_family_id(deleted_bucket_id.distribution_bucket_family_id)
+            .with_bucket_index(deleted_bucket_id.distribution_bucket_index)
+            .call_and_assert(Ok(()));
+
+        let disabled_bucket_id = bucket_id6[0].clone();
+        UpdateDistributionBucketStatusFixture::default()
+            .with_new_status(false)
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .with_family_id(disabled_bucket_id.distribution_bucket_family_id)
+            .with_bucket_index(disabled_bucket_id.distribution_bucket_index)
+            .call_and_assert(Ok(()));
 
         let families = BTreeMap::from_iter(vec![
             (family_id1, new_bucket_number),
             (family_id2, new_bucket_number),
             (family_id3, 0),
             (family_id4, new_bucket_number),
+            (family_id5, new_bucket_number),
+            (family_id6, new_bucket_number),
         ]);
 
         UpdateFamiliesInDynamicBagCreationPolicyFixture::default()
@@ -4364,6 +4343,8 @@ fn distribution_bucket_family_pick_during_dynamic_bag_creation_succeeded() {
         let picked_bucket_ids =
             Storage::pick_distribution_buckets_for_dynamic_bag(dynamic_bag_type);
 
         assert_eq!(picked_bucket_ids.len(), (new_bucket_number * 2) as usize); // buckets from two families
 
         let total_ids1 = BTreeSet::from_iter(
@@ -4396,7 +4377,7 @@ fn invite_distribution_bucket_operator_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4404,14 +4385,13 @@ fn invite_distribution_bucket_operator_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorInvited(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             provider_id,
         ));
     });
@@ -4453,7 +4433,7 @@ fn invite_distribution_bucket_operator_fails_with_non_missing_invitation() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4461,14 +4441,14 @@ fn invite_distribution_bucket_operator_fails_with_non_missing_invitation() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Err(
@@ -4488,7 +4468,7 @@ fn invite_distribution_bucket_operator_fails_with_exceeding_the_limit_of_pending
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4496,14 +4476,14 @@ fn invite_distribution_bucket_operator_fails_with_exceeding_the_limit_of_pending
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(another_worker_id)
             .call_and_assert(Err(
@@ -4523,7 +4503,7 @@ fn invite_distribution_bucket_operator_fails_with_already_set_operator() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4531,21 +4511,21 @@ fn invite_distribution_bucket_operator_fails_with_already_set_operator() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_worker_id(invited_worker_id)
             .call_and_assert(Ok(()));
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invited_worker_id)
             .call_and_assert(Err(Error::<Test>::DistributionProviderOperatorSet.into()));
@@ -4562,7 +4542,7 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4570,7 +4550,7 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(invalid_provider_id)
             .call_and_assert(Err(
@@ -4579,17 +4559,6 @@ fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_
     });
 }
 
-#[test]
-fn invite_distribution_bucket_operator_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        CancelDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn cancel_distribution_bucket_operator_invite_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4603,7 +4572,7 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4611,7 +4580,7 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4619,13 +4588,12 @@ fn cancel_distribution_bucket_operator_invite_succeeded() {
         CancelDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationCancelled(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             provider_id,
         ));
     });
@@ -4665,7 +4633,7 @@ fn cancel_distribution_bucket_operator_invite_fails_with_non_invited_distributio
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4674,22 +4642,11 @@ fn cancel_distribution_bucket_operator_invite_fails_with_non_invited_distributio
         CancelDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(Error::<Test>::NoDistributionBucketInvitation.into()));
     });
 }
 
-#[test]
-fn cancel_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        CancelDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn accept_distribution_bucket_operator_invite_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4703,7 +4660,7 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4711,7 +4668,7 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4719,14 +4676,13 @@ fn accept_distribution_bucket_operator_invite_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationAccepted(
             provider_id,
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
         ));
     });
 }
@@ -4765,7 +4721,7 @@ fn accept_distribution_bucket_operator_invite_fails_with_non_invited_distributio
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4774,22 +4730,11 @@ fn accept_distribution_bucket_operator_invite_fails_with_non_invited_distributio
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(Error::<Test>::NoDistributionBucketInvitation.into()));
     });
 }
 
-#[test]
-fn accept_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        AcceptDistributionBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn set_distribution_operator_metadata_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4804,7 +4749,7 @@ fn set_distribution_operator_metadata_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4812,7 +4757,7 @@ fn set_distribution_operator_metadata_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(provider_id)
             .call_and_assert(Ok(()));
@@ -4820,22 +4765,21 @@ fn set_distribution_operator_metadata_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .call_and_assert(Ok(()));
 
         SetDistributionBucketMetadataFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(provider_id)
             .with_metadata(metadata.clone())
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketMetadataSet(
             provider_id,
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             metadata,
         ));
     });
@@ -4875,7 +4819,7 @@ fn set_distribution_operator_metadata_fails_with_non_distribution_provider() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4884,24 +4828,13 @@ fn set_distribution_operator_metadata_fails_with_non_distribution_provider() {
         SetDistributionBucketMetadataFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),
             ));
     });
 }
 
-#[test]
-fn set_distribution_operator_metadata_fails_with_invalid_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        SetDistributionBucketMetadataFixture::default()
-            .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn remove_distribution_bucket_operator_succeeded() {
     build_test_externalities().execute_with(|| {
@@ -4915,7 +4848,7 @@ fn remove_distribution_bucket_operator_succeeded() {
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -4923,7 +4856,7 @@ fn remove_distribution_bucket_operator_succeeded() {
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
@@ -4931,20 +4864,19 @@ fn remove_distribution_bucket_operator_succeeded() {
         AcceptDistributionBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_worker_id(operator_id)
             .call_and_assert(Ok(()));
 
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorRemoved(
-            family_id,
-            bucket_id,
+            Storage::create_distribution_bucket_id(family_id, bucket_index),
             operator_id,
         ));
     });
@@ -4959,17 +4891,6 @@ fn remove_distribution_bucket_operator_fails_with_non_leader_origin() {
     });
 }
 
-#[test]
-fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket_family() {
-    build_test_externalities().execute_with(|| {
-        RemoveDistributionBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .call_and_assert(Err(
-                Error::<Test>::DistributionBucketFamilyDoesntExist.into()
-            ));
-    });
-}
-
 #[test]
 fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket() {
     build_test_externalities().execute_with(|| {
@@ -4995,7 +4916,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
             .call_and_assert(Ok(()))
             .unwrap();
 
-        let bucket_id = CreateDistributionBucketFixture::default()
+        let bucket_index = CreateDistributionBucketFixture::default()
             .with_family_id(family_id)
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
@@ -5004,7 +4925,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),
@@ -5012,7 +4933,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
 
         InviteDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_family_id(family_id)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Ok(()));
@@ -5020,7 +4941,7 @@ fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_prov
         RemoveDistributionBucketOperatorFixture::default()
             .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
             .with_family_id(family_id)
-            .with_bucket_id(bucket_id)
+            .with_bucket_index(bucket_index)
             .with_operator_worker_id(operator_id)
             .call_and_assert(Err(
                 Error::<Test>::MustBeDistributionProviderOperatorForBucket.into(),
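Note on the test updates above: every fixture that previously took a standalone bucket id now takes a per-family bucket index, and the emitted events carry a composite id built with `Storage::create_distribution_bucket_id(family_id, bucket_index)`. Below is a minimal standalone sketch of that id scheme — not the pallet's actual definitions; the concrete `u64` aliases mirror `runtime/src/primitives.rs`, and the free function is only a stand-in for the pallet helper the assertions call.

```rust
// Illustrative sketch only: the shape of the composite id the updated tests
// assert against. Field names follow the DistributionBucketId entry added to
// types/augment/all/defs.json; everything else is an assumption.
type DistributionBucketFamilyId = u64;
type DistributionBucketIndex = u64;

#[derive(Debug, Clone, PartialEq, Eq)]
struct DistributionBucketId {
    distribution_bucket_family_id: DistributionBucketFamilyId,
    distribution_bucket_index: DistributionBucketIndex,
}

// Stand-in for Storage::create_distribution_bucket_id(family_id, bucket_index)
// as used in the event assertions above.
fn create_distribution_bucket_id(
    family_id: DistributionBucketFamilyId,
    bucket_index: DistributionBucketIndex,
) -> DistributionBucketId {
    DistributionBucketId {
        distribution_bucket_family_id: family_id,
        distribution_bucket_index: bucket_index,
    }
}

fn main() {
    let id = create_distribution_bucket_id(1, 0);
    assert_eq!(id.distribution_bucket_index, 0);
    println!("{:?}", id);
}
```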

+ 1 - 3
runtime/src/lib.rs

@@ -670,7 +670,6 @@ parameter_types! {
 }
 
 parameter_types! {
-    pub const MaxDistributionBucketNumberPerFamily: u64 = 500;
     pub const MaxDistributionBucketFamilyNumber: u64 = 200;
     pub const DataObjectDeletionPrize: Balance = 1; //TODO: Change during Olympia release
     pub const BlacklistSizeLimit: u64 = 10000; //TODO: adjust value
@@ -690,7 +689,7 @@ impl storage::Trait for Runtime {
     type Event = Event;
     type DataObjectId = DataObjectId;
     type StorageBucketId = StorageBucketId;
-    type DistributionBucketId = DistributionBucketId;
+    type DistributionBucketIndex = DistributionBucketIndex;
     type DistributionBucketFamilyId = DistributionBucketFamilyId;
     type ChannelId = ChannelId;
     type DataObjectDeletionPrize = DataObjectDeletionPrize;
@@ -705,7 +704,6 @@ impl storage::Trait for Runtime {
     type Randomness = RandomnessCollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
     type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber;
-    type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily;
     type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint;
     type DistributionBucketOperatorId = DistributionBucketOperatorId;
     type MaxNumberOfPendingInvitationsPerDistributionBucket =

+ 3 - 2
runtime/src/primitives.rs

@@ -95,8 +95,9 @@ pub type DataObjectId = u64;
 /// Represent a storage bucket from the storage pallet.
 pub type StorageBucketId = u64;
 
-/// Represent a distribution bucket from the storage pallet.
-pub type DistributionBucketId = u64;
+/// Represent a distribution bucket index within the distribution bucket family from the
+/// storage pallet.
+pub type DistributionBucketIndex = u64;
 
 /// Represent a distribution bucket family from the storage pallet.
 pub type DistributionBucketFamilyId = u64;

+ 2 - 2
scripts/cargo-build.sh

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
-cargo build --release
+cargo +nightly-2021-02-20 build --release

+ 2 - 2
scripts/cargo-tests-with-networking.sh

@@ -1,7 +1,7 @@
 #!/bin/sh
 set -e
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 echo 'running all cargo tests'
-cargo test --release --all -- --ignored
+cargo +nightly-2021-02-20 test --release --all -- --ignored

+ 1 - 1
scripts/raspberry-cross-build.sh

@@ -9,7 +9,7 @@
 export WORKSPACE_ROOT=`cargo metadata --offline --no-deps --format-version 1 | jq .workspace_root -r`
 
 docker run \
-    -e WASM_BUILD_TOOLCHAIN=nightly-2021-03-24 \
+    -e WASM_BUILD_TOOLCHAIN=nightly-2021-02-20 \
     --volume ${WORKSPACE_ROOT}/:/home/cross/project \
     --volume ${HOME}/.cargo/registry:/home/cross/.cargo/registry \
     joystream/rust-raspberry \

+ 4 - 4
scripts/run-dev-chain.sh

@@ -1,13 +1,13 @@
 #!/usr/bin/env bash
 
-export WASM_BUILD_TOOLCHAIN=nightly-2021-03-24
+export WASM_BUILD_TOOLCHAIN=nightly-2021-02-20
 
 # Build release binary
-cargo build --release
+cargo +nightly-2021-02-20 build --release
 
 # Purge existing local chain
-yes | cargo run --release -- purge-chain --dev
+yes | cargo +nightly-2021-02-20 run --release -- purge-chain --dev
 
 # Run local development chain -
 # No need to specify `-p joystream-node` it is the default bin crate in the cargo workspace
-cargo run --release -- --dev
+cargo +nightly-2021-02-20 run --release -- --dev

+ 2 - 4
setup.sh

@@ -27,10 +27,8 @@ curl https://getsubstrate.io -sSf | bash -s -- --fast
 
 source ~/.cargo/env
 
-rustup install nightly-2021-03-24
-rustup target add wasm32-unknown-unknown --toolchain nightly-2021-03-24
-
-rustup default nightly-2021-03-24
+rustup install nightly-2021-02-20
+rustup target add wasm32-unknown-unknown --toolchain nightly-2021-02-20
 
 rustup component add rustfmt clippy
 

+ 7 - 3
types/augment/all/defs.json

@@ -588,7 +588,11 @@
         "size": "u64",
         "ipfsContentId": "Bytes"
     },
-    "DistributionBucketId": "u64",
+    "DistributionBucketId": {
+        "distribution_bucket_family_id": "DistributionBucketFamilyId",
+        "distribution_bucket_index": "DistributionBucketIndex"
+    },
+    "DistributionBucketIndex": "u64",
     "DistributionBucketFamilyId": "u64",
     "DistributionBucket": {
         "accepting_new_bags": "bool",
@@ -598,10 +602,10 @@
         "assigned_bags": "u64"
     },
     "DistributionBucketFamily": {
-        "distribution_buckets": "BTreeMap<DistributionBucketId,DistributionBucket>"
+        "next_distribution_bucket_index": "DistributionBucketIndex"
     },
     "DataObjectIdMap": "BTreeMap<DataObjectId,DataObject>",
-    "DistributionBucketIdSet": "BTreeSet<DistributionBucketId>",
+    "DistributionBucketIndexSet": "BTreeSet<DistributionBucketIndex>",
     "DynamicBagCreationPolicyDistributorFamiliesMap": "BTreeMap<DistributionBucketFamilyId,u32>",
     "ProposalId": "u32",
     "ProposalStatus": {

+ 10 - 4
types/augment/all/types.ts

@@ -420,17 +420,23 @@ export interface DistributionBucket extends Struct {
 
 /** @name DistributionBucketFamily */
 export interface DistributionBucketFamily extends Struct {
-  readonly distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>;
+  readonly next_distribution_bucket_index: DistributionBucketIndex;
 }
 
 /** @name DistributionBucketFamilyId */
 export interface DistributionBucketFamilyId extends u64 {}
 
 /** @name DistributionBucketId */
-export interface DistributionBucketId extends u64 {}
+export interface DistributionBucketId extends Struct {
+  readonly distribution_bucket_family_id: DistributionBucketFamilyId;
+  readonly distribution_bucket_index: DistributionBucketIndex;
+}
+
+/** @name DistributionBucketIndex */
+export interface DistributionBucketIndex extends u64 {}
 
-/** @name DistributionBucketIdSet */
-export interface DistributionBucketIdSet extends BTreeSet<DistributionBucketId> {}
+/** @name DistributionBucketIndexSet */
+export interface DistributionBucketIndexSet extends BTreeSet<DistributionBucketIndex> {}
 
 /** @name Dynamic */
 export interface Dynamic extends Enum {

+ 18 - 5
types/src/storage.ts

@@ -50,10 +50,22 @@ export class DataObject
 
 export class DataObjectIdSet extends BTreeSet.with(DataObjectId) {}
 export class DataObjectIdMap extends BTreeMap.with(DataObjectId, DataObject) {}
-export class DistributionBucketId extends u64 {}
+export class DistributionBucketIndex extends u64 {}
 export class DistributionBucketFamilyId extends u64 {}
 export class StorageBucketIdSet extends BTreeSet.with(StorageBucketId) {}
-export class DistributionBucketIdSet extends BTreeSet.with(DistributionBucketId) {}
+export class DistributionBucketIndexSet extends BTreeSet.with(DistributionBucketIndex) {}
+
+export type IDistributionBucketId = {
+  distribution_bucket_family_id: DistributionBucketFamilyId
+  distribution_bucket_index: DistributionBucketIndex
+}
+
+export class DistributionBucketId
+  extends JoyStructDecorated({
+    distribution_bucket_family_id: DistributionBucketFamilyId,
+    distribution_bucket_index: DistributionBucketIndex,
+  })
+  implements IDistributionBucketId {}
 
 export type IDynamicBagDeletionPrize = {
   account_id: AccountId
@@ -219,12 +231,12 @@ export class DistributionBucket
   implements IDistributionBucket {}
 
 export type IDistributionBucketFamily = {
-  distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>
+  next_distribution_bucket_index: DistributionBucketIndex
 }
 
 export class DistributionBucketFamily
   extends JoyStructDecorated({
-    distribution_buckets: BTreeMap.with(DistributionBucketId, DistributionBucket),
+    next_distribution_bucket_index: DistributionBucketIndex,
   })
   implements IDistributionBucketFamily {}
 
@@ -256,12 +268,13 @@ export const storageTypes: RegistryTypes = {
   StorageBucketOperatorStatus,
   DataObject,
   DistributionBucketId,
+  DistributionBucketIndex,
   DistributionBucketFamilyId,
   DistributionBucket,
   DistributionBucketFamily,
   // Utility types:
   DataObjectIdMap,
-  DistributionBucketIdSet,
+  DistributionBucketIndexSet,
   DynamicBagCreationPolicyDistributorFamiliesMap,
 }
 export default storageTypes

Some files were not shown because of the large number of changed files