Browse Source

runtime: storage: Add `create_distribution_bucket_family` extrinsic.

# Conflicts:
#	runtime-modules/storage/src/lib.rs
#	runtime-modules/storage/src/tests/mod.rs
Shamil Gadelshin 3 years ago
parent
commit
89b167b697

+ 11 - 4
node/src/chain_spec/mod.rs

@@ -33,10 +33,10 @@ use sp_runtime::Perbill;
 use node_runtime::{
     membership, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, Balance, BalancesConfig,
     ContentDirectoryConfig, ContentDirectoryWorkingGroupConfig, ContentWorkingGroupConfig,
-    CouncilConfig, CouncilElectionConfig, ElectionParameters, ForumConfig, GrandpaConfig,
-    ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig, SessionConfig, SessionKeys,
-    Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig, SudoConfig, SystemConfig,
-    VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS,
+    CouncilConfig, CouncilElectionConfig, DistributionWorkingGroupConfig, ElectionParameters,
+    ForumConfig, GrandpaConfig, ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig,
+    SessionConfig, SessionKeys, Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig,
+    SudoConfig, SystemConfig, VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS,
 };
 
 // Exported to be used by chain-spec-builder
@@ -319,6 +319,13 @@ pub fn testnet_genesis(
             worker_application_human_readable_text_constraint: default_text_constraint,
             worker_exit_rationale_text_constraint: default_text_constraint,
         }),
+        working_group_Instance4: Some(DistributionWorkingGroupConfig {
+            phantom: Default::default(),
+            working_group_mint_capacity: 0,
+            opening_human_readable_text_constraint: default_text_constraint,
+            worker_application_human_readable_text_constraint: default_text_constraint,
+            worker_exit_rationale_text_constraint: default_text_constraint,
+        }),
         content_directory: Some({
             ContentDirectoryConfig {
                 class_by_id: vec![],

+ 3 - 0
runtime-modules/common/src/working_group.rs

@@ -20,4 +20,7 @@ pub enum WorkingGroup {
 
     /// Storage working group: working_group::Instance3.
     Content = 3isize,
+
+    /// Distribution working group: working_group::Instance4.
+    Distribution = 4isize,
 }

+ 147 - 57
runtime-modules/storage/src/lib.rs

@@ -104,6 +104,11 @@ use common::working_group::WorkingGroup;
 
 use storage_bucket_picker::StorageBucketPicker;
 
+// TODO: constants
+// Max number of distribution bucket families
+// Max number of distribution buckets per family.
+// Max number of pending invitations per distribution bucket.
+
 /// Public interface for the storage module.
 pub trait DataObjectStorage<T: Trait> {
     /// Validates upload parameters and conditions (like global uploading block).
@@ -171,43 +176,53 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
 
     /// Data object ID type.
     type DataObjectId: Parameter
-        + Member
-        + BaseArithmetic
-        + Codec
-        + Default
-        + Copy
-        + MaybeSerialize
-        + PartialEq;
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
 
     /// Storage bucket ID type.
     type StorageBucketId: Parameter
-        + Member
-        + BaseArithmetic
-        + Codec
-        + Default
-        + Copy
-        + MaybeSerialize
-        + PartialEq;
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
 
     /// Distribution bucket ID type.
     type DistributionBucketId: Parameter
-        + Member
-        + BaseArithmetic
-        + Codec
-        + Default
-        + Copy
-        + MaybeSerialize
-        + PartialEq;
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
+
+    /// Distribution bucket family ID type.
+    type DistributionBucketFamilyId: Parameter
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
 
     /// Channel ID type (part of the dynamic bag ID).
     type ChannelId: Parameter
-        + Member
-        + BaseArithmetic
-        + Codec
-        + Default
-        + Copy
-        + MaybeSerialize
-        + PartialEq;
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
 
     /// Defines max number of data objects per bag.
     type MaxNumberOfDataObjectsPerBag: Get<u64>;
@@ -239,17 +254,31 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
     /// Something that provides randomness in the runtime.
     type Randomness: Randomness<Self::Hash>;
 
-    /// Demand the working group leader authorization.
+    /// Demand the storage working group leader authorization.
+    /// TODO: Refactor after merging with the Olympia release.
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
+
+    /// Validate origin for the storage worker.
+    /// TODO: Refactor after merging with the Olympia release.
+    fn ensure_storage_worker_origin(
+        origin: Self::Origin,
+        worker_id: WorkerId<Self>,
+    ) -> DispatchResult;
+
+    /// Validate storage worker existence.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
+    fn ensure_storage_worker_exists(worker_id: &WorkerId<Self>) -> DispatchResult;
 
-    /// Validate origin for the worker.
+    /// Demand the distribution group leader authorization.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_worker_origin(origin: Self::Origin, worker_id: WorkerId<Self>) -> DispatchResult;
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
 
-    /// Validate worker existence.
+    /// Validate origin for the distribution worker.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_worker_exists(worker_id: &WorkerId<Self>) -> DispatchResult;
+    fn ensure_distribution_worker_origin(
+        origin: Self::Origin,
+        worker_id: WorkerId<Self>,
+    ) -> DispatchResult;
 }
 
 /// Operations with local pallet account.
@@ -360,7 +389,7 @@ pub struct DataObject<Balance> {
 
 /// Type alias for the BagObject.
 pub type Bag<T> =
-    BagObject<<T as Trait>::StorageBucketId, <T as Trait>::DistributionBucketId, BalanceOf<T>>;
+    BagObject<<T as Trait>::StorageBucketId, <T as Trait>::DistributionBucketId, BalanceOf<T>>;
 
 /// Bag container.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
@@ -383,7 +412,7 @@ pub struct BagObject<StorageBucketId: Ord, DistributionBucketId: Ord, Balance> {
 }
 
 impl<StorageBucketId: Ord, DistributionBucketId: Ord, Balance>
-    BagObject<StorageBucketId, DistributionBucketId, Balance>
+    BagObject<StorageBucketId, DistributionBucketId, Balance>
 {
     // Add and/or remove storage buckets.
     fn update_buckets(
@@ -495,7 +524,7 @@ impl<MemberId: Default, ChannelId> Default for DynamicBagIdType<MemberId, Channe
 }
 
 impl<MemberId, ChannelId> From<DynamicBagIdType<MemberId, ChannelId>>
-    for BagIdType<MemberId, ChannelId>
+    for BagIdType<MemberId, ChannelId>
 {
     fn from(dynamic_bag_id: DynamicBagIdType<MemberId, ChannelId>) -> Self {
         BagIdType::Dynamic(dynamic_bag_id)
@@ -542,7 +571,7 @@ pub struct UploadParametersObject<MemberId, ChannelId, AccountId, Balance> {
 
 /// Alias for the DynamicBagDeletionPrizeObject
 pub type DynamicBagDeletionPrize<T> =
-    DynamicBagDeletionPrizeObject<<T as frame_system::Trait>::AccountId, BalanceOf<T>>;
+    DynamicBagDeletionPrizeObject<<T as frame_system::Trait>::AccountId, BalanceOf<T>>;
 
 /// Deletion prize data for the dynamic bag. Requires on the dynamic bag creation.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
@@ -685,6 +714,26 @@ impl<Balance: Saturating + Copy> BagUpdate<Balance> {
     }
 }
 
+/// Distribution bucket family.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
+pub struct DistributionBucketFamily<DistributionBucketId: Ord> {
+    /// Distribution bucket map.
+    pub distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>,
+}
+
+/// Distribution bucket.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
+pub struct DistributionBucket {
+    // TODO: add fields:
+    // pending_invitations: BTreeSet<WorkerId>,
+    // number_of_pending_data_objects: u32,
+    // accepting_new_bags: bool,
+    // distributing: bool,
+    // number_of_operators: u32,
+}
+
 decl_storage! {
     trait Store for Module<T: Trait> as Storage {
         /// Defines whether all new uploads blocked
@@ -729,6 +778,15 @@ decl_storage! {
         pub DataObjectsById get (fn data_object_by_id): double_map
             hasher(blake2_128_concat) BagId<T>,
             hasher(blake2_128_concat) T::DataObjectId => DataObject<BalanceOf<T>>;
+
+        /// Distribution bucket family id counter. Starts at zero.
+        pub NextDistributionBucketFamilyId get(fn next_distribution_bucket_family_id):
+            T::DistributionBucketFamilyId;
+
+        /// Distribution bucket families.
+        pub DistributionBucketFamilyById get (fn distribution_bucket_family_by_id):
+            map hasher(blake2_128_concat) T::DistributionBucketFamilyId =>
+            DistributionBucketFamily<T::DistributionBucketId>;
     }
 }
 
@@ -744,6 +802,7 @@ decl_event! {
         DynamicBagId = DynamicBagId<T>,
         <T as frame_system::Trait>::AccountId,
         Balance = BalanceOf<T>,
+        <T as Trait>::DistributionBucketFamilyId,
     {
         /// Emits on creating the storage bucket.
         /// Params
@@ -893,6 +952,12 @@ decl_event! {
         /// - new total objects size
         /// - new total objects number
         BagObjectsChanged(BagId, u64, u64),
+
+        /// Emits on creating a distribution bucket family.
+        /// Params
+        /// - distribution bucket family id
+        DistributionBucketFamilyCreated(DistributionBucketFamilyId),
     }
 }
 
@@ -1053,7 +1118,7 @@ decl_module! {
             origin,
             storage_bucket_id: T::StorageBucketId,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1076,7 +1141,7 @@ decl_module! {
         /// Update whether uploading is globally blocked.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_uploading_blocked_status(origin, new_status: bool) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1090,7 +1155,7 @@ decl_module! {
         /// Updates size-based pricing of new objects uploaded.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_data_size_fee(origin, new_data_size_fee: BalanceOf<T>) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1104,7 +1169,7 @@ decl_module! {
         /// Updates "Storage buckets per bag" number limit.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_storage_buckets_per_bag_limit(origin, new_limit: u64) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             T::StorageBucketsPerBagValueConstraint::get().ensure_valid(
                 new_limit,
@@ -1128,7 +1193,7 @@ decl_module! {
             new_objects_size: u64,
             new_objects_number: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1149,7 +1214,7 @@ decl_module! {
             dynamic_bag_type: DynamicBagType,
             number_of_storage_buckets: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1176,7 +1241,7 @@ decl_module! {
             remove_hashes: BTreeSet<ContentId>,
             add_hashes: BTreeSet<ContentId>
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             // Get only hashes that exist in the blacklist.
             let verified_remove_hashes = Self::get_existing_hashes(&remove_hashes);
@@ -1219,7 +1284,7 @@ decl_module! {
             size_limit: u64,
             objects_limit: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let voucher = Voucher {
                 size_limit,
@@ -1268,7 +1333,7 @@ decl_module! {
             add_buckets: BTreeSet<T::StorageBucketId>,
             remove_buckets: BTreeSet<T::StorageBucketId>,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             Self::ensure_bag_exists(&bag_id)?;
 
@@ -1310,7 +1375,7 @@ decl_module! {
         /// Cancel pending storage bucket invite. An invitation must be pending.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn cancel_storage_bucket_operator_invite(origin, storage_bucket_id: T::StorageBucketId){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1336,7 +1401,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             operator_id: WorkerId<T>,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1364,7 +1429,7 @@ decl_module! {
             origin,
             storage_bucket_id: T::StorageBucketId,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1391,7 +1456,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             accepting_new_bags: bool
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1416,7 +1481,7 @@ decl_module! {
             new_objects_size_limit: u64,
             new_objects_number_limit: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1460,7 +1525,7 @@ decl_module! {
             worker_id: WorkerId<T>,
             storage_bucket_id: T::StorageBucketId
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1487,7 +1552,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             metadata: Vec<u8>
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1511,7 +1576,7 @@ decl_module! {
             bag_id: BagId<T>,
             data_objects: BTreeSet<T::DataObjectId>,
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1545,6 +1610,31 @@ decl_module! {
                 )
             );
         }
+
+        /// Create a distribution family.
+        #[weight = 10_000_000] // TODO: adjust weight
+        pub fn create_distribution_bucket_family(origin) {
+            T::ensure_distribution_working_group_leader_origin(origin)?;
+
+            // TODO: check max bucket families number
+
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            let family = DistributionBucketFamily {
+                distribution_buckets: BTreeMap::new(),
+            };
+
+            let family_id = Self::next_distribution_bucket_family_id();
+
+            <NextDistributionBucketFamilyId<T>>::put(family_id + One::one());
+
+            <DistributionBucketFamilyById<T>>::insert(family_id, family);
+
+            Self::deposit_event(RawEvent::DistributionBucketFamilyCreated(family_id));
+        }
     }
 }
 
@@ -1928,7 +2018,7 @@ impl<T: Trait> Module<T> {
 
             id
         })
-        .take(data_objects.len());
+        .take(data_objects.len());
 
         let data_objects_map = ids.zip(data_objects).collect::<BTreeMap<_, _>>();
 
@@ -2355,7 +2445,7 @@ impl<T: Trait> Module<T> {
     // Verifies storage provider operator existence.
     fn ensure_storage_provider_operator_exists(operator_id: &WorkerId<T>) -> DispatchResult {
         ensure!(
-            T::ensure_worker_exists(operator_id).is_ok(),
+            T::ensure_storage_worker_exists(operator_id).is_ok(),
             Error::<T>::StorageProviderOperatorDoesntExist
         );
 

+ 59 - 9
runtime-modules/storage/src/tests/fixtures.rs

@@ -6,7 +6,7 @@ use sp_std::collections::btree_set::BTreeSet;
 
 use super::mocks::{
     Balances, CollectiveFlip, Storage, System, Test, TestEvent, DEFAULT_MEMBER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, WG_LEADER_ACCOUNT_ID,
+    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
 use crate::{
@@ -45,6 +45,7 @@ impl EventFixture {
             DynamicBagId<Test>,
             u64,
             u64,
+            u64,
         >,
     ) {
         let converted_event = TestEvent::storage(expected_raw_event);
@@ -62,6 +63,7 @@ impl EventFixture {
             DynamicBagId<Test>,
             u64,
             u64,
+            u64,
         >,
     ) {
         let converted_event = TestEvent::storage(expected_raw_event);
@@ -444,7 +446,7 @@ pub struct CancelStorageBucketInvitationFixture {
 impl CancelStorageBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             storage_bucket_id: Default::default(),
         }
     }
@@ -546,7 +548,7 @@ pub struct UpdateUploadingBlockedStatusFixture {
 impl UpdateUploadingBlockedStatusFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_status: false,
         }
     }
@@ -719,7 +721,7 @@ pub struct UpdateBlacklistFixture {
 impl UpdateBlacklistFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             remove_hashes: BTreeSet::new(),
             add_hashes: BTreeSet::new(),
         }
@@ -829,7 +831,7 @@ pub struct RemoveStorageBucketOperatorFixture {
 impl RemoveStorageBucketOperatorFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             storage_bucket_id: Default::default(),
         }
     }
@@ -875,7 +877,7 @@ pub struct UpdateDataObjectPerMegabyteFeeFixture {
 impl UpdateDataObjectPerMegabyteFeeFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_fee: 0,
         }
     }
@@ -911,7 +913,7 @@ pub struct UpdateStorageBucketsPerBagLimitFixture {
 impl UpdateStorageBucketsPerBagLimitFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_limit: 0,
         }
     }
@@ -1014,7 +1016,7 @@ pub struct UpdateStorageBucketsVoucherMaxLimitsFixture {
 impl UpdateStorageBucketsVoucherMaxLimitsFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_objects_size_limit: 0,
             new_objects_number_limit: 0,
         }
@@ -1115,7 +1117,7 @@ pub struct UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
 impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_storage_buckets_number: 0,
             dynamic_bag_type: Default::default(),
         }
@@ -1162,3 +1164,51 @@ impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
         }
     }
 }
+
+pub struct CreateDistributionBucketBucketFamilyFixture {
+    origin: RawOrigin<u64>,
+}
+
+impl CreateDistributionBucketBucketFamilyFixture {
+    pub fn default() -> Self {
+        Self {
+            origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
+        }
+    }
+
+    pub fn with_origin(self, origin: RawOrigin<u64>) -> Self {
+        Self { origin, ..self }
+    }
+
+    pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option<u64> {
+        let next_family_id = Storage::next_distribution_bucket_family_id();
+        //let buckets_number = Storage::storage_buckets_number(); // TODO
+        let actual_result = Storage::create_distribution_bucket_family(self.origin.clone().into());
+
+        assert_eq!(actual_result, expected_result);
+
+        if actual_result.is_ok() {
+            assert_eq!(
+                next_family_id + 1,
+                Storage::next_distribution_bucket_family_id()
+            );
+            // assert_eq!(buckets_number + 1, Storage::storage_buckets_number()); //TODO
+            assert!(<crate::DistributionBucketFamilyById<Test>>::contains_key(
+                next_family_id
+            ));
+
+            Some(next_family_id)
+        } else {
+            assert_eq!(
+                next_family_id,
+                Storage::next_distribution_bucket_family_id()
+            );
+            // assert_eq!(buckets_number, Storage::storage_buckets_number()); //TODO
+            assert!(!<crate::DistributionBucketFamilyById<Test>>::contains_key(
+                next_family_id
+            ));
+
+            None
+        }
+    }
+}

+ 28 - 5
runtime-modules/storage/src/tests/mocks.rs

@@ -68,8 +68,10 @@ parameter_types! {
     };
 }
 
-pub const WG_LEADER_ACCOUNT_ID: u64 = 100001;
+pub const STORAGE_WG_LEADER_ACCOUNT_ID: u64 = 100001;
 pub const DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID: u64 = 100002;
+pub const DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID: u64 = 100003;
+pub const DISTRIBUTION_WG_LEADER_ACCOUNT_ID: u64 = 100004;
 pub const DEFAULT_STORAGE_PROVIDER_ID: u64 = 10;
 pub const ANOTHER_STORAGE_PROVIDER_ID: u64 = 11;
 
@@ -78,6 +80,7 @@ impl crate::Trait for Test {
     type DataObjectId = u64;
     type StorageBucketId = u64;
     type DistributionBucketId = u64;
+    type DistributionBucketFamilyId = u64;
     type ChannelId = u64;
     type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag;
     type DataObjectDeletionPrize = DataObjectDeletionPrize;
@@ -90,17 +93,17 @@ impl crate::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
 
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
         let account_id = ensure_signed(origin)?;
 
-        if account_id != WG_LEADER_ACCOUNT_ID {
+        if account_id != STORAGE_WG_LEADER_ACCOUNT_ID {
             Err(DispatchError::BadOrigin)
         } else {
             Ok(())
         }
     }
 
-    fn ensure_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
+    fn ensure_storage_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
         let account_id = ensure_signed(origin)?;
 
         if account_id != DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID {
@@ -110,7 +113,7 @@ impl crate::Trait for Test {
         }
     }
 
-    fn ensure_worker_exists(worker_id: &u64) -> DispatchResult {
+    fn ensure_storage_worker_exists(worker_id: &u64) -> DispatchResult {
         let allowed_storage_providers =
             vec![DEFAULT_STORAGE_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID];
 
@@ -120,6 +123,26 @@ impl crate::Trait for Test {
             Ok(())
         }
     }
+
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+        let account_id = ensure_signed(origin)?;
+
+        if account_id != DISTRIBUTION_WG_LEADER_ACCOUNT_ID {
+            Err(DispatchError::BadOrigin)
+        } else {
+            Ok(())
+        }
+    }
+
+    fn ensure_distribution_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
+        let account_id = ensure_signed(origin)?;
+
+        if account_id != DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID {
+            Err(DispatchError::BadOrigin)
+        } else {
+            Ok(())
+        }
+    }
 }
 
 pub const DEFAULT_MEMBER_ID: u64 = 100;

+ 136 - 106
runtime-modules/storage/src/tests/mod.rs

@@ -14,9 +14,10 @@ use sp_std::iter::FromIterator;
 use common::working_group::WorkingGroup;
 
 use crate::{
-    BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DynamicBagCreationPolicy,
-    DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error, ModuleAccount, RawEvent,
-    StaticBagId, StorageBucketOperatorStatus, StorageTreasury, UploadParameters, Voucher,
+    BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
+    DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error,
+    ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus, StorageTreasury,
+    UploadParameters, Voucher,
 };
 
 use mocks::{
@@ -25,9 +26,10 @@ use mocks::{
     InitialStorageBucketsNumberForDynamicBag, MaxNumberOfDataObjectsPerBag,
     MaxRandomIterationNumber, Storage, Test, ANOTHER_STORAGE_PROVIDER_ID,
     DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID, DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ID, WG_LEADER_ACCOUNT_ID,
+    DEFAULT_STORAGE_PROVIDER_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
+use crate::tests::mocks::DISTRIBUTION_WG_LEADER_ACCOUNT_ID;
 use fixtures::*;
 
 #[test]
@@ -45,7 +47,7 @@ fn create_storage_bucket_succeeded() {
         let invite_worker = None;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_accepting_new_bags(accepting_new_bags)
             .with_invite_worker(invite_worker)
             .with_size_limit(size_limit)
@@ -77,12 +79,12 @@ fn create_storage_bucket_fails_with_invalid_voucher_params() {
         let objects_limit = 10;
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_size_limit(size_limit)
             .call_and_assert(Err(Error::<Test>::VoucherMaxObjectSizeLimitExceeded.into()));
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(objects_limit)
             .call_and_assert(Err(
                 Error::<Test>::VoucherMaxObjectNumberLimitExceeded.into()
@@ -98,7 +100,7 @@ fn create_storage_bucket_succeeded_with_invited_member() {
         let invite_worker = Some(invited_worker_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_accepting_new_bags(accepting_new_bags)
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
@@ -139,7 +141,7 @@ fn create_storage_bucket_fails_with_invalid_storage_provider_id() {
         let invalid_storage_provider_id = 155;
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(invalid_storage_provider_id))
             .call_and_assert(Err(Error::<Test>::StorageProviderOperatorDoesntExist.into()));
     });
@@ -155,7 +157,7 @@ fn accept_storage_bucket_invitation_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -197,7 +199,7 @@ fn accept_storage_bucket_invitation_fails_with_non_existing_storage_bucket() {
 fn accept_storage_bucket_invitation_fails_with_non_invited_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -215,7 +217,7 @@ fn accept_storage_bucket_invitation_fails_with_different_invited_storage_provide
         let different_storage_provider_id = ANOTHER_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(different_storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -233,7 +235,7 @@ fn accept_storage_bucket_invitation_fails_with_already_set_storage_provider() {
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -267,7 +269,7 @@ fn update_storage_buckets_for_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -275,7 +277,7 @@ fn update_storage_buckets_for_bags_succeeded() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Ok(()));
@@ -301,7 +303,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -309,7 +311,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::DynamicBagDoesntExist.into()));
@@ -325,7 +327,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() {
         set_default_update_storage_buckets_per_bag_limit();
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .with_accepting_new_bags(false)
             .call_and_assert(Ok(()))
@@ -334,7 +336,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntAcceptNewBags.into()));
@@ -370,7 +372,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() {
         let size_limit = 100;
 
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
             .call_and_assert(Ok(()))
@@ -383,7 +385,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() {
         assert_eq!(bag.stored_by, old_buckets);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .with_remove_bucket_ids(old_buckets.clone())
@@ -433,7 +435,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb
         let new_bucket_objects_limit = 0;
         let new_bucket_size_limit = 100;
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(new_bucket_objects_limit)
             .with_size_limit(new_bucket_size_limit)
             .call_and_assert(Ok(()))
@@ -442,7 +444,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb
         let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .call_and_assert(Err(
@@ -479,7 +481,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota
         let new_bucket_objects_limit = 1;
         let new_bucket_size_limit = 5;
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(new_bucket_objects_limit)
             .with_size_limit(new_bucket_size_limit)
             .call_and_assert(Ok(()))
@@ -488,7 +490,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota
         let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .call_and_assert(Err(
@@ -506,7 +508,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -517,7 +519,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() {
         let bag_id = BagId::<Test>::Static(static_bag_id.clone());
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -536,7 +538,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -549,7 +551,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() {
         create_dynamic_bag(&dynamic_bag_id);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_remove_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -574,7 +576,7 @@ fn update_storage_buckets_for_bags_fails_with_non_leader_origin() {
 fn update_storage_buckets_for_bags_fails_with_empty_params() {
     build_test_externalities().execute_with(|| {
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketIdCollectionsAreEmpty.into()));
     });
 }
@@ -590,14 +592,14 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_storage_buckets() {
 
         // Invalid added bucket ID.
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
 
         // Invalid removed bucket ID.
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_remove_bucket_ids(buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
@@ -613,7 +615,7 @@ fn update_storage_buckets_for_bags_fails_with_going_beyond_the_buckets_per_bag_l
         let bag_id = BagId::<Test>::Static(StaticBagId::Council);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketPerBagLimitExceeded.into()));
@@ -630,7 +632,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() {
         let _bucket3 = create_default_storage_bucket_and_assign_to_bag(bag_id.clone());
 
         let bucket4 = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
@@ -640,7 +642,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() {
         let add_buckets = BTreeSet::from_iter(vec![bucket4]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketPerBagLimitExceeded.into()));
@@ -648,7 +650,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() {
         let remove_buckets = BTreeSet::from_iter(vec![bucket1]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets)
             .with_remove_bucket_ids(remove_buckets)
@@ -722,7 +724,7 @@ fn upload_succeeded_with_data_size_fee() {
         let data_size_fee = 100;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -1051,7 +1053,7 @@ fn upload_fails_with_insufficient_balance_for_data_size_fee() {
         let data_size_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -1083,7 +1085,7 @@ fn upload_fails_with_data_size_fee_changed() {
         let data_size_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -1109,7 +1111,7 @@ fn upload_failed_with_blocked_uploading() {
 
         let new_blocking_status = true;
         UpdateUploadingBlockedStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_blocking_status)
             .call_and_assert(Ok(()));
 
@@ -1130,7 +1132,7 @@ fn upload_failed_with_blacklisted_data_object() {
         let add_hashes = BTreeSet::from_iter(vec![hash]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes)
             .call_and_assert(Ok(()));
 
@@ -1158,7 +1160,7 @@ fn set_storage_operator_metadata_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1212,7 +1214,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() {
 
         // Missing invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
@@ -1224,7 +1226,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() {
 
         // Not accepted invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1270,7 +1272,7 @@ fn accept_pending_data_objects_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .with_size_limit(size_limit)
             .with_objects_limit(objects_limit)
@@ -1286,7 +1288,7 @@ fn accept_pending_data_objects_succeeded() {
         let buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -1346,7 +1348,7 @@ fn accept_pending_data_objects_fails_with_unrelated_storage_bucket() {
         let bag_id = BagId::<Test>::Static(static_bag_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1393,7 +1395,7 @@ fn accept_pending_data_objects_fails_with_non_existing_dynamic_bag() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1435,7 +1437,7 @@ fn accept_pending_data_objects_succeeded_with_dynamic_bag() {
         let size_limit = 100;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
@@ -1580,13 +1582,13 @@ fn cancel_storage_bucket_operator_invite_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -1611,7 +1613,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_leader_origin() {
 fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -1620,13 +1622,13 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket(
 fn cancel_storage_bucket_operator_invite_fails_with_non_invited_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::NoStorageBucketInvitation.into()));
     });
@@ -1638,7 +1640,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1650,7 +1652,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider
             .call_and_assert(Ok(()));
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderAlreadySet.into()));
     });
@@ -1665,12 +1667,12 @@ fn invite_storage_bucket_operator_succeeded() {
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_operator_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));
@@ -1697,7 +1699,7 @@ fn invite_storage_bucket_operator_fails_with_non_leader_origin() {
 fn invite_storage_bucket_operator_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -1708,13 +1710,13 @@ fn invite_storage_bucket_operator_fails_with_non_missing_invitation() {
         let invited_worker_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(invited_worker_id))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::InvitedStorageProvider.into()));
     });
@@ -1726,12 +1728,12 @@ fn invite_storage_bucket_operator_fails_with_invalid_storage_provider_id() {
         let invalid_storage_provider_id = 155;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_operator_worker_id(invalid_storage_provider_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderOperatorDoesntExist.into()));
@@ -1747,7 +1749,7 @@ fn update_uploading_blocked_status_succeeded() {
         let new_blocking_status = true;
 
         UpdateUploadingBlockedStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_blocking_status)
             .call_and_assert(Ok(()));
 
@@ -2189,7 +2191,7 @@ fn delete_data_objects_fails_with_invalid_treasury_balance() {
         let invite_worker = Some(storage_provider_id);
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2321,13 +2323,13 @@ fn update_storage_bucket_status_succeeded() {
         run_to_block(starting_block);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         let new_status = true;
         UpdateStorageBucketStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_new_status(new_status)
             .call_and_assert(Ok(()));
@@ -2351,7 +2353,7 @@ fn update_storage_bucket_status_fails_with_invalid_origin() {
 fn update_storage_bucket_status_fails_with_invalid_storage_bucket() {
     build_test_externalities().execute_with(|| {
         UpdateStorageBucketStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -2367,7 +2369,7 @@ fn update_blacklist_succeeded() {
 
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2378,7 +2380,7 @@ fn update_blacklist_succeeded() {
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Ok(()));
@@ -2401,7 +2403,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() {
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2409,7 +2411,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() {
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone(), cid3.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Err(Error::<Test>::BlacklistSizeLimitExceeded.into()));
@@ -2430,7 +2432,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2438,7 +2440,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Err(Error::<Test>::BlacklistSizeLimitExceeded.into()));
@@ -2457,12 +2459,12 @@ fn update_blacklist_succeeds_with_existent_remove_hashes() {
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2499,7 +2501,7 @@ fn create_storage_bucket_and_assign_to_bag(
     set_default_update_storage_buckets_per_bag_limit();
 
     let bucket_id = CreateStorageBucketFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_invite_worker(storage_provider_id)
         .with_objects_limit(objects_limit)
         .with_size_limit(size_limit)
@@ -2509,7 +2511,7 @@ fn create_storage_bucket_and_assign_to_bag(
     let buckets = BTreeSet::from_iter(vec![bucket_id]);
 
     UpdateStorageBucketForBagsFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_bag_id(bag_id.clone())
         .with_add_bucket_ids(buckets.clone())
         .call_and_assert(Ok(()));
@@ -2624,12 +2626,12 @@ fn delete_storage_bucket_succeeded() {
         run_to_block(starting_block);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -2652,7 +2654,7 @@ fn delete_storage_bucket_fails_with_non_leader_origin() {
 fn delete_storage_bucket_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -2682,7 +2684,7 @@ fn delete_storage_bucket_fails_with_non_empty_bucket() {
             .call_and_assert(Ok(()));
 
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::CannotDeleteNonEmptyStorageBucket.into()));
     });
@@ -2698,7 +2700,7 @@ fn remove_storage_bucket_operator_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2710,7 +2712,7 @@ fn remove_storage_bucket_operator_succeeded() {
             .call_and_assert(Ok(()));
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -2733,7 +2735,7 @@ fn remove_storage_bucket_operator_fails_with_non_leader_origin() {
 fn remove_storage_bucket_operator_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -2745,13 +2747,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderMustBeSet.into()));
     });
@@ -2761,13 +2763,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() {
 fn remove_storage_bucket_operator_fails_with_missing_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderMustBeSet.into()));
     });
@@ -2782,7 +2784,7 @@ fn update_data_size_fee_succeeded() {
         let new_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(new_fee)
             .call_and_assert(Ok(()));
 
@@ -2812,7 +2814,7 @@ fn data_size_fee_calculation_works_properly() {
         let data_size_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -2889,7 +2891,7 @@ fn update_storage_buckets_per_bag_limit_succeeded() {
         let new_limit = 4;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Ok(()));
 
@@ -2916,14 +2918,14 @@ fn update_storage_buckets_per_bag_limit_fails_with_incorrect_value() {
         let new_limit = 0;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Err(Error::<Test>::StorageBucketsPerBagLimitTooLow.into()));
 
         let new_limit = 100;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Err(Error::<Test>::StorageBucketsPerBagLimitTooHigh.into()));
     });
@@ -2931,7 +2933,7 @@ fn update_storage_buckets_per_bag_limit_fails_with_incorrect_value() {
 
 fn set_update_storage_buckets_per_bag_limit(new_limit: u64) {
     UpdateStorageBucketsPerBagLimitFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_new_limit(new_limit)
         .call_and_assert(Ok(()))
 }
@@ -2954,7 +2956,7 @@ fn set_storage_bucket_voucher_limits_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2969,7 +2971,7 @@ fn set_storage_bucket_voucher_limits_succeeded() {
         let new_objects_number_limit = 1;
 
         SetStorageBucketVoucherLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_new_objects_number_limit(new_objects_number_limit)
             .with_new_objects_size_limit(new_objects_size_limit)
@@ -2990,7 +2992,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_values() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -3005,13 +3007,13 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_values() {
         let invalid_objects_number_limit = 1000;
 
         SetStorageBucketVoucherLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_new_objects_size_limit(invalid_objects_size_limit)
             .call_and_assert(Err(Error::<Test>::VoucherMaxObjectSizeLimitExceeded.into()));
 
         SetStorageBucketVoucherLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_new_objects_number_limit(invalid_objects_number_limit)
             .call_and_assert(Err(
@@ -3033,7 +3035,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_origin() {
 fn set_storage_bucket_voucher_limits_fails_with_invalid_storage_bucket() {
     build_test_externalities().execute_with(|| {
         SetStorageBucketVoucherLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -3058,7 +3060,7 @@ fn update_storage_buckets_voucher_max_limits_succeeded() {
         let new_number_limit = 4;
 
         UpdateStorageBucketsVoucherMaxLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_objects_number_limit(new_number_limit)
             .with_new_objects_size_limit(new_size_limit)
             .call_and_assert(Ok(()));
@@ -3380,7 +3382,7 @@ fn create_storage_buckets(buckets_number: u64) -> BTreeSet<u64> {
 
     for _ in 0..buckets_number {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
@@ -3403,7 +3405,7 @@ fn update_number_of_storage_buckets_in_dynamic_bag_creation_policy_succeeded() {
         let new_bucket_number = 40;
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3439,7 +3441,7 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy, DefaultMemberDynamicBagCreationPolicy::get());
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3453,7 +3455,7 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy, DefaultChannelDynamicBagCreationPolicy::get());
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3462,3 +3464,31 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy.number_of_storage_buckets, new_bucket_number);
     });
 }
+
+#[test]
+fn create_distribution_bucket_family_succeeded() {
+    build_test_externalities().execute_with(|| {
+        let starting_block = 1;
+        run_to_block(starting_block);
+
+        let family_id = CreateDistributionBucketBucketFamilyFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .call_and_assert(Ok(()))
+            .unwrap();
+
+        let bucket_family = Storage::distribution_bucket_family_by_id(family_id);
+
+        assert_eq!(bucket_family, DistributionBucketFamily::default());
+
+        EventFixture::assert_last_crate_event(RawEvent::DistributionBucketFamilyCreated(family_id));
+    });
+}
+
+#[test]
+fn create_distribution_bucket_family_fails_with_non_signed_origin() {
+    build_test_externalities().execute_with(|| {
+        CreateDistributionBucketBucketFamilyFixture::default()
+            .with_origin(RawOrigin::None)
+            .call_and_assert(Err(DispatchError::BadOrigin));
+    });
+}

+ 3 - 0
runtime/src/integration/proposals/proposal_encoder.rs

@@ -22,6 +22,9 @@ macro_rules! wrap_working_group_call {
                 Call::ContentDirectoryWorkingGroup($working_group_instance_call)
             }
             WorkingGroup::Storage => Call::StorageWorkingGroup($working_group_instance_call),
+            WorkingGroup::Distribution => {
+                Call::DistributionWorkingGroup($working_group_instance_call)
+            }
         }
     }};
 }

+ 24 - 3
runtime/src/lib.rs

@@ -565,6 +565,9 @@ pub type StorageWorkingGroupInstance = working_group::Instance2;
 // The content directory working group instance alias.
 pub type ContentDirectoryWorkingGroupInstance = working_group::Instance3;
 
+// The distribution working group instance alias.
+pub type DistributionWorkingGroupInstance = working_group::Instance4;
+
 parameter_types! {
     pub const MaxWorkerNumberLimit: u32 = 100;
 }
@@ -579,6 +582,11 @@ impl working_group::Trait<ContentDirectoryWorkingGroupInstance> for Runtime {
     type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
 }
 
+impl working_group::Trait<DistributionWorkingGroupInstance> for Runtime {
+    type Event = Event;
+    type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
+}
+
 parameter_types! {
     pub const ProposalCancellationFee: u64 = 10000;
     pub const ProposalRejectionFee: u64 = 5000;
@@ -665,6 +673,7 @@ impl storage::Trait for Runtime {
     type DataObjectId = DataObjectId;
     type StorageBucketId = StorageBucketId;
     type DistributionBucketId = DistributionBucketId;
+    type DistributionBucketFamilyId = DistributionBucketFamilyId;
     type ChannelId = ChannelId;
     type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag;
     type DataObjectDeletionPrize = DataObjectDeletionPrize;
@@ -677,19 +686,30 @@ impl storage::Trait for Runtime {
     type Randomness = RandomnessCollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
 
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
         StorageWorkingGroup::ensure_origin_is_active_leader(origin)
     }
 
-    fn ensure_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult {
+    fn ensure_storage_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult {
         StorageWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ())
     }
 
-    fn ensure_worker_exists(worker_id: &ActorId) -> DispatchResult {
+    fn ensure_storage_worker_exists(worker_id: &ActorId) -> DispatchResult {
         StorageWorkingGroup::ensure_worker_exists(&worker_id)
             .map(|_| ())
             .map_err(|err| err.into())
     }
+
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+        DistributionWorkingGroup::ensure_origin_is_active_leader(origin)
+    }
+
+    fn ensure_distribution_worker_origin(
+        origin: Self::Origin,
+        worker_id: ActorId,
+    ) -> DispatchResult {
+        DistributionWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ())
+    }
 }
 
 /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know
@@ -755,6 +775,7 @@ construct_runtime!(
         // reserved for the future use: ForumWorkingGroup: working_group::<Instance1>::{Module, Call, Storage, Event<T>},
         StorageWorkingGroup: working_group::<Instance2>::{Module, Call, Storage, Config<T>, Event<T>},
         ContentDirectoryWorkingGroup: working_group::<Instance3>::{Module, Call, Storage, Config<T>, Event<T>},
+        DistributionWorkingGroup: working_group::<Instance4>::{Module, Call, Storage, Config<T>, Event<T>},
         //
         Storage: storage::{Module, Call, Storage, Event<T>},
     }

+ 3 - 0
runtime/src/primitives.rs

@@ -74,6 +74,9 @@ pub type StorageBucketId = u64;
 /// Represent a distribution bucket from the storage pallet.
 pub type DistributionBucketId = u64;
 
+/// Represent a distribution bucket family from the storage pallet.
+pub type DistributionBucketFamilyId = u64;
+
 /// Represent a media channel.
 pub type ChannelId = u64;