Browse Source

runtime: storage: Add `create_distribution_bucket_family` extrinsic.

Shamil Gadelshin 3 years ago
parent
commit
4d026c53fd

+ 11 - 4
node/src/chain_spec/mod.rs

@@ -33,10 +33,10 @@ use sp_runtime::Perbill;
 use node_runtime::{
     membership, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, Balance, BalancesConfig,
     ContentDirectoryConfig, ContentDirectoryWorkingGroupConfig, ContentWorkingGroupConfig,
-    CouncilConfig, CouncilElectionConfig, ElectionParameters, ForumConfig, GrandpaConfig,
-    ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig, SessionConfig, SessionKeys,
-    Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig, SudoConfig, SystemConfig,
-    VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS,
+    CouncilConfig, CouncilElectionConfig, DistributionWorkingGroupConfig, ElectionParameters,
+    ForumConfig, GrandpaConfig, ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig,
+    SessionConfig, SessionKeys, Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig,
+    SudoConfig, SystemConfig, VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS,
 };
 
 // Exported to be used by chain-spec-builder
@@ -319,6 +319,13 @@ pub fn testnet_genesis(
             worker_application_human_readable_text_constraint: default_text_constraint,
             worker_exit_rationale_text_constraint: default_text_constraint,
         }),
+        working_group_Instance4: Some(DistributionWorkingGroupConfig {
+            phantom: Default::default(),
+            working_group_mint_capacity: 0,
+            opening_human_readable_text_constraint: default_text_constraint,
+            worker_application_human_readable_text_constraint: default_text_constraint,
+            worker_exit_rationale_text_constraint: default_text_constraint,
+        }),
         content_directory: Some({
             ContentDirectoryConfig {
                 class_by_id: vec![],

+ 3 - 0
runtime-modules/common/src/working_group.rs

@@ -20,4 +20,7 @@ pub enum WorkingGroup {
 
     /// Storage working group: working_group::Instance3.
     Content = 3isize,
+
+    /// Distribution working group: working_group::Instance4.
+    Distribution = 4isize,
 }

+ 115 - 25
runtime-modules/storage/src/lib.rs

@@ -89,6 +89,7 @@ use codec::{Codec, Decode, Encode};
 use frame_support::dispatch::{DispatchError, DispatchResult};
 use frame_support::traits::{Currency, ExistenceRequirement, Get, Randomness};
 use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter};
+use frame_system::ensure_root;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 use sp_arithmetic::traits::{BaseArithmetic, One, Zero};
@@ -99,7 +100,6 @@ use sp_std::collections::btree_set::BTreeSet;
 use sp_std::iter;
 use sp_std::marker::PhantomData;
 use sp_std::vec::Vec;
-use frame_system::ensure_root;
 
 use common::constraints::BoundedValueConstraint;
 use common::origin::ActorOriginValidator;
@@ -108,6 +108,11 @@ use common::working_group::WorkingGroup;
 use bag_manager::BagManager;
 use storage_bucket_picker::StorageBucketPicker;
 
+// TODO: constants
+// Max number of distribution bucket families
+// Max number of distribution buckets per family.
+// Max number of pending invitations per distribution bucket.
+
 /// Public interface for the storage module.
 pub trait DataObjectStorage<T: Trait> {
     /// Validates upload parameters and conditions (like global uploading block).
@@ -197,6 +202,16 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
         + MaybeSerialize
         + PartialEq;
 
+    /// Distribution bucket family ID type.
+    type DistributionBucketFamilyId: Parameter
+        + Member
+        + BaseArithmetic
+        + Codec
+        + Default
+        + Copy
+        + MaybeSerialize
+        + PartialEq;
+
     /// Channel ID type (part of the dynamic bag ID).
     type ChannelId: Parameter
         + Member
@@ -240,17 +255,31 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait {
     /// Something that provides randomness in the runtime.
     type Randomness: Randomness<Self::Hash>;
 
-    /// Demand the working group leader authorization.
+    /// Demand the storage working group leader authorization.
+    /// TODO: Refactor after merging with the Olympia release.
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
+
+    /// Validate origin for the storage worker.
+    /// TODO: Refactor after merging with the Olympia release.
+    fn ensure_storage_worker_origin(
+        origin: Self::Origin,
+        worker_id: WorkerId<Self>,
+    ) -> DispatchResult;
+
+    /// Validate storage worker existence.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
+    fn ensure_storage_worker_exists(worker_id: &WorkerId<Self>) -> DispatchResult;
 
-    /// Validate origin for the worker.
+    /// Demand the distribution working group leader authorization.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_worker_origin(origin: Self::Origin, worker_id: WorkerId<Self>) -> DispatchResult;
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult;
 
-    /// Validate worker existence.
+    /// Validate origin for the distribution worker.
     /// TODO: Refactor after merging with the Olympia release.
-    fn ensure_worker_exists(worker_id: &WorkerId<Self>) -> DispatchResult;
+    fn ensure_distribution_worker_origin(
+        origin: Self::Origin,
+        worker_id: WorkerId<Self>,
+    ) -> DispatchResult;
 }
 
 /// Operations with local pallet account.
@@ -694,6 +723,26 @@ impl<Balance: Saturating + Copy> BagChangeInfo<Balance> {
     }
 }
 
+/// Distribution bucket family.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
+pub struct DistributionBucketFamily<DistributionBucketId: Ord> {
+    /// Distribution bucket map.
+    pub distribution_buckets: BTreeMap<DistributionBucketId, DistributionBucket>,
+}
+
+/// Distribution bucket.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
+pub struct DistributionBucket {
+    //TODO:
+// pending_invitations: BTreeSet<WorkerId>,
+// number_of_pending_data_objects: u32,
+// accepting_new_bags: bool,
+// distributing: bool,
+// number_of_operators: u32,
+}
+
 decl_storage! {
     trait Store for Module<T: Trait> as Storage {
         /// Defines whether all new uploads blocked
@@ -741,6 +790,15 @@ decl_storage! {
         /// DynamicBagCreationPolicy by bag type storage map.
         pub DynamicBagCreationPolicies get (fn dynamic_bag_creation_policy):
             map hasher(blake2_128_concat) DynamicBagType => DynamicBagCreationPolicy;
+
+        /// Distribution bucket family id counter. Starts at zero.
+        pub NextDistributionBucketFamilyId get(fn next_distribution_bucket_family_id):
+            T::DistributionBucketFamilyId;
+
+        /// Distribution bucket families.
+        pub DistributionBucketFamilyById get (fn distribution_bucket_family_by_id):
+            map hasher(blake2_128_concat) T::DistributionBucketFamilyId =>
+            DistributionBucketFamily<T::DistributionBucketId>;
     }
 }
 
@@ -756,6 +814,7 @@ decl_event! {
         DynamicBagId = DynamicBagId<T>,
         <T as frame_system::Trait>::AccountId,
         Balance = BalanceOf<T>,
+        <T as Trait>::DistributionBucketFamilyId,
     {
         /// Emits on creating the storage bucket.
         /// Params
@@ -905,6 +964,12 @@ decl_event! {
         /// - dynamic bag type
         /// - new number of storage buckets
         NumberOfStorageBucketsInDynamicBagCreationPolicyUpdated(DynamicBagType, u64),
+
+        /// Emits on creating a distribution bucket family.
+        ///
+        /// Params
+        /// - distribution bucket family id
+        DistributionBucketFamilyCreated(DistributionBucketFamilyId),
     }
 }
 
@@ -1065,7 +1130,7 @@ decl_module! {
             origin,
             storage_bucket_id: T::StorageBucketId,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1090,7 +1155,7 @@ decl_module! {
         /// Update whether uploading is globally blocked.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_uploading_blocked_status(origin, new_status: bool) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1104,7 +1169,7 @@ decl_module! {
         /// Updates size-based pricing of new objects uploaded.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_data_size_fee(origin, new_data_size_fee: BalanceOf<T>) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1118,7 +1183,7 @@ decl_module! {
         /// Updates "Storage buckets per bag" number limit.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_storage_buckets_per_bag_limit(origin, new_limit: u64) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             T::StorageBucketsPerBagValueConstraint::get().ensure_valid(
                 new_limit,
@@ -1142,7 +1207,7 @@ decl_module! {
             new_objects_size: u64,
             new_objects_number: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1163,7 +1228,7 @@ decl_module! {
             dynamic_bag_type: DynamicBagType,
             number_of_storage_buckets: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             //
             // == MUTATION SAFE ==
@@ -1190,7 +1255,7 @@ decl_module! {
             remove_hashes: BTreeSet<ContentId>,
             add_hashes: BTreeSet<ContentId>
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             // Get only hashes that exist in the blacklist.
             let verified_remove_hashes = Self::get_existing_hashes(&remove_hashes);
@@ -1233,7 +1298,7 @@ decl_module! {
             size_limit: u64,
             objects_limit: u64,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let voucher = Voucher {
                 size_limit,
@@ -1285,7 +1350,7 @@ decl_module! {
             add_buckets: BTreeSet<T::StorageBucketId>,
             remove_buckets: BTreeSet<T::StorageBucketId>,
         ) {
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let voucher_update = Self::validate_update_storage_buckets_for_bag_params(
                 &bag_id,
@@ -1325,7 +1390,7 @@ decl_module! {
         /// Cancel pending storage bucket invite. An invitation must be pending.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn cancel_storage_bucket_operator_invite(origin, storage_bucket_id: T::StorageBucketId){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1351,7 +1416,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             operator_id: WorkerId<T>,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1379,7 +1444,7 @@ decl_module! {
             origin,
             storage_bucket_id: T::StorageBucketId,
         ){
-            T::ensure_working_group_leader_origin(origin)?;
+            T::ensure_storage_working_group_leader_origin(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1408,7 +1473,7 @@ decl_module! {
             worker_id: WorkerId<T>,
             storage_bucket_id: T::StorageBucketId
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1435,7 +1500,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             metadata: Vec<u8>
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1463,7 +1528,7 @@ decl_module! {
             new_objects_size_limit: u64,
             new_objects_number_limit: u64,
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1510,7 +1575,7 @@ decl_module! {
             bag_id: BagId<T>,
             data_objects: BTreeSet<T::DataObjectId>,
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1543,7 +1608,7 @@ decl_module! {
             storage_bucket_id: T::StorageBucketId,
             accepting_new_bags: bool
         ) {
-            T::ensure_worker_origin(origin, worker_id)?;
+            T::ensure_storage_worker_origin(origin, worker_id)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
@@ -1581,6 +1646,31 @@ decl_module! {
 
           Self::create_dynamic_bag(bag_id)?;
         }
+
+        /// Create a distribution bucket family.
+        #[weight = 10_000_000] // TODO: adjust weight
+        pub fn create_distribution_bucket_family(origin) {
+            T::ensure_distribution_working_group_leader_origin(origin)?;
+
+            // TODO: check max bucket families number
+
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            let family = DistributionBucketFamily {
+                distribution_buckets: BTreeMap::new(),
+            };
+
+            let family_id = Self::next_distribution_bucket_family_id();
+
+            <NextDistributionBucketFamilyId<T>>::put(family_id + One::one());
+
+            <DistributionBucketFamilyById<T>>::insert(family_id, family);
+
+            Self::deposit_event(RawEvent::DistributionBucketFamilyCreated(family_id));
+        }
     }
 }
 
@@ -2376,7 +2466,7 @@ impl<T: Trait> Module<T> {
     // Verifies storage provider operator existence.
     fn ensure_storage_provider_operator_exists(operator_id: &WorkerId<T>) -> DispatchResult {
         ensure!(
-            T::ensure_worker_exists(operator_id).is_ok(),
+            T::ensure_storage_worker_exists(operator_id).is_ok(),
             Error::<T>::StorageProviderOperatorDoesntExist
         );
 

+ 59 - 9
runtime-modules/storage/src/tests/fixtures.rs

@@ -6,7 +6,7 @@ use sp_std::collections::btree_set::BTreeSet;
 
 use super::mocks::{
     Balances, CollectiveFlip, Storage, System, Test, TestEvent, DEFAULT_MEMBER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, WG_LEADER_ACCOUNT_ID,
+    DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
 use crate::{
@@ -44,6 +44,7 @@ impl EventFixture {
             DynamicBagId<Test>,
             u64,
             u64,
+            u64,
         >,
     ) {
         let converted_event = TestEvent::storage(expected_raw_event);
@@ -61,6 +62,7 @@ impl EventFixture {
             DynamicBagId<Test>,
             u64,
             u64,
+            u64,
         >,
     ) {
         let converted_event = TestEvent::storage(expected_raw_event);
@@ -454,7 +456,7 @@ pub struct CancelStorageBucketInvitationFixture {
 impl CancelStorageBucketInvitationFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             storage_bucket_id: Default::default(),
         }
     }
@@ -556,7 +558,7 @@ pub struct UpdateUploadingBlockedStatusFixture {
 impl UpdateUploadingBlockedStatusFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_status: false,
         }
     }
@@ -736,7 +738,7 @@ pub struct UpdateBlacklistFixture {
 impl UpdateBlacklistFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             remove_hashes: BTreeSet::new(),
             add_hashes: BTreeSet::new(),
         }
@@ -846,7 +848,7 @@ pub struct RemoveStorageBucketOperatorFixture {
 impl RemoveStorageBucketOperatorFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             storage_bucket_id: Default::default(),
         }
     }
@@ -892,7 +894,7 @@ pub struct UpdateDataObjectPerMegabyteFeeFixture {
 impl UpdateDataObjectPerMegabyteFeeFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_fee: 0,
         }
     }
@@ -928,7 +930,7 @@ pub struct UpdateStorageBucketsPerBagLimitFixture {
 impl UpdateStorageBucketsPerBagLimitFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_limit: 0,
         }
     }
@@ -1038,7 +1040,7 @@ pub struct UpdateStorageBucketsVoucherMaxLimitsFixture {
 impl UpdateStorageBucketsVoucherMaxLimitsFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_objects_size_limit: 0,
             new_objects_number_limit: 0,
         }
@@ -1128,7 +1130,7 @@ pub struct UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
 impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
     pub fn default() -> Self {
         Self {
-            origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID),
+            origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID),
             new_storage_buckets_number: 0,
             dynamic_bag_type: Default::default(),
         }
@@ -1175,3 +1177,51 @@ impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture {
         }
     }
 }
+
+pub struct CreateDistributionBucketBucketFamilyFixture {
+    origin: RawOrigin<u64>,
+}
+
+impl CreateDistributionBucketBucketFamilyFixture {
+    pub fn default() -> Self {
+        Self {
+            origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
+        }
+    }
+
+    pub fn with_origin(self, origin: RawOrigin<u64>) -> Self {
+        Self { origin, ..self }
+    }
+
+    pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option<u64> {
+        let next_family_id = Storage::next_distribution_bucket_family_id();
+        //let buckets_number = Storage::storage_buckets_number(); // TODO
+        let actual_result = Storage::create_distribution_bucket_family(self.origin.clone().into());
+
+        assert_eq!(actual_result, expected_result);
+
+        if actual_result.is_ok() {
+            assert_eq!(
+                next_family_id + 1,
+                Storage::next_distribution_bucket_family_id()
+            );
+            // assert_eq!(buckets_number + 1, Storage::storage_buckets_number()); //TODO
+            assert!(<crate::DistributionBucketFamilyById<Test>>::contains_key(
+                next_family_id
+            ));
+
+            Some(next_family_id)
+        } else {
+            assert_eq!(
+                next_family_id,
+                Storage::next_distribution_bucket_family_id()
+            );
+            // assert_eq!(buckets_number, Storage::storage_buckets_number()); //TODO
+            assert!(!<crate::DistributionBucketFamilyById<Test>>::contains_key(
+                next_family_id
+            ));
+
+            None
+        }
+    }
+}

+ 28 - 5
runtime-modules/storage/src/tests/mocks.rs

@@ -69,8 +69,10 @@ parameter_types! {
     };
 }
 
-pub const WG_LEADER_ACCOUNT_ID: u64 = 100001;
+pub const STORAGE_WG_LEADER_ACCOUNT_ID: u64 = 100001;
 pub const DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID: u64 = 100002;
+pub const DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID: u64 = 100003;
+pub const DISTRIBUTION_WG_LEADER_ACCOUNT_ID: u64 = 100004;
 pub const DEFAULT_STORAGE_PROVIDER_ID: u64 = 10;
 pub const ANOTHER_STORAGE_PROVIDER_ID: u64 = 11;
 
@@ -79,6 +81,7 @@ impl crate::Trait for Test {
     type DataObjectId = u64;
     type StorageBucketId = u64;
     type DistributionBucketId = u64;
+    type DistributionBucketFamilyId = u64;
     type ChannelId = u64;
     type MaxStorageBucketNumber = MaxStorageBucketNumber;
     type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag;
@@ -92,17 +95,17 @@ impl crate::Trait for Test {
     type Randomness = CollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
 
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
         let account_id = ensure_signed(origin)?;
 
-        if account_id != WG_LEADER_ACCOUNT_ID {
+        if account_id != STORAGE_WG_LEADER_ACCOUNT_ID {
             Err(DispatchError::BadOrigin)
         } else {
             Ok(())
         }
     }
 
-    fn ensure_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
+    fn ensure_storage_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
         let account_id = ensure_signed(origin)?;
 
         if account_id != DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID {
@@ -112,7 +115,7 @@ impl crate::Trait for Test {
         }
     }
 
-    fn ensure_worker_exists(worker_id: &u64) -> DispatchResult {
+    fn ensure_storage_worker_exists(worker_id: &u64) -> DispatchResult {
         let allowed_storage_providers =
             vec![DEFAULT_STORAGE_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID];
 
@@ -122,6 +125,26 @@ impl crate::Trait for Test {
             Ok(())
         }
     }
+
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+        let account_id = ensure_signed(origin)?;
+
+        if account_id != DISTRIBUTION_WG_LEADER_ACCOUNT_ID {
+            Err(DispatchError::BadOrigin)
+        } else {
+            Ok(())
+        }
+    }
+
+    fn ensure_distribution_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult {
+        let account_id = ensure_signed(origin)?;
+
+        if account_id != DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID {
+            Err(DispatchError::BadOrigin)
+        } else {
+            Ok(())
+        }
+    }
 }
 
 pub const DEFAULT_MEMBER_ID: u64 = 100;

+ 131 - 102
runtime-modules/storage/src/tests/mod.rs

@@ -14,9 +14,9 @@ use sp_std::iter::FromIterator;
 use common::working_group::WorkingGroup;
 
 use crate::{
-    BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DynamicBagCreationPolicy,
-    DynamicBagId, DynamicBagType, Error, ModuleAccount, RawEvent, StaticBagId,
-    StorageBucketOperatorStatus, StorageTreasury, UploadParameters, Voucher,
+    BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily,
+    DynamicBagCreationPolicy, DynamicBagId, DynamicBagType, Error, ModuleAccount, RawEvent,
+    StaticBagId, StorageBucketOperatorStatus, StorageTreasury, UploadParameters, Voucher,
 };
 
 use mocks::{
@@ -25,9 +25,10 @@ use mocks::{
     InitialStorageBucketsNumberForDynamicBag, MaxNumberOfDataObjectsPerBag,
     MaxRandomIterationNumber, MaxStorageBucketNumber, Storage, Test, ANOTHER_STORAGE_PROVIDER_ID,
     DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID, DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID,
-    DEFAULT_STORAGE_PROVIDER_ID, WG_LEADER_ACCOUNT_ID,
+    DEFAULT_STORAGE_PROVIDER_ID, STORAGE_WG_LEADER_ACCOUNT_ID,
 };
 
+use crate::tests::mocks::DISTRIBUTION_WG_LEADER_ACCOUNT_ID;
 use fixtures::*;
 
 #[test]
@@ -45,7 +46,7 @@ fn create_storage_bucket_succeeded() {
         let invite_worker = None;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_accepting_new_bags(accepting_new_bags)
             .with_invite_worker(invite_worker)
             .with_size_limit(size_limit)
@@ -77,12 +78,12 @@ fn create_storage_bucket_fails_with_invalid_voucher_params() {
         let objects_limit = 10;
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_size_limit(size_limit)
             .call_and_assert(Err(Error::<Test>::VoucherMaxObjectSizeLimitExceeded.into()));
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(objects_limit)
             .call_and_assert(Err(
                 Error::<Test>::VoucherMaxObjectNumberLimitExceeded.into()
@@ -98,7 +99,7 @@ fn create_storage_bucket_succeeded_with_invited_member() {
         let invite_worker = Some(invited_worker_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_accepting_new_bags(accepting_new_bags)
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
@@ -140,7 +141,7 @@ fn create_storage_bucket_fails_with_exceeding_max_storage_bucket_limit() {
         create_storage_buckets(buckets_number);
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(
                 Error::<Test>::MaxStorageBucketNumberLimitExceeded.into()
             ));
@@ -153,7 +154,7 @@ fn create_storage_bucket_fails_with_invalid_storage_provider_id() {
         let invalid_storage_provider_id = 155;
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(invalid_storage_provider_id))
             .call_and_assert(Err(Error::<Test>::StorageProviderOperatorDoesntExist.into()));
     });
@@ -169,7 +170,7 @@ fn accept_storage_bucket_invitation_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -211,7 +212,7 @@ fn accept_storage_bucket_invitation_fails_with_non_existing_storage_bucket() {
 fn accept_storage_bucket_invitation_fails_with_non_invited_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -229,7 +230,7 @@ fn accept_storage_bucket_invitation_fails_with_different_invited_storage_provide
         let different_storage_provider_id = ANOTHER_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(different_storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -247,7 +248,7 @@ fn accept_storage_bucket_invitation_fails_with_already_set_storage_provider() {
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -279,7 +280,7 @@ fn update_storage_buckets_for_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -287,7 +288,7 @@ fn update_storage_buckets_for_bags_succeeded() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Ok(()));
@@ -313,7 +314,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -321,7 +322,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::DynamicBagDoesntExist.into()));
@@ -335,7 +336,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() {
         let bag_id = BagId::<Test>::StaticBag(static_bag_id.clone());
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .with_accepting_new_bags(false)
             .call_and_assert(Ok(()))
@@ -344,7 +345,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() {
         let add_buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(add_buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntAcceptNewBags.into()));
@@ -379,7 +380,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() {
         let size_limit = 100;
 
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
             .call_and_assert(Ok(()))
@@ -392,7 +393,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() {
         assert_eq!(bag.stored_by, old_buckets);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .with_remove_bucket_ids(old_buckets.clone())
@@ -441,7 +442,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb
         let new_bucket_objects_limit = 0;
         let new_bucket_size_limit = 100;
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(new_bucket_objects_limit)
             .with_size_limit(new_bucket_size_limit)
             .call_and_assert(Ok(()))
@@ -450,7 +451,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb
         let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .call_and_assert(Err(
@@ -486,7 +487,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota
         let new_bucket_objects_limit = 1;
         let new_bucket_size_limit = 5;
         let new_bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_objects_limit(new_bucket_objects_limit)
             .with_size_limit(new_bucket_size_limit)
             .call_and_assert(Ok(()))
@@ -495,7 +496,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota
         let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(new_buckets.clone())
             .call_and_assert(Err(
@@ -511,7 +512,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -522,7 +523,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() {
         let bag_id = BagId::<Test>::StaticBag(static_bag_id.clone());
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -541,7 +542,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -554,7 +555,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() {
         create_dynamic_bag(&dynamic_bag_id);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_remove_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -579,7 +580,7 @@ fn update_storage_buckets_for_bags_fails_with_non_leader_origin() {
 fn update_storage_buckets_for_bags_fails_with_empty_params() {
     build_test_externalities().execute_with(|| {
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketIdCollectionsAreEmpty.into()));
     });
 }
@@ -593,14 +594,14 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_storage_buckets() {
 
         // Invalid added bucket ID.
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
 
         // Invalid removed bucket ID.
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_remove_bucket_ids(buckets.clone())
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
@@ -668,7 +669,7 @@ fn upload_succeeded_with_data_size_fee() {
         let data_size_fee = 100;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -983,7 +984,7 @@ fn upload_fails_with_insufficient_balance_for_data_size_fee() {
         let data_size_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -1008,7 +1009,7 @@ fn upload_failed_with_blocked_uploading() {
 
         let new_blocking_status = true;
         UpdateUploadingBlockedStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_blocking_status)
             .call_and_assert(Ok(()));
 
@@ -1029,7 +1030,7 @@ fn upload_failed_with_blacklisted_data_object() {
         let add_hashes = BTreeSet::from_iter(vec![hash]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes)
             .call_and_assert(Ok(()));
 
@@ -1056,7 +1057,7 @@ fn set_storage_operator_metadata_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1110,7 +1111,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() {
 
         // Missing invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
@@ -1122,7 +1123,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() {
 
         // Not accepted invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1166,7 +1167,7 @@ fn accept_pending_data_objects_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .with_size_limit(size_limit)
             .with_objects_limit(objects_limit)
@@ -1182,7 +1183,7 @@ fn accept_pending_data_objects_succeeded() {
         let buckets = BTreeSet::from_iter(vec![bucket_id]);
 
         UpdateStorageBucketForBagsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_bag_id(bag_id.clone())
             .with_add_bucket_ids(buckets.clone())
             .call_and_assert(Ok(()));
@@ -1240,7 +1241,7 @@ fn accept_pending_data_objects_fails_with_unrelated_storage_bucket() {
         let bag_id = BagId::<Test>::StaticBag(static_bag_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1286,7 +1287,7 @@ fn accept_pending_data_objects_fails_with_non_existing_dynamic_bag() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1328,7 +1329,7 @@ fn accept_pending_data_objects_succeeded_with_dynamic_bag() {
         let size_limit = 100;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
@@ -1471,13 +1472,13 @@ fn cancel_storage_bucket_operator_invite_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -1502,7 +1503,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_leader_origin() {
 fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -1511,13 +1512,13 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket(
 fn cancel_storage_bucket_operator_invite_fails_with_non_invited_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::NoStorageBucketInvitation.into()));
     });
@@ -1529,7 +1530,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(storage_provider_id))
             .call_and_assert(Ok(()))
             .unwrap();
@@ -1541,7 +1542,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider
             .call_and_assert(Ok(()));
 
         CancelStorageBucketInvitationFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderAlreadySet.into()));
     });
@@ -1556,12 +1557,12 @@ fn invite_storage_bucket_operator_succeeded() {
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_operator_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));
@@ -1588,7 +1589,7 @@ fn invite_storage_bucket_operator_fails_with_non_leader_origin() {
 fn invite_storage_bucket_operator_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -1599,13 +1600,13 @@ fn invite_storage_bucket_operator_fails_with_non_missing_invitation() {
         let invited_worker_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(invited_worker_id))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::InvitedStorageProvider.into()));
     });
@@ -1617,12 +1618,12 @@ fn invite_storage_bucket_operator_fails_with_invalid_storage_provider_id() {
         let invalid_storage_provider_id = 155;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         InviteStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_operator_worker_id(invalid_storage_provider_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderOperatorDoesntExist.into()));
@@ -1638,7 +1639,7 @@ fn update_uploading_blocked_status_succeeded() {
         let new_blocking_status = true;
 
         UpdateUploadingBlockedStatusFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_status(new_blocking_status)
             .call_and_assert(Ok(()));
 
@@ -2075,7 +2076,7 @@ fn delete_data_objects_fails_with_invalid_treasury_balance() {
         let invite_worker = Some(storage_provider_id);
 
         CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2195,7 +2196,7 @@ fn update_storage_bucket_status_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2248,7 +2249,7 @@ fn update_storage_bucket_status_fails_with_invalid_storage_association() {
 
         // Missing invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
@@ -2260,7 +2261,7 @@ fn update_storage_bucket_status_fails_with_invalid_storage_association() {
 
         // Not accepted invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2298,7 +2299,7 @@ fn update_blacklist_succeeded() {
 
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2309,7 +2310,7 @@ fn update_blacklist_succeeded() {
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Ok(()));
@@ -2332,7 +2333,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() {
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2340,7 +2341,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() {
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone(), cid3.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Err(Error::<Test>::BlacklistSizeLimitExceeded.into()));
@@ -2361,7 +2362,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2369,7 +2370,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha
         let add_hashes = BTreeSet::from_iter(vec![cid2.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .with_remove_hashes(remove_hashes.clone())
             .call_and_assert(Err(Error::<Test>::BlacklistSizeLimitExceeded.into()));
@@ -2388,12 +2389,12 @@ fn update_blacklist_succeeds_with_existent_remove_hashes() {
         let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]);
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
         UpdateBlacklistFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_add_hashes(add_hashes.clone())
             .call_and_assert(Ok(()));
 
@@ -2429,7 +2430,7 @@ fn create_storage_bucket_and_assign_to_bag(
     set_max_voucher_limits();
 
     let bucket_id = CreateStorageBucketFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_invite_worker(storage_provider_id)
         .with_objects_limit(objects_limit)
         .with_size_limit(size_limit)
@@ -2439,7 +2440,7 @@ fn create_storage_bucket_and_assign_to_bag(
     let buckets = BTreeSet::from_iter(vec![bucket_id]);
 
     UpdateStorageBucketForBagsFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_bag_id(bag_id.clone())
         .with_add_bucket_ids(buckets.clone())
         .call_and_assert(Ok(()));
@@ -2607,12 +2608,12 @@ fn delete_storage_bucket_succeeded() {
         run_to_block(starting_block);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -2635,7 +2636,7 @@ fn delete_storage_bucket_fails_with_non_leader_origin() {
 fn delete_storage_bucket_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -2646,13 +2647,13 @@ fn delete_storage_bucket_fails_with_non_missing_invitation() {
         let invited_worker_id = DEFAULT_STORAGE_PROVIDER_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(Some(invited_worker_id))
             .call_and_assert(Ok(()))
             .unwrap();
 
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::InvitedStorageProvider.into()));
     });
@@ -2682,7 +2683,7 @@ fn delete_storage_bucket_fails_with_non_empty_bucket() {
             .call_and_assert(Ok(()));
 
         DeleteStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::CannotDeleteNonEmptyStorageBucket.into()));
     });
@@ -2698,7 +2699,7 @@ fn remove_storage_bucket_operator_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -2710,7 +2711,7 @@ fn remove_storage_bucket_operator_succeeded() {
             .call_and_assert(Ok(()));
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Ok(()));
 
@@ -2733,7 +2734,7 @@ fn remove_storage_bucket_operator_fails_with_non_leader_origin() {
 fn remove_storage_bucket_operator_fails_with_non_existing_storage_bucket() {
     build_test_externalities().execute_with(|| {
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Err(Error::<Test>::StorageBucketDoesntExist.into()));
     });
 }
@@ -2745,13 +2746,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderMustBeSet.into()));
     });
@@ -2761,13 +2762,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() {
 fn remove_storage_bucket_operator_fails_with_missing_storage_provider() {
     build_test_externalities().execute_with(|| {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .call_and_assert(Ok(()))
             .unwrap();
 
         RemoveStorageBucketOperatorFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .call_and_assert(Err(Error::<Test>::StorageProviderMustBeSet.into()));
     });
@@ -2782,7 +2783,7 @@ fn update_data_size_fee_succeeded() {
         let new_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(new_fee)
             .call_and_assert(Ok(()));
 
@@ -2812,7 +2813,7 @@ fn data_size_fee_calculation_works_properly() {
         let data_size_fee = 1000;
 
         UpdateDataObjectPerMegabyteFeeFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_fee(data_size_fee)
             .call_and_assert(Ok(()));
 
@@ -2918,7 +2919,7 @@ fn update_storage_buckets_per_bag_limit_succeeded() {
         let new_limit = 4;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Ok(()));
 
@@ -2945,14 +2946,14 @@ fn update_storage_buckets_per_bag_limit_fails_with_incorrect_value() {
         let new_limit = 0;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Err(Error::<Test>::StorageBucketsPerBagLimitTooLow.into()));
 
         let new_limit = 100;
 
         UpdateStorageBucketsPerBagLimitFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_limit(new_limit)
             .call_and_assert(Err(Error::<Test>::StorageBucketsPerBagLimitTooHigh.into()));
     });
@@ -2962,7 +2963,7 @@ fn set_update_storage_buckets_per_bag_limit() {
     let new_limit = 7;
 
     UpdateStorageBucketsPerBagLimitFixture::default()
-        .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+        .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
         .with_new_limit(new_limit)
         .call_and_assert(Ok(()))
 }
@@ -2979,7 +2980,7 @@ fn set_storage_bucket_voucher_limits_succeeded() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -3017,7 +3018,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_values() {
         let invite_worker = Some(storage_provider_id);
 
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -3075,7 +3076,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_storage_association() {
 
         // Missing invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .call_and_assert(Ok(()))
             .unwrap();
 
@@ -3087,7 +3088,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_storage_association() {
 
         // Not accepted invitation
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(invite_worker)
             .call_and_assert(Ok(()))
             .unwrap();
@@ -3134,7 +3135,7 @@ fn update_storage_buckets_voucher_max_limits_succeeded() {
         let new_number_limit = 4;
 
         UpdateStorageBucketsVoucherMaxLimitsFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_objects_number_limit(new_number_limit)
             .with_new_objects_size_limit(new_size_limit)
             .call_and_assert(Ok(()));
@@ -3402,7 +3403,7 @@ fn create_storage_buckets(buckets_number: u64) -> BTreeSet<u64> {
 
     for _ in 0..buckets_number {
         let bucket_id = CreateStorageBucketFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_invite_worker(None)
             .with_objects_limit(objects_limit)
             .with_size_limit(size_limit)
@@ -3425,7 +3426,7 @@ fn update_number_of_storage_buckets_in_dynamic_bag_creation_policy_succeeded() {
         let new_bucket_number = 40;
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3461,7 +3462,7 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy, DefaultMemberDynamicBagCreationPolicy::get());
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3475,7 +3476,7 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy, DefaultChannelDynamicBagCreationPolicy::get());
 
         UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default()
-            .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID))
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
             .with_new_storage_buckets_number(new_bucket_number)
             .with_dynamic_bag_type(dynamic_bag_type)
             .call_and_assert(Ok(()));
@@ -3484,3 +3485,31 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() {
         assert_eq!(policy.number_of_storage_buckets, new_bucket_number);
     });
 }
+
+#[test]
+fn create_distribution_bucket_family_succeeded() {
+    build_test_externalities().execute_with(|| {
+        let starting_block = 1;
+        run_to_block(starting_block);
+
+        let family_id = CreateDistributionBucketBucketFamilyFixture::default()
+            .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID))
+            .call_and_assert(Ok(()))
+            .unwrap();
+
+        let bucket_family = Storage::distribution_bucket_family_by_id(family_id);
+
+        assert_eq!(bucket_family, DistributionBucketFamily::default());
+
+        EventFixture::assert_last_crate_event(RawEvent::DistributionBucketFamilyCreated(family_id));
+    });
+}
+
+#[test]
+fn create_distribution_bucket_family_fails_with_non_signed_origin() {
+    build_test_externalities().execute_with(|| {
+        CreateDistributionBucketBucketFamilyFixture::default()
+            .with_origin(RawOrigin::None)
+            .call_and_assert(Err(DispatchError::BadOrigin));
+    });
+}

+ 3 - 0
runtime/src/integration/proposals/proposal_encoder.rs

@@ -22,6 +22,9 @@ macro_rules! wrap_working_group_call {
                 Call::ContentDirectoryWorkingGroup($working_group_instance_call)
             }
             WorkingGroup::Storage => Call::StorageWorkingGroup($working_group_instance_call),
+            WorkingGroup::Distribution => {
+                Call::DistributionWorkingGroup($working_group_instance_call)
+            }
         }
     }};
 }

+ 24 - 3
runtime/src/lib.rs

@@ -565,6 +565,9 @@ pub type StorageWorkingGroupInstance = working_group::Instance2;
 // The content directory working group instance alias.
 pub type ContentDirectoryWorkingGroupInstance = working_group::Instance3;
 
+// The distribution working group instance alias.
+pub type DistributionWorkingGroupInstance = working_group::Instance4;
+
 parameter_types! {
     pub const MaxWorkerNumberLimit: u32 = 100;
 }
@@ -579,6 +582,11 @@ impl working_group::Trait<ContentDirectoryWorkingGroupInstance> for Runtime {
     type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
 }
 
+impl working_group::Trait<DistributionWorkingGroupInstance> for Runtime {
+    type Event = Event;
+    type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
+}
+
 parameter_types! {
     pub const ProposalCancellationFee: u64 = 10000;
     pub const ProposalRejectionFee: u64 = 5000;
@@ -666,6 +674,7 @@ impl storage::Trait for Runtime {
     type DataObjectId = DataObjectId;
     type StorageBucketId = StorageBucketId;
     type DistributionBucketId = DistributionBucketId;
+    type DistributionBucketFamilyId = DistributionBucketFamilyId;
     type ChannelId = ChannelId;
     type MaxStorageBucketNumber = MaxStorageBucketNumber;
     type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag;
@@ -679,19 +688,30 @@ impl storage::Trait for Runtime {
     type Randomness = RandomnessCollectiveFlip;
     type MaxRandomIterationNumber = MaxRandomIterationNumber;
 
-    fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+    fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
         StorageWorkingGroup::ensure_origin_is_active_leader(origin)
     }
 
-    fn ensure_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult {
+    fn ensure_storage_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult {
         StorageWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ())
     }
 
-    fn ensure_worker_exists(worker_id: &ActorId) -> DispatchResult {
+    fn ensure_storage_worker_exists(worker_id: &ActorId) -> DispatchResult {
         StorageWorkingGroup::ensure_worker_exists(&worker_id)
             .map(|_| ())
             .map_err(|err| err.into())
     }
+
+    fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult {
+        DistributionWorkingGroup::ensure_origin_is_active_leader(origin)
+    }
+
+    fn ensure_distribution_worker_origin(
+        origin: Self::Origin,
+        worker_id: ActorId,
+    ) -> DispatchResult {
+        DistributionWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ())
+    }
 }
 
 /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know
@@ -757,6 +777,7 @@ construct_runtime!(
         // reserved for the future use: ForumWorkingGroup: working_group::<Instance1>::{Module, Call, Storage, Event<T>},
         StorageWorkingGroup: working_group::<Instance2>::{Module, Call, Storage, Config<T>, Event<T>},
         ContentDirectoryWorkingGroup: working_group::<Instance3>::{Module, Call, Storage, Config<T>, Event<T>},
+        DistributionWorkingGroup: working_group::<Instance4>::{Module, Call, Storage, Config<T>, Event<T>},
         //
         Storage: storage::{Module, Call, Storage, Event<T>},
     }

+ 3 - 0
runtime/src/primitives.rs

@@ -74,6 +74,9 @@ pub type StorageBucketId = u64;
 /// Represent a distribution bucket from the storage pallet.
 pub type DistributionBucketId = u64;
 
+/// Represent a distribution bucket family from the storage pallet.
+pub type DistributionBucketFamilyId = u64;
+
 /// Represent a media channel.
 pub type ChannelId = u64;