
Merge branch 'sumer' into sumer-content-dir-impl-channels

Mokhtar Naamani · 4 years ago · commit 5ad77e66bc

+ 0 - 6
node/src/chain_spec/content_config.rs

@@ -124,7 +124,6 @@ impl EncodedContentData {
 pub fn empty_data_directory_config() -> DataDirectoryConfig {
     DataDirectoryConfig {
         data_object_by_content_id: vec![],
-        known_content_ids: vec![],
         quotas: vec![],
         quota_size_limit_upper_bound: 20000,
         quota_objects_limit_upper_bound: 200,
@@ -150,11 +149,6 @@ pub fn data_directory_config_from_json(data_file: &Path) -> DataDirectoryConfig
             .iter()
             .map(|object| (object.storage_object_owner.clone(), object.quota))
             .collect(),
-        known_content_ids: content
-            .data_objects
-            .into_iter()
-            .map(|object| object.content_id)
-            .collect(),
         quota_size_limit_upper_bound: content.quota_size_limit_upper_bound,
         quota_objects_limit_upper_bound: content.quota_objects_limit_upper_bound,
         global_quota: content.global_quota,

+ 18 - 3
node/src/chain_spec/mod.rs

@@ -29,9 +29,10 @@ use sp_runtime::traits::{IdentifyAccount, Verify};
 use sp_runtime::Perbill;
 
 use node_runtime::{
-    membership, AuthorityDiscoveryConfig, BabeConfig, Balance, BalancesConfig, ContentConfig,
-    ContentDirectoryWorkingGroupConfig, CouncilConfig, CouncilElectionConfig, DataDirectoryConfig,
-    DataObjectStorageRegistryConfig, DataObjectTypeRegistryConfig, ElectionParameters, ForumConfig,
+    membership, AuthorityDiscoveryConfig, BabeConfig, Balance, BalancesConfig,
+    BuilderWorkingGroupConfig, ContentConfig, ContentDirectoryWorkingGroupConfig, CouncilConfig,
+    CouncilElectionConfig, DataDirectoryConfig, DataObjectStorageRegistryConfig,
+    DataObjectTypeRegistryConfig, ElectionParameters, ForumConfig, GatewayWorkingGroupConfig,
     GrandpaConfig, ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig, SessionConfig,
     SessionKeys, Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig, SudoConfig,
     SystemConfig, DAYS, WASM_BINARY,
@@ -318,6 +319,20 @@ pub fn testnet_genesis(
             worker_application_human_readable_text_constraint: default_text_constraint,
             worker_exit_rationale_text_constraint: default_text_constraint,
         }),
+        working_group_Instance4: Some(BuilderWorkingGroupConfig {
+            phantom: Default::default(),
+            working_group_mint_capacity: 0,
+            opening_human_readable_text_constraint: default_text_constraint,
+            worker_application_human_readable_text_constraint: default_text_constraint,
+            worker_exit_rationale_text_constraint: default_text_constraint,
+        }),
+        working_group_Instance5: Some(GatewayWorkingGroupConfig {
+            phantom: Default::default(),
+            working_group_mint_capacity: 0,
+            opening_human_readable_text_constraint: default_text_constraint,
+            worker_application_human_readable_text_constraint: default_text_constraint,
+            worker_exit_rationale_text_constraint: default_text_constraint,
+        }),
         content: Some({
             ContentConfig {
                 next_curator_group_id: 1,

+ 15 - 0
runtime-modules/common/src/storage.rs

@@ -41,4 +41,19 @@ pub trait StorageSystem<T: crate::StorageOwnership + crate::MembershipTypes> {
         owner: StorageObjectOwner<T::MemberId, T::ChannelId, T::DAOId>,
         content_parameters: Vec<ContentParameters<T::ContentId, T::DataObjectTypeId>>,
     ) -> DispatchResult;
+
+    // Should hook into a call on the storage system,
+    // but requires rich error types (with reasons).
+    // The caller already knows the `ContentId`s, as they are part of
+    // the ContentUploadParameters.
+    fn atomically_remove_content(
+        owner: &StorageObjectOwner<T::MemberId, T::ChannelId, T::DAOId>,
+        content_ids: &[T::ContentId],
+    ) -> DispatchResult;
+
+    // Checks if the given owner can remove content under the given content ids from the storage system
+    fn can_remove_content(
+        owner: &StorageObjectOwner<T::MemberId, T::ChannelId, T::DAOId>,
+        content_ids: &[T::ContentId],
+    ) -> DispatchResult;
 }
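
For orientation, a consumer of this trait would typically pair the two new methods: a cheap pre-flight check, then the removal itself. A minimal sketch (not part of this commit): `remove_assets` and its generic parameters are hypothetical, and the imports assume the existing `common` crate layout.

use common::storage::{StorageObjectOwner, StorageSystem};
use frame_support::dispatch::DispatchResult;

// Hypothetical helper in a consumer module: verify removability without
// touching state, then perform the atomic removal.
fn remove_assets<T, S>(
    owner: &StorageObjectOwner<T::MemberId, T::ChannelId, T::DAOId>,
    content_ids: &[T::ContentId],
) -> DispatchResult
where
    T: common::StorageOwnership + common::MembershipTypes,
    S: StorageSystem<T>,
{
    // Fails with the storage module's error if any id is unknown or belongs
    // to a different owner; no state is changed.
    S::can_remove_content(owner, content_ids)?;

    // Deletes the data objects and releases the owner and global quotas.
    S::atomically_remove_content(owner, content_ids)
}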

+ 4 - 0
runtime-modules/common/src/working_group.rs

@@ -16,4 +16,8 @@ pub enum WorkingGroup {
     Storage,
     /// Content directory working group: working_group::Instance3.
     Content,
+    /// Builder working group: working_group::Instance4.
+    Builder,
+    /// Gateway working group: working_group::Instance5.
+    Gateway,
 }

+ 129 - 52
runtime-modules/storage/src/data_directory.rs

@@ -62,8 +62,6 @@ pub trait Trait:
     /// Validates member id and origin combination.
     type MemberOriginValidator: ActorOriginValidator<Self::Origin, MemberId<Self>, Self::AccountId>;
 
-    type MaxObjectsPerInjection: Get<u32>;
-
     /// Default content quota for all actors.
     type DefaultQuota: Get<Quota>;
 }
@@ -109,6 +107,9 @@ decl_error! {
 
         /// Content uploading blocked.
         ContentUploadingBlocked,
+
+        /// Provided owner should be equal to the data object owner under the given content id
+        OwnersAreNotEqual
     }
 }
 
@@ -244,8 +245,6 @@ pub type DataObjectsMap<T> = BTreeMap<ContentId<T>, DataObject<T>>;
 
 decl_storage! {
     trait Store for Module<T: Trait> as DataDirectory {
-        /// List of ids known to the system.
-        pub KnownContentIds get(fn known_content_ids) config(): Vec<ContentId<T>> = Vec::new();
 
         /// Maps data objects by their content id.
         pub DataObjectByContentId get(fn data_object_by_content_id) config():
@@ -276,6 +275,7 @@ decl_event! {
         StorageProviderId = StorageProviderId<T>,
         Content = Vec<ContentParameters<ContentId<T>, DataObjectTypeId<T>>>,
         ContentId = ContentId<T>,
+        ContentIds = Vec<ContentId<T>>,
         QuotaLimit = u64,
         UploadingStatus = bool
     {
@@ -285,6 +285,12 @@ decl_event! {
         /// - StorageObjectOwner enum.
         ContentAdded(Content, StorageObjectOwner),
 
+        /// Emits on content removal.
+        /// Params:
+        /// - Ids of the removed content.
+        /// - StorageObjectOwner enum.
+        ContentRemoved(ContentIds, StorageObjectOwner),
+
         /// Emits when the storage provider accepts a content.
         /// Params:
         /// - Id of the relationship.
@@ -325,9 +331,6 @@ decl_module! {
         /// Predefined errors.
         type Error = Error<T>;
 
-        /// Maximum objects allowed per inject_data_objects() transaction
-        const MaxObjectsPerInjection: u32 = T::MaxObjectsPerInjection::get();
-
         /// Adds the content to the system. The created DataObject
         /// awaits liaison to accept or reject it.
         #[weight = 10_000_000] // TODO: adjust weight
@@ -365,6 +368,30 @@ decl_module! {
             Self::deposit_event(RawEvent::ContentAdded(content, owner));
         }
 
+        /// Remove the content from the system.
+        #[weight = 10_000_000] // TODO: adjust weight
+        pub fn remove_content(
+            origin,
+            owner: StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
+            content_ids: Vec<ContentId<T>>
+        ) {
+
+            // Ensure the given origin can perform this operation on behalf of the given storage object owner
+            Self::ensure_storage_object_owner_origin(origin, &owner)?;
+
+            // Ensure content under given content ids can be successfully removed
+            let content = Self::ensure_content_can_be_removed(&content_ids, &owner)?;
+
+            //
+            // == MUTATION SAFE ==
+            //
+
+            // Remove the content
+            Self::delete_content(&owner, &content_ids, content);
+
+            Self::deposit_event(RawEvent::ContentRemoved(content_ids, owner));
+        }
+
         /// Updates storage object owner quota objects limit. Requires leader privileges.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn update_storage_object_owner_quota_objects_limit(
@@ -433,8 +460,6 @@ decl_module! {
 
             Self::update_content_judgement(&storage_provider_id, content_id, LiaisonJudgement::Accepted)?;
 
-            <KnownContentIds<T>>::mutate(|ids| ids.push(content_id));
-
             Self::deposit_event(RawEvent::ContentAccepted(content_id, storage_provider_id));
         }
 
@@ -464,48 +489,6 @@ decl_module! {
             <UploadingBlocked>::put(is_blocked);
             Self::deposit_event(RawEvent::ContentUploadingStatusUpdated(is_blocked));
         }
-
-        // Sudo methods
-
-        /// Removes the content id from the list of known content ids. Requires root privileges.
-        #[weight = 10_000_000] // TODO: adjust weight
-        fn remove_known_content_id(origin, content_id: T::ContentId) {
-            ensure_root(origin)?;
-
-            // == MUTATION SAFE ==
-
-            let upd_content_ids: Vec<T::ContentId> = Self::known_content_ids()
-                .into_iter()
-                .filter(|&id| id != content_id)
-                .collect();
-            <KnownContentIds<T>>::put(upd_content_ids);
-        }
-
-        /// Injects a set of data objects and their corresponding content id into the directory.
-        /// The operation is "silent" - no events will be emitted as objects are added.
-        /// The number of objects that can be added per call is limited to prevent the dispatch
-        /// from causing the block production to fail if it takes too much time to process.
-        /// Existing data objects will be overwritten.
-        #[weight = 10_000_000] // TODO: adjust weight
-        pub(crate) fn inject_data_objects(origin, objects: DataObjectsMap<T>) {
-            ensure_root(origin)?;
-
-            // Must provide something to inject
-            ensure!(objects.len() <= T::MaxObjectsPerInjection::get() as usize, Error::<T>::DataObjectsInjectionExceededLimit);
-
-            for (id, object) in objects.into_iter() {
-                // append to known content ids
-                // duplicates will be removed at the end
-                <KnownContentIds<T>>::mutate(|ids| ids.push(id));
-                <DataObjectByContentId<T>>::insert(id, object);
-            }
-
-            // remove duplicate ids
-            <KnownContentIds<T>>::mutate(|ids| {
-                ids.sort();
-                ids.dedup();
-            });
-        }
     }
 }
 
@@ -572,6 +555,35 @@ impl<T: Trait> Module<T> {
         })
     }
 
+    // Ensure content under given content ids can be successfully removed
+    fn ensure_content_can_be_removed(
+        content_ids: &[T::ContentId],
+        owner: &StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
+    ) -> Result<Vec<DataObject<T>>, Error<T>> {
+        let mut content = Vec::new();
+        for content_id in content_ids {
+            let data_object =
+                Self::data_object_by_content_id(content_id).ok_or(Error::<T>::CidNotFound)?;
+            ensure!(data_object.owner == *owner, Error::<T>::OwnersAreNotEqual);
+            content.push(data_object);
+        }
+
+        Ok(content)
+    }
+
+    fn calculate_content_voucher(content: Vec<DataObject<T>>) -> Voucher {
+        let content_length = content.len() as u64;
+
+        let content_size = content
+            .into_iter()
+            .fold(0, |total_size, content| total_size + content.size);
+
+        Voucher {
+            size: content_size,
+            objects: content_length,
+        }
+    }
+
     // Ensures global quota constraints satisfied.
     fn ensure_global_quota_constraints_satisfied(upload_voucher: Voucher) -> DispatchResult {
         let global_quota_voucher = Self::global_quota().calculate_voucher();
@@ -618,6 +630,27 @@ impl<T: Trait> Module<T> {
         <GlobalQuota>::mutate(|global_quota| global_quota.fill_quota(upload_voucher));
     }
 
+    // Complete content removal
+    fn delete_content(
+        owner: &StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
+        content_ids: &[T::ContentId],
+        content: Vec<DataObject<T>>,
+    ) {
+        let removal_voucher = Self::calculate_content_voucher(content);
+
+        for content_id in content_ids {
+            <DataObjectByContentId<T>>::remove(content_id);
+        }
+
+        // Update the owner quota.
+        <Quotas<T>>::mutate(owner, |owner_quota| {
+            owner_quota.release_quota(removal_voucher)
+        });
+
+        // Update global quota
+        <GlobalQuota>::mutate(|global_quota| global_quota.release_quota(removal_voucher));
+    }
+
     fn ensure_content_is_valid(
         multi_content: &[ContentParameters<T::ContentId, DataObjectTypeId<T>>],
     ) -> DispatchResult {
@@ -691,22 +724,66 @@ impl<T: Trait> common::storage::StorageSystem<T> for Module<T> {
     ) -> DispatchResult {
         Self::ensure_content_is_valid(&content)?;
 
-        let liaison = T::StorageProviderHelper::get_random_storage_provider()?;
+        Self::ensure_uploading_is_not_blocked()?;
 
         let owner_quota = Self::get_quota(&owner);
+
+        // Ensure owner quota constraints satisfied.
+        // Calculate upload voucher
         let upload_voucher = Self::ensure_owner_quota_constraints_satisfied(owner_quota, &content)?;
 
+        // Ensure global quota constraints satisfied.
+        Self::ensure_global_quota_constraints_satisfied(upload_voucher)?;
+
+        let liaison = T::StorageProviderHelper::get_random_storage_provider()?;
+
+        //
+        // == MUTATION SAFE ==
+        //
+
+        // Create the entry
+
         Self::upload_content(owner_quota, upload_voucher, liaison, content, owner);
         Ok(())
     }
 
+    fn atomically_remove_content(
+        owner: &StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
+        content_ids: &[T::ContentId],
+    ) -> DispatchResult {
+        // Ensure content under given content ids can be successfully removed
+        let content = Self::ensure_content_can_be_removed(content_ids, owner)?;
+
+        //
+        // == MUTATION SAFE ==
+        //
+
+        // Remove the content
+        Self::delete_content(owner, content_ids, content);
+        Ok(())
+    }
+
     fn can_add_content(
         owner: StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
         content: Vec<ContentParameters<T::ContentId, DataObjectTypeId<T>>>,
     ) -> DispatchResult {
+        Self::ensure_uploading_is_not_blocked()?;
+
         T::StorageProviderHelper::get_random_storage_provider()?;
         let owner_quota = Self::get_quota(&owner);
+
+        // Ensure owner quota constraints satisfied.
         Self::ensure_owner_quota_constraints_satisfied(owner_quota, &content)?;
         Self::ensure_content_is_valid(&content)
     }
+
+    fn can_remove_content(
+        owner: &StorageObjectOwner<MemberId<T>, ChannelId<T>, DAOId<T>>,
+        content_ids: &[ContentId<T>],
+    ) -> DispatchResult {
+        // Ensure content under given content ids can be successfully removed
+        Self::ensure_content_can_be_removed(content_ids, &owner)?;
+
+        Ok(())
+    }
 }
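
The removal path sizes its quota release with `calculate_content_voucher`, which just aggregates the deleted objects. A standalone illustration of that arithmetic (sketch only; this `Voucher` is a simplified stand-in for the pallet's type):

// Simplified stand-in for the pallet's Voucher: total size plus object count.
struct Voucher {
    size: u64,
    objects: u64,
}

// Mirrors the fold in `calculate_content_voucher`, but over plain sizes.
fn removal_voucher(object_sizes: &[u64]) -> Voucher {
    Voucher {
        size: object_sizes.iter().sum(),
        objects: object_sizes.len() as u64,
    }
}

fn main() {
    // Removing two objects of 10 and 20 bytes releases a voucher of
    // { size: 30, objects: 2 } from the owner quota and the global quota.
    let v = removal_voucher(&[10, 20]);
    assert_eq!((v.size, v.objects), (30, 2));
}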

+ 0 - 137
runtime-modules/storage/src/tests/data_directory.rs

@@ -2,7 +2,6 @@
 
 use common::storage::StorageObjectOwner;
 use frame_support::dispatch::DispatchError;
-use sp_std::collections::btree_map::BTreeMap;
 use system::RawOrigin;
 
 use super::mock::*;
@@ -205,139 +204,3 @@ fn reject_content_as_liaison() {
         assert_eq!(res, Ok(()));
     });
 }
-
-#[test]
-fn data_object_injection_works() {
-    with_default_mock_builder(|| {
-        // No objects in directory before injection
-        assert_eq!(TestDataDirectory::known_content_ids(), vec![]);
-
-        // new objects to inject into the directory
-        let mut objects = BTreeMap::new();
-
-        let object = data_directory::DataObjectInternal {
-            type_id: 1,
-            size: 1234,
-            added_at: data_directory::BlockAndTime {
-                block: 10,
-                time: 1024,
-            },
-            owner: StorageObjectOwner::Member(1),
-            liaison: TEST_MOCK_LIAISON_STORAGE_PROVIDER_ID,
-            liaison_judgement: data_directory::LiaisonJudgement::Pending,
-            ipfs_content_id: vec![],
-        };
-
-        let content_id_1 = 1;
-        objects.insert(content_id_1, object.clone());
-
-        let content_id_2 = 2;
-        objects.insert(content_id_2, object.clone());
-
-        let res = TestDataDirectory::inject_data_objects(RawOrigin::Root.into(), objects);
-        assert!(res.is_ok());
-
-        assert_eq!(
-            TestDataDirectory::known_content_ids(),
-            vec![content_id_1, content_id_2]
-        );
-
-        assert_eq!(
-            TestDataDirectory::data_object_by_content_id(content_id_1),
-            Some(object.clone())
-        );
-
-        assert_eq!(
-            TestDataDirectory::data_object_by_content_id(content_id_2),
-            Some(object)
-        );
-    });
-}
-
-#[test]
-fn data_object_injection_overwrites_and_removes_duplicate_ids() {
-    with_default_mock_builder(|| {
-        let sender = 1u64;
-        let owner = StorageObjectOwner::Member(1u64);
-        let content_id_1 = 1;
-        let content_id_2 = 2;
-
-        let content_parameters_first = ContentParameters {
-            content_id: content_id_1,
-            type_id: 1,
-            size: 10,
-            ipfs_content_id: vec![8, 8, 8, 8],
-        };
-
-        let content_parameters_second = ContentParameters {
-            content_id: content_id_2,
-            type_id: 2,
-            size: 20,
-            ipfs_content_id: vec![9, 9, 9, 9],
-        };
-
-        // Start with some existing objects in directory which will be
-        // overwritten
-        let res = TestDataDirectory::add_content(
-            Origin::signed(sender),
-            owner.clone(),
-            vec![content_parameters_first],
-        );
-        assert!(res.is_ok());
-        let res = TestDataDirectory::add_content(
-            Origin::signed(sender),
-            owner,
-            vec![content_parameters_second],
-        );
-        assert!(res.is_ok());
-
-        let mut objects = BTreeMap::new();
-
-        let object1 = data_directory::DataObjectInternal {
-            type_id: 1,
-            size: 6666,
-            added_at: data_directory::BlockAndTime {
-                block: 10,
-                time: 1000,
-            },
-            owner: StorageObjectOwner::Member(5),
-            liaison: TEST_MOCK_LIAISON_STORAGE_PROVIDER_ID,
-            liaison_judgement: data_directory::LiaisonJudgement::Pending,
-            ipfs_content_id: vec![5, 6, 7],
-        };
-
-        let object2 = data_directory::DataObjectInternal {
-            type_id: 1,
-            size: 7777,
-            added_at: data_directory::BlockAndTime {
-                block: 20,
-                time: 2000,
-            },
-            owner: StorageObjectOwner::Member(6),
-            liaison: TEST_MOCK_LIAISON_STORAGE_PROVIDER_ID,
-            liaison_judgement: data_directory::LiaisonJudgement::Pending,
-            ipfs_content_id: vec![5, 6, 7],
-        };
-
-        objects.insert(content_id_1, object1.clone());
-        objects.insert(content_id_2, object2.clone());
-
-        let res = TestDataDirectory::inject_data_objects(RawOrigin::Root.into(), objects);
-        assert!(res.is_ok());
-
-        assert_eq!(
-            TestDataDirectory::known_content_ids(),
-            vec![content_id_1, content_id_2]
-        );
-
-        assert_eq!(
-            TestDataDirectory::data_object_by_content_id(content_id_1),
-            Some(object1.clone())
-        );
-
-        assert_eq!(
-            TestDataDirectory::data_object_by_content_id(content_id_2),
-            Some(object2)
-        );
-    });
-}
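
With the injection tests removed, the new `remove_content` extrinsic is the natural next thing to cover. A rough sketch of such a test, illustrative only and not part of this commit, reusing the mock helpers and the `ContentParameters` shape visible in the deleted tests above:

#[test]
fn remove_content_as_owner_works() {
    with_default_mock_builder(|| {
        let sender = 1u64;
        let owner = StorageObjectOwner::Member(1u64);
        let content_id = 1;

        // Upload a single object owned by member 1.
        let res = TestDataDirectory::add_content(
            Origin::signed(sender),
            owner.clone(),
            vec![ContentParameters {
                content_id,
                type_id: 1,
                size: 10,
                ipfs_content_id: vec![1, 2, 3, 4],
            }],
        );
        assert!(res.is_ok());

        // Remove it again as the same owner.
        let res = TestDataDirectory::remove_content(
            Origin::signed(sender),
            owner,
            vec![content_id],
        );
        assert!(res.is_ok());

        // The data object should be gone from the directory.
        assert_eq!(
            TestDataDirectory::data_object_by_content_id(content_id),
            None
        );
    });
}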

+ 0 - 3
runtime-modules/storage/src/tests/mock.rs

@@ -96,7 +96,6 @@ parameter_types! {
     pub const MaximumBlockLength: u32 = 2 * 1024;
     pub const AvailableBlockRatio: Perbill = Perbill::one();
     pub const MinimumPeriod: u64 = 5;
-    pub const MaxObjectsPerInjection: u32 = 5;
     pub const DefaultQuota: Quota = Quota::new(5000, 50);
 }
 
@@ -180,7 +179,6 @@ impl data_directory::Trait for Test {
     type StorageProviderHelper = ();
     type IsActiveDataObjectType = AnyDataObjectTypeIsActive;
     type MemberOriginValidator = ();
-    type MaxObjectsPerInjection = MaxObjectsPerInjection;
     type DefaultQuota = DefaultQuota;
 }
 
@@ -293,7 +291,6 @@ impl ExtBuilder {
             quota_objects_limit_upper_bound: self.quota_objects_limit_upper_bound,
             global_quota: self.global_quota,
             data_object_by_content_id: vec![],
-            known_content_ids: vec![],
             quotas: vec![],
             uploading_blocked: false,
         }

+ 2 - 0
runtime/src/integration/proposals/proposal_encoder.rs

@@ -22,6 +22,8 @@ macro_rules! wrap_working_group_call {
                 Call::ContentDirectoryWorkingGroup($working_group_instance_call)
             }
             WorkingGroup::Storage => Call::StorageWorkingGroup($working_group_instance_call),
+            WorkingGroup::Builder => Call::BuilderWorkingGroup($working_group_instance_call),
+            WorkingGroup::Gateway => Call::GatewayWorkingGroup($working_group_instance_call),
         }
     }};
 }

+ 18 - 2
runtime/src/lib.rs

@@ -453,7 +453,6 @@ impl memo::Trait for Runtime {
 }
 
 parameter_types! {
-    pub const MaxObjectsPerInjection: u32 = 100;
     pub const DefaultQuota: Quota = Quota::new(5000, 50);
 }
 
@@ -466,7 +465,6 @@ impl storage::data_directory::Trait for Runtime {
     type StorageProviderHelper = integration::storage::StorageProviderHelper;
     type IsActiveDataObjectType = DataObjectTypeRegistry;
     type MemberOriginValidator = MembershipOriginValidator<Self>;
-    type MaxObjectsPerInjection = MaxObjectsPerInjection;
     type DefaultQuota = DefaultQuota;
 }
 
@@ -497,6 +495,12 @@ pub type StorageWorkingGroupInstance = working_group::Instance2;
 // The content directory working group instance alias.
 pub type ContentDirectoryWorkingGroupInstance = working_group::Instance3;
 
+// The builder working group instance alias.
+pub type BuilderWorkingGroupInstance = working_group::Instance4;
+
+// The gateway working group instance alias.
+pub type GatewayWorkingGroupInstance = working_group::Instance5;
+
 parameter_types! {
     pub const MaxWorkerNumberLimit: u32 = 100;
 }
@@ -511,6 +515,16 @@ impl working_group::Trait<ContentDirectoryWorkingGroupInstance> for Runtime {
     type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
 }
 
+impl working_group::Trait<BuilderWorkingGroupInstance> for Runtime {
+    type Event = Event;
+    type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
+}
+
+impl working_group::Trait<GatewayWorkingGroupInstance> for Runtime {
+    type Event = Event;
+    type MaxWorkerNumberLimit = MaxWorkerNumberLimit;
+}
+
 impl service_discovery::Trait for Runtime {
     type Event = Event;
 }
@@ -645,5 +659,7 @@ construct_runtime!(
         // reserved for the future use: ForumWorkingGroup: working_group::<Instance1>::{Module, Call, Storage, Event<T>},
         StorageWorkingGroup: working_group::<Instance2>::{Module, Call, Storage, Config<T>, Event<T>},
         ContentDirectoryWorkingGroup: working_group::<Instance3>::{Module, Call, Storage, Config<T>, Event<T>},
+        BuilderWorkingGroup: working_group::<Instance4>::{Module, Call, Storage, Config<T>, Event<T>},
+        GatewayWorkingGroup: working_group::<Instance5>::{Module, Call, Storage, Config<T>, Event<T>},
     }
 );

+ 121 - 2
runtime/src/tests/proposals_integration/working_group_proposals.rs

@@ -11,8 +11,9 @@ use proposals_codex::AddOpeningParameters;
 use working_group::{OpeningPolicyCommitment, RewardPolicy};
 
 use crate::{
-    Balance, BlockNumber, ContentDirectoryWorkingGroup, ContentDirectoryWorkingGroupInstance,
-    StorageWorkingGroup, StorageWorkingGroupInstance,
+    Balance, BlockNumber, BuilderWorkingGroup, BuilderWorkingGroupInstance,
+    ContentDirectoryWorkingGroup, ContentDirectoryWorkingGroupInstance, GatewayWorkingGroup,
+    GatewayWorkingGroupInstance, StorageWorkingGroup, StorageWorkingGroupInstance,
 };
 use sp_std::collections::btree_set::BTreeSet;
 
@@ -52,6 +53,22 @@ fn add_opening(
             >>::contains_key(opening_id));
             opening_id
         }
+        WorkingGroup::Builder => {
+            let opening_id = BuilderWorkingGroup::next_opening_id();
+            assert!(!<working_group::OpeningById<
+                Runtime,
+                BuilderWorkingGroupInstance,
+            >>::contains_key(opening_id));
+            opening_id
+        }
+        WorkingGroup::Gateway => {
+            let opening_id = GatewayWorkingGroup::next_opening_id();
+            assert!(!<working_group::OpeningById<
+                Runtime,
+                GatewayWorkingGroupInstance,
+            >>::contains_key(opening_id));
+            opening_id
+        }
     };
 
     let codex_extrinsic_test_fixture = CodexProposalTestFixture::default_for_call(|| {
@@ -330,6 +347,18 @@ fn create_add_working_group_leader_opening_proposal_execution_succeeds() {
                     StorageWorkingGroupInstance,
                 >(group);
             }
+            WorkingGroup::Builder => {
+                run_create_add_working_group_leader_opening_proposal_execution_succeeds::<
+                    Runtime,
+                    BuilderWorkingGroupInstance,
+                >(group);
+            }
+            WorkingGroup::Gateway => {
+                run_create_add_working_group_leader_opening_proposal_execution_succeeds::<
+                    Runtime,
+                    GatewayWorkingGroupInstance,
+                >(group);
+            }
         }
     }
 }
@@ -388,6 +417,18 @@ fn create_begin_review_working_group_leader_applications_proposal_execution_succ
                 StorageWorkingGroupInstance,
             >(group);
             }
+            WorkingGroup::Builder => {
+                run_create_begin_review_working_group_leader_applications_proposal_execution_succeeds::<
+                Runtime,
+                BuilderWorkingGroupInstance,
+            >(group);
+            }
+            WorkingGroup::Gateway => {
+                run_create_begin_review_working_group_leader_applications_proposal_execution_succeeds::<
+                Runtime,
+                GatewayWorkingGroupInstance,
+            >(group);
+            }
         }
     }
 }
@@ -468,6 +509,18 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() {
                     StorageWorkingGroupInstance,
                 >(group);
             }
+            WorkingGroup::Builder => {
+                run_create_fill_working_group_leader_opening_proposal_execution_succeeds::<
+                    Runtime,
+                    BuilderWorkingGroupInstance,
+                >(group);
+            }
+            WorkingGroup::Gateway => {
+                run_create_fill_working_group_leader_opening_proposal_execution_succeeds::<
+                    Runtime,
+                    GatewayWorkingGroupInstance,
+                >(group);
+            }
         }
     }
 
@@ -545,6 +598,18 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() {
                         StorageWorkingGroupInstance,
                     >(group);
                 }
+                WorkingGroup::Builder => {
+                    run_create_decrease_group_leader_stake_proposal_execution_succeeds::<
+                        Runtime,
+                        BuilderWorkingGroupInstance,
+                    >(group);
+                }
+                WorkingGroup::Gateway => {
+                    run_create_decrease_group_leader_stake_proposal_execution_succeeds::<
+                        Runtime,
+                        GatewayWorkingGroupInstance,
+                    >(group);
+                }
             }
         }
     }
@@ -661,6 +726,18 @@ fn run_create_decrease_group_leader_stake_proposal_execution_succeeds<
                         StorageWorkingGroupInstance,
                     >(group)
                 }
+                WorkingGroup::Builder => {
+                    run_create_slash_group_leader_stake_proposal_execution_succeeds::<
+                        Runtime,
+                        BuilderWorkingGroupInstance,
+                    >(group)
+                }
+                WorkingGroup::Gateway => {
+                    run_create_slash_group_leader_stake_proposal_execution_succeeds::<
+                        Runtime,
+                        GatewayWorkingGroupInstance,
+                    >(group)
+                }
             }
         }
     }
@@ -778,6 +855,18 @@ fn run_create_slash_group_leader_stake_proposal_execution_succeeds<
                         StorageWorkingGroupInstance,
                     >(group);
                 }
+                WorkingGroup::Builder => {
+                    run_create_set_working_group_mint_capacity_proposal_execution_succeeds::<
+                        Runtime,
+                        BuilderWorkingGroupInstance,
+                    >(group);
+                }
+                WorkingGroup::Gateway => {
+                    run_create_set_working_group_mint_capacity_proposal_execution_succeeds::<
+                        Runtime,
+                        GatewayWorkingGroupInstance,
+                    >(group);
+                }
             }
         }
 
@@ -834,6 +923,18 @@ fn run_create_slash_group_leader_stake_proposal_execution_succeeds<
                             StorageWorkingGroupInstance,
                         >(group);
                     }
+                    WorkingGroup::Builder => {
+                        run_create_set_working_group_mint_capacity_proposal_execution_succeeds::<
+                            Runtime,
+                            BuilderWorkingGroupInstance,
+                        >(group);
+                    }
+                    WorkingGroup::Gateway => {
+                        run_create_set_working_group_mint_capacity_proposal_execution_succeeds::<
+                            Runtime,
+                            GatewayWorkingGroupInstance,
+                        >(group);
+                    }
                 }
             }
         }
@@ -957,6 +1058,18 @@ fn run_create_slash_group_leader_stake_proposal_execution_succeeds<
                             StorageWorkingGroupInstance,
                         >(group);
                     }
+                    WorkingGroup::Builder => {
+                        run_create_terminate_group_leader_role_proposal_execution_succeeds::<
+                            Runtime,
+                            BuilderWorkingGroupInstance,
+                        >(group);
+                    }
+                    WorkingGroup::Gateway => {
+                        run_create_terminate_group_leader_role_proposal_execution_succeeds::<
+                            Runtime,
+                            GatewayWorkingGroupInstance,
+                        >(group);
+                    }
                 }
             }
         }
@@ -1076,6 +1189,12 @@ fn run_create_slash_group_leader_stake_proposal_execution_succeeds<
                     WorkingGroup::Storage => {
                         run_create_terminate_group_leader_role_proposal_with_slashing_execution_succeeds::<Runtime, StorageWorkingGroupInstance>(group);
                     }
+                    WorkingGroup::Builder => {
+                        run_create_terminate_group_leader_role_proposal_with_slashing_execution_succeeds::<Runtime, BuilderWorkingGroupInstance>(group);
+                    }
+                    WorkingGroup::Gateway => {
+                        run_create_terminate_group_leader_role_proposal_with_slashing_execution_succeeds::<Runtime, GatewayWorkingGroupInstance>(group);
+                    }
                 }
             }
         }