
Merge pull request #2935 from shamil-gadelshin/giza_transaction_nonce_fix

Giza transaction nonce fix
Lezek123 · 3 years ago · commit a03903b0ba

+ 0 - 0
chain-metadata.json

File diff suppressed because it is too large


+ 2 - 1
query-node/mappings/storage/index.ts

@@ -95,10 +95,11 @@ export async function storage_StorageBucketInvitationAccepted({
   event,
   store,
 }: EventContext & StoreContext): Promise<void> {
-  const [bucketId, workerId] = new Storage.StorageBucketInvitationAcceptedEvent(event).params
+  const [bucketId, workerId, transactorAccountId] = new Storage.StorageBucketInvitationAcceptedEvent(event).params
   const storageBucket = await getById(store, StorageBucket, bucketId.toString())
   const operatorStatus = new StorageBucketOperatorStatusActive()
   operatorStatus.workerId = workerId.toNumber()
+  operatorStatus.transactorAccountId = transactorAccountId.toString()
   storageBucket.operatorStatus = operatorStatus
   await store.save<StorageBucket>(storageBucket)
 }

+ 1 - 0
query-node/schemas/storage.graphql

@@ -35,6 +35,7 @@ type StorageBucketOperatorStatusInvited @variant {
 
 type StorageBucketOperatorStatusActive @variant {
   workerId: Int!
+  transactorAccountId: String!
 }
 
 union StorageBucketOperatorStatus = StorageBucketOperatorStatusMissing | StorageBucketOperatorStatusInvited | StorageBucketOperatorStatusActive
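
With the new field in place, a query-node consumer can read the transactor account from the active operator-status variant. A minimal sketch, assuming the query node exposes a storageBuckets root query at the hypothetical endpoint below; only the StorageBucketOperatorStatusActive fields come from this diff:

```ts
// Hedged sketch: QUERY_NODE_URL and the `storageBuckets` query name are assumptions.
const QUERY_NODE_URL = 'http://localhost:8081/graphql'

const query = `
  query {
    storageBuckets {
      id
      operatorStatus {
        __typename
        ... on StorageBucketOperatorStatusActive {
          workerId
          transactorAccountId
        }
      }
    }
  }
`

// Requires Node 18+ (global fetch) or a fetch polyfill.
async function listBucketTransactors(): Promise<void> {
  const res = await fetch(QUERY_NODE_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  const { data } = await res.json()
  for (const bucket of data.storageBuckets) {
    console.log(bucket.id, bucket.operatorStatus)
  }
}
```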

+ 75 - 29
runtime-modules/storage/src/lib.rs

@@ -134,7 +134,7 @@ use frame_support::traits::{Currency, ExistenceRequirement, Get, Randomness};
 use frame_support::{
     decl_error, decl_event, decl_module, decl_storage, ensure, IterableStorageDoubleMap, Parameter,
 };
-use frame_system::ensure_root;
+use frame_system::{ensure_root, ensure_signed};
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 use sp_arithmetic::traits::{BaseArithmetic, One, Zero};
@@ -763,7 +763,7 @@ impl VoucherUpdate {
 /// Defines the storage bucket connection to the storage operator (storage WG worker).
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)]
-pub enum StorageBucketOperatorStatus<WorkerId> {
+pub enum StorageBucketOperatorStatus<WorkerId, AccountId> {
     /// No connection.
     Missing,
 
@@ -771,22 +771,25 @@ pub enum StorageBucketOperatorStatus<WorkerId> {
     InvitedStorageWorker(WorkerId),
 
     /// Storage operator accepted the invitation.
-    StorageWorker(WorkerId),
+    StorageWorker(WorkerId, AccountId),
 }
 
-impl<WorkerId> Default for StorageBucketOperatorStatus<WorkerId> {
+impl<WorkerId, AccountId> Default for StorageBucketOperatorStatus<WorkerId, AccountId> {
     fn default() -> Self {
         Self::Missing
     }
 }
 
+/// Type alias for the StorageBucketRecord.
+pub type StorageBucket<T> = StorageBucketRecord<WorkerId<T>, <T as frame_system::Trait>::AccountId>;
+
 /// A commitment to hold some set of bags for long term storage. A bucket may have a bucket
 /// operator, which is a single worker in the storage working group.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)]
-pub struct StorageBucket<WorkerId> {
+pub struct StorageBucketRecord<WorkerId, AccountId> {
     /// Current storage operator status.
-    pub operator_status: StorageBucketOperatorStatus<WorkerId>,
+    pub operator_status: StorageBucketOperatorStatus<WorkerId, AccountId>,
 
     /// Defines whether the bucket accepts new bags.
     pub accepting_new_bags: bool,
@@ -922,7 +925,7 @@ decl_storage! {
 
         /// Storage buckets.
         pub StorageBucketById get (fn storage_bucket_by_id): map hasher(blake2_128_concat)
-            T::StorageBucketId => StorageBucket<WorkerId<T>>;
+            T::StorageBucketId => StorageBucket<T>;
 
         /// Blacklisted data object hashes.
         pub Blacklist get (fn blacklist): map hasher(blake2_128_concat) Cid => ();
@@ -1004,7 +1007,8 @@ decl_event! {
         /// Params
         /// - storage bucket ID
         /// - invited worker ID
-        StorageBucketInvitationAccepted(StorageBucketId, WorkerId),
+        /// - transactor account ID
+        StorageBucketInvitationAccepted(StorageBucketId, WorkerId, AccountId),
 
         /// Emits on updating storage buckets for bag.
         /// Params
@@ -1433,6 +1437,9 @@ decl_error! {
 
         /// Max data object size exceeded.
         MaxDataObjectSizeExceeded,
+
+        /// Invalid transactor account ID for this bucket.
+        InvalidTransactorAccount,
     }
 }
 
@@ -1671,7 +1678,7 @@ decl_module! {
                 .map(StorageBucketOperatorStatus::InvitedStorageWorker)
                 .unwrap_or(StorageBucketOperatorStatus::Missing);
 
-            let storage_bucket = StorageBucket {
+            let storage_bucket = StorageBucket::<T> {
                 operator_status,
                 accepting_new_bags,
                 voucher,
@@ -1888,11 +1895,14 @@ decl_module! {
         // ===== Storage Operator actions =====
 
         /// Accept the storage bucket invitation. An invitation must match the worker_id parameter.
+        /// It accepts an additional account ID (transactor) for accepting data objects to prevent
+        /// transaction nonce collisions.
         #[weight = 10_000_000] // TODO: adjust weight
         pub fn accept_storage_bucket_invitation(
             origin,
             worker_id: WorkerId<T>,
-            storage_bucket_id: T::StorageBucketId
+            storage_bucket_id: T::StorageBucketId,
+            transactor_account_id: T::AccountId,
         ) {
             T::ensure_storage_worker_origin(origin, worker_id)?;
 
@@ -1905,11 +1915,19 @@ decl_module! {
             //
 
             <StorageBucketById<T>>::mutate(storage_bucket_id, |bucket| {
-                bucket.operator_status = StorageBucketOperatorStatus::StorageWorker(worker_id);
+                bucket.operator_status =
+                    StorageBucketOperatorStatus::StorageWorker(
+                        worker_id,
+                        transactor_account_id.clone()
+                );
             });
 
             Self::deposit_event(
-                RawEvent::StorageBucketInvitationAccepted(storage_bucket_id, worker_id)
+                RawEvent::StorageBucketInvitationAccepted(
+                    storage_bucket_id,
+                    worker_id,
+                    transactor_account_id
+                )
             );
         }
 
@@ -1945,11 +1963,11 @@ decl_module! {
             bag_id: BagId<T>,
             data_objects: BTreeSet<T::DataObjectId>,
         ) {
-            T::ensure_storage_worker_origin(origin, worker_id)?;
+            let transactor_account_id = ensure_signed(origin)?;
 
             let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?;
 
-            Self::ensure_bucket_invitation_accepted(&bucket, worker_id)?;
+            Self::ensure_bucket_transactor_access(&bucket, worker_id, transactor_account_id)?;
 
             Self::ensure_bag_exists(&bag_id)?;
 
@@ -2762,7 +2780,7 @@ impl<T: Trait> Module<T> {
     // Returns the StorageBucket object or error.
     fn ensure_storage_bucket_exists(
         storage_bucket_id: &T::StorageBucketId,
-    ) -> Result<StorageBucket<WorkerId<T>>, Error<T>> {
+    ) -> Result<StorageBucket<T>, Error<T>> {
         ensure!(
             <StorageBucketById<T>>::contains_key(storage_bucket_id),
             Error::<T>::StorageBucketDoesntExist
@@ -2774,14 +2792,14 @@ impl<T: Trait> Module<T> {
     // Ensures the correct invitation for the storage bucket and storage provider. Storage provider
     // must be invited.
     fn ensure_bucket_storage_provider_invitation_status(
-        bucket: &StorageBucket<WorkerId<T>>,
+        bucket: &StorageBucket<T>,
         worker_id: WorkerId<T>,
     ) -> DispatchResult {
         match bucket.operator_status {
             StorageBucketOperatorStatus::Missing => {
                 Err(Error::<T>::NoStorageBucketInvitation.into())
             }
-            StorageBucketOperatorStatus::StorageWorker(_) => {
+            StorageBucketOperatorStatus::StorageWorker(..) => {
                 Err(Error::<T>::StorageProviderAlreadySet.into())
             }
             StorageBucketOperatorStatus::InvitedStorageWorker(invited_worker_id) => {
@@ -2798,9 +2816,9 @@ impl<T: Trait> Module<T> {
     // Ensures the correct invitation for the storage bucket and storage provider for removal.
     // Must be invited storage provider.
     fn ensure_bucket_storage_provider_invitation_status_for_removal(
-        bucket: &StorageBucket<WorkerId<T>>,
+        bucket: &StorageBucket<T>,
     ) -> DispatchResult {
-        if let StorageBucketOperatorStatus::StorageWorker(_) = bucket.operator_status {
+        if let StorageBucketOperatorStatus::StorageWorker(..) = bucket.operator_status {
             Ok(())
         } else {
             Err(Error::<T>::StorageProviderMustBeSet.into())
@@ -2808,14 +2826,12 @@ impl<T: Trait> Module<T> {
     }
 
     // Ensures the correct invitation for the storage bucket and storage provider. Must be pending.
-    fn ensure_bucket_pending_invitation_status(
-        bucket: &StorageBucket<WorkerId<T>>,
-    ) -> DispatchResult {
+    fn ensure_bucket_pending_invitation_status(bucket: &StorageBucket<T>) -> DispatchResult {
         match bucket.operator_status {
             StorageBucketOperatorStatus::Missing => {
                 Err(Error::<T>::NoStorageBucketInvitation.into())
             }
-            StorageBucketOperatorStatus::StorageWorker(_) => {
+            StorageBucketOperatorStatus::StorageWorker(..) => {
                 Err(Error::<T>::StorageProviderAlreadySet.into())
             }
             StorageBucketOperatorStatus::InvitedStorageWorker(_) => Ok(()),
@@ -2823,12 +2839,10 @@ impl<T: Trait> Module<T> {
     }
 
     // Ensures the missing invitation for the storage bucket and storage provider.
-    fn ensure_bucket_missing_invitation_status(
-        bucket: &StorageBucket<WorkerId<T>>,
-    ) -> DispatchResult {
+    fn ensure_bucket_missing_invitation_status(bucket: &StorageBucket<T>) -> DispatchResult {
         match bucket.operator_status {
             StorageBucketOperatorStatus::Missing => Ok(()),
-            StorageBucketOperatorStatus::StorageWorker(_) => {
+            StorageBucketOperatorStatus::StorageWorker(..) => {
                 Err(Error::<T>::StorageProviderAlreadySet.into())
             }
             StorageBucketOperatorStatus::InvitedStorageWorker(_) => {
@@ -2839,7 +2853,7 @@ impl<T: Trait> Module<T> {
 
     // Ensures correct storage provider for the storage bucket.
     fn ensure_bucket_invitation_accepted(
-        bucket: &StorageBucket<WorkerId<T>>,
+        bucket: &StorageBucket<T>,
         worker_id: WorkerId<T>,
     ) -> DispatchResult {
         match bucket.operator_status {
@@ -2849,7 +2863,7 @@ impl<T: Trait> Module<T> {
             StorageBucketOperatorStatus::InvitedStorageWorker(_) => {
                 Err(Error::<T>::InvalidStorageProvider.into())
             }
-            StorageBucketOperatorStatus::StorageWorker(invited_worker_id) => {
+            StorageBucketOperatorStatus::StorageWorker(invited_worker_id, _) => {
                 ensure!(
                     worker_id == invited_worker_id,
                     Error::<T>::InvalidStorageProvider
@@ -2860,6 +2874,38 @@ impl<T: Trait> Module<T> {
         }
     }
 
+    // Ensures correct storage provider transactor account for the storage bucket.
+    fn ensure_bucket_transactor_access(
+        bucket: &StorageBucket<T>,
+        worker_id: WorkerId<T>,
+        transactor_account_id: T::AccountId,
+    ) -> DispatchResult {
+        match bucket.operator_status.clone() {
+            StorageBucketOperatorStatus::Missing => {
+                Err(Error::<T>::StorageProviderMustBeSet.into())
+            }
+            StorageBucketOperatorStatus::InvitedStorageWorker(_) => {
+                Err(Error::<T>::InvalidStorageProvider.into())
+            }
+            StorageBucketOperatorStatus::StorageWorker(
+                invited_worker_id,
+                bucket_transactor_account_id,
+            ) => {
+                ensure!(
+                    worker_id == invited_worker_id,
+                    Error::<T>::InvalidStorageProvider
+                );
+
+                ensure!(
+                    transactor_account_id == bucket_transactor_account_id,
+                    Error::<T>::InvalidTransactorAccount
+                );
+
+                Ok(())
+            }
+        }
+    }
+
     // Create data objects from the creation data.
     fn create_data_objects(
         object_creation_list: Vec<DataObjectCreationParameters>,

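These runtime changes are the core of the nonce fix: accept_pending_data_objects is no longer gated on the operator's role-key origin but on a signature from the bucket's dedicated transactor account, so data-object acceptance gets its own transaction nonce sequence and cannot collide with other transactions signed by the role key. A minimal polkadot-js sketch of the intended flow, assuming a local dev node, dev keys, registered Joystream runtime types, and the standard camelCase mapping of the extrinsic names; the bag id and data object ids are placeholders:

```ts
import { ApiPromise, WsProvider, Keyring } from '@polkadot/api'

// Sketch under assumptions: node at ws://localhost:9944, Joystream type definitions
// registered with the ApiPromise, //Alice as the worker role key and //Bob as the
// transactor key. The key split is the point: the transactor key owns the nonce
// used for accepting data objects.
async function acceptFlow(): Promise<void> {
  const api = await ApiPromise.create({ provider: new WsProvider('ws://localhost:9944') })
  const keyring = new Keyring({ type: 'sr25519' })
  const operatorRoleKey = keyring.addFromUri('//Alice') // storage worker role key (assumption)
  const transactorKey = keyring.addFromUri('//Bob') // dedicated transactor key (assumption)

  const workerId = 0
  const bucketId = 0

  // The operator accepts the invitation, registering the transactor account on the bucket.
  await api.tx.storage
    .acceptStorageBucketInvitation(workerId, bucketId, transactorKey.address)
    .signAndSend(operatorRoleKey)

  // Pending data objects are then accepted with the transactor key; the runtime checks
  // the signer against the transactor account stored in the bucket's operator status.
  const bagId = { Static: 'Council' } // placeholder bag id
  const dataObjectIds = [0] // placeholder data object ids
  await api.tx.storage
    .acceptPendingDataObjects(workerId, bucketId, bagId, dataObjectIds)
    .signAndSend(transactorKey)

  await api.disconnect()
}
```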
+ 13 - 1
runtime-modules/storage/src/tests/fixtures.rs

@@ -188,6 +188,7 @@ pub struct AcceptStorageBucketInvitationFixture {
     origin: RawOrigin<u64>,
     worker_id: u64,
     storage_bucket_id: u64,
+    transactor_account_id: u64,
 }
 
 impl AcceptStorageBucketInvitationFixture {
@@ -196,6 +197,7 @@ impl AcceptStorageBucketInvitationFixture {
             origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID),
             worker_id: DEFAULT_WORKER_ID,
             storage_bucket_id: Default::default(),
+            transactor_account_id: DEFAULT_ACCOUNT_ID,
         }
     }
 
@@ -206,6 +208,12 @@ impl AcceptStorageBucketInvitationFixture {
     pub fn with_worker_id(self, worker_id: u64) -> Self {
         Self { worker_id, ..self }
     }
+    pub fn with_transactor_account_id(self, transactor_account_id: u64) -> Self {
+        Self {
+            transactor_account_id,
+            ..self
+        }
+    }
 
     pub fn with_storage_bucket_id(self, storage_bucket_id: u64) -> Self {
         Self {
@@ -221,6 +229,7 @@ impl AcceptStorageBucketInvitationFixture {
             self.origin.clone().into(),
             self.worker_id,
             self.storage_bucket_id,
+            self.transactor_account_id,
         );
 
         assert_eq!(actual_result, expected_result);
@@ -229,7 +238,10 @@ impl AcceptStorageBucketInvitationFixture {
         if actual_result.is_ok() {
             assert_eq!(
                 new_bucket.operator_status,
-                StorageBucketOperatorStatus::StorageWorker(self.worker_id)
+                StorageBucketOperatorStatus::StorageWorker(
+                    self.worker_id,
+                    self.transactor_account_id
+                )
             );
         } else {
             assert_eq!(old_bucket, new_bucket);

+ 47 - 0
runtime-modules/storage/src/tests/mod.rs

@@ -156,6 +156,7 @@ fn accept_storage_bucket_invitation_succeeded() {
 
         let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
         let invite_worker = Some(storage_provider_id);
+        let transactor_id = DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID;
 
         let bucket_id = CreateStorageBucketFixture::default()
             .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
@@ -167,11 +168,13 @@ fn accept_storage_bucket_invitation_succeeded() {
             .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
             .with_storage_bucket_id(bucket_id)
             .with_worker_id(storage_provider_id)
+            .with_transactor_account_id(transactor_id)
             .call_and_assert(Ok(()));
 
         EventFixture::assert_last_crate_event(RawEvent::StorageBucketInvitationAccepted(
             bucket_id,
             storage_provider_id,
+            transactor_id,
         ));
     });
 }
@@ -1327,6 +1330,7 @@ fn accept_pending_data_objects_fails_with_unrelated_storage_bucket() {
 
         AcceptStorageBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_transactor_account_id(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID)
             .with_storage_bucket_id(bucket_id)
             .with_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));
@@ -1373,6 +1377,7 @@ fn accept_pending_data_objects_fails_with_non_existing_dynamic_bag() {
 
         AcceptStorageBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_transactor_account_id(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID)
             .with_storage_bucket_id(bucket_id)
             .with_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));
@@ -1397,6 +1402,46 @@ fn accept_pending_data_objects_fails_with_non_existing_dynamic_bag() {
     });
 }
 
+#[test]
+fn accept_pending_data_objects_fails_with_invalid_transactor_account_id() {
+    build_test_externalities().execute_with(|| {
+        let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID;
+        let invite_worker = Some(storage_provider_id);
+        let transactor_account_id = 11111;
+
+        let bucket_id = CreateStorageBucketFixture::default()
+            .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID))
+            .with_invite_worker(invite_worker)
+            .call_and_assert(Ok(()))
+            .unwrap();
+
+        AcceptStorageBucketInvitationFixture::default()
+            .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_transactor_account_id(transactor_account_id)
+            .with_storage_bucket_id(bucket_id)
+            .with_worker_id(storage_provider_id)
+            .call_and_assert(Ok(()));
+
+        let initial_balance = 1000;
+        increase_account_balance(&DEFAULT_MEMBER_ACCOUNT_ID, initial_balance);
+
+        let dynamic_bag_id = DynamicBagId::<Test>::Member(DEFAULT_MEMBER_ID);
+        let bag_id = BagId::<Test>::Dynamic(dynamic_bag_id.clone());
+
+        let data_object_id = 0;
+
+        let data_object_ids = BTreeSet::from_iter(vec![data_object_id]);
+
+        AcceptPendingDataObjectsFixture::default()
+            .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_worker_id(storage_provider_id)
+            .with_storage_bucket_id(bucket_id)
+            .with_bag_id(bag_id.clone())
+            .with_data_object_ids(data_object_ids)
+            .call_and_assert(Err(Error::<Test>::InvalidTransactorAccount.into()));
+    });
+}
+
 #[test]
 fn accept_pending_data_objects_succeeded_with_dynamic_bag() {
     build_test_externalities().execute_with(|| {
@@ -1417,6 +1462,7 @@ fn accept_pending_data_objects_succeeded_with_dynamic_bag() {
 
         AcceptStorageBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_transactor_account_id(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID)
             .with_storage_bucket_id(bucket_id)
             .with_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));
@@ -2482,6 +2528,7 @@ fn create_storage_bucket_and_assign_to_bag(
     if let Some(storage_provider_id) = storage_provider_id {
         AcceptStorageBucketInvitationFixture::default()
             .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID))
+            .with_transactor_account_id(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID)
             .with_storage_bucket_id(bucket_id)
             .with_worker_id(storage_provider_id)
             .call_and_assert(Ok(()));

+ 1 - 1
storage-node-v2/package.json

@@ -93,7 +93,7 @@
   },
   "volta": {
     "node": "14.16.1",
-    "yarn": "1.22.4"
+    "yarn": "1.22.5"
   },
   "files": [
     "/bin",

+ 1 - 1
storage-node-v2/scripts/init-dev-bucket.sh

@@ -13,6 +13,6 @@ ${CLI} dev:init
 ${CLI} leader:update-bag-limit -l 7 --dev
 ${CLI} leader:update-voucher-limits -o 10000 -s 1000000000 --dev
 BUCKET_ID=`${CLI} leader:create-bucket -i=0 -a -n=10000 -s=1000000000  --dev` 
-${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev
+${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev -t=5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
 ${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev 
 ${CLI} operator:set-metadata -w 0 -i=${BUCKET_ID} -e http://localhost:3333 --dev

+ 2 - 2
storage-node-v2/scripts/run-all-commands.sh

@@ -21,7 +21,7 @@ ${CLI} leader:update-dynamic-bag-policy -n 10 -t Member --dev
 
 # Create and configure a bucket.
 BUCKET_ID=`${CLI} leader:create-bucket -i=0 --dev` # bucketId = 0
-${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev
+${CLI} operator:accept-invitation -w=0 -i=${BUCKET_ID} --dev -t=5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
 ${CLI} leader:set-bucket-limits -i=${BUCKET_ID} -o=100 -s=10000000 --dev
 ${CLI} leader:update-bucket-status -i=${BUCKET_ID} --set on --dev
 ${CLI} leader:update-bag -a=${BUCKET_ID} -i static:council --dev 
@@ -37,7 +37,7 @@ BUCKET_ID=`${CLI} leader:create-bucket -a -n=100 -s=10000000  --dev` # bucketId
 ${CLI} leader:invite-operator -i=${BUCKET_ID} -w=0  --dev 
 ${CLI} leader:cancel-invite -i=${BUCKET_ID} --dev 
 ${CLI} leader:invite-operator -i=${BUCKET_ID} -w=0  --dev 
-${CLI} operator:accept-invitation -i=${BUCKET_ID} -w=0 --dev
+${CLI} operator:accept-invitation -i=${BUCKET_ID} -w=0 --dev -t=5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
 ${CLI} leader:remove-operator -i=${BUCKET_ID}   --dev 
 
 # Toggle uploading block.

+ 7 - 1
storage-node-v2/src/commands/operator/accept-invitation.ts

@@ -26,6 +26,11 @@ export default class OperatorAcceptInvitation extends ApiCommandBase {
       required: true,
       description: 'Storage bucket ID',
     }),
+    transactorAccountId: flags.string({
+      char: 't',
+      required: true,
+      description: 'Transactor account ID (public key)',
+    }),
     ...ApiCommandBase.flags,
   }
 
@@ -34,6 +39,7 @@ export default class OperatorAcceptInvitation extends ApiCommandBase {
 
     const worker = flags.workerId
     const bucket = flags.bucketId
+    const transactorAccountId = flags.transactorAccountId
 
     logger.info('Accepting pending storage bucket invitation...')
     if (flags.dev) {
@@ -43,7 +49,7 @@ export default class OperatorAcceptInvitation extends ApiCommandBase {
     const account = this.getAccount(flags)
 
     const api = await this.getApi()
-    const success = await acceptStorageBucketInvitation(api, account, worker, bucket)
+    const success = await acceptStorageBucketInvitation(api, account, worker, bucket, transactorAccountId)
 
     this.exitAfterRuntimeCall(success)
   }

+ 22 - 11
storage-node-v2/src/services/queryNode/generated/schema.ts

@@ -91,8 +91,6 @@ export type Channel = BaseGraphQlObject & {
   categoryId?: Maybe<Scalars['String']>
   /** Reward account where revenue is sent if set. */
   rewardAccount?: Maybe<Scalars['String']>
-  /** Destination account for the prize associated with channel deletion */
-  deletionPrizeDestAccount: Scalars['String']
   /** The title of the Channel */
   title?: Maybe<Scalars['String']>
   /** The description of a Channel */
@@ -108,7 +106,9 @@ export type Channel = BaseGraphQlObject & {
   language?: Maybe<Language>
   languageId?: Maybe<Scalars['String']>
   videos: Array<Video>
+  /** Number of the block the channel was created in */
   createdInBlock: Scalars['Int']
+  collaborators: Array<Membership>
 }
 
 export type ChannelCategoriesByNameFtsOutput = {
@@ -228,7 +228,6 @@ export type ChannelCreateInput = {
   ownerCuratorGroup?: Maybe<Scalars['ID']>
   category?: Maybe<Scalars['ID']>
   rewardAccount?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount: Scalars['String']
   title?: Maybe<Scalars['String']>
   description?: Maybe<Scalars['String']>
   coverPhoto?: Maybe<Scalars['ID']>
@@ -259,8 +258,6 @@ export enum ChannelOrderByInput {
   CategoryDesc = 'category_DESC',
   RewardAccountAsc = 'rewardAccount_ASC',
   RewardAccountDesc = 'rewardAccount_DESC',
-  DeletionPrizeDestAccountAsc = 'deletionPrizeDestAccount_ASC',
-  DeletionPrizeDestAccountDesc = 'deletionPrizeDestAccount_DESC',
   TitleAsc = 'title_ASC',
   TitleDesc = 'title_DESC',
   DescriptionAsc = 'description_ASC',
@@ -284,7 +281,6 @@ export type ChannelUpdateInput = {
   ownerCuratorGroup?: Maybe<Scalars['ID']>
   category?: Maybe<Scalars['ID']>
   rewardAccount?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount?: Maybe<Scalars['String']>
   title?: Maybe<Scalars['String']>
   description?: Maybe<Scalars['String']>
   coverPhoto?: Maybe<Scalars['ID']>
@@ -325,11 +321,6 @@ export type ChannelWhereInput = {
   rewardAccount_startsWith?: Maybe<Scalars['String']>
   rewardAccount_endsWith?: Maybe<Scalars['String']>
   rewardAccount_in?: Maybe<Array<Scalars['String']>>
-  deletionPrizeDestAccount_eq?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_contains?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_startsWith?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_endsWith?: Maybe<Scalars['String']>
-  deletionPrizeDestAccount_in?: Maybe<Array<Scalars['String']>>
   title_eq?: Maybe<Scalars['String']>
   title_contains?: Maybe<Scalars['String']>
   title_startsWith?: Maybe<Scalars['String']>
@@ -359,6 +350,9 @@ export type ChannelWhereInput = {
   videos_none?: Maybe<VideoWhereInput>
   videos_some?: Maybe<VideoWhereInput>
   videos_every?: Maybe<VideoWhereInput>
+  collaborators_none?: Maybe<MembershipWhereInput>
+  collaborators_some?: Maybe<MembershipWhereInput>
+  collaborators_every?: Maybe<MembershipWhereInput>
   AND?: Maybe<Array<ChannelWhereInput>>
   OR?: Maybe<Array<ChannelWhereInput>>
 }
@@ -512,6 +506,8 @@ export type DistributionBucket = BaseGraphQlObject & {
   version: Scalars['Int']
   family: DistributionBucketFamily
   familyId: Scalars['String']
+  /** Bucket index within the family */
+  bucketIndex: Scalars['Int']
   operators: Array<DistributionBucketOperator>
   /** Whether the bucket is accepting any new bags */
   acceptingNewBags: Scalars['Boolean']
@@ -528,6 +524,7 @@ export type DistributionBucketConnection = {
 
 export type DistributionBucketCreateInput = {
   family: Scalars['ID']
+  bucketIndex: Scalars['Float']
   acceptingNewBags: Scalars['Boolean']
   distributing: Scalars['Boolean']
 }
@@ -1028,6 +1025,8 @@ export enum DistributionBucketOrderByInput {
   DeletedAtDesc = 'deletedAt_DESC',
   FamilyAsc = 'family_ASC',
   FamilyDesc = 'family_DESC',
+  BucketIndexAsc = 'bucketIndex_ASC',
+  BucketIndexDesc = 'bucketIndex_DESC',
   AcceptingNewBagsAsc = 'acceptingNewBags_ASC',
   AcceptingNewBagsDesc = 'acceptingNewBags_DESC',
   DistributingAsc = 'distributing_ASC',
@@ -1036,6 +1035,7 @@ export enum DistributionBucketOrderByInput {
 
 export type DistributionBucketUpdateInput = {
   family?: Maybe<Scalars['ID']>
+  bucketIndex?: Maybe<Scalars['Float']>
   acceptingNewBags?: Maybe<Scalars['Boolean']>
   distributing?: Maybe<Scalars['Boolean']>
 }
@@ -1065,6 +1065,12 @@ export type DistributionBucketWhereInput = {
   deletedAt_gte?: Maybe<Scalars['DateTime']>
   deletedById_eq?: Maybe<Scalars['ID']>
   deletedById_in?: Maybe<Array<Scalars['ID']>>
+  bucketIndex_eq?: Maybe<Scalars['Int']>
+  bucketIndex_gt?: Maybe<Scalars['Int']>
+  bucketIndex_gte?: Maybe<Scalars['Int']>
+  bucketIndex_lt?: Maybe<Scalars['Int']>
+  bucketIndex_lte?: Maybe<Scalars['Int']>
+  bucketIndex_in?: Maybe<Array<Scalars['Int']>>
   acceptingNewBags_eq?: Maybe<Scalars['Boolean']>
   acceptingNewBags_in?: Maybe<Array<Scalars['Boolean']>>
   distributing_eq?: Maybe<Scalars['Boolean']>
@@ -1483,6 +1489,7 @@ export type Membership = BaseGraphQlObject & {
   /** The type of subscription the member has purchased if any. */
   subscription?: Maybe<Scalars['Int']>
   channels: Array<Channel>
+  collaboratorInChannels: Array<Channel>
 }
 
 export type MembershipConnection = {
@@ -1616,6 +1623,9 @@ export type MembershipWhereInput = {
   channels_none?: Maybe<ChannelWhereInput>
   channels_some?: Maybe<ChannelWhereInput>
   channels_every?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_none?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_some?: Maybe<ChannelWhereInput>
+  collaboratorInChannels_every?: Maybe<ChannelWhereInput>
   AND?: Maybe<Array<MembershipWhereInput>>
   OR?: Maybe<Array<MembershipWhereInput>>
 }
@@ -2618,6 +2628,7 @@ export type StorageBucketOperatorStatus =
 
 export type StorageBucketOperatorStatusActive = {
   workerId: Scalars['Int']
+  transactorAccountId: Scalars['String']
 }
 
 export type StorageBucketOperatorStatusInvited = {

+ 3 - 2
storage-node-v2/src/services/runtime/extrinsics.ts

@@ -61,10 +61,11 @@ export async function acceptStorageBucketInvitation(
   api: ApiPromise,
   account: KeyringPair,
   workerId: number,
-  storageBucketId: number
+  storageBucketId: number,
+  transactorAccountId: string
 ): Promise<boolean> {
   return await extrinsicWrapper(() => {
-    const tx = api.tx.storage.acceptStorageBucketInvitation(workerId, storageBucketId)
+    const tx = api.tx.storage.acceptStorageBucketInvitation(workerId, storageBucketId, transactorAccountId)
 
     return sendAndFollowNamedTx(api, account, tx)
   })

+ 1 - 1
types/augment/all/defs.json

@@ -579,7 +579,7 @@
         "_enum": {
             "Missing": "Null",
             "InvitedStorageWorker": "WorkerId",
-            "StorageWorker": "WorkerId"
+            "StorageWorker": "(WorkerId,GenericAccountId)"
         }
     },
     "DataObject": {

+ 1 - 1
types/augment/all/types.ts

@@ -1229,7 +1229,7 @@ export interface StorageBucketOperatorStatus extends Enum {
   readonly isInvitedStorageWorker: boolean;
   readonly asInvitedStorageWorker: WorkerId;
   readonly isStorageWorker: boolean;
-  readonly asStorageWorker: WorkerId;
+  readonly asStorageWorker: ITuple<[WorkerId, GenericAccountId]>;
 }
 
 /** @name StorageBucketsPerBagValueConstraint */

+ 2 - 1
types/src/storage.ts

@@ -10,6 +10,7 @@ import {
   Option,
   u32,
   u128,
+  Tuple,
 } from '@polkadot/types'
 import { Balance } from '@polkadot/types/interfaces'
 import { RegistryTypes } from '@polkadot/types/types'
@@ -163,7 +164,7 @@ export class Voucher
 export const StorageBucketOperatorStatusDef = {
   Missing: Null,
   InvitedStorageWorker: WorkerId,
-  StorageWorker: WorkerId,
+  StorageWorker: Tuple.with([WorkerId, AccountId]),
 } as const
 export class StorageBucketOperatorStatus extends JoyEnum(StorageBucketOperatorStatusDef) {}
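
On the client side, the decoded variant is now a tuple, as reflected in the augmented types above (isStorageWorker / asStorageWorker). A minimal sketch of reading the transactor account back from chain state, assuming the Joystream types are registered with the ApiPromise and that the bucket map is exposed as api.query.storage.storageBucketById with a snake_case operator_status field:

```ts
import { ApiPromise, WsProvider } from '@polkadot/api'

// Sketch under the assumptions noted above; the `any` cast sidesteps the augmented
// storage typings, which are outside the scope of this diff.
async function readTransactor(bucketId: number): Promise<void> {
  const api = await ApiPromise.create({ provider: new WsProvider('ws://localhost:9944') })
  const bucket: any = await api.query.storage.storageBucketById(bucketId)

  const status = bucket.operator_status
  if (status.isStorageWorker) {
    // With the new tuple variant, asStorageWorker decodes as [WorkerId, GenericAccountId].
    const [workerId, transactorAccount] = status.asStorageWorker
    console.log(`worker ${workerId.toString()} -> transactor ${transactorAccount.toString()}`)
  }

  await api.disconnect()
}
```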
 

Some files were not shown because too many files changed in this diff