import { FlowProps } from '../../Flow'
import { extendDebug } from '../../Debugger'
import { WorkingGroups } from '../../WorkingGroups'
import { IStorageBucketOperatorMetadata, StorageBucketOperatorMetadata } from '@joystream/metadata-protobuf'
import { CreateInterface, createType } from '@joystream/types'
import { BagId, DynamicBagId, StaticBagId } from '@joystream/types/storage'
import _ from 'lodash'
import { Utils } from '../../utils'
import BN from 'bn.js'

type StorageBucketConfig = {
  metadata: IStorageBucketOperatorMetadata
  staticBags?: CreateInterface<StaticBagId>[]
  storageLimit: BN
  objectsLimit: number
  operatorId: number
  transactorKey: string
}

type InitStorageConfig = {
  buckets: StorageBucketConfig[]
  dynamicBagPolicy: {
    [K in keyof typeof DynamicBagId.typeDefinitions]?: number
  }
}

export const allStaticBags: CreateInterface<StaticBagId>[] = [
  'Council',
  { WorkingGroup: 'Content' },
  { WorkingGroup: 'Distribution' },
  { WorkingGroup: 'Gateway' },
  { WorkingGroup: 'OperationsAlpha' },
  { WorkingGroup: 'OperationsBeta' },
  { WorkingGroup: 'OperationsGamma' },
  { WorkingGroup: 'Storage' },
]

export const singleBucketConfig: InitStorageConfig = {
  dynamicBagPolicy: {
    'Channel': 1,
    'Member': 1,
  },
  buckets: [
    {
      metadata: { endpoint: process.env.COLOSSUS_1_URL || 'http://localhost:3333' },
      staticBags: allStaticBags,
      operatorId: parseInt(process.env.COLOSSUS_1_WORKER_ID || '0'),
      storageLimit: new BN(1_000_000_000_000),
      objectsLimit: 1_000_000_000,
      // Colossus1 transactor account
      transactorKey: process.env.COLOSSUS_1_TRANSACTOR_KEY || '5DkE5YD8m5Yzno6EH2RTBnH268TDnnibZMEMjxwYemU4XevU',
    },
  ],
}

export const doubleBucketConfig: InitStorageConfig = {
  dynamicBagPolicy: {
    'Channel': 2,
    'Member': 2,
  },
  buckets: [
    {
      metadata: { endpoint: process.env.COLOSSUS_1_URL || 'http://localhost:3333' },
      staticBags: allStaticBags,
      operatorId: parseInt(process.env.COLOSSUS_1_WORKER_ID || '0'),
      storageLimit: new BN(1_000_000_000_000),
      objectsLimit: 1_000_000_000,
      // Colossus1 transactor account
      transactorKey: process.env.COLOSSUS_1_TRANSACTOR_KEY || '5DkE5YD8m5Yzno6EH2RTBnH268TDnnibZMEMjxwYemU4XevU',
    },
    {
      metadata: { endpoint: process.env.STORAGE_2_URL || 'http://localhost:3335' },
      staticBags: allStaticBags,
      operatorId: parseInt(process.env.STORAGE_2_WORKER_ID || '1'),
      storageLimit: new BN(1_000_000_000_000),
      objectsLimit: 1_000_000_000,
      // Colossus2 transactor account
      transactorKey: process.env.COLOSSUS_2_TRANSACTOR_KEY || '5FbzYmQ3HogiEEDSXPYJe58yCcmSh3vsZLodTdBB6YuLDAj7',
    },
  ],
}

export default function createFlow({ buckets, dynamicBagPolicy }: InitStorageConfig) {
  return async function initStorage({ api }: FlowProps): Promise<void> {
    const debug = extendDebug('flow:initStorage')
    debug('Started')

    // Get the storage working group lead (signs all leader-only extrinsics below)
    const storageLeaderId = await api.getLeadWorkerId(WorkingGroups.Storage)
    const storageLeader = await api.getGroupLead(WorkingGroups.Storage)
    if (!storageLeaderId || !storageLeader) {
      throw new Error('Active storage leader is required in this flow!')
    }
    const storageLeaderKey = storageLeader.role_account_id.toString()
    // Copy the array before sorting so the caller's bucket order is not mutated
    const maxStorageLimit = [...buckets].sort((a, b) => b.storageLimit.cmp(a.storageLimit))[0].storageLimit
    const maxObjectsLimit = Math.max(...buckets.map((b) => b.objectsLimit))

    // Hire operators
    // const hireWorkersFixture = new HireWorkersFixture(api, totalBucketsNum, WorkingGroups.Distribution)
    // await new FixtureRunner(hireWorkersFixture).run()
    // const operatorIds = hireWorkersFixture.getHiredWorkers()

    const operatorIds = buckets.map((b) => createType('WorkerId', b.operatorId))
    const operatorKeys = await api.getWorkerRoleAccounts(operatorIds, WorkingGroups.Storage)

    // Set global limits and policies
    const updateDynamicBagPolicyTxs = _.entries(dynamicBagPolicy).map(([bagType, numberOfBuckets]) =>
      api.tx.storage.updateNumberOfStorageBucketsInDynamicBagCreationPolicy(
        bagType as keyof typeof DynamicBagId.typeDefinitions,
        numberOfBuckets
      )
    )
    const setMaxVoucherLimitsTx = api.tx.storage.updateStorageBucketsVoucherMaxLimits(maxStorageLimit, maxObjectsLimit)
    const setBucketPerBagLimitTx = api.tx.storage.updateStorageBucketsPerBagLimit(Math.max(5, buckets.length))
    await api.signAndSendMany(
      [...updateDynamicBagPolicyTxs, setMaxVoucherLimitsTx, setBucketPerBagLimitTx],
      storageLeaderKey
    )

    // Create buckets
    const createBucketTxs = buckets.map((b, i) =>
      api.tx.storage.createStorageBucket(operatorIds[i], true, b.storageLimit, b.objectsLimit)
    )
    const createBucketResults = await api.signAndSendMany(createBucketTxs, storageLeaderKey)
    const bucketById = new Map<number, StorageBucketConfig>()
    createBucketResults.forEach((res, i) => {
      const bucketId = api.getEvent(res, 'storage', 'StorageBucketCreated').data[0]
      bucketById.set(bucketId.toNumber(), buckets[i])
    })

    // Accept invitations
    const acceptInvitationTxs = Array.from(bucketById.entries()).map(([bucketId, bucketConfig], i) =>
      api.tx.storage.acceptStorageBucketInvitation(operatorIds[i], bucketId, bucketConfig.transactorKey)
    )
    await api.signAndSendManyByMany(acceptInvitationTxs, operatorKeys)

    // Bucket metadata and static bags
    const bucketSetupPromises = _.flatten(
      Array.from(bucketById.entries()).map(([bucketId, bucketConfig], i) => {
        const operatorId = operatorIds[i]
        const operatorKey = operatorKeys[i]
        const metadataBytes = Utils.metadataToBytes(StorageBucketOperatorMetadata, bucketConfig.metadata)
        const setMetaTx = api.tx.storage.setStorageOperatorMetadata(operatorId, bucketId, metadataBytes)
        const setMetaPromise = api.signAndSendMany([setMetaTx], operatorKey)
        // Assign the bucket to each configured static bag (add this bucket, remove none)
        const updateBagTxs = (bucketConfig.staticBags || []).map((sBagId) => {
          return api.tx.storage.updateStorageBucketsForBag(
            createType<BagId, 'BagId'>('BagId', { Static: sBagId }),
            createType('BTreeSet<StorageBucketId>', [bucketId]),
            createType('BTreeSet<StorageBucketId>', [])
          )
        })
        const updateBagsPromise = api.signAndSendMany(updateBagTxs, storageLeaderKey)
        return [updateBagsPromise, setMetaPromise]
      })
    )
    await Promise.all(bucketSetupPromises)

    debug('Done')
  }
}
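
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, kept commented out): how a test scenario
// might wire this flow in. The `scenario`/`job` harness and the import paths
// below are assumptions about the surrounding test suite, not verified APIs.
//
// import { scenario } from '../../Scenario'
// import initStorage, { singleBucketConfig } from './initStorage'
//
// scenario(async ({ job }) => {
//   // Runs the flow against a dev chain with one pre-hired bucket operator
//   // (worker 0 and the Colossus1 endpoint/keys by default, see config above)
//   job('initialize storage system', initStorage(singleBucketConfig))
// })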