
Merge branch 'giza_staging' into query-node-deployment-refactor

Mokhtar Naamani 3 years ago
commit f0ec2015c2
28 changed files with 580 additions and 346 deletions
  1. query-node/kill.sh (+1 -0)
  2. query-node/mappings/scripts/postHydraCLIInstall.ts (+15 -15)
  3. query-node/mappings/scripts/postInstall.ts (+23 -28)
  4. query-node/mappings/scripts/utils.ts (+19 -0)
  5. storage-node-v2/.eslintrc.js (+0 -1)
  6. storage-node-v2/package.json (+1 -1)
  7. storage-node-v2/src/commands/dev/sync.ts (+4 -3)
  8. storage-node-v2/src/commands/operator/set-metadata.ts (+1 -1)
  9. storage-node-v2/src/commands/server.ts (+3 -5)
  10. storage-node-v2/src/services/queryNode/api.ts (+3 -2)
  11. storage-node-v2/src/services/queryNode/generated/queries.ts (+5 -1)
  12. storage-node-v2/src/services/queryNode/queries/queries.graphql (+5 -1)
  13. storage-node-v2/src/services/sync/remoteStorageData.ts (+1 -1)
  14. storage-node-v2/src/services/sync/storageObligations.ts (+2 -4)
  15. storage-node-v2/src/services/sync/synchronizer.ts (+3 -5)
  16. storage-node-v2/src/services/sync/tasks.ts (+1 -1)
  17. storage-node-v2/src/services/webApi/controllers/common.ts (+1 -79)
  18. storage-node-v2/src/services/webApi/controllers/filesApi.ts (+18 -54)
  19. storage-node-v2/src/services/webApi/controllers/stateApi.ts (+5 -13)
  20. tests/network-tests/.env (+33 -32)
  21. tests/network-tests/.eslintrc.js (+9 -9)
  22. tests/network-tests/node-utils.sh (+141 -0)
  23. tests/network-tests/run-migration-tests.sh (+73 -88)
  24. tests/network-tests/src/Api.ts (+13 -1)
  25. tests/network-tests/src/misc/postMigrationAssertionsFlow.ts (+68 -0)
  26. tests/network-tests/src/scenarios/post-migration.ts (+6 -0)
  27. tests/network-tests/tsconfig.json (+1 -1)
  28. utils/api-scripts/src/fork-off.ts (+125 -0)

+ 1 - 0
query-node/kill.sh

@@ -11,3 +11,4 @@ docker-compose -f ../docker-compose.yml rm -vsf indexer
 docker-compose -f ../docker-compose.yml rm -vsf hydra-indexer-gateway
 docker-compose -f ../docker-compose.yml rm -vsf redis
 docker-compose -f ../docker-compose.yml rm -vsf db
+docker volume rm joystream_query-node-data

+ 15 - 15
query-node/mappings/scripts/postHydraCLIInstall.ts

@@ -1,23 +1,23 @@
 // A script to be executed post hydra-cli install, that may include patches for Hydra CLI
-import fs from 'fs'
 import path from 'path'
+import { replaceInFile } from './utils'
 
 // FIXME: Temporary fix for missing JOIN and HAVING conditions in search queries (Hydra)
 const searchServiceTemplatePath = path.resolve(
   __dirname,
   '../../codegen/node_modules/@joystream/hydra-cli/lib/src/templates/textsearch/service.ts.mst'
 )
-const searchServiceTemplateContent = fs.readFileSync(searchServiceTemplatePath).toString()
-const searchServiceTemplateContentLines = searchServiceTemplateContent.split('\n')
-searchServiceTemplateContentLines.splice(
-  searchServiceTemplateContentLines.findIndex((l) => l.match(/Add new query to queryString/)) + 1,
-  1, // remove 1 line
-  `queries = queries.concat(generateSqlQuery(repositories[index].metadata.tableName, qb.createJoinExpression(), WHERE, qb.createHavingExpression()));`
-)
-searchServiceTemplateContentLines.splice(
-  searchServiceTemplateContentLines.findIndex((l) => l.match(/const generateSqlQuery = /)),
-  3, // remove 3 lines
-  `const generateSqlQuery = (table: string, joins: string, where: string, having: string) =>
-    \`SELECT '\${table}_' || "\${table}"."id" AS unique_id FROM "\${table}" \` + joins + ' ' + where + ' ' + having;`
-)
-fs.writeFileSync(searchServiceTemplatePath, searchServiceTemplateContentLines.join('\n'))
+
+replaceInFile({
+  filePath: searchServiceTemplatePath,
+  regex: /queries = queries\.concat\(generateSqlQuery\(repositories\[index\]\.metadata\.tableName, WHERE\)\);/,
+  newContent:
+    'queries = queries.concat(generateSqlQuery(repositories[index].metadata.tableName, qb.createJoinExpression(), WHERE, qb.createHavingExpression()));',
+})
+
+replaceInFile({
+  filePath: searchServiceTemplatePath,
+  regex: /const generateSqlQuery =[\s\S]+\+ where;/,
+  newContent: `const generateSqlQuery = (table: string, joins: string, where: string, having: string) =>
+  \`SELECT '\${table}_' || "\${table}"."id" AS unique_id FROM "\${table}" \` + joins + ' ' + where + ' ' + having;`,
+})

+ 23 - 28
query-node/mappings/scripts/postInstall.ts

@@ -1,50 +1,45 @@
 // A script to be executed post query-node install, that may include workarounds in Hydra node_modules
-import fs from 'fs'
 import path from 'path'
+import { replaceInFile } from './utils'
 
 // FIXME: Temporarily remove broken sanitizeNullCharacter call
 const subscribersJsPath = path.resolve(
   __dirname,
   '../../../node_modules/@joystream/hydra-processor/lib/db/subscribers.js'
 )
-const subscribersJsContent = fs.readFileSync(subscribersJsPath).toString()
-fs.writeFileSync(
-  subscribersJsPath,
-  subscribersJsContent.replace(/sanitizeNullCharacter\(entity, field\);/g, '//sanitizeNullCharacter(entity, field)')
-)
+replaceInFile({
+  filePath: subscribersJsPath,
+  regex: /sanitizeNullCharacter\(entity, field\);/g,
+  newContent: '//sanitizeNullCharacter(entity, field)',
+})
 
 // FIXME: Temporarily replace broken relations resolution in @joystream/warthog
 const dataLoaderJsPath = path.resolve(
   __dirname,
   '../../../node_modules/@joystream/warthog/dist/middleware/DataLoaderMiddleware.js'
 )
-const dataLoaderJsContent = fs.readFileSync(dataLoaderJsPath).toString()
-const dataLoaderJsContentLines = dataLoaderJsContent.split('\n')
-dataLoaderJsContentLines.splice(
-  dataLoaderJsContentLines.findIndex((l) => l.match(/return context\.connection\.relationIdLoader/)),
-  0,
-  `return Promise.all(
+replaceInFile({
+  filePath: dataLoaderJsPath,
+  regex: /return context\.connection\.relationIdLoader[\s\S]+return group\.related;\s+\}\);\s+\}\)/,
+  newContent: `return Promise.all(
     entities.map(entity => context.connection.relationLoader.load(relation, entity))
   ).then(function (results) {
     return results.map(function (related) {
       return (relation.isManyToOne || relation.isOneToOne) ? related[0] : related
     })
-  })
-  `
-)
-fs.writeFileSync(dataLoaderJsPath, dataLoaderJsContentLines.join('\n'))
+  })`,
+})
 
 // FIXME: Temporary fix for "table name x specified more than once"
 const baseServiceJsPath = path.resolve(__dirname, '../../../node_modules/@joystream/warthog/dist/core/BaseService.js')
-const baseServiceJsContent = fs.readFileSync(baseServiceJsPath).toString()
-const baseServiceJsContentLines = baseServiceJsContent.split('\n')
-baseServiceJsContentLines.splice(
-  baseServiceJsContentLines.findIndex((l) => l.match(/function common/)) + 1,
-  4, // remove 4 lines (function body)
-  `const uuid = require('uuid/v4')
-  const foreignTableAlias = uuid().replace('-', '')
-  var foreingIdColumn = "\\"" + foreignTableAlias + "\\".\\"" + foreignColumnMap[foreignColumnName] + "\\"";
-  parameters.topLevelQb.leftJoin(foreignTableName, foreignTableAlias, localIdColumn + " = " + foreingIdColumn);
-  addWhereCondition(parameters, foreignTableAlias, foreignColumnMap);`
-)
-fs.writeFileSync(baseServiceJsPath, baseServiceJsContentLines.join('\n'))
+replaceInFile({
+  filePath: baseServiceJsPath,
+  regex: /function common\(parameters, localIdColumn, foreignTableName, foreignColumnMap, foreignColumnName\) \{[^}]+\}/,
+  newContent: `function common(parameters, localIdColumn, foreignTableName, foreignColumnMap, foreignColumnName) {
+    const uuid = require('uuid/v4')
+    const foreignTableAlias = uuid().replace('-', '')
+    var foreingIdColumn = "\\"" + foreignTableAlias + "\\".\\"" + foreignColumnMap[foreignColumnName] + "\\"";
+    parameters.topLevelQb.leftJoin(foreignTableName, foreignTableAlias, localIdColumn + " = " + foreingIdColumn);
+    addWhereCondition(parameters, foreignTableAlias, foreignColumnMap);
+  }`,
+})

+ 19 - 0
query-node/mappings/scripts/utils.ts

@@ -0,0 +1,19 @@
+import fs from 'fs'
+import { blake2AsHex } from '@polkadot/util-crypto'
+
+type ReplaceLinesInFileParams = {
+  filePath: string
+  regex: RegExp
+  newContent: string
+}
+
+export function replaceInFile({ filePath, regex, newContent }: ReplaceLinesInFileParams): void {
+  const paramsHash = blake2AsHex(filePath + '|' + regex.source + '|' + newContent)
+  const startMark = `/* BEGIN REPLACED CONTENT ${paramsHash} */`
+  const endMark = `/* END REPLACED CONTENT ${paramsHash} */`
+  const fileContent = fs.readFileSync(filePath).toString()
+  if (fileContent.includes(startMark)) {
+    return
+  }
+  fs.writeFileSync(filePath, fileContent.replace(regex, `${startMark}\n${newContent}\n${endMark}`))
+}
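
The hash-derived BEGIN/END marks make these patches idempotent: if the marks are already present, replaceInFile returns early, so repeated installs don't stack replacements. A minimal usage sketch (the target file and regex below are hypothetical):

import { replaceInFile } from './utils'

// Hypothetical patch: comment out a broken call in a vendored module.
// On the first run the match is wrapped in BEGIN/END marks derived from
// blake2AsHex(filePath | regex | newContent); on later runs the marks are
// detected and the file is left untouched.
replaceInFile({
  filePath: 'node_modules/some-package/lib/index.js', // hypothetical target
  regex: /someBrokenCall\(\);/,
  newContent: '// someBrokenCall();',
})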

+ 0 - 1
storage-node-v2/.eslintrc.js

@@ -9,7 +9,6 @@ module.exports = {
   rules: {
     'no-console': 'warn', // use dedicated logger
     'no-unused-vars': 'off', // Required by the typescript rule below
-    'prettier/prettier': 'off', // prettier-eslint conflicts inherited from @joystream/eslint-config
     '@typescript-eslint/no-unused-vars': ['error'],
     '@typescript-eslint/no-floating-promises': 'error',
   },

+ 1 - 1
storage-node-v2/package.json

@@ -137,7 +137,7 @@
     "prepack": "rm -rf lib && tsc -b && oclif-dev manifest && oclif-dev readme",
     "version": "oclif-dev readme && git add README.md",
     "build": "tsc --build tsconfig.json",
-    "format": "prettier ./src --write",
+    "format": "yarn prettier ./src --write",
     "lint": "eslint ./src --ext .ts",
     "api:edit": "openapi-editor --file ./src/api-spec/openapi.yaml --port 10021",
     "generate:types:graphql": "yarn graphql-codegen -c ./src/services/queryNode/codegen.yml",

+ 4 - 3
storage-node-v2/src/commands/dev/sync.ts

@@ -27,17 +27,18 @@ export default class DevSync extends Command {
       char: 'p',
       required: false,
       description: 'Sync workers number (max async operations in progress).',
+      default: 20,
     }),
     queryNodeEndpoint: flags.string({
       char: 'q',
       required: false,
-      description: 'Query node host and port (e.g.: some.com:8081)',
       default: 'http://localhost:8081/graphql',
+      description: 'Query node endpoint (e.g.: http://some.com:8081/graphql)',
     }),
     dataSourceOperatorUrl: flags.string({
       char: 'o',
       required: false,
-      description: 'Storage node url base (e.g.: http://some.com:8081) to get data from.',
+      description: 'Storage node url base (e.g.: http://some.com:3333) to get data from.',
       default: 'http://localhost:3333',
     }),
     uploads: flags.string({
@@ -52,7 +53,7 @@ export default class DevSync extends Command {
 
     logger.info('Syncing...')
 
-    const syncWorkersNumber = flags.syncWorkersNumber ?? 20
+    const syncWorkersNumber = flags.syncWorkersNumber
 
     try {
       await performSync(

+ 1 - 1
storage-node-v2/src/commands/operator/set-metadata.ts

@@ -31,7 +31,7 @@ export default class OperatorSetMetadata extends ApiCommandBase {
     endpoint: flags.string({
       char: 'e',
       description: 'Root distribution node endpoint',
-      exclusive: ['input'],
+      exclusive: ['jsonFile'],
     }),
     jsonFile: flags.string({
       char: 'j',

+ 3 - 5
storage-node-v2/src/commands/server.ts

@@ -13,7 +13,6 @@ import { promisify } from 'util'
 import { KeyringPair } from '@polkadot/keyring/types'
 import ExitCodes from './../command-base/ExitCodes'
 import { CLIError } from '@oclif/errors'
-import { Worker } from '@joystream/types/working-group'
 import fs from 'fs'
 const fsPromises = fs.promises
 
@@ -179,10 +178,10 @@ async function runSyncWithInterval(
   syncWorkersNumber: number,
   syncIntervalMinutes: number
 ) {
-  const sleepIntevalInSeconds = syncIntervalMinutes * 60 * 1000
+  const sleepInterval = syncIntervalMinutes * 60 * 1000
   while (true) {
     logger.info(`Sync paused for ${syncIntervalMinutes} minute(s).`)
-    await sleep(sleepIntevalInSeconds)
+    await sleep(sleepInterval)
     try {
       logger.info(`Resume syncing....`)
       await performSync(api, workerId, syncWorkersNumber, queryNodeUrl, uploadsDirectory, tempDirectory)
@@ -226,8 +225,7 @@ async function recreateTempDirectory(uploadsDirectory: string, tempDirName: stri
  */
 async function verifyWorkerId(api: ApiPromise, workerId: number, account: KeyringPair): Promise<void> {
   // Cast Codec type to Worker type
-  const workerObj = await api.query.storageWorkingGroup.workerById(workerId)
-  const worker = workerObj as Worker
+  const worker = await api.query.storageWorkingGroup.workerById(workerId)
 
   if (worker.role_account_id.toString() !== account.address) {
     throw new CLIError(`Provided worker ID doesn't match the Joystream account.`, {

+ 3 - 2
storage-node-v2/src/services/queryNode/api.ts

@@ -19,6 +19,7 @@ import {
   GetStorageBucketsConnection,
   GetStorageBucketsConnectionQuery,
   GetStorageBucketsConnectionQueryVariables,
+  GetStorageBucketDetailsByWorkerId,
 } from './generated/queries'
 import { Maybe, StorageBagWhereInput } from './generated/schema'
 
@@ -164,12 +165,12 @@ export class QueryNodeApi {
    *
    * @param workerId - worker ID
    */
-  public async getStorageBucketDetailsByWorkerId(workerId: string): Promise<Array<StorageBucketIdsFragment>> {
+  public async getStorageBucketIdsByWorkerId(workerId: string): Promise<Array<StorageBucketIdsFragment>> {
     const result = await this.multipleEntitiesWithPagination<
       StorageBucketIdsFragment,
       GetStorageBucketDetailsByWorkerIdQuery,
       GetStorageBucketDetailsByWorkerIdQueryVariables
-    >(GetStorageBucketsConnection, { workerId, limit: MAX_RESULTS_PER_QUERY }, 'storageBucketsConnection')
+    >(GetStorageBucketDetailsByWorkerId, { workerId, limit: MAX_RESULTS_PER_QUERY }, 'storageBucketsConnection')
 
     if (!result) {
       return []

+ 5 - 1
storage-node-v2/src/services/queryNode/generated/queries.ts

@@ -122,7 +122,11 @@ export const DataObjectDetails = gql`
 `
 export const GetStorageBucketsConnection = gql`
   query getStorageBucketsConnection($limit: Int, $cursor: String) {
-    storageBucketsConnection(first: $limit, after: $cursor) {
+    storageBucketsConnection(
+      first: $limit
+      after: $cursor
+      where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive" } }
+    ) {
       edges {
         cursor
         node {

+ 5 - 1
storage-node-v2/src/services/queryNode/queries/queries.graphql

@@ -4,7 +4,11 @@ fragment StorageBucketIds on StorageBucket {
 }
 
 query getStorageBucketsConnection($limit: Int, $cursor: String) {
-  storageBucketsConnection(first: $limit, after: $cursor) {
+  storageBucketsConnection(
+    first: $limit
+    after: $cursor
+    where: { operatorStatus_json: { isTypeOf_eq: "StorageBucketOperatorStatusActive" } }
+  ) {
     edges {
       cursor
       node {

+ 1 - 1
storage-node-v2/src/services/sync/remoteStorageData.ts

@@ -4,7 +4,7 @@ import logger from '../logger'
 import NodeCache from 'node-cache'
 
 // Expiration period in seconds for the local cache.
-const ExpirationPeriod: number = 5 * 60 // minutes
+const ExpirationPeriod: number = 3 * 60 // 3 minutes, expressed in seconds
 
 // Max data entries in local cache
 const MaxEntries = 10000

+ 2 - 4
storage-node-v2/src/services/sync/storageObligations.ts

@@ -129,7 +129,7 @@ export async function getStorageObligationsFromRuntime(
 export async function getStorageBucketIdsByWorkerId(queryNodeUrl: string, workerId: number): Promise<string[]> {
   const api = new QueryNodeApi(queryNodeUrl)
 
-  const idFragments = await api.getStorageBucketDetailsByWorkerId(workerId.toString())
+  const idFragments = await api.getStorageBucketIdsByWorkerId(workerId.toString())
   const ids = idFragments.map((frag) => frag.id)
 
   return ids
@@ -161,7 +161,7 @@ async function getAllBuckets(api: QueryNodeApi): Promise<StorageBucketDetailsFra
 
   return await getAllObjectsWithPaging(
     'get all storage buckets',
-    async (offset, limit) => await api.getStorageBucketDetails(ids, offset, limit)
+    async (offset, limit) => await api.getStorageBucketDetails(ids.slice(offset, offset + limit), 0, limit)
   )
 }
 
@@ -209,8 +209,6 @@ async function getAllObjectsWithPaging<T>(
     resultPart = await query(offset, limit)
     offset += limit
     result.push(...resultPart)
-
-    if (resultPart.length < limit) break
   } while (resultPart.length > 0)
 
   return result
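
With the early break on a short page removed, the loop above terminates only once a page comes back empty, and getAllBuckets now slices the id list client-side instead of forwarding the offset to the query node. A condensed sketch of the resulting loop, with the query callback standing in for the real GraphQL fetch:

// Condensed paging loop after this change (page size and callback are stand-ins).
async function getAllWithPaging<T>(query: (offset: number, limit: number) => Promise<T[]>): Promise<T[]> {
  const limit = 1000 // assumed page size
  const result: T[] = []
  let offset = 0
  let page: T[] = []
  do {
    page = await query(offset, limit)
    offset += limit
    result.push(...page)
  } while (page.length > 0) // stop only on an empty page, not a short one
  return result
}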

+ 3 - 5
storage-node-v2/src/services/sync/synchronizer.ts

@@ -1,7 +1,7 @@
 import { getStorageObligationsFromRuntime, DataObligations } from './storageObligations'
 import logger from '../../services/logger'
 import { getDataObjectIDs } from '../../services/caching/localDataObjects'
-import { SyncTask, DownloadFileTask, DeleteLocalFileTask, PrepareDownloadFileTask } from './tasks'
+import { SyncTask, DownloadFileTask, PrepareDownloadFileTask } from './tasks'
 import { WorkingStack, TaskProcessorSpawner, TaskSink } from './workingProcess'
 import _ from 'lodash'
 import { ApiPromise } from '@polkadot/api'
@@ -46,11 +46,10 @@ export async function performSync(
   const added = _.difference(requiredIds, files)
   const deleted = _.difference(files, requiredIds)
 
-  logger.debug(`Sync - added objects: ${added.length}`)
-  logger.debug(`Sync - deleted objects: ${deleted.length}`)
+  logger.debug(`Sync - new objects: ${added.length}`)
+  logger.debug(`Sync - obsolete objects: ${deleted.length}`)
 
   const workingStack = new WorkingStack()
-  const deletedTasks = deleted.map((fileName) => new DeleteLocalFileTask(uploadDirectory, fileName))
 
   let addedTasks: SyncTask[]
   if (operatorUrl === undefined) {
@@ -72,7 +71,6 @@ export async function performSync(
   const processSpawner = new TaskProcessorSpawner(workingStack, asyncWorkersNumber)
 
   await workingStack.add(addedTasks)
-  await workingStack.add(deletedTasks)
 
   await processSpawner.process()
   logger.info('Sync ended.')

+ 1 - 1
storage-node-v2/src/services/sync/tasks.ts

@@ -100,7 +100,7 @@ export class DownloadFileTask implements SyncTask {
       const timeoutMs = 30 * 60 * 1000 // 30 min for large files (~ 10 GB)
       // Casting because of:
       // https://stackoverflow.com/questions/38478034/pipe-superagent-response-to-express-response
-      const request = superagent.get(this.url).timeout(timeoutMs) as unknown as NodeJS.ReadableStream
+      const request = (superagent.get(this.url).timeout(timeoutMs) as unknown) as NodeJS.ReadableStream
       const fileStream = fs.createWriteStream(tempFilePath)
 
       request.on('response', (res) => {

+ 1 - 79
storage-node-v2/src/services/webApi/controllers/common.ts

@@ -28,84 +28,6 @@ export class ServerError extends WebApiError {
   }
 }
 
-/**
- * Returns a directory for file uploading from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-export function getUploadsDir(res: express.Response<unknown, AppConfig>): string {
-  if (res.locals.uploadsDir) {
-    return res.locals.uploadsDir
-  }
-
-  throw new ServerError('No upload directory path loaded.')
-}
-
-/**
- * Returns a directory for temporary file uploading from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-export function getTempFileUploadingDir(res: express.Response<unknown, AppConfig>): string {
-  if (res.locals.tempFileUploadingDir) {
-    return res.locals.tempFileUploadingDir
-  }
-
-  throw new ServerError('No temporary uploading directory path loaded.')
-}
-
-/**
- * Returns worker ID from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-export function getWorkerId(res: express.Response<unknown, AppConfig>): number {
-  if (res.locals.workerId || res.locals.workerId === 0) {
-    return res.locals.workerId
-  }
-
-  throw new ServerError('No Joystream worker ID loaded.')
-}
-
-/**
- * Returns the QueryNode URL from the starting parameters.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-export function getQueryNodeUrl(res: express.Response<unknown, AppConfig>): string {
-  if (res.locals.queryNodeEndpoint) {
-    return res.locals.queryNodeEndpoint
-  }
-
-  throw new ServerError('No Query Node URL loaded.')
-}
-
-/**
- * Returns a command config.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-export function getCommandConfig(res: express.Response<unknown, AppConfig>): {
-  version: string
-  userAgent: string
-} {
-  if (res.locals.process) {
-    return res.locals.process
-  }
-
-  throw new ServerError('Cannot load command config.')
-}
-
 /**
  * Handles errors and sends a response.
  *
@@ -146,7 +68,7 @@ export function getHttpStatusCodeByError(err: Error): number {
   }
 
   if (err instanceof ExtrinsicFailedError) {
-    return 400
+    return 500
   }
 
   if (err instanceof WebApiError) {
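
With this change, failed extrinsics surface as internal server errors (500) rather than client errors (400). A sketch of how the mapping plausibly reads after the change; the WebApiError branch is truncated above, and its returning the error's own status code is an assumption:

// A sketch, not the verbatim function. ExtrinsicFailedError and WebApiError
// are the classes used in this file; httpStatusCode is an assumed field.
function getHttpStatusCodeByErrorSketch(err: Error): number {
  if (err instanceof ExtrinsicFailedError) {
    return 500 // runtime extrinsic failures are treated as server-side errors
  }
  if (err instanceof WebApiError) {
    return err.httpStatusCode
  }
  return 500 // anything unrecognized defaults to an internal error
}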

+ 18 - 54
storage-node-v2/src/services/webApi/controllers/filesApi.ts

@@ -13,7 +13,6 @@ import { createNonce, getTokenExpirationTime } from '../../caching/tokenNonceKee
 import { getFileInfo } from '../../helpers/fileInfo'
 import { BagId } from '@joystream/types/storage'
 import logger from '../../logger'
-import { KeyringPair } from '@polkadot/keyring/types'
 import { ApiPromise } from '@polkadot/api'
 import * as express from 'express'
 import fs from 'fs'
@@ -22,17 +21,7 @@ import send from 'send'
 import { hexToString } from '@polkadot/util'
 import { parseBagId } from '../../helpers/bagTypes'
 import { timeout } from 'promise-timeout'
-import {
-  getUploadsDir,
-  getWorkerId,
-  getQueryNodeUrl,
-  WebApiError,
-  ServerError,
-  getCommandConfig,
-  sendResponseWithError,
-  getHttpStatusCodeByError,
-  AppConfig,
-} from './common'
+import { WebApiError, sendResponseWithError, getHttpStatusCodeByError, AppConfig } from './common'
 import { getStorageBucketIdsByWorkerId } from '../../sync/storageObligations'
 import { Membership } from '@joystream/types/members'
 const fsPromises = fs.promises
@@ -43,7 +32,7 @@ const fsPromises = fs.promises
 export async function getFile(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
   try {
     const dataObjectId = getDataObjectId(req)
-    const uploadsDir = getUploadsDir(res)
+    const uploadsDir = res.locals.uploadsDir
     const fullPath = path.resolve(uploadsDir, dataObjectId)
 
     const fileInfo = await getFileInfo(fullPath)
@@ -74,7 +63,7 @@ export async function getFile(req: express.Request, res: express.Response<unknow
 export async function getFileHeaders(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
   try {
     const dataObjectId = getDataObjectId(req)
-    const uploadsDir = getUploadsDir(res)
+    const uploadsDir = res.locals.uploadsDir
     const fullPath = path.resolve(uploadsDir, dataObjectId)
     const fileInfo = await getFileInfo(fullPath)
     const fileStats = await fsPromises.stat(fullPath)
@@ -100,21 +89,21 @@ export async function uploadFile(req: express.Request, res: express.Response<unk
   try {
     const fileObj = getFileObject(req)
     cleanupFileName = fileObj.path
-    const queryNodeUrl = getQueryNodeUrl(res)
-    const workerId = getWorkerId(res)
+    const queryNodeUrl = res.locals.queryNodeEndpoint
+    const workerId = res.locals.workerId
 
     const [, hash] = await Promise.all([
       verifyBucketId(queryNodeUrl, workerId, uploadRequest.storageBucketId),
       hashFile(fileObj.path),
     ])
 
-    const api = getApi(res)
+    const api = res.locals.api
     const bagId = parseBagId(uploadRequest.bagId)
     const accepted = await verifyDataObjectInfo(api, bagId, uploadRequest.dataObjectId, fileObj.size, hash)
 
     // Prepare new file name
     const dataObjectId = uploadRequest.dataObjectId.toString()
-    const uploadsDir = getUploadsDir(res)
+    const uploadsDir = res.locals.uploadsDir
     const newPath = path.join(uploadsDir, dataObjectId)
 
     registerNewDataObjectId(dataObjectId)
@@ -125,9 +114,14 @@ export async function uploadFile(req: express.Request, res: express.Response<unk
     cleanupFileName = newPath
 
     if (!accepted) {
-      await acceptPendingDataObjects(api, bagId, getAccount(res), workerId, uploadRequest.storageBucketId, [
-        uploadRequest.dataObjectId,
-      ])
+      await acceptPendingDataObjects(
+        api,
+        bagId,
+        res.locals.storageProviderAccount,
+        workerId,
+        uploadRequest.storageBucketId,
+        [uploadRequest.dataObjectId]
+      )
     } else {
       logger.warn(
         `Received already accepted data object. DataObjectId = ${uploadRequest.dataObjectId} WorkerId = ${workerId}`
@@ -152,9 +146,9 @@ export async function authTokenForUploading(
   res: express.Response<unknown, AppConfig>
 ): Promise<void> {
   try {
-    const account = getAccount(res)
+    const account = res.locals.storageProviderAccount
     const tokenRequest = getTokenRequest(req)
-    const api = getApi(res)
+    const api = res.locals.api
 
     await validateTokenRequest(api, tokenRequest)
 
@@ -193,36 +187,6 @@ function getFileObject(req: express.Request): Express.Multer.File {
   throw new WebApiError('No file uploaded', 400)
 }
 
-/**
- * Returns a KeyPair instance from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-function getAccount(res: express.Response<unknown, AppConfig>): KeyringPair {
-  if (res.locals.storageProviderAccount) {
-    return res.locals.storageProviderAccount
-  }
-
-  throw new ServerError('No Joystream account loaded.')
-}
-
-/**
- * Returns API promise from the response.
- *
- * @remarks
- * This is a helper function. It parses the response object for a variable and
- * throws an error on failure.
- */
-function getApi(res: express.Response<unknown, AppConfig>): ApiPromise {
-  if (res.locals.api) {
-    return res.locals.api
-  }
-
-  throw new ServerError('No Joystream API loaded.')
-}
-
 /**
  * Returns data object ID from the request.
  *
@@ -339,7 +303,7 @@ async function cleanupFileOnError(cleanupFileName: string, error: string): Promi
  */
 export async function getVersion(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
   try {
-    const config = getCommandConfig(res)
+    const config = res.locals.process
 
     // Copy from an object, because the actual object could contain more data.
     res.status(200).json({
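
The deleted helper getters are made redundant by express's typed locals: the second type parameter of express.Response types res.locals, so fields like res.locals.uploadsDir are checked at compile time instead of being validated by runtime getters. A minimal sketch of the pattern; the AppConfig shape below is inferred from the fields the handlers above access, not its actual definition:

import * as express from 'express'
import { ApiPromise } from '@polkadot/api'
import { KeyringPair } from '@polkadot/keyring/types'

// Assumed shape, reconstructed from the handlers above.
type AppConfig = {
  api: ApiPromise
  storageProviderAccount: KeyringPair
  workerId: number
  uploadsDir: string
  tempFileUploadingDir: string
  queryNodeEndpoint: string
  process: { version: string; userAgent: string }
}

// Typing res.locals via the second Response type parameter.
function exampleHandler(_req: express.Request, res: express.Response<unknown, AppConfig>): void {
  const uploadsDir = res.locals.uploadsDir // statically typed as string
  res.status(200).json({ uploadsDir })
}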

+ 5 - 13
storage-node-v2/src/services/webApi/controllers/stateApi.ts

@@ -2,15 +2,7 @@ import { getDataObjectIDs } from '../../../services/caching/localDataObjects'
 import * as express from 'express'
 import _ from 'lodash'
 import { getDataObjectIDsByBagId } from '../../sync/storageObligations'
-import {
-  getUploadsDir,
-  getTempFileUploadingDir,
-  getQueryNodeUrl,
-  WebApiError,
-  getCommandConfig,
-  sendResponseWithError,
-  AppConfig,
-} from './common'
+import { WebApiError, sendResponseWithError, AppConfig } from './common'
 import fastFolderSize from 'fast-folder-size'
 import { promisify } from 'util'
 import fs from 'fs'
@@ -52,8 +44,8 @@ export async function getLocalDataStats(
   res: express.Response<unknown, AppConfig>
 ): Promise<void> {
   try {
-    const uploadsDir = getUploadsDir(res)
-    const tempFileDir = getTempFileUploadingDir(res)
+    const uploadsDir = res.locals.uploadsDir
+    const tempFileDir = res.locals.tempFileUploadingDir
     const fastFolderSizeAsync = promisify(fastFolderSize)
 
     const tempFolderExists = fs.existsSync(tempFileDir)
@@ -98,7 +90,7 @@ export async function getLocalDataObjectsByBagId(
   res: express.Response<unknown, AppConfig>
 ): Promise<void> {
   try {
-    const queryNodeUrl = getQueryNodeUrl(res)
+    const queryNodeUrl = res.locals.queryNodeEndpoint
     const bagId = getBagId(req)
 
     const [ids, requiredIds] = await Promise.all([
@@ -119,7 +111,7 @@ export async function getLocalDataObjectsByBagId(
  */
 export async function getVersion(req: express.Request, res: express.Response<unknown, AppConfig>): Promise<void> {
   try {
-    const config = getCommandConfig(res)
+    const config = res.locals.process
 
     // Copy from an object, because the actual object could contain more data.
     res.status(200).json({

+ 33 - 32
tests/network-tests/.env

@@ -1,61 +1,62 @@
 # Address of the Joystream node.
-NODE_URL = ws://127.0.0.1:9944
+NODE_URL=ws://127.0.0.1:9944
 # Address of the Joystream query node.
-QUERY_NODE_URL = http://127.0.0.1:8081/graphql
+QUERY_NODE_URL=http://127.0.0.1:8081/graphql
 # Account which is expected to provide sufficient funds to test accounts.
-TREASURY_ACCOUNT_URI = //Alice
+TREASURY_ACCOUNT_URI=//Alice
 # Sudo Account
-SUDO_ACCOUNT_URI = //Alice
+SUDO_ACCOUNT_URI=//Alice
 # Amount of members able to buy membership in membership creation test.
-MEMBERSHIP_CREATION_N = 2
+MEMBERSHIP_CREATION_N=2
 # ID of the membership paid terms used in membership creation test.
-MEMBERSHIP_PAID_TERMS = 0
+MEMBERSHIP_PAID_TERMS=0
 # Council stake amount for first K accounts in council election test.
-COUNCIL_STAKE_GREATER_AMOUNT = 3000
+COUNCIL_STAKE_GREATER_AMOUNT=3000
 # Council stake amount for first the rest participants in council election test.
-COUNCIL_STAKE_LESSER_AMOUNT = 2000
+COUNCIL_STAKE_LESSER_AMOUNT=2000
 # Number of members with greater stake in council election test.
-COUNCIL_ELECTION_K = 2
+COUNCIL_ELECTION_K=2
 # Balance to spend using spending proposal
-SPENDING_BALANCE = 1000
+SPENDING_BALANCE=1000
 # Minting capacity increment for content working group minting capacity test.
-MINTING_CAPACITY_INCREMENT = 20
+MINTING_CAPACITY_INCREMENT=20
 # Minting capacity for council mint for spending proposal.
-COUNCIL_MINTING_CAPACITY = 100000
+COUNCIL_MINTING_CAPACITY=100000
 # Stake amount for Rome runtime upgrade proposal
-RUNTIME_UPGRADE_PROPOSAL_STAKE = 100000
+RUNTIME_UPGRADE_PROPOSAL_STAKE=100000
 # Validator count increment for Validator count test.
-VALIDATOR_COUNT_INCREMENT = 2
+VALIDATOR_COUNT_INCREMENT=2
 # Constantinople runtime path
-RUNTIME_WASM_PATH = ../../target/release/wbuild/joystream-node-runtime/joystream_node_runtime.compact.wasm
+RUNTIME_WASM_PATH=../../target/release/wbuild/joystream-node-runtime/joystream_node_runtime.compact.wasm
 # Working group size N
-WORKING_GROUP_N = 3
+WORKING_GROUP_N=3
 # Working group application stake
-WORKING_GROUP_APPLICATION_STAKE = 10
+WORKING_GROUP_APPLICATION_STAKE=10
 # Working group role stake
-WORKING_GROUP_ROLE_STAKE = 10
+WORKING_GROUP_ROLE_STAKE=10
 # Reward interval for working group tests
-LONG_REWARD_INTERVAL = 99999
+LONG_REWARD_INTERVAL=99999
 # First reward interval for working group reward test
-SHORT_FIRST_REWARD_INTERVAL = 3
+SHORT_FIRST_REWARD_INTERVAL=3
 # Reward interval for working group reward test
-SHORT_REWARD_INTERVAL = 3
+SHORT_REWARD_INTERVAL=3
 # Payout amount for working group tests
-PAYOUT_AMOUNT = 3
+PAYOUT_AMOUNT=3
 # Payout amount for leader-related proposals tests
-ALTERED_PAYOUT_AMOUNT = 7
+ALTERED_PAYOUT_AMOUNT=7
 # Mint capacity for storage working group
-STORAGE_WORKING_GROUP_MINTING_CAPACITY = 100000
+STORAGE_WORKING_GROUP_MINTING_CAPACITY=100000
 # Default unstaking period for storage working group
-STORAGE_WORKING_GROUP_UNSTAKING_PERIOD = 1
+STORAGE_WORKING_GROUP_UNSTAKING_PERIOD=1
 # Slash value for manage working group lead testing scenario
-SLASH_AMOUNT = 2
+SLASH_AMOUNT=2
 # Stake decrement amount for manage working group lead testing scenario
-STAKE_DECREMENT = 3
+STAKE_DECREMENT=3
 # Mint capacity increment value for working group mint capacity test
-MINT_CAPACITY_INCREMENT = 1000
-# Mini-secret or mnemonic used in SURI for deterministic key derivation
-SURI_MINI_SECRET = ""
-# The starting key id to use when running a scenario. This will allow scenario
+MINT_CAPACITY_INCREMENT=1000
 # to be able to use all accounts generated in a prior scenario run against the same chain
-START_KEY_ID = 0
+START_KEY_ID=0
+# Mini-secret or mnemonic used in SURI for deterministic key derivation
+SURI_MINI_SECRET=""
+# Storage node address to download content from
+STORAGE_NODE_URL=http://localhost:3001/asset/v0

+ 9 - 9
tests/network-tests/.eslintrc.js

@@ -1,12 +1,12 @@
 module.exports = {
-    env: {
-        node: true,
-    },
+  env: {
+    node: true,
+  },
   rules: {
-      'no-async-promise-executor': 'off',
-      'no-useless-return': 'off',
-      'new-cap': 'off',
-      // Disabled because of the false positive bug: https://github.com/eslint/eslint/issues/11899
-      'require-atomic-updates': 'off',
-  }
+    'no-async-promise-executor': 'off',
+    'no-useless-return': 'off',
+    'new-cap': 'off',
+    // Disabled because of the false positive bug: https://github.com/eslint/eslint/issues/11899
+    'require-atomic-updates': 'off',
+  },
 }

+ 141 - 0
tests/network-tests/node-utils.sh

@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+# style reference https://google.github.io/styleguide/shellguide.html
+
+set -e
+
+SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
+cd $SCRIPT_PATH
+
+# Log only to stderr
+# Only output from this script should be the container id of the node at the very end
+
+# Location that will be mounted as the /data volume in containers
+# This is where the initial members and balances files and generated chainspec files will be located.
+export DATA_PATH=${DATA_PATH:=$(pwd)/data}
+mkdir -p ${DATA_PATH}
+
+# Initial account balance for sudo account
+SUDO_INITIAL_BALANCE=${SUDO_INITIAL_BALANCE:=100000000}
+SUDO_ACCOUNT_URI=${SUDO_ACCOUNT_URI:="//Alice"}
+SUDO_ACCOUNT=$(docker run --rm --pull=always docker.io/parity/subkey:2.0.1 inspect ${SUDO_ACCOUNT_URI} --output-type json | jq .ss58Address -r)
+
+# Source of funds for all new accounts that are created in the tests.
+TREASURY_INITIAL_BALANCE=${TREASURY_INITIAL_BALANCE:=100000000}
+TREASURY_ACCOUNT_URI=${TREASURY_ACCOUNT_URI:=$SUDO_ACCOUNT_URI}
+TREASURY_ACCOUNT=$(docker run --rm --pull=always docker.io/parity/subkey:2.0.1 inspect ${TREASURY_ACCOUNT_URI} --output-type json | jq .ss58Address -r)
+
+>&2 echo "sudo account from suri: ${SUDO_ACCOUNT}"
+>&2 echo "treasury account from suri: ${TREASURY_ACCOUNT}"
+
+# Prevent joystream cli from prompting
+export AUTO_CONFIRM=true
+
+export JOYSTREAM_NODE_TAG=${RUNTIME_TAG}
+
+#######################################
+# create initial-balances.json & initial-members.json files
+# Globals:
+#   SUDO_INITIAL_BALANCES
+#   SUDO_ACCOUNT
+#   TREASURY_ACCOUNT
+#   TREASURY_INITIAL_BALANCE
+#   DATA_PATH
+# Arguments:
+#   None
+#######################################
+function create_initial_config {
+    echo "{
+  \"balances\":[
+    [\"$SUDO_ACCOUNT\", $SUDO_INITIAL_BALANCE],
+    [\"$TREASURY_ACCOUNT\", $TREASURY_INITIAL_BALANCE]
+  ]
+}" > ${DATA_PATH}/initial-balances.json
+
+    # Remember: if there are initial members at genesis, the query-node needs to be bootstrapped,
+    # or any events processed for these members will cause the processor to fail.
+    if [ "${MAKE_SUDO_MEMBER}" == true ]
+    then
+	echo "
+    [{
+      \"member_id\":0,
+      \"root_account\":\"$SUDO_ACCOUNT\",
+      \"controller_account\":\"$SUDO_ACCOUNT\",
+      \"handle\":\"sudosudo\",
+      \"avatar_uri\":\"https://sudo.com/avatar.png\",
+      \"about\":\"Sudo\",
+      \"registered_at_time\":0
+    }]
+  " > ${DATA_PATH}/initial-members.json
+    else
+	echo "[]" > ${DATA_PATH}/initial-members.json
+    fi
+}
+
+#######################################
+# create human-readable chainspec file
+# Globals:
+#   SUDO_ACCOUNT
+#   DATA_PATH
+# Arguments:
+#   None
+#######################################
+function create_chainspec_file {
+    # Create a chain spec file
+    docker run --rm -v ${DATA_PATH}:/data --entrypoint ./chain-spec-builder \
+	   joystream/node:${RUNTIME_TAG} \
+	   new \
+	   --authority-seeds Alice \
+	   --sudo-account ${SUDO_ACCOUNT} \
+	   --deployment dev \
+	   --chain-spec-path /data/chain-spec.json \
+	   --initial-balances-path /data/initial-balances.json \
+	   --initial-members-path /data/initial-members.json
+}
+
+#######################################
+# convert human-readable chainspec into
+# raw chainspec
+# Globals:
+#   DATA_PATH
+# Arguments:
+#   None
+#######################################
+function convert_chainspec {
+    docker run --rm -v ${DATA_PATH}:/data joystream/node:${RUNTIME_TAG} build-spec \
+	   --raw --disable-default-bootnode \
+	   --chain /data/chain-spec.json > ${DATA_PATH}/chain-spec-raw.json
+}
+
+#######################################
+# cleanup docker logs and shuts down container
+# Globals:
+#   CONTAINER_ID
+# Arguments:
+#   None
+#######################################
+function cleanup() {
+    # if [[ -z $CONTAINER_ID ]]; then
+    # 	docker logs ${CONTAINER_ID} --tail 15
+    # fi
+    docker-compose -f ../../docker-compose.yml down -v
+    find ./assets/ -name '[A-Z0-9]*__rejectedContent.json' -delete
+    rm -rf $DATA_PATH
+}
+
+#######################################
+# Start a chain with generated chain spec
+# Globals:
+#   DATA_PATH
+# Arguments:
+#   None
+#######################################
+function start_node {
+    docker-compose -f ../../docker-compose.yml run \
+		   -d -v ${DATA_PATH}:/spec --name "${JOYSTREAM_NODE_TAG}" \
+		   -p 9944:9944 -p 9933:9933 joystream-node \
+		   --alice --validator --unsafe-ws-external --unsafe-rpc-external \
+		   --rpc-methods Unsafe --rpc-cors=all -l runtime \
+		   --chain /spec/chain-spec-raw.json \
+		   --base-path /data
+}

+ 73 - 88
tests/network-tests/run-migration-tests.sh

@@ -4,97 +4,82 @@ set -e
 SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
 cd $SCRIPT_PATH
 
-# Location to store runtime WASM for runtime upgrade
-DATA_PATH=$PWD/data
-
-# The joystream/node docker image tag to start chain
-export RUNTIME=${RUNTIME:=latest}
-
 # The joystream/node docker image tag which contains WASM runtime to upgrade chain with
-TARGET_RUNTIME=${TARGET_RUNTIME:=latest}
-
-# Prevent joystream cli from prompting
-export AUTO_CONFIRM=true
-
-# Create chainspec with Alice (sudo) as member so we can use her in joystream-cli
-CONTAINER_ID=$(MAKE_SUDO_MEMBER=true ./run-test-node-docker.sh)
-
-function cleanup() {
-    docker logs ${CONTAINER_ID} --tail 15
-    docker-compose -f ../../docker-compose.yml down -v
-    rm ./assets/TestChannel__rejectedContent.json || true
-    rm ./assets/TestVideo__rejectedContent.json || true
+TARGET_RUNTIME_TAG=${TARGET_RUNTIME_TAG:=latest}
+# The joystream/node docker image tag to start the chain with
+RUNTIME_TAG=${RUNTIME_TAG:=sumer}
+# Whether to run post-migration assertions via typescript scenarios
+POST_MIGRATION_ASYNC_ASSERTIONS=${POST_MIGRATION_ASYNC_ASSERTIONS:=true}
+# source common function used for node setup
+source ./node-utils.sh
+
+#######################################
+# use fork-off to generate a chainspec file with the current state of the live chain
+# Globals:
+#   DATA_PATH
+# Arguments:
+#   None
+#######################################
+function fork_off_init() {
+    # chain-spec-raw.json is expected to already exist at this point
+
+    if ! [[ -f ${DATA_PATH}/storage.json ]]; then
+        curl http://testnet-rpc-3-uk.joystream.org:9933 -H \
+            "Content-type: application/json" -d \
+            '{"jsonrpc":"2.0","id":1,"method":"state_getPairs","params":["0x"]}' \
+            > ${DATA_PATH}/storage.json
+    fi
+
+    if ! [[ -f ${DATA_PATH}/schema.json ]]; then
+        cp $SCRIPT_PATH/../../types/augment/all/defs.json ${DATA_PATH}/schema.json
+    fi
+
+    id=$(docker create joystream/node:${TARGET_RUNTIME_TAG})
+    docker cp $id:/joystream/runtime.compact.wasm ${DATA_PATH}/runtime.wasm
+
+    # RPC endpoint for live RUNTIME testnet 
+    WS_RPC_ENDPOINT="wss://testnet-rpc-3-uk.joystream.org" \
+        yarn workspace api-scripts tsnode-strict src/fork-off.ts
 }
 
-function pre_migration_hook() {
-  sleep 10 # needed otherwise docker image won't be ready yet
-  # Display runtime version
-  yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
-
-  # assume older version of joystream-cli is installed globally. So we run these commands to
-  # work against older runtime. Assert it is version  `@joystream/cli/0.5.1` ?
-  joystream-cli --version
-
-  joystream-cli account:choose --address 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY # Alice
-  echo "creating 1 channel"
-  joystream-cli content:createChannel --input=./assets/TestChannel.json --context=Member || true
-  echo "adding 1 video to the above channel"
-  joystream-cli content:createVideo -c 1 --input=./assets/TestVideo.json || true
-
-  # Confirm channel and video created successfully
-  joystream-cli content:videos 1
-  joystream-cli content:channel 1
+function export_chainspec_file_to_disk() {
+    echo "**** Initializing node database by exporting state ****"
+    # write the initial genesis state to db, in order to avoid waiting for an arbitrary amount of time 
+    docker-compose -f ../../docker-compose.yml run \
+		   -v ${DATA_PATH}:/spec joystream-node export-state \
+		   --chain /spec/chain-spec-raw.json \
+		   --base-path /data --pruning archive > ${DATA_PATH}/exported-state.json
 }
 
-function post_migration_hook() {
-  echo "*** verify existence of the 5 new groups ***"
-  yarn joystream-cli working-groups:overview --group=operationsAlpha
-  yarn joystream-cli working-groups:overview --group=operationsBeta
-  yarn joystream-cli working-groups:overview --group=operationsGamma
-  yarn joystream-cli working-groups:overview --group=curators
-  yarn joystream-cli working-groups:overview --group=distributors
-
-  echo "*** verify previously created channel and video are cleared ***"
-  # Allow a few blocks for migration to complete
-  sleep 12
-  
-  # FIXME: Howto assert these fail as expected. They should report video and channel do no exist
-  # Can we get json output to more easily parse result of query?
-  set +e
-  yarn joystream-cli content:channel 1
-  if [ $? -eq 0 ]; then
-    echo "Unexpected channel was found"
-    exit -1
-  fi
-  # This cammand doesn't give error exit code if videos not found in a channel
-  yarn joystream-cli content:videos 1
-}    
-
-trap cleanup EXIT
-
-if [ "$TARGET_RUNTIME" == "$RUNTIME" ]; then
-  echo "Not Performing a runtime upgrade."
-else
-  pre_migration_hook
-
-  # Copy new runtime wasm file from target joystream/node image
-  echo "Extracting wasm blob from target joystream/node image."
-  id=$(docker create joystream/node:${TARGET_RUNTIME})
-  docker cp $id:/joystream/runtime.compact.wasm ${DATA_PATH}
-  docker rm $id
-
-  echo "Performing runtime upgrade."
-  yarn workspace api-scripts tsnode-strict \
-    src/dev-set-runtime-code.ts -- ${DATA_PATH}/runtime.compact.wasm
-
-  echo "Runtime upgraded."
-
-  # Display runtime version
-  yarn workspace api-scripts tsnode-strict src/status.ts | grep Runtime
-
-  echo "Performing migration tests"
-
-  post_migration_hook
+# entrypoint
+function main {
+    CONTAINER_ID=""
+
+    echo "**** CREATING EMPTY CHAINSPEC ****"
+    create_initial_config
+    create_chainspec_file
+    convert_chainspec
+    echo "**** EMPTY CHAINSPEC CREATED SUCCESSFULLY ****"
+
+    # use forkoff to update chainspec with the live state + update runtime code
+    fork_off_init
+
+    export JOYSTREAM_NODE_TAG=$RUNTIME_TAG
+
+    # export chain-spec BEFORE starting the node
+    export_chainspec_file_to_disk
+    
+    echo "***** STARTING NODE WITH FORKED STATE *****"
+    CONTAINER_ID=$(start_node)
+
+    if ( $POST_MIGRATION_ASYNC_ASSERTIONS ); then
+        sleep 120
+        # verify assertions using typescript
+        echo "***** POST MIGRATION TYPESCRIPT *****"
+        yarn workspace network-tests node-ts-strict src/scenarios/post-migration.ts
+    fi
+}
 
-  echo "Done with migrations tests"
-fi
+# main entrypoint
+main || :
+cleanup

+ 13 - 1
tests/network-tests/src/Api.ts

@@ -32,7 +32,7 @@ import { FillOpeningParameters, ProposalId } from '@joystream/types/proposals'
 // import { v4 as uuid } from 'uuid'
 import { extendDebug } from './Debugger'
 import { InvertedPromise } from './InvertedPromise'
-import { VideoId } from '@joystream/types/content'
+import { VideoId, VideoCategoryId } from '@joystream/types/content'
 import { ChannelId } from '@joystream/types/common'
 import { ChannelCategoryMetadata, VideoCategoryMetadata } from '@joystream/metadata-protobuf'
 import { metadataToBytes } from '../../../cli/lib/helpers/serialization'
@@ -1790,6 +1790,18 @@ export class Api {
     return (await this.api.query.members.membershipById(memberId))?.controller_account.toString()
   }
 
+  public async getNumberOfOutstandingVideos(): Promise<number> {
+    return (await this.api.query.content.videoById.entries<VideoId>()).length
+  }
+
+  public async getNumberOfOutstandingChannels(): Promise<number> {
+    return (await this.api.query.content.channelById.entries<ChannelId>()).length
+  }
+
+  public async getNumberOfOutstandingVideoCategories(): Promise<number> {
+    return (await this.api.query.content.videoCategoryById.entries<VideoCategoryId>()).length
+  }
+
   // Create a mock channel, throws on failure
   async createMockChannel(memberId: number, memberControllerAccount?: string): Promise<ChannelId> {
     memberControllerAccount = memberControllerAccount || (await this.getMemberControllerAccount(memberId))

+ 68 - 0
tests/network-tests/src/misc/postMigrationAssertionsFlow.ts

@@ -0,0 +1,68 @@
+import { assert } from 'chai'
+import { FlowProps } from '../Flow'
+import { extendDebug } from '../Debugger'
+import { Utils } from '../utils'
+
+export default async function postMigrationAssertions({ api }: FlowProps): Promise<void> {
+  const debug = extendDebug('flow:postMigrationAssertions')
+  debug('Started')
+
+  debug('Ensure migration is done')
+
+  let channelMigration = await api.query.content.channelMigration()
+  let videoMigration = await api.query.content.videoMigration()
+
+  // wait for the migration to finish, checking that the indices actually advance
+  while (
+    channelMigration.current_id.toNumber() < channelMigration.final_id.toNumber() ||
+    videoMigration.current_id.toNumber() < videoMigration.final_id.toNumber()
+  ) {
+    // wait 6 seconds until next block is produced
+    await Utils.wait(6000)
+
+    const channelMigrationNew = await api.query.content.channelMigration()
+    const videoMigrationNew = await api.query.content.videoMigration()
+
+    // check invariant in order to prevent infinite loop
+    if (
+      channelMigrationNew.current_id.toNumber() > channelMigration.current_id.toNumber() ||
+      videoMigrationNew.current_id.toNumber() > videoMigration.current_id.toNumber()
+    ) {
+      // update migration variables
+      channelMigration = channelMigrationNew
+      videoMigration = videoMigrationNew
+    } else {
+      throw new Error('Migration status not changing')
+    }
+  }
+
+  debug('Check all new working groups have been correctly initialized')
+
+  const wgBeta = await api.query.operationsWorkingGroupBeta.activeWorkerCount()
+  const wgGamma = await api.query.operationsWorkingGroupGamma.activeWorkerCount()
+  const wgGateway = await api.query.gatewayWorkingGroup.activeWorkerCount()
+
+  assert.equal(wgBeta.toNumber(), 0)
+  assert.equal(wgGamma.toNumber(), 0)
+  assert.equal(wgGateway.toNumber(), 0)
+
+  debug('Checking that Video, Channel and Category counters have not been reset')
+
+  const nextVideoCategoryId = await api.query.content.nextVideoCategoryId()
+  const nextVideoId = await api.query.content.nextVideoId()
+  const nextChannelId = await api.query.content.nextChannelId()
+
+  assert(nextVideoCategoryId.toNumber() > 1)
+  assert(nextVideoId.toNumber() > 1)
+  assert(nextChannelId.toNumber() > 1)
+
+  debug('Checking that number of outstanding channels & videos == 0')
+
+  const numChannels = await api.getNumberOfOutstandingChannels()
+  const numVideos = await api.getNumberOfOutstandingVideos()
+
+  assert.equal(numChannels, 0)
+  assert.equal(numVideos, 0)
+
+  debug('Done')
+}

+ 6 - 0
tests/network-tests/src/scenarios/post-migration.ts

@@ -0,0 +1,6 @@
+import postMigrationAssertions from '../misc/postMigrationAssertionsFlow'
+import { scenario } from '../Scenario'
+
+scenario(async ({ job }) => {
+  job('Verify post-migration chain state', postMigrationAssertions)
+})

+ 1 - 1
tests/network-tests/tsconfig.json

@@ -12,7 +12,7 @@
     "resolveJsonModule": true,
     "paths": {
       "@polkadot/types/augment": ["../../types/augment-codec/augment-types.ts"],
-      "@polkadot/api/augment": [ "../../types/augment-codec/augment-api.ts"]
+      "@polkadot/api/augment": ["../../types/augment-codec/augment-api.ts"]
     }
   }
 }

+ 125 - 0
utils/api-scripts/src/fork-off.ts

@@ -0,0 +1,125 @@
+import fs = require('fs')
+import path = require('path')
+import { xxhashAsHex } from '@polkadot/util-crypto'
+import { ApiPromise, WsProvider } from '@polkadot/api'
+const execSync = require('child_process').execSync
+
+// paths & env variables
+let alice = process.env.SUDO_ACCOUNT
+// bad error handling TODO: fix process.env
+let schemaPath = path.join(process.env.DATA_PATH || '', 'schema.json')
+let wasmPath = path.join(process.env.DATA_PATH || '', 'runtime.wasm') || ''
+let hexPath = path.join(process.env.DATA_PATH || '', 'runtime.hex') || ''
+let specPath = path.join(process.env.DATA_PATH || '', 'chain-spec-raw.json')
+let storagePath = path.join(process.env.DATA_PATH || '', 'storage.json')
+
+// this might not be of much use
+const provider = new WsProvider(process.env.WS_RPC_ENDPOINT || 'ws://localhost:9944')
+/**
+ * All module prefixes except those mentioned in the skippedModulesPrefix will be added to this by the script.
+ * If you want to add any past module or part of a skipped module, add the prefix here manually.
+ *
+ * Any storage value’s hex can be logged via console.log(api.query.<module>.<call>.key([...opt params])),
+ * e.g. console.log(api.query.timestamp.now.key()).
+ *
+ * If you want a map/doublemap key prefix, you can do it via .keyPrefix(),
+ * e.g. console.log(api.query.system.account.keyPrefix()).
+ *
+ * For module hashing, do it via xxhashAsHex,
+ * e.g. console.log(xxhashAsHex('System', 128)).
+ */
+let prefixes = ['0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9' /* System.Account */]
+const skippedModulesPrefix = [
+  'System',
+  'Session',
+  'Babe',
+  'Grandpa',
+  'GrandpaFinality',
+  'FinalityTracker',
+  'Authorship',
+]
+
+// Apparently not needed: To review
+// async function fixParachinStates(api: ApiPromise, chainSpec: any) {
+//     const skippedKeys = [
+//         api.query["parasScheduler"].sessionStartBlock.key()
+//     ];
+//     for (const k of skippedKeys) {
+//         delete chainSpec.genesis.raw.top[k];
+//     }
+// }
+
+async function main() {
+  // hexdump the runtime wasm binary (running hexdump directly from the shell gives a bad format error)
+  execSync('cat ' + wasmPath + ' | hexdump -ve \'/1 "%02x"\' > ' + hexPath)
+
+  let api
+  if (!fs.existsSync(schemaPath)) {
+    console.log('Custom Schema missing, using default schema.')
+    api = await ApiPromise.create({ provider })
+  } else {
+    const types = JSON.parse(fs.readFileSync(schemaPath, 'utf8'))
+    api = await ApiPromise.create({
+      provider,
+      types,
+    })
+  }
+
+  // storage.json is guaranteed to exist
+
+  let metadata = await api.rpc.state.getMetadata()
+  // Populate the prefixes array
+  let modules = metadata.asLatest.modules
+  modules.forEach((module) => {
+    if (module.storage) {
+      if (!skippedModulesPrefix.includes(module.name.toString())) {
+        prefixes.push(xxhashAsHex(module.name.toString(), 128))
+      }
+    }
+  })
+
+  // blank starting chainspec guaranteed to exist
+
+  let storage: Storage = JSON.parse(fs.readFileSync(storagePath, 'utf8'))
+  let chainSpec = JSON.parse(fs.readFileSync(specPath, 'utf8'))
+
+  // Modify chain name and id
+  chainSpec.name = chainSpec.name + '-fork'
+  chainSpec.id = chainSpec.id + '-fork'
+  chainSpec.protocolId = chainSpec.protocolId
+
+  // Grab the items to be moved, then iterate through and insert into storage
+  storage.result
+    .filter((i) => prefixes.some((prefix) => i[0].startsWith(prefix)))
+    .forEach(([key, value]) => (chainSpec.genesis.raw.top[key] = value))
+
+  // Delete System.LastRuntimeUpgrade to ensure that the on_runtime_upgrade event is triggered
+  delete chainSpec.genesis.raw.top['0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8']
+
+  //    fixParachinStates(api, chainSpec);
+
+  // Set the code to the current runtime code: this replaces the set code transaction
+  chainSpec.genesis.raw.top['0x3a636f6465'] = '0x' + fs.readFileSync(hexPath, 'utf8').trim()
+
+  // To prevent the validator set from changing mid-test, set Staking.ForceEra to ForceNone ('0x02')
+  chainSpec.genesis.raw.top['0x5f3e4907f716ac89b6347d15ececedcaf7dad0317324aecae8744b87fc95f2f3'] = '0x02'
+
+  if (alice !== '') {
+    // Set sudo key to //Alice
+    chainSpec.genesis.raw.top['0x5c0d1176a568c1f92944340dbfed9e9c530ebca703c85910e7164cb7d1c9e47b'] =
+      '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d'
+  }
+
+  fs.writeFileSync(specPath, JSON.stringify(chainSpec, null, 4))
+
+  console.log('****** INITIAL CHAINSPEC UPDATED TO REFLECT LIVE STATE ******')
+  process.exit()
+}
+
+main()
+
+interface Storage {
+  'jsonrpc': string
+  'result': Array<[string, string]>
+  'id': string
+}