improve handling of device factory images
- support unpacking factory images without root by using debugfs rdump (ext4) and fsck.erofs --extract (erofs)
- support reusing unpacked factory images between adevtool runs to reduce latency
- unpack factory images for multiple devices in parallel; extraction of files from partition images is parallelized too
- extract partition images from the factory image's inner zip directly, without extracting the inner zip first, since it is stored uncompressed
This commit is contained in:
parent
9750841f3d
commit
97b724790b
1 changed file with 265 additions and 5 deletions
|
@ -1,14 +1,24 @@
|
|||
import { promises as fs } from 'fs'
|
||||
import { flags } from '@oclif/command'
|
||||
import assert from 'assert'
|
||||
import { createReadStream, promises as fs } from 'fs'
|
||||
import { FileHandle, FileReadOptions } from 'fs/promises'
|
||||
import hasha from 'hasha'
|
||||
import ora from 'ora'
|
||||
import path from 'path'
|
||||
import { flags } from '@oclif/command'
|
||||
import { pipeline } from 'stream/promises'
|
||||
import * as yauzl from 'yauzl-promise/lib/index.js'
|
||||
import { DeviceBuildId, DeviceConfig, FsType, getDeviceBuildId, resolveBuildId } from '../config/device'
|
||||
|
||||
import { IMAGE_DOWNLOAD_DIR } from '../config/paths'
|
||||
import { BuildIndex, ImageType } from '../images/build-index'
|
||||
import { DeviceImage } from '../images/device-image'
|
||||
import { downloadMissingDeviceImages } from '../images/download'
|
||||
import { maybePlural, withSpinner } from '../util/cli'
|
||||
import { createSubTmp, exists, mount, TempState, withTempDir } from '../util/fs'
|
||||
import { ALL_SYS_PARTITIONS } from '../util/partitions'
|
||||
import { run } from '../util/process'
|
||||
import { run, spawnAsyncNoOut } from '../util/process'
|
||||
import { isSparseImage } from '../util/sparse'
|
||||
import { listZipFiles } from '../util/zip'
|
||||
import { withSpinner } from '../util/cli'
|
||||
|
||||
export const WRAPPED_SOURCE_FLAGS = {
|
||||
stockSrc: flags.string({
|
||||
|
@ -19,7 +29,7 @@ export const WRAPPED_SOURCE_FLAGS = {
|
|||
}),
|
||||
buildId: flags.string({
|
||||
char: 'b',
|
||||
description: 'build ID of the stock images (optional, only used for locating factory images)',
|
||||
description: 'stock OS build ID, defaults to build_id value from device config'
|
||||
}),
|
||||
useTemp: flags.boolean({
|
||||
char: 't',
|
||||
|
@ -278,3 +288,253 @@ export async function withWrappedSrc<Return>(
|
|||
return await callback(wrappedSrc)
|
||||
})
|
||||
}
|
||||
|
||||
// Bundle of images belonging to one (device, build ID) pair, plus the directory
// that the factory image gets unpacked into.
//
// NOTE(review): prepareDeviceImages() creates instances via `{} as DeviceImages` and
// later checks `factoryImage === undefined`, so in practice every non-optional field
// here may be absent — consider marking factoryImage and unpackedFactoryImageDir
// optional to make the type honest.
export interface DeviceImages {
  factoryImage: DeviceImage
  // set by prepareDeviceImages() once the unpack destination is resolved
  unpackedFactoryImageDir: string
  otaImage?: DeviceImage
  // non-factory, non-OTA images, keyed by image type
  vendorImages?: Map<string, DeviceImage>
}
|
||||
|
||||
export async function prepareFactoryImages(buildIndex: BuildIndex, devices: DeviceConfig[], maybeBuildIds?: string[]) {
|
||||
return await prepareDeviceImages(buildIndex, [ImageType.Factory], devices, maybeBuildIds)
|
||||
}
|
||||
|
||||
export async function prepareDeviceImages(
|
||||
buildIndex: BuildIndex,
|
||||
types: ImageType[],
|
||||
devices: DeviceConfig[],
|
||||
// if not specified, current build ID is used for each device
|
||||
maybeBuildIds?: string[]
|
||||
) {
|
||||
let allImages: DeviceImage[] = []
|
||||
|
||||
let imagesMap = new Map<DeviceBuildId, DeviceImages>()
|
||||
|
||||
for (let deviceConfig of devices) {
|
||||
for (let type of types) {
|
||||
let buildIds = maybeBuildIds ?? [deviceConfig.device.build_id]
|
||||
|
||||
for (let buildIdSpec of buildIds) {
|
||||
let buildId = resolveBuildId(buildIdSpec, deviceConfig)
|
||||
let deviceImage = DeviceImage.get(buildIndex, deviceConfig, buildId, type)
|
||||
let deviceBuildId = getDeviceBuildId(deviceConfig, buildId)
|
||||
let images: DeviceImages = imagesMap.get(deviceBuildId) ?? {} as DeviceImages
|
||||
if (deviceImage.type === ImageType.Factory) {
|
||||
images.factoryImage = deviceImage
|
||||
} else if (deviceImage.type === ImageType.Ota) {
|
||||
images.otaImage = deviceImage
|
||||
} else {
|
||||
let map = images.vendorImages
|
||||
if (map === undefined) {
|
||||
map = new Map<string, DeviceImage>()
|
||||
images.vendorImages = map
|
||||
}
|
||||
map.set(deviceImage.type, deviceImage)
|
||||
}
|
||||
imagesMap.set(deviceBuildId, images)
|
||||
allImages.push(deviceImage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
await downloadMissingDeviceImages(allImages)
|
||||
|
||||
let jobs: Promise<any>[] = []
|
||||
let destinationDirNames: string[] = []
|
||||
|
||||
for (let images of imagesMap.values()) {
|
||||
let factoryImage = images.factoryImage
|
||||
if (factoryImage === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
let dirName = getUnpackedFactoryDirName(images.factoryImage)
|
||||
let dir = path.join(IMAGE_DOWNLOAD_DIR, 'unpacked', dirName)
|
||||
images.unpackedFactoryImageDir = dir
|
||||
|
||||
let isAlreadyUnpacked = false
|
||||
try {
|
||||
isAlreadyUnpacked = (await fs.stat(dir)).isDirectory()
|
||||
} catch {}
|
||||
|
||||
if (!isAlreadyUnpacked) {
|
||||
let factoryImagePath = images.factoryImage.getPath()
|
||||
destinationDirNames.push(dirName)
|
||||
jobs.push(unpackFactoryImage(factoryImagePath, factoryImage, dir))
|
||||
}
|
||||
}
|
||||
|
||||
if (jobs.length > 0) {
|
||||
console.log(`Unpacking image${maybePlural(destinationDirNames)}: ${destinationDirNames.join(', ')}`)
|
||||
let label = 'Unpack completed in'
|
||||
console.time(label)
|
||||
await Promise.all(jobs)
|
||||
console.timeEnd(label)
|
||||
}
|
||||
|
||||
return imagesMap
|
||||
}
|
||||
|
||||
function getUnpackedFactoryDirName(image: DeviceImage) {
|
||||
return image.deviceConfig.device.name + '-' + image.buildId
|
||||
}
|
||||
|
||||
// Unpacks a factory image archive into `out` without writing the intermediate inner zip to disk.
//
// Steps:
//  1. verify the SHA-256 of the outer factory zip against the expected image hash
//  2. find the uncompressed inner "image-<device>-…<buildid>.zip" entry inside the outer zip
//  3. parse the inner zip in place (FdReader positioned at the entry's data offset) and
//     unpack each partition image from it, in parallel, into a "<out>-tmp" staging directory
//  4. make the staging directory read-only and rename it to `out`, so `out` only ever
//     appears fully unpacked
async function unpackFactoryImage(factoryImagePath: string, image: DeviceImage, out: string) {
  assert(image.type === ImageType.Factory)

  // verify checksum before trusting the archive contents
  let sha256 = await hasha.fromFile(factoryImagePath, { algorithm: 'sha256' })
  if (sha256 === image.sha256) {
    // There's a TOCTOU race (file is accessed after check), but it affects all other generated files too.
    // Fixing it for this particular case is not worth the complexity increase
  } else {
    throw new Error(`SHA-256 mismatch for '${image.fileName}': expected ${image.sha256} got ${sha256}`)
  }

  let fd = await fs.open(factoryImagePath, 'r')
  try {
    let fdSize = (await fd.stat()).size
    let outerZip = await yauzl.fromReader(new FdReader(fd, 0, fdSize), fdSize)

    for await (let entryP of outerZip) {
      let entry: yauzl.Entry = entryP
      let entryName = entry.filename

      // inner zip path looks like "<dir>-<buildid>/image-<device>-…-<buildid>.zip"
      let isInnerZip =
        entryName.includes(`-${image.buildId.toLowerCase()}/image-${image.deviceConfig.device.name}`) &&
        entryName.endsWith(`-${image.buildId.toLowerCase()}.zip`)

      if (!isInnerZip) {
        continue
      }

      // this operation initializes entry.fileDataOffset
      (await outerZip.openReadStream(entry, { validateCrc32: false })).destroy()

      // the inner zip must be stored (not deflated) so it can be read in place by offset
      assert(entry.compressionMethod === 0, entryName) // uncompressed
      assert(entry.compressedSize === entry.uncompressedSize, entryName)

      let entryOffset = entry.fileDataOffset

      // parse the inner zip directly out of the outer zip's bytes; no temp file needed
      let innerZip = await yauzl.fromReader(new FdReader(fd, entryOffset, entry.uncompressedSize), entry.uncompressedSize)

      let unpackedTmp = out + '-tmp'
      let promises = []

      // detect leftovers of a previously interrupted unpack
      let rmTmpDir = false
      try {
        await fs.access(unpackedTmp)
        rmTmpDir = true
      } catch {}

      if (rmTmpDir) {
        // a completed unpack removes write permission below, so restore it before deleting
        await spawnAsyncNoOut('chmod', ['--recursive', 'u+w', unpackedTmp])
        await fs.rm(unpackedTmp, { recursive: true, force: true })
      }

      await fs.mkdir(unpackedTmp, { recursive: true })

      let fsType = image.deviceConfig.device.system_fs_type

      // kick off unpacking of all partition images in parallel
      for await (let innerEntry of innerZip) {
        promises.push(unpackFsImage(innerEntry, fsType, unpackedTmp))
      }

      await Promise.all(promises)

      // remove write access to prevent accidental modification of unpacked files
      await spawnAsyncNoOut('chmod', ['--recursive', 'a-w', unpackedTmp])

      // publish the fully-unpacked staging dir as the final destination
      await fs.rename(unpackedTmp, out)

      console.log('unpacked ' + getUnpackedFactoryDirName(image))
    }
  } finally {
    await fd.close()
  }
}
|
||||
|
||||
async function unpackFsImage(entry: yauzl.Entry, fsType: FsType, unpackedTmpRoot: string) {
|
||||
let fsImageName = entry.filename
|
||||
|
||||
let expectedExt = '.img'
|
||||
let ext = path.extname(fsImageName)
|
||||
if (ext !== expectedExt) {
|
||||
return
|
||||
}
|
||||
|
||||
let fsImageBaseName = path.basename(fsImageName, expectedExt)
|
||||
|
||||
if (!ALL_SYS_PARTITIONS.has(fsImageBaseName)) {
|
||||
return
|
||||
}
|
||||
|
||||
// extract file system image file
|
||||
let readStream = await entry.openReadStream({ validateCrc32: false })
|
||||
let fsImagePath = path.join(unpackedTmpRoot, entry.filename)
|
||||
let writeStream = (await fs.open(fsImagePath, 'w')).createWriteStream()
|
||||
await pipeline(readStream, writeStream)
|
||||
|
||||
let destinationDir = path.join(unpackedTmpRoot, fsImageBaseName)
|
||||
await fs.mkdir(destinationDir)
|
||||
|
||||
if (fsType === FsType.EXT4) {
|
||||
await unpackExt4(fsImagePath, destinationDir)
|
||||
} else {
|
||||
assert(fsType === FsType.EROFS)
|
||||
await unpackErofs(fsImagePath, destinationDir)
|
||||
}
|
||||
|
||||
await fs.rm(fsImagePath)
|
||||
}
|
||||
|
||||
async function unpackExt4(fsImagePath: string, destinationDir: string) {
|
||||
// rdump uses " for quoting
|
||||
assert(!destinationDir.includes('"'), destinationDir)
|
||||
|
||||
let isStderrLineAllowed = function (s: string) {
|
||||
return s.length == 0 ||
|
||||
// it's expected that ownership information will be lost during unpacking
|
||||
s.startsWith('dump_file: Operation not permitted while changing ownership of ') ||
|
||||
s.startsWith('rdump: Operation not permitted while changing ownership of ') ||
|
||||
// version string
|
||||
s.startsWith('debugfs ')
|
||||
}
|
||||
|
||||
await spawnAsyncNoOut('debugfs', ['-R', `rdump / "${destinationDir}"`, fsImagePath],
|
||||
isStderrLineAllowed)
|
||||
}
|
||||
|
||||
async function unpackErofs(fsImagePath: string, destinationDir: string) {
|
||||
await spawnAsyncNoOut('fsck.erofs', ['--extract=' + destinationDir, fsImagePath])
|
||||
}
|
||||
|
||||
class FdReader extends yauzl.Reader {
|
||||
// fd ownership remains with the caller
|
||||
constructor(readonly fd: FileHandle, readonly off: number, readonly len: number) {
|
||||
super()
|
||||
}
|
||||
|
||||
async _read(start: number, length: number) {
|
||||
// do not initialize buffer contnents, assert below ensures that it's fully written out
|
||||
let buffer = Buffer.allocUnsafe(length)
|
||||
|
||||
let opts = {
|
||||
buffer,
|
||||
length,
|
||||
position: this.off + start,
|
||||
} as FileReadOptions
|
||||
|
||||
assert((await this.fd.read(opts)).bytesRead === length)
|
||||
return buffer
|
||||
}
|
||||
|
||||
_createReadStream(start: number, length: number) {
|
||||
// There's no way AFAIK to prevent closing of file descriptor when read stream ends, and node.js doens't have
|
||||
// a dup() wrapper. As a workaround, reopen the file by using /proc/self/fd reference
|
||||
return createReadStream(`/proc/self/fd/${this.fd.fd}`, {
|
||||
start: this.off + start,
|
||||
end: this.off + start + length - 1, // '-1' is needed because 'end' is inclusive
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue