
Commit

Remove @gmod/binary-parser to avoid new Function eval/CSP violation (#…
cmdcolin authored Aug 8, 2024
1 parent e90915a commit 8675701
Showing 25 changed files with 1,156 additions and 9,585 deletions.
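
The dependency being dropped here builds its parsers at runtime: a declarative field spec is compiled into a function body and evaluated with `new Function`, which a strict Content-Security-Policy (one without 'unsafe-eval') refuses to execute. The sketch below illustrates that difference; it is not code from this commit, and the field names are made up.

```ts
// Illustration only (not from this commit): a generated parser evaluates a
// string of code at runtime, which a CSP without 'unsafe-eval' blocks.
const generatedParse = new Function(
  'buf',
  'return { magic: buf.readUInt32LE(0), length: buf.readUInt32LE(4) }',
) as (buf: Buffer) => { magic: number; length: number }

// The replacement style is an ordinary hand-written parser that reads the
// same fields directly, so no code is generated or evaluated at runtime.
function handWrittenParse(buf: Buffer) {
  return {
    magic: buf.readUInt32LE(0),
    length: buf.readUInt32LE(4),
  }
}
```

Both forms produce the same object; only the hand-written one runs under a strict CSP, which is why the dependency is removed from package.json below.
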
10 changes: 6 additions & 4 deletions eslint.config.mjs
@@ -15,9 +15,6 @@ const compat = new FlatCompat({
})

export default [
{
ignores: ['src/binary-parser/*.js'],
},
...compat.extends(
'plugin:@typescript-eslint/recommended',
'plugin:@typescript-eslint/recommended-type-checked',
@@ -49,7 +46,12 @@ export default [
ignoreRestSiblings: true,
},
],

'no-console': [
'warn',
{
allow: ['error', 'warn'],
},
],
'no-underscore-dangle': 0,
curly: 'error',
'@typescript-eslint/no-explicit-any': 0,
49 changes: 49 additions & 0 deletions example/index-debug.html
@@ -0,0 +1,49 @@
<html>
<head>
<script src="cram-bundle.js"></script>
<script>
const { IndexedCramFile, CramFile, CraiIndex } = window.gmodCRAM

// open local files
const indexedFile = new IndexedCramFile({
cramUrl: 'volvox-sorted.cram',
index: new CraiIndex({
url: 'volvox-sorted.cram.crai',
}),
seqFetch: async (seqId, start, end) => {
return ''
},
checkSequenceMD5: false,
})

// example of fetching records from an indexed CRAM file.
// NOTE: only numeric IDs for the reference sequence are accepted.
// For indexedfasta the numeric ID is the order in which the sequence names appear in the header

// Wrap in an async and then run
run = async () => {
const records = await indexedFile.getRecordsForRange(0, 10000, 20000)
const r = []
records.forEach(record => {
console.log(`got a record named ${record.readName}`)
if (record.readFeatures != undefined) {
record.readFeatures.forEach(({ code, pos, refPos, ref, sub }) => {
if (code === 'X') {
r.push(
`${record.readName} shows a base substitution at ${refPos}`,
)
}
})
}
})
document.getElementById('output').innerHTML = r.join('\n')
}

run()
</script>
</head>
<body>
<h1>Hello world</h1>
<pre id="output" />
</body>
</html>
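
The example above only reports 'X' (base substitution) read features. A small standalone helper in the same spirit is sketched below; it assumes the standard CRAM read-feature codes 'D' (deletion) and 'I' (insertion), and the helper itself is hypothetical, not part of this commit.

```ts
// Hypothetical helper (not part of this commit): summarize the read features
// of one record, assuming the standard CRAM feature codes X/D/I.
function summarizeReadFeatures(record: {
  readName: string
  readFeatures?: { code: string; refPos: number }[]
}): string[] {
  const out: string[] = []
  for (const { code, refPos } of record.readFeatures ?? []) {
    if (code === 'X') {
      out.push(`${record.readName}: base substitution at ${refPos}`)
    } else if (code === 'D') {
      out.push(`${record.readName}: deletion at ${refPos}`)
    } else if (code === 'I') {
      out.push(`${record.readName}: insertion at ${refPos}`)
    }
  }
  return out
}
```
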
13 changes: 6 additions & 7 deletions package.json
@@ -22,7 +22,7 @@
],
"scripts": {
"test": "jest",
"lint": "eslint src test",
"lint": "eslint --report-unused-disable-directives --max-warnings 0 src test",
"docs": "documentation readme --shallow src/indexedCramFile.ts --section=IndexedCramFile; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/craiIndex.ts --section=CraiIndex; documentation readme --shallow errors.ts '--section=Exception Classes'; documentation readme --shallow src/cramFile/file.ts --section=CramFile; documentation readme --shallow src/cramFile/record.ts --section=CramRecord",
"prebuild": "npm run clean",
"clean": "rimraf dist esm",
@@ -43,7 +43,6 @@
],
"dependencies": {
"@gmod/abortable-promise-cache": "^2.0.0",
"@gmod/binary-parser": "^1.3.5",
"@jkbonfield/htscodecs": "^0.5.1",
"buffer-crc32": "^1.0.0",
"bzip2": "^0.1.1",
@@ -59,18 +58,18 @@
"@types/long": "^4.0.0",
"@types/md5": "^2.3.2",
"@types/pako": "^1.0.3",
"@typescript-eslint/eslint-plugin": "^7.0.2",
"@typescript-eslint/parser": "^7.0.2",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"buffer": "^6.0.3",
"documentation": "^14.0.3",
"eslint": "^9.0.0",
"eslint": "^9.8.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.1.3",
"eslint-plugin-unicorn": "^54.0.0",
"eslint-plugin-unicorn": "^55.0.0",
"jest": "^29.3.1",
"mock-fs": "^5.2.0",
"prettier": "^3.2.5",
"rimraf": "^5.0.1",
"rimraf": "^6.0.1",
"ts-jest": "^29.1.2",
"typescript": "^5.0.3",
"webpack": "^5.90.3",
3 changes: 1 addition & 2 deletions src/cramFile/codecs/byteArrayLength.ts
@@ -1,10 +1,9 @@
import { tinyMemoize } from '../util'

import CramCodec, { Cursors } from './_base'
import { ByteArrayLengthEncoding, CramEncoding } from '../encoding'
import CramSlice from '../slice'
import { CramFileBlock } from '../file'
import { DataType } from './dataSeriesTypes'
import { tinyMemoize } from '../util'

type CramCodecFactory = <TData extends DataType = DataType>(
encodingData: CramEncoding,
2 changes: 1 addition & 1 deletion src/cramFile/codecs/external.ts
@@ -42,7 +42,7 @@ export default class ExternalCodec extends CramCodec<
const contentBlock = blocksByContentId[blockContentId]
if (!contentBlock) {
throw new CramMalformedError(
`no block found with content ID ${blockContentId}`,
`no block found with content ID ${blockContentId}}`,
)
}
const cursor = cursors.externalBlocks.getCursor(blockContentId)
3 changes: 2 additions & 1 deletion src/cramFile/codecs/huffman.ts
@@ -46,7 +46,8 @@ export default class HuffmanIntCodec extends CramCodec<
this.buildCodes()
this.buildCaches()

// if this is a degenerate zero-length huffman code, special-case the decoding
// if this is a degenerate zero-length huffman code, special-case the
// decoding
if (this.sortedCodes[0].bitLength === 0) {
this._decode = this._decodeZeroLengthCode
}
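
The comment in the hunk above concerns the degenerate case where the symbol alphabet has a single entry, so every code is zero bits long and decoding never consumes anything from the bit stream. A minimal sketch of that special case, not the library's actual implementation:

```ts
// Minimal sketch (not the library's code): a zero-bit-length huffman code can
// only describe a one-symbol alphabet, so the decoder reads nothing and
// always returns that single symbol.
function makeZeroLengthDecoder(sortedValues: number[]): () => number {
  const onlySymbol = sortedValues[0]
  return () => onlySymbol
}
```
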
9 changes: 1 addition & 8 deletions src/cramFile/container/compressionScheme.ts
@@ -1,6 +1,6 @@
import { instantiateCodec } from '../codecs'
import CramCodec from '../codecs/_base'
import { CramCompressionHeader, CramPreservationMap } from '../sectionParsers'
import { CramCompressionHeader } from '../sectionParsers'
import { CramEncoding } from '../encoding'
import { CramMalformedError } from '../../errors'
import {
@@ -95,12 +95,8 @@ export default class CramContainerCompressionScheme {
public tagCodecCache: Record<string, CramCodec> = {}
public tagEncoding: Record<string, CramEncoding> = {}
public dataSeriesEncoding: DataSeriesEncodingMap
private preservation: CramPreservationMap
private _endPosition: number
private _size: number

constructor(content: CramCompressionHeader) {
// Object.assign(this, content)
// interpret some of the preservation map tags for convenient use
this.readNamesIncluded = content.preservation.RN
this.APdelta = content.preservation.AP
Expand All @@ -109,9 +105,6 @@ export default class CramContainerCompressionScheme {
this.substitutionMatrix = parseSubstitutionMatrix(content.preservation.SM)
this.dataSeriesEncoding = content.dataSeriesEncoding
this.tagEncoding = content.tagEncoding
this.preservation = content.preservation
this._size = content._size
this._endPosition = content._endPosition
}

/**
31 changes: 21 additions & 10 deletions src/cramFile/container/index.ts
@@ -4,27 +4,29 @@ import { itf8Size, parseItem, tinyMemoize } from '../util'
import CramSlice from '../slice'
import CramContainerCompressionScheme from './compressionScheme'
import CramFile from '../file'
import { getSectionParsers } from '../sectionParsers'

export default class CramContainer {
constructor(
public file: CramFile,
public filePosition: number,
) {}

// memoize
getHeader() {
return this._readContainerHeader(this.filePosition)
}

// memoize
async getCompressionHeaderBlock() {
const containerHeader = await this.getHeader()

// if there are no records in the container, there will be no compression header
if (!containerHeader.numRecords) {
// if there are no records in the container, there will be no compression
// header
if (!containerHeader?.numRecords) {
return null
}
const sectionParsers = await this.file.getSectionParsers()
const { majorVersion } = await this.file.getDefinition()
const sectionParsers = getSectionParsers(majorVersion)

const block = await this.getFirstBlock()
if (block === undefined) {
return undefined
@@ -34,6 +36,7 @@ export default class CramContainer {
`invalid content type ${block.contentType} in what is supposed to be the compression header block`,
)
}

const content = parseItem(
block.content,
sectionParsers.cramCompressionHeader.parser,
@@ -48,16 +51,20 @@

async getFirstBlock() {
const containerHeader = await this.getHeader()
if (!containerHeader) {
return undefined
}
return this.file.readBlock(containerHeader._endPosition)
}

// parses the compression header data into a CramContainerCompressionScheme object
// memoize
// parses the compression header data into a CramContainerCompressionScheme
// object
async getCompressionScheme() {
const header = await this.getCompressionHeaderBlock()
if (!header) {
return undefined
}

return new CramContainerCompressionScheme(header.parsedContent)
}

@@ -68,23 +75,27 @@
}

async _readContainerHeader(position: number) {
const sectionParsers = await this.file.getSectionParsers()
const { majorVersion } = await this.file.getDefinition()
const sectionParsers = getSectionParsers(majorVersion)
const { cramContainerHeader1, cramContainerHeader2 } = sectionParsers
const { size: fileSize } = await this.file.stat()

if (position >= fileSize) {
console.warn(
`position:${position}>=fileSize:${fileSize} in cram container`,
)
return undefined
}

// parse the container header. do it in 2 pieces because you cannot tell
// how much to buffer until you read numLandmarks
const bytes1 = Buffer.allocUnsafe(cramContainerHeader1.maxLength)
await this.file.read(bytes1, 0, cramContainerHeader1.maxLength, position)
const header1 = parseItem(bytes1, cramContainerHeader1.parser) as any
const header1 = parseItem(bytes1, cramContainerHeader1.parser)
const numLandmarksSize = itf8Size(header1.numLandmarks)
if (position + header1.length >= fileSize) {
console.warn(
`${this.file}: container header at ${position} indicates that the container has length ${header1.length}, which extends beyond the length of the file. Skipping this container.`,
`container header at ${position} indicates that the container has length ${header1.length}, which extends beyond the length of the file. Skipping this container.`,
)
return undefined
}
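
The two-piece read in `_readContainerHeader` above exists because the header's total size is only known once `numLandmarks`, an ITF-8 variable-length integer, has been parsed. Below is a hedged sketch of how such a size calculation typically looks (CRAM packs an integer into 1-5 bytes depending on its magnitude); it is an illustration, not necessarily the exact `itf8Size` from `../util`.

```ts
// Hedged sketch: the encoded size of an ITF-8 integer depends only on how
// many significant bits the value has (7, 14, 21, or 28 bits, else 5 bytes).
function itf8Size(v: number): number {
  if (!(v & ~0x7f)) {
    return 1
  }
  if (!(v & ~0x3fff)) {
    return 2
  }
  if (!(v & ~0x1fffff)) {
    return 3
  }
  if (!(v & ~0xfffffff)) {
    return 4
  }
  return 5
}
```
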
(diffs for the remaining changed files are not shown)

0 comments on commit 8675701
