diff --git a/index.js b/index.js index bbd7863..8b5ec4e 100644 --- a/index.js +++ b/index.js @@ -3,19 +3,28 @@ * routing logic for proxying and mutating results from a Docker registry */ -const express = require('express'); -const fs = require('fs'); -const path = require('path'); -const request = require('request'); -const rp = require('request-promise'); +const express = require('express'); +const fs = require('fs'); +const NodeCache = require('node-cache'); +const path = require('path'); +const request = require('request'); +const rp = require('request-promise'); const { LAYERS_CHECKSUM_CACHE, Layer } = require('./src/layers'); -const logger = require('./src/logger'); +const logger = require('./src/logger'); const UPSTREAM_REGISTRY = process.env.UPSTREAM_REGISTRY || 'https://registry-1.docker.io'; const LAYERS_DIR = path.resolve(process.env.LAYERS_DIR || './layers.d/'); logger.info(`Using the layers directory: ${LAYERS_DIR}`); +/* + * This cache is to be used for any of the digests we spot in manifests fetched + * from the upstream. 
For the caches where these digests will represent + * something which needs to be overridden, we need to know so that the blob + * response handler can trigger the appropriate override behavior */ +const OVERRIDDEN_BLOB_CACHE = new NodeCache({ stdTTL: 86400 }); + /** * Return the computed override path for the given image:tag @@ -52,13 +61,13 @@ function collectLayersFor(org, image, tag) { .map((filename) => { const key = Layer.keyFor(org, image, tag, filename); - if (!LAYERS_CHECKSUM_CACHE[key]) { + if (!LAYERS_CHECKSUM_CACHE.get(key)) { logger.debug(`Computing a new layer for key ${key}`); const layer = new Layer(org, image, tag, path.join(computedPath, filename)); layer.process(); - LAYERS_CHECKSUM_CACHE[key] = layer; + LAYERS_CHECKSUM_CACHE.set(key, layer); } - return LAYERS_CHECKSUM_CACHE[key]; + return LAYERS_CHECKSUM_CACHE.get(key); }); } @@ -79,6 +88,7 @@ function proxyToUpstream(req, res) { * the upstream repository might complain that we're not authorized */ 'Authorization' : req.get('Authorization'), + 'Accept' : req.get('Accept'), }, }).pipe(res); } @@ -105,6 +115,35 @@ app.get('/v2/:org/:image/manifests/:digest', (req, res) => { return proxyToUpstream(req, res); } + /* * Begin request to upstream in order to get the original manifest which Ahab * will then manipulate */ + const manifestReq = rp({ url: `${UPSTREAM_REGISTRY}${req.originalUrl}`, headers: { 'Authorization' : req.get('Authorization'), 'Accept' : req.get('Accept'), }, }).then((response) => { const parsed = JSON.parse(response); if (parsed.manifests) { // This is a manifest list! 
+ logger.info('Received manifest list, caching and returning'); + + parsed.manifests.map((manifest) => { + /* + * Shove this into the cache + */ + }) + return res.send(response); + } else { + // This is just a manifest then + } + }); + + return; + /* * If we don't explicitly set the content type here, the client will think * that we're sending back a v1 manifest schema and complain about a "missing @@ -117,6 +156,13 @@ app.get('/v2/:org/:image/manifests/:digest', (req, res) => { const layers = collectLayersFor(org, image, digest); logger.info(layers); + /* + * if we get a manifest list, we basically need to cache all the referenced + * digests in relation to our current image:tag reference since the client + * will be quickly making subsequent requests for those digests, which must + * themselves be overwritten + */ + res.status(500); res.send('Fail'); return diff --git a/src/layers.js b/src/layers.js index 3fa2574..16f84ba 100644 --- a/src/layers.js +++ b/src/layers.js @@ -1,4 +1,5 @@ const fs = require('fs'); +const NodeCache = require('node-cache'); const path = require('path'); const zlib = require('zlib'); @@ -12,7 +13,7 @@ module.exports = {}; * class * */ -module.exports.LAYERS_CHECKSUM_CACHE = {}; +module.exports.LAYERS_CHECKSUM_CACHE = new NodeCache({ stdTTL: 300 }); class Layer { constructor(org, image, tag, filePath) {