Checkpoint the last of the JavaScript work

R Tyler Croy 2020-01-22 21:32:19 -08:00
parent 86cb49a189
commit 0bd2a88c01
No known key found for this signature in database
GPG Key ID: E5C92681BEF6CEA2
2 changed files with 57 additions and 10 deletions

@@ -3,19 +3,28 @@
  * routing logic for proxying and mutating results from a Docker registry
  */
-const express = require('express');
-const fs = require('fs');
-const path = require('path');
-const request = require('request');
-const rp = require('request-promise');
+const express = require('express');
+const fs = require('fs');
+const NodeCache = require('node-cache');
+const path = require('path');
+const request = require('request');
+const rp = require('request-promise');
 const { LAYERS_CHECKSUM_CACHE, Layer } = require('./src/layers');
-const logger = require('./src/logger');
+const logger = require('./src/logger');
 const UPSTREAM_REGISTRY = process.env.UPSTREAM_REGISTRY || 'https://registry-1.docker.io';
 const LAYERS_DIR = path.resolve(process.env.LAYERS_DIR || './layers.d/');
 logger.info(`Using the layers directory: ${LAYERS_DIR}`);
+/*
+ * This cache is to be used for any of the digests we spot in manifests fetched
+ * from the upstream. For the cases where these digests will represent
+ * something which needs to be overridden, we need to know such that the blob
+ * response handler can trigger the appropriate override behavior
+ */
+const OVERRIDDEN_BLOB_CACHE = new NodeCache({ stdTTL: 86400 });
 /**
  * Return the computed override path for the given image:tag
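Aside on the new OVERRIDDEN_BLOB_CACHE (not part of the commit): a minimal sketch of how a node-cache instance with stdTTL: 86400 is meant to be consulted later, assuming entries are keyed by digest; the digest and image coordinates below are made up.

const NodeCache = require('node-cache');

// Mirrors the declaration above: entries silently expire after 24 hours
const OVERRIDDEN_BLOB_CACHE = new NodeCache({ stdTTL: 86400 });

// While processing a manifest, remember a digest that Ahab intends to override
OVERRIDDEN_BLOB_CACHE.set('sha256:0123abcd', { org: 'library', image: 'alpine', tag: '3.11' });

// Later, a blob handler can ask whether the requested digest is one of ours
const hit = OVERRIDDEN_BLOB_CACHE.get('sha256:0123abcd'); // undefined once the TTL lapses
if (hit) {
  console.log(`override blob for ${hit.org}/${hit.image}:${hit.tag}`);
}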
@@ -52,13 +61,13 @@ function collectLayersFor(org, image, tag) {
     .map((filename) => {
       const key = Layer.keyFor(org, image, tag, filename);
-      if (!LAYERS_CHECKSUM_CACHE[key]) {
+      if (!LAYERS_CHECKSUM_CACHE.get(key)) {
         logger.debug(`Computing a new layer for key ${key}`);
         const layer = new Layer(org, image, tag, path.join(computedPath, filename));
         layer.process();
-        LAYERS_CHECKSUM_CACHE[key] = layer;
+        LAYERS_CHECKSUM_CACHE.set(key, layer);
       }
-      return LAYERS_CHECKSUM_CACHE[key];
+      return LAYERS_CHECKSUM_CACHE.get(key);
     });
 }
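Not part of the commit: the bracket-index accesses had to become get()/set() because LAYERS_CHECKSUM_CACHE is no longer a plain object. A small sketch of the difference, with a made-up key:

const NodeCache = require('node-cache');
const cache = new NodeCache({ stdTTL: 300 });

// Property assignment bypasses node-cache entirely; the entry never gets a TTL
cache['org/image:tag:layer.tar.gz'] = 'layer';
console.log(cache.get('org/image:tag:layer.tar.gz')); // undefined

// set()/get() go through the cache and honor the five-minute stdTTL
cache.set('org/image:tag:layer.tar.gz', 'layer');
console.log(cache.get('org/image:tag:layer.tar.gz')); // 'layer'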
@@ -79,6 +88,7 @@ function proxyToUpstream(req, res) {
       * the upstream repository might complain that we're not authorized
       */
      'Authorization' : req.get('Authorization'),
+     'Accept' : req.get('Accept'),
     },
   }).pipe(res);
 }
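Not part of the commit: a self-contained sketch of the proxy pattern proxyToUpstream() uses, now forwarding the client's Accept header alongside Authorization so the upstream can negotiate the manifest media type; the catch-all route and port are assumptions.

const express = require('express');
const request = require('request');

const app = express();
const UPSTREAM_REGISTRY = process.env.UPSTREAM_REGISTRY || 'https://registry-1.docker.io';

app.get('/v2/*', (req, res) => {
  request({
    url: `${UPSTREAM_REGISTRY}${req.originalUrl}`,
    headers: {
      // Pass the client's bearer token through, otherwise the upstream rejects us
      'Authorization': req.get('Authorization'),
      // Without Accept the upstream may fall back to an older manifest schema
      'Accept': req.get('Accept'),
    },
  }).pipe(res); // stream the upstream response straight back to the client
});

app.listen(9090);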
@@ -105,6 +115,35 @@ app.get('/v2/:org/:image/manifests/:digest', (req, res) => {
     return proxyToUpstream(req, res);
   }
+  /*
+   * Begin request to upstream in order to get the original manifest which Ahab
+   * will then manipulate
+   */
+  const manifestReq = rp({
+    url: `${UPSTREAM_REGISTRY}${req.originalUrl}`,
+    headers: {
+      'Authorization' : req.get('Authorization'),
+      'Accept' : req.get('Accept'),
+    },
+  }).then((response) => {
+    const parsed = JSON.parse(response);
+    if (parsed.manifests) {
+      // This is a manifest list!
+      logger.info('Received manifest list, caching and returning');
+      parsed.manifests.map((manifest) => {
+        /*
+         * Shove this into the cache
+         */
+      })
+      return res.send(response);
+    } else {
+      // This is just a manifest then
+    }
+  });
+  return;
   /*
    * If we don't explicitly set the content type here, the client will think
    * that we're sending back a v1 manifest schema and complain about a "missing
@@ -117,6 +156,13 @@ app.get('/v2/:org/:image/manifests/:digest', (req, res) => {
   const layers = collectLayersFor(org, image, digest);
   logger.info(layers);
+  /*
+   * if we get a manifest list, we basically need to cache all the referenced
+   * digests in relation to our current image:tag reference since the client
+   * will be quickly making subsequent requests for those digests, which must
+   * themselves be overwritten
+   */
   res.status(500);
   res.send('Fail');
   return
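Not part of the commit: the manifest-list branch above is checkpointed mid-thought (the map() body is empty and the route still ends in a 500). One way the caching comment could be acted on, sketched under assumptions that are not in the diff: referenced digests get keyed into OVERRIDDEN_BLOB_CACHE, and the response Content-Type is set explicitly to avoid the "missing signature key" complaint mentioned above. This is a hypothetical helper, not Ahab's eventual implementation.

const NodeCache = require('node-cache');
const OVERRIDDEN_BLOB_CACHE = new NodeCache({ stdTTL: 86400 }); // stands in for the cache declared in the file above

// `parsed` is the JSON-parsed manifest list, `rawBody` the upstream response text
function sendManifestList(req, res, rawBody, parsed) {
  const { org, image, digest } = req.params; // route: /v2/:org/:image/manifests/:digest

  parsed.manifests.forEach((manifest) => {
    // Remember each referenced digest against the current image:tag so the
    // client's follow-up requests for those digests can also be overridden
    OVERRIDDEN_BLOB_CACHE.set(manifest.digest, { org, image, tag: digest });
  });

  // Tell the client this really is a manifest list rather than a v1 manifest
  res.set('Content-Type', 'application/vnd.docker.distribution.manifest.list.v2+json');
  return res.send(rawBody);
}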

src/layers.js

@@ -1,4 +1,5 @@
 const fs = require('fs');
+const NodeCache = require('node-cache');
 const path = require('path');
 const zlib = require('zlib');

@@ -12,7 +13,7 @@ module.exports = {};
  * class
  *
  */
-module.exports.LAYERS_CHECKSUM_CACHE = {};
+module.exports.LAYERS_CHECKSUM_CACHE = new NodeCache({ stdTTL: 300 });

 class Layer {
   constructor(org, image, tag, filePath) {
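Not part of the commit: replacing the plain object with new NodeCache({ stdTTL: 300 }) means cached Layer entries vanish after five minutes, which is exactly the miss condition collectLayersFor() checks before recomputing a layer. A small sketch of that behavior with a placeholder key and value:

const NodeCache = require('node-cache');
const LAYERS_CHECKSUM_CACHE = new NodeCache({ stdTTL: 300 });

LAYERS_CHECKSUM_CACHE.set('library/alpine:3.11:override.tar.gz', { checksum: 'deadbeef' });

// Within five minutes this is a hit; afterwards get() returns undefined and
// the caller would rebuild and re-process the Layer
console.log(LAYERS_CHECKSUM_CACHE.get('library/alpine:3.11:override.tar.gz'));

// node-cache also announces evictions, handy for debugging cache churn
LAYERS_CHECKSUM_CACHE.on('expired', (key) => {
  console.log(`layer cache entry expired: ${key}`);
});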