/**
 * This file is the main server for Ahab and contains all the basic request
 * routing logic for proxying and mutating results from a Docker registry
 */
const crypto = require('crypto');
const express = require('express');
const fs = require('fs');
const NodeCache = require('node-cache');
const path = require('path');
const request = require('request');
const rp = require('request-promise');

const { LAYERS_CHECKSUM_CACHE, Layer } = require('./src/layers');
const logger = require('./src/logger');
// Upstream registry to proxy against; defaults to Docker Hub's registry endpoint
const UPSTREAM_REGISTRY = process.env.UPSTREAM_REGISTRY || 'https://registry-1.docker.io';
// Root directory for override layers, laid out as <org>/<image>/<tag>/*.tar.gz
// (see overridePathFor/collectLayersFor below)
const LAYERS_DIR = path.resolve(process.env.LAYERS_DIR || './layers.d/');
logger.info(`Using the layers directory: ${LAYERS_DIR}`);
/*
 * This cache is to be used for any of the digests we spot in manifests fetched
 * from the upstream. For the caches where these digests will represent
 * something which needs to be overridden, we need to know such that the blob
 * response handler can trigger the appropriate override behavior
 */
// stdTTL is in seconds: entries expire after 24 hours
const OVERRIDDEN_BLOB_CACHE = new NodeCache({ stdTTL: 86400 });
/**
 * Compute the filesystem path which would hold override layers for the
 * given image coordinates.
 *
 * @param org String organization, e.g. "library"
 * @param image String image name
 * @param tag String tag name
 * @return String path rooted at LAYERS_DIR
 */
function overridePathFor(org, image, tag) {
  const segments = [org, image, tag];
  return path.join(LAYERS_DIR, ...segments);
}
/**
 * Return true if we have an override directory for the image
 *
 * @param org String organization
 * @param image String image name
 * @param tag String tag name
 * @return Boolean whether an override directory exists on disk
 */
function shouldOverrideImage(org, image, tag) {
  const computedPath = overridePathFor(org, image, tag);
  // Fixed log-message typo ("Checking to presence")
  logger.debug(`Checking for presence of override dir: ${computedPath}`);
  return fs.existsSync(computedPath);
}
/**
 * Collect the layers to use for override
 *
 * Layers are the *.tar.gz files found in the override directory, applied in
 * lexicographic filename order. Computed Layer objects are memoized in
 * LAYERS_CHECKSUM_CACHE under Layer.keyFor(org, image, tag, filename).
 *
 * @param org String organization
 * @param image String image name
 * @param tag String tag name
 * @return Array of Layer objects (empty when no override dir exists)
 */
function collectLayersFor(org, image, tag) {
  // Just to make sure we're never called with bad data
  if (!shouldOverrideImage(org, image, tag)) {
    return [];
  }
  const computedPath = overridePathFor(org, image, tag);
  return fs.readdirSync(computedPath)
    .filter((filename) => filename.endsWith('.tar.gz'))
    .sort()
    .map((filename) => {
      const key = Layer.keyFor(org, image, tag, filename);

      /*
       * Fixed: the original called LAYERS_CHECKSUM_CACHE.get(key) twice per
       * layer (once for the miss check, once for the return value). Fetch
       * once, and compute/store only on a miss.
       */
      let layer = LAYERS_CHECKSUM_CACHE.get(key);
      if (!layer) {
        logger.debug(`Computing a new layer for key ${key}`);
        layer = new Layer(org, image, tag, path.join(computedPath, filename));
        layer.process();
        LAYERS_CHECKSUM_CACHE.set(key, layer);
      }
      return layer;
    });
}
/**
 * Proxy the given request directly to the upstream
 *
 * @param req An Express Request object
 * @param res An Express Response object
 */
function proxyToUpstream(req, res) {
  logger.info(`Passing this request along upstream (${req.originalUrl})`);

  /*
   * We need to send the Authorization header along as well, otherwise
   * the upstream repository might complain that we're not authorized
   */
  const upstreamOptions = {
    url: `${UPSTREAM_REGISTRY}${req.originalUrl}`,
    method: req.method,
    headers: {
      'Authorization' : req.get('Authorization'),
      'Accept' : req.get('Accept'),
    },
  };

  return request(upstreamOptions).pipe(res);
}
// The Express application carrying all registry-facing routes below
const app = express();
// TCP port the proxy listens on (passed to app.listen at the bottom of the file)
const port = 9090;
/*
 * The manifests API is defined in the "Pulling an Image" part of the HTTP
 * registry API documentation, see here:
 * <https://docs.docker.com/registry/spec/api/#pulling-an-image>
 *
 * This express route will:
 * - fetch the upstream manifest in order to fetch the image configuration
 * - fetch the image configuration, which must have its diff_ids overwritten
 * - re-compute the digest of the image configuration
 * - grab all the layers which must be inserted into the manifest
 * - generate the updated manifest
 */
app.get('/v2/:org/:image/manifests/:digest', (req, res) => {
  const { org, image, digest } = req.params;

  if (!shouldOverrideImage(org, image, digest)) {
    return proxyToUpstream(req, res);
  }

  logger.info(`Overriding the pull for ${org}/${image}:${digest}`);

  const layers = collectLayersFor(org, image, digest);
  logger.info(layers);

  /*
   * Begin request to upstream in order to get the original manifest which Ahab
   * will then manipulate
   */
  return rp({
    url: `${UPSTREAM_REGISTRY}${req.originalUrl}`,
    headers: {
      'Authorization' : req.get('Authorization'),
      'Accept' : req.get('Accept'),
    },
  })
    .then((response) => {
      const parsed = JSON.parse(response);

      if (parsed.manifests) {
        // This is a manifest list!
        logger.info('Received manifest list, caching and returning');

        /*
         * If we get a manifest list, we need to cache all the referenced
         * digests in relation to our current image:tag reference since the
         * client will be quickly making subsequent requests for those
         * digests, which must themselves be overwritten
         */
        parsed.manifests.forEach((manifest) => {
          // TODO(review): record manifest.digest in OVERRIDDEN_BLOB_CACHE;
          // the original code left this side effect unimplemented
        });
        return res.send(response);
      }

      /*
       * This is just a manifest. The actual override flow (rewriting the
       * image configuration and layer list) is not implemented yet, so fail
       * explicitly rather than leaving the client connection hanging, which
       * is what the original empty branch did.
       *
       * If we don't explicitly set the content type here, the client will
       * think that we're sending back a v1 manifest schema and complain
       * about a "missing signature key"
       */
      res.set('Content-Type', 'application/vnd.docker.distribution.manifest.v2+json');
      res.status(500);
      return res.send('Fail');
    })
    .catch((error) => {
      /*
       * Without this handler an upstream failure was an unhandled Promise
       * rejection and the client never received a response
       */
      logger.error(`Failed to fetch the manifest for ${org}/${image}:${digest} - ${error}`);
      res.status(500);
      return res.send('Fail');
    });
});
/*
 * Serve up a custom blob if we've got one!
 */
app.get('/v2/:org/:image/blobs/:sha', (req, res) => {
  console.log(`Hitting ${req.originalUrl}`);

  if (req.params.sha === 'sha256:cf98438b1781c2be1fde41967959140379c715b75a85723501d0bca82f215a76') {
    /*
     * Requesting our custom blob layer itself
     */
    const filename = 'layer.tar.gz';
    console.log('hitting our custom blob');
    fs.createReadStream(filename).pipe(res);
  } else if ((typeof imageConfig !== 'undefined') && (req.params.sha === imageConfig)) {
    /*
     * Requesting our image configuration
     *
     * NOTE(review): `imageConfig` is not defined anywhere in this file. The
     * original comparison threw a ReferenceError for every blob request not
     * matching the hard-coded sha above, which also broke the upstream proxy
     * fallthrough below. The typeof guard keeps the route functional until
     * imageConfig is actually populated — confirm where it should come from
     * (presumably the manifests handler).
     */
    console.log('hitting our image configuration')
    let image = '';

    /*
     * We need to modify the diff_ids of the image configuration to include the
     * checksum of the gunzipped version of our custom layer
     *
     * https://medium.com/@saschagrunert/demystifying-containers-part-iii-container-images-244865de6fef
     */
    const upstreamBlob = 'sha256:fce289e99eb9bca977dae136fbe2a82b6b7d4c372474c9235adc1741675f587e'
    request({
      /*
       * Fixed: the original read `req.parmas.name` — both a typo (parmas)
       * and a non-existent route parameter; this route declares
       * :org/:image/:sha
       */
      url: `${UPSTREAM_REGISTRY}/v2/${req.params.org}/${req.params.image}/blobs/${upstreamBlob}`,
      method: req.method,
      headers: {
        /*
         * We need to send the Authorization header along as well, otherwise
         * the upstream repository might complain that we're not authorized
         */
        'Authorization' : req.get('Authorization'),
      },
    })
    .on('error', (error) => {
      /*
       * Fixed: with no 'error' listener, a connection failure to the
       * upstream would crash the whole process with an unhandled 'error'
       * event
       */
      console.log(`Failed to hit the blob API: ${error}`);
      res.status(500);
      res.send('Fail');
    })
    .on('response', (response) => {
      console.log(`Hitting upstream gave a ${response.statusCode}`);
    })
    .on('data', (chunk) => {
      image += chunk;
    })
    .on('end', () => {
      image = JSON.parse(image);
      image.rootfs.diff_ids.push(
        'sha256:5f9da7cc9d8d83c96245ac27854466f6ed89fbfade5dd8a4f307861bfb72d1b8',
      );
      console.log(image.rootfs);
      /*
       * `crypto` is now required at the top of the file; the original
       * referenced it here without ever importing it (ReferenceError)
       */
      const checksum = crypto.createHash('sha256').update(JSON.stringify(image), 'utf8').digest('hex');
      console.log(JSON.stringify(image));
      console.log(`Sending content with a checksum of ${checksum}`);
      console.log(`Client is expecting ${req.params.sha}`);
      res.send(image);
    });
  } else {
    console.log(`Piping blob upstream for ${req.originalUrl}`);
    proxyToUpstream(req, res);
  }
});
/*
 * Pass all other v2 API requests on to the upstream Docker registry
 */
app.get('/v2/*', proxyToUpstream);
// Start serving; all routes above are registered before this call
app.listen(port, () => logger.info(`Ahab is now hunting whales on port ${port}`));