cargo/src/bin/cargo/main.rs

228 lines
7.2 KiB
Rust
Raw Normal View History

#![warn(rust_2018_idioms)] // while we're getting used to 2018
#![allow(clippy::redundant_closure)] // there's a false positive
2019-03-27 00:53:53 +00:00
#![warn(clippy::needless_borrow)]
2019-03-27 01:51:13 +00:00
#![warn(clippy::redundant_clone)]
2021-03-20 18:28:38 +00:00
use cargo::core::shell::Shell;
use cargo::util::CliError;
use cargo::util::{self, closest_msg, command_prelude, CargoResult, CliResult, Config};
use cargo_util::{ProcessBuilder, ProcessError};
use std::collections::{BTreeMap, BTreeSet};
2015-02-06 07:27:53 +00:00
use std::env;
use std::fs;
2017-02-06 07:02:32 +00:00
use std::path::{Path, PathBuf};
2018-03-06 21:01:03 +00:00
mod cli;
mod commands;
2018-12-06 19:17:36 +00:00
use crate::command_prelude::*;
fn main() {
#[cfg(feature = "pretty-env-logger")]
pretty_env_logger::init_custom_env("CARGO_LOG");
#[cfg(not(feature = "pretty-env-logger"))]
env_logger::init_from_env("CARGO_LOG");
2017-01-28 13:55:18 +00:00
let mut config = match Config::default() {
2017-01-28 13:55:18 +00:00
Ok(cfg) => cfg,
Err(e) => {
let mut shell = Shell::new();
2017-01-31 15:01:35 +00:00
cargo::exit_with_error(e.into(), &mut shell)
2017-01-28 13:55:18 +00:00
}
};
let result = match cargo::ops::fix_maybe_exec_rustc(&config) {
Ok(true) => Ok(()),
Ok(false) => {
let _token = cargo::util::job::setup();
cli::main(&mut config)
}
Err(e) => Err(CliError::from(e)),
2018-03-09 07:43:00 +00:00
};
match result {
2017-01-31 15:01:35 +00:00
Err(e) => cargo::exit_with_error(e, &mut *config.shell()),
2017-02-06 07:02:32 +00:00
Ok(()) => {}
2017-01-28 13:55:18 +00:00
}
}
2014-04-07 01:26:36 +00:00
/// Table of the aliases that come builtin in `Cargo`.
/// Each entry is structured as: `(alias, aliased_command, description)`.
const BUILTIN_ALIASES: [(&str, &str, &str); 4] = [
    ("b", "build", "alias: build"),
    ("c", "check", "alias: check"),
    ("r", "run", "alias: run"),
    ("t", "test", "alias: test"),
];

/// Looks up `cmd` in [`BUILTIN_ALIASES`], returning its entry (alias, aliased
/// command, description) when `cmd` is one of the builtin aliases.
fn builtin_aliases_execs(cmd: &str) -> Option<&(&str, &str, &str)> {
    for alias in &BUILTIN_ALIASES {
        if alias.0 == cmd {
            return Some(alias);
        }
    }
    None
}
2017-09-24 14:26:37 +00:00
fn aliased_command(config: &Config, command: &str) -> CargoResult<Option<Vec<String>>> {
let alias_name = format!("alias.{}", command);
2018-12-05 17:29:10 +00:00
let user_alias = match config.get_string(&alias_name) {
Ok(Some(record)) => Some(
record
.val
.split_whitespace()
.map(|s| s.to_string())
.collect(),
),
Ok(None) => None,
Err(_) => config.get::<Option<Vec<String>>>(&alias_name)?,
2018-12-05 17:29:10 +00:00
};
2021-03-01 03:03:06 +00:00
let result = user_alias.or_else(|| {
builtin_aliases_execs(command).map(|command_str| vec![command_str.1.to_string()])
2018-12-05 17:29:10 +00:00
});
Ok(result)
}
2018-03-11 06:38:59 +00:00
/// List all runnable commands
fn list_commands(config: &Config) -> BTreeSet<CommandInfo> {
2018-03-11 06:38:59 +00:00
let prefix = "cargo-";
let suffix = env::consts::EXE_SUFFIX;
let mut commands = BTreeSet::new();
for dir in search_directories(config) {
let entries = match fs::read_dir(dir) {
Ok(entries) => entries,
_ => continue,
};
for entry in entries.filter_map(|e| e.ok()) {
let path = entry.path();
let filename = match path.file_name().and_then(|s| s.to_str()) {
Some(filename) => filename,
_ => continue,
};
if !filename.starts_with(prefix) || !filename.ends_with(suffix) {
continue;
}
if is_executable(entry.path()) {
let end = filename.len() - suffix.len();
commands.insert(CommandInfo::External {
name: filename[prefix.len()..end].to_string(),
path: path.clone(),
});
2018-03-11 06:38:59 +00:00
}
}
}
for cmd in commands::builtin() {
commands.insert(CommandInfo::BuiltIn {
name: cmd.get_name().to_string(),
about: cmd.p.meta.about.map(|s| s.to_string()),
});
2018-03-11 06:38:59 +00:00
}
// Add the builtin_aliases and them descriptions to the
// `commands` `BTreeSet`.
for command in &BUILTIN_ALIASES {
commands.insert(CommandInfo::BuiltIn {
name: command.0.to_string(),
about: Some(command.2.to_string()),
});
}
2018-03-11 06:38:59 +00:00
commands
}
/// List all runnable aliases (the keys of the `alias` config table),
/// or an empty list if the table cannot be read.
fn list_aliases(config: &Config) -> Vec<String> {
    config
        .get::<BTreeMap<String, String>>("alias")
        .map(|aliases| aliases.keys().cloned().collect())
        .unwrap_or_default()
}
2018-03-08 19:24:16 +00:00
fn execute_external_subcommand(config: &Config, cmd: &str, args: &[&str]) -> CliResult {
let command_exe = format!("cargo-{}{}", cmd, env::consts::EXE_SUFFIX);
let path = search_directories(config)
2017-02-06 07:02:32 +00:00
.iter()
.map(|dir| dir.join(&command_exe))
.find(|file| is_executable(file));
let command = match path {
Some(command) => command,
None => {
let commands: Vec<String> = list_commands(config)
.iter()
.map(|c| c.name().to_string())
.collect();
let aliases = list_aliases(config);
let suggestions = commands.iter().chain(aliases.iter());
let did_you_mean = closest_msg(cmd, suggestions, |c| c);
let err = anyhow::format_err!("no such subcommand: `{}`{}", cmd, did_you_mean);
2018-03-14 15:17:44 +00:00
return Err(CliError::new(err, 101));
}
};
let cargo_exe = config.cargo_exe()?;
2021-03-20 18:28:38 +00:00
let err = match ProcessBuilder::new(&command)
.env(cargo::CARGO_ENV, cargo_exe)
.args(args)
2018-03-14 15:17:44 +00:00
.exec_replace()
{
Ok(()) => return Ok(()),
Err(e) => e,
};
if let Some(perr) = err.downcast_ref::<ProcessError>() {
if let Some(code) = perr.code {
return Err(CliError::code(code));
}
}
Err(CliError::new(err, 101))
}
#[cfg(unix)]
/// A file is "executable" on Unix when it is a regular file with any of the
/// execute permission bits (0o111) set.
fn is_executable<P: AsRef<Path>>(path: P) -> bool {
    use std::os::unix::prelude::*;
    match fs::metadata(path) {
        Ok(metadata) => metadata.is_file() && metadata.permissions().mode() & 0o111 != 0,
        // A missing or unreadable path is simply not executable.
        Err(_) => false,
    }
}
#[cfg(windows)]
/// Windows has no execute-permission bit, so any regular file counts as
/// executable. Written via `fs::metadata` to mirror the Unix variant;
/// this is exactly what `Path::is_file` does internally.
fn is_executable<P: AsRef<Path>>(path: P) -> bool {
    fs::metadata(path)
        .map(|metadata| metadata.is_file())
        .unwrap_or(false)
}
fn search_directories(config: &Config) -> Vec<PathBuf> {
Fix running Cargo concurrently Cargo has historically had no protections against running it concurrently. This is pretty unfortunate, however, as it essentially just means that you can only run one instance of Cargo at a time **globally on a system**. An "easy solution" to this would be the use of file locks, except they need to be applied judiciously. It'd be a pretty bad experience to just lock the entire system globally for Cargo (although it would work), but otherwise Cargo must be principled how it accesses the filesystem to ensure that locks are properly held. This commit intends to solve all of these problems. A new utility module is added to cargo, `util::flock`, which contains two types: * `FileLock` - a locked version of a `File`. This RAII guard will unlock the lock on `Drop` and I/O can be performed through this object. The actual underlying `Path` can be read from this object as well. * `Filesystem` - an unlocked representation of a `Path`. There is no "safe" method to access the underlying path without locking a file on the filesystem first. Built on the [fs2] library, these locks use the `flock` system call on Unix and `LockFileEx` on Windows. Although file locking on Unix is [documented as not so great][unix-bad], but largely only because of NFS, these are just advisory, and there's no byte-range locking. These issues don't necessarily plague Cargo, however, so we should try to leverage them. On both Windows and Unix the file locks are released when the underlying OS handle is closed, which means that if the process dies the locks are released. Cargo has a number of global resources which it now needs to lock, and the strategy is done in a fairly straightforward way: * Each registry's index contains one lock (a dotfile in the index). Updating the index requires a read/write lock while reading the index requires a shared lock. 
This should allow each process to ensure a registry update happens while not blocking out others for an unnecessarily long time. Additionally any number of processes can read the index. * When downloading crates, each downloaded crate is individually locked. A lock for the downloaded crate implies a lock on the output directory as well. Because downloaded crates are immutable, once the downloaded directory exists the lock is no longer needed as it won't be modified, so it can be released. This granularity of locking allows multiple Cargo instances to download dependencies in parallel. * Git repositories have separate locks for the database and for the project checkout. The datbase and checkout are locked for read/write access when an update is performed, and the lock of the checkout is held for the entire lifetime of the git source. This is done to ensure that any other Cargo processes must wait while we use the git repository. Unfortunately there's just not that much parallelism here. * Binaries managed by `cargo install` are locked by the local metadata file that Cargo manages. This is relatively straightforward. * The actual artifact output directory is just globally locked for the entire build. It's hypothesized that running Cargo concurrently in *one directory* is less of a feature needed rather than running multiple instances of Cargo globally (for now at least). It would be possible to have finer grained locking here, but that can likely be deferred to a future PR. So with all of this infrastructure in place, Cargo is now ready to grab some locks and ensure that you can call it concurrently anywhere at any time and everything always works out as one might expect. One interesting question, however, is what does Cargo do on contention? On one hand Cargo could immediately abort, but this would lead to a pretty poor UI as any Cargo process on the system could kick out any other. Instead this PR takes a more nuanced approach. 
* First, all locks are attempted to be acquired (a "try lock"). If this succeeds, we're done. * Next, Cargo prints a message to the console that it's going to block waiting for a lock. This is done because it's indeterminate how long Cargo will wait for the lock to become available, and most long-lasting operations in Cargo have a message printed for them. * Finally, a blocking acquisition of the lock is issued and we wait for it to become available. So all in all this should help Cargo fix any future concurrency bugs with file locking in a principled fashion while also allowing concurrent Cargo processes to proceed reasonably across the system. [fs2]: https://github.com/danburkert/fs2-rs [unix-bad]: http://0pointer.de/blog/projects/locking.html Closes #354
2016-03-12 17:58:53 +00:00
let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")];
2015-02-13 04:10:07 +00:00
if let Some(val) = env::var_os("PATH") {
2015-02-06 07:27:53 +00:00
dirs.extend(env::split_paths(&val));
}
2015-12-12 18:19:11 +00:00
dirs
2014-04-07 01:26:36 +00:00
}
/// Registers a curl-backed git transport, but only when the configuration
/// actually needs one.
fn init_git_transports(config: &Config) {
    // Only use a custom transport if any HTTP options are specified, such as
    // proxies or custom certificate authorities. The custom transport,
    // however, is not as well battle-tested.
    if !matches!(cargo::ops::needs_custom_http_transport(config), Ok(true)) {
        return;
    }

    let handle = match cargo::ops::http_handle(config) {
        Ok(handle) => handle,
        Err(..) => return,
    };

    // The unsafety of the registration function derives from two aspects:
    //
    // 1. This call must be synchronized with all other registration calls as
    //    well as construction of new transports — we're clear because this is
    //    only called at the start of this binary (we know what the state of
    //    the world looks like).
    // 2. The argument is leaked — mostly acceptable because we'd only free it
    //    after everything is done anyway.
    unsafe {
        git2_curl::register(handle);
    }
}