chore: fix typos (#1813)

xiaolou86 2023-11-07 12:47:31 +08:00 committed by GitHub
parent b3f478e65f
commit 7090a1260f
10 changed files with 15 additions and 15 deletions

View File

@@ -87,7 +87,7 @@ impl ListingSchemaProvider {
}
}
-// noramalizes a path fragement to be a valida table name in datafusion
+// normalizes a path fragment to be a valid table name in datafusion
// - removes some reserved characters (-, +, ., " ")
// - lowercase ascii
fn normalize_table_name(path: &Path) -> Result<String, DataFusionError> {
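The comment being fixed here doubles as a small spec. A minimal sketch of what it describes, assuming a plain `&str` fragment instead of the crate's object_store `Path` and skipping the `DataFusionError` plumbing:

```rust
/// Hedged sketch of the normalization rules listed in the comment above.
fn normalize_table_name_sketch(fragment: &str) -> String {
    fragment
        .chars()
        .filter(|&c| !matches!(c, '-' | '+' | '.' | ' ')) // remove reserved characters
        .map(|c| c.to_ascii_lowercase()) // lowercase ascii
        .collect()
}

// normalize_table_name_sketch("My-Table.v1") == "mytablev1"
```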

View File

@@ -385,7 +385,7 @@ pub(crate) fn logical_schema(
}
#[derive(Debug, Clone, Default)]
-/// Used to specify if additonal metadata columns are exposed to the user
+/// Used to specify if additional metadata columns are exposed to the user
pub struct DeltaScanConfigBuilder {
/// Include the source path for each record. The name of this column is determined by `file_column_name`
include_file_column: bool,
@@ -458,7 +458,7 @@ impl DeltaScanConfigBuilder {
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
-/// Include additonal metadata columns during a [`DeltaScan`]
+/// Include additional metadata columns during a [`DeltaScan`]
pub struct DeltaScanConfig {
/// Include the source path for each record
pub file_column_name: Option<String>,
@@ -711,7 +711,7 @@ impl TableProvider for DeltaTable {
}
}
-/// A Delta table provider that enables additonal metadata columns to be included during the scan
+/// A Delta table provider that enables additional metadata columns to be included during the scan
pub struct DeltaTableProvider {
snapshot: DeltaTableState,
store: ObjectStoreRef,
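The hunks above trace one design: `DeltaScanConfigBuilder` decides whether the source-path column is exposed, the serializable `DeltaScanConfig` carries the chosen column name, and `DeltaTableProvider` consumes it during the scan. A self-contained sketch of that builder-to-config shape; the method names and the `"file_path"` default are assumptions for illustration, not the crate's confirmed API:

```rust
#[derive(Debug, Clone, Default)]
struct ScanConfigSketch {
    /// Name of the column carrying each record's source path, if exposed.
    file_column_name: Option<String>,
}

#[derive(Debug, Clone, Default)]
struct ScanConfigBuilderSketch {
    include_file_column: bool,
    file_column_name: Option<String>,
}

impl ScanConfigBuilderSketch {
    /// Expose the source-path column under a caller-chosen name.
    fn with_file_column_name(mut self, name: &str) -> Self {
        self.include_file_column = true;
        self.file_column_name = Some(name.to_string());
        self
    }

    fn build(self) -> ScanConfigSketch {
        ScanConfigSketch {
            // "file_path" is a placeholder default, not the crate's.
            file_column_name: self
                .include_file_column
                .then(|| self.file_column_name.unwrap_or_else(|| "file_path".into())),
        }
    }
}
```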

View File

@@ -255,7 +255,7 @@ pub enum WriterFeatures {
DomainMetadata,
/// version 2 of checkpointing
V2Checkpoint,
-/// Iceberg compatability support
+/// Iceberg compatibility support
IcebergCompatV1,
/// If we do not match any other writer features
#[serde(untagged)]
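The `#[serde(untagged)]` fallback visible above is what lets a reader tolerate feature names it does not know. A trimmed, self-contained sketch of the pattern (variant-level `#[serde(untagged)]` needs serde 1.0.181 or newer; the enum name here is illustrative):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
enum WriterFeatureSketch {
    DomainMetadata,
    V2Checkpoint,
    IcebergCompatV1,
    /// Any name we do not recognize lands here instead of failing.
    #[serde(untagged)]
    Other(String),
}

fn main() {
    let known: WriterFeatureSketch = serde_json::from_str("\"v2Checkpoint\"").unwrap();
    assert_eq!(known, WriterFeatureSketch::V2Checkpoint);

    let unknown: WriterFeatureSketch = serde_json::from_str("\"futureFeature\"").unwrap();
    assert_eq!(unknown, WriterFeatureSketch::Other("futureFeature".into()));
}
```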

View File

@@ -47,7 +47,7 @@ use crate::DeltaTable;
use super::datafusion_utils::Expression;
/// Delete Records from the Delta Table.
-/// See this module's documentaiton for more information
+/// See this module's documentation for more information
pub struct DeleteBuilder {
/// Which records to delete
predicate: Option<Expression>,
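For context, a hedged usage sketch of the builder this doc comment introduces; the `DeltaOps` entry point and the string-to-`Expression` conversion are assumed here rather than shown in this diff:

```rust
use deltalake::{DeltaOps, DeltaTable, DeltaTableError};

/// Delete only the rows matching the predicate; with no predicate set,
/// the builder would delete every record.
async fn delete_stale(table: DeltaTable) -> Result<(), DeltaTableError> {
    let (_table, _metrics) = DeltaOps(table)
        .delete()
        .with_predicate("created_at < '2020-01-01'")
        .await?;
    Ok(())
}
```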

View File

@@ -34,7 +34,7 @@ use crate::table::state::DeltaTableState;
use crate::DeltaTable;
/// Audit the Delta Table's active files with the underlying file system.
-/// See this module's documentaiton for more information
+/// See this module's documentation for more information
#[derive(Debug)]
pub struct FileSystemCheckBuilder {
/// A snapshot of the to-be-checked table's state
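The integration test further down in this commit shows the call shape; condensed here for reference (the `files_removed` metrics field is an assumption, the rest mirrors the test):

```rust
use deltalake::{DeltaOps, DeltaTable, DeltaTableError};

/// Audit active files against the object store; add actions whose backing
/// files are gone get removed, and the metrics report what changed.
async fn audit(table: DeltaTable) -> Result<(), DeltaTableError> {
    let op = DeltaOps::from(table);
    let (table, metrics) = op.filesystem_check().await?;
    println!("version after check: {}", table.state.version());
    println!("orphaned files removed: {:?}", metrics.files_removed);
    Ok(())
}
```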

View File

@@ -114,7 +114,7 @@ impl UpdateBuilder {
self
}
-/// Perform an additonal update expression during the operaton
+/// Perform an additional update expression during the operation
pub fn with_update<S: Into<Column>, E: Into<Expression>>(
mut self,
column: S,
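Given the bounds visible above (`S: Into<Column>`, `E: Into<Expression>`), a hedged usage sketch; the `DeltaOps` entry point and the string conversions are assumptions:

```rust
use deltalake::{DeltaOps, DeltaTable, DeltaTableError};

/// Each with_update call registers one column assignment; all of them
/// are applied in the same update operation.
async fn stamp_rows(table: DeltaTable) -> Result<(), DeltaTableError> {
    let (_table, _metrics) = DeltaOps(table)
        .update()
        .with_predicate("id = 42")
        .with_update("modified", "'2023-11-07'")
        .await?;
    Ok(())
}
```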

View File

@@ -335,7 +335,7 @@ fn deserialize_add_column_page(
},
)?;
}
-// FIXME suport partitionValueParsed
+// FIXME support partitionValueParsed
"dataChange" => {
for_each_boolean_field_value(
actions,
@@ -420,7 +420,7 @@ fn deserialize_remove_column_page(
|action: &mut Remove, v: i64| action.size = Some(v),
)?;
}
-// FIXME suport partitionValueParsed
+// FIXME support partitionValueParsed
"partitionValues" => {
for_each_map_field_value(
&field[1..],
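Both hunks above follow one pattern: a `for_each_*_field_value` helper walks a decoded parquet column and hands each value to a closure that writes it onto the action being rebuilt. A self-contained sketch of that shape; the helper, struct, and field names are illustrative, not the crate's:

```rust
#[derive(Debug, Default)]
struct RemoveSketch {
    size: Option<i64>,
}

/// Walk one decoded column and let the closure write each value onto the
/// matching action, mirroring the for_each_*_field_value helpers above.
fn for_each_i64_value<F>(actions: &mut [RemoveSketch], column: &[i64], mut set: F)
where
    F: FnMut(&mut RemoveSketch, i64),
{
    for (action, value) in actions.iter_mut().zip(column) {
        set(action, *value);
    }
}

fn main() {
    let mut actions = vec![RemoveSketch::default(), RemoveSketch::default()];
    for_each_i64_value(&mut actions, &[100, 250], |a, v| a.size = Some(v));
    assert_eq!(actions[1].size, Some(250));
}
```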

View File

@@ -165,7 +165,7 @@ pub fn record_batch_from_message(
// a happy middle-road might be to compute stats for partition columns only on the initial write since we should validate partition values anyway, and compute additional stats later (at checkpoint time perhaps?).
// also this does not currently support nested partition columns and many other data types.
// TODO is this comment still valid? We should now be sure that the arrays where this
-// gets aplied have a single unique value
+// gets applied have a single unique value
pub(crate) fn stringified_partition_value(
arr: &Arc<dyn Array>,
) -> Result<Option<String>, DeltaWriterError> {
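A hedged sketch of the idea in the comment above: because the array is assumed to hold a single unique value, stringifying index 0 stands for the whole column. Only two Arrow types are handled and the error plumbing is dropped:

```rust
use std::sync::Arc;

use arrow::array::{Array, Int64Array, StringArray};

fn stringified_partition_value_sketch(arr: &Arc<dyn Array>) -> Option<String> {
    // The caller guarantees a single unique value, so index 0 is representative.
    if arr.is_empty() || arr.is_null(0) {
        return None;
    }
    if let Some(s) = arr.as_any().downcast_ref::<StringArray>() {
        return Some(s.value(0).to_string());
    }
    if let Some(i) = arr.as_any().downcast_ref::<Int64Array>() {
        return Some(i.value(0).to_string());
    }
    None // remaining data types elided in this sketch
}
```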

View File

@@ -75,7 +75,7 @@ async fn test_filesystem_check(storage: StorageIntegration) -> TestResult {
let remove = table.state.all_tombstones().get(file).unwrap();
assert!(remove.data_change);
-// An additonal run should return an empty list of orphaned actions
+// An additional run should return an empty list of orphaned actions
let op = DeltaOps::from(table);
let (table, metrics) = op.filesystem_check().await?;
assert_eq!(version + 1, table.state.version());
@@ -147,7 +147,7 @@ async fn test_filesystem_check_fails_for_concurrent_delete() -> TestResult {
#[tokio::test]
#[serial]
#[ignore = "should this actually fail? with conflcit resolution, we are re-trying again."]
#[ignore = "should this actually fail? with conflict resolution, we are re-trying again."]
async fn test_filesystem_check_outdated() -> TestResult {
// Validate failure when a non-dry-run executes against an outdated table version
let context = IntegrationContext::new(StorageIntegration::Local)?;

View File

@@ -221,7 +221,7 @@ impl Model for AtomicRenameSys {
WriterState::RepairRenameReturned => {
match writer.rename_err {
Some(RenameErr::AlreadyExists) => {
-// already reapired by other writer
+// already repaired by another writer
// TODO: still need to perform the delete cleanup?
actions.push(Action::UpdateLockData(wid));
}
@@ -456,7 +456,7 @@ impl Model for AtomicRenameSys {
if state.blob_store_deleted(src) {
let mut writer = &mut state.writer_ctx[wid];
// source object cleaned up by another worker's repair, it's not a real
-// conflict, save to assume the rename was successfull
+// conflict, safe to assume the rename was successful
writer.state = WriterState::Shutdown;
} else {
let mut writer = &mut state.writer_ctx[wid];
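The two hunks above encode the model's repair reasoning: `AlreadyExists` during a repair rename means another writer got there first, and a missing source object means another worker's cleanup ran after a successful rename. A hedged distillation of that decision logic; the types are illustrative, not the model's:

```rust
/// Illustrative outcome of retrying a rename during repair.
enum RepairOutcome {
    Renamed,
    AlreadyExists,
    SourceMissing,
}

/// How the model above interprets each outcome.
fn interpret(outcome: RepairOutcome) -> &'static str {
    match outcome {
        RepairOutcome::Renamed => "this writer completed the rename itself",
        // Another writer already repaired it; only cleanup may remain.
        RepairOutcome::AlreadyExists => "already repaired by another writer",
        // Source cleaned up by another worker's repair: not a real conflict,
        // safe to assume the rename succeeded.
        RepairOutcome::SourceMissing => "rename succeeded; shut down",
    }
}
```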