Commit 530620f

remove docstrings
1 parent ec1bc7e commit 530620f

1 file changed: +0 −23 lines

awswrangler/s3/_write_deltalake.py

Lines changed: 0 additions & 23 deletions
@@ -144,16 +144,6 @@ def _df_iter_to_record_batch_reader(
     target_schema: pa.Schema | None = None,
     batch_size: int | None = None,
 ) -> tuple[pa.RecordBatchReader, pa.Schema]:
-    """
-    Convert an iterable of Pandas DataFrames into a single Arrow RecordBatchReader
-    suitable for a single delta-rs commit. The first *non-empty* DataFrame fixes the schema.
-
-    Returns
-    -------
-    (reader, schema)
-        reader: pa.RecordBatchReader streaming all chunks as Arrow batches
-        schema: pa.Schema used for conversion
-    """
     it = iter(df_iter)

     first_df: pd.DataFrame | None = None
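
The removed docstring describes the conversion this helper performs: an iterable of DataFrames becomes a single Arrow RecordBatchReader, with the first non-empty DataFrame fixing the schema. Below is a minimal standalone sketch of that pattern for context; it is not the awswrangler implementation, and the helper name `dfs_to_reader` and its error handling are illustrative only.

# Sketch: stream an iterable of DataFrames through one RecordBatchReader,
# letting the first non-empty frame fix the schema. Illustrative only.
import itertools
from typing import Iterable, Iterator

import pandas as pd
import pyarrow as pa


def dfs_to_reader(df_iter: Iterable[pd.DataFrame]) -> tuple[pa.RecordBatchReader, pa.Schema]:
    it = iter(df_iter)
    # Advance past empty chunks; the first non-empty one defines the schema.
    first_df = next((df for df in it if not df.empty), None)
    if first_df is None:
        raise ValueError("all chunks are empty")
    schema = pa.Schema.from_pandas(first_df, preserve_index=False)

    def batches() -> Iterator[pa.RecordBatch]:
        for df in itertools.chain([first_df], it):
            if df.empty:
                continue
            # Coerce every chunk to the fixed schema so all batches agree.
            yield from pa.Table.from_pandas(df, schema=schema, preserve_index=False).to_batches()

    return pa.RecordBatchReader.from_batches(schema, batches()), schema


# Two chunks become one streaming reader with a single schema.
reader, schema = dfs_to_reader(
    pd.DataFrame({"id": [i], "value": [float(i)]}) for i in range(2)
)
print(reader.read_all().num_rows)  # 2
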
@@ -207,19 +197,6 @@ def to_deltalake_streaming(
     max_rows_per_file: int | None = None,
     target_file_size: int | None = None,
 ) -> None:
-    """
-    Write an iterable/generator of Pandas DataFrames to S3 as a Delta Lake table
-    in a SINGLE atomic commit (one table version).
-
-    Use this for large "restatements" that are produced in chunks. Semantics mirror
-    `to_deltalake` (partitioning, schema handling, S3 locking, etc.).
-
-    Notes
-    -----
-    - The schema is fixed by the first *non-empty* chunk (plus any `dtype` coercions).
-    - All `partition_cols` must be present in every non-empty chunk.
-    - Prefer `lock_dynamodb_table` over `s3_allow_unsafe_rename=True` on S3.
-    """
     dtype = dtype or {}

     storage_options = _set_default_storage_options_kwargs(
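
The removed notes describe the intended call pattern: feed a generator of chunks and get one atomic table version. The sketch below is a hypothetical usage under those notes; `max_rows_per_file` and `target_file_size` are visible in the signature above, while the entry point `wr.s3.to_deltalake_streaming`, the `path`, `partition_cols`, and `lock_dynamodb_table` arguments, and passing the generator as the first argument are assumptions borrowed from the statement that semantics mirror `to_deltalake`.

# Hypothetical usage of the streaming writer documented above; parameter names
# other than max_rows_per_file / target_file_size (visible in this diff) are
# assumptions mirroring wr.s3.to_deltalake.
from typing import Iterator

import awswrangler as wr
import pandas as pd


def restatement_chunks() -> Iterator[pd.DataFrame]:
    # Every non-empty chunk must carry the partition column; the first
    # non-empty chunk fixes the schema (see the removed Notes section).
    for day in ("2024-01-01", "2024-01-02"):
        yield pd.DataFrame({"event_date": [day] * 3, "value": [1.0, 2.0, 3.0]})


wr.s3.to_deltalake_streaming(
    restatement_chunks(),                 # iterable/generator of DataFrames (assumed first argument)
    path="s3://my-bucket/delta/events/",  # assumed to mirror to_deltalake
    partition_cols=["event_date"],        # must be present in every non-empty chunk
    max_rows_per_file=1_000_000,          # shown in the signature above
    lock_dynamodb_table="delta-lock",     # preferred over s3_allow_unsafe_rename=True
)
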
