PERF: read_csv() very slow after updating to version 1.3.4 for CSV file with large number of columns · Issue #44106 · pandas-dev/pandas
- I have checked that this issue has not already been reported.
- I have confirmed this issue exists on the latest version of pandas.
- I have confirmed this issue exists on the master branch of pandas.
Reproducible Example
For the sake of this bug report, I created a relatively small CSV file of random numbers. My real CSV has shape (200, 662688), and for that file updating to pandas 1.3.4 increased the loading time from 1 minute to over 2 hours!
My investigation showed that all 1.3.* versions have the same issue.
import os
import pandas
import numpy
import timeit


def generate_sample():
    if not os.path.exists("test_small.csv.gz"):
        nb_col = 100000
        nb_row = 5
        feature_list = {'sample': ['s_' + str(i + 1) for i in range(nb_row)]}
        for i in range(nb_col):
            feature_list.update(
                {'feature_' + str(i + 1): list(numpy.random.uniform(low=0, high=10, size=nb_row))}
            )
        df = pandas.DataFrame(feature_list)
        df.to_csv("test_small.csv.gz", index=False, float_format="%.6f")


def load_csv_file():
    col_names = pandas.read_csv("test_small.csv.gz", low_memory=False, nrows=1).columns
    types_dict = {col: numpy.float32 for col in col_names}
    types_dict.update({'sample': str})
    feature_df = pandas.read_csv(
        "test_small.csv.gz",
        index_col="sample",
        na_filter=False,
        dtype=types_dict,
        low_memory=False,
    )
    print("loaded dataframe shape:", feature_df.shape)

>>> generate_sample()
>>> pandas.__version__
'1.3.4'
>>> timeit.timeit(load_csv_file, number=1)
loaded dataframe shape: (5, 100000)
216.38200060278177
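As a side note for anyone reproducing this, the slow call can be profiled with the standard-library cProfile and pstats modules to narrow down where the time goes. This is only an illustrative sketch, not part of the timings above; the output file name and the number of rows printed are arbitrary choices.

import cProfile
import pstats

# Profile a single call to the loader defined above. cProfile.run()
# executes the statement in the __main__ namespace, so load_csv_file
# must already be defined there (as in the script above).
cProfile.run("load_csv_file()", "load_csv.prof")

# Print the 20 entries with the largest cumulative time.
stats = pstats.Stats("load_csv.prof")
stats.sort_stats("cumulative").print_stats(20)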
Installed Versions
INSTALLED VERSIONS
commit : 945c9ed
python : 3.8.10.final.0
python-bits : 64
OS : Linux
OS-release : 5.4.0-67-generic
Version : #75-Ubuntu SMP Fri Feb 19 18:03:38 UTC 2021
machine : x86_64
processor : x86_64
byteorder : little
LC_ALL : None
LANG : en_US.UTF-8
LOCALE : en_US.UTF-8
pandas : 1.3.4
numpy : 1.21.2
pytz : 2021.3
dateutil : 2.8.2
pip : 21.3
setuptools : 44.0.0
Cython : 0.29.24
pytest : None
hypothesis : None
sphinx : None
blosc : None
feather : None
xlsxwriter : None
lxml.etree : None
html5lib : None
pymysql : None
psycopg2 : None
jinja2 : 3.0.2
IPython : 7.28.0
pandas_datareader: None
bs4 : None
bottleneck : None
fsspec : None
fastparquet : None
gcsfs : None
matplotlib : 3.4.3
numexpr : None
odfpy : None
openpyxl : None
pandas_gbq : None
pyarrow : None
pyxlsb : None
s3fs : None
scipy : 1.7.1
sqlalchemy : None
tables : None
tabulate : None
xarray : None
xlrd : None
xlwt : None
numba : None
Prior Performance
Running the same generate_sample() and load_csv_file() script as above, but with pandas 1.2.5:
>>> generate_sample()
>>> pandas.__version__
'1.2.5'
>>> timeit.timeit(load_csv_file, number=1)
loaded dataframe shape: (5, 100000)
6.001252222806215
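A possible workaround, assuming the slowdown is tied to passing a per-column dtype mapping for a very wide file (not confirmed here), is to read the file without the dtype argument and cast afterwards. A sketch, reusing the file from the script above:

def load_csv_file_workaround():
    # Read without the per-column dtype mapping, then downcast all
    # feature columns to float32 in a single astype() call.
    feature_df = pandas.read_csv(
        "test_small.csv.gz",
        index_col="sample",
        na_filter=False,
        low_memory=False,
    )
    feature_df = feature_df.astype(numpy.float32)
    print("loaded dataframe shape:", feature_df.shape)

Timing it the same way, e.g. timeit.timeit(load_csv_file_workaround, number=1), makes it easy to compare against the numbers above.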