@@ -1,18 +1,25 @@
 [build-system]
-requires = ["setuptools"]
 build-backend = "setuptools.build_meta"
+requires = [
+    "setuptools>=72.0.0,<80.0.0",
+    "setuptools-scm>=8.0.0,<9.0.0",
+    "wheel>=0.38.4,<1.0.0"
+]
+
+[packaging]
+package_name = "ydata-profiling"
 
 [project]
 name = "ydata-profiling"
+requires-python = ">=3.7,<3.13"
 authors = [
-    {name = "YData Labs Inc", email = "opensource@ydata.ai"},
+    {name = "YData Labs Inc", email = "opensource@ydata.ai"}
 ]
-description ="Generate profile report for pandas DataFrame"
+description = "Generate profile report for pandas DataFrame"
+keywords = ["pandas", "data-science", "data-analysis", "python", "jupyter", "ipython"]
 readme = "README.md"
-requires-python =">=3.7, <3.13"
-keywords =["pandas", "data-science", "data-analysis", "python", "jupyter", "ipython"]
-license = {text = "MIT"}
-classifiers =[
+license = {file = "LICENSE.md"}
+classifiers = [
     "Development Status :: 5 - Production/Stable",
     "Topic :: Software Development :: Build Tools",
     "License :: OSI Approved :: MIT License",
@@ -63,10 +70,12 @@ dependencies = [
     "numba>=0.56.0, <1",
 ]
 
-dynamic = ["version"]
+dynamic = [
+    "version",
+    "dependencies"
+]
 
 [project.optional-dependencies]
-# dependencies for development and testing
 dev = [
     "black>=20.8b1",
     "isort>=5.0.7",
@@ -80,6 +89,23 @@ dev = [
     "sphinx-multiversion>=0.2.3",
     "autodoc_pydantic",
 ]
+
+docs = [
+    "mkdocs>=1.6.0,<1.7.0",
+    "mkdocs-material>=9.0.12,<10.0.0",
+    "mkdocs-material-extensions>=1.1.1,<2.0.0",
+    "mkdocs-table-reader-plugin<=2.2.0",
+    "mike>=2.1.1,<2.2.0",
+    "mkdocstrings[python]>=0.20.0,<1.0.0",
+    "mkdocs-badges",
+]
+
+notebook = [
+    "jupyter>=1.0.0",
+    "ipywidgets>=7.5.1",
+    "autodoc_pydantic"
+]
+
 # this provides the recommended pyspark and pyarrow versions for spark to work on pandas-profiling
 # note that if you are using pyspark 2.3 or 2.4 and pyarrow >= 0.15, you might need to
 # set ARROW_PRE_0_15_IPC_FORMAT=1 in your conf/spark-env.sh for toPandas functions to work properly
@@ -90,6 +116,7 @@ spark = [
     "numpy>=1.16.0,<1.24",
     "visions[type_image_path]>=0.7.5, <0.7.7",
 ]
+
 test = [
     "pytest",
     "coverage>=6.5, <8",
@@ -100,35 +127,29 @@ test = [
     "twine>=3.1.1",
     "kaggle",
 ]
-notebook = [
-    "jupyter>=1.0.0",
-    "ipywidgets>=7.5.1",
-]
-docs = [
-    "mkdocs>=1.6.0,<1.7.0",
-    "mkdocs-material>=9.0.12,<10.0.0",
-    "mkdocs-material-extensions>=1.1.1,<2.0.0",
-    "mkdocs-table-reader-plugin<=2.2.0",
-    "mike>=2.1.1,<2.2.0",
-    "mkdocstrings[python]>=0.20.0,<1.0.0",
-    "mkdocs-badges",
-]
+
 unicode = [
     "tangled-up-in-unicode==0.2.0",
 ]
 
-[tool.setuptools.packages.find]
-where = ["src"]
+[project.urls]
+Homepage = "https://ydata.ai"
+Repository = "https://github.com/ydataai/ydata-profiling"
 
-[tool.setuptools.package-data]
-ydata_profiling = ["py.typed"]
+[project.scripts]
+ydata_profiling = "ydata_profiling.controller.console:main"
+pandas_profiling = "ydata_profiling.controller.console:main"
+
+# setuptools relative
 
 [tool.setuptools]
 include-package-data = true
 
-[project.scripts]
-ydata_profiling = "ydata_profiling.controller.console:main"
-pandas_profiling = "ydata_profiling.controller.console:main"
+[tool.setuptools.package-data]
+ydata_profiling = ["py.typed"]
 
-[project.urls]
-homepage = "https://github.com/ydataai/ydata-profiling"
+[tool.distutils.bdist_wheel]
+universal = true
+
+[tool.setuptools.package-dir]
+"" = "src"
0 commit comments