Skip to content

Commit 0ff2042

Browse files
authored
Feat: add Docling parser (#10759)
### What problem does this PR solve? issue: #3945 change: add Docling parser ### Type of change - [x] New Feature (non-breaking change which adds functionality)
1 parent de24e74 commit 0ff2042

File tree

5 files changed

+378
-0
lines changed

5 files changed

+378
-0
lines changed

deepdoc/parser/docling_parser.py

Lines changed: 344 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,344 @@
1+
#
2+
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
#
16+
from __future__ import annotations
17+
18+
import logging
19+
import re
20+
from dataclasses import dataclass
21+
from enum import Enum
22+
from io import BytesIO
23+
from os import PathLike
24+
from pathlib import Path
25+
from typing import Any, Callable, Iterable, Optional
26+
27+
import pdfplumber
28+
from PIL import Image
29+
30+
try:
31+
from docling.document_converter import DocumentConverter
32+
except Exception:
33+
DocumentConverter = None
34+
35+
try:
36+
from deepdoc.parser.pdf_parser import RAGFlowPdfParser
37+
except Exception:
38+
class RAGFlowPdfParser:
39+
pass
40+
41+
42+
class DoclingContentType(str, Enum):
    """Kinds of content items extracted from a Docling document.

    Values are plain strings so they can be compared directly against the
    type tags yielded by ``DoclingParser._iter_doc_items``.
    """
    IMAGE = "image"
    TABLE = "table"
    TEXT = "text"
    EQUATION = "equation"
47+
48+
49+
@dataclass
class _BBox:
    """Bounding box of one document item, as reported by Docling provenance.

    Coordinates are copied from docling's ``bbox`` attributes l/t/r/b as
    (x0, y0, x1, y1).  NOTE(review): docling boxes appear to use a
    bottom-left origin — downstream code flips y against the page height
    (see ``_make_line_tag`` / ``cropout_docling_table``); confirm.
    """
    # 1-based page number from docling provenance.
    page_no: int
    x0: float
    y0: float
    x1: float
    y1: float
56+
57+
58+
class DoclingParser(RAGFlowPdfParser):
    """PDF parser backed by the optional ``docling`` package.

    Produces ``(sections, tables)`` in the same shape as the project's other
    PDF parsers, and renders page images with pdfplumber so sections can be
    tagged with, and cropped by, page positions.
    """

    def __init__(self):
        # Per-class logger (name = "DoclingParser").
        self.logger = logging.getLogger(self.__class__.__name__)
        # Page bitmaps rendered by __images__(); used for tags and crops.
        self.page_images: list[Image.Image] = []
        # Page window rendered by __images__(); page_to default is effectively "all pages".
        self.page_from = 0
        self.page_to = 10_000
64+
65+
def check_installation(self) -> bool:
66+
if DocumentConverter is None:
67+
self.logger.warning("[Docling] 'docling' is not importable, please: pip install docling")
68+
return False
69+
try:
70+
_ = DocumentConverter()
71+
return True
72+
except Exception as e:
73+
self.logger.error(f"[Docling] init DocumentConverter failed: {e}")
74+
return False
75+
76+
def __images__(self, fnm, zoomin: int = 1, page_from=0, page_to=600, callback=None):
77+
self.page_from = page_from
78+
self.page_to = page_to
79+
try:
80+
opener = pdfplumber.open(fnm) if isinstance(fnm, (str, PathLike)) else pdfplumber.open(BytesIO(fnm))
81+
with opener as pdf:
82+
pages = pdf.pages[page_from:page_to]
83+
self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).original for p in pages]
84+
except Exception as e:
85+
self.page_images = []
86+
self.logger.exception(e)
87+
88+
def _make_line_tag(self,bbox: _BBox) -> str:
89+
if bbox is None:
90+
return ""
91+
x0,x1, top, bott = bbox.x0, bbox.x1, bbox.y0, bbox.y1
92+
if hasattr(self, "page_images") and self.page_images and len(self.page_images) >= bbox.page_no:
93+
_, page_height = self.page_images[bbox.page_no-1].size
94+
top, bott = page_height-top ,page_height-bott
95+
return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##".format(
96+
bbox.page_no, x0,x1, top, bott
97+
)
98+
99+
@staticmethod
100+
def extract_positions(txt: str) -> list[tuple[list[int], float, float, float, float]]:
101+
poss = []
102+
for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", txt):
103+
pn, left, right, top, bottom = tag.strip("#").strip("@").split("\t")
104+
left, right, top, bottom = float(left), float(right), float(top), float(bottom)
105+
poss.append(([int(p) - 1 for p in pn.split("-")], left, right, top, bottom))
106+
return poss
107+
108+
    def crop(self, text: str, ZM: int = 1, need_position: bool = False):
        """Crop page-image strips for every position tag embedded in *text* and
        stitch them vertically into one image.

        Returns the stitched PIL image (or ``(image, positions)`` when
        ``need_position`` is True), or ``None`` / ``(None, None)`` when *text*
        carries no tags.  ``positions`` are ``(page, x0, x1, y0, y1)`` tuples
        for the real (non-context) strips only.
        """
        imgs = []
        poss = self.extract_positions(text)
        if not poss:
            return (None, None) if need_position else None

        GAP = 6
        # Prepend/append context strips (up to 120px above the first tag and
        # below the last tag) so the crop shows surrounding page content.
        pos = poss[0]
        poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(0, pos[3] - 120), max(pos[3] - GAP, 0)))
        pos = poss[-1]
        poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1], pos[4] + GAP), min(self.page_images[pos[0][-1]].size[1], pos[4] + 120)))
        positions = []
        for ii, (pns, left, right, top, bottom) in enumerate(poss):
            # Guarantee a minimum strip height of 4px.
            if bottom <= top:
                bottom = top + 4
            img0 = self.page_images[pns[0]]
            x0, y0, x1, y1 = int(left), int(top), int(right), int(min(bottom, img0.size[1]))

            crop0 = img0.crop((x0, y0, x1, y1))
            imgs.append(crop0)
            # Record positions only for the original tags, not the two
            # context strips inserted above (indices 0 and len-1).
            if 0 < ii < len(poss)-1:
                positions.append((pns[0] + self.page_from, x0, x1, y0, y1))
            # A tag spanning multiple pages continues onto the following
            # pages; carry the overflow height across them.
            remain_bottom = bottom - img0.size[1]
            for pn in pns[1:]:
                if remain_bottom <= 0:
                    break
                page = self.page_images[pn]
                x0, y0, x1, y1 = int(left), 0, int(right), int(min(remain_bottom, page.size[1]))
                cimgp = page.crop((x0, y0, x1, y1))
                imgs.append(cimgp)
                if 0 < ii < len(poss) - 1:
                    positions.append((pn + self.page_from, x0, x1, y0, y1))
                remain_bottom -= page.size[1]

        if not imgs:
            return (None, None) if need_position else None

        # Stack all strips vertically on a light-grey canvas, GAP px apart.
        height = sum(i.size[1] + GAP for i in imgs)
        width = max(i.size[0] for i in imgs)
        pic = Image.new("RGB", (width, int(height)), (245, 245, 245))
        h = 0
        for ii, img in enumerate(imgs):
            # Dim the first and last strips (the context strips) so the
            # actual content stands out.
            if ii == 0 or ii + 1 == len(imgs):
                img = img.convert("RGBA")
                overlay = Image.new("RGBA", img.size, (0, 0, 0, 0))
                overlay.putalpha(128)
                img = Image.alpha_composite(img, overlay).convert("RGB")
            pic.paste(img, (0, int(h)))
            h += img.size[1] + GAP

        return (pic, positions) if need_position else pic
159+
160+
def _iter_doc_items(self, doc) -> Iterable[tuple[str, Any, Optional[_BBox]]]:
161+
for t in getattr(doc, "texts", []):
162+
parent=getattr(t, "parent", "")
163+
ref=getattr(parent,"cref","")
164+
label=getattr(t, "label", "")
165+
if (label in ("section_header","text",) and ref in ("#/body",)) or label in ("list_item",):
166+
text = getattr(t, "text", "") or ""
167+
bbox = None
168+
if getattr(t, "prov", None):
169+
pn = getattr(t.prov[0], "page_no", None)
170+
bb = getattr(t.prov[0], "bbox", None)
171+
bb = [getattr(bb, "l", None),getattr(bb, "t", None),getattr(bb, "r", None),getattr(bb, "b", None)]
172+
if pn and bb and len(bb) == 4:
173+
bbox = _BBox(page_no=int(pn), x0=bb[0], y0=bb[1], x1=bb[2], y1=bb[3])
174+
yield (DoclingContentType.TEXT.value, text, bbox)
175+
176+
for item in getattr(doc, "texts", []):
177+
if getattr(item, "label", "") in ("FORMULA",):
178+
text = getattr(item, "text", "") or ""
179+
bbox = None
180+
if getattr(item, "prov", None):
181+
pn = getattr(item.prov, "page_no", None)
182+
bb = getattr(item.prov, "bbox", None)
183+
bb = [getattr(bb, "l", None),getattr(bb, "t", None),getattr(bb, "r", None),getattr(bb, "b", None)]
184+
if pn and bb and len(bb) == 4:
185+
bbox = _BBox(int(pn), bb[0], bb[1], bb[2], bb[3])
186+
yield (DoclingContentType.EQUATION.value, text, bbox)
187+
188+
    def _transfer_to_sections(self, doc) -> list[tuple[str, str]]:
        """
        Kept consistent with MinerUParser: returns [(section_text, line_tag), ...].
        """
        sections: list[tuple[str, str]] = []
        for typ, payload, bbox in self._iter_doc_items(doc):
            if typ == DoclingContentType.TEXT.value:
                section = payload.strip()
                # Skip whitespace-only text items.
                if not section:
                    continue
            elif typ == DoclingContentType.EQUATION.value:
                # NOTE(review): empty equations are NOT skipped here, unlike
                # text items above — confirm whether that asymmetry is intended.
                section = payload.strip()
            else:
                # Other content types (images/tables) are collected elsewhere.
                continue

            # Attach a position tag only when a well-formed bbox was extracted.
            tag = self._make_line_tag(bbox) if isinstance(bbox,_BBox) else ""
            sections.append((section, tag))
        return sections
206+
207+
def cropout_docling_table(self, page_no: int, bbox: tuple[float, float, float, float], zoomin: int = 1):
208+
if not getattr(self, "page_images", None):
209+
return None, ""
210+
211+
idx = (page_no - 1) - getattr(self, "page_from", 0)
212+
if idx < 0 or idx >= len(self.page_images):
213+
return None, ""
214+
215+
page_img = self.page_images[idx]
216+
W, H = page_img.size
217+
left, top, right, bott = bbox
218+
219+
x0 = float(left)
220+
y0 = float(H-top)
221+
x1 = float(right)
222+
y1 = float(H-bott)
223+
224+
x0, y0 = max(0.0, min(x0, W - 1)), max(0.0, min(y0, H - 1))
225+
x1, y1 = max(x0 + 1.0, min(x1, W)), max(y0 + 1.0, min(y1, H))
226+
227+
try:
228+
crop = page_img.crop((int(x0), int(y0), int(x1), int(y1))).convert("RGB")
229+
except Exception:
230+
return None, ""
231+
232+
pos = (page_no-1 if page_no>0 else 0, x0, x1, y0, y1)
233+
return crop, [pos]
234+
235+
def _transfer_to_tables(self, doc):
236+
tables = []
237+
for tab in getattr(doc, "tables", []):
238+
img = None
239+
positions = ""
240+
if getattr(tab, "prov", None):
241+
pn = getattr(tab.prov[0], "page_no", None)
242+
bb = getattr(tab.prov[0], "bbox", None)
243+
if pn is not None and bb is not None:
244+
left = getattr(bb, "l", None)
245+
top = getattr(bb, "t", None)
246+
right = getattr(bb, "r", None)
247+
bott = getattr(bb, "b", None)
248+
if None not in (left, top, right, bott):
249+
img, positions = self.cropout_docling_table(int(pn), (float(left), float(top), float(right), float(bott)))
250+
html = ""
251+
try:
252+
html = tab.export_to_html(doc=doc)
253+
except Exception:
254+
pass
255+
tables.append(((img, html), positions if positions else ""))
256+
for pic in getattr(doc, "pictures", []):
257+
img = None
258+
positions = ""
259+
if getattr(pic, "prov", None):
260+
pn = getattr(pic.prov[0], "page_no", None)
261+
bb = getattr(pic.prov[0], "bbox", None)
262+
if pn is not None and bb is not None:
263+
left = getattr(bb, "l", None)
264+
top = getattr(bb, "t", None)
265+
right = getattr(bb, "r", None)
266+
bott = getattr(bb, "b", None)
267+
if None not in (left, top, right, bott):
268+
img, positions = self.cropout_docling_table(int(pn), (float(left), float(top), float(right), float(bott)))
269+
captions = ""
270+
try:
271+
captions = pic.caption_text(doc=doc)
272+
except Exception:
273+
pass
274+
tables.append(((img, [captions]), positions if positions else ""))
275+
return tables
276+
277+
    def parse_pdf(
        self,
        filepath: str | PathLike[str],
        binary: BytesIO | bytes | None = None,
        callback: Optional[Callable] = None,
        *,
        output_dir: Optional[str] = None,
        lang: Optional[str] = None,
        method: str = "auto",
        delete_output: bool = True,
    ):
        """Parse a PDF with Docling and return ``(sections, tables)``.

        Args:
            filepath: source path; when *binary* is given, only its basename
                is used to name the temp file.
            binary: raw PDF bytes / buffer; written to a temp file because the
                converter needs a path on disk.
            callback: optional ``(progress: float, message: str)`` hook.
            output_dir: directory for the temp file (default ``./.docling_tmp``).
            lang, method: accepted for interface parity with the other
                parsers; currently unused here — TODO confirm intended.
            delete_output: delete the temp PDF afterwards (only applies when
                *binary* was given; the temp directory itself is kept).

        Raises:
            RuntimeError: docling is not installed/usable.
            FileNotFoundError: *filepath* does not exist (path mode only).
        """

        if not self.check_installation():
            raise RuntimeError("Docling not available, please install `docling`")

        if binary is not None:
            # Docling converts from a path, so persist the bytes to disk first.
            tmpdir = Path(output_dir) if output_dir else Path.cwd() / ".docling_tmp"
            tmpdir.mkdir(parents=True, exist_ok=True)
            name = Path(filepath).name or "input.pdf"
            tmp_pdf = tmpdir / name
            with open(tmp_pdf, "wb") as f:
                if isinstance(binary, (bytes, bytearray)):
                    f.write(binary)
                else:
                    f.write(binary.getbuffer())
            src_path = tmp_pdf
        else:
            src_path = Path(filepath)
            if not src_path.exists():
                raise FileNotFoundError(f"PDF not found: {src_path}")

        if callback:
            callback(0.1, f"[Docling] Converting: {src_path}")

        # Render page bitmaps for position tags/crops; parsing still works
        # without them, so failures are only logged.
        try:
            self.__images__(str(src_path), zoomin=1)
        except Exception as e:
            self.logger.warning(f"[Docling] render pages failed: {e}")

        conv = DocumentConverter()
        conv_res = conv.convert(str(src_path))
        doc = conv_res.document
        if callback:
            callback(0.7, f"[Docling] Parsed doc: {getattr(doc, 'num_pages', 'n/a')} pages")

        sections = self._transfer_to_sections(doc)
        tables = self._transfer_to_tables(doc)

        if callback:
            callback(0.95, f"[Docling] Sections: {len(sections)}, Tables: {len(tables)}")

        # Best-effort cleanup of the temp PDF written above.
        if binary is not None and delete_output:
            try:
                Path(src_path).unlink(missing_ok=True)
            except Exception:
                pass

        if callback:
            callback(1.0, "[Docling] Done.")
        return sections, tables
337+
338+
339+
if __name__ == "__main__":
    # Manual smoke test: parse a local sample PDF and report section/table counts.
    logging.basicConfig(level=logging.INFO)
    parser = DoclingParser()
    print("Docling available:", parser.check_installation())
    sections, tables = parser.parse_pdf(filepath="test_docling/toc.pdf", binary=None)
    print(len(sections), len(tables))

docker/.env

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,3 +195,4 @@ REGISTER_ENABLED=1
195195
# COMPOSE_PROFILES=infinity,sandbox
196196
# - For OpenSearch:
197197
# COMPOSE_PROFILES=opensearch,sandbox
198+
USE_DOCLING=false

docker/entrypoint.sh

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,16 @@ function start_mcp_server() {
178178
"${MCP_JSON_RESPONSE_FLAG}" &
179179
}
180180

181+
function ensure_docling() {
    # Install the optional `docling` Python package at container start when
    # USE_DOCLING=true (see docker/.env). No-op otherwise.
    if [[ "${USE_DOCLING}" == "true" ]]; then
        if ! python3 -c "import importlib.util,sys; sys.exit(0 if importlib.util.find_spec('docling') else 1)"; then
            echo "[docling] not found, installing..."
            # NOTE(review): DOCLING_VERSION is appended verbatim, so it must
            # include the specifier (e.g. DOCLING_VERSION="==2.15.0");
            # a bare "2.15.0" would produce the invalid spec "docling2.15.0" — confirm.
            python3 -m pip install --no-cache-dir "docling${DOCLING_VERSION:-}"
        else
            echo "[docling] already installed, skip."
        fi
    fi
}
181191
# -----------------------------------------------------------------------------
182192
# Start components based on flags
183193
# -----------------------------------------------------------------------------
@@ -203,6 +213,8 @@ if [[ "${ENABLE_MCP_SERVER}" -eq 1 ]]; then
203213
start_mcp_server
204214
fi
205215

216+
ensure_docling
217+
206218
if [[ "${ENABLE_TASKEXECUTOR}" -eq 1 ]]; then
207219
if [[ "${CONSUMER_NO_END}" -gt "${CONSUMER_NO_BEG}" ]]; then
208220
echo "Starting task executors on host '${HOST_ID}' for IDs in [${CONSUMER_NO_BEG}, ${CONSUMER_NO_END})..."

0 commit comments

Comments
 (0)