Annotate pictures with a remote VLM
In [ ]
import logging
import os
from pathlib import Path
In [ ]
import requests
from docling_core.types.doc import PictureItem
from dotenv import load_dotenv
In [ ]
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
PdfPipelineOptions,
PictureDescriptionApiOptions,
)
from docling.document_converter import DocumentConverter, PdfFormatOption
In [ ]
def vllm_local_options(model: str):
    options = PictureDescriptionApiOptions(
        url="http://localhost:8000/v1/chat/completions",
        params=dict(
            model=model,
            seed=42,
            max_completion_tokens=200,
        ),
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=90,
    )
    return options
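The URL above assumes a vLLM server is already running on localhost port 8000 (started with "vllm serve MODEL_NAME", as shown in main() below). Before launching the full conversion, it can help to verify the endpoint is reachable. This is a minimal sketch, assuming the server exposes the standard OpenAI-compatible /v1/models route that vLLM implements; check_vllm_server is a hypothetical helper, not part of Docling:

import requests

def check_vllm_server(base_url: str = "http://localhost:8000") -> bool:
    # Query the OpenAI-compatible model listing endpoint served by vLLM.
    try:
        res = requests.get(f"{base_url}/v1/models", timeout=5)
        res.raise_for_status()
    except requests.RequestException:
        return False
    # A 200 response lists the models currently being served.
    return True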
In [ ]
def watsonx_vlm_options():
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")

    def _get_iam_access_token(api_key: str) -> str:
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        print(f"{api_out=}")
        return api_out["access_token"]

    options = PictureDescriptionApiOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id="meta-llama/llama-3-2-11b-vision-instruct",
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        prompt="Describe the image in three sentences. Be concise and accurate.",
        timeout=60,
    )
    return options
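watsonx_vlm_options() expects WX_API_KEY and WX_PROJECT_ID in the environment; load_dotenv() also picks them up from a local .env file. A fail-fast check before calling it can save a confusing HTTP error later. A minimal sketch; require_watsonx_env is our own hypothetical helper, not part of Docling:

import os

from dotenv import load_dotenv

def require_watsonx_env() -> None:
    # Pull variables from a local .env file, if one exists.
    load_dotenv()
    missing = [v for v in ("WX_API_KEY", "WX_PROJECT_ID") if not os.environ.get(v)]
    if missing:
        raise RuntimeError(f"Missing required environment variables: {missing}")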
In [ ]
def main():
    logging.basicConfig(level=logging.INFO)

    input_doc_path = Path("./tests/data/pdf/2206.01062.pdf")

    pipeline_options = PdfPipelineOptions(
        enable_remote_services=True  # <-- this is required!
    )
    pipeline_options.do_picture_description = True

    # PictureDescriptionApiOptions allows interfacing with APIs that support the
    # multi-modal chat interface. Below are a few examples of how to configure them.
    #
    # One possibility is self-hosting a model, e.g. via vLLM:
    # $ vllm serve MODEL_NAME
    # Then PictureDescriptionApiOptions can point to the localhost endpoint.
    #
    # Example for the Granite Vision model: (uncomment the following lines)
    # pipeline_options.picture_description_options = vllm_local_options(
    #     model="ibm-granite/granite-vision-3.1-2b-preview"
    # )
    #
    # Example for the SmolVLM model: (uncomment the following lines)
    pipeline_options.picture_description_options = vllm_local_options(
        model="HuggingFaceTB/SmolVLM-256M-Instruct"
    )
    #
    # Another possibility is using online services, e.g. watsonx.ai.
    # Using it requires setting the env variables WX_API_KEY and WX_PROJECT_ID.
    # Uncomment the following line for this option:
    # pipeline_options.picture_description_options = watsonx_vlm_options()

    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)

    for element, _level in result.document.iterate_items():
        if isinstance(element, PictureItem):
            print(
                f"Picture {element.self_ref}\n"
                f"Caption: {element.caption_text(doc=result.document)}\n"
                f"Annotations: {element.annotations}"
            )
In [ ]
if __name__ == "__main__":
    main()
if __name__ == "__main__": main()