# wcag_AI_validation/restserver/routers/routes_wcag_h58.py
from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse
import logging
from pydantic import BaseModel
import json
import aiofiles
import asyncio
from dependences.utils import (
disclaim_bool_string,
prepare_output_folder,
prepare_folder_path,
create_folder,
db_persistence_insert,
)
from dependences.language_extractor import LanguageExtractor
from dependences.mllm_management import MLLMManager, parse_mllm_standard_response
# User-facing error messages returned by the endpoint's error responses
# (also logged on failure).
invalid_json_input_msg = "Invalid JSON format"
unexpected_error_msg = "Unexpected Error: could not end the process"
class WCAG_h58Valuation(BaseModel):
    """Request body for the POST /wcag_h58_validation endpoint."""

    # URL of the page to validate.
    page_url: str = "https://www.bbc.com"
    # Forwarded to MLLMManager.make_h58_evaluation as number_of_segments.
    number_of_segments: int = 10
    # Bool-like string parsed with disclaim_bool_string(); when truthy the
    # elaboration is written to the prepared output folder.
    save_elaboration: str = "True"
    # Forwarded to LanguageExtractor; presumably a character-length cutoff
    # for "short" text segments — TODO confirm against LanguageExtractor.
    short_segments_length_threshold: int = 30
    # Forwarded to LanguageExtractor; upper bound on total extracted text.
    max_total_length: int = 15000
class WCAG_h58ValuationRoutes:
    """FastAPI routes for WCAG technique H58 validation.

    H58: using language attributes to identify changes in the human
    language of page content.  The single POST route extracts text
    segments (with their language context) from a page and asks one or
    two MLLMs to assess them.
    """

    def __init__(self, connection_db, mllm_settings):
        """Store dependencies and register the POST route.

        Args:
            connection_db: DB handle passed through to db_persistence_insert.
            mllm_settings: MLLM configuration dict.  When
                mllm_settings["openai_model"] == "Both", the end-point,
                api-key and model-id entries are themselves dicts with
                *_remote / *_local keys.
        """
        self.connection_db = connection_db
        self.mllm_settings = mllm_settings
        self.router = APIRouter()
        self.router.add_api_route(
            "/wcag_h58_validation",
            self.wcag_h58_validation,
            methods=["POST"],
            tags=["Wcag H58 Validation"],
            description="WCAG validator H58 validation: Using language attributes to identify changes in the human language",
            name="wcag H58 validation",
            dependencies=[],
        )
        logging.info("wcag h58 routes correctly initialized.")

    @staticmethod
    def _flatten_h58_responses(responses, model_id):
        """Parse raw MLLM responses and flatten them into one assessment list.

        Each entry in *responses* carries a "mllm_response" that is itself a
        list of per-segment assessments, so the parsed results of all entries
        are concatenated into a single flat list.
        """
        flattened = []
        for response in responses:
            parsed_resp = parse_mllm_standard_response(
                response["mllm_response"],
                extra_fields=["tag", "html", "detected_lang", "declared_lang"],
                model_id=model_id,
            )
            flattened.extend(parsed_resp)
        return flattened

    async def wcag_h58_validation(
        self, request: Request, data: WCAG_h58Valuation
    ) -> JSONResponse:
        """Return the WCAG H58 (language-of-parts) validation assessment.

        Extracts text segments with language context from ``data.page_url``,
        evaluates them with one MLLM (or a remote and a local model in
        parallel when openai_model == "Both"), persists the result to the
        local DB (best-effort) and optionally to a JSON file, and returns
        the assessments.

        Returns:
            200 with {"languages": ..., "mllm_validations": ...} on success,
            400 for invalid JSON input, 500 for any other failure.
        """
        try:
            print("Received wcag H58 validation request.")
            # Round-trip through JSON so every value is a plain JSON type.
            json_content = json.loads(data.model_dump_json())

            # Model id used for logging/persistence; "Both" runs a remote
            # and a local model, so join both ids.
            if self.mllm_settings["openai_model"] == "Both":
                mllm_model_id_for_logging = (
                    self.mllm_settings["mllm_model_id"]["model_id_remote"]
                    + "&"
                    + self.mllm_settings["mllm_model_id"]["model_id_local"]
                )
            else:
                mllm_model_id_for_logging = self.mllm_settings["mllm_model_id"]

            # Parse the bool-like flag once; reused for the final save below.
            save_elaboration = disclaim_bool_string(
                json_content["save_elaboration"]
            )
            if save_elaboration:  # prepare output folders only if saving
                url_path, folder_str = prepare_folder_path(
                    json_content, mllm_model_id_for_logging, tecnhnique_name="h58"
                )
                output_dir = prepare_output_folder(url_path, folder_str)

            # Extract text segments together with their language context.
            language_extractor = LanguageExtractor(
                url=json_content["page_url"],
                short_segments_length_threshold=json_content[
                    "short_segments_length_threshold"
                ],
                max_total_length=json_content["max_total_length"],
            )
            logging.info("Extracting languages from: %s", json_content["page_url"])
            languages = await language_extractor.extract_content_with_lang_context()
            print("Extracted languages and textual elements.", languages)

            if self.mllm_settings["openai_model"] == "Both":
                from concurrent.futures import ThreadPoolExecutor

                def run_model_evaluation(
                    endpoint, api_key, model_id, openai_model, label
                ):
                    """Evaluate with one model and flatten its assessments."""
                    manager = MLLMManager(endpoint, api_key, model_id)
                    print(
                        f"Using {label} model for h58 evaluation.",
                        manager.end_point,
                    )
                    logging.info("mllm_end_point:%s", endpoint)
                    logging.info("mllm_model_id:%s", model_id)
                    responses = manager.make_h58_evaluation(
                        languages=languages,
                        number_of_segments=json_content["number_of_segments"],
                        openai_model=openai_model,
                    )
                    return self._flatten_h58_responses(responses, model_id)

                # Run remote and local evaluations concurrently; the calls
                # are blocking, so threads overlap the waits.
                with ThreadPoolExecutor(max_workers=2) as executor:
                    future_openai = executor.submit(
                        run_model_evaluation,
                        self.mllm_settings["mllm_end_point"]["model_end_point_remote"],
                        self.mllm_settings["mllm_api_key"]["api_key_remote"],
                        self.mllm_settings["mllm_model_id"]["model_id_remote"],
                        True,
                        "first remote",
                    )
                    future_local = executor.submit(
                        run_model_evaluation,
                        self.mllm_settings["mllm_end_point"]["model_end_point_local"],
                        self.mllm_settings["mllm_api_key"]["api_key_local"],
                        self.mllm_settings["mllm_model_id"]["model_id_local"],
                        False,
                        "second local",
                    )
                    mllm_responses_openai = future_openai.result()
                    mllm_responses_local = future_local.result()
                mllm_responses_object = {
                    "mllm_h58_assessments": {
                        "mllm_h58_assessments_openai": mllm_responses_openai,
                        "mllm_h58_assessments_local": mllm_responses_local,
                    }
                }
            else:
                # Single-model path: read the flat MLLM settings.
                mllm_end_point = self.mllm_settings["mllm_end_point"]
                mllm_api_key = self.mllm_settings["mllm_api_key"]
                mllm_model_id = self.mllm_settings["mllm_model_id"]
                logging.info("mllm_end_point:%s", mllm_end_point)
                logging.info("mllm_model_id:%s", mllm_model_id)
                mllm_manager = MLLMManager(mllm_end_point, mllm_api_key, mllm_model_id)
                print(
                    "Using single model for h58 evaluation.",
                    mllm_manager.end_point,
                )
                mllm_responses = mllm_manager.make_h58_evaluation(
                    languages=languages,
                    number_of_segments=json_content["number_of_segments"],
                    openai_model=self.mllm_settings["openai_model"],
                )
                mllm_responses_object = {
                    "mllm_h58_assessments": self._flatten_h58_responses(
                        mllm_responses, mllm_model_id
                    ),
                }

            # Common: object returned in the HTTP response.
            returned_object = {
                "languages": languages,
                "mllm_validations": mllm_responses_object,
            }

            # Best-effort persistence to the local DB: failures are logged
            # but do not fail the request.
            try:
                json_in_str = json.dumps(languages, ensure_ascii=False)
                json_out_str = json.dumps(mllm_responses_object, ensure_ascii=False)
                db_persistence_insert(
                    connection_db=self.connection_db,
                    insert_type="wcag_h58_validation",
                    page_url=json_content["page_url"],
                    llm_model=mllm_model_id_for_logging,
                    json_in_str=json_in_str,
                    json_out_str=json_out_str,
                    table="wcag_validator_results",
                )
            except Exception as e:
                logging.error("error persisting to local db: %s", e)

            # Optionally save MLLM input and responses to disk (async write
            # to avoid blocking the event loop).
            if save_elaboration:
                async with aiofiles.open(
                    output_dir + "/mllm_assessments.json", "w", encoding="utf-8"
                ) as f:
                    await f.write(
                        json.dumps(returned_object, indent=2, ensure_ascii=False)
                    )

            return JSONResponse(content=returned_object, status_code=200)
        except json.JSONDecodeError:
            logging.error(invalid_json_input_msg)
            return JSONResponse(
                content={"error": invalid_json_input_msg}, status_code=400
            )
        except Exception as e:
            logging.error(unexpected_error_msg + " %s", e)
            return JSONResponse(
                content={"error": unexpected_error_msg}, status_code=500
            )