Update app.py
app.py CHANGED
@@ -540,7 +540,7 @@ class VisualConsensusEngine:
 
 
     def _execute_research_function(self, function_name: str, arguments: dict, requesting_model_name: str = None) -> str:
-        """Execute research function
 
         query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
 
@@ -549,74 +549,106 @@ class VisualConsensusEngine:
         self.show_research_starting(function_name, query_param)
 
         try:
-            #
             result = ""
 
-            if function_name
-
-
 
-
-                    self.update_research_progress("Performing deep web search (multiple sources)...")
-                else:
-                    self.update_research_progress("Searching web databases...")
 
-
-
 
-
-
-
-
-
 
-
-
-
-
-
-
-
-
-
-
-
 
-
-
-
-
-
 
             elif function_name == "multi_source_research":
-                self.update_research_progress("Initializing multi-source
-                self.update_research_progress("Phase 1: Web search...")
 
-                #
-
-
-                    # Simulate the deep research process with progress updates
-                    self.update_research_progress("Phase 1: Comprehensive web search...")
-                    web_result = self.search_agent.search(arguments["query"], "standard")
-                    self.update_research_progress(f"Web search complete ({len(web_result)} chars) - Phase 2: Academic sources...")
-
-                    self.update_research_progress("Phase 2: Searching academic databases...")
-                    # Add small delay to show progress
-                    time.sleep(1)
-
-                    self.update_research_progress("Phase 3: Analyzing and synthesizing results...")
-                    result = self.search_agent.search(arguments["query"], "deep")
-                    self.update_research_progress(f"Multi-source research complete - synthesized {len(result)} characters")
-
-                except Exception as e:
-                    self.update_research_progress(f"Multi-source research error: {str(e)}")
-                    result = f"Multi-source research encountered an error: {str(e)}"
 
             else:
-                self.update_research_progress(f"Unknown research function: {function_name}")
                 result = f"Unknown research function: {function_name}"
 
-            # Phase 3: Show research
             if query_param:
                 self.show_research_complete(function_name, query_param, len(result), requesting_model_name)
 
@@ -626,7 +658,23 @@ class VisualConsensusEngine:
             error_msg = str(e)
             if query_param:
                 self.show_research_error(function_name, query_param, error_msg, requesting_model_name)
-            return f"
 
     def show_research_starting(self, function: str, query: str):
         """Show research request initiation with enhanced messaging"""
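For contrast, the multi_source_research path removed in the hunks above ran everything through a single search agent and simulated its phases with progress messages. Reduced to the recoverable "-" lines (truncated lines omitted), it amounted to roughly the sketch below; this is a readability aid reconstructed from the visible removed lines only, not the original code verbatim, and the standalone signature is invented for illustration.

# Reconstruction for readability only, pieced together from the recoverable
# "-" lines above; truncated lines are omitted and the signature is invented.
import time

def removed_multi_source_flow(search_agent, update_research_progress, query: str) -> str:
    update_research_progress("Phase 1: Comprehensive web search...")
    web_result = search_agent.search(query, "standard")
    update_research_progress(f"Web search complete ({len(web_result)} chars) - Phase 2: Academic sources...")
    update_research_progress("Phase 2: Searching academic databases...")
    time.sleep(1)  # the old code added a small delay to show progress
    update_research_progress("Phase 3: Analyzing and synthesizing results...")
    result = search_agent.search(query, "deep")
    update_research_progress(f"Multi-source research complete - synthesized {len(result)} characters")
    return result

The added side of the same hunks follows.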
@@ -540,7 +540,7 @@ class VisualConsensusEngine:
 
 
     def _execute_research_function(self, function_name: str, arguments: dict, requesting_model_name: str = None) -> str:
+        """Execute research function using proper OpenFloor protocol messaging"""
 
         query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
 
@@ -549,74 +549,106 @@ class VisualConsensusEngine:
         self.show_research_starting(function_name, query_param)
 
         try:
+            # Map function names to research agents
+            function_to_agent = {
+                "search_web": "web_search",
+                "search_wikipedia": "wikipedia",
+                "search_academic": "arxiv",
+                "search_technology_trends": "github",
+                "search_financial_data": "sec_edgar"
+            }
+
             result = ""
 
+            if function_name in function_to_agent:
+                agent_name = function_to_agent[function_name]
+                research_agent = self.research_agents[agent_name]
 
+                self.update_research_progress(f"Sending OpenFloor research request...")
 
+                # Create OpenFloor conversation and envelope
+                conversation = Conversation()
 
+                # Create research request utterance
+                request_dialog = DialogEvent(
+                    speakerUri=f"tag:consilium.ai,2025:{requesting_model_name or 'expert'}",
+                    features={
+                        "text": TextFeature(values=[query_param])
+                    }
+                )
 
+                # Create request envelope using OpenFloor protocol
+                request_envelope = Envelope(
+                    conversation=conversation,
+                    sender=Sender(speakerUri=f"tag:consilium.ai,2025:{requesting_model_name or 'expert'}"),
+                    events=[
+                        UtteranceEvent(
+                            dialogEvent=request_dialog,
+                            to=To(speakerUri=research_agent.manifest.identification.speakerUri)
+                        )
+                    ]
+                )
 
+                self.update_research_progress(f"Processing research via OpenFloor protocol...")
+
+                # Send request to research agent using OpenFloor protocol
+                response_envelope = research_agent.handle_utterance_event(request_envelope)
+
+                # Extract result from OpenFloor response
+                result = self._extract_research_result_from_envelope(response_envelope)
+
+                self.update_research_progress(f"OpenFloor research complete - found {len(result)} characters")
 
             elif function_name == "multi_source_research":
+                self.update_research_progress("Initializing multi-source OpenFloor research...")
 
+                # Use multiple research agents via OpenFloor protocol
+                results = []
+                agents_to_use = ["web_search", "wikipedia", "arxiv"]
 
+                for i, agent_name in enumerate(agents_to_use, 1):
+                    self.update_research_progress(f"Phase {i}: OpenFloor request to {agent_name.replace('_', ' ').title()}...")
+                    try:
+                        research_agent = self.research_agents[agent_name]
+
+                        # Create OpenFloor request for this agent
+                        conversation = Conversation()
+                        request_dialog = DialogEvent(
+                            speakerUri=f"tag:consilium.ai,2025:{requesting_model_name or 'expert'}",
+                            features={"text": TextFeature(values=[query_param])}
+                        )
+
+                        request_envelope = Envelope(
+                            conversation=conversation,
+                            sender=Sender(speakerUri=f"tag:consilium.ai,2025:{requesting_model_name or 'expert'}"),
+                            events=[UtteranceEvent(
+                                dialogEvent=request_dialog,
+                                to=To(speakerUri=research_agent.manifest.identification.speakerUri)
+                            )]
+                        )
+
+                        # Get OpenFloor response
+                        response_envelope = research_agent.handle_utterance_event(request_envelope)
+                        agent_result = self._extract_research_result_from_envelope(response_envelope)
+
+                        if agent_result and len(agent_result) > 100:
+                            results.append(f"**{agent_name.replace('_', ' ').title()} Results:**\n{agent_result}")
+
+                    except Exception as e:
+                        self.update_research_progress(f"Phase {i}: {agent_name} OpenFloor error - {str(e)[:50]}...")
+                        continue
+
+                # Combine results
+                if results:
+                    result = f"**Multi-Source OpenFloor Research for: {query_param}**\n\n" + "\n\n---\n\n".join(results)
+                    self.update_research_progress(f"Multi-source OpenFloor research complete - synthesized {len(result)} characters")
+                else:
+                    result = f"Multi-source OpenFloor research for '{query_param}' encountered errors across all agents."
+
             else:
                 result = f"Unknown research function: {function_name}"
 
+            # Phase 3: Show research complete
             if query_param:
                 self.show_research_complete(function_name, query_param, len(result), requesting_model_name)
 
@@ -626,7 +658,23 @@ class VisualConsensusEngine:
             error_msg = str(e)
             if query_param:
                 self.show_research_error(function_name, query_param, error_msg, requesting_model_name)
+            return f"OpenFloor research error: {error_msg}"
+
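The search branches added above repeat one request/response round trip: build a DialogEvent carrying the query, wrap it in an Envelope addressed to the agent's speakerUri, hand it to the agent's handle_utterance_event, and pull the text back out of the reply. A minimal sketch of that round trip as a standalone helper is shown below. It assumes the Conversation, DialogEvent, TextFeature, Envelope, Sender, UtteranceEvent and To classes are importable as they are used in app.py (the import path here is an assumption), and the helper name send_research_request is hypothetical.

# Hedged sketch, not part of the commit: the import path and helper name are
# assumptions; the constructor keywords mirror how the classes are used above.
from openfloor import (Conversation, DialogEvent, TextFeature,
                       Envelope, Sender, UtteranceEvent, To)

def send_research_request(research_agent, requester_uri: str, query: str, extract_text) -> str:
    # Build a dialog event carrying the query text
    request_dialog = DialogEvent(
        speakerUri=requester_uri,
        features={"text": TextFeature(values=[query])},
    )
    # Wrap it in an envelope addressed to the agent's own speakerUri
    request_envelope = Envelope(
        conversation=Conversation(),
        sender=Sender(speakerUri=requester_uri),
        events=[UtteranceEvent(
            dialogEvent=request_dialog,
            to=To(speakerUri=research_agent.manifest.identification.speakerUri),
        )],
    )
    # Round trip: the agent answers with a response envelope
    response_envelope = research_agent.handle_utterance_event(request_envelope)
    # extract_text is e.g. VisualConsensusEngine._extract_research_result_from_envelope
    return extract_text(response_envelope)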
+    def _extract_research_result_from_envelope(self, envelope: Envelope) -> str:
+        """Extract research result from OpenFloor response envelope"""
+        try:
+            for event in envelope.events:
+                if hasattr(event, 'eventType') and event.eventType == 'utterance':
+                    dialog_event = event.parameters.get('dialogEvent')
+                    if dialog_event and hasattr(dialog_event, 'features'):
+                        text_feature = dialog_event.features.get('text')
+                        if text_feature and hasattr(text_feature, 'tokens'):
+                            return ' '.join([token.get('value', '') for token in text_feature.tokens])
+
+            return "No research result found in OpenFloor response"
+
+        except Exception as e:
+            return f"Error extracting OpenFloor research result: {str(e)}"
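The new _extract_research_result_from_envelope helper only touches a handful of attributes on the response (events, eventType, parameters['dialogEvent'], features['text'], tokens). That traversal can be exercised in isolation with throwaway SimpleNamespace stubs; the stub objects below only illustrate the shape the helper expects and are not the real OpenFloor classes.

# Stub-only illustration of the access pattern; these SimpleNamespace objects
# stand in for the real protocol classes and exist purely for this example.
from types import SimpleNamespace

def extract_text(envelope) -> str:
    """Same traversal as _extract_research_result_from_envelope."""
    for event in envelope.events:
        if getattr(event, 'eventType', None) == 'utterance':
            dialog_event = event.parameters.get('dialogEvent')
            if dialog_event and hasattr(dialog_event, 'features'):
                text_feature = dialog_event.features.get('text')
                if text_feature and hasattr(text_feature, 'tokens'):
                    return ' '.join(token.get('value', '') for token in text_feature.tokens)
    return "No research result found in OpenFloor response"

stub_envelope = SimpleNamespace(events=[SimpleNamespace(
    eventType='utterance',
    parameters={'dialogEvent': SimpleNamespace(
        features={'text': SimpleNamespace(tokens=[{'value': 'stubbed'}, {'value': 'result'}])}
    )},
)])

print(extract_text(stub_envelope))  # -> stubbed result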
 
     def show_research_starting(self, function: str, query: str):
         """Show research request initiation with enhanced messaging"""
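Finally, the multi_source_research branch added above is a fan-out: the same query goes to several agents in turn, per-agent failures are skipped, and only answers longer than 100 characters are stitched together. Separated from the OpenFloor plumbing, the aggregation logic looks roughly like the sketch below; fan_out_research and ask_agent are hypothetical names, with ask_agent standing in for the envelope round trip.

# Sketch of the aggregation pattern only; the OpenFloor round trip is hidden
# behind the ask_agent callable, and all names here are illustrative.
from typing import Callable, Dict

def fan_out_research(agents: Dict[str, object],
                     ask_agent: Callable[[object, str], str],
                     query: str,
                     min_chars: int = 100) -> str:
    sections = []
    for name, agent in agents.items():
        try:
            answer = ask_agent(agent, query)
        except Exception:
            continue  # mirror the diff: a failing agent is skipped, not fatal
        if answer and len(answer) > min_chars:
            sections.append(f"**{name.replace('_', ' ').title()} Results:**\n{answer}")
    if not sections:
        return f"Multi-source research for '{query}' encountered errors across all agents."
    return f"**Multi-Source Research for: {query}**\n\n" + "\n\n---\n\n".join(sections)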