feat: Add comprehensive multi-ticker testing with batch processing and advanced filtering

Bobby (aider) 2025-02-14 00:12:07 -08:00
parent 1204b18a76
commit a4e828da8c


@@ -2,6 +2,7 @@ import streamlit as st
 import pandas_ta as ta
 import pandas as pd
 import numpy as np
+from utils.common_utils import get_qualified_stocks
 from backtesting import Backtest, Strategy
 from typing import Dict, List, Union
 import itertools
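
The import added above pulls get_qualified_stocks from utils.common_utils, which is not part of this diff. Judging from the call site in the next hunk, it takes start_date, end_date, min_price, max_price and min_volume and returns a list of ticker symbols. A minimal sketch of that assumed interface follows; the price_data parameter and the filtering rules are illustrative stand-ins, not the project's actual implementation:

from typing import Dict, List, Optional

import pandas as pd


def get_qualified_stocks(start_date, end_date, min_price: float, max_price: float,
                         min_volume: int,
                         price_data: Optional[Dict[str, pd.DataFrame]] = None) -> List[str]:
    """Return tickers whose data between start_date and end_date passes the filters.

    price_data maps ticker -> OHLCV DataFrame and stands in for whatever quote
    source the real helper uses.
    """
    qualified: List[str] = []
    for ticker, df in (price_data or {}).items():
        window = df.loc[start_date:end_date]
        if window.empty:
            continue
        last_close = float(window["Close"].iloc[-1])
        avg_volume = float(window["Volume"].mean())
        if min_price <= last_close <= max_price and avg_volume >= min_volume:
            qualified.append(ticker)
    return qualified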
@@ -232,26 +233,58 @@ def backtesting_page():
     with left_col:
         st.subheader("Backtest Settings")
-        # Add radio button for single/multiple ticker mode
-        test_mode = st.radio("Testing Mode", ["Single Ticker", "Multiple Tickers"])
+        # Add radio button for test mode
+        test_mode = st.radio("Testing Mode", ["Single Ticker", "Multiple Tickers", "All Available Tickers"])
         if test_mode == "Single Ticker":
             # Single ticker input
             ticker = st.text_input("Enter Ticker Symbol", value="AAPL").upper()
             tickers = [ticker]
-        else:
+        elif test_mode == "Multiple Tickers":
             # Multiple ticker input
             ticker_input = st.text_area(
                 "Enter Ticker Symbols (one per line)",
                 value="AAPL\nMSFT\nGOOG"
             )
             tickers = [t.strip().upper() for t in ticker_input.split('\n') if t.strip()]
+        else: # All Available Tickers
+            st.subheader("Filter Settings")
+            min_price = st.number_input("Minimum Price", value=5.0)
+            max_price = st.number_input("Maximum Price", value=1000.0)
+            min_volume = st.number_input("Minimum Volume", value=100000)
-            # Add minimum performance filters
+            # Get all qualified stocks based on filters
+            try:
+                qualified_stocks = get_qualified_stocks(
+                    start_date=start_datetime,
+                    end_date=end_datetime,
+                    min_price=min_price,
+                    max_price=max_price,
+                    min_volume=min_volume
+                )
+                st.info(f"Found {len(qualified_stocks)} qualified stocks for testing")
+                tickers = qualified_stocks
+            except Exception as e:
+                st.error(f"Error getting qualified stocks: {str(e)}")
+                tickers = []
+        # Add performance filters for multiple and all tickers modes
+        if test_mode in ["Multiple Tickers", "All Available Tickers"]:
+            st.subheader("Performance Filters")
-            min_return = st.number_input("Minimum Return (%)", value=10.0)
-            min_sharpe = st.number_input("Minimum Sharpe Ratio", value=1.0)
-            max_drawdown = st.number_input("Maximum Drawdown (%)", value=-20.0)
+            col1, col2, col3 = st.columns(3)
+            with col1:
+                min_return = st.number_input("Minimum Return (%)", value=10.0)
+            with col2:
+                min_sharpe = st.number_input("Minimum Sharpe Ratio", value=1.0)
+            with col3:
+                max_drawdown = st.number_input("Maximum Drawdown (%)", value=-20.0)
+            # Add batch size control for processing
+            batch_size = st.number_input("Batch Size (tickers per batch)", value=50, min_value=1)
+            # Add progress tracking
+            progress_bar = st.progress(0)
+            status_text = st.empty()
         # Date range selection
         col1, col2 = st.columns(2)
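
The next hunk replaces a single run_multi_ticker_backtest call with a batched loop over that same helper. run_multi_ticker_backtest is defined elsewhere in this file and is not shown in the diff; the sketch below captures only the interface the loop relies on: a list of tickers, the date range and the indicator settings go in, and a DataFrame with 'Return [%]', 'Sharpe Ratio' and 'Max Drawdown [%]' columns comes out. The buy-and-hold strategy and the bundled GOOG sample data are placeholders, not the page's real strategy or data source:

from typing import Dict, List

import pandas as pd
from backtesting import Backtest, Strategy


class _BuyAndHold(Strategy):
    """Placeholder strategy: buy on the first bar and hold to the end."""

    def init(self):
        pass

    def next(self):
        if not self.position:
            self.buy()


def _load_ohlcv(ticker: str, start, end) -> pd.DataFrame:
    """Placeholder loader; the real page uses its own per-ticker data source."""
    from backtesting.test import GOOG  # sample data bundled with backtesting.py
    return GOOG.loc[start:end]


def run_multi_ticker_backtest(tickers: List[str], start, end,
                              indicator_settings: Dict) -> pd.DataFrame:
    rows = []
    for ticker in tickers:
        try:
            df = _load_ohlcv(ticker, start, end)
            # indicator_settings would parameterize the real strategy; the
            # placeholder strategy has no tunable parameters, so it is unused here.
            stats = Backtest(df, _BuyAndHold, cash=10_000).run()
            rows.append({
                "Ticker": ticker,
                "Return [%]": stats["Return [%]"],
                "Sharpe Ratio": stats["Sharpe Ratio"],
                # backtesting.py labels this "Max. Drawdown [%]"; the page drops the dot
                "Max Drawdown [%]": stats["Max. Drawdown [%]"],
            })
        except Exception:
            continue  # a failing ticker simply drops out of the combined results
    return pd.DataFrame(rows)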
@@ -329,55 +362,87 @@ def backtesting_page():
                 st.error(f"Error during backtest: {str(e)}")
         else:
-            # Multiple ticker logic
+            # Multiple ticker or All Available Tickers logic
             try:
-                results_df = run_multi_ticker_backtest(
-                    tickers, start_datetime, end_datetime, indicator_settings
-                )
+                total_tickers = len(tickers)
+                processed_tickers = []
+                all_results = []
-                # Apply performance filters
-                filtered_df = results_df[
-                    (results_df['Return [%]'] >= min_return) &
-                    (results_df['Sharpe Ratio'] >= min_sharpe) &
-                    (results_df['Max Drawdown [%]'] >= max_drawdown)
-                ]
+                for i in range(0, total_tickers, batch_size):
+                    batch = tickers[i:i+batch_size]
+                    status_text.text(f"Processing batch {i//batch_size + 1} of {(total_tickers + batch_size - 1)//batch_size}")
-                with right_col:
-                    st.subheader("Multi-Ticker Results")
-                    # Display summary statistics
-                    st.write("### Summary Statistics")
-                    summary = pd.DataFrame({
-                        'Metric': ['Average Return', 'Average Sharpe', 'Average Drawdown', 'Success Rate'],
-                        'Value': [
-                            f"{results_df['Return [%]'].mean():.2f}%",
-                            f"{results_df['Sharpe Ratio'].mean():.2f}",
-                            f"{results_df['Max Drawdown [%]'].mean():.2f}%",
-                            f"{(len(filtered_df) / len(results_df) * 100):.1f}%"
-                        ]
-                    })
-                    st.table(summary)
-                    # Display full results
-                    st.write("### All Results")
-                    st.dataframe(results_df.sort_values('Return [%]', ascending=False))
-                    # Display filtered results
-                    st.write("### Filtered Results (Meeting Criteria)")
-                    st.dataframe(filtered_df.sort_values('Return [%]', ascending=False))
-                    # Create a downloadable CSV
-                    csv = results_df.to_csv(index=False)
-                    st.download_button(
-                        "Download Results CSV",
-                        csv,
-                        "backtest_results.csv",
-                        "text/csv",
-                        key='download-csv'
+                    results_df = run_multi_ticker_backtest(
+                        batch, start_datetime, end_datetime, indicator_settings
                     )
+                    if not results_df.empty:
+                        all_results.append(results_df)
+                        processed_tickers.extend(batch)
+                    # Update progress
+                    progress = min((i + batch_size) / total_tickers, 1.0)
+                    progress_bar.progress(progress)
+                if all_results:
+                    # Combine all results
+                    results_df = pd.concat(all_results, ignore_index=True)
+                    # Apply performance filters
+                    filtered_df = results_df[
+                        (results_df['Return [%]'] >= min_return) &
+                        (results_df['Sharpe Ratio'] >= min_sharpe) &
+                        (results_df['Max Drawdown [%]'] >= max_drawdown)
+                    ]
+                    with right_col:
+                        st.subheader("Multi-Ticker Results")
+                        # Display summary statistics
+                        st.write("### Summary Statistics")
+                        summary = pd.DataFrame({
+                            'Metric': [
+                                'Total Tickers Tested',
+                                'Successful Tests',
+                                'Average Return',
+                                'Average Sharpe',
+                                'Average Drawdown',
+                                'Success Rate'
+                            ],
+                            'Value': [
+                                f"{len(processed_tickers)}",
+                                f"{len(results_df)}",
+                                f"{results_df['Return [%]'].mean():.2f}%",
+                                f"{results_df['Sharpe Ratio'].mean():.2f}",
+                                f"{results_df['Max Drawdown [%]'].mean():.2f}%",
+                                f"{(len(filtered_df) / len(results_df) * 100):.1f}%"
+                            ]
+                        })
+                        st.table(summary)
+                        # Display full results
+                        st.write("### All Results")
+                        st.dataframe(results_df.sort_values('Return [%]', ascending=False))
+                        # Display filtered results
+                        st.write("### Filtered Results (Meeting Criteria)")
+                        st.dataframe(filtered_df.sort_values('Return [%]', ascending=False))
+                        # Create a downloadable CSV
+                        csv = results_df.to_csv(index=False)
+                        st.download_button(
+                            "Download Results CSV",
+                            csv,
+                            "backtest_results.csv",
+                            "text/csv",
+                            key='download-csv'
+                        )
+                else:
+                    st.error("No valid results were generated from any ticker")
             except Exception as e:
                 st.error(f"Error during multi-ticker backtest: {str(e)}")
st.error("Full error details:", exc_info=True)
 def run_optimization(df: pd.DataFrame, indicator_settings: Dict) -> List:
     """Run optimization with different parameter combinations"""