Date: Friday, June 28th, 2024 at 09:30 AM US/Eastern
Topics: streamlit, dashboard, dashboarding
Need to share data insights with others? Then try Streamlit!
The Python space is filled with dashboarding tools that incorporate interactive widgets, charts, and displays to bring your data to life. Today we’re going to take a deep dive into the popular end-to-end framework “Streamlit”.
In this seminar, we’ll discuss the core motivations behind Streamlit’s user-friendly API, which allows Python users to create web apps without needing extensive web development knowledge. We’ll focus on practical examples and discuss how Streamlit can optimize performance, including tips for improving responsiveness and scalability. Whether you’re a developer looking to streamline your workflow or a data scientist aiming to share insights through visualizations, this seminar will provide insights into leveraging Streamlit effectively for your projects.
pip install numpy pandas scipy matplotlib streamlit panel watchfiles bokeh plotly
print("Let's take a look!")
We will start with some basic simulated pricing data.
We will create twenty years of buy/sell prices for 200 randomly generated names.
(This data will be generated as a random walk, so we should not mistakenly read into any patterns we discover.)
from pandas import CategoricalIndex, period_range, MultiIndex, Series
from numpy import unique, convolve, ones, apply_along_axis
from scipy.signal.windows import triang
from numpy.random import default_rng
from string import ascii_lowercase
from pathlib import Path
rng = default_rng(0)
assets = CategoricalIndex([
*(currencies := 'USD JPY'.split()),
*unique(rng.choice([*ascii_lowercase], size=(200, 4)).view('<U4').ravel()),
], name='asset')
currencies = assets[:len(currencies)]
tradeable_assets = assets[len(currencies):]
dates = period_range('2000-01-01', '2020-12-31', freq='d')
prices = (
Series(
index=(idx := MultiIndex.from_product([
dates,
tradeable_assets,
], names='date asset'.split())),
name='buy',
data=(
rng.normal(loc=100, scale=20, size=len(tradeable_assets)).clip(0, 200)
* rng.normal(loc=1, scale=.01, size=(len(dates), len(tradeable_assets))).clip(.9, 1.1).cumprod(axis=0)
# * apply_along_axis(
# lambda x: convolve(x, (win := triang(7)), mode='same') / sum(win),
# axis=0,
# arr=
# )
).ravel(),
)
.to_frame()
.assign(
sell=lambda df:
df['buy'] * (1 - convolve(abs(rng.normal(loc=0, scale=0.02, size=len(df))), ones(7) / 7, 'same'))
)
.round(4)
.sort_index()
)
industries = Series(
index=tradeable_assets,
data=rng.choice('''
energy materials industrials discretionary staples healthcare
financials technology communication utilities realestate
'''.strip().split(), size=len(tradeable_assets)),
name='industry',
).astype('category')
print(
prices,
industries,
sep='\n'
)
data_dir = Path('data')
data_dir.mkdir(parents=True, exist_ok=True)
prices.to_pickle(filename := (data_dir / 'prices.pkl')); print(f'Wrote {filename = }')
industries.to_pickle(filename := (data_dir / 'industries.pkl')); print(f'Wrote {filename = }')
We will also add some trading data, based on some very simple strategies.
The implementation of the strategies (below) is extremely simplistic (and may even contain some small bugs!) but since the underlying pricing data is randomly generated, the exact behaviour of the strategies won’t matter.
Instead, we should accept that some strategies exist that generated some trades data (which includes both the trades and corresponding cash proceeds as well as a final liquidation event) that we may want to create a dashboard to analyse.
Note that the construction of this trading data is such that groupby(['asset']).sum() will result in only currency assets remaining (whose value will correspond to our life-to-date P&L.)
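(As a quick sanity check of that invariant, we could run something like the following against any strategy produced by the script below; lifetime_pnl is an illustrative helper and not part of the seminar code itself.)
from pandas import Series

def lifetime_pnl(trades: Series) -> Series:
    # net every asset's traded volume; only the currency rows should survive
    net = trades.groupby('asset', observed=True).sum()
    holdings = net.drop(['USD', 'JPY'], errors='ignore')
    assert (holdings.abs() < 1e-6).all(), 'strategy is not flat at the end'
    return net.reindex(['USD', 'JPY']).dropna().round(2)

# e.g., lifetime_pnl(ALL_STRATEGIES['baseline, even'])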
from pathlib import Path
from collections import namedtuple
from dataclasses import dataclass
from numpy import floor, sign, where
from pandas import read_pickle, Series, concat, IndexSlice, MultiIndex, Index, period_range
from pandas.api.extensions import register_index_accessor
from pickle import dump
@register_index_accessor('_ext')
@dataclass
class _ext:
obj : Index
def addlevel(self, **levels):
new_obj = self.obj.copy(deep=False)
if not isinstance(new_obj, MultiIndex):
new_obj = MultiIndex.from_arrays([
new_obj
])
names = new_obj.names
new_obj.names = [None] * len(names)
return MultiIndex.from_arrays([
*(
new_obj.get_level_values(idx)
for idx in range(len(names))
),
*levels.values(),
], names=[*names, *levels.keys()])
def updatelevel(self, **levels):
new_obj = self.obj.copy(deep=False)
if not isinstance(new_obj, MultiIndex):
new_obj = MultiIndex.from_arrays([
new_obj
])
names = new_obj.names
new_obj.names = [None] * len(names)
return MultiIndex.from_arrays([
levels[n]
if n in levels else
new_obj.get_level_values(idx)
for idx, n in enumerate(names)
], names=names)
data_dir = Path('data')
prices = read_pickle(data_dir / 'prices.pkl')
industries = read_pickle(data_dir / 'industries.pkl')
ALL_STRATEGIES = {}
def strategy(name):
def dec(f):
first_date = prices.index.get_level_values('date').min()
last_date = prices.index.get_level_values('date').max()
assets = prices.loc[first_date].index.get_level_values('asset').unique()
initial_cash = Series(
index=MultiIndex.from_tuples([(first_date, 'USD')], names='date asset'.split()),
data=100_000,
)
final_cash = (-initial_cash).pipe(lambda s: s
.set_axis(s.index._ext.updatelevel(date=[last_date] * len(s)))
)
ALL_STRATEGIES[name] = f(assets, first_date, last_date, initial_cash, final_cash)
return f
return dec
def execute(volumes):
return (
-volumes
.pipe(lambda s: s
* prices.loc[
MultiIndex.from_arrays([
volumes.index.get_level_values('date'),
volumes.index.get_level_values('asset'),
])
].pipe(lambda df: where(sign(volumes), df['buy'], df['sell']))
)
.pipe(lambda s: s
.set_axis(s.index._ext.updatelevel(asset=['USD'] * len(s)))
)
)
def liquidate(volumes, date):
volumes = (
(-volumes)
.pipe(lambda s: s
.set_axis(s.index._ext.updatelevel(date=[date] * len(s)))
)
)
return volumes, execute(volumes)
@strategy(name='baseline, even')
def _(assets, first_date, last_date, initial_cash, final_cash):
trades = Series(
index=prices.loc[IndexSlice[first_date, assets, :]].index,
data=floor(initial_cash.loc[IndexSlice[:, 'USD', :]].sum() / prices.loc[first_date]['buy'].sum()),
name='volume',
)
cash = execute(trades)
return concat([
initial_cash,
trades,
cash,
*liquidate(trades, date=last_date),
final_cash,
])
@strategy(name='baseline, proportional')
def _(assets, first_date, last_date, initial_cash, final_cash):
trades = Series(
index=prices.loc[IndexSlice[first_date, assets, :]].index,
data=floor(
initial_cash.loc[IndexSlice[:, 'USD', :]].sum()
/ len(assets)
/ prices.loc[first_date]['buy']
).values,
name='volume',
)
cash = execute(trades)
return concat([
trades,
cash,
*liquidate(trades, date=last_date)
])
def rebalance(freq, num_assets):
def strat(assets, first_date, last_date, initial_cash, final_cash):
all_trades, open_trades, all_cash = None, None, initial_cash
for x in period_range(first_date, last_date, freq=freq):
if x.end_time > last_date.end_time: continue
winners = (
prices.loc[x.start_time:x.end_time]['buy']
.pipe(lambda s:
s.loc[s.index.get_level_values('date').max()]
/
s.loc[s.index.get_level_values('date').min()]
- 1
)
.loc[lambda s: s > 0]
.nlargest(num_assets)
)
if open_trades is not None:
liq_trades, liq_cash = liquidate(open_trades, date=x.end_time.to_period('d'))
all_trades = concat([all_trades, open_trades, liq_trades])
all_cash = concat([all_cash, liq_cash])
open_trades = Series(
index=MultiIndex.from_product([
[x.end_time.to_period('d')],
winners.index.get_level_values('asset')
], names='date asset'.split()),
data=floor(
all_cash.loc[IndexSlice[:, 'USD', :]].sum()
/ len(winners)
/ prices.loc[IndexSlice[x.end_time, winners.index.get_level_values('asset'), :]]['buy']
).values,
name='volume',
)
all_cash = concat([all_cash, execute(open_trades)])
return concat([
all_trades,
all_cash,
open_trades,
*liquidate(open_trades, date=last_date),
]).sort_index()
return strat
strategy(name='top10, rebalance, monthly' )(rebalance(freq='30d', num_assets=10))
strategy(name='top10, rebalance, quarterly')(rebalance(freq='90d', num_assets=10))
strategy(name='top20, rebalance, monthly' )(rebalance(freq='30d', num_assets=20))
strategy(name='top20, rebalance, quarterly')(rebalance(freq='90d', num_assets=20))
strategy(name='top5, rebalance, yearly' )(rebalance(freq='360d', num_assets=5))
def pnl(trades):
return (
trades
.groupby('asset').sum()
.loc[lambda s: abs(s) > 0]
.round(2)
)
for strat, trades in ALL_STRATEGIES.items():
print(f'{strat:<40}', pnl(trades), sep='\n')
with open(filename := (data_dir / 'strategies.pkl'), mode='wb') as f:
dump(ALL_STRATEGIES, file=f)
print(f'Wrote {filename = }')
“Retained mode”: some (complex) event-loop mechanism retains primary control over code and data flow within the UI.
This is the general conceptual approach of tools like Panel.
from panel import Tabs, Column, serve, Row, bind
from panel.widgets import Select, Button
from panel.pane import Matplotlib
from pandas import read_pickle, IndexSlice
from matplotlib.figure import Figure
from pathlib import Path
data_dir = Path('data')
prices = Row(
'# Prices',
Column(
select := Select(
name='Asset',
options=['arnq', 'bcjv', 'zyrz',],
),
button := Button(
name='View',
),
plot := Matplotlib(
fig := Figure(figsize=(4, 4)),
)
),
)
ax = fig.subplots()
@lambda f: bind(f, button, watch=True)
def _(event):
prices = read_pickle(data_dir / 'prices.pkl')
ax.clear()
prices.loc[IndexSlice[:, select.value, :]].plot(ax=ax)
plot.param.trigger('object')
trades = Row(
'# Trades',
)
tabs = Tabs(*{
'prices': prices,
'trades': trades,
}.items())
serve(
tabs,
port=8080,
show=False,
)
“Immediate mode”: the application code has direct (‘immediate’) control over data flow and rendering.
This is the general conceptual approach of tools like Streamlit.
# streamlit run --server.headless true --server.port 8080
from streamlit import tabs, header, selectbox, button, pyplot
from streamlit import cache_data
from pandas import read_pickle, IndexSlice
from matplotlib.pyplot import subplots
from pathlib import Path
data_dir = Path('data')
TABS = dict(zip(tab_names := 'prices trades'.split(), tabs(tab_names)))
@cache_data
def load_prices():
return read_pickle(data_dir / 'prices.pkl')
with TABS['prices']:
header('Prices')
asset = selectbox('Asset', options=['arnq', 'bcjv', 'zyrz',])
if button('View'):
prices = load_prices()
fig, ax = subplots()
prices.loc[IndexSlice[:, asset, :]].plot(ax=ax)
pyplot(fig)
Be careful not to make hasty decisions based on assumptions about performance.
For example, we would expect raw HTML with basic Javascript to perform very well—after all, it is a direct recipient of billions of dollars of optimisation effort.
from http.server import HTTPServer, BaseHTTPRequestHandler
from textwrap import dedent
from random import Random
from string import ascii_lowercase
rnd = Random(0)
entries = sorted(
''.join(rnd.choices(ascii_lowercase, k=8)) for _ in range(10_000)
)
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
        # build the <option> list outside the f-string (keeps it compatible with Python < 3.12)
        options = '\n'.join(f'<option value={x}>{x}</option>' for x in entries)
        select = f'<select id="select">{options}</select>'
self.wfile.write(dedent(f'''
<html>
<body>
<h1>App</h1>
{ select }
<p><strong>Selected:</strong><span id="text"></span></p>
<script>
document.getElementById('select').addEventListener('change', event =>
document.getElementById('text').textContent = event.target.value
)
</script>
</body>
</html>
'''.strip()).encode('utf-8'))
server = HTTPServer(('localhost', 8080), Handler)
server.serve_forever()
And, indeed, we may discover better performance than similar approaches in tools like Panel:
from panel import serve, bind, Column
from panel.widgets import Select, StaticText
from random import Random
from string import ascii_lowercase
rnd = Random(0)
entries = sorted(
''.join(rnd.choices(ascii_lowercase, k=8)) for _ in range(10_000)
)
select = Select(options=entries)
@lambda f: bind(f, select, watch=True)
def _(event):
text.value = event
text = StaticText(name='Selected')
serve(
Column('# App', select, text),
port=8080,
show=False,
)
However, when we contrast against tools like Streamlit, we may be shocked to discover that Streamlit, by virtue of being able to readily employ techniques such as virtualisation, can actually provide better performance than the simpler approaches!
# streamlit run --server.headless true --server.port 8080
from streamlit import header, selectbox, write, cache_data
from random import Random
from string import ascii_lowercase
rnd = Random(0)
@cache_data
def get_entries():
return sorted(
''.join(rnd.choices(ascii_lowercase, k=8)) for _ in range(50_000)
)
entries = get_entries()
header('App')
asset = selectbox('Asset', options=entries)
write(f'Selected: {asset}')
This counterintuitive result should not lead us to say that performance with tools like Panel or Streamlit will be superior to performance with raw HTML + Javascript. It should also not lead us to make the opposite argument.
Instead, we must be very deliberate in comparing these approaches and understand that, in the aggregate, Panel and Streamlit will likely incur some performance cost, both from the additional levels of indirection they introduce and because many operations require round-trip messaging with the Python backend.
However, we should be careful to measure the performance lost and determine whether the features gained are worth this penalty.
But, most importantly, when deciding whether to use these tools, we should consider not the direct loss they introduce but the variance of outcomes, which is far more likely to affect us.
In fact, we will discover that a tool like Streamlit works quite well as long as we keep to a fairly narrow path, one which we will see in our example. If we venture from that path, we may discover that there are things we simply cannot do (or things we cannot do without enormous contortions.) It is this risk that is much more present in dashboarding tools than the risk of losing incremental performance on basic actions.
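If we do want a rough number for the cost of a rerun, one illustrative approach is simply to time the Python side of the script itself (this ignores network and browser rendering time, so treat it as a lower bound):
# streamlit run --server.headless true --server.port 8080
from time import perf_counter
from streamlit import selectbox, write

start = perf_counter()
choice = selectbox('Pick one', options=[f'item-{i}' for i in range(1_000)])
write(f'Selected: {choice}')
# every interaction reruns the whole script; this reports only the Python-side cost
write(f'Rerun took {(perf_counter() - start) * 1000:.1f} ms')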
print("Let's take a look!")
Let’s start with a very simple “hello world.”
Streamlit is an immediate-mode UI tool, so all of the widgets and other mechanisms appear as simple function calls. In the case of an interactive widget, the function returns the current state of that control—e.g., the value of an input box.
# streamlit run --server.headless true --server.port 8080
from streamlit import title
title('Strategy Analysis')
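As noted above, an interactive widget is just a function call whose return value is the control’s current state; a minimal sketch:
# streamlit run --server.headless true --server.port 8080
from streamlit import text_input, write

name = text_input('Name', value='world')  # returns whatever is currently typed
write(f'Hello, {name}!')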
Streamlit includes some fairly complex layout mechanisms: columns, tabs, and containers.
Just as we may decide to use raw matplotlib instead of the
pandas.DataFrame.plot interface the moment we introduce complex layout into
our plots, we could argue that the moment we need complex layout, we might
prefer to use React.
Indeed, the Streamlit layout is fairly flexible, but it isn’t component-based, so composition of layouts will be difficult (and there will be layouts that are simply impossible.) However, the output will look better than what we could produce in the same amount of time with raw HTML and CSS, and the use of CSS frameworks or React component frameworks may introduce some of the same layout limitations that a dashboarding framework like Streamlit incurs.
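To make that layout vocabulary concrete, here is a minimal sketch of columns and tabs (the labels and values are purely illustrative):
# streamlit run --server.headless true --server.port 8080
from streamlit import columns, tabs, write

# columns: a fixed horizontal split
left, right = columns(2)
left.metric('Assets', 200)
right.metric('Currencies', 2)

# tabs: named panes; note that both panes are evaluated on every rerun
overview, detail = tabs(['Overview', 'Detail'])
with overview:
    write('Summary goes here')
with detail:
    write('Drill-down goes here')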
# streamlit run --server.headless true --server.port 8080
from streamlit import title, tabs
from collections import namedtuple
title('Strategy Analysis')
TABS = namedtuple('Tabs', tab_names := 'prices industries strategies'.split())(*tabs(tab_names))
Let’s extend our example to implement some actual analysis. We will implement a simple plot for our pricing data so that we can select a single asset and view its buy/sell prices over time.
We will use Plotly so that we get some interactive plotting widgets (e.g., zoom, scroll.) We could also use Matplotlib. We would probably want to avoid using Bokeh because, as of this presentation, Bokeh support in Streamlit is stuck on an earlier version (which will transitively require that we use an earlier version of NumPy.)
# streamlit run --server.headless true --server.port 8080
from streamlit import title, tabs, plotly_chart, selectbox, cache_data
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice
@dataclass
class DataSource:
directory : Path = Path('data')
@cache_data
def load_prices(self):
return read_pickle(self.directory / 'prices.pkl')
if __name__ == '__main__':
ds = DataSource()
TABS = namedtuple('Tabs', tab_names := 'prices industries strategies'.split())(*tabs(tab_names))
title('Strategy Analysis')
with TABS.prices:
prices = ds.load_prices()
        asset = selectbox('Asset', prices.index.get_level_values('asset').unique())
pxs = prices.loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
In the above, we need to use @cache_data to ensure that we don’t repeatedly
load the data every time the code is reëvaluated. In most immediate-mode UI
frameworks, the code is reëvaluated upon every rerender—over and over again.
Expensive operations like data loading will, thus, significantly slow down our code, and we will want to use some caching to prevent this.
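A minimal way to see (and feel) the effect of @cache_data; the slow_load below is a hypothetical stand-in for an expensive read:
# streamlit run --server.headless true --server.port 8080
from time import sleep, perf_counter
from streamlit import cache_data, button, write

@cache_data
def slow_load():
    sleep(2)  # stand-in for an expensive read_pickle or database query
    return 'some large dataset'

start = perf_counter()
data = slow_load()  # ~2s on the first run; near-instant on every rerun
button('Rerun')     # any interaction (like clicking this) triggers a full rerun
write(f'Loaded {data!r} in {perf_counter() - start:.2f}s')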
One thing we want to explore when evaluating a tool like Streamlit is how well
it composes with standard (or “naïve”) pure Python mechanisms. If, for example,
we want to group our data-loading in some fashion (rather than have just a
bunch of functions floating around in our module scope,) we might make use of
a dataclasses.dataclass. A good design for a tool like Streamlit will allow
us to use these basic (naïve) approaches to introduce additional structure
into our code without requiring that we go to the documentation to discover
some contorted or non-obvious approved approach.
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice
from functools import cached_property
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
ALL_PAGES = {}
def page(*, name=None):
def dec(f):
ALL_PAGES[f.__name__ if name is None else name] = f
return f
return dec
@page(name='prices')
def prices(ds):
header('Price')
prices, assets = ds.prices, ds.assets
asset = selectbox('Asset', assets)
pxs = prices.loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
@page(name='industries')
def industries(ds):
header('Industries')
@page(name='strategies')
def strategies(ds):
header('Strategies')
if __name__ == '__main__':
ds = DataSource()
title('Strategy Analysis')
with sidebar:
page = selectbox('Page', ALL_PAGES)
ALL_PAGES[page](ds)
Unfortunately, the tabbed layout in Streamlit requires that all of the code under each of the tabs be reëvaluated on every render pass. This means that if the tabs each contain some complex visualisations, we will be generating these visualisations even when they are not visible to the user, which would be wasteful.
Instead, let’s try another way to decompose this application into its three components (prices, industries, and strategies.) Since Streamlit is organised around an immediate-mode UI, we can decompose the “parts” of our dashboard into simple functions that we can call dynamically based on a selection made in the sidebar.
We can even write a simple decorator to set up the “routing” for this (almost
as though this were a web framework like fastapi.)
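The essence of that routing pattern, stripped of the data access (a minimal sketch; the page bodies are placeholders):
# streamlit run --server.headless true --server.port 8080
from streamlit import sidebar, selectbox, header

ALL_PAGES = {}

def page(name):
    # register a render function under a page name (a tiny in-process router)
    def dec(f):
        ALL_PAGES[name] = f
        return f
    return dec

@page('prices')
def prices_page():
    header('Prices')      # placeholder body

@page('industries')
def industries_page():
    header('Industries')  # placeholder body

with sidebar:
    choice = selectbox('Page', [*ALL_PAGES])
ALL_PAGES[choice]()       # only the selected page's code runs on this rerun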
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
empty, set_page_config, date_input
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice, Timestamp
from functools import cached_property
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
@cached_property
def dates(self):
return self.prices.index.get_level_values('date').unique().to_timestamp()
ALL_PAGES = {}
def page(*, name=None):
def dec(f):
ALL_PAGES[f.__name__ if name is None else name] = f
return f
return dec
@page(name='prices')
def prices(ds):
slot = empty()
with sidebar:
asset = selectbox('Asset', ds.assets)
from_date = date_input('From', ds.dates.min())
to_date = date_input('To', ds.dates.max())
slot.header(f'Price for {asset} ({from_date} → {to_date})')
pxs = ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
@page(name='industries')
def industries(ds):
header('Industries')
@page(name='strategies')
def strategies(ds):
header('Strategies')
if __name__ == '__main__':
ds = DataSource()
set_page_config(layout='wide')
title('Strategy Analysis')
with sidebar:
page = selectbox('Page', ALL_PAGES)
ALL_PAGES[page](ds)
Let’s implement our industries viewer, which contains some other visualisations and some other controls.
Previously, we added controls to allow us to change the date range for the visualisations, which we will also do here.
Our industries visualisation will allow us to assess how an industry’s N-day returns look given different constructions of a portfolio. (The exact details of how we do this are largely irrelevant—it’s just a representative example. Instead, we should focus on the amount of pandas analytical code we can safely put inside the “view” itself without slowing down the responsiveness of the system and needing some caching mechanism.)
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
empty, set_page_config, date_input, multiselect, select_slider, radio,
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice, Timestamp, Series
from functools import cached_property
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def industries(self):
return read_pickle(self.path / 'industries.pkl')
@cached_property
def industry_names(self):
        return self.industries.cat.categories
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
@cached_property
def dates(self):
return self.prices.index.get_level_values('date').unique().to_timestamp()
ALL_PAGES_DEFAULT_INDEX, ALL_PAGES = 0, {}
def page(*, name=None, default=False):
def dec(f):
if default:
global ALL_PAGES_DEFAULT_INDEX
ALL_PAGES_DEFAULT_INDEX = len(ALL_PAGES)
ALL_PAGES[f.__name__ if name is None else name] = f
return f
return dec
@page(name='prices')
def prices(ds):
slot = empty()
with sidebar:
asset = selectbox('Asset', ds.assets)
from_date = date_input('From', ds.dates.min())
to_date = date_input('To', ds.dates.max())
slot.header(f'Price for {asset} ({from_date} → {to_date})')
pxs = ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
@page(name='industries', default=True)
def industries(ds):
slot = empty()
with sidebar:
        industries = multiselect('Industries', ds.industry_names)
from_date = date_input('From', ds.dates.min())
to_date = date_input('To', ds.dates.max())
window = select_slider('Window', [30, 90, 180, 360], value=90)
weighting = radio('Weighting', ['even', 'proportional'])
if not industries:
return
slot.header(f'Compare Returns for Industries: {", ".join(industries)}')
assets = ds.industries.isin(industries).loc[lambda s: s].index
pxs = (
ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, assets, :]]
.join(ds.industries)
.set_index('industry', append=True)
.sort_index()
)
match weighting:
case 'even':
volumes = Series(
index=pxs.index,
data=1,
)
case 'proportional':
volumes = (
pxs['buy']
.groupby(['industry', 'date'], observed=True).transform(lambda x: x / sum(x))
)
before = (
(pxs['buy'] * volumes)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
after = (
(pxs['sell'] * volumes)
.shift(window)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
returns = (
(after - before) / before
).iloc[window:]
plotly_chart(
line(returns, x=returns.index.to_timestamp(), y=returns.columns),
use_container_width=True,
)
@page(name='strategies')
def strategies(ds):
header('Strategies')
if __name__ == '__main__':
ds = DataSource()
set_page_config(layout='wide')
title('Strategy Analysis')
with sidebar:
page = selectbox('Page', ALL_PAGES, index=ALL_PAGES_DEFAULT_INDEX)
ALL_PAGES[page](ds)
It would be nice to be able to save and share a particular “view” with other users. If we are using Streamlit to provide some analysis over some data, our dashboard will almost certainly include interactive widgets that allow us to view the data in different ways. The exact settings of these controls are something we want to be able to capture (since we want to be able to share a particular “view” of the data or analysis with our colleagues, while also allowing them to modify these settings to “look around” and assess the correctness of the conclusions we have drawn.)
We’ll implement a share button that gives us a link that includes a stub. The stub will be a unique identifier that maps to certain settings for the visible controls and recreates a given view.
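The core of the stub mechanism is small: serialise the widget settings deterministically, hash them, and use the digest as the shareable key. A minimal sketch of just that piece (the full example below also persists the payload in SQLite and reads the stub back from the URL’s query parameters):
from hashlib import sha1
from pickle import dumps

def kwargs_to_stub(**kwargs) -> str:
    # sort the items so that argument ordering does not change the digest
    payload = dumps(sorted(kwargs.items()))
    return sha1(payload).hexdigest()

# the same settings always produce the same stub, so the shared link is stable
print(f"?stub={kwargs_to_stub(industries=['energy', 'utilities'], window=90, weighting='even')}")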
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
empty, set_page_config, date_input, multiselect, select_slider, radio,
link_button, query_params, cache_resource
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice, Timestamp, Series
from functools import cached_property
from sqlite3 import connect
from pickle import dumps, loads
from hashlib import sha1
from textwrap import dedent
@dataclass
class SessionState:
path : Path = Path('data')
conn : object = None
def __post_init__(self):
self.conn = connect(self.path / 'state.sqlite')
query = dedent('''
create table if not exists kwargs (
id integer primary key autoincrement
, digest text
, payload text
)
''')
self.conn.execute(query)
@cache_resource
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
def kwargs_to_stub(self, **kwargs):
payload = dumps(sorted(kwargs.items()))
digest = sha1(payload).hexdigest()
query = dedent('''
select count(*) from kwargs where digest = :digest
'''.strip())
count = [*self.conn.execute(query, {'digest': digest})][0]
if not count[0]:
query = dedent('''
insert into kwargs (digest, payload) values (:digest, :payload)
'''.strip())
cur = self.conn.execute(query, {'digest': digest, 'payload': payload})
self.conn.commit()
return digest
def kwargs_from_stub(self, stub):
query = dedent('''
select payload from kwargs where digest = :digest
'''.strip())
result = [*self.conn.execute(query, {'digest': stub})]
if not result:
return
return dict(loads(result[0][0]))
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def industries(self):
return read_pickle(self.path / 'industries.pkl')
@cached_property
def industry_names(self):
        return self.industries.cat.categories
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
@cached_property
def dates(self):
return self.prices.index.get_level_values('date').unique().to_timestamp()
ALL_PAGES_DEFAULT_INDEX, ALL_PAGES = 0, {}
def page(*, name=None, default=False):
def dec(f):
if default:
global ALL_PAGES_DEFAULT_INDEX
ALL_PAGES_DEFAULT_INDEX = len(ALL_PAGES)
ALL_PAGES[f.__name__ if name is None else name] = f
return f
return dec
@page(name='prices')
def prices(ds, st):
slot = empty()
with sidebar:
asset = selectbox('Asset', ds.assets)
from_date = date_input('From', ds.dates.min())
to_date = date_input('To', ds.dates.max())
slot.header(f'Price for {asset} ({from_date} → {to_date})')
pxs = ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
@page(name='industries', default=True)
def industries(ds, st):
stub = query_params.get('stub')
if not stub or not (defaults := st.kwargs_from_stub(stub)):
defaults = {
'industries': [],
'from_date': ds.dates.min(),
'to_date': ds.dates.max(),
'window': 90,
'weighting': 'even',
}
slot = empty()
with sidebar:
        industries = multiselect('Industries', ds.industry_names, default=defaults['industries'])
from_date = date_input('From', defaults['from_date'])
to_date = date_input('To', defaults['to_date'])
window = select_slider('Window', [30, 90, 180, 360], value=defaults['window'])
weighting = radio('Weighting', (options := ['even', 'proportional']), index=options.index(defaults['weighting']))
if not industries:
return
slot.header(f'Compare Returns for Industries: {", ".join(industries)}')
assets = ds.industries.isin(industries).loc[lambda s: s].index
pxs = (
ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, assets, :]]
.join(ds.industries)
.set_index('industry', append=True)
.sort_index()
)
match weighting:
case 'even':
volumes = Series(
index=pxs.index,
data=1,
)
case 'proportional':
volumes = (
pxs['buy']
.groupby(['industry', 'date'], observed=True).transform(lambda x: x / sum(x))
)
before = (
(pxs['buy'] * volumes)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
after = (
(pxs['sell'] * volumes)
.shift(window)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
returns = (
(after - before) / before
).iloc[window:]
plotly_chart(
line(returns, x=returns.index.to_timestamp(), y=returns.columns),
use_container_width=True,
)
stub = st.kwargs_to_stub(
industries=industries,
from_date=from_date,
to_date=to_date,
window=window,
weighting=weighting,
)
link_button('Share', f'?stub={stub}')
@page(name='strategies')
def strategies(ds, st):
header('Strategies')
if __name__ == '__main__':
ds = DataSource()
st = SessionState()
set_page_config(layout='wide')
title('Strategy Analysis')
with sidebar:
page = selectbox('Page', ALL_PAGES, index=ALL_PAGES_DEFAULT_INDEX)
ALL_PAGES[page](ds, st)
Let’s extract the stub handling into another dataclasses.dataclass that allows us to centralise the code for our state-sharing. We’ll also push the creation and operation of the share link into our @page decorator.
Here, we can see an example of building some “structure” into our Streamlit app to support consistent, patterned extensions of our dashboard. It’s always good when this structure can be introduced using simple, naïve, pure Python techniques!
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
empty, set_page_config, date_input, multiselect, select_slider, radio,
link_button, query_params, cache_resource
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line
from pathlib import Path
from pandas import read_pickle, IndexSlice, Timestamp, Series
from functools import cached_property
from sqlite3 import connect
from pickle import dumps, loads
from hashlib import sha1
from textwrap import dedent
from functools import wraps
@dataclass
class SessionState:
path : Path = Path('data')
conn : object = None
def __post_init__(self):
self.conn = connect(self.path / 'state.sqlite')
query = dedent('''
create table if not exists kwargs (
id integer primary key autoincrement
, page text
, digest text
, payload text
)
''')
self.conn.execute(query)
@cache_resource
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
def kwargs_to_stub(self, page, **kwargs):
payload = dumps(sorted(kwargs.items()))
digest = sha1(payload).hexdigest()
query = dedent('''
select count(*) from kwargs where page = :page and digest = :digest
'''.strip())
count = [*self.conn.execute(query, {'page': page, 'digest': digest})][0]
if not count[0]:
query = dedent('''
insert into kwargs (page, digest, payload) values (:page, :digest, :payload)
'''.strip())
cur = self.conn.execute(query, {'page': page, 'digest': digest, 'payload': payload})
self.conn.commit()
return digest
def kwargs_from_stub(self, page, stub):
query = dedent('''
select payload from kwargs where page = :page and digest = :digest
'''.strip())
result = [*self.conn.execute(query, {'page': page, 'digest': stub})]
if not result:
return
return dict(loads(result[0][0]))
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def industries(self):
return read_pickle(self.path / 'industries.pkl')
@cached_property
def industry_names(self):
        return self.industries.cat.categories
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
@cached_property
def dates(self):
return self.prices.index.get_level_values('date').unique().to_timestamp()
ALL_PAGES_DEFAULT_INDEX, ALL_PAGES = 0, {}
def page(*, name=None, default=False, default_params={}):
def dec(f):
if default:
global ALL_PAGES_DEFAULT_INDEX
ALL_PAGES_DEFAULT_INDEX = len(ALL_PAGES)
@wraps(f)
def inner(ds, st):
if not (stub := query_params.get('stub')) or not (params := st.kwargs_from_stub(name, stub)):
params = {k: v(ds) for k, v in default_params.items()}
params = f(ds, params)
if params:
stub = st.kwargs_to_stub(name, **params)
with sidebar:
link_button('Share', f'?page={name}&stub={stub}')
ALL_PAGES[f.__name__ if name is None else name] = inner
return inner
return dec
@page(
name='prices',
default_params={
'asset': lambda ds: ds.assets[0],
'from_date': lambda ds: ds.dates.min(),
'to_date': lambda ds: ds.dates.max(),
}
)
def prices(ds, params):
slot = empty()
with sidebar:
asset = selectbox('Asset', ds.assets, index=ds.assets.get_loc(params['asset']))
from_date = date_input('From', params['from_date'])
to_date = date_input('To', params['to_date'])
slot.header(f'Price for {asset} ({from_date} → {to_date})')
pxs = ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
return {
'asset': asset,
'from_date': from_date,
'to_date': to_date,
}
@page(
name='industries',
default=True,
default_params = {
'industries': lambda _: [],
'from_date': lambda ds: ds.dates.min(),
'to_date': lambda ds: ds.dates.max(),
'window': lambda _: 90,
'weighting': lambda _: 'even',
},
)
def industries(ds, params):
slot = empty()
with sidebar:
        industries = multiselect('Industries', ds.industry_names, default=params['industries'])
from_date = date_input('From', params['from_date'])
to_date = date_input('To', params['to_date'])
window = select_slider('Window', [30, 90, 180, 360], value=params['window'])
weighting = radio('Weighting', (options := ['even', 'proportional']), index=options.index(params['weighting']))
if not industries:
return
slot.header(f'Compare Returns for Industries: {", ".join(industries)}')
assets = ds.industries.isin(industries).loc[lambda s: s].index
pxs = (
ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, assets, :]]
.join(ds.industries)
.set_index('industry', append=True)
.sort_index()
)
match weighting:
case 'even':
volumes = Series(
index=pxs.index,
data=1,
)
case 'proportional':
volumes = (
pxs['buy']
.groupby(['industry', 'date'], observed=True).transform(lambda x: x / sum(x))
)
before = (
(pxs['buy'] * volumes)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
after = (
(pxs['sell'] * volumes)
.shift(window)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
returns = (
(after - before) / before
).iloc[window:]
plotly_chart(
line(returns, x=returns.index.to_timestamp(), y=returns.columns),
use_container_width=True,
)
return {
'industries': industries,
'from_date': from_date,
'to_date': to_date,
'window': window,
'weighting': weighting,
}
@page(name='strategies')
def strategies(ds, params):
header('Strategies')
if __name__ == '__main__':
ds = DataSource()
st = SessionState()
set_page_config(layout='wide')
title('Strategy Analysis')
if (page := query_params.get('page')):
default_page_index = [*ALL_PAGES].index(page)
else:
default_page_index = ALL_PAGES_DEFAULT_INDEX
with sidebar:
page = selectbox('Page', ALL_PAGES, index=default_page_index)
ALL_PAGES[page](ds, st)
Let’s wrap up our example by implementing multiple additional visualisations for our benchmark-vs-strategy comparisons. Since the trading data we are using is randomly generated (and our strategies weren’t particularly sophisticated,) we should ignore what the visualisations show us (any patterns are likely spurious,) and focus instead on the “pattern” of how this dashboard is evolving.
Here, we can see how Streamlit readily supports the evolution of a dashboard in a very particular direction: providing a convenient view of static (or externally captured) analytical data with some light analytical amendments, some data visualisations, and some faceting/drill-down controls.
This is probably the “sweet spot” for Streamlit vs more manual approaches like using React, and we can clearly create a very useful tool in very little code… without ever having to touch Javascript.
Should this tool grow further? Should it grow to control (rather than merely reflect) the analyses? Streamlit definitely supports building these kinds of tools, but our general advice is to stick to the simpler, narrow path. The variance of outcomes once we stray from this path is far too high, and there is the risk of hitting a brick wall that requires us to start over from scratch or significantly rewrite the code.
However, even within these constraints, we can see how useful and effective Streamlit can be… and we can see it is a definite improvement over e-mailing a colleague a Jupyter Notebook with some IPywidgets!
# streamlit run --server.headless true --server.port 8080
from streamlit import (
title, tabs, plotly_chart, selectbox, cache_data, sidebar, header,
empty, set_page_config, date_input, multiselect, select_slider, radio,
link_button, query_params, cache_resource, subheader,
)
from dataclasses import dataclass
from collections import namedtuple
from plotly.express import line, histogram
from pathlib import Path
from pandas import read_pickle, IndexSlice, Timestamp, Series, period_range, MultiIndex, concat
from numpy import where
from functools import cached_property
from sqlite3 import connect
from pickle import dumps, loads, load
from hashlib import sha1
from textwrap import dedent
from functools import wraps
@dataclass
class SessionState:
path : Path = Path('data')
conn : object = None
def __post_init__(self):
self.conn = connect(self.path / 'state.sqlite')
query = dedent('''
create table if not exists kwargs (
id integer primary key autoincrement
, page text
, digest text
, payload text
)
''')
self.conn.execute(query)
@cache_resource
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
def kwargs_to_stub(self, page, **kwargs):
payload = dumps(sorted(kwargs.items()))
digest = sha1(payload).hexdigest()
query = dedent('''
select count(*) from kwargs where page = :page and digest = :digest
'''.strip())
count = [*self.conn.execute(query, {'page': page, 'digest': digest})][0]
if not count[0]:
query = dedent('''
insert into kwargs (page, digest, payload) values (:page, :digest, :payload)
'''.strip())
cur = self.conn.execute(query, {'page': page, 'digest': digest, 'payload': payload})
self.conn.commit()
return digest
def kwargs_from_stub(self, page, stub):
query = dedent('''
select payload from kwargs where page = :page and digest = :digest
'''.strip())
result = [*self.conn.execute(query, {'page': page, 'digest': stub})]
if not result:
return
return dict(loads(result[0][0]))
@dataclass
class DataSource:
path : Path = Path('data')
@cache_data
@classmethod
def from_path(cls, *, path=None):
kwargs = {'path': path} if path is not None else {}
return cls(**kwargs)
@cached_property
def prices(self):
return read_pickle(self.path / 'prices.pkl')
@cached_property
def industries(self):
return read_pickle(self.path / 'industries.pkl')
@cached_property
def strategies(self):
with open(self.path / 'strategies.pkl', mode='rb') as f:
return load(f)
@cached_property
def strategy_names(self):
return sorted(x for x in self.strategies if not x.startswith('baseline'))
@cached_property
def baseline_names(self):
return sorted(x for x in self.strategies if x.startswith('baseline'))
@cached_property
def industry_names(self):
        return self.industries.cat.categories
@cached_property
def assets(self):
return self.prices.index.get_level_values('asset').unique()
@cached_property
def dates(self):
return self.prices.index.get_level_values('date').unique().to_timestamp()
ALL_PAGES_DEFAULT_INDEX, ALL_PAGES = 0, {}
def page(*, name=None, default=False, default_params={}):
def dec(f):
if default:
global ALL_PAGES_DEFAULT_INDEX
ALL_PAGES_DEFAULT_INDEX = len(ALL_PAGES)
@wraps(f)
def inner(ds, st):
if not (stub := query_params.get('stub')) or not (params := st.kwargs_from_stub(name, stub)):
params = {k: v(ds) for k, v in default_params.items()}
params = f(ds, params)
if params:
stub = st.kwargs_to_stub(name, **params)
with sidebar:
link_button('Share', f'?page={name}&stub={stub}')
ALL_PAGES[f.__name__ if name is None else name] = inner
return inner
return dec
@page(
name='prices',
default_params={
'asset': lambda ds: ds.assets[0],
'from_date': lambda ds: ds.dates.min(),
'to_date': lambda ds: ds.dates.max(),
}
)
def prices(ds, params):
slot = empty()
with sidebar:
asset = selectbox('Asset', ds.assets, index=ds.assets.get_loc(params['asset']))
from_date = date_input('From', params['from_date'])
to_date = date_input('To', params['to_date'])
slot.header(f'Price for {asset} ({from_date} → {to_date})')
pxs = ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, asset, :]]
plotly_chart(
line(pxs, x=pxs.index.to_timestamp(), y=pxs.columns),
use_container_width=True,
)
return {
'asset': asset,
'from_date': from_date,
'to_date': to_date,
}
@page(
name='industries',
default_params = {
'industries': lambda _: [],
'from_date': lambda ds: ds.dates.min(),
'to_date': lambda ds: ds.dates[-2],
'window': lambda _: 90,
'weighting': lambda _: 'even',
},
)
def industries(ds, params):
slot = empty()
with sidebar:
        industries = multiselect('Industries', ds.industry_names, default=params['industries'])
from_date = date_input('From', params['from_date'])
to_date = date_input('To', params['to_date'])
window = select_slider('Window', [30, 90, 180, 360], value=params['window'])
weighting = radio('Weighting', (options := ['even', 'proportional']), index=options.index(params['weighting']))
if not industries:
return
slot.header(f'Compare Returns for Industries: {", ".join(industries)}')
assets = ds.industries.isin(industries).loc[lambda s: s].index
pxs = (
ds.prices.loc[Timestamp(from_date):Timestamp(to_date)].loc[IndexSlice[:, assets, :]]
.join(ds.industries)
.set_index('industry', append=True)
.sort_index()
)
match weighting:
case 'even':
volumes = Series(
index=pxs.index,
data=1,
)
case 'proportional':
volumes = (
pxs['buy']
.groupby(['industry', 'date'], observed=True).transform(lambda x: x / sum(x))
)
before = (
(pxs['buy'] * volumes)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
after = (
(pxs['sell'] * volumes)
.shift(window)
.groupby(['industry', 'date'], observed=True).sum()
.unstack(['industry'])
)
returns = (
(after - before) / before
).iloc[window:]
plotly_chart(
line(returns, x=returns.index.to_timestamp(), y=returns.columns),
use_container_width=True,
)
return {
'industries': industries,
'from_date': from_date,
'to_date': to_date,
'window': window,
'weighting': weighting,
}
@page(
name='strategies',
default=True,
default_params = {
'strategy': lambda ds: sorted(ds.strategy_names)[0],
'baseline': lambda ds: sorted(ds.baseline_names)[0],
'from_date': lambda ds: ds.dates.min(),
'to_date': lambda ds: ds.dates[-2],
},
)
def strategies(ds, params):
slot = empty()
with sidebar:
strategy = selectbox('Strategy', ds.strategy_names, index=ds.strategy_names.index(params['strategy']))
baseline = selectbox('Baseline', ds.baseline_names, index=ds.baseline_names.index(params['baseline']))
from_date = date_input('From', params['from_date'])
to_date = date_input('To', params['to_date'])
def by_industry(trades):
return (
trades
.loc[IndexSlice[:, trades.index.get_level_values('asset').intersection(ds.assets), :]]
.to_frame('volume')
.groupby(['asset']).cumsum()
.assign(
market_price=lambda df:
ds.prices.loc[
MultiIndex.from_arrays([
df.index.get_level_values('date'),
df.index.get_level_values('asset'),
])
]
.pipe(lambda px: where(df['volume'] > 0, px['sell'], px['buy']))
,
market_value=lambda df:
df['market_price'] * df['volume']
,
)
.join(ds.industries)
.set_index('industry', append=True)
['market_value']
.groupby(['date', 'industry'], observed=True).sum()
.unstack('industry', fill_value=0)
.loc[IndexSlice[Timestamp(from_date):Timestamp(to_date), :]]
.reindex(period_range(Timestamp(from_date), Timestamp(to_date))).ffill()
)
def cash(trades):
return (
trades
.loc[IndexSlice[:, 'USD', :]]
.groupby('date').sum()
.cumsum()
.reindex(period_range(Timestamp(from_date), Timestamp(to_date))).ffill().bfill()
.loc[Timestamp(from_date):Timestamp(to_date)]
)
def pnl(trades):
return concat([
### cash only
trades
.loc[IndexSlice[:, ['USD'], :]]
.groupby(['date']).sum()
.reindex(ds.prices.index.get_level_values('date').unique()).fillna(0)
.cumsum()
,
### trades only
trades
.loc[IndexSlice[:, trades.index.get_level_values('asset').difference(['USD']), :]]
.groupby(['date', 'asset']).sum()
.reindex(ds.prices.index).fillna(0)
.groupby('asset').cumsum()
.pipe(lambda s: s * where(s > 0, ds.prices['sell'], ds.prices['buy']))
.groupby(['date']).sum()
,
]).groupby(['date']).sum().loc[Timestamp(from_date):Timestamp(to_date)]
    strategy_by_industry = by_industry(ds.strategies[strategy])
    baseline_by_industry = by_industry(ds.strategies[baseline])
    cash = {
        'strategy': cash(ds.strategies[strategy]),
        'baseline': cash(ds.strategies[baseline]),
    }
    pnl = {
        'strategy': pnl(ds.strategies[strategy]),
        'baseline': pnl(ds.strategies[baseline]),
    }
    subheader('PNL')
    plotly_chart(
        line(pnl, x=[*pnl.values()][0].index.to_timestamp(), y=pnl),
        use_container_width=True,
    )
slot.header(f'Strategy {strategy} vs {baseline}')
subheader(f'% Market Value By Industry (Strategy: {strategy})')
plotly_chart(
histogram(strategy_by_industry, x=strategy_by_industry.index.to_timestamp(), y=strategy_by_industry.columns, barnorm='percent'),
use_container_width=True,
)
subheader(f'% Market Value By Industry (Baseline: {baseline})')
plotly_chart(
histogram(baseline_by_industry, x=baseline_by_industry.index.to_timestamp(), y=baseline_by_industry.columns, barnorm='percent'),
use_container_width=True,
)
subheader('Cash')
plotly_chart(
line(cash, x=[*cash.values()][0].index.to_timestamp(), y=cash),
use_container_width=True,
)
return {
'strategy': strategy,
'baseline': baseline,
'from_date': from_date,
'to_date': to_date,
}
if __name__ == '__main__':
ds = DataSource()
st = SessionState()
set_page_config(layout='wide')
title('Strategy Analysis')
if (page := query_params.get('page')):
default_page_index = [*ALL_PAGES].index(page)
else:
default_page_index = ALL_PAGES_DEFAULT_INDEX
with sidebar:
page = selectbox('Page', ALL_PAGES, index=default_page_index)
ALL_PAGES[page](ds, st)