what is lifelab data scientist edition
There are 11 code blocks in your notebook right now!
Show code
# Meta analysis: how many code blocks are in entirety lifelab right now?
# query_all() takes a single dict of filters, not keyword arguments
# (same calling convention as the dayone query in the calendar script).
code_blocks = nb.query_all({'block_type': 'code'})
print("There are", len(code_blocks), "code blocks in your notebook right now!")
%% Dataflow diagram: stored journal/data blocks are pulled by queries in
%% a single analysis cell, combined, and rendered back into the notebook.
flowchart LR
%% Left column: raw blocks persisted in the notebook's database.
subgraph Storage["Your Data Lake"]
J1["Journal Entry<br/>2024-01-15"]
J2["Journal Entry<br/>2024-06-20"]
J3["Journal Entry<br/>2024-12-01"]
D1["Data Block<br/>weight: 72kg"]
D2["Data Block<br/>mood: 8/10"]
end
%% Middle column: two queries (prose vs. structured data) feeding one
%% combine/analyze step.
subgraph Code["Analysis Cell"]
Q["journals = nb.query<br/>type=markdown<br/>links=journal"]
M["metrics = nb.query<br/>type=data<br/>links=health"]
A["Combine + Analyze"]
end
%% Right column: artifacts rendered inline in the notebook.
subgraph Output["In-Notebook Output"]
Chart["Matplotlib Chart"]
Table["Pandas DataFrame"]
Stats["Summary Statistics"]
end
%% Journal entries flow into the markdown query; data blocks into the
%% metrics query; both merge in the analysis step.
J1 --> Q
J2 --> Q
J3 --> Q
D1 --> M
D2 --> M
Q --> A
M --> A
A --> Chart
A --> Table
A --> Stats
fig done
<string>:84: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`). Consider using `matplotlib.pyplot.close()`.
Show code
from datetime import datetime
import calendar
from collections import defaultdict
import json
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import LinearSegmentedColormap
def get_theme_colors(nb):
    """Fetch current CSS theme colors from settings.

    Queries the notebook for a note block flagged with ``css_enabled``
    and returns its ``metadata['variables']`` mapping (CSS custom
    property name -> color string), or ``None`` when no themed block
    exists.
    """
    # FIX 1 (now actually applied): query_all() takes a single dict of
    # filters, not keyword args — same form as the dayone query in the
    # main script.
    blocks = nb.query_all({
        'block_type': 'note',
        'metadata': {'css_enabled': True},
    })
    if blocks:
        # First matching block wins; assumes at most one active theme —
        # TODO confirm against the settings schema.
        return blocks[0]['metadata']['variables']
    return None
def sentiment_color(score, has_data=True, theme=None):
    """Map sentiment score using theme colors.

    Interpolates a score in [-1, 1] between the theme's error hue
    (negative), hover background (neutral) and success hue (positive),
    returning a ``#rrggbb`` hex string. Days without data get the flat
    hover-background color. Out-of-range scores are clamped.
    """
    if theme is None:
        # Hard-coded fallback palette, used when no CSS theme is found.
        theme = {
            '--bg-hover': '#f2e9e1',
            '--hue-error': '#b4637a',
            '--hue-success': '#286983',
        }
    if not has_data:
        return theme['--bg-hover']

    clamped = min(1, max(-1, score))
    stops = list(zip(
        (0, 0.5, 1),
        (theme['--hue-error'], theme['--bg-hover'], theme['--hue-success']),
    ))
    gradient = LinearSegmentedColormap.from_list('sentiment', stops)
    # Shift [-1, 1] into the colormap's [0, 1] domain, then emit hex.
    r, g, b, _a = gradient((clamped + 1) / 2)
    return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
def generate_month(blocks, year, month, nb, figsize=(8, 7)):
    """Generate sentiment calendar using theme colors.

    Draws one calendar month as a grid of rounded cells, each colored by
    the mean sentiment of that day's journal entries, with a
    negative/neutral/positive legend underneath.

    Parameters:
        blocks: iterable of block dicts; only blocks with a "sentiment"
            value in metadata and a page_title starting with "journal/"
            contribute.
        year, month: calendar month to render.
        nb: notebook API object, used to look up the active CSS theme.
        figsize: matplotlib figure size in inches.

    Returns:
        (fig, ax) — the new matplotlib Figure and Axes. The caller is
        responsible for showing and closing the figure.
    """
    theme = get_theme_colors(nb)
    if theme is None:
        # Fallback palette when no CSS theme block is configured.
        theme = {
            '--bg-app': '#faf4ed',
            '--bg-hover': '#f2e9e1',
            '--text-body': '#575279',
            '--text-muted': '#797593',
            '--hue-error': '#b4637a',
            '--hue-success': '#286983',
        }
    # Group scores by day
    daily_scores = defaultdict(list)
    for block in blocks:
        # metadata may be absent or None; treat both as "no metadata".
        meta = block.get("metadata") or {}
        if "sentiment" not in meta:
            continue
        page_title = block.get("page_title", "")
        if page_title.startswith("journal/"):
            try:
                # Day-of-month is the last "-"-separated token of the
                # page title (e.g. "journal/2025-12-01" -> 1).
                day = int(page_title.split("-")[-1])
                score = meta["sentiment"]
                # Scores may be stored as strings; coerce to float.
                if isinstance(score, str):
                    score = float(score)
                daily_scores[day].append(score)
            except (ValueError, IndexError):
                # Malformed title or non-numeric score: skip the block.
                continue
    # Mean sentiment per day-of-month.
    daily_avg = {day: sum(s)/len(s) for day, s in daily_scores.items()}
    # Build calendar grid
    cal = calendar.Calendar(firstweekday=6)  # weeks start on Sunday
    month_name = calendar.month_name[month]
    fig, ax = plt.subplots(figsize=figsize, facecolor=theme['--bg-app'])
    ax.set_facecolor(theme['--bg-app'])
    # Grid coordinates: x = weekday column 0..6, y = week row (top row 5
    # down to 0); y < 0 is reserved for the legend strip.
    ax.set_xlim(0, 7)
    ax.set_ylim(-0.8, 7)
    ax.set_aspect('equal')
    ax.axis('off')
    # Title
    ax.text(3.5, 6.5, f"{month_name} {year}", ha='center', va='center',
            fontsize=22, color=theme['--text-body'], fontweight='bold')
    # Day headers
    days = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    for i, day in enumerate(days):
        ax.text(i + 0.5, 5.75, day, ha='center', va='center',
                fontsize=10, color=theme['--text-muted'], fontweight='medium')
    # Calendar cells
    row = 5
    col = 0
    for date in cal.itermonthdates(year, month):
        # itermonthdates pads with days from adjacent months; advance
        # the grid cursor past them without drawing a cell.
        if date.month != month:
            col += 1
            if col == 7:
                col = 0
                row -= 1
            continue
        has_data = date.day in daily_avg
        score = daily_avg.get(date.day)
        # `score if score else 0` also maps an exact 0.0 average to 0,
        # which is the same color — harmless.
        color = sentiment_color(score if score else 0, has_data, theme)
        # Cell
        rect = patches.FancyBboxPatch(
            (col + 0.05, row - 0.95), 0.9, 0.9,
            boxstyle="round,pad=0.02,rounding_size=0.08",
            facecolor=color, edgecolor='none'
        )
        ax.add_patch(rect)
        # Text color based on background
        txt_color = theme['--text-body'] if has_data else theme['--text-muted']
        # Day number
        ax.text(col + 0.5, row - 0.38, str(date.day),
                ha='center', va='center',
                fontsize=13, color=txt_color, fontweight='bold')
        # Score
        if has_data:
            ax.text(col + 0.5, row - 0.68, f"{score:+.1f}",
                    ha='center', va='center',
                    fontsize=8, color=txt_color, alpha=0.7)
        col += 1
        if col == 7:
            col = 0
            row -= 1
    # Legend
    legend_items = [
        ('Negative', theme['--hue-error']),
        ('Neutral', theme['--bg-hover']),
        ('Positive', theme['--hue-success'])
    ]
    for i, (label, c) in enumerate(legend_items):
        x = 1.5 + i * 2
        rect = patches.FancyBboxPatch(
            (x, -0.45), 0.3, 0.3,
            boxstyle="round,pad=0.02,rounding_size=0.05",
            facecolor=c, edgecolor='none'
        )
        ax.add_patch(rect)
        ax.text(x + 0.45, -0.3, label, va='center', fontsize=9, color=theme['--text-muted'])
    plt.tight_layout()
    return fig, ax
# --- Main execution ---
# Render the sentiment calendar for December 2025 from Day One imports.
date = datetime.strptime("2025-12-01", "%Y-%m-%d")
first_day = date.replace(day=1)
# monthrange()[1] is the number of days in the month.
last_day = date.replace(day=calendar.monthrange(date.year, date.month)[1])
blocks = nb.query_all({
    'metadata': {'source': 'dayone'},
    'date_from': first_day.strftime("%Y-%m-%d"),
    'date_to': last_day.strftime("%Y-%m-%d"),
})
fig, ax = generate_month(blocks, date.year, date.month, nb)
print("fig done")
plt.show()
# Close the figure after rendering: repeated cell runs otherwise
# accumulate open pyplot figures (the ">20 figures" RuntimeWarning seen
# in this cell's output) and leak memory.
plt.close(fig)
What is LifeLab - Data scientist edition
LifeLab is a fusion of Jupyter notebooks and Obsidian, built on top of Postgres. It is a self-hosted computational environment that reimagines the personal journal as a queryable data lake.
You are the data
Your journal is also your analysis dataset, because both code and prose live in the same database. You can write Python blocks that query the notebook itself. The API allows you to fetch, filter, and transform other notes programmatically. Furthermore, you can write explicit JSON data blocks to track aspects of your life with a strictly defined schema and aggregate them.
The platform was built to answer a specific question: What if my journal could read itself? Because the execution environment has direct access to the storage layer, you can build self-updating dashboards. For instance, I built a sentiment analysis pipeline that iterates through years of unstructured journal entries, computes rolling averages, and renders the results via Matplotlib directly in the daily view.
Analysis is no longer a two-step job of starting a Jupyter notebook and then loading your data sources. Everything is in one spot.