Srihari Thyagarajan committed on
Commit
15c9aa4
·
unverified ·
2 Parent(s): 8088df6 7c455f1

Merge pull request #121 from etrotta/etrotta/missing-data-weather

polars/03_loading_data.py CHANGED
@@ -2,7 +2,7 @@
2
  # requires-python = ">=3.12"
3
  # dependencies = [
4
  # "adbc-driver-sqlite==1.7.0",
5
- # "duckdb>=1.4.0.dev",
6
  # "lxml==6.0.0",
7
  # "marimo",
8
  # "pandas==2.3.2",
 
2
  # requires-python = ">=3.12"
3
  # dependencies = [
4
  # "adbc-driver-sqlite==1.7.0",
5
+ # "duckdb==1.4.0",
6
  # "lxml==6.0.0",
7
  # "marimo",
8
  # "pandas==2.3.2",
polars/11_missing_data.py ADDED
@@ -0,0 +1,837 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "plotly[express]==6.3.0",
5
+ # "polars==1.33.1",
6
+ # ]
7
+ # ///
8
+
9
+ import marimo
10
+
11
+ __generated_with = "0.15.3"
12
+ app = marimo.App(width="medium")
13
+
14
+
15
+ @app.cell(hide_code=True)
16
+ def _(mo):
17
+ mo.md(
18
+ r"""
19
+ # Dealing with Missing Data
20
+
21
+ _by [etrotta](https://github.com/etrotta) and [Felix Najera](https://github.com/folicks)_
22
+
23
+ This notebook covers some common problems you may face when dealing with real datasets and techniques used to deal with them, showcasing polars functionality for handling missing data.
24
+
25
+ First we provide an overview of the methods available in polars, then we walk through a mini case study with real world data showing how to use them, and lastly we provide some additional information in the 'Bonus Content' section.
26
+ You can skip to each header using the menu on the right side.
27
+ """
28
+ )
29
+ return
30
+
31
+
32
+ @app.cell(hide_code=True)
33
+ def _(mo):
34
+ mo.md(
35
+ r"""
36
+ ## Methods for working with Nulls
37
+
38
+ We'll be using the following DataFrame to show the most important methods:
39
+ """
40
+ )
41
+ return
42
+
43
+
44
+ @app.cell(hide_code=True)
45
+ def _(pl):
46
+ df = pl.DataFrame(
47
+ [
48
+ {"species": "Dog", "name": "Millie", "height": None, "age": 4},
49
+ {"species": "Dog", "name": "Wally", "height": 60, "age": None},
50
+ {"species": "Dog", "name": None, "height": 50, "age": 12},
51
+ {"species": "Cat", "name": "Mini", "height": 15, "age": None},
52
+ {"species": "Cat", "name": None, "height": 25, "age": 6},
53
+ {"species": "Cat", "name": "Kazusa", "height": None, "age": 16},
54
+ ]
55
+ )
56
+ df
57
+ return (df,)
58
+
59
+
60
+ @app.cell(hide_code=True)
61
+ def _(mo):
62
+ mo.md(
63
+ r"""
64
+ ### Counting nulls
65
+
66
+ A simple yet convenient aggregation
67
+ """
68
+ )
69
+ return
70
+
71
+
72
+ @app.cell
73
+ def _(df):
74
+ df.null_count()
75
+ return
76
+
77
+
78
+ @app.cell(hide_code=True)
79
+ def _(mo):
80
+ mo.md(
81
+ r"""
82
+ ### Dropping Nulls
83
+
84
+ The simplest way of dealing with null values is throwing them away, but that is not always a good idea.
85
+ """
86
+ )
87
+ return
88
+
89
+
90
+ @app.cell
91
+ def _(df):
92
+ df.drop_nulls()
93
+ return
94
+
95
+
96
+ @app.cell
97
+ def _(df):
98
+ df.drop_nulls(subset="name")
99
+ return
100
+
101
+
102
+ @app.cell(hide_code=True)
103
+ def _(mo):
104
+ mo.md(
105
+ r"""
106
+ ### Filtering null values
107
+
108
+ To filter in polars, you'll typically use the `df.filter(expression)` or `df.remove(expression)` methods.
109
+
110
+ Filter will only keep rows in which the expression evaluates to True.
111
+ It will remove not only rows in which it evaluates to False, but also those in which the expression evaluates to None.
112
+
113
+ Remove will only remove rows in which the expression evaluates to True.
114
+ It will keep rows in which it evaluates to None.
115
+ """
116
+ )
117
+ return
118
+
119
+
120
+ @app.cell
121
+ def _(df, pl):
122
+ df.filter(pl.col("age") > 10)
123
+ return
124
+
125
+
126
+ @app.cell
127
+ def _(df, pl):
128
+ df.remove(pl.col("age") < 10)
129
+ return
130
+
131
+
132
+ @app.cell(hide_code=True)
133
+ def _(mo):
134
+ mo.md(
135
+ r"""
136
+ You may also be tempted to use `== None` or `!= None`, but operators in polars will generally propagate null values.
137
+
138
+ You can use the `.eq_missing()` or `.ne_missing()` methods if you want to be strict about it, or the dedicated `.is_null()` and `.is_not_null()` methods.
139
+ """
140
+ )
141
+ return
142
+
143
+
144
+ @app.cell
145
+ def _(df, pl):
146
+ df.select(
147
+ "name",
148
+ (pl.col("name") == None).alias("Name equals None"),
149
+ (pl.col("name") == "Mini").alias("Name equals Mini"),
150
+ (pl.col("name").eq_missing("Mini")).alias("Name eq_missing Mini"),
151
+ (pl.col("name").is_null()).alias("Name is null"),
152
+ (pl.col("name").is_not_null()).alias("Name is not null"),
153
+ )
154
+ return
155
+
156
+
157
+ @app.cell(hide_code=True)
158
+ def _(mo):
159
+ mo.md(
160
+ r"""
161
+ ### Filling Null values
162
+
163
+ You can also fill in the values with constants, calculations or by consulting external data sources.
164
+
165
+ Be careful not to treat estimated or guessed values as if they were ground truth, however; otherwise you may end up drawing conclusions about a reality that does not exist.
166
+
167
+ As an exercise, let's guess some values to fill in the nulls, then try giving names to the animals with `null` names by editing the cells.
168
+ """
169
+ )
170
+ return
171
+
172
+
173
+ @app.cell
174
+ def _(df, mo, pl):
175
+ guesstimates = df.with_columns(
176
+ pl.col("height").fill_null(pl.col("height").mean().over("species")),
177
+ pl.col("age").fill_null(0),
178
+ )
179
+ guesstimates = mo.ui.data_editor(
180
+ guesstimates,
181
+ editable_columns=["name"],
182
+ )
183
+ guesstimates
184
+ return (guesstimates,)
185
+
186
+
187
+ @app.cell
188
+ def _(guesstimates):
189
+ guesstimates.value
190
+ return
191
+
192
+
193
+ @app.cell(hide_code=True)
194
+ def _(mo):
195
+ mo.md(
196
+ r"""
197
+ ### TL;DR
198
+
199
+ Before we head into the mini case study, a brief review of what we have covered:
200
+
201
+ - use `df.null_count()` or `expr.is_null()` to count and identify missing values
202
+ - you could just drop rows with values missing in any column, or in a subset of columns, with `df.drop_nulls()`, but in most cases you'll want to be more careful about it
203
+ - take into consideration whether you want to preserve null values or remove them when choosing between `df.filter()` and `df.remove()`
204
+ - if you don't want to propagate null values, use `_missing` variations of methods such as `eq` vs `eq_missing`
205
+ - you may want to fill in missing values with calculations via `fill_null`, join and coalesce with other datasets (see the sketch below), or manually edit the data based on external documents
206
+
207
+ You can also refer to the polars [User Guide](https://docs.pola.rs/user-guide/expressions/missing-data/) for more information.
208
+
209
+ Whichever approach you take, remember to document how you handled it!
210
+ """
211
+ )
212
+ return
213
+
214
+
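The last option in that list, filling nulls from another dataset, deserves a quick illustration. Here is a minimal standalone sketch (not a cell from the notebook), using a hypothetical `lookup` frame with typical heights per species: left-join on the key, then `pl.coalesce` keeps the observed value and falls back to the reference one.

```py
import polars as pl

pets = pl.DataFrame(
    {
        "species": ["Dog", "Dog", "Cat"],
        "name": ["Millie", "Wally", "Kazusa"],
        "height": [None, 60, None],
    }
)
# Hypothetical reference table used only for this sketch
lookup = pl.DataFrame({"species": ["Dog", "Cat"], "typical_height": [55, 23]})

filled = (
    pets.join(lookup, on="species", how="left")
    # coalesce keeps the measured height and falls back to the typical one
    .with_columns(pl.coalesce("height", "typical_height").alias("height"))
    .drop("typical_height")
)
print(filled)
```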
215
+ @app.cell(hide_code=True)
216
+ def _(mo):
217
+ mo.md(
218
+ r"""
219
+ # Mini Case Study
220
+
221
+ We will be using a dataset from `alertario` about the weather in Rio de Janeiro, originally available in Google BigQuery under `datario.clima_pluviometro`. What you need to know about it:
222
+
223
+ - Contains multiple stations covering the Municipality of Rio de Janeiro
224
+ - Measures precipitation in millimeters, with a granularity of 15 minutes
225
+ - We filtered it to only include data from 2020, 2021 and 2022
226
+ """
227
+ )
228
+ return
229
+
230
+
231
+ @app.cell
232
+ def _(px, stations):
233
+ px.scatter_map(stations, lat="lat", lon="lon", text="name")
234
+ return
235
+
236
+
237
+ @app.cell(disabled=True, hide_code=True)
238
+ def _(pl, px, stations):
239
+ # In case `scatter_map` does not work for you:
240
+ _fig = px.scatter_geo(stations, lat="lat", lon="lon", hover_name="name")
241
+
242
+ _min_lat = stations.select(pl.col("lat").min()).item()
243
+ _max_lat = stations.select(pl.col("lat").max()).item()
244
+ _min_lon = stations.select(pl.col("lon").min()).item()
245
+ _max_lon = stations.select(pl.col("lon").max()).item()
246
+
247
+ _fig.update_geos(
248
+ lataxis_range=[_min_lat - 0.2, _max_lat + 0.2],
249
+ lonaxis_range=[_min_lon - 0.2, _max_lon + 0.2],
250
+ resolution=50,
251
+ showocean=True,
252
+ oceancolor="Lightblue",
253
+ )
254
+ _fig
255
+ return
256
+
257
+
258
+ @app.cell(hide_code=True)
259
+ def _(mo):
260
+ mo.md(
261
+ r"""
262
+ # Stations
263
+
264
+ First, let's take a look at some of the stations. Notice how
265
+
266
+ - Some stations have been deactivated, so there won't be any data about them (in fact, we don't even know their coordinates)
267
+ - Some columns do not contain any data at all!
268
+
269
+ We will drop the empty columns and remove the rows without coordinates.
270
+ """
271
+ )
272
+ return
273
+
274
+
275
+ @app.cell(hide_code=True)
276
+ def _(dirty_stations, mo, pl):
277
+ # If you were working on this yourself, you may want to briefly look at *all* of them, but for practical purposes I am taking a slice for the displayed output, as otherwise it would take too much screen space.
278
+ # mo.ui.table(dirty_stations, pagination=False)
279
+
280
+ mo.vstack(
281
+ [
282
+ mo.md("Before (head and tail sample):"),
283
+ pl.concat([dirty_stations.head(3), dirty_stations.tail(3)], how="vertical"),
284
+ ]
285
+ )
286
+ return
287
+
288
+
289
+ @app.cell
290
+ def _(dirty_stations, mo, pl):
291
+ stations = dirty_stations.drop_nulls(subset=("lat", "lon")).drop(pl.col(r"^operation_(start|end)_date$"))
292
+ mo.vstack([mo.md("After (full dataframe):"), stations])
293
+ return (stations,)
294
+
295
+
296
+ @app.cell(hide_code=True)
297
+ def _(mo):
298
+ mo.md(
299
+ r"""
300
+ # Precipitation
301
+ Now, let's move on to the Precipitation data.
302
+
303
+ ## Part 1 - Null Values
304
+
305
+ First of all, let's check for null values:
306
+ """
307
+ )
308
+ return
309
+
310
+
311
+ @app.cell
312
+ def _(dirty_weather, pl):
313
+ rain = pl.col("accumulated_rain_15_minutes") # Create an alias since we'll use that column a lot
314
+
315
+ dirty_weather.filter(rain.is_null())
316
+ return (rain,)
317
+
318
+
319
+ @app.cell(hide_code=True)
320
+ def _(dirty_weather, mo, rain):
321
+ _missing_count = dirty_weather.select(rain.is_null().sum()).item()
322
+
323
+ mo.md(
324
+ f"As you can see, there are {_missing_count:,} rows missing the accumulated rain for a period.\n\nThat could be caused by sensor malfunctions, maintenance, Bobby Tables or a myriad of other reasons. While it may be a small percentage of the data ({_missing_count / len(dirty_weather):.3%}), it is still important to take it into consideration, one way or another."
325
+ )
326
+ return
327
+
328
+
329
+ @app.cell(hide_code=True)
330
+ def _(mo):
331
+ mo.md(
332
+ r"""
333
+ ### First option for fixing it: Dropping data
334
+
335
+ We could just remove those rows like we did for the stations, which may be a passable solution for some problems, but is not always the best idea.
336
+ ```py
337
+ dirty_weather.drop_nulls()
338
+ ```
339
+
340
+ ### Second option for fixing it: Interpolation
341
+
342
+ Instead of removing these rows, we can use some heuristics to guess values that make sense for them. Remember that this adds a degree of uncertainty to the final results, so you should disclose how you are treating missing values if you draw any conclusions based on such guesses.
343
+ ```py
344
+ dirty_weather.with_columns(rain.fill_null(strategy="forward"))
345
+ ```
346
+
347
+ Which strategy makes sense for your data varies greatly. In some cases you'll want to use the mean to keep the distribution centered, while in other cases you'll want to fill with zero to avoid changing the total, or fill forward/backward to keep the series mostly continuous (a small comparison of these strategies is sketched after this cell).
348
+
349
+ ### Last option for fixing it: Acquire the correct values from elsewhere
350
+
351
+ Like manually adding names to the animals in the introduction; you could try finding approximate values in another dataset or, in some cases, manually input the correct values.
352
+
353
+ ### However
354
+
355
+ Let's investigate a bit more before committing to either approach.
356
+ For example, is our current data even complete, or are we already missing some rows beyond those with null values?
357
+ """
358
+ )
359
+ return
360
+
361
+
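Before moving on, a standalone aside (not a cell from the notebook) to make the strategy trade-offs above concrete: a toy column filled with a few different `fill_null` strategies. Which one is appropriate depends entirely on what the column measures.

```py
import polars as pl

toy = pl.DataFrame({"rain": [0.2, None, 1.0, None, 0.0]})

toy.with_columns(
    zero=pl.col("rain").fill_null(strategy="zero"),  # preserves the column total
    mean=pl.col("rain").fill_null(strategy="mean"),  # preserves the column mean
    fwd=pl.col("rain").fill_null(strategy="forward"),  # repeats the last observed value
)
```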
362
+ @app.cell
363
+ def _(dirty_weather, pl):
364
+ seen_counts = dirty_weather.group_by(pl.col("datetime").dt.time(), "station").len()
365
+
366
+ # Fun fact: a single row has its time set to `23:55`.
367
+ # It should not be present in this dataset, but found its way into the official Google BigQuery table somehow.
368
+ seen_counts = seen_counts.filter(pl.col("len") > 1)
369
+ # You may want to treat it as a bug or outlier and remove it from dirty_weather, but we won't dive into cleaning it in this notebook
370
+
371
+ # seen_counts.sort("station", "datetime").select("station", "datetime", "len")
372
+ seen_counts.sort("len").select("station", "datetime", "len")
373
+ return
374
+
375
+
376
+ @app.cell
377
+ def _(pl):
378
+ expected_range = pl.datetime_range(
379
+ pl.lit("2020-01-01T00:00:00").str.to_datetime(time_zone="America/Sao_Paulo"),
380
+ pl.lit("2022-12-31T23:45:00").str.to_datetime(time_zone="America/Sao_Paulo"),
381
+ "15m",
382
+ )
383
+
384
+ pl.select(expected_range).group_by(pl.col.literal.dt.time()).len().sort("literal")
385
+ return
386
+
387
+
388
+ @app.cell(hide_code=True)
389
+ def _(mo):
390
+ mo.md(
391
+ r"""
392
+ ## Part 2 - Missing Rows
393
+
394
+ We can see that we expected there to be 1096 rows for each time of day for each station (one for each day from the start of 2020 to the end of 2022), but in reality we see between 1077 and 1096 rows.
395
+
396
+ That difference could be caused by the same factors as null values, or even by someone dropping null values along the way, but for the purposes of this notebook let's say that we want to have values for each combination with no exceptions, so we'll have to make reasonable assumptions to interpolate and extrapolate them.
397
+
398
+ ### Upsampling
399
+
400
+ Given that we are working with time series data, we will [upsample](https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.upsample.html) the data, but you could also create a DataFrame containing all expected rows and then use `join(how="...")` (a sketch of that alternative follows this cell).
401
+
402
+ However, that will give us _even more_ null values, so we will want to fill them in afterwards. For this case, we will just use a forward fill followed by a backward fill.
403
+ """
404
+ )
405
+ return
406
+
407
+
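For reference, here is a rough sketch of the join-based alternative mentioned above, using made-up values rather than the weather dataset: build a frame containing every expected (station, datetime) combination and left-join the observations onto it, so missing rows show up as nulls ready to be filled.

```py
import polars as pl

# Hypothetical observations with a gap at 00:30
observed = pl.DataFrame(
    {
        "station": ["A", "A", "A"],
        "datetime": ["2020-01-01T00:00:00", "2020-01-01T00:15:00", "2020-01-01T00:45:00"],
        "rain": [0.0, 0.2, 0.4],
    }
).with_columns(pl.col("datetime").str.to_datetime())

# Every expected timestamp for the window, cross-joined with the stations
expected = pl.DataFrame({"station": ["A"]}).join(
    pl.select(
        datetime=pl.datetime_range(
            pl.lit("2020-01-01T00:00:00").str.to_datetime(),
            pl.lit("2020-01-01T00:45:00").str.to_datetime(),
            "15m",
        )
    ),
    how="cross",
)

# The left join keeps every expected row; the 00:30 gap appears as a null
expected.join(observed, on=["station", "datetime"], how="left")
```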
408
+ @app.cell
409
+ def _(dirty_weather, mo, pl, rain):
410
+ _hollow_weather = dirty_weather.sort("station", "datetime").upsample("datetime", every="15m", group_by="station")
411
+ weather = _hollow_weather.fill_null(strategy="forward").fill_null(strategy="backward")
412
+
413
+ mo.vstack(
414
+ [
415
+ mo.ui.table(
416
+ label="Null counts at each step",
417
+ data=pl.concat(
418
+ [
419
+ dirty_weather.null_count().select(
420
+ pl.lit("Before upsampling").alias("label"), rain, "station", "datetime"
421
+ ),
422
+ _hollow_weather.null_count().select(
423
+ pl.lit("After upsampling").alias("label"), rain, "station", "datetime"
424
+ ),
425
+ weather.null_count().select(pl.lit("After filling").alias("label"), rain, "station", "datetime"),
426
+ ]
427
+ ),
428
+ ),
429
+ mo.md("Data after upsampling and filling in nulls:"),
430
+ weather,
431
+ ]
432
+ )
433
+ return (weather,)
434
+
435
+
436
+ @app.cell(hide_code=True)
437
+ def _(mo):
438
+ mo.md(
439
+ r"""
440
+ Now that we finally have a clean dataset, let's play around with it a little.
441
+
442
+ ### Example App
443
+
444
+ Let's display the amount of precipitation each station measured within a timeframe, aggregated to a lower granularity.
445
+ """
446
+ )
447
+ return
448
+
449
+
450
+ @app.cell(hide_code=True)
451
+ def _(mo):
452
+ filters = (
453
+ mo.md(
454
+ """Filters for the example
455
+
456
+ Year: {year}
457
+ Days of the year: {day}
458
+ Hours of each day: {hour}
459
+ Aggregation granularity: {interval}
460
+ """
461
+ )
462
+ .batch(
463
+ year=mo.ui.dropdown([2020, 2021, 2022], value=2022),
464
+ day=mo.ui.range_slider(1, 365, show_value=True, full_width=True, value=[87, 94]),
465
+ hour=mo.ui.range_slider(0, 24, 0.25, show_value=True, full_width=True),
466
+ interval=mo.ui.dropdown(["15m", "30m", "1h", "2h", "4h", "6h", "1d", "7d", "30d"], value="4h"),
467
+ )
468
+ .form()
469
+ )
470
+
471
+ # Note: You could use `mo.ui.date_range` instead, but I just don't like it myself
472
+ # mo.ui.date_range(start="2020-01-01", stop="2022-12-31", value=["2022-03-28", "2022-04-03"], label="Display range")
473
+
474
+ filters
475
+ return (filters,)
476
+
477
+
478
+ @app.cell
479
+ def _(filters, mo, pl, rain, stations, weather):
480
+ mo.stop(filters.value is None)
481
+
482
+ _range_seconds = map(lambda hour: hour * 3600, filters.value["hour"])
483
+ _df_seconds = pl.col("datetime").dt.hour().cast(pl.Float64()).mul(3600) + pl.col("datetime").dt.minute().cast(
484
+ pl.Float64()
485
+ ).mul(60)
486
+
487
+ animation_data = (
488
+ weather.lazy()
489
+ .filter(
490
+ pl.col("datetime").dt.year() == filters.value["year"],
491
+ pl.col("datetime").dt.ordinal_day().is_between(*filters.value["day"]),
492
+ _df_seconds.is_between(*_range_seconds),
493
+ )
494
+ .group_by_dynamic("datetime", group_by="station", every=filters.value["interval"])
495
+ .agg(rain.sum().alias("precipitation"))
496
+ .remove(pl.col("precipitation").eq(0).all().over("station"))
497
+ .join(stations.lazy(), on="station")
498
+ .select("name", "lat", "lon", "precipitation", "datetime")
499
+ .collect()
500
+ )
501
+ return (animation_data,)
502
+
503
+
504
+ @app.cell
505
+ def _(animation_data, pl, px):
506
+ _fig = px.scatter_geo(
507
+ animation_data.with_columns(avg_precipitation=pl.col("precipitation").mean()),
508
+ lat="lat",
509
+ lon="lon",
510
+ hover_name="name",
511
+ animation_group="name",
512
+ animation_frame="datetime",
513
+ size="avg_precipitation",
514
+ color="precipitation",
515
+ color_continuous_scale="PuBu",
516
+ range_color=[0, animation_data.select(pl.col("precipitation").max()).item()],
517
+ )
518
+
519
+ _min_lat = animation_data.select(pl.col("lat").min()).item()
520
+ _max_lat = animation_data.select(pl.col("lat").max()).item()
521
+ _min_lon = animation_data.select(pl.col("lon").min()).item()
522
+ _max_lon = animation_data.select(pl.col("lon").max()).item()
523
+
524
+ _fig.update_geos(
525
+ lataxis_range=[_min_lat - 0.2, _max_lat + 0.2],
526
+ lonaxis_range=[_min_lon - 0.2, _max_lon + 0.2],
527
+ resolution=50,
528
+ showocean=True,
529
+ oceancolor="Lightblue",
530
+ )
531
+ _fig
532
+ return
533
+
534
+
535
+ @app.cell(hide_code=True)
536
+ def _(mo):
537
+ mo.md(
538
+ r"""
539
+ If we were missing some rows, we would have circles popping in and out of existence instead of a smooth animation!
540
+
541
+ In many scenarios, missing data can also lead to wrong results overall, for example if we were to estimate the total amount of rainfall during the observed period:
542
+ """
543
+ )
544
+ return
545
+
546
+
547
+ @app.cell
548
+ def _(dirty_weather, mo, rain, weather):
549
+ old_estimate = dirty_weather.select(rain.sum()).item()
550
+ new_estimate = weather.select(rain.sum()).item()
551
+ # Note: The aggregation used to calculate these variables (taking a sum across all stations) is not very meaningful, but the relative difference between them scales across many potentially useful aggregations
552
+
553
+ mo.md(f"Our estimates may change by roughly {(new_estimate - old_estimate) / old_estimate:.2%}")
554
+ return
555
+
556
+
557
+ @app.cell(hide_code=True)
558
+ def _(mo):
559
+ mo.md(
560
+ r"""
561
+ That is still a relatively small difference, but every drop counts when you are dealing with the weather.
562
+
563
+ For datasets with a higher share of missing values, that difference can get much higher.
564
+ """
565
+ )
566
+ return
567
+
568
+
569
+ @app.cell(hide_code=True)
570
+ def _(mo):
571
+ mo.md(
572
+ r"""
573
+ # Bonus Content
574
+
575
+ ## Appendix A: Missing Time Zones
576
+
577
+ The original dataset contained naive datetimes instead of timezone-aware ones, but we can infer whether it refers to UTC or local time (in this case, UTC-03:00) based on the measurements.
578
+
579
+ For example, we can select one specific interval during which we know it rained a lot, or graph the average amount of precipitation for each hour of the day, then compare the data timestamps with a ground truth.
580
+ """
581
+ )
582
+ return
583
+
584
+
585
+ @app.cell(hide_code=True)
586
+ def _(dirty_weather_naive, mo):
587
+ mo.vstack(
588
+ [
589
+ mo.md("Original data example:"),
590
+ dirty_weather_naive.head(3),
591
+ ]
592
+ )
593
+ return
594
+
595
+
596
+ @app.cell
597
+ def _(dirty_weather_naive, pl, px, rain):
598
+ naive_downfall_per_hour = (
599
+ dirty_weather_naive.group_by(pl.col("datetime").dt.hour().alias("hour"))
600
+ .agg(rain.sum().alias("accumulated_rain"))
601
+ .with_columns(pl.col("accumulated_rain").truediv(pl.col("accumulated_rain").sum()).mul(100))
602
+ )
603
+ px.bar(
604
+ naive_downfall_per_hour.sort("hour"),
605
+ x="hour",
606
+ y="accumulated_rain",
607
+ title="Distribution of precipitation per hour (%), using the naive datetime",
608
+ )
609
+ return
610
+
611
+
612
+ @app.cell
613
+ def _(dirty_weather_naive, pl, rain, stations):
614
+ naive_top_rain_events = (
615
+ dirty_weather_naive.lazy()
616
+ # If you wanted to filter the dates and locate a specific event:
617
+ # .filter(pl.col("datetime").is_between(pl.lit("2022-03-01").str.to_datetime(), pl.lit("2022-05-01").str.to_datetime()))
618
+ .sort("station", "datetime")
619
+ .group_by_dynamic("datetime", every="1h", offset="30m", group_by="station")
620
+ .agg(rain.sum())
621
+ .join(stations.lazy(), on="station")
622
+ .sort(rain, descending=True)
623
+ .select(
624
+ "name",
625
+ pl.col("datetime").alias("window_start"),
626
+ (pl.col("datetime") + pl.duration(hours=1)).alias("window_end"),
627
+ rain.alias("accumulated rain"),
628
+ )
629
+ .head(50)
630
+ .collect()
631
+ )
632
+ naive_top_rain_events
633
+ return
634
+
635
+
636
+ @app.cell(hide_code=True)
637
+ def _(mo):
638
+ mo.md(
639
+ r"""
640
+ By externally researching the expected distribution and looking up some of the extreme weather events, we can come to a conclusion about whether it is aligned with local time or with UTC.
641
+
642
+ In this case, the distribution matches the normal weather for this region and we can see that the hours with the most precipitation match those of historical events, so it is safe to say it is using local time (equivalent to the America/Sao_Paulo time zone).
643
+ """
644
+ )
645
+ return
646
+
647
+
648
+ @app.cell
649
+ def _(dirty_weather_naive, pl):
650
+ dirty_weather = dirty_weather_naive.with_columns(pl.col("datetime").dt.replace_time_zone("America/Sao_Paulo"))
651
+
652
+ dirty_weather.head(3)
653
+ return (dirty_weather,)
654
+
655
+
656
+ @app.cell(hide_code=True)
657
+ def _(mo):
658
+ mo.md(
659
+ r"""
660
+ ## Appendix B: Not a Number
661
+
662
+ While some other tools without proper support for missing values may use `NaN` as a way to indicate a value is missing, in polars it is treated exclusively as a float value, much like `0.0`, `1.0` or `infinity`.
663
+
664
+ You can use `.fill_null(float('nan'))` if you need to convert floats to a format such tools accept, or use `.fill_nan(None)` if you are importing data from them, assuming there are no values that really are supposed to be the float NaN (a quick sketch of the distinction appears at the end of this appendix).
665
+
666
+ Remember that many calculations can result in NaN, for example dividing by zero:
667
+ """
668
+ )
669
+ return
670
+
671
+
672
+ @app.cell
673
+ def _(dirty_weather, pl, rain):
674
+ day_perc = dirty_weather.select(
675
+ "datetime",
676
+ (rain / rain.sum().over("station", pl.col("datetime").dt.date())).alias("percentage_of_day_precipitation"),
677
+ )
678
+ perc_col = pl.col("percentage_of_day_precipitation")
679
+
680
+ day_perc
681
+ return day_perc, perc_col
682
+
683
+
684
+ @app.cell(hide_code=True)
685
+ def _(day_perc, mo, perc_col):
686
+ mo.md(
687
+ f"""
688
+ It is null for {day_perc.select(perc_col.is_null().mean()).item():.4%} of the rows, but is NaN for {day_perc.select(perc_col.is_nan().mean()).item():.4%} of them.
689
+ If we use the cleaned `weather` dataframe for this calculation instead of `dirty_weather`, we will have no nulls, but note how this calculation can produce both, each with a different meaning.
690
+
691
+ In this case it makes sense to fill in NaNs as 0 to indicate there was no rain during that period, but treating the nulls the same could lead to a different interpretation of the data, so remember to handle NaNs and nulls separately.
692
+ """
693
+ )
694
+ return
695
+
696
+
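A quick standalone sketch (not from the weather data) of how nulls and NaNs behave differently: `is_null` flags only the missing value, `is_nan` flags only the NaN (and propagates the null), and `fill_nan` leaves the null untouched.

```py
import polars as pl

vals = pl.DataFrame({"x": [1.0, None, float("nan"), 0.0]})

vals.with_columns(
    is_null=pl.col("x").is_null(),  # True only for the missing value
    is_nan=pl.col("x").is_nan(),  # True only for the NaN; stays null for the null
    nan_to_zero=pl.col("x").fill_nan(0),  # replaces the NaN, keeps the null
)
```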
697
+ @app.cell(hide_code=True)
698
+ def _(mo):
699
+ mo.md(
700
+ r"""
701
+ ## Appendix C: Everything else
702
+
703
+ Long as this notebook is, it cannot reasonably cover ***everything*** that may have to deal with missing values, as that is effectively everything that deals with data.
704
+
705
+ This section very briefly covers some other features not mentioned above.
706
+ """
707
+ )
708
+ return
709
+
710
+
711
+ @app.cell(hide_code=True)
712
+ def _(mo):
713
+ mo.md(
714
+ r"""
715
+ ### Missing values in Aggregations
716
+
717
+ Many aggregation methods will ignore/skip missing values, while others take them into consideration.
718
+
719
+ Always check the documentation of the method you're using; most of the time the docstring will explain its behaviour.
720
+ """
721
+ )
722
+ return
723
+
724
+
725
+ @app.cell
726
+ def _(df, pl):
727
+ df.group_by("species").agg(
728
+ pl.col("height").len().alias("len"),
729
+ pl.col("height").count().alias("count"),
730
+ )
731
+ return
732
+
733
+
734
+ @app.cell(hide_code=True)
735
+ def _(mo):
736
+ mo.md(
737
+ r"""
738
+ ### Missing values in Joins
739
+
740
+ By default, null values will never produce matches when using [join](https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.join.html), but you can specify `nulls_equal=True` to match null values with each other.
741
+ """
742
+ )
743
+ return
744
+
745
+
746
+ @app.cell(hide_code=True)
747
+ def _(pl):
748
+ age_groups = pl.DataFrame(
749
+ [
750
+ {"age": None, "stage": "Unknown"},
751
+ {"age": [0, 1], "stage": "Baby"},
752
+ {"age": [2, 3, 4, 5, 6, 7, 8, 9, 10], "stage": "Adult"},
753
+ {"age": [11, 12, 13, 14], "stage": "Senior"},
754
+ {"age": [15, 16, 17, 18, 19, 20], "stage": "Geriatric"},
755
+ ]
756
+ )
757
+ age_groups
758
+ return (age_groups,)
759
+
760
+
761
+ @app.cell
762
+ def _(age_groups, df):
763
+ df.join(age_groups.explode("age"), on="age")
764
+ return
765
+
766
+
767
+ @app.cell
768
+ def _(age_groups, df):
769
+ df.join(age_groups.explode("age"), on="age", nulls_equal=True)
770
+ return
771
+
772
+
773
+ @app.cell(hide_code=True)
774
+ def _(mo):
775
+ mo.md(
776
+ r"""
777
+ ## Utilities
778
+
779
+ Loading data and imports
780
+ """
781
+ )
782
+ return
783
+
784
+
785
+ @app.cell
786
+ def _(pl):
787
+ raw_stations = pl.scan_csv("hf://datasets/etrotta/weather-alertario/datario_alertario_stations.csv")
788
+ raw_weather = pl.scan_csv("hf://datasets/etrotta/weather-alertario/datario_alertario_weather_2020_to_2022.csv")
789
+ return raw_stations, raw_weather
790
+
791
+
792
+ @app.cell
793
+ def _(pl, raw_stations):
794
+ dirty_stations = raw_stations.select(
795
+ pl.col("id_estacao").alias("station"),
796
+ pl.col("estacao").alias("name"),
797
+ pl.col("latitude").alias("lat"),
798
+ pl.col("longitude").alias("lon"),
799
+ pl.col("cota").alias("altitude"),
800
+ pl.col("situacao").alias("situation"),
801
+ pl.col("endereco").alias("address"),
802
+ pl.col("data_inicio_operacao").alias("operation_start_date"),
803
+ pl.col("data_fim_operacao").alias("operation_end_date"),
804
+ ).collect()
805
+ return (dirty_stations,)
806
+
807
+
808
+ @app.cell
809
+ def _(pl, raw_weather):
810
+ dirty_weather_naive = raw_weather.select(
811
+ pl.col("id_estacao").alias("station"),
812
+ pl.col("acumulado_chuva_15_min").alias("accumulated_rain_15_minutes"),
813
+ pl.concat_str("data_particao", pl.lit("T"), "horario").str.to_datetime(time_zone=None).alias("datetime"),
814
+ ).collect()
815
+ return (dirty_weather_naive,)
816
+
817
+
818
+ @app.cell
819
+ def _():
820
+ import marimo as mo
821
+ return (mo,)
822
+
823
+
824
+ @app.cell
825
+ def _():
826
+ import polars as pl
827
+ return (pl,)
828
+
829
+
830
+ @app.cell
831
+ def _():
832
+ import plotly.express as px
833
+ return (px,)
834
+
835
+
836
+ if __name__ == "__main__":
837
+ app.run()