Skip to content

Commit 8a6db70

Browse files
willschlitzer, weiji14, Meghan Jones
authored
Add function to import hotspot dataset (#1386)
Co-authored-by: Wei Ji <[email protected]> Co-authored-by: Meghan Jones <[email protected]>
1 parent 0881d0f commit 8a6db70

File tree

5 files changed

+46
-0
lines changed

5 files changed

+46
-0
lines changed

doc/api/index.rst

+1
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,7 @@ and store them in the GMT cache folder.
161161
datasets.load_sample_bathymetry
162162
datasets.load_usgs_quakes
163163
datasets.load_fractures_compilation
164+
datasets.load_hotspots
164165

165166
.. automodule:: pygmt.exceptions
166167

pygmt/datasets/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
from pygmt.datasets.earth_relief import load_earth_relief
66
from pygmt.datasets.samples import (
77
load_fractures_compilation,
8+
load_hotspots,
89
load_japan_quakes,
910
load_ocean_ridge_points,
1011
load_sample_bathymetry,

pygmt/datasets/samples.py

+26
Original file line numberDiff line numberDiff line change
@@ -123,3 +123,29 @@ def load_fractures_compilation():
123123
fname = which("@fractures_06.txt", download="c")
124124
data = pd.read_csv(fname, header=None, sep=r"\s+", names=["azimuth", "length"])
125125
return data[["length", "azimuth"]]
126+
127+
128+
def load_hotspots():
    """
    Load a table with the locations, names, and suggested symbol sizes of
    hotspots.

    This is the ``@hotspots.txt`` dataset used in the GMT tutorials, with data
    from Mueller, Royer, and Lawver, 1993, Geology, vol. 21, pp. 275-278. The
    main 5 hotspots used by Doubrovine et al. [2012] have symbol sizes twice
    the size of all other hotspots.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
    first time you invoke this function. Afterwards, it will load the data from
    the cache. So you'll need an internet connection the first time around.

    Returns
    -------
    data : pandas.DataFrame
        The data table with columns "longitude", "latitude", "symbol_size", and
        "place_name".
    """
    # Fetch (or reuse from cache) the sample file shipped with GMT.
    fname = which("@hotspots.txt", download="c")
    # Column names match the test suite's expectations; note "place_name"
    # (with underscore) is the actual column created here.
    columns = ["longitude", "latitude", "symbol_size", "place_name"]
    # skiprows=3 skips the file's header/comment lines before the data rows.
    data = pd.read_table(filepath_or_buffer=fname, sep="\t", skiprows=3, names=columns)
    return data

pygmt/helpers/testing.py

+1
Original file line numberDiff line numberDiff line change
@@ -164,6 +164,7 @@ def download_test_data():
164164
"@N00W090.earth_relief_03m_p.nc",
165165
# Other cache files
166166
"@fractures_06.txt",
167+
"@hotspots.txt",
167168
"@ridge.txt",
168169
"@srtm_tiles.nc", # needed for 03s and 01s relief data
169170
"@Table_5_11.txt",

pygmt/tests/test_datasets_samples.py

+17
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
"""
22
Test basic functionality for loading sample datasets.
33
"""
4+
import pandas as pd
45
from pygmt.datasets import (
56
load_fractures_compilation,
7+
load_hotspots,
68
load_japan_quakes,
79
load_ocean_ridge_points,
810
load_sample_bathymetry,
@@ -72,3 +74,18 @@ def test_fractures_compilation():
7274
assert summary.loc["max", "length"] == 984.652
7375
assert summary.loc["min", "azimuth"] == 0.0
7476
assert summary.loc["max", "azimuth"] == 360.0
77+
78+
79+
def test_hotspots():
    """
    Check that the @hotspots.txt dataset loads without errors.
    """
    expected_columns = [
        "longitude",
        "latitude",
        "symbol_size",
        "place_name",
    ]
    data = load_hotspots()
    # Verify dimensions, column labels, and container type of the loaded table.
    assert data.shape == (55, 4)
    assert list(data.columns) == expected_columns
    assert isinstance(data, pd.DataFrame)

0 commit comments

Comments
 (0)