from netCDF4 import Dataset
from datetime import datetime as dt

import numpy as np
import pandas as pd
#from pandas import Series, DataFrame, Panel


#import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
from sklearn.metrics import mean_squared_error
from math import sqrt


import os
import datetime
import glob

# Set the matplotlib backend
#import matplotlib as mpl
#mpl.use('Agg')

import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#import scripty.stats

import iris
import iris.coord_categorisation
import iris.quickplot as qplt
import iris.plot as iplt

import jules

import make_time_coord

from iris.coords import DimCoord
from iris.cube import Cube


# Build an absolute path to the data directory, relative to this script's
# location, so the script can be run easily from other Python programmes.

import os.path as op
data_path = op.abspath(op.join(op.dirname(__file__), '../data'))

def jules_output_cube(cube, field, filename):
    '''
    Callback to add an x and y coord (otherwise load returns a cube with anonymous
    dimensions, which can't be concatenated).
    '''

    n = cube.ndim

    try:
        if cube.coord_dims('time') == (0,):  # assume if time is a dimcoord, it is at position 0
            n -= 1
    except iris.exceptions.CoordinateNotFoundError:
        pass

    if n >= 1:
        x_coord = iris.coords.DimCoord(np.arange(cube.shape[-1]), var_name='x')
        xdim = cube.ndim - 1
        cube.add_dim_coord(x_coord, (xdim, ))

    if n >= 2:
        y_coord = iris.coords.DimCoord([0], var_name='y')
        ydim = cube.ndim - 2
        cube.add_dim_coord(y_coord, (ydim, ))

    #print(cube.ndim)
    return


# op.normpath adjusts the path separators automatically for either Unix or
# Windows operating systems.

filename_run_ba137b = op.normpath(op.join(data_path, 'US_Ha1-r9227_branch-presc0.D.1991-2012_run_ba137b.nc'))
filename_run_ba137f = op.normpath(op.join(data_path, 'US_Ha1-r9227_branch-presc0.D.1991-2012_run_ba137f.nc'))
filename_run_ba137b_mm = op.normpath(op.join(data_path, 'US_Ha1-r9227_branch-presc0.D.1991-2012_run_ba137b_mm.nc'))
filename_run_ba137f_mm = op.normpath(op.join(data_path, 'US_Ha1-r9227_branch-presc0.D.1991-2012_run_ba137f_mm.nc'))

df = pd.read_csv(op.normpath(op.join(data_path, 'FLX_US-Ha1_FLUXNET2015_FULLSET_MM_1991-2012_1-3.csv')))




print(df['TIMESTAMP'].shape)
print(df['TIMESTAMP'][0:])

# Use column assignment here: df.GPP = ... would only set an instance
# attribute, not create a DataFrame column.
df['GPP'] = df['GPP_NT_VUT_REF']
print(df['GPP'].shape)
print(df['GPP'][0:])

# Clip negative GPP observations to zero: (|x| + x) / 2 leaves positive
# values unchanged and maps negative ones to 0.
obs_raw = df['GPP'][0:]
obs_cor = (abs(obs_raw) + obs_raw) / 2
print("Corrected Observations = ", obs_cor)
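# Sketch (illustrative, not in the original script): the (|x| + x)/2 clipping
# above is equivalent to an elementwise maximum with zero, so the same result
# can be obtained with:
obs_cor_alt = np.maximum(obs_raw, 0)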

"""
daily_carbon_file_headings = [
'TIMESTAMP','NEE_VUT_REF','NEE_VUT_REF_RANDUNC','NEE_VUT_25','NEE_VUT_50','NEE_VUT_75',
'RECO_NT_VUT_REF','RECO_NT_VUT_25','RECO_NT_VUT_50','RECO_NT_VUT_75',
'GPP_NT_VUT_REF','GPP_NT_VUT_25','GPP_NT_VUT_50','GPP_NT_VUT_75',
'RECO_DT_VUT_REF','RECO_DT_VUT_25','RECO_DT_VUT_50','RECO_DT_VUT_75',
'GPP_DT_VUT_REF','GPP_DT_VUT_25','GPP_DT_VUT_50','GPP_DT_VUT_75']
"""


nc_run_ba137b = Dataset(filename_run_ba137b)
#print(nc_run_ba137b.variables)

y_run_ba137b = nc_run_ba137b.variables['gpp_gb']
y_run_ba137b_units = nc_run_ba137b.variables['gpp_gb'].units

x_run_ba137b = nc_run_ba137b.variables['time']
x_run_ba137b_units = nc_run_ba137b.variables['time'].units

y_run_ba137b = nc_run_ba137b.variables['gpp_gb'][0:, 0, 0]
x_run_ba137b = nc_run_ba137b.variables['time'][0:]
nc_run_ba137b.close()  # close() needs parentheses; a bare .close is a no-op


nc_run_ba137f = Dataset(filename_run_ba137f)
#print(nc_run_ba137f.variables)

y_run_ba137f = nc_run_ba137f.variables['gpp_gb']
y_run_ba137f_units = nc_run_ba137f.variables['gpp_gb'].units

x_run_ba137f = nc_run_ba137f.variables['time']
x_run_ba137f_units = nc_run_ba137f.variables['time'].units

y_run_ba137f = nc_run_ba137f.variables['gpp_gb'][0:, 0, 0]
x_run_ba137f = nc_run_ba137f.variables['time'][0:]
print(len(x_run_ba137f))
nc_run_ba137f.close()

nc_run_ba137b_mm = Dataset(filename_run_ba137b_mm)

y_run_ba137b_mm = nc_run_ba137b_mm.variables['gpp_gb']
y_run_ba137b_mm_units = nc_run_ba137b_mm.variables['gpp_gb'].units

x_run_ba137b_mm = nc_run_ba137b_mm.variables['time']
x_run_ba137b_mm_units = nc_run_ba137b_mm.variables['time'].units

y_run_ba137b_mm = nc_run_ba137b_mm.variables['gpp_gb'][0:, 0, 0]
x_run_ba137b_mm = nc_run_ba137b_mm.variables['time'][0:]
# Print before closing: the variables are no longer readable once the
# dataset has been closed.
print(nc_run_ba137b_mm.dimensions)
#print(nc_run_ba137b_mm.variables['gpp_gb'][0:, 0, 0])
print(nc_run_ba137b_mm.variables['time'][0:])
nc_run_ba137b_mm.close()
#print(y_run_ba137b_mm*60*60*24*1000)



nc_run_ba137f_mm = Dataset(filename_run_ba137f_mm)

y_run_ba137f_mm = nc_run_ba137f_mm.variables['gpp_gb']
y_run_ba137f_mm_units = nc_run_ba137f_mm.variables['gpp_gb'].units

x_run_ba137f_mm = nc_run_ba137f_mm.variables['time']
x_run_ba137f_mm_units = nc_run_ba137f_mm.variables['time'].units

y_run_ba137f_mm = nc_run_ba137f_mm.variables['gpp_gb'][0:, 0, 0]
x_run_ba137f_mm = nc_run_ba137f_mm.variables['time'][0:]
#print(nc_run_ba137f_mm.variables['gpp_gb'][0:, 0, 0])
print(nc_run_ba137f_mm.variables['time'][0:])
print('shape of Jan_1991-Dec_2012 in months = ', nc_run_ba137f_mm.variables['time'][0:].shape)
nc_run_ba137f_mm.close()


#obs = df['GPP'][0:]
#mod_dynam = y_run_ba137f_mm*60*60*24*1000
#diff_mod_dynam_obs = mod_dynam-obs
#diff_mod_dynam_obs_squared = (mod_dynam-obs)**2
#rmse_dynam = np.sqrt(diff_mod_dynam_obs_squared)
#print("obs = " + str(["%.8f" % elem for elem in obs]))
#print("mod_dynam = " + str(["%.8f" % elem for elem in mod_dynam]))
#print("diff_mod_dynam_obs = " + str(["%.8f" % elem for elem in diff_mod_dynam_obs]))
#print("diff_mod_dynam_obs_squared = " + str(["%.8f" % elem for elem in diff_mod_dynam_obs_squared]))
#print("rmse_dynam = " + str(["%.8f" % elem for elem in rmse_dynam]))
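# Sketch (assumption, not from the original script): the commented block above
# square-roots each squared difference, which gives per-point absolute errors
# rather than an RMSE. If the monthly series align, a correct RMSE could use
# the mean_squared_error and sqrt already imported at the top:
#rmse_dynam = sqrt(mean_squared_error(obs_cor, y_run_ba137f_mm*60*60*24*1000))
#print("rmse_dynam = %.8f" % rmse_dynam)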

#coords.convert_units('celsius')


#time = DimCoord(x_run_ba137b_mm, standard_name='time', units='seconds since 2001-06-19 00:00:00')
#time_coord = make_time_coord.make_time_coord(nc_run_ba137b)
#time_coord.var_name = 'time'

#cube_137b = Cube((y_run_ba137b_mm)*60*60*24*1000)
#cube.add_dim_coord(time_coord, (0,))


#cube_137b.convert_units('years')
var_name_constraint = iris.Constraint(cube_func=lambda x: x.var_name == 'gpp_gb')
ba137b_cubelist = jules.load(filename_run_ba137b_mm, var_name_constraint, conv_to_grid=False, callback=jules_output_cube)
ba137b_cube = ba137b_cubelist.concatenate_cube()
ba137b_cube = iris.util.squeeze(ba137b_cube)
#ba137b_cube *= 60*60*24*1000
ba137b_cube.units = 'kg/m2/s'
print(ba137b_cube)

gpp_ba137b_cube = ba137b_cube
gpp_ba137b_cube.units = 'kg/m2/s'
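# Sketch (assumption, not part of the original run): instead of the manual
# *60*60*24*1000 scaling commented out above, iris can convert the cube to
# the g/m2/day used by the FLUXNET observations once its units are set:
#gpp_ba137b_cube.convert_units('g/m2/day')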

var_name_constraint = iris.Constraint(cube_func=lambda x: x.var_name == 'gpp_gb')
ba137f_cubelist = jules.load(filename_run_ba137f_mm, var_name_constraint, conv_to_grid=False, callback=jules_output_cube)
ba137f_cube = ba137f_cubelist.concatenate_cube()
ba137f_cube = iris.util.squeeze(ba137f_cube)
#ba137f_cube *= 60*60*24*1000
ba137f_cube.units = 'kg/m2/s'
print(ba137f_cube)

gpp_ba137f_cube = ba137f_cube
gpp_ba137f_cube.units = 'kg/m2/s'

"""
var_name_constraint = iris.Constraint(cube_func=lambda x: x.var_name == 'gpp_gb')
obs_cor_cubelist = jules.load(filename_run_ba137f_mm, var_name_constraint, conv_to_grid=False, callback=jules_output_cube)
obs_cor_cube = obs_cor_cubelist.concatenate_cube()
obs_cor_cube = iris.util.squeeze(obs_cor_cube)
obs_cor_cube *= 60*60*24*1000
obs_cor_cube.units = 'g/m2/day'
print(obs_cor_cube)
"""

dt_format = '%Y%m%d'
timestamp_convertfunc = lambda x: datetime.datetime.strptime(x, dt_format)

#rename 'TIMESTAMP_START' as 'TIMESTAMP' so make_time_coord recognises it
#names = [x.replace('TIMESTAMP_START', 'TIMESTAMP') for x in names]


filename = '/group_workspaces/jasmin2/jules/pmcguire/fluxnet/kwilliam/suite_data/vn1.0/fluxnet_obs/daily_obs/US_Ha1-energyandcarbon-dailyUTC.dat'
data = np.genfromtxt(filename, names=['YYYYMMDD_UTC', 'GPP', 'Reco', 'NEE', 'SH', 'LE'],
                     converters={'YYYYMMDD_UTC': timestamp_convertfunc},
                     dtype=None, deletechars='', delimiter=',',
                     skip_header=0, skip_footer=0,
                     usemask=True, missing_values=-9999)

# Add 12 hours to the datetimes so each daily value is stamped at midday.
# (The loop variable is named d to avoid shadowing the dt alias imported above.)
for i, d in enumerate(data['YYYYMMDD_UTC']):
    data['YYYYMMDD_UTC'][i] = d + datetime.timedelta(hours=12)
print(data)
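# Sketch (illustrative, not in the original script): because the column holds
# Python datetime objects, the same shift can be applied in one vectorised
# step in place of the loop above:
#data['YYYYMMDD_UTC'] = data['YYYYMMDD_UTC'] + datetime.timedelta(hours=12)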
time_coord = make_time_coord.make_time_coord(data)
time_coord.var_name = 'time'

############################################################################################################## GPP

obs_cube = iris.cube.Cube(data['GPP'], var_name='GPP')
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
#obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.03)

obs_cube.add_dim_coord(time_coord, (0,))

obs_cube.data  # make sure the data is read in right away, otherwise we get into trouble with biggus
               # (it gets confused about which points should be masked)

# add some extra aux_coords
iris.coord_categorisation.add_year(obs_cube, 'time')
iris.coord_categorisation.add_month(obs_cube, 'time')
iris.coord_categorisation.add_day_of_year(obs_cube, 'time')

obs_cube = iris.util.squeeze(obs_cube)
obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
GPP_obs_cube_mean = obs_cube_mean
print('GPP_obs_cube_mean.data = ', GPP_obs_cube_mean.data)
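# Sketch (hypothetical refactor, not part of the original script): the
# per-variable blocks in this file repeat the same cube-building steps, so
# they could be collapsed into one helper; monthly_mean_cube and its
# arguments are illustrative names only:
#def monthly_mean_cube(data, name, clip_to_zero=False):
#    cube = iris.cube.Cube(data[name], var_name=name)
#    cube.add_dim_coord(time_coord.copy(), (0,))
#    cube.data  # realise the data up front (see the biggus note above)
#    iris.coord_categorisation.add_year(cube, 'time')
#    iris.coord_categorisation.add_month(cube, 'time')
#    iris.coord_categorisation.add_day_of_year(cube, 'time')
#    cube = iris.util.squeeze(cube)
#    if clip_to_zero:
#        cube.data = np.ma.maximum(cube.data, 0.0)
#    return cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
#
#GPP_obs_cube_mean = monthly_mean_cube(data, 'GPP', clip_to_zero=True)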

############################################################################################################## Reco

obs_cube = iris.cube.Cube(data['Reco'], var_name='Reco')
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
#obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.03)

obs_cube.add_dim_coord(time_coord, (0,))

obs_cube.data  # make sure the data is read in right away, otherwise we get into trouble with biggus
               # (it gets confused about which points should be masked)

# add some extra aux_coords
iris.coord_categorisation.add_year(obs_cube, 'time')
iris.coord_categorisation.add_month(obs_cube, 'time')
iris.coord_categorisation.add_day_of_year(obs_cube, 'time')

obs_cube = iris.util.squeeze(obs_cube)
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
Reco_obs_cube_mean = obs_cube_mean
print('Reco_obs_cube_mean.data = ', Reco_obs_cube_mean.data)

############################################################################################################## NEE

obs_cube = iris.cube.Cube(data['NEE'], var_name='NEE')
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
#obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.03)

obs_cube.add_dim_coord(time_coord, (0,))

obs_cube.data  # make sure the data is read in right away, otherwise we get into trouble with biggus
               # (it gets confused about which points should be masked)

# add some extra aux_coords
iris.coord_categorisation.add_year(obs_cube, 'time')
iris.coord_categorisation.add_month(obs_cube, 'time')
iris.coord_categorisation.add_day_of_year(obs_cube, 'time')

obs_cube = iris.util.squeeze(obs_cube)
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
NEE_obs_cube_mean = obs_cube_mean
print('NEE_obs_cube_mean.data = ', NEE_obs_cube_mean.data)

############################################################################################################## SH

obs_cube = iris.cube.Cube(data['SH'], var_name='SH')
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
#obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.03)

obs_cube.add_dim_coord(time_coord, (0,))

obs_cube.data  # make sure the data is read in right away, otherwise we get into trouble with biggus
               # (it gets confused about which points should be masked)

# add some extra aux_coords
iris.coord_categorisation.add_year(obs_cube, 'time')
iris.coord_categorisation.add_month(obs_cube, 'time')
iris.coord_categorisation.add_day_of_year(obs_cube, 'time')

obs_cube = iris.util.squeeze(obs_cube)
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
SH_obs_cube_mean = obs_cube_mean
print('SH_obs_cube_mean.data = ', SH_obs_cube_mean.data)

############################################################################################################## LE
"""
obs_cube = iris.cube.Cube(data['LE'], var_name='LE')
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
#obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.03)

obs_cube.add_dim_coord(time_coord, (0,))

obs_cube.data  # make sure the data is read in right away, otherwise we get into trouble with biggus
               # (it gets confused about which points should be masked)

# add some extra aux_coords
iris.coord_categorisation.add_year(obs_cube, 'time')
iris.coord_categorisation.add_month(obs_cube, 'time')
iris.coord_categorisation.add_day_of_year(obs_cube, 'time')

obs_cube = iris.util.squeeze(obs_cube)
#obs_cube.data = np.ma.maximum(obs_cube.data, 0.0)
obs_cube_mean = obs_cube.aggregated_by(["year", "month"], iris.analysis.MEAN, mdtol=0.5)
LE_obs_cube_mean = obs_cube_mean
print('LE_obs_cube_mean.data = ', LE_obs_cube_mean.data)
"""


qplt.plot(GPP_obs_cube_mean[0:], color='red', linewidth=1, linestyle='-', label='Observations')
plt.axhline(0, linestyle=':', color='black')
plt.xlabel('Time')
plt.ylabel('Gridbox gross primary production (g/m2/day)')
plt.legend(loc='upper right')
plt.title('US_Ha1 (Jan 1991-Dec 2012)')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.png')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.pdf')
plt.show()  # qplt.show() is an alias for plt.show(), so a single call is enough
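# Sketch (assumption, not in the original plots): matplotlib.dates is imported
# above but unused; if the time axis needs explicit year ticks, something like
# this could be applied to each figure before plt.show():
#ax = plt.gca()
#ax.xaxis.set_major_locator(mdates.YearLocator(2))
#ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))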


qplt.plot(Reco_obs_cube_mean[0:], color='red', linewidth=1, linestyle='-', label='Observations')
plt.axhline(0, linestyle=':', color='black')
plt.xlabel('Time')
plt.ylabel('Gridbox total ecosystem respiration (g/m2/day)')
plt.legend(loc='upper right')
plt.title('US_Ha1 (Jan 1991-Dec 2012)')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.png')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.pdf')
plt.show()

qplt.plot(NEE_obs_cube_mean[0:], color='red', linewidth=1, linestyle='-', label='Observations')
plt.axhline(0, linestyle=':', color='black')
plt.xlabel('Time')
plt.ylabel('Gridbox net ecosystem exchange (g/m2/day)')
plt.legend(loc='upper right')
plt.title('US_Ha1 (Jan 1991-Dec 2012)')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.png')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.pdf')
plt.show()

qplt.plot(SH_obs_cube_mean[0:], color='red', linewidth=1, linestyle='-', label='Observations')
plt.axhline(0, linestyle=':', color='black')
plt.xlabel('Time')
plt.ylabel('Gridbox sensible heat flux (W/m2)')
plt.legend(loc='upper right')
plt.title('US_Ha1 (Jan 1991-Dec 2012)')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.png')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.pdf')
plt.show()

"""
qplt.plot(LE_obs_cube_mean[0:], color='red', linewidth=1, linestyle='-', label='Observations')
plt.axhline(0, linestyle=':', color='black')
plt.xlabel('Time')
plt.ylabel('Gridbox latent heat flux (W/m2)')
plt.legend(loc='upper right')
plt.title('US_Ha1 (Jan 1991-Dec 2012)')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.png')
#plt.savefig('/home/users/nmc/FLUXNET2015/JULES_12313_30_sites/US_Ha1/plots/US_Ha1_gpp_gb_run_ba137b_mm_v_run_ba137f_mm_v_observations_mm.pdf')
plt.show()
"""