diff --git a/content/collaborators/index.md b/content/collaborators/index.md
index 9e1a24dd4..a7aab85d4 100644
--- a/content/collaborators/index.md
+++ b/content/collaborators/index.md
@@ -3,8 +3,6 @@ title: "Collaborators"
type: "collaborators"
---
-# Collaborators
-
## Research labs
diff --git a/content/courses/_index.md b/content/courses/_index.md
index 5fbe66dce..2b70b6429 100644
--- a/content/courses/_index.md
+++ b/content/courses/_index.md
@@ -1,3 +1,5 @@
---
title: courses
+type: "courses"
+layout: "list2"
---
diff --git a/content/courses/color-vision-colorimetry.md b/content/courses/color-vision-colorimetry.md
index f3765b747..abb31755a 100644
--- a/content/courses/color-vision-colorimetry.md
+++ b/content/courses/color-vision-colorimetry.md
@@ -1,13 +1,13 @@
---
title: "Color Vision and Colorimetry"
-image: "/images/courses/color.webp"
-weight: 5
-draft: false
----
+img: "color.webp"
+image_alt: "color"
+link: "Color_Vision.zip"
+description: |
+ **Course Duration:** 30 hours
+ **Instructor:** J. Malo
-**Course Duration:** 30 hours
-**Instructor:** J. Malo
+  Color is a 5-dimensional perception that is not only related to the spectrum coming from an object, but also strongly related to its spatio-temporal context. It is a powerful feature that allows humans to make reliable inferences about objects, and one that would be desirable to understand and mimic in artificial vision. In this course, we derive the linear CIE tristimulus theory from its experimental color matching foundations. We derive the relations between spectrum and tristimulus vectors through the color matching functions, the chromatic coordinates, chromatic purity and luminance. The phenomenology of color discrimination and adaptation reveals the limitations of the linear description and sets the foundations of color appearance models. In addition, we link the above perceptual representations of color with the conventional representation of color in computers.
+---
-Color is a 5-dimensional perception that is not only related to the spectrum coming from an object, but also strongly related to its spatio-temporal context. It is a powerful feature that allows humans to make reliable inferences about objects that would be nice to understand and mimic in artificial vision. In this course, we derive the linear CIE tristimulus theory from its experimental color matching foundations. We derive the relations between spectrum and tristimulus vectors through the color matching functions, the chromatic coordinates, chromatic purity and luminance. Phenomenology of color discrimination and adaptation reveals the limitations of the linear description and sets the foundations of color appearance models. In addition, we link the above perceptual representations of color with the conventional representation of color in computers.
-[Material](/files/courses/Color_Vision.zip)
\ No newline at end of file
diff --git a/content/courses/glass-training-material.md b/content/courses/glass-training-material.md
index c0ec1b0ca..8a66fcfa3 100644
--- a/content/courses/glass-training-material.md
+++ b/content/courses/glass-training-material.md
@@ -1,13 +1,11 @@
---
title: "The GLaSS Training Material Builds on the Global Lakes Use Cases"
-image: "/images/courses/CarwH7tWIAAn2ry.jpg-large.webp"
-weight: 10
-draft: false
+img: "CarwH7tWIAAn2ry.jpg-large.webp"
+image_alt: "CarwH7tWIAAn2ry.jpg-large"
+link: "https://www.learn-eo.org/lessons_glass.php"
+description: |
+ **Course Duration:** N/A
+ **Instructor:** Ana B. Ruescas & GLaSS team
+
+ The GLaSS training material builds on the global lakes use cases of GLaSS. It allows students and professionals in fields such as aquatic ecology, environmental technology, remote sensing, and GIS to learn about the possibilities of optical remote sensing of water quality, by using the Sentinel-2 and Sentinel-3 satellites and Landsat 8.
---
-
-**Course Duration:** N/A
-**Instructor:** Ana B. Ruescas & GLaSS team
-
-The GLaSS training material builds on the global lakes use cases of GLaSS. It allows students and professionals in fields such as aquatic ecology, environmental technology, remote sensing, and GIS to learn about the possibilities of optical remote sensing of water quality, by using the Sentinel-2 and Sentinel-3 satellites and Landsat 8.
-
-[Material](https://www.learn-eo.org/lessons_glass.php)
diff --git a/content/courses/google-earth-engine-introduction/_index.md b/content/courses/google-earth-engine-introduction/_index.md
deleted file mode 100644
index ef018e6c6..000000000
--- a/content/courses/google-earth-engine-introduction/_index.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: "Google Earth Engine Introduction"
-image: "/images/courses/gee_screenshot.webp"
-weight: 11
-draft: false
----
-
-**Course Duration:** N/A
-**Instructors:** Emma Izquierdo & Jordi Muñoz-Marí
-
-A short introduction to Google Earth Engine..
-
-[Material](./google-earth-engine-introduction/material)
diff --git a/content/courses/google-earth-engine-introduction/material.md b/content/courses/google-earth-engine-introduction/material.md
deleted file mode 100644
index ff87a39fa..000000000
--- a/content/courses/google-earth-engine-introduction/material.md
+++ /dev/null
@@ -1,470 +0,0 @@
-# GEE Course Material
-
-
-
-## Part 1 Code
-
-
-
-### gee_tutorial_01.js
-
-```javascript
-/**** Start of imports. If edited, may not auto-convert in the playground. ****/
-var l8 = ee.ImageCollection("LANDSAT/LC8_L1T_TOA");
-/***** End of imports. If edited, may not auto-convert in the playground. *****/
-// 1. Simple way to show an image
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC81930282013158LGN00');
-Map.centerObject(image);
-Map.addLayer(image, trueColor, 'True color image');
-
-// 2. Filter an image collection selecting date range
-var filtered = l8.filterDate('2013-01-01', '2014-12-31');
-//print(filtered.size());
-
-// The median is computed using map-reduce (automatically!)
-var median = filtered.median();
-// Does the same thing, but more explicitly, and changes the band names
-var otherMedian = filtered.reduce('median');
-// Yet another way, even more explicit
-var otherMedian2 = filtered.reduce(ee.Reducer.median())
-
-// To show the L8 images in true or false color
-var trueColor = {bands:['B4','B3','B2'], min:0, max:0.3};
-var falseColor = {bands:['B5','B4','B3'], min:0, max:0.3};
-
-Map.addLayer(filtered, trueColor, 'True color image collection');
-Map.addLayer(filtered, falseColor, 'False color image collection');
-
-Map.addLayer(median, trueColor, 'median');
-Map.addLayer(otherMedian,
- {bands:['B4_median','B3_median','B2_median'], min:0, max:0.3},
- 'another median');
-```
-
-
-### gee_tutorial_02.js
-
-```javascript
-// First take an image and show it
-var trueColor = {bands:['B4','B3','B2'], min:0, max:0.3};
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC81930282013158LGN00');
-Map.centerObject(image, 7);
-Map.addLayer(image, trueColor, 'Image');
-
-// How to compute NDVI, in several ways:
-
-// 1. Directly
-var red = image.select('B4');
-var nir = image.select('B5');
-var ndvi = nir.subtract(red).divide(nir.add(red));
-
-// nvdi visualization properties
-var ndvi_visprops = {min:0, max:1, palette:['FFFFFF', 'FF0000', '00FF00']};
-Map.addLayer(ndvi, ndvi_visprops, 'NDVI 1')
-
-// 2. Using existing function
-ndvi = image.normalizedDifference(['B5','B4']);
-Map.addLayer(ndvi, ndvi_visprops, 'NDVI 2')
-
-// 3. Using a function
-// => will be useful later when applied to an image collection
-// Alternatively
-// var addNDVI = function addNDVI(image) {
-function addNDVI(image) {
- var ndvi = image.normalizedDifference(['B5','B4']);
- return image.addBands(ndvi);
-}
-
-// Using our function to compute the NDVI an add it as a new band
-var withNDVI = addNDVI(image);
-print(withNDVI);
-Map.addLayer(withNDVI.select(['nd']), ndvi_visprops, 'NDVI 3');
-```
-
-### gee_tutorial_03.js
-
-```javascript
-/**** Start of imports. If edited, may not auto-convert in the playground. ****/
-var l8 = ee.ImageCollection("LANDSAT/LC8_L1T_TOA"),
- point = /* color: ff0000 */ee.Geometry.Point([-0.48614501953125, 39.715638134796336]),
- geometry = /* color: 00ff00 */ee.Geometry.Polygon(
- [[[-0.545196533203125, 39.7642140375156],
- [-0.5328369140625, 39.6765413702479],
- [-0.4449462890625, 39.69239407904182],
- [-0.45318603515625, 39.79059962227577]]]);
-/***** End of imports. If edited, may not auto-convert in the playground. *****/
-// Example of:
-// 1. Using 'map' to apply a function in parallel to an image collection
-// 2. Export the image to Google Drive
-// 3. Obtain statictis in regions or points
-// 4. Show an evolution chart
-
-var trueColor = {bands:['B4','B3','B2'], min:0, max:0.3};
-
-function addNDVI(image) {
- var ndvi = image.normalizedDifference(['B5','B4']);
- return image.addBands(ndvi);
-}
-
-var filtered = l8.filterDate('2013-01-01', '2014-01-01');
-
-// Apply a function to an image collection
-var withNDVI = filtered.map(addNDVI);
-
-// Composites all the images in a collection,
-// using a quality band as a per-pixel ordering function.
-var greenest = withNDVI.qualityMosaic('nd');
-
-print(withNDVI.first());
-print(greenest);
-
-Map.centerObject(point, 10);
-Map.addLayer(withNDVI, trueColor, 'withNDVI');
-Map.addLayer(greenest, trueColor, 'Greenest');
-Map.addLayer(greenest, {bands: ['nd'], min: 0, max: 1}, 'NDVI');
-
-// Export (this creates a task you must run after this script has finished)
-Export.image.toDrive(greenest.select('nd'), 'withNDVI', 'GEE');
-// Export all bands (except BQA which is int and raises an error)
-//Export.image.toDrive(
-// greenest.select(['B[1-11]','nd']), 'greenest', 'GEE');
-
-// Reduce regions:
-print(greenest.reduceRegion(ee.Reducer.mean(), geometry, 30));
-print(greenest.select(['nd']).reduceRegion(ee.Reducer.mean(), point, 30));
-
-// Charts
-// Show the NDVI time series in a graph
-print(ui.Chart.image.series(withNDVI.select('nd'), point));
-```
-
-## Part 2 Code
-
-
-
-### DemoSpainClass.js
-
-```javascript
-var countries = ee.FeatureCollection('ft:1tdSwUL7MVpOauSgRzqVTOwdfy17KDbw-1d9omPw');
-print(countries);
-var spain = countries.filterMetadata('Country', 'equals', 'Spain');
-print(spain);
-
-// L7 image collection
-var landsat = ee.ImageCollection("LANDSAT/LE7_TOA_1YEAR");
-// VLC point
-var point = ee.Geometry.Point([-0.3330230712890625, 39.480708957174556]);
-// Water and class polygons (both near the cost)
-var class_water = ee.Geometry.Polygon(
- [[[-0.2691650390625, 39.51569536664155],
- [-0.27706146240234375, 39.44388219489487],
- [-0.13423919677734375, 39.43778393700683],
- [-0.1242828369140625, 39.52046261905615]]]);
-var class_land =ee.Geometry.Polygon(
- [[[-0.46245574951171875, 39.50165655681176],
- [-0.42022705078125, 39.475955540420635],
- [-0.3659820556640625, 39.44176112227503],
- [-0.339202880859375, 39.51887357127223],
- [-0.4140472412109375, 39.52867212011217]]]);
-
-landsat = ee.Image(landsat.sort('system:index',false).first());
-//landsat = landsat.filterMetadata('system:index', 'equals','2014');
-//landsat = landsat.filterMetadata('system:index', 'equals','2014').reduce(ee.Reducer.sum());
-print(landsat);
-
-// Create classes
-class_water = ee.Feature(class_water).set({'class':0});
-class_land = ee.Feature(class_land).set({'class':1});
-// Join them in a FeatureCollection
-var FC = ee.FeatureCollection([class_water,class_land]);
-print(FC);
-
-// Create training set sampling pixel from the image in the defined polygons
-var training = landsat.sampleRegions(FC, ['class'], 300);
-print(training);
-
-// Train a classifier with default parameters
-var bands = landsat.bandNames();
-var model = ee.Classifier.naiveBayes().train(training, 'class', bands);
-
-// Classify the image with the same bands used for training.
-var classified = landsat.select(bands).classify(model);
-//classified = classified.clip(spain);
-
-//Map.centerObject(point, 12);
-Map.centerObject(spain, 6);
-Map.addLayer(landsat.clip(spain), {bands: ['B3','B2','B1'], min:10, max: 65}, 'image');
-Map.addLayer(classified, {min: 0, max: 1, palette: ['0000FF', 'FF0000'], opacity: 0.5}, 'classification');
-//Map.addLayer(training, {}, 'training');
-```
-
-### DemoClassification.js
-
-```javascript
-// Use these bands for prediction.
-var bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11'];
-
-// Load a Landsat 8 image to be used for prediction.
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC82320672013207LGN00');
-//print(image)
-//Map.addLayer(image)
-
-image= image.select(bands);
-//print(image)
-// Load training points. The numeric property 'class' stores known labels.
-var points = ee.FeatureCollection('ft:10X7SUjDTiFJDyIA58zLcptK8pwBwjj1BV12SQOgJ')
-.remap([1, 2], [0, 1], 'class');
-//print(points)
-
-// Overlay the points on the imagery to get training.
-var training = image.sampleRegions(points, ['class']);
-//print(training)
-// Train a CART classifier with default parameters.
-var trained = ee.Classifier.cart().train(training, 'class', bands);
-//print(trained)
-// Classify the image with the same bands used for training.
-var classified = image.select(bands).classify(trained);
-
-// Display the inputs and the results.
-Map.centerObject(image, 10);
-Map.addLayer(image, {bands: ['B4', 'B3', 'B2'], min: 0.05, max: 0.14}, 'image');
-Map.addLayer(classified, {min: 0, max: 1, palette: ['00A000', 'A00000']}, 'classification');
-Map.addLayer(points, {'palette': '0000ff', 'max': 10}, 'training samples');
-
-// Exercise: try other classifiers
-```
-
-### DemoRegression.js
-
-```javascript
-// This function adds a time band to the image.
-var createTimeBand = function(image) {
- // Scale milliseconds by a large constant to avoid very small slopes
- // in the linear regression output.
- return image.addBands(image.metadata('system:time_start').divide(1e18));
-};
-
-// Load the input image collection: projected climate data.
-var collection = ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS')
- .filterMetadata('scenario', 'equals', 'rcp85')
- .filterDate(ee.Date('2006-01-01'), ee.Date('2050-01-01'))
- // Map the time band function over the collection.
- .map(createTimeBand);
-
-// Get scale from the projected mean precipitation band.
-//var scale = ee.Image(collection.first())
-// .select(['pr_mean']).projection().nominalScale();
-//print(scale)
-
-// Reduce the collection with the linear fit reducer.
-// Independent variable are followed by dependent variables.
-var linearFit = collection.select(['system:time_start', 'pr_mean'])
- .reduce(ee.Reducer.linearFit());
-print(linearFit)
-// Display the results.
-Map.setCenter(-100.11, 40.38, 4);
-Map.addLayer(linearFit,
- {min: 0, max: [-0.9, 8e-5, 1],
- bands: ['scale', 'offset', 'scale']}, 'fit');
-```
-
-### DemoRegressionCART.js
-
-```javascript
-/**** Start of imports. If edited, may not auto-convert in the playground. ****/
-var imageCollection = ee.ImageCollection("LANDSAT/LC8_L1T_ANNUAL_EVI");
-/***** End of imports. If edited, may not auto-convert in the playground. *****/
-print(ee.Image(imageCollection))
-
-// Use these bands for prediction.
-var bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11'];
-// Load a Landsat 8 image to be used for prediction.
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC82320672013207LGN00');
-
-image = image.select(bands);
-//print(image)
-// Load training points. The numeric property 'class' stores known labels.
-var points = ee.FeatureCollection('ft:10X7SUjDTiFJDyIA58zLcptK8pwBwjj1BV12SQOgJ')
-.remap([1, 2], [0, 1], 'class');
-//print(points)
-
-// Regression model :
-// Recortamos la imagen al area de la imagen Landsat
-var EVI = ee.Image(imageCollection.first()).clip(image.geometry())
-
-// Generamos la imagen con las bandas y el output:
-var image1 = image.addBands(EVI);
-// Buscamos 100 pixeles random para entrenar:
-var training = image1.sample({numPixels: 100, seed: 0});
-print(training)
-
-// Aplicamos el CART pero en modo regression:
-var trained = ee.Classifier.cart().setOutputMode('REGRESSION').train(training, 'EVI', bands);
-// Obtenemmos el output predecido:
-var regression = image.classify(trained,'EVI_pred');
-print(regression)
-
-// Display the inputs and the results.
-// var colors = ['FF0000', 'FFFF00', '00FF00', '00FFFF', '0000FF', 'FF00FF'];
-var colors_summer = [
- '007F66','048166','088366','0C8566','108766','148966','188B66','1C8D66','208F66','249166',
- '289366','2C9566','309766','349966','389B66','3C9D66','409F66','44A166','48A366','4CA566',
- '50A766','55AA66','59AC66','5DAE66','61B066','65B266','69B466','6DB666','71B866','75BA66',
- '79BC66','7DBE66','81C066','85C266','89C466','8DC666','91C866','95CA66','99CC66','9DCE66',
- 'A1D066','A5D266','AAD466','AED666','B2D866','B6DA66','BADC66','BEDE66','C2E066','C6E266',
- 'CAE466','CEE666','D2E866','D6EA66','DAEC66','DEEE66','E2F066','E6F266','EAF466','EEF666',
- 'F2F866','F6FA66','FAFC66','FFFF66']
-var colors = ['007F66','208F66','409F66','61B066','81C066','A1D066','C2E066','E2F066']
-Map.centerObject(image, 10);
-Map.addLayer(image, {bands: ['B4', 'B3', 'B2'], min:0.05, max: 0.14}, 'image');
-Map.addLayer(points,{'palette': '0000ff', 'max': 10}, 'training_samples');
-Map.addLayer(EVI, {'palette': colors}, 'EVI')
-Map.addLayer(regression, {'palette': colors}, 'EVI_predicted')
-
-// Validación:
-var validation = (image1.addBands(regression)).sample({
- numPixels: 50,
- seed: 2
-});
-//print(validation)
-
-// var validated = validation.classify(trained);
-print(validation.limit(3)) //sqrt(mean((Labels-PreLabels).^2))
-function error(fc){
- var label = ee.Number(fc.get('EVI'));
- var pred = ee.Number(fc.get('EVI_pred'));
- return fc.set({'error':(label.subtract(pred)).pow(2)});
-}
-
-var res = ee.FeatureCollection(validation.map(error));
-print(res)
-
-var mean = res.select(['error']).reduceColumns(ee.Reducer.mean(),['error']).get('mean');
-var RMSE = ee.Number(mean).sqrt();
-print(RMSE);
-```
-
-### DemoClustering.js
-
-```javascript
-/**** Start of imports. If edited, may not auto-convert in the playground. ****/
-var imageCollection = ee.ImageCollection("LANDSAT/LC8_L1T_ANNUAL_EVI");
-/***** End of imports. If edited, may not auto-convert in the playground. *****/
-print(ee.Image(imageCollection))
-
-// Use these bands for prediction.
-var bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11'];
-// Load a Landsat 8 image to be used for prediction.
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC82320672013207LGN00');
-//print(image)
-
-image = image.select(bands);
-//print(image)
-// Load training points. The numeric property 'class' stores known labels.
-var points = ee.FeatureCollection('ft:10X7SUjDTiFJDyIA58zLcptK8pwBwjj1BV12SQOgJ')
-.remap([1, 2], [0, 1], 'class');
-//print(points)
-
-// Clustering
-//var training = image.sampleRegions(points, ['class']);
-//var EVI = ee.Image(imageCollection.first()).clip(image.geometry());
-//var image1 = image.addBands(EVI);
-
-var training = image.sample({numPixels: 100, seed: 0});
-
-var Names = ee.Feature(training.first()).propertyNames().remove('system:index');
-Names = Names.remove('class');
-print(Names)
-
-var prClustering = ee.Clusterer.wekaKMeans(3);
-print(prClustering);
-var prTrainClus = prClustering.train(training, Names);
-print(prTrainClus);
-print(prTrainClus.schema())
-
-var colors = ['00FFFF', '0000FF','FFFF00'];
-var RES = image.cluster(prTrainClus);
-print(RES)
-
-// Display the inputs and the results.
-Map.centerObject(image, 10);
-Map.addLayer(image, {bands: ['B4', 'B3', 'B2'], max: 0.4}, 'image');
-Map.addLayer(RES, {'max':2,'min':0, palette: colors},'Clustering')
-Map.addLayer(points,{'palette': '0000ff', 'max': 10}, 'Training points');
-```
-
-### DemoClassificationWithStatistics.js
-
-```javascript
-// Use these bands for prediction.
-var bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11'];
-
-// Load a Landsat 8 image to be used for prediction.
-var image = ee.Image('LANDSAT/LC8_L1T_TOA/LC82320672013207LGN00');
-//print(image)
-Map.addLayer(image)
-
-image= image.select(bands);
-//print(image)
-// Load training points. The numeric property 'class' stores known labels.
-var points = ee.FeatureCollection('ft:10X7SUjDTiFJDyIA58zLcptK8pwBwjj1BV12SQOgJ')
-.remap([1, 2], [0, 1], 'class');
-// print(points)
-
-//Split featureCollection in train and test:
-points = points.randomColumn();
-// print(points);
-var trainPoints = points.filter(ee.Filter.gt('random', 0.25));
-// print(trainPoints)
-// Overlay the points on the imagery to get training.
-var training = image.sampleRegions(trainPoints, ['class']);
-//print(training)
-
-// Train a CART classifier with default parameters.
-var trained = ee.Classifier.cart().train(training, 'class', bands);
-//print(trained)
-
-// training
-var trainAccuracy = trained.confusionMatrix();
-print('Resubstitution error matrix: ', trainAccuracy);
-print('Training overall accuracy: ', trainAccuracy.accuracy());
-print('Training Kappa: ', trainAccuracy.kappa());
-print(trainAccuracy.array().matrixDiagonal().reduce(ee.Reducer.sum(),[0]));
-
-// test
-var testPoints = points.filter(ee.Filter.lte('random', 0.25));
-var testingSet = image.sampleRegions({
- collection: testPoints,
- properties:['class']});
-
-var ClassifiedTest = testingSet.classify(trained);
-var testAccuracy = ClassifiedTest.errorMatrix('class', 'classification');
-print('Test error matrix: ', testAccuracy);
-print('Test overall accuracy: ', testAccuracy.accuracy());
-print('Test kappa: ', testAccuracy.kappa());
-print(testAccuracy.array().matrixDiagonal().reduce(ee.Reducer.sum(),[0]));
-
-// Classify the image with the same bands used for training.
-var classified = image.select(bands).classify(trained);
-
-// Display the inputs and the results.
-Map.centerObject(image, 10);
-Map.addLayer(image, {bands: ['B4', 'B3', 'B2'], max: 0.4}, 'image');
-Map.addLayer(classified, {min: 0, max: 1, palette: ['00FF00', 'FF0000']},
- 'classification');
-Map.addLayer(points,{
- 'palette': '0000ff',
- 'max': 10});
-```
-
-## GEE Course Material
-
-
-
-### External Resources from GEE Developers
-
-- [Overview](https://docs.google.com/presentation/d/1tOOBDLodQ4tyW2ldaDGVMTd8R6IPSTlfm_ddu9x33jc/edit#slide=id.p)
-- [Introduction](https://docs.google.com/presentation/d/1jk48iXp1DvGRl65pKHruV-InQX3B6GP7dH39sDyiens/edit#slide=id.g4954714e1_18)
-- [Advanced API](https://docs.google.com/presentation/d/1BbMtoS8wvU_RrsnzxR31le58ZPzLzj-aa43ZQOYXAiM/edit#slide=id.ga42bbbf0f_0_0)
-- [2016 Advanced API](https://docs.google.com/presentation/d/1A5bRots9uoQXd-ERfnF7wqB9MGLS9XFG7ABXsMNknQk/edit#slide=id.g64cc58566_0_0)
-- [2016 Python API](https://docs.google.com/presentation/d/1N7W09Njvz9ROrarp8SspA0kSC17Z0bzBbzTxCxskdYI/edit#slide=id.g494020760_258)
\ No newline at end of file
diff --git a/content/courses/google_earth_engine.md b/content/courses/google_earth_engine.md
new file mode 100644
index 000000000..9ae4b7a46
--- /dev/null
+++ b/content/courses/google_earth_engine.md
@@ -0,0 +1,13 @@
+---
+title: "Google Earth Engine Introduction"
+img: "gee_screenshot.webp"
+image_alt: "gee_screenshot"
+link: "google-earth-engine-introduction/material"
+description: |
+ **Course Duration:** N/A
+ **Instructors:** Emma Izquierdo & Jordi Muñoz-Marí
+
+  A short introduction to Google Earth Engine.
+---
+
+
diff --git a/content/courses/human-vision-mechanistic-models.md b/content/courses/human-vision-mechanistic-models.md
index e4b60a602..20ce0cb29 100644
--- a/content/courses/human-vision-mechanistic-models.md
+++ b/content/courses/human-vision-mechanistic-models.md
@@ -1,13 +1,11 @@
---
title: "Human Vision: Facts, Mechanistic Models, and Principled Theories"
-image: "/images/courses/human.webp"
-weight: 3
-draft: false
----
-
-**Course Duration:** 30 hours
-**Instructor:** J. Malo
+img: "human.webp"
+image_alt: "human"
+link: "visual_percept.pptx"
+description: |
+ **Course Duration:** 30 hours
+ **Instructor:** J. Malo
-In this course, I introduce the facts on color vision from radiometry and photometry, the vision of spatio-temporal textures, the models of mechanisms and circuits that (kind of) reproduce these facts, and the statistical elements of theories that explain the facts.
-
-[Material](/files/courses/visual_percept.pptx)
+ In this course, I introduce the facts on color vision from radiometry and photometry, the vision of spatio-temporal textures, the models of mechanisms and circuits that (kind of) reproduce these facts, and the statistical elements of theories that explain the facts.
+---
diff --git a/content/courses/hyperspectral-image-processing.md b/content/courses/hyperspectral-image-processing.md
index 17a8d6d1a..d0c6ff665 100644
--- a/content/courses/hyperspectral-image-processing.md
+++ b/content/courses/hyperspectral-image-processing.md
@@ -1,13 +1,11 @@
---
title: "Hyperspectral Image Processing"
-image: "/images/courses/hyper.webp"
-weight: 8
-draft: false
----
-
-**Course Duration:** 60 hours
-**Instructor:** G. Camps-Valls
+img: "hyper.webp"
+image_alt: "hyper"
+link: "esa_course.zip"
+description: |
+ **Course Duration:** 60 hours
+ **Instructor:** G. Camps-Valls
-We introduce the main concepts of hyperspectral image processing. We start by a soft introduction to hyperspectral image processing, the standard processing chain, and the current challenges in the field. Then we analyze the current state of the art in several topics: feature extraction, supervised classification, unmixing and abundance estimation, and retrieval of biophysical parameters. All the methods and techniques studied are reviewed both theoretically and through MATLAB exercises.
-
-[Material](/files/courses/esa_course.zip)
+  We introduce the main concepts of hyperspectral image processing. We start with a soft introduction to hyperspectral image processing, the standard processing chain, and the current challenges in the field. Then we analyze the current state of the art in several topics: feature extraction, supervised classification, unmixing and abundance estimation, and retrieval of biophysical parameters. All the methods and techniques studied are reviewed both theoretically and through MATLAB exercises.
+---
diff --git a/content/courses/information-theory-visual-communication.md b/content/courses/information-theory-visual-communication.md
index 2579b0c39..b1a03cd07 100644
--- a/content/courses/information-theory-visual-communication.md
+++ b/content/courses/information-theory-visual-communication.md
@@ -1,13 +1,11 @@
---
title: "Information Theory for Visual Communication"
-image: "/images/courses/Info_Theory2.webp"
-weight: 1
-draft: false
----
-
-**Course Duration:** 30 hours
-**Instructor:** J. Malo
+img: "Info_Theory2.webp"
+image_alt: "Info_Theory2"
+link: "Info_theory_for_Neurosci_RBIG_infomax_DN_SPCA_PPA_DDR.zip"
+description: |
+ **Course Duration:** 30 hours
+ **Instructor:** J. Malo
-In this course, I introduce the elements of information theory required to understand why Uniformization or Gaussianization of density functions and noise in the system are key for the transmission of visual information. This knowledge is the basis of our long-standing agenda on developing invertible transforms for uniformization (SPCA, PPA, DRR) and Gaussianization (RBIG), and our research to calibrate neural noise in the visual system and Divisive Normalization models.
-
-[Material](/files/courses/Info_theory_for_Neurosci_RBIG_infomax_DN_SPCA_PPA_DDR.zip)
+ In this course, I introduce the elements of information theory required to understand why Uniformization or Gaussianization of density functions and noise in the system are key for the transmission of visual information. This knowledge is the basis of our long-standing agenda on developing invertible transforms for uniformization (SPCA, PPA, DRR) and Gaussianization (RBIG), and our research to calibrate neural noise in the visual system and Divisive Normalization models.
+---
diff --git a/content/courses/kernel-methods-machine-learning.md b/content/courses/kernel-methods-machine-learning.md
index 5efa37f50..bb2af3dca 100644
--- a/content/courses/kernel-methods-machine-learning.md
+++ b/content/courses/kernel-methods-machine-learning.md
@@ -1,13 +1,11 @@
---
title: "Kernel Methods in Machine Learning"
-image: "/images/courses/sskpls.webp"
-weight: 7
-draft: false
----
-
-**Course Duration:** 30 hours
-**Instructor:** G. Camps-Valls
+img: "sskpls.webp"
+image_alt: "sskpls"
+link: "kernel_course.zip"
+description: |
+ **Course Duration:** 30 hours
+ **Instructor:** G. Camps-Valls
-Two fundamental operations in Machine Learning such as regression and classification involve drawing nonlinear boundaries or functions through a set of (labeled or unlabeled) training samples. These boundaries or functions at certain (test) samples can be deduced from the similarities between the test sample and the training samples. These similarities can be encoded in Kernels, and the representer theorem can be used to obtain expressions for the functions at any test sample. In this course, we will also review the application of the kernelization of scalar products (e.g., as in the covariance matrix) to obtain nonlinear generalizations of classical feature extraction methods.
-
-[Material](/files/courses/kernel_course.zip)
+  Two fundamental operations in Machine Learning, regression and classification, involve drawing nonlinear boundaries or functions through a set of (labeled or unlabeled) training samples. The values of these boundaries or functions at given (test) samples can be deduced from the similarities between the test sample and the training samples. These similarities can be encoded in kernels, and the representer theorem can be used to obtain expressions for the functions at any test sample. In this course, we will also review the application of the kernelization of scalar products (e.g., as in the covariance matrix) to obtain nonlinear generalizations of classical feature extraction methods.
+---
diff --git a/content/courses/remote-sensing-data-analysis.md b/content/courses/remote-sensing-data-analysis.md
index 6bb061cf3..b8ddc477b 100644
--- a/content/courses/remote-sensing-data-analysis.md
+++ b/content/courses/remote-sensing-data-analysis.md
@@ -1,13 +1,11 @@
---
title: "Machine Learning and Signal Processing for Remote Sensing Data Analysis (IGARSS'14 tutorial)"
-image: "/images/courses/remote_sensing.webp"
-weight: 9
-draft: false
----
-
-**Course Duration:** N/A
-**Instructors:** G. Camps-Valls and D. Tuia
-
-In this tutorial, we will present the remote sensing image processing chain, and take the attendants on a tour of different strategies for feature extraction, classification, unmixing, retrieval, and pattern analysis for data understanding. On the one hand, we will present powerful methodologies for remote sensing data classification: extracting knowledge from data, including interactive approaches via active learning, classifiers that encode prior knowledge and invariances, semi-supervised learning that exploits the information of unlabeled data, and domain adaptation to compensate for shifts in the ever-changing data distributions. On the other hand, we will pay attention to recent advances in bio-geophysical parameter estimation that incorporate heteroscedasticity, online adaptation, and problem understanding. From there, we will take a leap towards the more challenging step of understanding the geoscience problems from data by reviewing the latest advances in (directed) graphical models, structure learning, and empirical causal inference. Beyond theory, we will also present results of recent studies illustrating all the covered issues. Finally, we will provide code to the attendees to try the different methodologies and provide a solid ground for their future experimentations.
+img: "remote_sensing.webp"
+image_alt: "remote_sensing"
+link: "tutorial_igarss15.tar.gz"
+description: |
+ **Course Duration:** N/A
+ **Instructors:** G. Camps-Valls and D. Tuia
-[Material](/files/courses/tutorial_igarss15.tar.gz)
+  In this tutorial, we will present the remote sensing image processing chain, and take the attendees on a tour of different strategies for feature extraction, classification, unmixing, retrieval, and pattern analysis for data understanding. On the one hand, we will present powerful methodologies for remote sensing data classification: extracting knowledge from data, including interactive approaches via active learning, classifiers that encode prior knowledge and invariances, semi-supervised learning that exploits the information in unlabeled data, and domain adaptation to compensate for shifts in the ever-changing data distributions. On the other hand, we will pay attention to recent advances in bio-geophysical parameter estimation that incorporate heteroscedasticity, online adaptation, and problem understanding. From there, we will take a leap towards the more challenging step of understanding geoscience problems from data by reviewing the latest advances in (directed) graphical models, structure learning, and empirical causal inference. Beyond theory, we will also present results of recent studies illustrating all the covered issues. Finally, we will provide code to the attendees so they can try the different methodologies, providing a solid grounding for their future experimentation.
+---
\ No newline at end of file
diff --git a/content/courses/remote-sensing-water-quality.md b/content/courses/remote-sensing-water-quality.md
index 38454809d..6dad68ca5 100644
--- a/content/courses/remote-sensing-water-quality.md
+++ b/content/courses/remote-sensing-water-quality.md
@@ -1,13 +1,11 @@
---
title: "Remote Sensing for Water Quality"
-image: "/images/courses/EO_AnnaB_2023.webp"
-weight: 12
-draft: false
----
-
-**Course Duration:** N/A
-**Instructor:** Anna B. Ruescas
-
-Anna B. Ruescas leads this session on remote sensing for water quality, as part of the 2023 ESA Earth Observation Advanced Training Course at the Wroclaw University of Environmental and Life Sciences.
+img: "EO_AnnaB_2023.webp"
+image_alt: "EO_AnnaB_2023"
+link: "https://www.youtube.com/watch?v=d67aO2z06dI"
+description: |
+ **Course Duration:** N/A
+ **Instructor:** Anna B. Ruescas
-[Watch the Lectures!](https://www.youtube.com/watch?v=d67aO2z06dI)
+ Anna B. Ruescas leads this session on remote sensing for water quality, as part of the 2023 ESA Earth Observation Advanced Training Course at the Wroclaw University of Environmental and Life Sciences.
+---
\ No newline at end of file
diff --git a/content/courses/representation-spatial-information.md b/content/courses/representation-spatial-information.md
index 3cb283ee8..1cd344892 100644
--- a/content/courses/representation-spatial-information.md
+++ b/content/courses/representation-spatial-information.md
@@ -1,13 +1,11 @@
---
title: "Representation of Spatial Information"
-image: "/images/courses/base.webp"
-weight: 4
-draft: false
----
-
-**Course Duration:** 30 hours
-**Instructor:** J. Malo
-
-Statistical regularities in photographic images imply that certain representations of spatial information are better than others in terms of coding efficiency. In this course, we present the information theory concepts (entropy, multi-information, correlation and negentropy) for unsupervised feature extraction or dictionary learning required in image coding. Redundancy in images and sequences is reviewed, and basic techniques for compact information representation are introduced such as vector quantization, predictive coding, and transform coding. Application of these concepts in images is the basis of DCT and Wavelet representations, which are the core of JPEG and JPEG2000.
+img: "base.webp"
+image_alt: "base"
+link: "Represent_Spatial_Information.zip"
+description: |
+ **Course Duration:** 30 hours
+ **Instructor:** J. Malo
-[Material](/files/courses/Represent_Spatial_Information.zip)
+  Statistical regularities in photographic images imply that certain representations of spatial information are better than others in terms of coding efficiency. In this course, we present the information theory concepts (entropy, multi-information, correlation and negentropy) behind the unsupervised feature extraction or dictionary learning required in image coding. Redundancy in images and sequences is reviewed, and basic techniques for compact information representation are introduced, such as vector quantization, predictive coding, and transform coding. The application of these concepts to images is the basis of the DCT and wavelet representations, which are the core of JPEG and JPEG2000.
+---
\ No newline at end of file
diff --git a/content/courses/satellite-based-tools-aquatic-ecosystems.md b/content/courses/satellite-based-tools-aquatic-ecosystems.md
index d693243e4..2dbef6538 100644
--- a/content/courses/satellite-based-tools-aquatic-ecosystems.md
+++ b/content/courses/satellite-based-tools-aquatic-ecosystems.md
@@ -1,13 +1,11 @@
---
title: "Satellite-based Tools for Investigating Aquatic Ecosystems"
-image: "/images/courses/annaB_SNAP23.webp"
-weight: 13
-draft: false
----
-
-**Course Duration:** N/A
-**Instructor:** Anna B. Ruescas
-
-Anna B. Ruescas participates in this session on the use of Sentinel Constellation SNAP Tools, as part of the 2023 Satellite-based Tools for Investigating Aquatic Ecosystems Training at the Trevor Platt Science Foundation.
+img: "annaB_SNAP23.webp"
+image_alt: "annaB_SNAP23"
+link: "https://www.youtube.com/watch?v=pknYqAAtxRE"
+description: |
+ **Course Duration:** N/A
+ **Instructor:** Anna B. Ruescas
-[Lectures](https://www.youtube.com/watch?v=pknYqAAtxRE)
+ Anna B. Ruescas participates in this session on the use of Sentinel Constellation SNAP Tools, as part of the 2023 Satellite-based Tools for Investigating Aquatic Ecosystems Training at the Trevor Platt Science Foundation.
+---
\ No newline at end of file
diff --git a/content/courses/statistical-signal-processing.md b/content/courses/statistical-signal-processing.md
index 6b2406c73..25c8b1da6 100644
--- a/content/courses/statistical-signal-processing.md
+++ b/content/courses/statistical-signal-processing.md
@@ -1,13 +1,11 @@
---
title: "Statistical Signal Processing"
-image: "/images/courses/signal_theme.webp"
-weight: 2
-draft: false
----
-
-**Course Duration:** 60 hours
-**Instructor:** G. Camps-Valls
-
-Material for a master course on (statistical) signal processing. I cover the essential background for engineers and physicists interested in signal processing: Probability and random variables, discrete time random processes, spectral estimation, signal decomposition and transforms, and an introduction to information theory.
+img: "signal_theme.webp"
+image_alt: "signal_theme"
+link: "ps_2014.pdf"
+description: |
+ **Course Duration:** 60 hours
+ **Instructor:** G. Camps-Valls
-[Material](/files/courses/ps_2014.pdf)
+ Material for a master course on (statistical) signal processing. I cover the essential background for engineers and physicists interested in signal processing: Probability and random variables, discrete time random processes, spectral estimation, signal decomposition and transforms, and an introduction to information theory.
+---
\ No newline at end of file
diff --git a/content/courses/texture-motion-visual-cortex.md b/content/courses/texture-motion-visual-cortex.md
index b7a4d60f4..ed7688666 100644
--- a/content/courses/texture-motion-visual-cortex.md
+++ b/content/courses/texture-motion-visual-cortex.md
@@ -1,13 +1,11 @@
---
title: "Texture and Motion in the Visual Cortex"
-image: "/images/courses/mt_theoret_sensit.webp"
-weight: 6
-draft: false
----
-
-**Course Duration:** 40 hours
-**Instructor:** J. Malo
-
-Neurons in V1 and MT cortex play a determinant role in the analysis of the shape of objects, their spatial texture, and the estimation of retinal motion. In this course, we describe the basic psychophysical and physiological phenomena related to low-level spatio-temporal vision: the contrast sensitivity functions, masking, adaptation, and aftereffects. These facts are mediated by the context-dependent nonlinearities of the response of neurons with specific receptive fields. We analyze the geometric properties of the standard model of V1 and their consequences in image discrimination. We introduce the concept of optical flow, its properties, and how this description of motion can be estimated from the 3D wavelet sensors in V1 and the aggregated sensors in MT.
+img: "mt_theoret_sensit.webp"
+image_alt: "mt_theoret_sensit"
+link: "Color_Vision.zip"
+description: |
+ **Course Duration:** 40 hours
+ **Instructor:** J. Malo
-[Material](/files/courses/Color_Vision.zip)
\ No newline at end of file
+ Neurons in V1 and MT cortex play a determinant role in the analysis of the shape of objects, their spatial texture, and the estimation of retinal motion. In this course, we describe the basic psychophysical and physiological phenomena related to low-level spatio-temporal vision: the contrast sensitivity functions, masking, adaptation, and aftereffects. These facts are mediated by the context-dependent nonlinearities of the response of neurons with specific receptive fields. We analyze the geometric properties of the standard model of V1 and their consequences in image discrimination. We introduce the concept of optical flow, its properties, and how this description of motion can be estimated from the 3D wavelet sensors in V1 and the aggregated sensors in MT.
+---
\ No newline at end of file
diff --git a/content/facilities/_index.md b/content/facilities/_index.md
index bbeab2bd3..3c873a5d7 100644
--- a/content/facilities/_index.md
+++ b/content/facilities/_index.md
@@ -1,5 +1,5 @@
---
-title: facilities
+title: Facilities
+type: "facilities"
+layout: "list2"
---
-
-asasdfsadfdsafdsafs
\ No newline at end of file
diff --git a/content/facilities/computer-resources.md b/content/facilities/computer-resources.md
index 339167b37..b2d9472ff 100644
--- a/content/facilities/computer-resources.md
+++ b/content/facilities/computer-resources.md
@@ -1,8 +1,10 @@
---
title: "Computer Resources"
-image: "/images/facilities/servers.webp"
-weight: 2
-draft: false
+img: "servers.webp"
+image_alt: "servers"
+link: "#"
+description: |
+  Currently, at ISP we have a cluster of 4 interconnected servers with a total of 8 CPUs + 4 GPUs, and another cluster with 24 nodes (2 Intel Xeon / AMD EPYC CPUs each: 48 CPUs + 2 NVIDIA K40 GPUs). We have a storage capacity of roughly 100 TB and a small cluster dedicated to the website. The experimental members from VA and ERS also have smaller clusters suited to data preprocessing and the use of numerical models, together with large storage capacity (100 TB). Similar capacities exist at our collaborators' institutions. The [ERC-CoG-2015 SEDAL](https://cordis.europa.eu/project/id/647423/) project scaled up this computational power and kept it up to date. Thanks to the [ERC-SyG-2020 USMILE](https://cordis.europa.eu/project/id/855187/es) project, we have just started building a new cold room with powerful computing facilities for big EO data analysis.
---
-Currently, at ISP we have a cluster of 4 interconnected servers with a total of 8 CPUs + 4 GPUs, and another cluster with 24 nodess (2 CPUs Intel Xeon / AMD EPYC each: 48-CPUs + 2 GPUs NVIDIA K40). We have a storage capacity of roughly 100 TB and a small cluster dedicated to the web site. The experimental members from VA and ERS also count with smaller clusters appropriate for data preprocessing and the use of numerical models and huge storage capacity (100 TB). Similar capacities exist in our collaborators. The [ERC-CoG-2015 SEDAL](https://cordis.europa.eu/project/id/647423/) project scaled this computational power keep it up-to-date. Thanks to the [ERC-SyG-2020 USMILE](https://cordis.europa.eu/project/id/855187/es) we just started the building of a new cold room with a powerful computer facilities for big EO data analysis.
+
diff --git a/content/facilities/databases.md b/content/facilities/databases.md
index 638c2a73d..e86ecf8e5 100644
--- a/content/facilities/databases.md
+++ b/content/facilities/databases.md
@@ -1,8 +1,9 @@
---
title: "Databases"
-image: "/images/facilities/databases.webp"
-weight: 4
-draft: false
+img: "databases.webp"
+image_alt: "databases"
+link: "#"
+description: |
+  Given our long-standing relations with [ESA](https://esa.int) and [EUMETSAT](https://eumetsat.int), we have a huge library of both real and synthetic time series of archived satellite products, including (1) reflectance data (Quickbird, WorldView2, HyMap, SPOT, RapidEye, Landsat, VTG, MODIS and SEVIRI, IASI), and (2) biophysical products from MODIS, COPERNICUS Global Land, MERIS and LSA SAF. Moreover, we have access to thematic maps and databases on land-cover and land-use (CORINE, SIOSE) and reference databases such as LUCAS and BIOSOIL. Check our [Databases](data.html) section!
---
-Given our long standing relations with [ESA](https://esa.int) and [EUMETSAT](https://eumetsat.int) we have a huge library of both real and synthetic time series of archived satellite products including (1) reflectance data (Quickbird, WorldView2, HyMap, SPOT, RapidEye, LandSat, VTG, MODIS and SEVIRI, IASI), and (2) Biophysical products from MODIS, COPERNICUS Global Land, MERIS and LSA SAF. Moreover, we have access to thematic maps and databases on land-cover and land-use (CORINE, SIOSE) and reference databases such as LUCAS and BIOSOIL. Check our [Databases](data.html) section!
\ No newline at end of file
diff --git a/content/facilities/external-facilities.md b/content/facilities/external-facilities.md
index fc1e5cc4d..d81db8acf 100644
--- a/content/facilities/external-facilities.md
+++ b/content/facilities/external-facilities.md
@@ -1,8 +1,10 @@
---
title: "External Facilities"
-image: "/images/facilities/marenostrum.webp"
-weight: 3
-draft: false
+img: "marenostrum.webp"
+image_alt: "marenostrum"
+link: "#"
+description: |
+  We have access to external computer grids (such as [Tirant](https://www.res.es/es/nodos-de-la-res/tirant) and [MareNostrum](https://www.bsc.es/es/marenostrum/marenostrum)), and we are regular users, with accounts, of [Google Earth Engine (GEE)](https://earthengine.google.com/), Amazon AWS Cloud, MS Azure, and the [ESA Mission Exploitation Platform](https://www.esa.int/ESA_Multimedia/Sets/Thematic_Exploitation_Platforms/(result_type)/videos).
---
-We have access to external computer grids (such as [Tirant](https://www.res.es/es/nodos-de-la-res/tirant) and [MareNostrum](https://www.bsc.es/es/marenostrum/marenostrum)), as well as accounts and regular users of [Google Earth Engine (GEE)](https://earthengine.google.com/), Amazon AWS Cloud and MS Azure, or the [ESA Mission Explotation Platform](https://www.esa.int/ESA_Multimedia/Sets/Thematic_Exploitation_Platforms/(result_type)/videos).
+
diff --git a/content/facilities/lecture-room.md b/content/facilities/lecture-room.md
index bb33cb971..75f716dbd 100644
--- a/content/facilities/lecture-room.md
+++ b/content/facilities/lecture-room.md
@@ -1,8 +1,10 @@
---
title: "Lecture Rooms"
-image: "/images/facilities/auditorium.webp"
-weight: 7
-draft: false
+img: "auditorium.webp"
+image_alt: "auditorium"
+link: "#"
+description: |
+  We are located at the [Science Park of the Universitat de València](https://www.pcuv.es/en/servicios/servicios-generales.html). We have a wide variety of meeting rooms and open spaces, as well as larger seminar rooms for workshops. The permanent professors in ISP are affiliated with the [Engineering School ETSE](https://www.uv.es/uvweb/engineering/en/school/areas-resources/directory-areas-1285846791606.html), just a few minutes away, which has an excellent auditorium, seminar rooms, and modern classrooms.
---
-We are located at the [Science Park of the Universitat de València](https://www.pcuv.es/en/servicios/servicios-generales.html). We have a wide variety of meeting rooms and open spaces, as well bigger seminars for workshops. The permanent professors in ISP are associated to the [Engineering School ETSE](https://www.uv.es/uvweb/engineering/en/school/areas-resources/directory-areas-1285846791606.html) just few minutes away with excellent auditorium, seminars, and modern classes.
\ No newline at end of file
+
diff --git a/content/facilities/optics-spectroscopylab.md b/content/facilities/optics-spectroscopylab.md
index 0a683ba8f..0f17a7fdd 100644
--- a/content/facilities/optics-spectroscopylab.md
+++ b/content/facilities/optics-spectroscopylab.md
@@ -1,6 +1,9 @@
---
title: "Optics and Spectroscopy Lab"
-image: "/images/facilities/optical_spectroscopy.webp"
-weight: 6
-draft: false
----
\ No newline at end of file
+img: "optical_spectroscopy.webp"
+image_alt: "optical_spectroscopy"
+link: "#"
+description: "Our lab is equipped with advanced optical instruments and spectroscopy equipment designed for high-precision measurements and experiments in imaging and analysis. This facility supports a wide range of research activities related to light interaction with materials, offering specialized setups for spectroscopy and imaging in various applications."
+---
+
+
diff --git a/content/facilities/parc-cientific.md b/content/facilities/parc-cientific.md
index 6ccf01878..00a97a7c1 100644
--- a/content/facilities/parc-cientific.md
+++ b/content/facilities/parc-cientific.md
@@ -1,8 +1,10 @@
---
title: "Parc Científic"
-image: "/images/facilities/ipl2.webp"
-weight: 1
-draft: false
+img: "ipl2.webp"
+image_alt: "ipl2"
+link: "#"
+description: |
+  We are at the Parc Científic of the Universitat de València. The IPL is well equipped with space, access to image acquisition equipment, and antennae for the reception of satellite images. The ISP group has the physical space needed for the integration of interns and the material provision (computers, office equipment, and standard resources) for training and work. The ISP group has a range of instruments for ground and lab experimentation, particularly spectro-radiometers and unique multispectral image acquisition systems developed in an EU-funded project, as well as others designed and manufactured by the group over the last years. Access to internal and external computer grids (such as Tirant and MareNostrum) is also available.
---
-We are at the Parc Científic of the Universitat de València. The IPL is well equipped with space, access to image acquisition equipment and antennae reception of satellite images. The ISP group has the physical space needed for the integration of interns and material provision (computers, office equipment, and standard resources) for training and work. The ISP group has a range of instruments for ground and lab experimentation particularly spectro-radiometers and unique multispectral image acquisition systems developed in a EU-funded project, as well as others designed and manufactured by the group through the last years. Access to internal computer grids and external computer grids (such as Tirant and MareNostrum) is also available.
+
diff --git a/content/facilities/psychophysics-lab.md b/content/facilities/psychophysics-lab.md
index 1cc1b4ace..4f92a3661 100644
--- a/content/facilities/psychophysics-lab.md
+++ b/content/facilities/psychophysics-lab.md
@@ -1,8 +1,10 @@
---
title: "Psychophysics Lab"
-image: "/images/facilities/psycho.webp"
-weight: 5
-draft: false
+img: "psycho.webp"
+image_alt: "psycho"
+link: "#"
+description: |
+  We have a number of working physiological and psychophysical experimental setups for obtaining relevant measurements in visual neuroscience. At ISP we have eye-tracking systems and colourimetrically calibrated displays for psychophysics, and thanks to our projects with [Hospital La Fe](https://www.iislafe.es/en/) and [Hospital Clínic](https://www.incliva.es/), we also have access to fMRI facilities for brain imaging.
---
-We have a number of working physiological and psychophysical experimental settings to get relevant measurements in visual neuroscience. At ISP we count with eye tracking systems and colourimetrically calibrated displays to do psychophysics, and thanks to the projects with [Hospital La Fe](https://www.iislafe.es/en/) and [Hospital Clínic](https://www.incliva.es/), we also have access to the fMRI facilities for brain imaging.
\ No newline at end of file
+
diff --git a/content/projects/ai4cs/ia/content.md b/content/projects/ai4cs/ia/content.md
index a18157c06..ba1a9d6d6 100644
--- a/content/projects/ai4cs/ia/content.md
+++ b/content/projects/ai4cs/ia/content.md
@@ -9,11 +9,9 @@ title: "AI4CS - AI for complex systems: Brain, Earth, Climate, Society"
Our vision in AI4CS is to develop novel artificial intelligence methods to model and understand complex systems, and more specifically the visual brain, Earth and climate systems and the biosphere-anthroposphere interactions. A perfect storm is upon us: (i) an ever increasing amount of observational and sensory data, (ii) improved high-resolution yet mechanistic models are available, and (iii) advanced machine learning techniques able to extract patterns and identify drivers from data. In the last decade, machine learning models have helped to monitor, predict and forecast all kinds of variables and parameters of interest from observational data. They help to quantify visual stimuli, to monitor land, oceans, and the atmosphere, as well as to study socio-economic variables at different scales and spheres. Current approaches, however, face three important challenges: (1) they cannot deal efficiently with the particular characteristics of data, (2) they do not respect the most elementary laws of physics, and (3) they just interpolate, but nothing fundamental is learned from data.
-
-
In AI4CS we tackle these three problems by designing algorithms able to deal with huge amounts of complex, heterogeneous, multisource, and structured data. Firstly, we develop a new generation of targeted AI methods to improve efficiency, prediction accuracy, and uncertainty quantification and error propagation. Secondly, we push the boundaries of a new family of hybrid physics-aware machine learning models that encode physical knowledge about the problem, constraints, inductive biases and domain knowledge, with the goal of attaining self-explanatory models learned from empirical data. Finally, the project deals with learning graphical causal models to explain the potentially complex interactions between key observed variables, and with discovering hidden essential drivers and confounding factors. The AI4CS project vision thus addresses the fundamental problem of moving from correlation to dependence and then to causation through data analysis. The theoretical developments are guided by the challenges inherent in modeling and understanding complex systems at different spatio-temporal resolutions, spheres and interactions.
diff --git a/content/projects/past/cimr-application.md b/content/projects/past/cimr-application.md
index 80d080ff1..6a8673161 100644
--- a/content/projects/past/cimr-application.md
+++ b/content/projects/past/cimr-application.md
@@ -4,7 +4,7 @@ logo: 'esa.webp'
pi: 'Christopher Merchant'
uvpi: 'M. Piles'
years: '2018-2019'
-website: ''
+website: 'https://cimr.eu/'
funding_source: 'ESA'
role: ''
project_type: ''
diff --git a/content/projects/past/cimr-requirements.md b/content/projects/past/cimr-requirements.md
index f18645cc5..baadeb6bf 100644
--- a/content/projects/past/cimr-requirements.md
+++ b/content/projects/past/cimr-requirements.md
@@ -4,7 +4,7 @@ logo: 'esa.webp'
pi: 'Thomas Lavergne (Norwegian Meteorological Institute)'
uvpi: 'M. Piles'
years: '2018-2019'
-website: ''
+website: 'https://cimr.eu/'
funding_source: 'ESA'
role: ''
project_type: ''
diff --git a/content/projects/past/cloud-detection-in-the-cloud.md b/content/projects/past/cloud-detection-in-the-cloud.md
index 552449640..3b6a920c1 100644
--- a/content/projects/past/cloud-detection-in-the-cloud.md
+++ b/content/projects/past/cloud-detection-in-the-cloud.md
@@ -4,7 +4,7 @@ logo: 'google.webp'
pi: ''
uvpi: 'L. Gomez-Chova'
years: '2016-2017'
-website: ''
+website: 'https://isp.uv.es/projects/cdc/GEE_cloud_detection_results.html'
funding_source: 'Google Earth Engine Research Award'
role: ''
project_type: ''
diff --git a/content/research/_index.md b/content/research/_index.md
new file mode 100644
index 000000000..985fa919d
--- /dev/null
+++ b/content/research/_index.md
@@ -0,0 +1,9 @@
+---
+title: "AI for sustainability and social sciences"
+type: "research"
+layout: "single"
+---
+
+# Motivation
+
+**The Earth is a highly complex, dynamic, and networked system where very different physical, chemical and biological processes interact to form the world we know. The description of such a complex system requires the integration of different disciplines such as Physics, Chemistry, Mathematics and other applied sciences, leading to what has been coined Earth System Science (ESS). The analysis of the Earth system involves studying interacting processes occurring in several spheres (atmosphere, hydrosphere, cryosphere, geosphere, pedosphere, biosphere, and magnetosphere) as well as the anthroposphere, where society acts.**
\ No newline at end of file
diff --git a/content/research/ai4cs_agenda.jpg b/content/research/ai4cs_agenda.jpg
deleted file mode 100644
index 62b8bc09f..000000000
Binary files a/content/research/ai4cs_agenda.jpg and /dev/null differ
diff --git a/content/research/ai4cs_agenda.webp b/content/research/ai4cs_agenda.webp
deleted file mode 100644
index 424c633dd..000000000
Binary files a/content/research/ai4cs_agenda.webp and /dev/null differ
diff --git a/content/research/earth_science/_index.md b/content/research/earth_science/_index.md
index 6aad27733..63a4d77ee 100644
--- a/content/research/earth_science/_index.md
+++ b/content/research/earth_science/_index.md
@@ -1,27 +1,25 @@
---
-title: "Research Earth Science"
+title: "Earth and climate sciences"
+type: "research"
+layout: "single"
---
-
-
-# Earth and climate sciences
-
-
-
-## Motivation
+# Motivation
**Machine learning has yielded many successful results and developments in remote sensing, geosciences and climate sciences. However, there are still strong limitations to the general adoption of machine learning algorithms for predicting and understanding the Earth and climate systems.**
-
-
-
-
+
+
+
+
+
+
The current statistical treatment of biophysical parameters is strongly limited by the quantity and quality of EO data, as well as by the abuse of standard off-the-shelf methods, which in general are not well adapted to the particular EO data characteristics. Specifically, current regression models used for EO applications are still deficient because they rely on a limited amount of meteorological and remote sensing data, do not account for the particular data characteristics, and often make strong assumptions of linearity, homoscedasticity or Gaussianity. These limitations translate into certain risks of overfitting and unreasonably large uncertainties for the predictions, suggesting a lack of explanatory variables and deficiencies in model specification. Graphical models have seldom been used in EO data analysis. The few existing works are restricted to local studies, use a limited amount of data and explanatory variables, consider remote sensing input features only, apply standard structure learning algorithms driven by univariate (often unconditioned) dependence estimates, and do not extract causal relations or identify new drivers in the problem.
We advocate that machine learning algorithms for EO applications need to be guided both by data and by prior physical knowledge. This combination is the way to restrict the family of possible solutions and thus obtain nonparametric flexible models that respect the physical rules governing the Earth climate system. We are equally concerned about the 'black-box' criticism of statistical learning algorithms, for which we aim to design self-explanatory models and take a leap towards the relevant concept of causal inference from empirical EO data.
-## Challenges and approaches
+# Challenges and approaches
Our main goal is to develop new machine learning models for the ambitious goal of modeling and understanding the Earth and climate systems with data, models and machine learning. This main scientific goal translates into the following objectives:
@@ -41,19 +39,7 @@ Our main goal is to develop new machine learning models for the ambitious goal o
- **Discover knowledge and causal relations in Earth observation data.** We investigate graphical causal models and regression-based causal schemes applied to large heterogeneous EO data streams. This requires improved measures of (conditional) independence, designing experiments in controlled situations and using high-quality data. Learning the hierarchy of the relations between variables and related covariates, as well as their causal relations, may in turn allow the discovery of hidden essential variables, drivers and confounders. Moving from correlation to dependence and then to causation concepts is fundamental to advance the field of Earth Observation and the science of climate change.
-### Related Projects
-
-{{< projects "usmile" "xaida" "deepcube" "elise" "sedal" "cloud" "mapict" >}}
-
-
\ No newline at end of file
diff --git a/content/research/earth_science/cloud-detection-in-the-cloud.md b/content/research/earth_science/cloud-detection-in-the-cloud.md
new file mode 100644
index 000000000..3b6a920c1
--- /dev/null
+++ b/content/research/earth_science/cloud-detection-in-the-cloud.md
@@ -0,0 +1,12 @@
+---
+title: 'Cloud Detection in the Cloud'
+logo: 'google.webp'
+pi: ''
+uvpi: 'L. Gomez-Chova'
+years: '2016-2017'
+website: 'https://isp.uv.es/projects/cdc/GEE_cloud_detection_results.html'
+funding_source: 'Google Earth Engine Research Award'
+role: ''
+project_type: ''
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/earth_science/deepcube.md b/content/research/earth_science/deepcube.md
new file mode 100644
index 000000000..3bd020c63
--- /dev/null
+++ b/content/research/earth_science/deepcube.md
@@ -0,0 +1,12 @@
+---
+title: 'DeepCube: Explainable AI pipelines for big Copernicus data'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls, M. Piles'
+years: '2021-2024'
+website: 'https://deepcube-h2020.eu/'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/earth_science/elise.md b/content/research/earth_science/elise.md
new file mode 100644
index 000000000..0b74d4ebf
--- /dev/null
+++ b/content/research/earth_science/elise.md
@@ -0,0 +1,12 @@
+---
+title: 'ELISE: European Learning And Intelligent Systems Excellence'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls'
+years: '2020-2023'
+website: 'https://www.elise-ai.eu/'
+funding_source: 'ICT-48'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/earth_science/mapping-and-the-citizen-sensor.md b/content/research/earth_science/mapping-and-the-citizen-sensor.md
new file mode 100644
index 000000000..2fb193148
--- /dev/null
+++ b/content/research/earth_science/mapping-and-the-citizen-sensor.md
@@ -0,0 +1,12 @@
+---
+title: 'Mapping and the citizen sensor'
+logo: 'cost.webp'
+pi: ''
+uvpi: ''
+years: '2013-2016'
+website: 'https://www.cost.eu/actions/TD1202/'
+funding_source: 'ICT COST Action'
+role: ''
+project_type: ''
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/earth_science/usmile.md b/content/research/earth_science/usmile.md
new file mode 100644
index 000000000..8c077b83f
--- /dev/null
+++ b/content/research/earth_science/usmile.md
@@ -0,0 +1,12 @@
+---
+title: 'USMILE: Understanding and Modeling the Earth System with Machine Learning'
+logo: 'erc.webp'
+pi: 'V. Eyring, M. Reichstein, P. Gentine'
+uvpi: 'G. Camps-Valls'
+years: '2020-2026'
+website: 'https://www.usmile-erc.eu/'
+funding_source: 'ERC Synergy Grant'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/earth_science/xaida.md b/content/research/earth_science/xaida.md
new file mode 100644
index 000000000..262244b46
--- /dev/null
+++ b/content/research/earth_science/xaida.md
@@ -0,0 +1,12 @@
+---
+title: 'XAIDA: Extreme Events - Artificial Intelligence for Detection and Attribution'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls'
+years: '2021-2025'
+website: 'https://xaida.eu'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/machine_learning/_index.md b/content/research/machine_learning/_index.md
index dbf18765c..42919cdf0 100644
--- a/content/research/machine_learning/_index.md
+++ b/content/research/machine_learning/_index.md
@@ -1,26 +1,26 @@
---
title: "Research Machine Learning"
+type: "research"
+layout: "single"
---
-# Machine learning
-
-
-
-
-## Motivation
+# Motivation
**Machines may extract knowledge from measurements by analyzing the statistical properties of the acquired signals.** The aim is to capture the structures in the experimental data. Our contributions to machine learning include algorithms for automatic classification (recognition) of objects, robust regression tools for multidimensional data, density estimation algorithms, and feature extraction and selection. We study the principled design of learning algorithms, especially neural networks and kernel methods. **Methods are designed for general purposes but also tailored to the application, most of them related to image processing, computer vision tasks and Earth science data analysis, but also non-structured data in social sciences.**
-## An AI agenda
+# An AI agenda
**The main research hypothesis of our research agenda is that current AI models are limited to tackling fitting problems only, and do not have a clear notion of space, time and causal relations.** We need to address the more ambitious question of understanding through machine learning, going beyond mere model fitting. This goal requires developing (i) targeted ML that respects data characteristics, (ii) hybrid physics-aware ML that incorporates domain knowledge and inductive priors, and, more importantly, (iii) moving from fitting to understanding through AI models that are explainable and grasp causal relations from observational data.
-
-
-
-
+
+
+
+
+
+
+
**In order to advance the previous AI agenda, we base our research on three fundamental pillars:**
@@ -28,7 +28,7 @@ title: "Research Machine Learning"
- **Pillar 2. Physics-aware modeling, inductive biases and domain knowledge in machine learning.** Our activities here encompass developing algorithms that live in the Physics and machine learning interplay: both through encoding (hybrid machine learning models) and decoding (discovery principles and physical laws from data).
- **Pillar 3. Explainable AI and Causality.** Scientific consistency, reliability, and explainability of obtained results are of paramount importance in complex systems. A prerequisite to achieve those is to design ML models that cannot be challenged, or whose inner functioning can be visualized, queried or interpreted. We aim to achieve transparency, interpretability and explainability of models to achieve a wider adoption and confidence by domain scientists. Yet, ML model interpretability is not enough because model development already assumes a causal relation, the real far-end goal of learning with machines.
-## Challenges and approaches
+# Challenges and approaches
**The group has a relatively long tradition in machine learning, particularly focused on the study and development of neural networks and kernel machines.** Recently, the fields of manifold, semisupervised and active learning have captured our attention. The study of the intrinsic coordinates and representations where data in general, and images in particular, live is interesting for many applications. Regression and classification methods must be modified to deal with changes in data/image statistics efficiently. **In many signal and image processing problems, such as change detection or multitemporal data classification, adaptation is a must. We currently model shifts, twists and wrappings of image distributions by designing semisupervised learning methods. The fields of manifold alignment and domain adaptation also have important information-theoretic implications, which we analyzed through kernel entropy component analysis, multivariate Gaussianization transforms and extensions of principal curves and surfaces. And yes, deep learning is of interest to us; what has the network learned, and why?** Follow our works in [Papers](../../publications/journals/).
@@ -41,29 +41,3 @@ Modeling input data, however, does not ensure that the model will adapt to chang
**Sparsity as a form of compacting information was studied in both deep architectures and kernel machines.** In all these settings, we are particularly interested in **encoding prior knowledge and invariances** in the models: signal and noise properties, spatial-temporal constraints, and robustness to illumination changes, just to name a few. Dealing with invariances and priors immediately calls for **regularization and Bayesian inference**. While pure discriminative approaches have been developed in our group, in recent years we have paid attention to the field of Bayesian nonparametric models as a proper framework to encode such beliefs. We are active in kernel design for **Gaussian process regression and model inversion, and in designing efficient sampling schemes with modern Markov Chain Monte Carlo strategies**.
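
As a minimal, purely illustrative example of the last point (toy data and hyper-parameters; tailored kernel design and MCMC-based inference are not covered here), standard Gaussian process regression with an RBF kernel looks as follows:

```python
# Toy Gaussian process regression with an RBF kernel (numpy only, illustrative values).
import numpy as np

def rbf(a, b, lengthscale=0.5, variance=1.0):
    return variance * np.exp(-0.5 * (a[:, None] - b[None, :]) ** 2 / lengthscale**2)

rng = np.random.default_rng(0)
x_train = rng.uniform(-3, 3, 15)
y_train = np.sin(x_train) + 0.1 * rng.standard_normal(15)
x_test = np.linspace(-3, 3, 5)

K = rbf(x_train, x_train) + 1e-2 * np.eye(15)          # kernel matrix + noise variance
K_s = rbf(x_test, x_train)
mean = K_s @ np.linalg.solve(K, y_train)               # posterior predictive mean
cov = rbf(x_test, x_test) - K_s @ np.linalg.solve(K, K_s.T)
print("posterior mean:", np.round(mean, 2))
print("posterior std :", np.round(np.sqrt(np.diag(cov)), 2))
```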
-### Related Projects
-
-
-{{< projects "usmile" "xaida" "deepcube" "elise" >}}
-
-
-
diff --git a/content/research/machine_learning/deepcube.md b/content/research/machine_learning/deepcube.md
new file mode 100644
index 000000000..3bd020c63
--- /dev/null
+++ b/content/research/machine_learning/deepcube.md
@@ -0,0 +1,12 @@
+---
+title: 'DeepCube: Explainable AI pipelines for big Copernicus data'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls, M. Piles'
+years: '2021-2024'
+website: 'https://deepcube-h2020.eu/'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/machine_learning/elise.md b/content/research/machine_learning/elise.md
new file mode 100644
index 000000000..0b74d4ebf
--- /dev/null
+++ b/content/research/machine_learning/elise.md
@@ -0,0 +1,12 @@
+---
+title: 'ELISE: European Learning And Intelligent Systems Excellence'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls'
+years: '2020-2023'
+website: 'https://www.elise-ai.eu/'
+funding_source: 'ICT-48'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/machine_learning/usmile.md b/content/research/machine_learning/usmile.md
new file mode 100644
index 000000000..8c077b83f
--- /dev/null
+++ b/content/research/machine_learning/usmile.md
@@ -0,0 +1,12 @@
+---
+title: 'USMILE: Understanding and Modeling the Earth System with Machine Learning'
+logo: 'erc.webp'
+pi: 'V. Eyring, M. Reichstein, P. Gentine'
+uvpi: 'G. Camps-Valls'
+years: '2020-2026'
+website: 'https://www.usmile-erc.eu/'
+funding_source: 'ERC Synergy Grant'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/machine_learning/xaida.md b/content/research/machine_learning/xaida.md
new file mode 100644
index 000000000..262244b46
--- /dev/null
+++ b/content/research/machine_learning/xaida.md
@@ -0,0 +1,12 @@
+---
+title: 'XAIDA: Extreme Events - Artificial Intelligence for Detection and Attribution'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls'
+years: '2021-2025'
+website: 'https://xaida.eu'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/_index.html b/content/research/philosophy/_index.html
similarity index 100%
rename from content/research/_index.html
rename to content/research/philosophy/_index.html
diff --git a/content/research/philosophy/_index.md b/content/research/philosophy/_index.md
new file mode 100644
index 000000000..15af9513a
--- /dev/null
+++ b/content/research/philosophy/_index.md
@@ -0,0 +1,49 @@
+---
+title: "Philosophy, Goal, Vision"
+type: "research"
+layout: "single"
+---
+
+# Context
+
+**Complex systems are ubiquitous: from the Earth's global climate and the human brain, to society and economic systems.** The analysis and characterization of complex systems is very challenging: many processes are involved, exhibiting highly nonlinear, dynamic, and networked relations. Different physical, chemical and biological processes interact in several spheres, and at diverse spatio-temporal scales. When data and domain knowledge are available, exploiting regularities through statistical machine learning is certainly an outstanding opportunity.
+
+**Our ambition is to develop novel artificial intelligence methods for modeling and understanding complex systems, with particular focus on the Earth, Climate, Brain and Society systems, in isolation and interaction.** We want to infer how the systems work from observational data analysis, models, and domain knowledge. A perfect storm is over us: (i) an ever increasing amount of observational data, acquired by all types of sensory systems (from fMRI and biosignal records, to satellite sensors and social network tracking devices), (ii) improved Earth-Climate-Brain mechanistic models capable of resolving processes at higher resolution, accuracy and detail, and (iii) advanced machine learning techniques able to extract patterns and identify drivers from data. In the last decade, machine learning models have helped to analyze and characterize complex systems. ML has helped to monitor and better understand land, oceans, and atmosphere along with their interactions, to explain the emergence of connections and specific behaviors in the brain, and to explain societal and economic relations and impacts.
+
+## Vision and approaches
+
+Current AI approaches, however, face three important challenges: (1) they cannot deal efficiently with the particular characteristics of data, (2) they do not respect the most elementary laws of physics like mass or energy conservation, and (3) they just interpolate but no fundamental principle is learned from data. We tackle these three issues by designing algorithms able to deal with huge amounts of complex, heterogeneous, multisource, and (un)structured data:
+
+- Firstly, we develop a new generation of methods to improve efficiency, prediction accuracy, uncertainty quantification and error propagation.
+- Secondly, we push the boundaries of a new family of hybrid physics-aware machine learning models that encode physical knowledge about the problem, and attain explainable models learned from empirical data.
+- Finally, we deal with learning graphical causal models to explain the potentially complex interactions between key observed variables, and to discover hidden essential drivers and confounding factors (a toy sketch of the conditional-independence reasoning involved follows this list). Our vision thus addresses the fundamental problem of moving from correlation to dependence and then to causation through data analysis.
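+
+A hedged, minimal sketch of that conditional-independence reasoning (synthetic variables, not our actual pipeline): in a chain X -> Y -> Z, X and Z are clearly correlated, yet become nearly independent once Y is conditioned on, so no direct X-Z edge is kept in the graph.
+
+```python
+# Toy illustration: partial correlation as a conditional-independence test.
+import numpy as np
+
+rng = np.random.default_rng(1)
+n = 5000
+x = rng.standard_normal(n)
+y = 0.8 * x + 0.3 * rng.standard_normal(n)   # X -> Y
+z = 0.7 * y + 0.3 * rng.standard_normal(n)   # Y -> Z
+
+def partial_corr(a, b, c):
+    """Correlation of a and b after linearly regressing out the conditioning variable c."""
+    ra = a - np.polyval(np.polyfit(c, a, 1), c)
+    rb = b - np.polyval(np.polyfit(c, b, 1), c)
+    return np.corrcoef(ra, rb)[0, 1]
+
+print('corr(X, Z)     =', round(np.corrcoef(x, z)[0, 1], 3))   # clearly non-zero
+print('corr(X, Z | Y) =', round(partial_corr(x, z, y), 3))     # close to zero
+```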
+
+
+**The theoretical developments are guided by the challenges inherent in all three complex systems: Brain, Earth-Climate and Society. We focus on developing novel AI methods for modeling and understanding complex systems. These methods should be consistent, robust and trustworthy, and ultimately able to discover causal relations from data and assumptions at the local cell/cortex level, at regional and continental scales of societal interactions, and at global planetary scales in space and time. Our long-term vision is tied to opening new frontiers and driving research towards algorithms capable of discovering knowledge from data, a stepping stone before the more ambitious far-end goal of machine reasoning in complex systems.**
+
+## Overarching goal and ambition
+
+Our overarching goal is to go beyond mere data fitting and advance towards a more significant achievement: learning and understanding the processes through advanced AI techniques, model simulations and domain knowledge. The ideas have been well received and supported by the EU excellence science pillars ([ERC-USMILE](https://www.usmile-erc.eu/), [H2020-DeepCube](https://deepcube-h2020.eu/), [H2020-XAIDA](https://xaida.eu/), and [Marie Curie iMIRACLI](http://www.imiracli.eu/)), and are being adopted by different communities in the Brain, Earth, Climate, and Social sciences as well. Our research agenda (AI, hybrid physics-aware ML modeling, abstraction, understanding rather than fitting, explainability and observational causal discovery) is not only timely but urgent in the current context of a big data deluge.
+
+## Activities
+
+The activities in ISP involve strong interactions between the theoretical and the complex-systems pillars (see figure below). A theoretical component, led by ISP in the interplay between machine learning and visual neuroscience, feeds case studies of complex systems, most notably the Earth and climate system, the visual brain and the biosphere-anthroposphere interactions. Follow the links above for more details about the specific goals, proposed methodology, group collaborations, related projects, and applications in the particular case studies.
+
+
+
+
+
+
+
+## Why us? Why with us?
+
+**Our research agenda for 2020-2030 follows the tradition and expertise of the team over the last 20 years in remote sensing data analysis and visual neuroscience, and tackles more ambitious problems around complex systems with similar computational techniques and advanced machine learning.**
+
+The team has received funding from the EU excellence pillars in recent years: (1) two ERC grants (consolidator and [synergy](https://www.usmile-erc.eu/)) related to geosciences and climate modeling with AI, (2) seven H2020 projects for the development of AI both theoretically and with Earth, vision, and societal applications, and (3) involvement in six MINECO projects at the intersection of remote sensing, Earth, and vision sciences. We have participated in and coordinated top-notch research projects over the last five years. See the [Projects](projects.html) page.
+
+The group is also very active in technology transfer internationally, with projects in collaboration with [ESA](https://www.esa.int/), [NASA](https://www.nasa.gov), [EUMETSAT](https://www.eumetsat.int), or [Google](https://www.google.com/). See the [Collaborators](collaborators.html) page. The group is committed to higher-level education: ISP lectures in several endorsed masters, participates in COST actions, coordinates activities in an [ELLIS research program](https://ellis.eu/programs/machine-learning-for-earth-and-climate-sciences), and is a core member of [ELISE](https://www.elise-ai.eu/) and [i-AIDA](https://www.i-aida.org/) for the excellence of AI science, transfer, and education in Europe. We contribute to knowledge transfer, as well as to the development and adoption of AI in the industry and private sectors.
+
+
+
+
+
diff --git a/content/research/social_science/_index.md b/content/research/social_science/_index.md
index a794b8adf..6fe10e872 100644
--- a/content/research/social_science/_index.md
+++ b/content/research/social_science/_index.md
@@ -1,26 +1,24 @@
---
-title: "Research Social Science"
+title: "AI for sustainability and social sciences"
+type: "research"
+layout: "single"
---
-
-
-# AI for sustainability and social sciences
-
-
-
-## Motivation
+# Motivation
**The Earth is a highly complex, dynamic, and networked system where very different physical, chemical and biological processes interact to form the world we know. The description of such a complex system requires the integration of different disciplines such as Physics, Chemistry, Mathematics and other applied sciences, leading to what has been coined Earth System Science (ESS). The analysis of the Earth system involves studying interacting processes occurring in several spheres (atmosphere, hydrosphere, cryosphere, geosphere, pedosphere, biosphere, and magnetosphere) as well as the anthroposphere, where society acts.**
-
-
-
-
+
+
+
+
+
+
Earth system science provides the physical basis of the world we live in, with the final objective of achieving a sustainable development of our society, see the United Nations [Sustainable Development Goals](https://sustainabledevelopment.un.org/). We develop AI models for tackling pressing questions in the climate-society interplay. We tackle problems where the human is in the middle (both a cause and an effect); where SDGs, fairness and ethics are at stake, and where elusive concepts like wealth, well-being and development are involved. Our unique approach involves exploiting massive amounts of data and machine learning algorithms to model and understand the environment-human interactions.
-## Challenges and approaches
+# Challenges and approaches
Our main goal is to develop new machine learning models for the efficient treatment of biophysical land parameters and related covariates at local and global scales. This main scientific goal translates into the following objectives:
@@ -30,26 +28,4 @@ Our main goal is to develop new machine learning models for the efficient treatm
- **Algorithmic Fairness.** New social and economic activities massively exploit big data and machine learning algorithms to do inference on people's lives. Applications include automatic curricula evaluation, wage determination, and risk assessment for credits and loans. Recently, many governments and institutions have raised concerns about the lack of fairness, equity and ethics in machine learning when treating these problems. It has been shown that simply excluding sensitive features, such as gender or race, is not enough to mitigate discrimination when other related features are included (SDGs 5 and 10). Instead, including fairness in the objective function has been shown to be more effective (a toy sketch of such a penalized objective appears after this list). We develop novel fair regression and dimensionality reduction methods to tackle such problems. The proposed methods allow us to tackle pressing societal problems like predicting income with gender and/or race as sensitive variables, contraceptive method prediction under demographic and socio-economic sensitive descriptors, or predicting climate change while protecting against anthropogenic factors. Neural networks, Gaussian processes, kernel machines and optimal transport are our favourite tools.
-- **Causality in the biosphere-anthroposphere coupled system.** The terrestrial biosphere and the anthroposphere are deeply coupled in multiple ways. Humans depend on a range of ecosystem goods and services but, at the same time, they heavily engineer and modify land ecosystems. Over the past decades, human development has generally made substantial progress in terms of education (SDG 2), health services (SDG 2), life expectancy (SDG 2) and many other aspects around the planet with very few exceptions due to e.g. warfare and in sub-Saharan Africa. The grand question is if, and how, one can identify and quantify relationships between changes in land-ecosystem states and the human development metrics at the global scale. We tackle problems of food insecurity (SDGs 1, 2), climate-induced migration (SDGs 1, 2), infants well-being (SDGs 3, 4) and conflicts (SDGs 10, 11). Our hypothesis is that we can now tackle causal discovery problems relying on both assumed relations and exploiting observational data. We rely on the science of causal inference to unravel relations between coupled variables beyond correlations even in the presence of non-linearities and non-stationarities.
-
-
-### Releted Projects
-
-{{< projects deepcube xaida scale mapict >}}
-
-
-
\ No newline at end of file
+- **Causality in the biosphere-anthroposphere coupled system.** The terrestrial biosphere and the anthroposphere are deeply coupled in multiple ways. Humans depend on a range of ecosystem goods and services but, at the same time, they heavily engineer and modify land ecosystems. Over the past decades, human development has generally made substantial progress in terms of education (SDG 2), health services (SDG 2), life expectancy (SDG 2) and many other aspects around the planet, with very few exceptions due to, e.g., warfare and in sub-Saharan Africa. The grand question is whether, and how, one can identify and quantify relationships between changes in land-ecosystem states and human development metrics at the global scale. We tackle problems of food insecurity (SDGs 1, 2), climate-induced migration (SDGs 1, 2), infants' well-being (SDGs 3, 4) and conflicts (SDGs 10, 11). Our hypothesis is that we can now tackle causal discovery problems relying both on assumed relations and on observational data. We rely on the science of causal inference to unravel relations between coupled variables beyond correlations, even in the presence of non-linearities and non-stationarities.
\ No newline at end of file
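+
+A toy sketch (synthetic data and hypothetical weights; not our published fair-regression methods) of what "fairness in the objective function" means in the **Algorithmic Fairness** bullet above: a ridge regression whose loss also penalizes the covariance between the predictions and a sensitive attribute, so accuracy is traded explicitly against dependence on that attribute.
+
+```python
+# Illustrative fair ridge regression: penalize cov(predictions, sensitive attribute)^2.
+import numpy as np
+
+rng = np.random.default_rng(2)
+n, d = 2000, 5
+s = rng.integers(0, 2, n).astype(float)               # sensitive attribute (hypothetical)
+X = rng.standard_normal((n, d)) + 0.8 * s[:, None]    # features correlated with s
+y = X @ np.array([1.0, -0.5, 0.3, 0.0, 0.2]) + 1.5 * s + 0.1 * rng.standard_normal(n)
+Xc, yc, sc = X - X.mean(0), y - y.mean(), s - s.mean()
+
+def fair_ridge(lam, alpha=1e-3):
+    """Solve the normal equations of J(w) = MSE + alpha*||w||^2 + lam*cov(Xw, s)^2."""
+    v = Xc.T @ sc / n                                  # gradient direction of the fairness term
+    A = Xc.T @ Xc / n + alpha * np.eye(d) + lam * np.outer(v, v)
+    w = np.linalg.solve(A, Xc.T @ yc / n)
+    pred = Xc @ w
+    return np.mean((pred - yc) ** 2), abs(np.cov(pred, sc)[0, 1])
+
+for lam in [0.0, 10.0, 1000.0]:
+    mse, dep = fair_ridge(lam)
+    print(f"lambda={lam:7.1f}  mse={mse:.3f}  |cov(pred, s)|={dep:.3f}")
+```
+
+Sweeping the penalty weight traces the accuracy-fairness trade-off referred to above.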
diff --git a/content/research/social_science/deepcube.md b/content/research/social_science/deepcube.md
new file mode 100644
index 000000000..3bd020c63
--- /dev/null
+++ b/content/research/social_science/deepcube.md
@@ -0,0 +1,12 @@
+---
+title: 'DeepCube: Explainable AI pipelines for big Copernicus data'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls, M. Piles'
+years: '2021-2024'
+website: 'https://deepcube-h2020.eu/'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/social_science/mapping-and-the-citizen-sensor.md b/content/research/social_science/mapping-and-the-citizen-sensor.md
new file mode 100644
index 000000000..2fb193148
--- /dev/null
+++ b/content/research/social_science/mapping-and-the-citizen-sensor.md
@@ -0,0 +1,12 @@
+---
+title: 'Mapping and the citizen sensor'
+logo: 'cost.webp'
+pi: ''
+uvpi: ''
+years: '2013-2016'
+website: 'https://www.cost.eu/actions/TD1202/'
+funding_source: 'ICT COST Action'
+role: ''
+project_type: ''
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/social_science/scale.md b/content/research/social_science/scale.md
new file mode 100644
index 000000000..a57814a66
--- /dev/null
+++ b/content/research/social_science/scale.md
@@ -0,0 +1,12 @@
+---
+title: 'SCALE: Causal Inference in the Human-Biosphere Coupled System'
+logo: 'fbbva.webp'
+pi: 'G. Camps-Valls'
+uvpi: ''
+years: '2020-2022'
+website: 'https://www.fbbva.es/noticias/concedidas-5-ayudas-a-equipos-de-investigacion-cientifica-en-big-data/'
+funding_source: 'Fundación BBVA'
+role: ''
+project_type: ''
+partners: []
+---
diff --git a/content/research/social_science/xaida.md b/content/research/social_science/xaida.md
new file mode 100644
index 000000000..262244b46
--- /dev/null
+++ b/content/research/social_science/xaida.md
@@ -0,0 +1,12 @@
+---
+title: 'XAIDA: Extreme Events - Artificial Intelligence for Detection and Attribution'
+logo: 'h2020.webp'
+pi: ''
+uvpi: 'Gustau Camps-Valls'
+years: '2021-2025'
+website: 'https://xaida.eu'
+funding_source: 'EU H2020'
+role: ''
+project_type: 'Research Project'
+partners: []
+---
\ No newline at end of file
diff --git a/content/research/visual_brain/_index.md b/content/research/visual_brain/_index.md
new file mode 100644
index 000000000..21649b05a
--- /dev/null
+++ b/content/research/visual_brain/_index.md
@@ -0,0 +1,125 @@
+---
+title: "Image and Video Processing: Scene Statistics and Visual Neuroscience at work!"
+abstract: |
+ Efficient coding of visual information and efficient inference of missing information in images depend on two factors:
+
+ 1. The statistical structure of photographic images, and
+ 2. The nature of the observer that will analyze the result.
+
+ Interestingly, these two factors (image regularities and human vision) are deeply related since the evolution of biological sensors seems to be guided by statistical learning (see our work on the *Efficient Coding Hypothesis* in [Visual Neuroscience](neuro.html)). However, the simultaneous consideration of these two factors is unusual in the image processing community, particularly beyond Gaussian image models and linear models of the observer.
+ Our work in image and video processing has been parallel to our investigation in describing the non-Gaussian nature of visual scenes and the nonlinear behavior of visual cortex. This parallel approach is sensible since these are two sides of the same issue in vision ([the Efficient Coding Hypothesis again!](https://en.wikipedia.org/wiki/Efficient_coding_hypothesis)). Specifically, the core algorithm used in many applications has been the [Divisive Normalization](https://en.wikipedia.org/wiki/Normalization_model), a canonical computation in sensory neurons with interesting statistical effects (see [Neur.Comp.10](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Malo_Laparra_Neural_10b.pdf)).
+
+ We have used this perceptual (and also statistical) model to propose novel solutions in bit allocation, to identify perceptually relevant motion, to smooth image representations, and to compute distances between images.
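+
+ A hedged numerical sketch of that computation (toy parameters and a 1-D Gaussian interaction kernel; not the exact models of the papers above): each linear response is divided by a saturating pool of its neighbours' energies.
+
+ ```python
+ # Toy Divisive Normalization: x_i = sign(r_i)*|r_i|^g / (b + sum_k H_ik*|r_k|^g).
+ import numpy as np
+
+ def divisive_normalization(r, b=0.1, g=2.0, sigma=1.0):
+     """r: vector of linear (e.g. wavelet) responses; returns normalized responses."""
+     energy = np.abs(r) ** g
+     idx = np.arange(len(r))
+     H = np.exp(-0.5 * ((idx[:, None] - idx[None, :]) / sigma) ** 2)  # neighbour interactions
+     return np.sign(r) * energy / (b + H @ energy)
+
+ print(divisive_normalization(np.array([0.2, 1.5, -3.0, 0.1, 2.0])))
+ ```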
+
+ # Image and Video Processing
+
+ Low-level Image Processing (coding, restoration, synthesis, white balance, color and texture editing, etc.) is all about *image statistics* in a domain where *the metric is non-Euclidean* (i.e. induced by the data or the observer).
+
+ We proposed original image processing techniques using both perception models and image statistics including:
+
+ (i) improvements of JPEG standard for **image coding** through nonlinear texture vision models [Electr.Lett.95](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT95.PS.gz), [Electr.Lett.99](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT99.PS.gz), [IEEE TNN05](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Gomez-Perez05_IEEETNN.pdf), [IEEE TIP06a](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscript4.pdf), [JMLR08](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Camps-Valls08_JMLR.pdf),[RPSP12](http://www.uv.es/gcamps/papers/paper_patent_6_review.pdf), [Patent08](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/patente_v5_jesus.pdf), (ii) improvements of MPEG standard for **video coding** with new perceptual quantization scheme and new motion estimation focused on perceptually relevant **optical flow** [LNCS97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/LNCS97.PS.gz), [Electr.Lett.98](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT98.PS.gz), [Electr.Lett.00a](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/elect00.ps), [Electr.Lett.00b](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/seg_ade2.ps), [IEEE TIP01](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ieeeoct01.pdf), [Redund.Reduct.99](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Redundancy_Reduction_Malo_99.pdf), (iii) new **image restoration** techniques based on nonlinear contrast perception models and the image statistics in local frequency domains [IEEE TIP 06b](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscript_TIP_00864_2004_R2.pdf), [JMLR10](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/laparra10a.pdf), (iv) new approaches to **color constancy** either based on relative chromatic descriptors
+ [Vis.Res.97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/VISRES97.PS.gz),[J.Opt.96](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/JOPT96.PS.gz), statistically-based chromatic adaptation models [Neur.Comp.12](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Neco_accepted_2012.pdf), [PLoS-ONE14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Gutmann_PLOS_ONE_2014.pdf), or Bayesian estimation of surface reflectance [IEEE-TGRS14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscr_TGRS_2012_00431.pdf), (v) new subjective **image and video distortion measures** using nonlinear perception models [Im.Vis.Comp.97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/IVC97.PS.gz), [Disp.99](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/displays_99.pdf), [IEEE ICIP02](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/icip02.pdf), [JOSA10](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Laparra_JOSA_10.pdf),[Proc.SPIE15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/malo15a-reprint.pdf), (vi) **image classification** and **knowledge extraction** (or regression) based on our feature extraction techniques [IEEE-TNN11](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Laparra11.pdf), [IEEE-TGRS13](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/AdaptVQ_ieeetgars_2012.pdf),[Int.J.Neur.Syst.14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/IJNS_Laparra14_accepted_v5.pdf), [IEEE-JSTSP15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/drr_jstsp2014_final.pdf). See CODE for image and video processing applications [here](/old_pages/code/soft_imvideo/ISP%20-%20Image%20and%20Video%20processing%20software.html).
+
+
+imagenes:
+ - ruta: 'image_processing.webp'
+ titulo: "Image Processing"
+ descripcion: "Controversies around using Mean Squared Error and images like 'Lena Sölderberg'. Learn more about the MSE issue [here](/old_pages/code/soft_imvideo/subpages/vista_toolbox.html)."
+ - ruta: 'animated_coder.gif'
+ titulo: "Video Compression Model"
+ descripcion: "MPEG-like video compression involves motion compensation and residual quantization. Vision Science and Statistical Learning can enhance these predictive coding methods."
+ - ruta: 'animated_video_coding.gif'
+ titulo: "Motion Estimation and Residual Quantization"
+ descripcion: "Decoded sequences under different settings of Motion Estimation and Residual Quantization. Examples in [Electr.Lett.00a](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/elect00.ps), [IEEE TIP01](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ieeeoct01.pdf)."
+ - ruta: 'im_coding.webp'
+ titulo: "Image Coding"
+ descripcion: "Using nonlinear perceptual image representations is critical to improving JPEG compression."
+ - ruta: 'video_coding.webp'
+ titulo: "Video Coding"
+ descripcion: "Improved bit allocation in MPEG video coding through nonlinear perception models."
+ - ruta: 'ruidos_great.webp'
+ titulo: "Image Restoration"
+ descripcion: "Image restoration using regularization functionals based on nonlinear perception models and image smoothing in the wavelet domain."
+ - ruta: 'flor1.webp'
+ titulo: "Color Constancy - Adaptation"
+ descripcion: "Color constancy addressed through linear and nonlinear solutions to the geometric problem of manifold matching under different illumination conditions."
+ - ruta: 'metrics.webp'
+ titulo: "Subjective Image/Video Metrics"
+ descripcion: "Observer's opinion is better correlated with our Euclidean distance in nonlinear perceptual domains than with Structural Similarity Index."
+ - ruta: 'clasi1.webp'
+ titulo: "Image Classification - Feature Adaptation"
+ descripcion: "Classifiers using RBIG, SPCA, PPA, DRR features are robust to changes in acquisition conditions."
+
+referencias:
+ - nombre: "Electr.Lett.95"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT95.PS.gz"
+ - nombre: "Electr.Lett.99"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT99.PS.gz"
+ - nombre: "IEEE TNN05"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Gomez-Perez05_IEEETNN.pdf"
+ - nombre: "IEEE TIP06a"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscript4.pdf"
+ - nombre: "JMLR08"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Camps-Valls08_JMLR.pdf"
+ - nombre: "RPSP12"
+ url: "http://www.uv.es/gcamps/papers/paper_patent_6_review.pdf"
+ - nombre: "Patent08"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/patente_v5_jesus.pdf"
+ - nombre: "LNCS97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/LNCS97.PS.gz"
+ - nombre: "Electr.Lett.98"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ELECT98.PS.gz"
+ - nombre: "Electr.Lett.00a"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/elect00.ps"
+ - nombre: "IEEE TIP01"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/ieeeoct01.pdf"
+ - nombre: "Redund.Reduct.99"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Redundancy_Reduction_Malo_99.pdf"
+ - nombre: "IEEE TIP 06b"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscript_TIP_00864_2004_R2.pdf"
+ - nombre: "JMLR10"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/laparra10a.pdf"
+ - nombre: "Vis.Res.97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/VISRES97.PS.gz"
+ - nombre: "J.Opt.96"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/JOPT96.PS.gz"
+ - nombre: "Neur.Comp.12"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Neco_accepted_2012.pdf"
+ - nombre: "PLoS-ONE14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Gutmann_PLOS_ONE_2014.pdf"
+ - nombre: "IEEE-TGRS14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/manuscr_TGRS_2012_00431.pdf"
+ - nombre: "Im.Vis.Comp.97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/IVC97.PS.gz"
+ - nombre: "Disp.99"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/displays_99.pdf"
+ - nombre: "IEEE ICIP02"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/icip02.pdf"
+ - nombre: "JOSA10"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Laparra_JOSA_10.pdf"
+ - nombre: "Proc.SPIE15"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/malo15a-reprint.pdf"
+ - nombre: "IEEE-TNN11"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/Laparra11.pdf"
+ - nombre: "IEEE-TGRS13"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/AdaptVQ_ieeetgars_2012.pdf"
+ - nombre: "Int.J.Neur.Syst.14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/IJNS_Laparra14_accepted_v5.pdf"
+ - nombre: "IEEE-JSTSP15"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_brain/drr_jstsp2014_final.pdf"
+
+enlaces:
+ - nombre: "Vista Toolbox"
+ url: "../../code/image_video_processing/vistacore/content/"
+ - nombre: "Efficient Coding Hypothesis"
+ url: "https://en.wikipedia.org/wiki/Efficient_coding_hypothesis"
+ - nombre: "NeuroImage Unit"
+ url: "https://www.acim.lafe.san.gva.es/acim/?page_id=1229"
+ - nombre: "Vision and Color Processing Software"
+ url: "../../code/vision_and_color/"
+
+
+type: "research"
+layout: "single2"
+---
+
diff --git a/content/research/visual_neuroscience/_index.md b/content/research/visual_neuroscience/_index.md
new file mode 100644
index 000000000..0a2ee6267
--- /dev/null
+++ b/content/research/visual_neuroscience/_index.md
@@ -0,0 +1,206 @@
+---
+title: "Vision Science: from Optics to Neuroscience and Statistical Learning"
+abstract: |
+ **Vision** is the ability to interpret the surrounding environment by analyzing the measurements drawn by imaging systems. This ability is particularly impressive in *humans* compared to the current state of the art in *computers*.
+
+ The study of all phenomena related to *vision in biological systems* (particularly in humans) is usually referred to as **Vision Science**. It addresses a variety of issues ranging from the formation of the visual signal—such as the physics of the imaging process, which includes Radiometry and **Physiological Optics**—to the analysis of the visual signal, which is of interest for Neuroscience and Psychology.
+
+ This analysis involves the extraction of visual primitives through basic computations in the retina-cortex neural pathway and the subsequent information processing that leads to scene descriptors of higher abstraction levels ([see elsewhere](http://www.scholarpedia.org/article/Models_of_visual_cortex)). These problems can be approached from different perspectives:
+
+ - A *mechanistic perspective*, which focuses on describing the empirical behavior of the system, based on experimental recordings from **Psychophysics** and **Neurophysiology**.
+ - A *normative perspective*, which looks for the functional reasons (organization principles) that explain the behavior. This perspective relies on the study of **Image Statistics** and the use of concepts from **Information Theory** and **Statistical Learning**.
+
+ The latter is known as the [Efficient Coding Hypothesis](https://en.wikipedia.org/wiki/Efficient_coding_hypothesis).
+
+ Over the years, we have made original contributions in *all* of the above subdisciplines related to (low-level) Vision Science. Currently, we are shifting our focus to more abstract visual functions.
+
+ # Experiments in Vision Science
+
+ We made experimental contributions in three areas: *Physiological Optics*, *Psychophysics*, and *Image Statistics*.
+
+ 1. In the field of **Physiological Optics**, we measured the optical transfer function of the lens+cornea system in-vivo [Opth.Phys.Opt.97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/OPH97.PS.gz). This work received the European Vistakon Research Award in 1994.
+
+ 2. In **Psychophysics**, we proposed simplified methods to measure the Contrast Sensitivity Function across the entire frequency domain [J.Opt.94](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT94.PS.gz), and developed a fast and accurate method to measure the parameters of multi-stage linear+nonlinear vision models [Proc.SPIE15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/malo15a-reprint.pdf).
+
+ 3. In **Image Statistics**, we gathered spatially and spectrally calibrated image samples to determine the properties of these signals and their variations under changes in illumination, contrast, and motion [Im.Vis.Comp.00](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ivc99.ps.gz), [Neur.Comp.12](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Neco_accepted_2012.pdf), [IEEE-TGRS14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/manuscr_TGRS_2012_00431.pdf), [PLoS-ONE14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Gutmann_PLOS_ONE_2014.pdf), [Rem.Sens.Im.Proc.11](http://isp.uv.es/files/rem_sens_im_proc_12_ch02.pdf), [Front.Neurosci.15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/after_effects).
+
+ # Theory: empirical models in Vision Science
+
+ We proposed mathematical descriptions of different visual dimensions: *Texture*, *Color*, and *Motion*.
+
+ 1. We used wavelet representations to propose nonstationary **Texture Vision** models [J.Mod.Opt.97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JMO97.PS.gz), [MScThesis95](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/msc_jmalo.pdf).
+
+ 2. We developed **Color Vision** models with illumination invariance, which allow for the reproduction of chromatic anomalies, adaptation, and aftereffects [Vis.Res.97](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/VISRES97.PS.gz), [J.Opt.96](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT96.PS.gz), [J.Opt.98](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT98.PS.gz), [JOSA04](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/josa_04.pdf), [Neur.Comp.12](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Neco_accepted_2012.pdf).
+
+ 3. We created **Motion Vision** models [Alheteia08](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Malo_Alheteia_08.pdf) that focus on optical flow computation in perceptually relevant moving regions [J.Vis.01](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/vss_poster.eps), [PhDThesis99](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Redundancy_Reduction_Malo_99.pdf), and explain the *static motion aftereffect* [Front.Neurosci.15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/after_effects).
+
+ All these psychophysical and physiological models have a parallel *linear+nonlinear* structure where **receptive fields** and **surround-dependent normalization** play an important role.
+
+ # Theory: principled models in Vision Science
+
+ This category refers to the proposition of organizational laws of sensory systems that explain empirical phenomena. These principles demonstrate that neural function has been adapted to (or is determined by) the statistics of visual stimuli.
+
+ 1. **Derivation of Linear Properties**: We worked on deriving the linear properties of the sensors and found that their spatio-chromatic sensitivity, changes in receptive fields, and phase properties arise from optimal solutions to the adaptation problem under noise constraints and manifold matching [PLoS-ONE14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Gutmann_PLOS_ONE_2014.pdf), [IEEE-TGRS13](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/AdaptVQ_ieeetgars_2012.pdf). These properties are also derived from statistical independence requirements [LNCS11](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ICANN_2011_v7.pdf), [NeuroImag.Meeting11](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/SlidesNeuroImageMeeting11.pdf), and from optimal estimation of object reflectance [IEEE TGRS14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/manuscr_TGRS_2012_00431.pdf).
+
+ 2. **Derivation of Non-Linear Behavior**: We also derived the non-linear behavior for a variety of visual sensors (chromatic, texture, and motion sensors). We found that these nonlinearities are linked to optimal information transmission (entropy maximization) and/or error minimization in noisy systems (optimal vector quantization).
+
+ - We studied this relationship in the traditional *statistics-to-perception* direction, deriving the nonlinearity from regularities in the scene [Network06](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/V1_from_non_linear_ICA.pdf), [Neur.Comp.12](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Neco_accepted_2012.pdf), [Front.Neurosci.15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/after_effects).
+
+ - We also explored the (more novel) *perception-to-statistics* direction, examining the statistical effects of perceptually motivated nonlinearities [J.Opt.95](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT95.PS.gz), [Im.Vis.Comp.00](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ivc99.ps.gz), [LNCS00](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/spr00.ps), [Patt.Recog.03](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/patt_rec03.pdf), [Neur.Comp.10](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Malo_Laparra_Neural_10b.pdf), [LNCS10](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/LNAI10_malo_laparra.pdf), [NeuroImag.Meeting11](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/SlidesNeuroImageMeeting11.pdf).
+
+ # Theory: Statistical Learning for Vision Science
+
+ In theoretical neuroscience, deriving properties of biological sensors from the regularities in visual scenes requires novel tools for statistical learning. In this field, we developed new techniques for **unsupervised manifold learning**, **feature extraction** (or symmetry detection in datasets), **dimensionality reduction**, **probability density estimation**, **multi-information estimation**, **distance learning**, and automatic **adaptation** from optimal dataset matching.
+
+ Given our interest in applicability to Vision Science problems, we focused on techniques that can be explicitly represented in the image domain, so they can be compared with receptive fields of visual neurons, as opposed to the usual practice in the *Machine Learning* community. Techniques include (a brief sketch of RBIG, the first of them, appears after this list):
+
+ - **Rotation-based Iterative Gaussianization (RBIG)** [IEEE TNN 11](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Laparra11.pdf)
+ - **Sequential Principal Curves Analysis (SPCA)** [Network06](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/V1_from_non_linear_ICA.pdf), [Neur.Comp.12](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Neco_accepted_2012.pdf), [Front. Neurosci.15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/after_effects)
+ - **Principal Polynomial Analysis (PPA)** [Int.J.Neur.Syst.14](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/IJNS_Laparra14_accepted_v5.pdf)
+ - **Dimensionality Reduction based on Regression (DRR)** [IEEE JSTSP15](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/drr_jstsp2014_final.pdf)
+ - **Graph Matching for Adaptation** [IEEE TGRS13](https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/AdaptVQ_ieeetgars_2012.pdf)
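+
+ A hedged sketch of RBIG (illustrative choices: empirical-CDF marginal Gaussianization and random rotations obtained by QR; the published versions also consider PCA/ICA rotations):
+
+ ```python
+ # Toy RBIG: alternate marginal Gaussianization with a random rotation until the
+ # joint distribution is approximately a multivariate Gaussian.
+ import numpy as np
+ from scipy.stats import norm
+
+ def marginal_gaussianization(X):
+     """Map each column to approximately N(0,1) through its empirical CDF."""
+     n = X.shape[0]
+     ranks = X.argsort(0).argsort(0) + 1          # 1..n ranks per column
+     return norm.ppf(ranks / (n + 1.0))
+
+ def rbig(X, n_iters=20, seed=0):
+     rng = np.random.default_rng(seed)
+     Z = X.copy()
+     for _ in range(n_iters):
+         Z = marginal_gaussianization(Z)
+         Q, _ = np.linalg.qr(rng.standard_normal((Z.shape[1], Z.shape[1])))  # random rotation
+         Z = Z @ Q
+     return Z
+
+ # Toy non-Gaussian data: a noisy ring becomes close to an isotropic 2D Gaussian.
+ rng = np.random.default_rng(3)
+ theta = rng.uniform(0, 2 * np.pi, 2000)
+ X = np.c_[np.cos(theta), np.sin(theta)] + 0.1 * rng.standard_normal((2000, 2))
+ print(np.round(np.cov(rbig(X).T), 2))            # approaches the identity matrix
+ ```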
+
+
+imagenes:
+ - ruta: 'vision_science.webp'
+ titulo: "Vision Science"
+ descripcion: "The PSF of this nice human eye can be measured in-vivo. Related publication: Opth.Phys.Opt.97"
+ - ruta: 'anim_gif_v1_resp.gif'
+ titulo: "Linear V1 Cells"
+ descripcion: "Linear response of V1 cells (see model here: Front.Neurosci.15)"
+ - ruta: 'anim_gif_mt_resp.gif'
+ titulo: "Linear MT Cells"
+ descripcion: "Linear response of MT cells (see the CODE for V1 and MT cells HERE)"
+ - ruta: 'anim_gif.gif'
+ titulo: "fMRI Recordings"
+ descripcion: "fMRI recordings at Hospital La Fe (NeuroImage Unit)"
+ - ruta: 'anim_gif_texture.gif'
+ titulo: "Texture Vision"
+ descripcion: "Texture Vision analysis. Related publications: J.Mod.Opt.97, Neur.Comp.10"
+ - ruta: 'anim_gif_color.gif'
+ titulo: "Color Vision"
+ descripcion: "Color vision study involving chromatic anomalies, adaptation, and aftereffects. Related publications: Vis.Res.97, JOSA04"
+ - ruta: 'anim_gif_motion2.gif'
+ titulo: "Motion Vision"
+ descripcion: "Motion vision study, focusing on the computation of optical flow in perceptually relevant moving regions. Related publications: J.Vis.01, IEEE TIP01"
+
+ - ruta: 'experiment1.webp'
+ titulo: "[Double-pass Measurement Setup](#experiments-in-vision-science)"
+    descripcion: "Double-pass setup for measuring the Modulation Transfer Function of the human eye. Related publication: Opth.Phys.Opt.97"
+ - ruta: 'method1.webp'
+ titulo: "[Spectrally Calibrated Light Sources](#experiments-in-vision-science)"
+ descripcion: "Spectrally calibrated light sources used for gathering accurate color image statistics"
+ - ruta: 'method2.webp'
+ titulo: "[Image Colorimeter and Spectroradiometer](#experiments-in-vision-science)"
+ descripcion: "Image colorimeter and spectroradiometer used for accurate measurements in visual experiments"
+
+ - ruta: 'motion.webp'
+ titulo: "[Empirical Motion Model](#theory-empirical-models-in-vision-science)"
+ descripcion: "Waving hands sequence demonstrating the empirical motion model based on spatio-temporal wavelet-like filters"
+ - ruta: 'dicromat.webp'
+ titulo: "[Color Blind Simulation](#theory-empirical-models-in-vision-science)"
+ descripcion: "Simulation of color blindness with Picasso's Dora Maar to illustrate how dichromats perceive colors differently"
+
+ - ruta: 'estimulac.webp'
+ titulo: "[Optimal Adaptation and Information Transmission](#theory-principled-models-in-vision-science)"
+ descripcion: "Illustrative organization principle: optimal adaptation and information transmission under noise constraints"
+ - ruta: 'resp1.webp'
+ titulo: "[Response Example 1](#theory-principled-models-in-vision-science)"
+ descripcion: "Illustration of response shifts in V1 neurons under different visual scene illumination conditions"
+ - ruta: 'resp2.webp'
+ titulo: "[Response Example 2](#theory-principled-models-in-vision-science)"
+ descripcion: "Illustration of response changes in V1 neurons due to optimal adaptation to varying visual stimuli"
+
+ - ruta: 'data_metric.webp'
+ titulo: "[Principal Polynomial Analysis Example](#theory-statistical-learning-for-vision-science)"
+    descripcion: "Illustrative example of PPA applied to feature extraction and metric definition on a dataset"
+ - ruta: 'features_1.webp'
+ titulo: "[PPA Features - Input Domain](#theory-statistical-learning-for-vision-science)"
+ descripcion: "Local features obtained in the input domain using Principal Polynomial Analysis (PPA)"
+ - ruta: 'features_2.webp'
+ titulo: "[PPA Features - Transformed Domain](#theory-statistical-learning-for-vision-science)"
+ descripcion: "Local features visualized in the PPA-transformed domain"
+
+
+referencias:
+ - nombre: "Opth.Phys.Opt.97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/OPH97.PS.gz"
+ - nombre: "J.Opt.94"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT94.PS.gz"
+ - nombre: "Proc.SPIE15"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/malo15a-reprint.pdf"
+ - nombre: "Im.Vis.Comp.00"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ivc99.ps.gz"
+ - nombre: "Neur.Comp.12"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Neco_accepted_2012.pdf"
+ - nombre: "IEEE-TGRS14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/manuscr_TGRS_2012_00431.pdf"
+ - nombre: "PLoS-ONE14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Gutmann_PLOS_ONE_2014.pdf"
+ - nombre: "Rem.Sens.Im.Proc.11"
+ url: "http://isp.uv.es/files/rem_sens_im_proc_12_ch02.pdf"
+ - nombre: "Front.Neurosci.15"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/after_effects"
+ - nombre: "J.Mod.Opt.97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JMO97.PS.gz"
+ - nombre: "Neur.Comp.10"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Malo_Laparra_Neural_10b.pdf"
+ - nombre: "MScThesis95"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/msc_jmalo.pdf"
+ - nombre: "Vis.Res.97"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/VISRES97.PS.gz"
+ - nombre: "J.Opt.96"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT96.PS.gz"
+ - nombre: "J.Opt.98"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT98.PS.gz"
+ - nombre: "JOSA04"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/josa_04.pdf"
+ - nombre: "Alheteia08"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Malo_Alheteia_08.pdf"
+ - nombre: "J.Vis.01"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/vss_poster.eps"
+ - nombre: "PhDThesis99"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Redundancy_Reduction_Malo_99.pdf"
+ - nombre: "IEEE TIP01"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ieeeoct01.pdf"
+ - nombre: "IEEE-TGRS13"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/AdaptVQ_ieeetgars_2012.pdf"
+ - nombre: "LNCS11"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/ICANN_2011_v7.pdf"
+ - nombre: "NeuroImag.Meeting11"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/SlidesNeuroImageMeeting11.pdf"
+ - nombre: "Network06"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/V1_from_non_linear_ICA.pdf"
+ - nombre: "J.Opt.95"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/JOPT95.PS.gz"
+ - nombre: "LNCS00"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/spr00.ps"
+ - nombre: "Patt.Recog.03"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/patt_rec03.pdf"
+ - nombre: "LNCS10"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/LNAI10_malo_laparra.pdf"
+ - nombre: "IEEE TNN 11"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/Laparra11.pdf"
+ - nombre: "Int.J.Neur.Syst.14"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/IJNS_Laparra14_accepted_v5.pdf"
+ - nombre: "IEEE JSTSP15"
+ url: "https://huggingface.co/datasets/isp-uv-es/Web_site_legacy/resolve/main/research/visual_neuroscience/drr_jstsp2014_final.pdf"
+ - nombre: "ML CODE"
+ url: "/old_pages/code/soft_feature/ISP - Feature extraction software.html"
+
+enlaces:
+ - nombre: "Scholarpedia: Models of Visual Cortex"
+ url: "http://www.scholarpedia.org/article/Models_of_visual_cortex"
+ - nombre: "Efficient Coding Hypothesis"
+ url: "https://en.wikipedia.org/wiki/Efficient_coding_hypothesis"
+ - nombre: "NeuroImage Unit"
+ url: "https://www.acim.lafe.san.gva.es/acim/?page_id=1229"
+ - nombre: "Texture Vision Dataset"
+ url: "/old_pages/data/after_effects/"
+ - nombre: "Vision and Color Processing Software"
+ url: "/old_pages/code/soft_visioncolor/ISP - Vision and color processing software.html"
+
+type: "research"
+layout: "single2"
+---
+
diff --git a/hugo.toml b/hugo.toml
index 2fba01251..bea6968ab 100644
--- a/hugo.toml
+++ b/hugo.toml
@@ -1,5 +1,5 @@
-#baseURL = "https://ipl-uv.github.io/"
-baseURL = "https://isp.uv.es/github/"
+baseURL = "https://ipl-uv.github.io/"
+#baseURL = "https://isp.uv.es/github/"
publishDir = "docs"
#baseURL = "http://127.0.0.1/isp/"
@@ -29,7 +29,7 @@ theme = "isp_uv"
[[menu.main]]
name = "Research"
- url = "/research/"
+ url = "/research/philosophy"
weight = 3
[[menu.main]]
parent = "Research"
diff --git a/static/images/research/SDGs.webp b/static/images/research/SDGs.webp
new file mode 100644
index 000000000..83d1be6e5
Binary files /dev/null and b/static/images/research/SDGs.webp differ
diff --git a/static/images/research/anim_gif.gif b/static/images/research/anim_gif.gif
new file mode 100644
index 000000000..a2df9dffc
Binary files /dev/null and b/static/images/research/anim_gif.gif differ
diff --git a/static/images/research/anim_gif_color.gif b/static/images/research/anim_gif_color.gif
new file mode 100644
index 000000000..33e42bd71
Binary files /dev/null and b/static/images/research/anim_gif_color.gif differ
diff --git a/static/images/research/anim_gif_motion2.gif b/static/images/research/anim_gif_motion2.gif
new file mode 100644
index 000000000..d7bc93afa
Binary files /dev/null and b/static/images/research/anim_gif_motion2.gif differ
diff --git a/static/images/research/anim_gif_mt_resp.gif b/static/images/research/anim_gif_mt_resp.gif
new file mode 100644
index 000000000..91f9d6210
Binary files /dev/null and b/static/images/research/anim_gif_mt_resp.gif differ
diff --git a/static/images/research/anim_gif_texture.gif b/static/images/research/anim_gif_texture.gif
new file mode 100644
index 000000000..f8ee01d07
Binary files /dev/null and b/static/images/research/anim_gif_texture.gif differ
diff --git a/static/images/research/anim_gif_v1_resp.gif b/static/images/research/anim_gif_v1_resp.gif
new file mode 100644
index 000000000..82d0d7a66
Binary files /dev/null and b/static/images/research/anim_gif_v1_resp.gif differ
diff --git a/static/images/research/animated_coder.gif b/static/images/research/animated_coder.gif
new file mode 100644
index 000000000..a0f7dcc4d
Binary files /dev/null and b/static/images/research/animated_coder.gif differ
diff --git a/static/images/research/animated_video_coding.gif b/static/images/research/animated_video_coding.gif
new file mode 100644
index 000000000..730d6adaa
Binary files /dev/null and b/static/images/research/animated_video_coding.gif differ
diff --git a/static/images/research/clasi1.webp b/static/images/research/clasi1.webp
new file mode 100644
index 000000000..b62d01601
Binary files /dev/null and b/static/images/research/clasi1.webp differ
diff --git a/static/images/research/visual_brain/clasi2.webp b/static/images/research/clasi2.webp
similarity index 100%
rename from static/images/research/visual_brain/clasi2.webp
rename to static/images/research/clasi2.webp
diff --git a/static/images/research/data_metric.webp b/static/images/research/data_metric.webp
new file mode 100644
index 000000000..7e7aa2abf
Binary files /dev/null and b/static/images/research/data_metric.webp differ
diff --git a/static/images/research/dicromat.webp b/static/images/research/dicromat.webp
new file mode 100644
index 000000000..cba0a556c
Binary files /dev/null and b/static/images/research/dicromat.webp differ
diff --git a/static/images/research/estimulac.webp b/static/images/research/estimulac.webp
new file mode 100644
index 000000000..32a4036bf
Binary files /dev/null and b/static/images/research/estimulac.webp differ
diff --git a/static/images/research/experiment1.webp b/static/images/research/experiment1.webp
new file mode 100644
index 000000000..a03e3bd66
Binary files /dev/null and b/static/images/research/experiment1.webp differ
diff --git a/static/images/research/features_1.webp b/static/images/research/features_1.webp
new file mode 100644
index 000000000..464a7bc5a
Binary files /dev/null and b/static/images/research/features_1.webp differ
diff --git a/static/images/research/features_2.webp b/static/images/research/features_2.webp
new file mode 100644
index 000000000..db85b273b
Binary files /dev/null and b/static/images/research/features_2.webp differ
diff --git a/static/images/research/flor1.webp b/static/images/research/flor1.webp
new file mode 100644
index 000000000..1571f368d
Binary files /dev/null and b/static/images/research/flor1.webp differ
diff --git a/static/images/research/visual_brain/flor2.webp b/static/images/research/flor2.webp
similarity index 100%
rename from static/images/research/visual_brain/flor2.webp
rename to static/images/research/flor2.webp
diff --git a/static/images/research/im_coding.webp b/static/images/research/im_coding.webp
new file mode 100644
index 000000000..b90d6c83d
Binary files /dev/null and b/static/images/research/im_coding.webp differ
diff --git a/static/images/research/visual_brain/image_processing.webp b/static/images/research/image_processing.webp
similarity index 100%
rename from static/images/research/visual_brain/image_processing.webp
rename to static/images/research/image_processing.webp
diff --git a/static/images/research/method1.webp b/static/images/research/method1.webp
new file mode 100644
index 000000000..da697cf65
Binary files /dev/null and b/static/images/research/method1.webp differ
diff --git a/static/images/research/method2.webp b/static/images/research/method2.webp
new file mode 100644
index 000000000..798045ecb
Binary files /dev/null and b/static/images/research/method2.webp differ
diff --git a/static/images/research/metrics.webp b/static/images/research/metrics.webp
new file mode 100644
index 000000000..51e5ec9bd
Binary files /dev/null and b/static/images/research/metrics.webp differ
diff --git a/static/images/research/motion.webp b/static/images/research/motion.webp
new file mode 100644
index 000000000..b0d93ae86
Binary files /dev/null and b/static/images/research/motion.webp differ
diff --git a/static/images/research/resp1.webp b/static/images/research/resp1.webp
new file mode 100644
index 000000000..e0092767a
Binary files /dev/null and b/static/images/research/resp1.webp differ
diff --git a/static/images/research/resp2.webp b/static/images/research/resp2.webp
new file mode 100644
index 000000000..dc82f5b8f
Binary files /dev/null and b/static/images/research/resp2.webp differ
diff --git a/static/images/research/ruidos_great.webp b/static/images/research/ruidos_great.webp
new file mode 100644
index 000000000..6a4770fad
Binary files /dev/null and b/static/images/research/ruidos_great.webp differ
diff --git a/static/images/research/video_coding.webp b/static/images/research/video_coding.webp
new file mode 100644
index 000000000..be5565200
Binary files /dev/null and b/static/images/research/video_coding.webp differ
diff --git a/static/images/research/visual_neuroscience/vision_science.webp b/static/images/research/vision_science.webp
similarity index 100%
rename from static/images/research/visual_neuroscience/vision_science.webp
rename to static/images/research/vision_science.webp
diff --git a/static/images/research/visual_brain/clasi1.webp b/static/images/research/visual_brain/clasi1.webp
deleted file mode 100644
index c4fac4a2b..000000000
Binary files a/static/images/research/visual_brain/clasi1.webp and /dev/null differ
diff --git a/static/images/research/visual_brain/flor1.webp b/static/images/research/visual_brain/flor1.webp
deleted file mode 100644
index 027415a28..000000000
Binary files a/static/images/research/visual_brain/flor1.webp and /dev/null differ
diff --git a/static/images/research/visual_brain/im_coding.webp b/static/images/research/visual_brain/im_coding.webp
deleted file mode 100644
index 6c920050b..000000000
Binary files a/static/images/research/visual_brain/im_coding.webp and /dev/null differ
diff --git a/static/images/research/visual_brain/metrics.webp b/static/images/research/visual_brain/metrics.webp
deleted file mode 100644
index 78f53679d..000000000
Binary files a/static/images/research/visual_brain/metrics.webp and /dev/null differ
diff --git a/static/images/research/visual_brain/ruidos_great.webp b/static/images/research/visual_brain/ruidos_great.webp
deleted file mode 100644
index 2d7dd83b1..000000000
Binary files a/static/images/research/visual_brain/ruidos_great.webp and /dev/null differ
diff --git a/static/images/research/visual_brain/video_coding.webp b/static/images/research/visual_brain/video_coding.webp
deleted file mode 100644
index 91cce2cb8..000000000
Binary files a/static/images/research/visual_brain/video_coding.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/data_metric.webp b/static/images/research/visual_neuroscience/data_metric.webp
deleted file mode 100644
index fcf16ba03..000000000
Binary files a/static/images/research/visual_neuroscience/data_metric.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/dicromat.webp b/static/images/research/visual_neuroscience/dicromat.webp
deleted file mode 100644
index fe065a57a..000000000
Binary files a/static/images/research/visual_neuroscience/dicromat.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/estimulac.webp b/static/images/research/visual_neuroscience/estimulac.webp
deleted file mode 100644
index 8c4216d79..000000000
Binary files a/static/images/research/visual_neuroscience/estimulac.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/experiment1.webp b/static/images/research/visual_neuroscience/experiment1.webp
deleted file mode 100644
index d04897dd8..000000000
Binary files a/static/images/research/visual_neuroscience/experiment1.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/features_1.webp b/static/images/research/visual_neuroscience/features_1.webp
deleted file mode 100644
index b5284f0fa..000000000
Binary files a/static/images/research/visual_neuroscience/features_1.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/features_2.webp b/static/images/research/visual_neuroscience/features_2.webp
deleted file mode 100644
index 7133beb4e..000000000
Binary files a/static/images/research/visual_neuroscience/features_2.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/method1.webp b/static/images/research/visual_neuroscience/method1.webp
deleted file mode 100644
index f91a0cf8d..000000000
Binary files a/static/images/research/visual_neuroscience/method1.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/method2.webp b/static/images/research/visual_neuroscience/method2.webp
deleted file mode 100644
index e7eb04b1d..000000000
Binary files a/static/images/research/visual_neuroscience/method2.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/motion.webp b/static/images/research/visual_neuroscience/motion.webp
deleted file mode 100644
index 342f96386..000000000
Binary files a/static/images/research/visual_neuroscience/motion.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/resp1.webp b/static/images/research/visual_neuroscience/resp1.webp
deleted file mode 100644
index 89359b480..000000000
Binary files a/static/images/research/visual_neuroscience/resp1.webp and /dev/null differ
diff --git a/static/images/research/visual_neuroscience/resp2.webp b/static/images/research/visual_neuroscience/resp2.webp
deleted file mode 100644
index dec25820d..000000000
Binary files a/static/images/research/visual_neuroscience/resp2.webp and /dev/null differ
diff --git a/static/style/style.css b/static/style/style.css
index 99289693a..8d3d4a5b4 100644
--- a/static/style/style.css
+++ b/static/style/style.css
@@ -404,13 +404,6 @@ body.section-contact .navbar-nav .nav-link[href="/contact/"] {
-
-
-
-
-
-
-
/* CODE - DATA - PEOPLE */
.content-container {
margin: 2rem auto;
@@ -449,15 +442,20 @@ body.section-contact .navbar-nav .nav-link[href="/contact/"] {
margin-bottom: 0.4rem;
box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.1);
transition: box-shadow 0.3s ease, transform 0.3s ease;
+ display: flex;
+ justify-content: center;
+ align-items: center;
}
.grid-item img {
+ display: flex;
border-radius: 0.3125rem;
height: 4.5rem;
width: 4.5rem;
margin-right: 1.25rem;
object-fit: contain;
transition: transform 0.5s ease;
+ justify-content: center;
}
.grid-item .text {
@@ -812,6 +810,12 @@ body.section-contact .navbar-nav .nav-link[href="/contact/"] {
margin: auto;
}
+.panel-description h5 p a {
+ color: #337ab7;
+ text-decoration: none;
+ transition: color 0.3s ease;
+}
+
.panel-description h5 {
font-size: clamp(0.5rem, 2.5vw, 0.9rem);
color: #333;
@@ -1392,6 +1396,12 @@ max-height: 100%;
margin: 0;
}
+.box-content p a {
+ color: #337ab7;
+ text-decoration: none;
+ transition: color 0.3s ease;
+}
+
.box-content ul li a {
color: #337ab7;
@@ -1447,9 +1457,7 @@ max-height: 100%;
font-size: clamp(0.45rem, 2.5vw, 0.9rem);
}
-/* sadfasf */
-
-
+/*
.gallery-item {
background-color: #fff;
padding: 1rem;
@@ -1462,7 +1470,6 @@ max-height: 100%;
width: 80%;
}
-
.gallery-item img {
width: 100%;
max-width: 100%;
@@ -1470,35 +1477,48 @@ max-height: 100%;
border-radius: 0.3125rem;
object-fit: cover;
}
-
.gallery-title {
font-size: clamp(0.3rem, 2.5vw, 0.6rem);
line-height: 1.6;
margin: 0;
font-weight: bold;
}
-
.gallery-title a {
color: #337ab7;
text-decoration: none;
transition: color 0.3s ease;
}
-
-
.gallery-description {
font-size: clamp(0.4rem, 2.5vw, 0.7rem);
}
-
.gallery-description p {
text-align: justify;
}
-
.gallery-description p a {
color: #337ab7;
text-decoration: none;
transition: color 0.3s ease;
+} */
+
+
+/* Contact */
+.col-md-8 {
+ display: flex;
+ align-content: center;
+ justify-content: center;
+ padding: 0;
+}
+.map-iframe {
+ width: 100%;
+ height: 100%;
+ border: 0;
+}
+
+.map-container {
+ width: 100%;
}
+.grid-item img {}
diff --git a/themes/isp_uv/layouts/contact/list.html b/themes/isp_uv/layouts/contact/list.html
index 1502d5fdd..0fbf139ca 100644
--- a/themes/isp_uv/layouts/contact/list.html
+++ b/themes/isp_uv/layouts/contact/list.html
@@ -1,58 +1,46 @@
{{ define "main" }}
-
-
Contact
+
+
+
Contact
+
-
-
-
-
-
-
-
-
-
-
+
+
+ class="map-iframe" allowfullscreen="" loading="lazy">
-
-
{{ end }}
diff --git a/themes/isp_uv/layouts/courses/list2.html b/themes/isp_uv/layouts/courses/list2.html
new file mode 100644
index 000000000..7dcd0ecea
--- /dev/null
+++ b/themes/isp_uv/layouts/courses/list2.html
@@ -0,0 +1,41 @@
+{{ define "main" }}
+
+
+
{{ .Params.title }}
+
+ {{ range .Pages }}
+ {{ $img := urls.JoinPath "/images/courses" .Params.img }}
+
+
+
+
+
+
+
+
{{ .Params.description | markdownify }}
+
+
+
+ {{ if .Params.references }}
+
+
References
+
+ {{ range .Params.references }}
+ - {{ . | markdownify }}
+ {{ end }}
+
+
+ {{ end }}
+
+
+
+ {{ end }}
+
{{ .RawContent | markdownify }}
+
+{{ end }}
diff --git a/themes/isp_uv/layouts/facilities/list2.html b/themes/isp_uv/layouts/facilities/list2.html
new file mode 100644
index 000000000..7db125d02
--- /dev/null
+++ b/themes/isp_uv/layouts/facilities/list2.html
@@ -0,0 +1,41 @@
+{{ define "main" }}
+
+
+
{{ .Params.title }}
+
+ {{ range .Pages }}
+ {{ $img := urls.JoinPath "/images/facilities" .Params.img }}
+
+
+
+
+
+
+
+
{{ .Params.description | markdownify }}
+
+
+
+ {{ if .Params.references }}
+
+
References
+
+ {{ range .Params.references }}
+ - {{ . | markdownify }}
+ {{ end }}
+
+
+ {{ end }}
+
+
+
+ {{ end }}
+
{{ .RawContent | markdownify }}
+
+{{ end }}
diff --git a/themes/isp_uv/layouts/projects/single.html b/themes/isp_uv/layouts/projects/single.html
index d6e17f446..9d72d4341 100644
--- a/themes/isp_uv/layouts/projects/single.html
+++ b/themes/isp_uv/layouts/projects/single.html
@@ -1,5 +1,4 @@
{{ define "main" }}
-{{ $img := urls.JoinPath "/images/projects" .Params.logo }}
{{ .Title }}
diff --git a/themes/isp_uv/layouts/research/single.html b/themes/isp_uv/layouts/research/single.html
index fd6ade50a..c07794056 100644
--- a/themes/isp_uv/layouts/research/single.html
+++ b/themes/isp_uv/layouts/research/single.html
@@ -1,17 +1,53 @@
{{ define "main" }}
-{{ .Content }}
-
-{{ with .Params.projects }}
-{{ $projects := where $.Site.RegularPages "Section" "projects" }}
-{{ $projects = where $projects "Params.short" "in" . }}
-
-
Related projects
-
-
-
-{{ partial "projects.html" $projects }}
-
+
+
+
{{ .Title }}
+
+
+
{{ .Content | markdownify }}
+ {{ $currentDir := .File.Dir }}
+ {{ $activePages := where .Pages "File.Dir" $currentDir }}
+ {{ if gt (len $activePages) 0 }}
+
Related Projects
+
+ {{ range $activePages }}
+ {{ $logo := urls.JoinPath "/images/projects" .Params.logo }}
+
+
+
+
{{ .Title }}
+ {{ with .Params.uvpi }}
+
UV-PIs: {{ . }}
+ {{ end }}
+ {{ with .Params.pi }}
+
PIs: {{ . }}
+ {{ end }}
+ {{ with .Params.years }}
+
Duration: ({{ . }})
+ {{ end }}
+ {{ with .Params.funding_source }}
+
Funding Source: {{ . }}
+ {{ end }}
+ {{ with .Params.role }}
+
Role: {{ . }}
+ {{ end }}
+ {{ with .Params.project_type }}
+
Project Type: {{ . }}
+ {{ end }}
+ {{ with .Params.partners }}
+
Partners: {{ delimit . ", " }}
+ {{ end }}
+
+
+ {{ end }}
+
+ {{ else }}
+
No current projects available.
+ {{ end }}
+
{{ end }}
-
-{{ end }}
diff --git a/themes/isp_uv/layouts/research/single2.html b/themes/isp_uv/layouts/research/single2.html
new file mode 100644
index 000000000..f3135df93
--- /dev/null
+++ b/themes/isp_uv/layouts/research/single2.html
@@ -0,0 +1,85 @@
+{{ define "main" }}
+
+
+
+
{{ .Params.abstract | markdownify }}
+
+ {{ if .Params.imagenes }}
+
+ {{ end }}
+ {{ if .Params.referencias }}
+
+ References
+
+ {{ range .Params.referencias }}
+ -
+ {{ if .url }}
+ {{ .nombre }}
+ {{ else }}
+ {{ .nombre }}
+ {{ end }}
+ {{ .autores | markdownify }}
+ {{ .publicacion | markdownify }}
+
+ {{ end }}
+
+
+ {{ end }}
+ {{ if .Params.enlaces }}
+
+ Download
+ {{ if .Params.desc_download }}
+
+
{{ .Params.desc_download | markdownify }}
+
+ {{ end }}
+
+
+ {{ end }}
+
+{{ end }}