@@ -20,13 +20,16 @@ import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.catalyst.util.ArrayBasedMapData
+import org.json4s.Extraction
+import org.json4s.jackson.JsonMethods.{compact, parse, render}
 import org.junit.runner.RunWith
+import org.scalatest.Inside
 import org.scalatest.junit.JUnitRunner
 
 import scala.collection.mutable.WrappedArray
 
 @RunWith(classOf[JUnitRunner])
-class RowSerializerSpec extends XDSerializationTest[Row] with CrossdataCommonSerializer {
+class RowSerializerSpec extends XDSerializationTest[Row] with CrossdataCommonSerializer with Inside {
 
   lazy val schema = StructType(List(
     StructField("int",IntegerType,true),
@@ -112,4 +115,20 @@ class RowSerializerSpec extends XDSerializationTest[Row] with CrossdataCommonSer
     TestCase("marshall & unmarshall a row with schema", rowWithSchema)
   )
 
+  it should "be able to recover Double values when their schema type is misleading" in {
+
+    val schema = StructType(List(StructField("decimaldouble", DecimalType(10,1), true)))
+    val row = Row.fromSeq(Array(32.1))
+
+    val formats = json4sJacksonFormats + new RowSerializer(schema)
+
+    val serialized = compact(render(Extraction.decompose(row)(formats)))
+    val extracted = parse(serialized, false).extract[Row](formats, implicitly[Manifest[Row]])
+
+    inside(extracted) {
+      case r: Row => r.get(0) shouldBe Decimal(32.1)
+    }
+
+  }
+
 }
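
For context, the added test follows the standard json4s custom-serializer round trip: decompose a value to a JValue with an augmented Formats, render it to a JSON string, then parse and extract it back. Below is a minimal, self-contained sketch of that same pattern; the Temperature class and TemperatureSerializer are hypothetical stand-ins for Spark's Row and Crossdata's RowSerializer, illustrating how a custom serializer can restore a precise numeric type from a plain JSON double.

import org.json4s._
import org.json4s.jackson.JsonMethods.{compact, parse, render}

// Hypothetical stand-in for Row: a value whose JSON form loses type information.
case class Temperature(celsius: BigDecimal)

// Plays the role RowSerializer plays in the test: it owns both decomposition
// to JSON and extraction back to the typed value.
object TemperatureSerializer extends CustomSerializer[Temperature](_ => (
  { case JDouble(d) => Temperature(BigDecimal(d)) },  // recover the decimal from a JSON double
  { case Temperature(c) => JDouble(c.toDouble) }      // emit as a plain JSON number
))

object RoundTripSketch extends App {
  implicit val formats: Formats = DefaultFormats + TemperatureSerializer

  val original = Temperature(BigDecimal("32.1"))

  // Same shape as the test: decompose -> render -> compact, then parse -> extract.
  val serialized = compact(render(Extraction.decompose(original)))
  val recovered = parse(serialized).extract[Temperature]

  assert(recovered == original, s"expected $original, got $recovered")
}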