Changeset 6077
- Timestamp: Aug 3, 2012, 12:53:22 PM
- Location: trunk
- Files: 4 edited
trunk/doc/src/docbook/appendix/incompatible.xml (r5983 → r6077)

      and backwards compatible.
    </para>
+
+   <sect1 id="appendix.incompatible.3.2">
+     <title>BASE 3.2 release</title>
+
+     <bridgehead>BASEfile exporter automatically closes the output stream</bridgehead>
+     <para>
+       The implementation of the BASEfile exporter has been changed to
+       automatically close the provided output stream when the export
+       is complete. Clients that need the old behavior should call
+       <code>BaseFileExporter.setAutoCloseWriters(false)</code> before
+       using it.
+     </para>
+   </sect1>

    <sect1 id="appendix.incompatible.3.1">
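The documented opt-out is a single call on the exporter before the export is started. A minimal client-side sketch follows; the concrete exporter instance and the way the export is triggered are assumptions taken from the client's own code, and only the setAutoCloseWriters(false) call comes from this changeset:

    // 'exporter' stands for whatever BaseFileExporter subclass the client already uses.
    exporter.setAutoCloseWriters(false);   // keep the pre-3.2 behavior: writers stay open
    // ... run the export exactly as before ...
    // The client is now responsible for closing the output stream itself.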
trunk/src/core/net/sf/basedb/util/export/spotdata/BaseFileExporter.java (r5405 → r6077)

    private Map<String, String> parameters;
    private BaseFileWriter out;
+   private boolean autoCloseWriters;

    protected BaseFileExporter()
    {
      this.parameters = new LinkedHashMap<String, String>();
+     this.autoCloseWriters = true;
    }
  …
    {
      this.out = out;
+   }
+
+   /**
+     If this option is set then all writers are automatically closed
+     when all data has been written to them. This setting is enabled by default.
+     @since 3.2
+   */
+   public void setAutoCloseWriters(boolean autoClose)
+   {
+     this.autoCloseWriters = autoClose;
    }
  …
      exportAssaysSectionData();
      return true;
+   }
+
+   @Override
+   protected void endExport(RuntimeException e)
+   {
+     if (out != null && autoCloseWriters) out.close();
+     super.endExport(e);
    }
    // -------------------------------------------
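A typical case where the new default has to be switched off is when the BASEfile is only one entry in a larger stream, for example a zip archive that must stay open for further entries. The following sketch is illustrative only, not BASE code: the zip handling is standard java.util.zip, the export call itself is replaced by a stand-in interface, and only the setAutoCloseWriters(false) behavior is taken from this changeset.

    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipOutputStream;

    /** Illustrative only: why a caller might need setAutoCloseWriters(false). */
    public class ZipExportSketch
    {
      // Stand-in for the real exporter call, which is omitted here.
      interface BaseFileExport { void runBaseFileExport(OutputStream out) throws IOException; }

      static void exportIntoArchive(BaseFileExport export) throws IOException
      {
        try (ZipOutputStream zip = new ZipOutputStream(new FileOutputStream("exports.zip")))
        {
          zip.putNextEntry(new ZipEntry("data.basefile"));
          // With the 3.2 default the exporter would close 'zip' after this entry;
          // a caller in this situation calls exporter.setAutoCloseWriters(false)
          // first so the archive stays open for the next entry.
          export.runBaseFileExport(zip);
          zip.closeEntry();

          zip.putNextEntry(new ZipEntry("README.txt"));
          zip.write("Exported from BASE".getBytes("UTF-8"));
          zip.closeEntry();
        }
      }
    }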
trunk/src/core/net/sf/basedb/util/importer/spotdata/BaseFileImporter.java (r5689 → r6077)

    import java.io.IOException;
+   import java.io.InputStream;
    import java.sql.SQLException;
    import java.util.ArrayList;
  …
    import net.sf.basedb.core.signal.ThreadSignalHandler;
    import net.sf.basedb.util.ChainedProgressReporter;
+   import net.sf.basedb.util.FileUtil;
    import net.sf.basedb.util.basefile.BaseFileParser;
    import net.sf.basedb.util.importer.FileWrapper;
  …
      parser.setProgressReporter(new SimpleAbsoluteProgressReporter(chainedProgress, srcFile.getSize()));
    }
-   FlatFileParser ffp = parser.parse(srcFile.getInputStream(), srcFile.getCharacterSet());
-   int totalLines = ffp.getParsedLines();
-
-   // If the BASEfile didn't have any 'spots' sections we don't
-   // create a child bioassay set
-   if (parser.getSectionCount("spots") == 0) return null;
-
-   // Create the child bioassay set and bioassays
-   boolean useNewDataCube = info.getAssaysHaveParentAssaysMapping()
-     || info.getChildHasDifferentReporterPositionMapping();
-   BioAssaySet child = createChildBioAssaySet(dc, info, t, useNewDataCube);
-   child.setIntensityTransform(transform);
-   createChildBioAssays(dc, child, info, useNewDataCube);
-
-   // Create the position/reporter mapping if needed
-   if (useNewDataCube)
-   {
-     if (chainedProgress != null) chainedProgress.setRange(30, 40);
-     createChildPositionReporterMapping(child, info, chainedProgress);
-     if (!info.getChildHasDifferentReporterPositionMapping() && child.getRawDataType().isStoredInDb())
-     {
-       // If the child and parent has identical position/reporter mapping
-       // we can "calculate" new raw data mappings.
-       if (chainedProgress != null) chainedProgress.setRange(40, 50);
-       createChildRawDataMapping(child, parent, info, chainedProgress);
-     }
-   }
-
-   // Second parser pass: progress=50-100%
-   SecondPassSectionSpotsParser spotParser2 =
-     new SecondPassSectionSpotsParser(dc, info, child, totalLines);
-   BaseFileParser secondParser = new BaseFileParser();
-   secondParser.copyRedefinedColumnNames(parser);
-   secondParser.setSectionParser("spots", spotParser2);
-   if (chainedProgress != null)
-   {
-     chainedProgress.setRange(50, 100);
-     secondParser.setProgressReporter(new SimpleAbsoluteProgressReporter(chainedProgress, totalLines));
-   }
-   secondParser.parse(srcFile.getInputStream(), srcFile.getCharacterSet());
-   return child;
+   InputStream srcIn = srcFile.getInputStream();
+   try
+   {
+     FlatFileParser ffp = parser.parse(srcIn, srcFile.getCharacterSet());
+     int totalLines = ffp.getParsedLines();
+
+     // If the BASEfile didn't have any 'spots' sections we don't
+     // create a child bioassay set
+     if (parser.getSectionCount("spots") == 0) return null;
+
+     // Create the child bioassay set and bioassays
+     boolean useNewDataCube = info.getAssaysHaveParentAssaysMapping()
+       || info.getChildHasDifferentReporterPositionMapping();
+     BioAssaySet child = createChildBioAssaySet(dc, info, t, useNewDataCube);
+     child.setIntensityTransform(transform);
+     createChildBioAssays(dc, child, info, useNewDataCube);
+
+     // Create the position/reporter mapping if needed
+     if (useNewDataCube)
+     {
+       if (chainedProgress != null) chainedProgress.setRange(30, 40);
+       createChildPositionReporterMapping(child, info, chainedProgress);
+       if (!info.getChildHasDifferentReporterPositionMapping() && child.getRawDataType().isStoredInDb())
+       {
+         // If the child and parent has identical position/reporter mapping
+         // we can "calculate" new raw data mappings.
+         if (chainedProgress != null) chainedProgress.setRange(40, 50);
+         createChildRawDataMapping(child, parent, info, chainedProgress);
+       }
+     }
+     FileUtil.close(srcIn);
+
+     // Second parser pass: progress=50-100%
+     SecondPassSectionSpotsParser spotParser2 =
+       new SecondPassSectionSpotsParser(dc, info, child, totalLines);
+     BaseFileParser secondParser = new BaseFileParser();
+     secondParser.copyRedefinedColumnNames(parser);
+     secondParser.setSectionParser("spots", spotParser2);
+     if (chainedProgress != null)
+     {
+       chainedProgress.setRange(50, 100);
+       secondParser.setProgressReporter(new SimpleAbsoluteProgressReporter(chainedProgress, totalLines));
+     }
+     srcIn = srcFile.getInputStream();
+     secondParser.parse(srcIn, srcFile.getCharacterSet());
+     return child;
+   }
+   finally
+   {
+     FileUtil.close(srcIn);
+   }
  }
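The importer change boils down to: open the stream, run the first parser pass, close the stream between passes, reopen it for the second pass, and guarantee a close on every exit path. The following standalone sketch shows that pattern in plain java.io; the two drain() calls are hypothetical stand-ins for the parser passes, and the plain close() calls stand in for FileUtil.close, which in BASE is essentially a null-safe close helper.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    /** Standalone sketch of the two-pass parse-and-close pattern used above. */
    public class TwoPassSketch
    {
      public static void main(String[] args) throws IOException
      {
        InputStream in = new FileInputStream(args[0]);
        try
        {
          long bytesSeen = drain(in);          // stand-in for the first parser pass
          in.close();                          // close between passes, as the importer now does

          in = new FileInputStream(args[0]);   // reopen the source for the second pass
          drain(in);                           // stand-in for the second parser pass
          System.out.println("Two passes over " + bytesSeen + " bytes");
        }
        finally
        {
          in.close();                          // always release whichever stream is current
        }
      }

      // Reads the stream to the end and returns the number of bytes consumed.
      private static long drain(InputStream in) throws IOException
      {
        long n = 0;
        while (in.read() != -1) n++;
        return n;
      }
    }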
trunk/src/plugins/core/net/sf/basedb/plugins/Base1PluginExecuter.java (r5689 → r6077)

    if (!File.exists(dc, d, f.getName()))
    {
+     ThreadSignalHandler.checkInterrupted();
      File newFile = File.getNew(dc, d);
      newFile.setName(f.getName());
      newFile.setMimeTypeAuto(null, null);
      dc.saveItem(newFile);
+     InputStream tmpIn = null;
      try
      {
-       ThreadSignalHandler.checkInterrupted();
-       newFile.upload( new FileInputStream(f), true);
+       tmpIn = new FileInputStream(f);
+       newFile.upload(tmpIn, true);
      }
      catch (FileNotFoundException e)
      {
        continue;
+     }
+     finally
+     {
+       FileUtil.close(tmpIn);
      }
    }
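On Java 7 and later the same guarantee (skip files that vanished between listing and upload, but never leak the opened stream) can be written with try-with-resources instead of an explicit finally. This is a hedged alternative sketch, not what the changeset does and not BASE API; the Uploader interface and the file list are illustrative stand-ins for newFile.upload(tmpIn, true) and the plug-in's result files.

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.List;

    /** Alternative sketch: try-with-resources closes the stream on every path. */
    public class UploadLoopSketch
    {
      // Stand-in for newFile.upload(tmpIn, true) in the real plug-in code.
      interface Uploader { void upload(InputStream in) throws IOException; }

      static void uploadAll(List<File> files, Uploader uploader) throws IOException
      {
        for (File f : files)
        {
          try (InputStream tmpIn = new FileInputStream(f))
          {
            uploader.upload(tmpIn);
          }
          catch (FileNotFoundException e)
          {
            // The file disappeared before it could be opened: skip it,
            // as the original loop does with 'continue'.
          }
        }
      }
    }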