笔记.js — IndoorViewer 逆向/调试笔记(混合内容:中文笔记、THREE.js 代码片段、GLSL shader 源码)
  1. datasets中的orientation为模型整体绕z轴的旋转角度,初始为0
  2. filter中的:
  3. 数据集校准后不变的值有:
  4. dataset_orientation(永远存储最初始的点位的quaternion,在旋转时也不变,因此它等于没有旋转时的orientation)---- image.datasetOrientation
  5. dataset_floor_orientation(一般和dataset_orientation值一样)
  6. dataset_location 真实的三维坐标
  7. dataset_floor_location
  8. 数据集校准后改变的值有:
  9. orientation----image.orientation(在旋转时实时变,且是根据模型旋转度数和dataset_orientation来算的,所以如果dataset_orientation不对,就会算错。)
  10. location----image.location xy为经纬度
  11. floor_location
  12. ------------------------------------------------
  13. 查看全局:
  14. var view = window.IV.getMainView()
  15. view.currentImage.id
  16. view.ImageService.images
  17. POI: 兴趣点 PoiService PoiEditorDirective PoiEntity
  18. t.prototype.isPreviewMenuVisible ---- canDisplayResultDetails --- PoiService.openedPoi - setOpenedPoi
  19. MeasurementLineMaterial : 测量线材质, 有蓝色标准实线和灰色透明虚线两种状态
  20. 数据集校准 saveAlignment = selectedDatasets
  21. this.underlayScene.children[3] 包含32个子mesh, 是全景图sphere 其材质fragment在下方
  22. overlayScene 里有marker , name: "location" ?
  23. QuaternionFactory VectorFactory
  24. 加载深度图loadDepthImage 获取深度值getDepth(用于更新reticule位置)。深度图用于修改全景图sphere的gl_FragDepthEXT
  25. getCoordinates doPointCloudPicking doDepthImagePicking
  26. t.NORMAL = "normal",
  27. t.DATASET_ALIGNMENT = "datasetAlignment",
  28. t.GEO_REGISTRATION = "GeoRegistration",
  29. t.SITE_MODEL_EDITOR = "SiteModelEditor",
  30. t.NAV_GRAPH_EDITOR = "NavGraphEditor",
  31. t.DOWNLOAD_POINT_CLOUD = "DownloadPointCloud",
  32. t.MEASUREMENTS = "Measurements"
  33. //--关于地图和 地图上的图片-------关键词mapSizeM
  34. updateSubTiles更新地图tile,如果不存在就加载
  35. //图片上传https://testlaser.4dkankan.com/maxkk/t-iksBApb/locat/addDataSet.html
  36. var QuaternionFactory = { // 同 IndoorViewerAPI的 QuaternionFactory.toArray
  37. toArray : function(quaternion){
  38. var rot90 = (new THREE.Quaternion).setFromAxisAngle(new THREE.Vector3(0,0,1), THREE.Math.degToRad(-90)) //add 转入时旋转90度
  39. , rot90Invert = rot90.clone().inverse()//add 转出时旋回90度
  40. var t1 = quaternion.clone().multiply(rot90Invert);
  41. var e = t1.toArray();
  42. return [e[3], e[0], e[1], e[2]]
  43. }
  44. }
  45. //获取旋转:
  46. var getQuaternion = function(angle){//angle:0-360 角度
  47. var quaternion = new THREE.Quaternion().setFromEuler(new THREE.Euler(0,0,THREE.Math.degToRad(-angle)));
  48. return QuaternionFactory.toArray(quaternion)
  49. }
  50. //获取缩放
  51. var getSize = function(imgWidth, scale){//imgWidth:图片宽度, scale缩放值(x==y)
  52. var level = imgWidth / 1024; //以1024为基准
  53. return 95.54628610610962 * level * scale; // 95.54628610610962 = 38.21851444244385 * (2+0.5), 其中38.21851444244385 = mapSizeM / Math.pow(2,maxDepth) = 40075017 / Math.pow(2,20) 可能表示地图在缩放zoom为20时的单块宽度
  54. //0.5是试出来的,因为图片层的bias=0.5, 暂不知道其用处,所以试用了下……
  55. //另:可能不是*2.5, 也许是*256/100 ? 不知道如何精确测试下
  56. //有出现过一次错误是2048时的图但是大了一倍,发现是传图的那个网页在缩放值为0.1(即图为1:1显示,函数canvasFunction(extent, scale )时只有1024大小,后来刷新重新操作就是2048然后就正确。所以可能是这个网页出错。
  57. }
  58. //位置直接使用中心点的经纬度
  59. //-------------------------------------------------
  60. =======shader=======
  61. 全景图 fragment
  62. uniform sampler2D map;
  63. uniform float opacity;
  64. varying vec2 vUv;
  65. #ifdef USE_ALPHAMAP
  66. uniform sampler2D alphaMap;
  67. #endif
  68. #ifdef GL_EXT_frag_depth
  69. uniform sampler2D depthMap;
  70. uniform mat4 inverseProjectionMatrix;
  71. uniform mat4 projectionMatrix;
  72. uniform vec4 viewport;
  73. #endif
  74. void main()
  75. {
  76. vec4 color = texture2D(map, vUv);
  77. float alpha = opacity;
  78. #ifdef USE_ALPHAMAP
  79. alpha *= texture2D(alphaMap, vUv).g;
  80. #endif
  81. gl_FragColor = vec4(color.r, color.g, color.b, alpha);
  82. #ifdef GL_EXT_frag_depth
  83. /*
  84. * Useful resources:
  85. *
  86. * https://www.khronos.org/opengl/wiki/Vertex_Post-Processing#Viewport_transform
  87. * Clipping, perspective divide viewport transform
  88. *
  89. * https://www.khronos.org/opengl/wiki/Compute_eye_space_from_window_space
  90. * From window (viewport) space back to eye space in GLSL
  91. *
  92. * https://www.khronos.org/opengl/wiki/Vertex_Transformation
  93. * Summary of transformations object -> world -> eye (camera, view) -> clip -> NDC -> window
  94. *
  95. * http://slideplayer.com/slide/6837153/#
  96. * Overview presentation
  97. *
  98. * http://www.shaderific.com/glsl-variables/
  99. * GLSL built-in variables
  100. */
  101. vec4 depth = texture2D(depthMap, vUv);
  102. //float distance = depth.r + 256. * (depth.g + 256. * depth.b);
  103. //distance *= 255. * .001; // distance is now in meters
  104. //更改
  105. float distance = (depth.g + depth.r / 256.) * 255.; //为什么要乘以255
  106. // return r[1] + r[0] / 256
  107. vec4 ndcPos;
  108. ndcPos.xy = ((2.0 * gl_FragCoord.xy) - (2.0 * viewport.xy)) / (viewport.zw) - 1.;
  109. ndcPos.z = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) /
  110. (gl_DepthRange.far - gl_DepthRange.near);
  111. ndcPos.w = 1.0;
  112. vec4 clipPos = ndcPos / gl_FragCoord.w;
  113. vec4 eyePos = inverseProjectionMatrix * clipPos;
  114. distance += .1; // add a safety margin
  115. vec4 eyePos2 = vec4(normalize(eyePos.xyz) * distance, 1.);
  116. vec4 clipPos2 = projectionMatrix * eyePos2;
  117. vec4 ndcPos2 = clipPos2 * 1. / clipPos2.w;
  118. gl_FragDepthEXT = 0.5 * ((gl_DepthRange.far - gl_DepthRange.near) * ndcPos2.z
  119. + gl_DepthRange.near + gl_DepthRange.far);
  120. #endif
  121. }
  122. --------
  123. MeasurementLineMaterial vertex
  124. "attribute vec3 previous;
  125. attribute vec3 next;
  126. attribute float side;
  127. attribute float width;
  128. attribute float counters;
  129. uniform vec2 resolution;
  130. uniform float lineWidth;
  131. uniform vec3 color;
  132. uniform float opacity;
  133. uniform float near;
  134. uniform float far;
  135. uniform float sizeAttenuation;
  136. uniform vec3 dashColor;
  137. uniform float dashOpacity;
  138. varying vec2 vUV;
  139. varying vec4 vColor;
  140. varying vec4 vDashColor;
  141. varying float vCounters;
  142. vec2 fix(vec4 i, float aspect)
  143. {
  144. vec2 res = i.xy / i.w;
  145. res.x *= aspect;
  146. vCounters = counters;
  147. return res;
  148. }
  149. // This vertex shader is a copy of the one supplied by MeshLineMaterial.
  150. // It supports drawing dashed lines.
  151. void main()
  152. {
  153. float aspect = resolution.x / resolution.y;
  154. float pixelWidthRatio = 1.0 / (resolution.x * projectionMatrix[0][0]);
  155. vColor = vec4(color, opacity);
  156. vDashColor = vec4(dashColor, dashOpacity);
  157. vUV = uv;
  158. mat4 m = projectionMatrix * modelViewMatrix;
  159. vec4 finalPosition = m * vec4(position, 1.0);
  160. vec4 prevPos = m * vec4(previous, 1.0);
  161. vec4 nextPos = m * vec4(next, 1.0);
  162. vec2 currentP = fix(finalPosition, aspect);
  163. vec2 prevP = fix(prevPos, aspect);
  164. vec2 nextP = fix(nextPos, aspect);
  165. float pixelWidth = finalPosition.w * pixelWidthRatio;
  166. float w = 1.8 * pixelWidth * lineWidth * width;
  167. if (sizeAttenuation == 1.0)
  168. {
  169. w = 1.8 * lineWidth * width;
  170. }
  171. vec2 dir;
  172. if (nextP == currentP)
  173. {
  174. dir = normalize(currentP - prevP);
  175. }
  176. else if (prevP == currentP)
  177. {
  178. dir = normalize(nextP - currentP);
  179. }
  180. else
  181. {
  182. vec2 dir1 = normalize(currentP - prevP);
  183. vec2 dir2 = normalize(nextP - currentP);
  184. dir = normalize(dir1 + dir2);
  185. vec2 perp = vec2(-dir1.y, dir1.x);
  186. vec2 miter = vec2(-dir.y, dir.x);
  187. }
  188. vec2 normal = vec2(-dir.y, dir.x);
  189. normal.x /= aspect;
  190. normal *= .5 * w;
  191. vec4 offset = vec4(normal * side, 0.0, 1.0);
  192. finalPosition.xy += offset.xy;
  193. gl_Position = finalPosition;
  194. }
  195. --------
  196. MeasurementLineMaterial fragment
  197. uniform sampler2D map;
  198. uniform sampler2D alphaMap;
  199. uniform float useMap;
  200. uniform float useAlphaMap;
  201. uniform float useDash;
  202. uniform float dashArray;
  203. uniform float dashOffset;
  204. uniform float dashRatio;
  205. uniform float visibility;
  206. uniform float alphaTest;
  207. uniform vec2 repeat;
  208. uniform sampler2D depthTexture;
  209. uniform sampler2D rgbaTexture;
  210. uniform float nearPlane;
  211. uniform float farPlane;
  212. uniform float occlusionDistance;
  213. uniform float clipDistance;
  214. uniform vec2 viewportSize;
  215. uniform vec2 viewportOffset;
  216. varying vec2 vUV;
  217. varying vec4 vColor;
  218. varying vec4 vDashColor;
  219. varying float vCounters;
  220. // Converts the exponential depth value from the depth buffer to a linear value
  221. // See https://learnopengl.com/Advanced-OpenGL/Depth-testing for more information about this formula
  222. float convertToLinear(float zValue)
  223. {
  224. float z = zValue * 2.0 - 1.0;
  225. return (2.0 * nearPlane * farPlane) / (farPlane + nearPlane - z * (farPlane - nearPlane));
  226. }
  227. void main()
  228. {
  229. vec4 c = vDashColor;
  230. // <-- The following section of the shader is copied from MeshLineMaterial
  231. // Sample the fragment from a texture if such is supplied
  232. if (useMap == 1.0)
  233. {
  234. c *= texture2D(map, vUV * repeat);
  235. }
  236. // Sample the fragment's alpha value from an alpha texture if such is supplied
  237. if (useAlphaMap == 1.0)
  238. {
  239. c.a *= texture2D(alphaMap, vUV * repeat).a;
  240. }
  241. // Discard the fragment if below the alpha threshold
  242. if (c.a < alphaTest)
  243. {
  244. discard;
  245. }
  246. // If the line is dashed, set the alpha value of the fragment according to the line segment it belongs to
  247. if (useDash == 1.0)
  248. {
  249. c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));
  250. }
  251. // <-- end of copied code
  252. #ifdef GL_EXT_frag_depth
  253. // mixFactor and clipFactor define the color mixing proportion between the states of
  254. // full visibility and occluded visibility
  255. // and
  256. // full visibility and total invisibility
  257. float mixFactor = 0.0;
  258. float clipFactor = 0.0;
  259. // The linear depth value of the current fragment
  260. float fragDepth = convertToLinear(gl_FragCoord.z);
  261. // The coordinates of the current fragment in the depth texture
  262. vec2 depthTxtCoords = vec2(gl_FragCoord.x - viewportOffset.x, gl_FragCoord.y) / viewportSize;
  263. // The linear depth value of the pixel occupied by this fragment in the depth buffer
  264. float textureDepth = convertToLinear(texture2D(depthTexture, depthTxtCoords).r);
  265. // The difference between the two depths
  266. float delta = textureDepth - fragDepth;
  267. if (delta < 0.0)
  268. {
  269. // occlusionDistance and clipDistance define the width of the respective zones and
  270. // mixFactor and clipFactor express the interpolation between the two colors depending on the position
  271. // of the current fragment withing those zones.
  272. mixFactor = clamp(delta / occlusionDistance, 0.0, 1.0);
  273. clipFactor = clamp(delta / clipDistance, 0.0, 1.0);
  274. }
  275. // If the fragment is totally transparent, don't bother drawing it
  276. if (clipFactor == 1.0)
  277. {
  278. discard;
  279. }
  280. #else
  281. float mixFactor = 0.0;
  282. float clipFactor = 0.0;
  283. #endif
  284. // Calculate the color of the dashed version of the line
  285. vec4 backColor = vec4(c.rgb, c.a * step(vCounters, visibility));
  286. // Mix between the solid and the dahsed versions of the line according to the mixFactor
  287. gl_FragColor = mix(vColor, backColor, mixFactor);
  288. // Set the alpha value of the fragment according to the clipFactor
  289. // Note that clipFactor was previously clamped [0.0;1.0]
  290. gl_FragColor.a *= (1.0 - clipFactor);
  291. }