2018年11月19日月曜日

CLI - Composite Leading Indicator - OECD


REFER TO THIS ENTRY INSTEAD.

# download the csv from OECD (https://data.oecd.org/leadind/composite-leading-indicator-cli.htm) to "~/Downloads"
# and save it under the file name "CLI3.csv".
# the file contains data for multiple regions, so you have to specify the region name.
# extract the USA entries only.
# execute the commands below in "~/Downloads".
#
sed -n '/USA/p' CLI3.csv |awk -F, '{print $6"-01,"$7}'  |sed 's/\"//g' |awk 'BEGIN{print "DATE,DATA"}{print $0}' > usa.csv
# extract OECD entries and exclude OECDE
sed -n '/OECD[^E]/p' CLI3.csv |awk -F, '{print $6"-01,"$7}'  |sed 's/\"//g' |awk 'BEGIN{print "DATE,DATA"}{print $0}' > oecd.csv
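The same extraction can be done in R instead of the shell; a minimal sketch, assuming the OECD csv keeps the region code in column 1 and, as the awk fields above imply, the period in column 6 and the value in column 7:

# sketch: filter the OECD csv in R instead of sed/awk
# (column positions 1 = region code, 6 = period, 7 = value are assumed)
cli <- read.csv("~/Downloads/CLI3.csv", stringsAsFactors = FALSE)
usa <- cli[cli[, 1] == "USA", ]
write.csv(data.frame(DATE = paste(usa[, 6], "01", sep = "-"), DATA = usa[, 7]),
          "~/Downloads/usa.csv", row.names = FALSE, quote = FALSE)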
#
#sample #1 for usa
#
cli_xts <- merge(as.xts(read.zoo(read.csv("~/Downloads/oecd.csv"))),as.xts(read.zoo(read.csv("~/Downloads/usa.csv"))),suffixes = c("oecd","usa"))

plot.default((cli_xts$usa["2014-07::2018-09"]   / as.vector(cli_xts$usa["2014-01::2018-03"])-1)*100,cli_xts$usa["2014-07::2018-09"])
tmp <- par('usr')
# plot.default((cli_xts$usa["2012-07::2018-09"] / as.vector(cli_xts$usa["2012-01::2018-02"])-1)*100,cli_xts$usa["2012-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
plot.default((cli_xts$usa["2014-07::2018-09"] / as.vector(cli_xts$usa["2014-01::2018-03"])-1)*100,cli_xts$usa["2014-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
par(new=T)
plot.default((cli_xts$usa["2018-01::2018-09"] / as.vector(cli_xts$usa["2017-07::2018-03"])-1)*100,cli_xts$usa["2018-01::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=2,lwd=2)
par(new=T)
plot.default((cli_xts$usa["2017-01::2017-12"] / as.vector(cli_xts$usa["2016-07::2017-06"])-1)*100,cli_xts$usa["2017-01::2017-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=3)
par(new=T)
plot.default((cli_xts$usa["2016-01::2016-12"] / as.vector(cli_xts$usa["2015-07::2016-06"])-1)*100,cli_xts$usa["2016-01::2016-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=4)

#sample #2 for oecd all
plot.default((cli_xts$oecd["2014-07::2018-09"]   / as.vector(cli_xts$oecd["2014-01::2018-03"])-1)*100,cli_xts$oecd["2014-07::2018-09"])
tmp <- par('usr')
# plot.default((cli_xts$oecd["2012-07::2018-09"] / as.vector(cli_xts$oecd["2012-01::2018-02"])-1)*100,cli_xts$oecd["2012-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
plot.default((cli_xts$oecd["2014-07::2018-09"] / as.vector(cli_xts$oecd["2014-01::2018-03"])-1)*100,cli_xts$oecd["2014-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
par(new=T)
plot.default((cli_xts$oecd["2018-01::2018-09"] / as.vector(cli_xts$oecd["2017-07::2018-03"])-1)*100,cli_xts$oecd["2018-01::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=2,lwd=2)
par(new=T)
plot.default((cli_xts$oecd["2017-01::2017-12"] / as.vector(cli_xts$oecd["2016-07::2017-06"])-1)*100,cli_xts$oecd["2017-01::2017-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=3)
par(new=T)
plot.default((cli_xts$oecd["2016-01::2016-12"] / as.vector(cli_xts$oecd["2015-07::2016-06"])-1)*100,cli_xts$oecd["2016-01::2016-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=4)
#
# sample #3 for oecd with the extended period.
#
#
#
plot.default((cli_xts$oecd["2014-07::2018-09"]   / as.vector(cli_xts$oecd["2014-01::2018-03"])-1)*100,cli_xts$oecd["2014-07::2018-09"])
tmp <- par('usr')
# plot.default((cli_xts$oecd["2012-07::2018-09"] / as.vector(cli_xts$oecd["2012-01::2018-02"])-1)*100,cli_xts$oecd["2012-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
plot.default((cli_xts$oecd["2014-07::2018-09"] / as.vector(cli_xts$oecd["2014-01::2018-03"])-1)*100,cli_xts$oecd["2014-07::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),type='b')
par(new=T)
plot.default((cli_xts$oecd["2018-01::2018-09"] / as.vector(cli_xts$oecd["2017-07::2018-03"])-1)*100,cli_xts$oecd["2018-01::2018-09"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=2,lwd=2)
par(new=T)
plot.default((cli_xts$oecd["2017-01::2017-12"] / as.vector(cli_xts$oecd["2016-07::2017-06"])-1)*100,cli_xts$oecd["2017-01::2017-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=3)
par(new=T)
plot.default((cli_xts$oecd["2016-01::2016-12"] / as.vector(cli_xts$oecd["2015-07::2016-06"])-1)*100,cli_xts$oecd["2016-01::2016-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=4)
par(new=T)
plot.default((cli_xts$oecd["2015-01::2015-12"] / as.vector(cli_xts$oecd["2014-07::2015-06"])-1)*100,cli_xts$oecd["2015-01::2015-12"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=
5)
abli
ne(h=100)

abline(v=0)

The chart below is for the OECD as a whole.




2018年11月1日木曜日

Date Format Conversion mondate() index() format()





October 2018 was quite wild. Let us look at the historical data for other such occasions. There were only 9 cases in 11 years where the lowest price of the month was more than 11% below the open.

> to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89]
         GSPC.Low
 1 2008 0.8651744
 9 2008 0.8591352
10 2008 0.7213723
11 2008 0.7649871
 5 2010 0.8756500
 8 2011 0.8521960
 8 2015 0.8871556
 1 2016 0.8891620
10 2018 0.8897068

Use "as.mondate" to calculate one month after the examples.

> as.mondate(index(to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89])[-8])+1
mondate: timeunits="months"
[1] 2008-02-01 2008-10-02 2008-11-01 2008-12-02 2010-06-01 2011-09-01 2015-09-01 2018-11-01

use "format()" to normalize. "%Y-%m" should be the option.

> format(as.mondate(index(to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89])[-9])+1,"%y-%m")
[1] "08-02" "08-10" "08-11" "08-12" "10-06" "11-09" "15-09" "16-02"


> format(as.mondate(index(to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89])[-9])+1,"%Y-%m")
[1] "2008-02" "2008-10" "2008-11" "2008-12" "2010-06" "2011-09" "2015-09" "2016-02"

But index() returns the yearmon class, and the direct conversion from yearmon to mondate triggers a warning message. If you don't like it, insert "as.Date()" between them.

Warning message:
Attempting to convert class 'yearmon' to 'mondate' via 'as.Date' then 'as.numeric'. Check results!

> format(as.mondate(as.Date(index(to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89]) ))+1,"%Y-%m")[-9]
[1] "2008-02" "2008-10" "2008-11" "2008-12" "2010-06" "2011-09" "2015-09" "2016-02"

The results were disappointing. Hopefully this month is the exception.

> monthlyReturn(GSPC)[format(as.mondate(index(to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1][to.monthly(GSPC)[,3]/to.monthly(GSPC)[,1] < 0.89])[-9])+1,"%Y-%m")]
           monthly.returns
2008-02-29    -0.034761193
2008-10-31    -0.169424524
2008-11-28    -0.074849043
2008-12-31     0.007821577
2010-06-30    -0.053882442
2011-09-30    -0.071761988
2015-09-30    -0.026442832
2016-02-29    -0.004128360

You can do a similar calculation with monthlyReturn() itself, as in the sketch below.
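A minimal sketch, reusing the mondate()/format() idiom above to look at the month following each month that lost more than 11% close-to-close:

# sketch: months whose return is below -11%, then the following month's return
mr  <- monthlyReturn(GSPC)
bad <- index(mr[mr < -0.11])                      # end-of-month dates
mr[format(as.mondate(as.Date(bad)) + 1, "%Y-%m")] # shift one month ahead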

2018年10月27日土曜日

merge all data eps spline gpuc


merge all data

result.gpuc <- lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k])
result.eps <- lm(apply.quarterly(SP5[,4][k2k],mean) ~ eps_year_xts[k2k]+apply.quarterly(PA[k2k],mean)+apply.quarterly(CS[k2k],mean)+apply.quarterly(UC[k2k],mean))
SP5.result <- merge(residuals(result.gpuc),predict(result.gpuc),residuals(result.eps),predict(result.eps))

GSPC.predict <- merge(to.monthly(GSPC)[substr(k2k,11,23)],last(spline(seq(1,length(SP5.result[,1]),1),as.vector(SP5.result[,2]),n=length(SP5.result[,1])*3+1)$y,n=length(to.monthly(GSPC)[,1][substr(k2k,11,23)])),last(spline(seq(1,length(SP5.result[,1]),1),as.vector(SP5.result[,4]),n=length(SP5.result[,1])*3+1)$y,n=length(to.monthly(GSPC)[,1][substr(k2k,11,23)])),suffixes=c('','spline','eps'))


plot(merge(GSPC.predict[,4],GSPC.predict[,7],GSPC.predict[,8],GSPC.predict[,4]-GSPC.predict[,7],GSPC.predict[,4]-GSPC.predict[,8]),main="GSPC.predict[,4] vs. GSPC.predict[,7]",grid.ticks.on='months')
tmp.legend <- "Black: actual \nRed: spline\nGreen: eps"
addLegend(legend.loc = "topleft", legend.names = tmp.legend,col=3)
tmp.addTA <- as.xts(rep(2800,length(index(GSPC.predict))),index(GSPC.predict))

addSeries(tmp.addTA,on=1,col=6,lwd=1)

2018年10月22日月曜日

SP5,eps,PAYEMS,UC,CS


> summary(lm(apply.quarterly(SP5[,4][k2k],mean) ~ eps_year_xts[k2k]+apply.quarterly(PA[k2k],mean)+apply.quarterly(CS[k2k],mean)+apply.quarterly(UC[k2k],mean)))

Call:
lm(formula = apply.quarterly(SP5[, 4][k2k], mean) ~ eps_year_xts[k2k] +
    apply.quarterly(PA[k2k], mean) + apply.quarterly(CS[k2k],
    mean) + apply.quarterly(UC[k2k], mean))

Residuals:
    Min      1Q  Median      3Q     Max
-178.63  -65.62   14.98   57.55  320.86

Coefficients:
                                 Estimate Std. Error t value Pr(>|t|) 
(Intercept)                    -8.791e+03  4.347e+02 -20.222  < 2e-16 ***
eps_year_xts[k2k]               7.243e+00  6.983e-01  10.373 1.01e-15 ***
apply.quarterly(PA[k2k], mean)  7.740e-02  3.649e-03  21.210  < 2e-16 ***
apply.quarterly(CS[k2k], mean) -4.994e+00  5.692e-01  -8.774 7.71e-13 ***
apply.quarterly(UC[k2k], mean)  1.304e-01  5.309e-02   2.456   0.0166 *
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 97.83 on 69 degrees of freedom
Multiple R-squared:   0.96, Adjusted R-squared:  0.9577
F-statistic: 413.8 on 4 and 69 DF,  p-value: < 2.2e-16

tmp <- predict(lm(apply.quarterly(SP5[,4][k2k],mean) ~ eps_year_xts[k2k]+apply.quarterly(PA[k2k],mean)+apply.quarterly(CS[k2k],mean)+apply.quarterly(UC[k2k],mean)))
GSPC.predict <- merge(GSPC.predict[,-8],last(spline(seq(1,74,1),tmp,n=220)$y,n=138),suffixes = c("","eps"))

2018年10月18日木曜日

Composite Leading Indicator - OECD

# download csv from oecd(https://data.oecd.org/leadind/composite-leading-indicator-cli.htm)
# store it in CLI3.csv
# this file contains data for multiple regions. you have to specify the name of the region.
# extract USA only entries
# sed -n '/USA/p' CLI3.csv |awk -F, '{print $6"-01,"$7}'  |sed 's/\"//g' |awk 'BEGIN{print "DATE,DATA"}{print $0}' > usa.csv
# extract OECD entries and exclude OECDE
# sed -n '/OECD[^E]/p' CLI3.csv |awk -F, '{print $6"-01,"$7}'  |sed 's/\"//g' |awk 'BEGIN{print "DATE,DATA"}{print $0}' > oecd.csv

cli_xts <- merge(as.xts(read.zoo(read.csv("~/Downloads/oecd.csv"))),as.xts(read.zoo(read.csv("~/Downloads/usa.csv"))),suffixes = c("oecd","usa"))

plot.default((cli_xts$usa["2012-07::2018-08"]   / as.vector(cli_xts$usa["2012-01::2018-02"])-1)*100,cli_xts$usa["2012-07::2018-08"])
tmp <- par('usr')
plot.default((cli_xts$usa["2012-07::2018-08"] / as.vector(cli_xts$usa["2012-01::2018-02"])-1)*100,cli_xts$usa["2012-07::2018-08"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
plot.default((cli_xts$usa["2012-07::2018-08"] / as.vector(cli_xts$usa["2012-01::2018-02"])-1)*100,cli_xts$usa["2012-07::2018-08"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]))
par(new=T)
plot.default((cli_xts$usa["2017-09::2018-08"] / as.vector(cli_xts$usa["2017-03::2018-02"])-1)*100,cli_xts$usa["2017-09::2018-08"] ,xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=2)
par(new=T)
plot.default((cli_xts$usa["2016-09::2017-08"] / as.vector(cli_xts$usa["2016-03::2017-02"])-1)*100,cli_xts$usa["2016-09::2017-08"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=3)
par(new=T)
plot.default((cli_xts$usa["2015-09::2016-08"] / as.vector(cli_xts$usa["2015-03::2016-02"])-1)*100,cli_xts$usa["2015-09::2016-08"], xlim=c( tmp[1],tmp[2]), ylim=c(tmp[3], tmp[4]),col=4)

2018年10月8日月曜日

macos mediawiki


File locations


The document root is:

  /Library/WebServer/Documents/

The wiki root for 1.29.0 looks like this; update.php and LocalSettings.php are found here as well.

   /Library/WebServer/Documents/mediawiki-1.29.0

httpd.conf is in:

  /etc/apache2/

httpd.conf can revert to its stock state when the OS is upgraded, so don't forget to re-enable (uncomment)

   LoadModule php7_module libexec/apache2/libphp7.so

afterwards.


Upgrade notes

After upgrading MediaWiki, don't forget to run

 $ sudo php update.php

update.php should be directly under the MediaWiki distribution package. Run it like this:

   sudo apachectl stop
   sudo php update.php
   sudo apachectl start

If an error occurs


Add the following 3 lines to the end of LocalSettings.php; the error messages will become more detailed.

   $wgShowExceptionDetails = true;
   $wgShowDBErrorBacktrace = true;
   $wgShowSQLErrors = true;



2018年10月6日土曜日

Hiroyuki Sawano - Best of "Epic BGM Music" 澤野弘之【Best 3 Hour Mix】HQ



https://www.youtube.com/watch?v=qAxg37wrVCE

( 0:00 ) 蔑、guy
( 2:49 ) This Is A Fight To Change The World ft. Mika Kobayashi
( 6:17 ) CR€SC∃NT ft. Naoshi Jimbow & Mika Kobayashi
( 8:55 ) Melancholia ft. David Whitaker & Aimee Blackschleger
( 13:01 ) No Naming Sense Type Five-Star Goku Uniform
( 18:11 ) raTEoREkiSImeAra
( 22:06 ) Important Event Highlight Type Twelve-Star Goku Uniform
( 23:59 ) KiryuG@kiLL
( 26:17 ) goriLLAjaL
( 28:15 ) KEKKAI ft. Mpi & Mika Kobayashi
( 33:56 ) RE:ARR.X
( 38:46 ) CODENAMEZ
( 43:56 ) NO.EX01
( 48:08 ) MKAlieZ ft. Mika Kobayashi <a0v>
( 51:45 ) α≠a ft. Mika Kobayashi
( 55:56 ) G-LOW-S→F.S.K.O
( 1:00:50 ) 1st-Mov. : E
( 1:05:47 ) z37b20a13t01t08le
( 1:08:53 ) The Second Movement: A-maimon
( 1:12:27 ) Genesi§
( 1:15:40 ) AcE & ArMs
( 1:20:54 ) [104EYES-29CA2]suite-2楽章
( 1:22:47 ) ətˈæk 0N tάɪtn + ətˈæk 0N tάɪtn <3Tv> ft. Mika Kobayashi
( 1:27:01 ) Shingeki st-hrn-egt20130629 Kyojin + TheWeightOfLives
( 1:30:50 ) EREN The Coordinate + ERENthe標 <MOVIEver.>
( 1:37:09 ) EMAymniam
( 1:40:34 ) E.M.A
( 1:44:20 ) YAMANAIAME ft. Mica Caldito / Mpi / Mika Kobayashi
( 1:48:44 ) 4GL4yu8RE:E + NeLLnaki9 + 高度8b6n + RE:3343 + 音:9RE:eita-zu
( 2:00:36 ) FANTASIA 1st Mov:[Open a title page] Reprogramming
( 2:05:41 ) 横浜-BIGMAN
( 2:07:22 ) MURDER CASE
( 2:10:43 ) BaNG!!
( 2:14:20 ) BOXX!!
( 2:17:42 ) LINK01BPM130KINPAKU
( 2:20:40 ) Dragon Demon <Scroll of the Unification of the Land>
( 2:26:27 ) The First Movement: Mephistopheles
( 2:27:38 ) BLUe-eXOSUiTe-toKYOto-One + BLUe-eXOSUiTe-toKYOto-Two
( 2:37:38 ) ymniam-orch ft. Mika Kobayashi
( 2:40:10 ) ThreeFiveNineFourε ft. Mika Kobayashi
( 2:44:12 ) The Brave ft. Yosh
( 2:47:46 ) Seek Your Fate
( 2:51:03 ) OH92&HrBrM
( 2:53:32 ) MOBILE SUIT <W-REC MIX>
( 2:57:38 ) JAILBREAK

read file and normalize data to adjust TZ.


THIS ENTRY IS OBSOLETE. PLEASE GO TO THIS PAGE.

# read data as csv format and convert to xts to plot
#
Sys.setenv(TZ=Sys.timezone())
bp <- read.csv("~/Downloads/bp - シート1.csv")
system("rm \"$HOME/Downloads/bp - シート1.csv\"")
bp.xts <- xts(bp[,c(-1,-2,-6)],as.POSIXct(paste(bp$Date,bp$Time,sep=" "),tz=Sys.timezone()),tz=Sys.timezone())
# weekly average
apply.weekly(bp.xts[bp.xts$High > 95],mean)
#
#
# prepare data according to system timezone. "Asia/Tokyo" in most cases.
#
bp.day <- merge(as.xts(as.vector(bp.xts[,1]),as.Date(index(bp.xts),tz=tzone(bp.xts))),as.vector(bp.xts[,2]))
colnames(bp.day)[1] <- "high"
colnames(bp.day)[2] <- "low"
#
# prepare timezone 2 hours behind "Asia/Tokyo".
#
bp.bangkok <- merge(as.xts(as.vector(bp.xts[,1]),as.Date(index(bp.xts),tz="Asia/Bangkok")),as.vector(bp.xts[,2]))
colnames(bp.bangkok)[1] <- "high"
colnames(bp.bangkok)[2] <- "low"
apply.weekly(bp.bangkok,mean)
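The reason for the two variants: the same timestamp can fall on different calendar dates depending on the timezone passed to as.Date(). A minimal illustration:

# the same instant maps to different dates under different timezones
t <- as.POSIXct("2018-09-20 00:30:00", tz = "Asia/Tokyo")
as.Date(t, tz = "Asia/Tokyo")   # "2018-09-20"
as.Date(t, tz = "Asia/Bangkok") # "2018-09-19", still the previous day there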

2018年9月20日木曜日

addTA,approx


Draw a line between two given positions on the candle chart.

my_draw_line_on_candle <- function(par_xts,start_val,start_date,end_val,end_date){
  # number of weekly bars between the two dates
  len <- length(seq(as.Date(start_date),as.Date(end_date),by='weeks'))
  # linear interpolation between the start and the end values
  plot_data <- approx(seq(1,2,1),c(start_val,end_val),n=len,method='linear')$y
  tmp_xts <- as.xts(plot_data,seq(as.Date(start_date),as.Date(end_date),by='weeks'))
  addTA(tmp_xts,on=1,legend="slope")
}


> my_draw_line_on_candle(weekly_pf,2273486,"2018-01-26",2189051,"2018-09-19")
> my_draw_line_on_candle(weekly_pf,as.vector(first(weekly_pf)[,1]),index(first(weekly_pf)),2188051,"2018-09-19")
> my_draw_line_on_candle(weekly_pf,1160008,"2017-01-06",2163476,"2018-09-18")



> last(weekly_pf)[,4]/as.vector(first(weekly_pf)[,1])
              close
2018-09-19 3.450367
> length(index(weekly_pf))
[1] 247
> 3.450367**(1/247)
[1] 1.005027
> seq(1,247,1)
  [1]   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27
<skip>
[244] 244 245 246 247
> 1.005027**seq(1,247,1)
  [1] 1.005027 1.010079 1.015157 1.020260 1.025389 1.030544 1.035724 1.040931 1.046163 1.051423 1.056708 1.062020
 <skip>
[241] 3.348365 3.365197 3.382114 3.399116 3.416203 3.433376 3.450636
> tmp <- as.xts(634440.2*1.005027**seq(1,247,1),index(weekly_pf))
> addTA(tmp,on=1,legend="powered")

my_draw_line_on_candle(weekly_pf,as.vector(first(weekly_pf)[,1]),index(first(weekly_pf)),last(weekly_pf)[,4],index(last(weekly_pf)[,4]))
tmp <- as.xts((as.vector((last(weekly_pf)[,4]/as.vector(first(weekly_pf)[,1]))**(1/length(index(weekly_pf))))**seq(1,length(index(weekly_pf)),1))*as.vector(first(weekly_pf)[,1]),index(weekly_pf))
addTA(tmp,on=1,legend="powered")
tmp <- as.xts((as.vector((1800000/as.vector(first(weekly_pf)[,1]))**(1/length(index(weekly_pf))))**seq(1,length(index(weekly_pf)),1))*as.vector(first(weekly_pf)[,1]),index(weekly_pf))

addTA(tmp,on=1,legend="")


2018年9月15日土曜日

draw the line between given points on candle chart addTA() addLines()



> candleChart(to.weekly(fas_shares * FAS[,4] +spxl_shares * SPXL[,4]+as.xts(c2+c3+c4,index(fas_shares))),theme='white')
> weekly_pf[106]
               open     high      low    close
2016-01-08 796732.6 804098.1 684234.9 684234.9
> length(index(weekly_pf))-106
[1] 140
>  tmp <- as.xts(append(rep(684234.9,106),approx(seq(1,2,1),c(684234.9,2170000),n=140,method="linear")$y),append(rep(as.Date("2016-01-08"),106),last(index(weekly_pf),n=140)))
> addTA(tmp,on=1,legend="slope")
# length(index(weekly_pf)) - 158 = 88
> tmp <- as.xts(append(rep(1189422,158),approx(seq(1,2,1),c(1189422,2170000),n=88,method="linear")$y),append(rep(as.Date("2017-01-06"),158),last(index(weekly_pf),n=88)))
> addTA(tmp,on=1,legend="slope")
> weekly_pf[158]
              open    high     low   close
2017-01-06 1160008 1189422 1160008 1189422
> weekly_pf[210]
              open    high     low   close
2018-01-05 1906271 1991231 1906271 1991231
> plot(addLines(v=c(106,158,210)))
> addTA(as.xts(approx(seq(1,2,1),c(600000,2170000),n=246,method="linear")$y,index(weekly_pf)),on=1,name='2100000')
> addTA(tmp,on=1,legend="slope")


2018年9月14日金曜日

bp graph with moving average. plot() suffixes,filter()





> length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
[1] 231
> plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),last(apply.daily(bp.bangkok,mean),n=231)),suffixes=c("mh","ml","high","low"),main="daily w/ 7 day moving average",grid.ticks.on='weeks')

> events
           [,1]       
2018-06-20 "natrix"   
2018-07-14 "weight"   
2018-08-09 "abort natrix"
> addEventLines(events, srt=90, pos=2,col=10)


 plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),last(apply.daily(bp.bangkok,mean),n=length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7))))),suffixes=c("mh","ml","high","low"),main="daily w/ 7 day moving average",grid.ticks.on='weeks')
addEventLines(events, srt=90, pos=2,col=10)

len <- length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),
as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),last(apply.daily(bp.bangkok,mean),n=len)),
suffixes=c("mh","ml","high","low"),main="daily w/ 7 day moving average",grid.ticks.on='weeks')
addSeries(as.xts(rep(mean(bp.xts[,1][bp.xts$High > 95]),len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),on=1,col=6,lwd=1)
addSeries(as.xts(rep(mean(bp.xts[,2][bp.xts$High > 95]),len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),on=1,col=6,lwd=1)
addSeries(as.xts(rep(last(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7))),len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),on=1,col=6,lwd=1)
addSeries(as.xts(rep(last(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7))),len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),on=1,col=6,lwd=1)
# events <- xts(c("natrix","weight","abort natrix"),as.Date(c("2018-06-20", "2018-07-14","2018-08-09")))
# addEventLines(events, srt=90, pos=2,col=10)
events <- xts(c("natrix","weight","abort natrix","55kg"),as.Date(c("2018-06-20", "2018-07-14","2018-08-09","2018-10-13")))

addEventLines(events, srt=90, pos=2,col=10)
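The repeated na.omit(filter(...)) fragments can be factored into a hypothetical helper (my_ma7, not in the original code) that returns the 7-day moving average of one column of the daily means, aligned to the trailing dates:

# hypothetical helper: 7-day moving average of one column of the daily
# means, as an xts on the trailing dates
my_ma7 <- function(x, col = 1) {
  d  <- apply.daily(x, mean)
  ma <- as.vector(na.omit(filter(d[, col], rep(1, 7)) / 7))
  as.xts(ma, last(index(d), length(ma)))
}
# e.g. plot(merge(my_ma7(bp.bangkok, 1), my_ma7(bp.bangkok, 2)),
#           main = "daily w/ 7 day moving average", grid.ticks.on = 'weeks')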




2018年9月13日木曜日

Prepare Data --- getSymbols(), auto.arima(), as.yearmon(), as.yearqtr()




> len_mon <- 48  # # of months to predict
> r <- 1.04 # presumed GDP growth rate
> i <- seq(2,len_mon/3,1)  # seq of quarters to predict
> d <- as.Date(as.yearqtr(seq(Sys.Date(),as.Date("2100-12-31"),by="quarters")[i])) # pick up the first day of each quarter.
> as.xts(forecast(auto.arima(CS["2012::"]),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(CS)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(CS)))+seq(3,len_mon,3)),frac=0)]
               [,1]
2018-07-01 225.2251
2018-10-01 227.3579
<skip>
2022-04-01 266.0765
> last(CS)
           SPCS10RSA
2018-06-01  224.8054
> as.xts(forecast(auto.arima(PA),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(PA)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(PA)))+seq(3,len_mon,3)),frac=0)]
               [,1]
2018-10-01 149684.2
2019-01-01 150292.1
<skip>
2022-07-01 158802.2
>  last(PA)
           PAYEMS
2018-08-01 149279
> last(UC)
           UNDCONTSA
2018-07-01      1122
>
as.xts(forecast(auto.arima(UC),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(UC)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(UC)))+seq(3,len_mon,3)),frac=0)]
2018-10-01 1130.871
2019-01-01 1139.742
<skip>
2022-07-01 1263.939


len_mon <- 48  # # of months to predict
gdp_g_r <- 1.04 # presumed GDP growth rate
i <- seq(2,len_mon/3,1)  # seq of quarters to predict
d <- as.Date(as.yearqtr(seq(Sys.Date(),as.Date("2100-12-31"),by="quarters")[i])) # pick up the first day of each quarter.

getSymbols("PAYEMS",src="FRED")
PA <- PAYEMS
m_PA <- as.xts(forecast(auto.arima(PA),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(PA)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(PA)))+seq(3,len_mon,3)),frac=0)]
PAq <- apply.quarterly(PA[k2k],mean)
length(PAq)

getSymbols("UNDCONTSA",src="FRED")
UC <- UNDCONTSA
m_UC <- as.xts(forecast(auto.arima(UC),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(UC)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(UC)))+seq(3,len_mon,3)),frac=0)]
UCq <- apply.quarterly(UC[k2k],mean)
length(UCq)

getSymbols('SPCS10RSA',src='FRED')
CS <- SPCS10RSA
m_CS_2012 <- as.xts(forecast(auto.arima(CS["2012::"]),h=len_mon)$mean[1:len_mon],as.Date(as.yearmon(mondate(index(last(CS)))+seq(1,len_mon,1)),frac=0))[as.Date(as.yearqtr(mondate(index(last(CS)))+seq(3,len_mon,3)),frac=0)]
CSq <- apply.quarterly(CS[k2k],mean)
length(CSq)

getSymbols("GDP",src="FRED")
G <- GDP
# m_GDP <- as.xts(as.vector(last(GDP)) * r**(i/4),d)
m_GDP <- as.xts(as.vector(last(G)) * gdp_g_r**(seq(1,len_mon/3,1)/4),as.Date(as.yearqtr(mondate(index(last(G)))+seq(3,len_mon,3)),frac=0))
kikan <- paste("1992-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")
k2k <- paste("2000-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")

SP5 <- as.xts(read.zoo(read.csv("~/SP5.csv")))

length(CSq)
length(UCq)
length(PAq)
summary(lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k]))
my_sp5cs(k2k,m_GDP[d[1:9]],m_PA[d[1:9]],m_UC[d[1:9]],m_CS_2012[d[1:9]])
result.eps <- lm(apply.quarterly(SP5[,4][k2k],mean) ~ eps_year_xts[k2k]+apply.quarterly(PA[k2k],mean)+apply.quarterly(CS[k2k],mean)+apply.quarterly(UC[k2k],mean))
result.gpuc <- lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k])
summary(lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k]))
SP5.result <- merge(residuals(result.gpuc),predict(result.gpuc),residuals(result.eps),predict(result.eps))

GSPC.predict <- merge(to.monthly(GSPC)[substr(k2k,11,23)],last(spline(seq(1,length(SP5.result[,1]),1),as.vector(SP5.result[,2]),n=length(SP5.result[,1])*3+1)$y,n=length(to.monthly(GSPC)[,1][substr(k2k,11,23)])),last(spline(seq(1,length(SP5.result[,1]),1),as.vector(SP5.result[,4]),n=length(SP5.result[,1])*3+1)$y,n=length(to.monthly(GSPC)[,1][substr(k2k,11,23)])),suffixes=c('','spline','eps'))


calculate moving average --- filter(), grid.ticks.on='weeks',addEventLines()



> length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
[1] 230
> class(events)
[1] "xts" "zoo"
> events
           [,1]       
2018-06-20 "natrix"   
2018-07-14 "weight"   
2018-08-09 "abort natrix"
> plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(seq(as.Date("2018-01-01"),Sys.Date(),by='days'),n=230)),as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),suffixes=c("high","low")),grid.ticks.on='weeks',ylim=c(50,150))
> addEventLines(events, srt=90, pos=2,col=10)
> length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
[1] 230
> last(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
[1] 112.8571
> addSeries(as.xts(rep(112.85,len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),ylim=c(par('yaxp')[1],par('yaxp')[2]),on=1,col=4)
> last(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)))
[1] 72.92857
> addSeries(as.xts(rep(72.92,len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),ylim=c(par('yaxp')[1],par('yaxp')[2]),on=1,col=4)
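For reference, the moving-average idiom in isolation: filter(x, rep(1, 7)) is a centered 7-term moving sum, so dividing by 7 gives the average, and na.omit() drops the NA padding at both ends. A tiny check:

# filter() as a centered 7-term moving average
x <- 1:10
filter(x, rep(1, 7)) / 7           # same as filter(x, rep(1/7, 7))
na.omit(filter(x, rep(1, 7)) / 7)  # 4 5 6 7 -- three NAs dropped at each end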



len <- length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)))
plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),suffixes=c("high","low")),grid.ticks.on='weeks',ylim=c(50,150),main="7days moving average",type='p')
events <- xts(c("natrix","weight","abort natrix"),as.Date(c("2018-06-20", "2018-07-14","2018-08-09")))
addEventLines(events, srt=90, pos=2,col=10)
addSeries(as.xts(rep(85,len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),ylim=c(par('yaxp')[1],par('yaxp')[2]),on=1,col=3)
addSeries(as.xts(rep(125,len),last(index(apply.daily(bp.bangkok,mean)[,2]),len)),ylim=c(par('yaxp')[1],par('yaxp')[2]),on=1,col=3)


plot(merge(as.xts(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7)),last(seq(as.Date("2018-01-01"),Sys.Date(),by='days'),n=length(as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,1],rep(1,7))/7))))),as.vector(na.omit(filter(apply.daily(bp.bangkok,mean)[,2],rep(1,7))/7)),suffixes=c("high","low")),grid.ticks.on='weeks',ylim=c(50,150),main="7days moving average")
addEventLines(events, srt=90, pos=2,col=10)


2018年9月11日火曜日

Univ. of Michigan Consumer Confidence Index vs. S&P500.


> length(SP5[kikan][,4])
[1] 318
> cor.test((as.vector((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,318,3)])),(quarterlyReturn(UMCSENT[kikan])))

Pearson's product-moment correlation

data:  (as.vector((SP5[kikan][, 4]/as.vector(lag(SP5[kikan], k = 2)[, 1]) - 1)[seq(3, 318, 3)])) and (quarterlyReturn(UMCSENT[kikan]))
t = 4.6777, df = 104, p-value = 8.76e-06
alternative hypothesis: true correlation is not equal to 0
95 percent confidence interval:
 0.2457038 0.5629063
sample estimates:
      cor
0.4169185

> plot.default((as.vector((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,318,3)])),(quarterlyReturn(UMCSENT[kikan])))
> abline(h=0)
> abline(v=0)

> plot.xts(merge((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,318,3)],quarterlyReturn(UMCSENT[kikan])))



cor.test((as.vector((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,length(SP5[kikan][,4]),3)])),(quarterlyReturn(UMCSENT[kikan])))
plot.default((as.vector((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,length(SP5[kikan][,4]),3)])),(quarterlyReturn(UMCSENT[kikan])))
abline(h=0)
abline(v=0)
plot.xts(merge((SP5[kikan][,4]/as.vector(lag(SP5[kikan],k=2)[,1]) -1 )[seq(3,318,3)],quarterlyReturn(UMCSENT[kikan])))

2018年9月10日月曜日

mondate, as.yearmon and the calculations that treat numbers of months. mondate as.yearmon as.yearqtr as.Date(date,frac=1) first day of the month in question



> index(last(G))
[1] "2018-04-01"
> mondate(index(last(G)))+2
mondate: timeunits="months"
[1] 2018-06-01
> as.yearmon(as.character(mondate(index(last(G)))+2))
[1] " 6 2018"
> as.yearmon((mondate(index(last(G)))+2))
[1] " 6 2018"
> as.yearmon(mondate(index(last(G))+2))
[1] " 4 2018"
> as.yearmon((mondate(index(last(G)))+2))
[1] " 6 2018"
> as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1)
[1] "2018-06-30"
>
> paste("1992-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")
[1] "1992-01-01::2018-06-30"
> kikan <- paste("1992-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")
# calculate the beginning dates of the coming quarters
> as.Date(as.yearqtr((mondate(index(last(G)))+seq(3,27,3))))
[1] "2018-07-01" "2018-10-01" "2019-01-01" "2019-04-01" "2019-07-01" "2019-10-01" "2020-01-01" "2020-04-01" "2020-07-01"
> as.Date(as.yearmon(mondate(index(last(CS)))+seq(1,48,1)),frac=0)
 [1] "2018-07-01" "2018-08-01" "2018-09-01" "2018-10-01" "2018-11-01" "2018-12-01" "2019-01-01" "2019-02-01" "2019-03-01"
<skip>
[46] "2022-04-01" "2022-05-01" "2022-06-01"
> as.Date(as.yearqtr(mondate(index(last(CS)))+seq(3,48,3)),frac=0)
 [1] "2018-07-01" "2018-10-01" "2019-01-01" "2019-04-01" "2019-07-01" "2019-10-01" "2020-01-01" "2020-04-01" "2020-07-01"
[10] "2020-10-01" "2021-01-01" "2021-04-01" "2021-07-01" "2021-10-01" "2022-01-01" "2022-04-01"


kikan <- paste("1992-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")
k2k <- paste("2000-01-01::",as.Date(as.yearmon((mondate(index(last(G)))+2)),frac=1),sep="")

addLegend() spline() grid.ticks.on=



# > length(SP5.result[,1])
# [1] 74
# 74 is the length of as.vector(SP5.result[,2]), which is quarterly because GDP is only available at that frequency.
# The number of months spanned by those 74 quarters is 73 * 3 + 1 = 220.
# Thus the quarterly data can be converted to monthly.
# > length(to.monthly(GSPC)["::2018-06-30"][,1])
# [1] 138
# > k2k
# [1] "2000-01-01::2018-06-30"

result <- lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k])
SP5.result <- merge(residuals(result),predict(result))
GSPC.predict <- merge(to.monthly(GSPC)["::2018-06-30"],last(spline(seq(1,74,1),as.vector(SP5.result[,2]),n=220)$y,n=138),suffixes=c('','spline'))
plot(merge(GSPC.predict[,7],GSPC.predict[,4],GSPC.predict[,4]-GSPC.predict[,7]),main="GSPC.predict[,4] vs. GSPC.predict[,7]",grid.ticks.on='months')
tmp.legend <- "Black: theory \nRed: Actual\nGreen: residuals"
addLegend(legend.loc = "topleft", legend.names = tmp.legend,col=3)


result <- lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k])
SP5.result <- merge(residuals(result),predict(result))
GSPC.predict <- merge(to.monthly(GSPC)[substr(k2k,11,23)],last(spline(seq(1,length(SP5.result[,1]),1),as.vector(SP5.result[,2]),n=length(SP5.result[,1])*3+1)$y,n=length(to.monthly(GSPC)[,1][substr(k2k,11,23)])),suffixes=c('','spline'))
plot(merge(GSPC.predict[,7],GSPC.predict[,4],GSPC.predict[,4]-GSPC.predict[,7]),main="GSPC.predict[,4] vs. GSPC.predict[,7]",grid.ticks.on='months')
tmp.legend <- "Black: theory \nRed: Actual\nGreen: residuals"
addLegend(legend.loc = "topleft", legend.names = tmp.legend,col=3)




2018年9月7日金曜日

draw horizontal and vertical lines on plot.default abline()



> plot.default(tmp.call$acr,tmp.call$intime,first=grid(11,11))
> abline(h=90)
> abline(v=90)
> tmp.call   
   intime   acr
1   85.71 79.74
2   73.19 76.37
3   90.60 95.51
4   92.51 96.71
5   94.89 97.16
6   91.26 96.92
7   73.19 82.20
8   73.17 91.11
9   52.63 70.89
10  92.80 94.69
11  87.79 90.52
12  89.55 93.05
13  97.36 98.27


2018年9月4日火曜日

DARLING in the FRANXX - Full Soundtrack (CD 1, CD 2 & CD 3)


Download from this URL

Music: Asami Tachibana
Albums: DARLING in the FRANXX Original Soundtrack Vol. 01, Vol. 02 & Vol. 03

Disclaimer: I do not own this music.
You can purchase the whole OST together with episodes via CDJapan:

Blu-ray+CD (ep. 01-03): http://www.cdjapan.co.jp/product/ANZX...
DVD+CD (ep. 01-03): http://www.cdjapan.co.jp/product/ANZB...

Blu-ray+CD (ep. 10-12): http://www.cdjapan.co.jp/product/ANZX...
DVD+CD (ep. 10-12): http://www.cdjapan.co.jp/product/ANZB...

Blu-ray+CD (ep. 13-15): http://www.cdjapan.co.jp/product/ANZX...
DVD+CD (ep. 13-15): http://www.cdjapan.co.jp/product/ANZB...

"cÅGE"
Lyricist: cAnON.
Vocalist: Anna Pingina

"Vanquish"
Lyricist: Benjamin & mpi
Vocalist: Monique Dehaney

"FUSE"
Lyricist: Benjamin & mpi
Vocalist: Claudia Vazquez

"Battle Cry"
Lyricist: Dj L-Spade
Vocalist: Dj L-Spade

"D# regards"
Lyricist: cAnON.
Vocalist: Anna Pingina

♫ Tracklist (CD 1):

0:00 cÅGE
4:57 Vanquish
7:35 Odds and ends
9:55 o-DOR
11:42 Dino-S
13:41 BEAST
16:26 Counterattack
19:27 Operation
22:28 Reversal
25:13 In the FRANXX
27:19 Trente
29:07 Distopia
30:57 Godliness
33:14 Aile
35:36 Clarity
38:03 Nuance
39:50 Miel
41:18 Dropping
43:24 CODE:002
45:58 VICTORIA
49:02 Torikago~BGM-Rearrange~

♫ Tracklist (CD 2):

50:57 FUSE
53:56 Battle Cry
57:24 Your smile
59:39 Abandoned Places
1:01:18 The Seven Sages
1:02:58 Klaxosaur
1:05:16 Gutenberg
1:07:48 Shady History
1:09:51 ADuLt
1:11:25 One's Word
1:13:13 Vita
1:14:44 CHiLDRen
1:16:13 CODE:015
1:19:05 Lilac
1:20:55 Red Hibiscus
1:23:20 The Sands
1:25:21 Boys×Girls
1:27:03 VICTORIA -piano ver.-
1:30:11 Lilac -guitar ver.-
1:32:01 Mistilteinn
1:34:28 D# regards

♫ Tracklist (CD 3):

1:38:40 CODE:016
1:41:08 RoCco
1:43:07 Lotus
1:45:05 CODE:001
1:47:29 CoiL
1:50:44 DESPAIR
1:53:08 InVaDeR
1:55:51 GLADIOLUS
1:59:42 JUSTICE
2:02:00 Requiem
2:04:53 Cherry blossoms
2:07:14 HIRO and ZERO TWO
2:10:54 cÅGE -piano ver.-
2:12:50 JUSTICE -Epiano ver.-
2:15:16 Pray for..
2:21:05 cÅGE -SPS ver.-
2:23:47 FUSE -instrumental-
2:26:46 Battle Cry -instrumental-
2:30:14 Vanquish -instrumental-
2:32:54 D# regards -instrumental-
2:37:04 cÅGE -instrumental-
2:41:59 Torikago~BGM-Rearrange-guitar ver.~

2018年8月19日日曜日

draw histogram -- hist function

The updated version after stopping natrix on 2018-08-09.

par(mfrow=c(4,1))
my_bp_hist_x(bp.bangkok,"::2018-06-19",2,70,0.1,0.6,20,15,55,100,4)
my_bp_hist_x(bp.bangkok,"2018-06-21::2018-07-13",2,length(bp.bangkok["2018-06-21::2018-07-13"][,2]),0.1,0.6,20,15,55,100,5)
my_bp_hist_x(bp.bangkok,"2018-07-15::2018-08-09",2,length(bp.bangkok["2018-07-15::2018-08-09"][,2]),0.1,0.6,20,15,55,100,6)
my_bp_hist_x(bp.bangkok,"2018-08-10::",2,length(bp.bangkok["2018-08-10::"][,2]),0.1,0.6,30,15,55,100,7)
par(mfrow=c(1,1))


par(mfrow=c(4,1))
my_bp_hist_x(bp.bangkok,"::2018-06-19",1,70,0.1,0.6,30,15,90,150,4)
my_bp_hist_x(bp.bangkok,"2018-06-21::2018-07-13",1,length(bp.bangkok["2018-06-21::2018-07-13"][,2]),0.1,0.6,20,15,90,150,5)
my_bp_hist_x(bp.bangkok,"2018-07-15::2018-08-09",1,length(bp.bangkok["2018-07-15::2018-08-09"][,2]),0.1,0.6,20,15,90,150,6)
my_bp_hist_x(bp.bangkok,"2018-08-10::",1,length(bp.bangkok["2018-08-10::"][,2]),0.1,0.6,30,15,90,150,7)
par(mfrow=c(1,1))
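my_bp_hist_x() is a user-defined wrapper whose definition is not shown in this entry; judging from the calls above, it draws a histogram of one column over a date range with fixed x-limits and a colour. A minimal, hypothetical reduction of the idea:

# hypothetical sketch of the idea behind my_bp_hist_x(): histogram of
# one column of an xts over a date range, with fixed x-limits and colour
my_hist_sketch <- function(x, rng, col_i, lo, hi, colour) {
  v <- as.vector(x[rng][, col_i])
  hist(v, xlim = c(lo, hi), col = colour, main = paste(rng, colnames(x)[col_i]))
}
# e.g. my_hist_sketch(bp.bangkok, "2018-08-10::", 2, 55, 100, 7)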

2018年8月17日金曜日

use spline() instead of approx()

This is the sequel to the previous entry.

use spline()

> spline(seq(1,74,1),as.vector(SP5.result[,2]),n=220)

Merge with the actual data. Don't forget the suffixes parameter.

> colnames(merge(to.monthly(GSPC)["::2018-06-30"],last(spline(seq(1,74,1),as.vector(SP5.result[,2]),n=220)$y,n=138),suffixes=c('','spline')))
[1] "GSPC.Open"     "GSPC.High"     "GSPC.Low"      "GSPC.Close"    "GSPC.Volume"   "GSPC.Adjusted" "spline"   

The line below will get only the "$spline" column.

> merge(to.monthly(GSPC)["::2018-06-30"],last(spline(seq(1,74,1),as.vector(SP5.result[,2]),n=220)$y,n=138),suffixes=c('','spline'))$spline

Get the monthlyReturn() values and plot them with plot.xts(). The methods available in spline() need further investigation; see the comparison sketch after the plot call below. Use the 'grid.ticks.on' parameter.


plot(monthlyReturn(merge(to.monthly(GSPC)["::2018-06-30"],last(spline(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="fmm")$y,n=138),suffixes=c('','spline'))$spline),type='h',grid.ticks.on='months')
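For reference, stats::spline() supports method = "fmm" (the default), "natural", "periodic", "monoH.FC" and "hyman" (the latter requires monotonic data). A quick visual comparison of two of them on the same series:

# compare two spline methods on the same quarterly series
q  <- as.vector(SP5.result[, 2])
m1 <- spline(seq(1, 74, 1), q, n = 220, method = "fmm")$y
m2 <- spline(seq(1, 74, 1), q, n = 220, method = "natural")$y
plot(m1, type = 'l'); lines(m2, col = 2)  # black: fmm, red: natural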


2018年8月14日火曜日

Use approx() to convert quarterly to monthly.


result <- lm(apply.quarterly(SP5[k2k],mean)[,1] ~ PAq[k2k] * UCq[k2k] * G[k2k]*CSq[k2k] - UCq[k2k] -G[k2k] - PAq[k2k]*G[k2k] - UCq[k2k]*G[k2k]*CSq[k2k])
SP5.result <- merge(residuals(result),predict(result),suffixes = c("resi","pred"))

or

tmp.result <- merge(residuals(result),predict(result),suffixes = c("resi","pred"))

the parameter "suffixes" improves readability.

> colnames(tmp.result)
[1] "resi" "pred"
> head(tmp.result)
                 resi     pred
2000-03-01   6.572655 1403.471
2000-06-01 138.085416 1319.118

> length(as.vector(SP5.result[,2]))
[1] 74

74 is the length of as.vector(SP5.result[,2]), which is quarterly because GDP is only available at that frequency.
The number of months spanned by those 74 quarters is 73 * 3 + 1 = 220.
Thus the quarterly data can be converted to monthly.


approx(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="linear")
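In general, n quarterly points span (n - 1) * 3 + 1 monthly points, which is where 220 comes from (73 * 3 + 1). A hypothetical helper (my_q2m, not in the original code):

# hypothetical helper: interpolate n quarterly values onto the
# (n - 1) * 3 + 1 monthly points they span
my_q2m <- function(q, method = "linear") {
  n <- length(q)
  approx(seq(1, n, 1), as.vector(q), n = (n - 1) * 3 + 1, method = method)$y
}
# e.g. my_q2m(SP5.result[, 2])  # length 220 for the 74 quarters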

The line below calculates the gap between the actual monthly high and the monthly-converted theoretical value.

The last entry of SP5.result is 2018-06.

> last(SP5.result)
           residuals.result. predict.result.
2018-06-01         -186.2956        2851.332

The number of months from the start through 2018-06 is 138.

> length(to.monthly(GSPC)[,2]["::2018-06-30"])
[1] 138


to.monthly(GSPC)[,2]["::2018-06-30"] - last(approx(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="linear")$y,n=138)

The line below draws the gap between the actual price and the theoretical value as a percentage of the actual price.

plot(100*(to.monthly(GSPC)[,2]["::2018-06-30"] - last(approx(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="linear")$y,n=138))/to.monthly(GSPC)[,2]["::2018-06-30"])



> GSPC.predict <- merge(to.monthly(GSPC)["::2018-06-30"],last(approx(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="linear")$y,n=138))
> colnames(GSPC.predict)
[1] "GSPC.Open"                                                                                   
[2] "GSPC.High"                                                                                   
[3] "GSPC.Low"                                                                                   
[4] "GSPC.Close"                                                                                 
[5] "GSPC.Volume"                                                                                 
[6] "GSPC.Adjusted"                                                                               
[7] "last.approx.seq.1..74..1...as.vector.SP5.result...2....n...220..method....linear...y..n...138."
> colnames(GSPC.predict)[7] <- "theory"
> colnames(GSPC.predict)
[1] "GSPC.Open"     "GSPC.High"     "GSPC.Low"      "GSPC.Close"    "GSPC.Volume"   "GSPC.Adjusted"
[7] "theory"
> GSPC.predict
        GSPC.Open GSPC.High GSPC.Low GSPC.Close  GSPC.Volume GSPC.Adjusted    theory
 1 2007   1418.03   1441.61  1403.97    1438.24  56686200000       1438.24 1388.9681
 2 2007   1437.90   1461.57  1389.42    1406.82  51844990000       1406.82 1409.6371
 3 2007   1406.80   1438.89  1363.98    1420.86  67622250000       1420.86 1430.3061
    <skip>
 5 2018   2642.96   2742.24  2594.62    2705.27  75617280000       2705.27 2811.0006
 6 2018   2718.70   2791.47  2691.99    2718.37  77439710000       2718.37 2851.8821

> plot(GSPC.predict[,4] / GSPC.predict[,7])

The option "suffixes" to change column names on the fly, but seems to need 1st arg for no purpose.

> colnames(merge(to.monthly(GSPC)["::2018-06-30"],last(approx(seq(1,74,1),as.vector(SP5.result[,2]),n=220,method="linear")$y,n=138),suffixes=c("a","theory")))
[1] "GSPC.Open"     "GSPC.High"     "GSPC.Low"      "GSPC.Close"    "GSPC.Volume"   "GSPC.Adjusted" "theory"  

2018年8月10日金曜日

Test Plan (Sample)


KISA for NIFTY (MR15) Test Plan

Document Information

2018/01/29
Product Management: Yuichi Arata
Quality assurance: Momoko Furukawa
(Template version: v2.5)

Introduction

The purpose of this release is to support the new Android OS 8. It is a customization build for an xSP partner, based on the standard KIS for Android Japan MR15.

Testing Policy

Prerequisite

Standard functions, defects and restrictions are the same as in the standard version.
Functional testing has already been performed on the standard build. The customization build is based on the standard Japanese MR15.
The web portal used is the Anti-Theft web portal, not My Kaspersky.
Help contents are customized manually according to the spec by the KLJ TAM.

Policy

Testing is performed on the customization points according to the customization specification document.
Testing does not cover the full functional area. Minimized functional testing is performed, equivalent to acceptance testing.
No technical-review testing of the online help contents.

Target Products

Kaspersky Internet Security for Android (MR15) for NIFTY
Anti-Theft Portal http://anti-theft.kaspersky.com/

Non-Targeted Products

My Kaspersky https://my.kaspersky.com/

Project milestone

Milestones | Date | Owner
Meeting for Project Feasibility Judgment | N/A | PMM
Product Development team Kick-Off | 2018/1/19 | PM
Meeting for the Test Plan | 2018/1/29 | PM
Deadline for Bugs to be Reported | 2018/2/14 | PM
Linguistic green light | 2018/2/28 | Doc&Loc Manager
User acceptance testing (UAT) | 2018/3/1-7 | TAM
JP RTT | 2018/3/8 | PM
Meeting for Product Quality Judgment | 2018/3/12 | PM
JP TR | 2018/3/19 | HQ

GTM testing requirement

  1. Make sure that the build is customized according to the specification attached to the Change Request.
    1. Change Request 2335008: [CR][Android] KISA MR15 for Nifty
      1. Specification: KIS4Android_RebrandingSpecification_NIFTY_MR15_20171130_PM_reviewed.docx
        1. UPDATED: KIS4Android_RebrandingSpecification_NIFTY_MR15_20171130_PM_reviewed_TS20180125_PM.docx
      2. Graphics: Grapchics_KISAMR15_NIFTY_20171116.zip
      3. EULA: /depot/EULA/KISA/Customizations/Nifty/MR15/ja/license.html
  2. Web Portal
    1. Make sure that the device can be connected to the Anti-Theft Web Management Portal (ATWM), not My Kaspersky.
    2. Make sure that the user can control the device from ATWM.
    3. Make sure that the user can change the password from 'Forgot your password?' on the portal sign-in UI.
  3. External links
    1. Make sure that the user can open the online help for NIFTY MR15 from the product's external links.
  4. EULA/KSN statement
    1. Make sure that the EULA is replaced with the NIFTY version.
    2. Make sure that the KSN statement is NOT replaced from the standard version.
  5. Skip lite-mode activation in the first-run wizard.
  6. Make sure that the user must activate the product with their NIFTY activation code.
  7. Change the period after which the activation status automatically falls back to lite mode when the device is disconnected from the internet.
    1. Make sure that it is not changed to lite mode 30 days after disconnecting from the internet.
    2. Make sure that it is changed to lite mode 90 days after disconnecting from the internet.
    3. Make sure that the user can activate again with a NIFTY activation code from the UI after the change to lite mode.
  8. Upgrade
    1. Make sure that the user can upgrade from the previous custom build MR12.
  9. Incompatible packages
    1. Make sure that an incompatible package is detected when installing the custom build over an active installation of the standard build.
    2. Make sure that an incompatible package is detected when installing the custom build over an active installation of another custom build (KISA for KDDI).

Project risk

Risk | Counterplan
Delay of the build delivery to QA | KLJ PM has to negotiate the schedule sufficiently with the HQ TAM before the test term starts
Many bugs found in QA testing and UAT | KLJ PM has to arrange new dates appropriately; KLJ TAM will communicate with the customer in a timely manner

Testing criteria and release criteria

  1. All GTM testing coverage is covered
  2. No stop bugs
  3. Any detected issues are analyzed and discussed, and all issues are raised in the bug databases (TFS and Redmine)
  4. Known issues are clearly defined, and all potential risks are explained to and agreed with the other related departments
  5. All testing items described in the testing scope are covered

Test type and scope

Test type

  • Functional: Functional test.
  • L10N: Japanese translation test of Explanation on GUIs, messages and Online Help.
  • GUI: Checks of truncation on GUIs and the shaping of the layout.
  • Upgrade: Upgrade test from an older product or an older version of the same product.
  • Bug Regression: Regression test on bugs which have been checked for correction.
  • Final Certification: Final certification test against builds which are candidates for release.

Test scope

Yes: in-scope for testing
No: out-of-scope for testing
L10N/GUI will be checked in every section by DocLoc and QA (HQ, KLJ).

Categories | Test areas | Redmine tasks | Functional | L10N | GUI | Final Certification | Notes
Rebranding | Specification for Rebranding | KIS4Android_RebrandingSpecification_NIFTY_MR15_20171130_PM_reviewed.docx | YES | YES | YES | NO |
Basic function (Rebranding Specification) | Compatibility (KISA, KDDI) | | YES | YES | YES | NO |
 | Upgrade | | YES | YES | YES | NO |
 | Handover settings | - | NO | NO | NO | NO |
 | License compatibility | | YES | YES | YES | NO |
 | EULA and KSN statement | | YES | YES | YES | YES |
 | External link | | NO | YES | YES | NO |
 | Portal cooperative | | YES | YES | YES | NO |
Basic function | Install/Uninstall | | YES | YES | YES | YES |
 | Default setting value check | - | NO | YES | YES | NO |
 | Scan | | YES | YES | YES | YES |
 | Update | | YES | YES | YES | YES |
 | Real-time protection | - | NO | YES | YES | NO |
 | Call & text filter | - | NO | YES | YES | NO |
 | Anti-theft | - | NO | YES | YES | NO |
 | SMS confirmation & control & functional notification | - | NO | YES | YES | NO |
 | Web protection | - | NO | YES | YES | NO |
 | Privacy protection | - | NO | YES | YES | NO |
 | Additional settings | - | NO | YES | YES | NO |
 | Management via wearable devices | | YES | YES | YES | YES |

Test schedule

Phase | Milestones, tasks | Date | Expected hours (MH) | Notes
Plan | Product development team Kick-Off | 2018/1/19 | 1 |
 | Create Test Plan | 2018/1/23-24 | 2 |
 | Meeting for the Test Plan | 2018/1/29 | | Mail review
Preparation | Create Test cases | 2018/1/24-30 | 7.5 |
 | Prepare the Test Environment | 2018/1/30 | 0.5 |
Implement | Basic function (Rebranding Specification) | 2018/1/31-2/6 | 30 |
 | Due date to submit the must-fix bug list to HQ | 2018/2/14 | |
 | Bug Regression Test | 2018/2/27-28 | 4 |
 | Linguistic green light | 2018/2/28 | |
 | Final Certification Test | 2018/3/8 | 4 |
 | JP RTT | 2018/3/8 | |
Report | Create test report | 2018/3/8-3/9 | 2 |
 | Meeting for Product Quality Judgment | 2018/3/12 | | Mail review
 | JP TR | 2018/3/19 | |
Postmortem | | | |
Total | | | 51 |

Testing environment

System requirements

  • Conforms to the standard version: Android 4.4-8.0.
  • Includes verification on a tablet device to confirm the GUI.

Environment set

Smartphone
Set# | Carrier | Device name | OS | Browsers | QA management device | Notes
1 | - | Nexus 6P | 8.0 | Chrome | #8106 | The full test is not limited to this device; test cases are dispersed across the other devices.
2 | Y!Mobile | Android One X1 | 7.1.2 | Chrome | #21612 | The full test is not limited to this device; test cases are dispersed across the other devices.
3 | Softbank | Sony Xperia Z5 501SO | 6.0 | Chrome | #8011 | Device for bug surveys.
4 | au | Xperia Z2 SOL25 | 5.0.2 | Chrome | #2634 | Device for bug surveys.
5 | Docomo | Xperia Z SO-02E | 4.4.2 | Chrome, default browser | #3366 | Device for bug surveys.
Tablet
Set# | Carrier | Device name | OS | Browsers | QA management device | Notes
1 | - | Nexus 9 | 7.0 | Chrome | #3942 | Tablet UI confirmation only.
Wearable
Set# | Carrier | Device name | OS | Browsers | QA management device | Notes
1 | - | CASIO WSD-F10RG | 7.1.1 | - | #10691 | Final Certification confirmation only.

Planned deliverables

Deliverables | Assigned to | Due date
RTT Build | HQ | 2018/3/8
EULA | PM/TAM |
KSN statement | - | -
Test plan | PM | 2018/1/29
Test cases | QA Lead | 2018/1/30
Test report | PM | 2018/3/12
Deferred bug list | QA Lead | 2018/3/12
Won't fix bug list | QA Lead | 2018/3/12
External URL list | QA Lead | 2018/3/12

Stakeholders

Development team

KLJ:
Role | Member
Product Manager | Yuichi Arata
Technical Product Manager | Tomomasa Sato
Doc&Loc Manager | Tomonori Imada
QA Lead | Momoko Furukawa
Tester | Momoko Furukawa
HQ:
Role | Member | Notes
Head of Products Testing | Olga Nikolenko |
Product Manager | Oleg V. Nevstruev |
Project Manager | Sergey Kochanov |
Head of Products | Victor Yablokov |
Doc&Loc Manager | Dmitry Bentsa |
*Project Manager: direct contact from KL Japan is basically prohibited.

Other departments

Department | Team | Member | Notes
CSMO | Product marketing | Yoshinori Tamura |
Product | Consumer Support | Yuichi Kimura |
Corporate business | Engineering | Tomomasa Sato |
Corporate business | Alliance sales | Daisuke Kuroda |
CEO | Moscow Liaison | Denis Chernov |

Project information management

Redmine

Information on testing and bugs will be managed on Redmine.
Rebrand Products » KISA Rebranding » KISA MR15 Rebranding » KISA for NIFTY (MR15)
https://productqa.jp.kaspersky.net/redmine/projects/kisa-for-nifty-mr15

PMWiki

https://pmwiki.jp.kaspersky.net/wiki/index.php/KISA_MR15_for_NIFTY

FileServer

For file-sharing within the QA group
\\qateamsvr\depot\qa\build\KISA Rebranding\NIFTY\MR15

Build management

Build management rule

KLJ

For QA team
\\qateamsvr\depot\qa\build\KISA Rebranding\NIFTY\MR15
To share builds with other departments
\\kavfs\kaspersky\dev\Software\Japanese\product\KIS_for_Android_Rebranding\MR15\NIFTY\pk
TR build
\\kavfs\product_packages\Rebranding\NIFTY\KISA\MR15_[VERSION]

Build creation

BuildManagementRule

Testing case management

Manage all test cases on LocalTFS.
Project: KISA
https://jplocaltfs2.apac.kaspersky.com/tfs/DefaultCollection/KISA
NIFTY: KISA for NIFTY MR15 (Id: 21841)
https://jplocaltfs2.apac.kaspersky.com/tfs/DefaultCollection/KISA/_testManagement#planId=21841&suiteId=21842&_a=tests

Incident management

HQ

Submit bug reports on TFS.
https://hqrndtfs.avp.ru/tfs/DefaultCollection/Mob-KISA
Area:Mob-KISA\Customizations\Partners\Nifty
Iteration path:Mob-KISA\Customizations\MR15\MR15 for Nifty
Bug report guidelines: http://productqa.kaspersky.co.jp/redmine/attachments/download/2090/Defects.docx

KLJ

Submit bug reports on Redmine.
Redmine bug report guidelines
Bug workflow
GUI/L10N issue categories

Bug priority

BugPriority

Testing guideline

Test_Guideline

Actual evaluation indicators and Execution control plans

  • The project report is posted to the forum
  • Test tasks are controlled with Redmine tickets
  • Test progress is calculated based on the completion rate of Redmine tasks and posted to the Wiki
  • Test progress is measured in Excel and compared with the per-day average progress.
    • \\kavfs\kaspersky\dev\PM\Project_Progress\2018_QA_KISA_MR15_NIFTY.xlsx
  • PM/QA/D&L triage, prioritize and decide severities of the bugs at the weekly meeting
    • if required, the meeting can be held on a daily basis
  • Members swiftly report to the PM any other significant problem affecting the progress of the project
  • PM shares bug information with stakeholders depending on its impact

History

v1.0 Initial