r A function to use conda seamlessly with R

A function to use conda seamlessly with R

README.md
* Install EVERYTHING you can with conda  
* Install what you can't with:  
  - install.packages(<pkg>, lib="<my local lib>")  
  - devtools::install_github(<pkg>, lib="<my local lib>")  
  - biocLite(<pkg>, lib="<my local lib>")  
* Make separate local libs for different R versions  
* After installing a package to a local lib, look there and see
  if there are dependencies you could have installed with conda.  
* Or, head this off by checking the dependencies on CRAN or Bioconductor
  first (see the sketch below).
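
A minimal sketch of that dependency check (the package name is just an example; the mirror matches the .Rprofile below):
db <- available.packages(repos = "https://ftp.osuosl.org/pub/cran/")
tools::package_dependencies("ggplot2", db = db, which = c("Depends", "Imports"))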
.Rprofile
options(repos=structure(c(CRAN="https://ftp.osuosl.org/pub/cran/")))

startCondaEnv = function(env_name, lib='/home/balter/R')
{
	cat("pointing to conda env:", env_name, "and lib location", lib, "\n")

	r_lib_path = lib
	if (env_name == "" || env_name == "base")
	{
		#print("using default base env")
		conda_lib_path = file.path('/home/balter/conda/lib/R/library')
	}else
	{
		conda_lib_path = file.path('/home/balter/conda/envs', env_name, 'lib/R/library')
	}

	#cat('conda_lib_path: ', conda_lib_path, '\n')
	
	.libPaths(new=c(conda_lib_path, r_lib_path))
	
	#cat(".libPaths():\n")
	print(.libPaths())
}


current_conda_env = Sys.getenv('CONDA_DEFAULT_ENV')
cat('current_conda_env:', current_conda_env, '\n')

current_conda_prefix = Sys.getenv('CONDA_PREFIX')
cat('current_conda_prefix:', current_conda_prefix, '\n')

if (current_conda_env != "")
{
	r_version = R.Version()
	r_version = paste0(r_version$major, '.', r_version$minor)

	if (r_version == '3.5.1')
	{
		r_lib_path = '/home/balter/R35'
	}else 
	if (r_version == '3.4.11')
	{
		r_lib_path = '/home/balter/R34'
	}else
	{
		message("no matching local lib for this R version")
		r_lib_path = ''
	}
	
	#cat("env: ", current_conda_env, ", prefix: ", current_conda_prefix, "\n")
	conda_env_lib = file.path(current_conda_prefix,'lib/R/library')  # note: unused; startCondaEnv derives this path itself
	startCondaEnv(current_conda_env, lib=r_lib_path)
}else
{
	print("no conda env")
}

startCondaEnv.R
startCondaEnv = function(env_name, lib='/home/balter/R')
{
	cat("pointing to conda env:", env_name, "and lib location", lib, "\n")

	r_lib_path = lib
	if (env_name == "" || env_name == "base")
	{
		#print("using default base env")
		conda_lib_path = file.path('/home/balter/conda/lib/R/library')
	}else
	{
		conda_lib_path = file.path('/home/balter/conda/envs', env_name, 'lib/R/library')
	}

	#cat('conda_lib_path: ', conda_lib_path, '\n')
	
	.libPaths(new=c(conda_lib_path, r_lib_path))
	
	#cat(".libPaths():\n")
	print(.libPaths())
}
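
A quick usage sketch (the env name and lib path are examples):
startCondaEnv("myenv", lib = "/home/balter/R35")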

r Subsetting

https://www.datacamp.com/community/tutorials/15-easy-solutions-data-frame-problems-r

Subsetting
# Drop the SYM tokens from docs_annotated2 by subsetting on the upos column
docs_annotated2 <- docs_annotated2[docs_annotated2$upos != "SYM",]
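
Equivalent filters, as a sketch (base subset() or dplyr):
docs_annotated2 <- subset(docs_annotated2, upos != "SYM")
docs_annotated2 <- dplyr::filter(docs_annotated2, upos != "SYM")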

r Reordering columns

http://www.sthda.com/english/wiki/reordering-data-frame-columns-in-r

Re-order
# move column 5 first, then 4, then the original columns 1-3
my_data2 <- my_data[, c(5, 4, 1, 2, 3)]
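
Reordering by name is more robust when column positions change (these column names are hypothetical):
my_data2 <- my_data[, c("e", "d", "a", "b", "c")]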

r Create the input file for an alluvial plot

create_alluvial.R
#!/usr/bin/env Rscript

args = commandArgs(trailingOnly=TRUE)

# test if there is at least one argument: if not, return an error
if (length(args) == 0) {
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
} else if (length(args) == 1) {
  # default output file
  args[2] = "alluvial.tsv"
}


needed = c("tidyverse", "magrittr", "parallel")
is.installed <- function(pkg){
  is.element(pkg, installed.packages()[,1])
}

if (!is.installed("crayon")){suppressMessages(install.packages("crayon"))}
suppressMessages(library(crayon))

missing_package <- FALSE
cat("\nChecking if all packages are installed...\n\n")
# For loop to run through each of the packages
for (p in 1:length(needed)){
  if(is.installed(needed[p])){
    cat(sprintf("%-10s: %s", needed[p], green("Installed\n")))
  }else{
    cat(sprintf("%-10s: %s", needed[p], red("Not installed\n")))
    missing_package <- TRUE
  }
}

quit_not_installed <- function(){
  cat("\nMissing packages, please install them.\n")
  quit(save = "no", status = 1)
}

if (missing_package) {
  quit_not_installed()
}else{
  cat("\nAll packages installed.\n")}

suppressMessages(library(tidyverse))
suppressMessages(library(magrittr))
suppressMessages(library(parallel))

ncores <- max(1, parallel::detectCores() %/% 2)  # use half the available cores, at least 1

cat(paste0("\nReading file ", args[1], "..."))
suppressMessages(cl_tax_orfs <- read_tsv(args[1], col_names = TRUE) %>%
                   mutate(cl_name = as.character(cl_name)))
cat(green(" done\n\n"))

cat(paste("File", args[1], "has", nrow(cl_tax_orfs), "rows and", ncol(cl_tax_orfs), "columns\n\n"))

majority_vote <- function (x, seed = 12345) {
  set.seed(seed)
  whichMax <- function(x) {
    m <- seq_along(x)[x == max(x, na.rm = TRUE)]
    if (length(m) > 1)
      sample(m, size = 1)
    else m
  }
  x <- as.vector(x)
  tab <- table(x)
  m <- whichMax(tab)
  out <- list(table = tab, ind = m, majority = names(tab)[m])
  return(out)
}
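# Example: majority_vote(c("a", "a", "b"))$majority returns "a".
# Exact ties are broken by a random draw, made reproducible by the fixed seed.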

# Analyse annotations -----------------------------------------------------
# cl_tax_orfs %>%
#   group_by(cl_name, category) %>%
#   count() %>%
#   arrange(desc(n)) %>%
#   group_by(category) %>%
#   skimr::skim()

# Build one consensus lineage per cluster (X = cluster name)
propagate_annotation <- function(X, data = data){
  cls <- data %>%
    dplyr::filter(cl_name == X)

  consensus_superkingdom <- cls %>%
    dplyr::filter(!is.na(superkingdom)) %>%
    summarise(consensus_superkingdom = ifelse(n() < 1, NA,  majority_vote(superkingdom)$majority)) %>% .$consensus_superkingdom

  consensus_phylum <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  !is.na(phylum)) %>%
    summarise(consensus_phylum = ifelse(n() < 1, paste(consensus_superkingdom, "NA", sep = "_"), majority_vote(phylum)$majority)) %>% .$consensus_phylum

  consensus_class <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  phylum == consensus_phylum,
                  !is.na(class)) %>%
    summarise(consensus_class = ifelse(n() < 1, paste(consensus_phylum, "NA", sep = "_"), majority_vote(class)$majority)) %>% .$consensus_class

  consensus_order <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  phylum == consensus_phylum,
                  class == consensus_class,
                  !is.na(order)) %>%
    summarise(consensus_order = ifelse(n() < 1, paste(consensus_class, "NA", sep = "_"), majority_vote(order)$majority)) %>% .$consensus_order

  consensus_family <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  phylum == consensus_phylum,
                  class == consensus_class,
                  order == consensus_order,
                  !is.na(family)) %>%
    summarise(consensus_family = ifelse(n() < 1, paste(consensus_order, "NA", sep = "_"), majority_vote(family)$majority)) %>% .$consensus_family

  consensus_genus <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  phylum == consensus_phylum,
                  class == consensus_class,
                  order == consensus_order,
                  family == consensus_family,
                  !is.na(genus)) %>%
    summarise(consensus_genus = ifelse(n() < 1, paste(consensus_family, "NA", sep = "_"), majority_vote(genus)$majority)) %>% .$consensus_genus

  tibble(cl_name = X, consensus_superkingdom, consensus_phylum, consensus_class,
         consensus_order, consensus_family, consensus_genus)
}
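# The consensus is built top-down: each rank is voted on only among ORFs that
# already agree with the consensus at all higher ranks, and an unresolvable
# rank inherits the parent consensus with an "_NA" suffix (e.g. "Bacteria_NA").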


cat(paste("Propagating taxonomic annotations at cluster level using", cyan(ncores), "cores... "))

cl_tax_consensus <- mclapply(cl_tax_orfs$cl_name %>% unique(),
                             propagate_annotation, data = cl_tax_orfs, mc.cores = ncores) %>%
  bind_rows()

tax_ranks <- c("consensus_superkingdom", "consensus_phylum", "consensus_class", "consensus_order", "consensus_family", "consensus_genus")

cat(green("done\n"))

# Quick look to the consensus annotations ---------------------------------


#map(tax_ranks, function(X){cl_tax_consensus %>% group_by_(X) %>% count(sort = TRUE) %>% ungroup()})

#cl_tax_consensus %>% filter(is.na(consensus_phylum))


# Write results -----------------------------------------------------------

# cl_tax_orfs %>%
#   select(supercluster, cl_name) %>%
#   inner_join(cl_tax_consensus %>% select(cl_name, consensus_class, consensus_phylum, consensus_superkingdom)) %>%
#   write_tsv("~/Downloads/pr2alluvial.tsv")


# Annotate at the ORF level -----------------------------------------------
# Uses the data generated above

# Fill missing ranks row-wise for ORFs that carry some taxonomy (X = row index)
propagate_annotation_na <- function(X, data = data){
  cls <- data[X,] %>%
    select(genus, family, order, class, phylum, superkingdom, orf, cl_name, supercluster)

  consensus_superkingdom <- cls %>%
    summarise(consensus_superkingdom = ifelse(is.na(superkingdom), NA, superkingdom))%>% .$consensus_superkingdom

  consensus_phylum <- cls %>%
    summarise(consensus_phylum = ifelse(is.na(phylum), paste(consensus_superkingdom, "NA", sep = "_"), phylum)) %>% .$consensus_phylum

  consensus_class <- cls %>%
    summarise(consensus_class = ifelse(is.na(class), paste(consensus_phylum, "NA", sep = "_"), class)) %>% .$consensus_class

  consensus_order <- cls %>%
    summarise(consensus_order = ifelse(is.na(order), paste(consensus_class, "NA", sep = "_"), order)) %>% .$consensus_order

  consensus_family <- cls %>%
    summarise(consensus_family = ifelse(is.na(family), paste(consensus_order, "NA", sep = "_"), family)) %>% .$consensus_family

  consensus_genus <- cls %>%
    dplyr::filter(superkingdom == consensus_superkingdom,
                  phylum == consensus_phylum,
                  class == consensus_class,
                  order == consensus_order,
                  family == consensus_family,
                  !is.na(genus)) %>%
    summarise(consensus_genus = ifelse(n() < 1, paste(consensus_family, "NA", sep = "_"), majority_vote(genus)$majority)) %>% .$consensus_genus

  tibble(supercluster = cls$supercluster, orf = cls$orf, cl_name = cls$cl_name, consensus_superkingdom, consensus_phylum, consensus_class,
         consensus_order, consensus_family, consensus_genus)
}

cat("Collecting consensus annotations... ")
pr_clusters_consensus <- cl_tax_orfs %>%
  select(supercluster, cl_name) %>%
  inner_join(cl_tax_consensus %>% select(cl_name, consensus_superkingdom, consensus_phylum, consensus_class, consensus_order, consensus_family, consensus_genus), by = "cl_name")

cat(green("done"),"\nCollecting ORFs with taxonomic annotations... ")
pr_clusters_no_na <- cl_tax_orfs %>%
  filter(!(is.na(superkingdom) | is.na(phylum)))

cat(green("done"), "\nCollecting ORFs without taxonomic annotations... ")
cl_tax_consensus_na  <- cl_tax_orfs %>%
  filter(is.na(superkingdom) | is.na(phylum)) %>%
  select(supercluster, cl_name, orf) %>%
  unique() %>%
  inner_join(pr_clusters_consensus %>% select(-supercluster), by = "cl_name") %>% unique()

cat(green("done"),"\nPropagating taxonomic annotations at the ORF level using", cyan(ncores), "cores... ")
cl_tax_consensus_no_na <- mclapply(1:nrow(pr_clusters_no_na),
                                   propagate_annotation_na, data = pr_clusters_no_na, mc.cores = ncores) %>%
  bind_rows()
cat(paste0(green("done"), "\nExporting data for alluvial plot drawing to file ", silver(args[2]), "... "))

bind_rows(cl_tax_consensus_no_na,
          cl_tax_consensus_na) %>%
  write_tsv(args[2])
cat(green("done\n"))
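
# Usage sketch (file names are hypothetical; the input TSV needs cl_name, orf,
# supercluster, and the superkingdom..genus rank columns):
#   ./create_alluvial.R cl_tax_orfs.tsv alluvial.tsv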

r Draw a polygon over points and export it to an ESRI Shapefile

Draw a polygon over points and export it to an ESRI Shapefile

drawPolygon.R

draw.polygon <- function(xy.records, export) {

  library(spatstat)  # clickpoly()
  library(sp)        # Polygon(), Polygons(), SpatialPolygons()
  library(raster)    # crs()
  library(rgdal)     # writeOGR()

  plot(xy.records, col = "gray", pch = 20)

  # Click the polygon vertices on the plot
  poly <- clickpoly(add = TRUE)

  p <- Polygon(cbind(poly$bdry[[1]]$x, poly$bdry[[1]]$y))
  ps <- Polygons(list(p), 1)
  sps <- SpatialPolygons(list(ps))
  plot(sps, add = TRUE, col = "#727272")
  points(xy.records, col = "red", pch = 20)

  crs(sps) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
  sps <- as(sps, "SpatialPolygons")

  # dsn "." = write the shapefile to the current directory
  if (export) { writeOGR(as(sps, "SpatialPolygonsDataFrame"), ".", "polygon", driver = "ESRI Shapefile", overwrite_layer = TRUE) }

  return(sps)
}
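
Interactive usage sketch (random stand-in points; clickpoly() waits for mouse clicks on the plot):
pts <- cbind(runif(100, -10, 10), runif(100, 40, 50))
sps <- draw.polygon(pts, export = FALSE)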

## -----------------------

r Cophenetic distance clustering tests

Test which clustering algorithm yields the best cophenetic distance correlation.

cluster_methods
library(dplyr)    # arrange(), desc(), %>%
library(ggplot2)

pufm_cor <- cor(pufm_agg_v3_wide, method = "pearson")
pufm_cor <- as.dist(1 - pufm_cor)

hc_methods <- c("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid")

coph <- function(hc_method, d = d, dist_method){
  hc <- hclust(d, method = hc_method)
  coph <- cor(cophenetic(hc), d)
  data.frame(hc_method = hc_method, dist_method = dist_method, coph = coph)
}

pufm_cor_coph <- plyr::ldply(hc_methods, coph, d = pufm_cor, dist_method = "cor")

pufm_cor_coph %>% arrange(desc(coph))

ggplot(pufm_cor_coph, aes(hc_method, coph)) +
  geom_bar(stat = "identity") + facet_wrap(~dist_method) +
  ylab("Cophenetic correlation coefficient") +
  xlab("Hierarchichal clustering method") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1))
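
A possible follow-up: rebuild the tree with the best-scoring method and cut it (k = 4 is an arbitrary example):
best <- pufm_cor_coph$hc_method[which.max(pufm_cor_coph$coph)]
hc_best <- hclust(pufm_cor, method = best)
clusters <- cutree(hc_best, k = 4)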

r Install and load R packages

Install and load R packages

installLoad.R

packages.to.use <- c("mboost",
                     "ecodist",
                     "raster")

for(package in packages.to.use) {
  print(package)
  if( ! package %in% rownames(installed.packages()) ) { install.packages( package ) }
  if( ! package %in% rownames(installed.packages()) ) { stop("Error on package installation") }
  library(package, character.only = TRUE)
}
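
The same pattern wrapped as a reusable helper (the function name is just a suggestion):
install_and_load <- function(pkgs) {
  for (pkg in pkgs) {
    if (!pkg %in% rownames(installed.packages())) install.packages(pkg)
    if (!pkg %in% rownames(installed.packages())) stop("Error on package installation: ", pkg)
    library(pkg, character.only = TRUE)
  }
}
install_and_load(packages.to.use)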

r Keyword-level adjustment

pj35.R
# TODO: keyword-level adjustment
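# NOTE: this script is sourced from pj34.R with TGT.g (the target group) already set,
# and with plyr/dplyr already loaded there.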

day.query<-subset(day.query,grp==TGT.g)

day.query$KEY<-paste(day.query[,"キーワード"],day.query[,"マッチタイプ"])

KEYs<-unique(day.query$KEY)

for( key in KEYs ){
  day.query.tg<-subset(day.query,KEY==key)
  for( day in c(7,14,21,30,60,90,180,360,720,1440) ){
    day.tmp<-subset(day.query.tg,(max(day.query[,"日付"])-day<日付&日付<Sys.Date()))
    day.tmp<-ddply(day.tmp,.(KEY),colwise(sum,is.numeric))
    day.tmp$DAY<-day
    if( day == 7 )day.tmp.grp<-day.tmp
    else day.tmp.grp<-rbind(day.tmp,day.tmp.grp)
  }
  day.tmp.grp<-mutate(day.tmp.grp,CPA=round(コスト/コンバージョン数))
  day.tmp.grp<-mutate(day.tmp.grp,DCV=round(コンバージョン数/DAY*30,1))
  day.tmp.grp<-mutate(day.tmp.grp,CTR=round((クリック数/インプレッション数)*100,2))
  day.tmp.grp<-mutate(day.tmp.grp,CVR=round((コンバージョン数/クリック数)*100,2))
  day.tmp.grp<-mutate(day.tmp.grp,DCO=round(コスト/DAY,0))
  print(key) # TODO: add bid price info
  watch.c<-c("CPA","DCV","DCO","CTR","CVR","DAY")
  print(day.tmp.grp[watch.c])
  readline()
  # additional processing, etc.
}

r Focus on changes in class values to surface improvement points

pj34.R
# TODO: focus on changes in class values to surface improvement points
# TGT.c<-3 # target class
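# NOTE: this script is sourced from pj31.R with TGT.c (the target class) already set.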

library("plyr")
library("dplyr")
library("stringr")
library("ggplot2")
library("ggrepel")
setwd("C:/Users/a_kawata/Desktop/workspace/2AutoReport/")

class.name="class.txt"

# o Load data
list<-paste("query/",list.files("query"),sep="")
for( i in 1:length(list) )
  assign(paste("ac",i,sep=""),read.csv(list[i],header=T,stringsAsFactors=F))
for( i in 1:length(list) ){
  x<-get(paste("ac",i,sep=""))
  #  x$アカウント<-substr(list[i],9,14)
  if( i == 1 )query<-x[-nrow(x),]
  else query<-rbind(query,x[-nrow(x),])
}

# file.remove(paste("query/",list.files("query"),sep=""))
class<-read.table(class.name,stringsAsFactors = F)
ave<-read.csv("campaign_ave.csv",header=T,stringsAsFactors=F)
day.query<-cbind(as.Date(query[,1]),query[,-1])
colnames(day.query)[1]<-"日付"
c.base<-c("キャンペーン名","広告グループ名","キーワード","マッチタイプ")
# Filter by class
day.query<-left_join(day.query,class,by=c.base)
day.query<-subset(day.query,日付!=Sys.Date())
day.query<-subset(day.query,class==TGT.c)
# Assign grp classes from conversion volume (DCV)
day<-91
query.all<-subset(day.query,(max(day.query[,"日付"])-day<日付&日付<Sys.Date()))
query.all<-ddply(query.all,.(キャンペーン名,広告グループ名,キーワード,マッチタイプ),
                 colwise(sum,is.numeric))
query.all<-mutate(query.all,grp=round(コンバージョン数/90*7))[c(c.base,"grp")]
grp<-query.all
day.query<-left_join(day.query,query.all,by=c.base)

moveAverageKey<-function(keyword,move,period){
  keyword<-ddply(keyword,.(日付,grp,キーワード,マッチタイプ),colwise(sum,is.numeric))
  keyword<-keyword[c("日付","grp","キーワード","マッチタイプ","インプレッション数","クリック数","コンバージョン数","コスト")]
  for( i in 1:move){
    key.part<-subset(keyword,(max(day.query[,"日付"])-period-(i-1))<日付&日付<Sys.Date()-(i-1))
    key.part$MOVE<-(i-1)
    if( i == 1 )key<-key.part
    else key<-rbind(key,key.part)
  }
  return(key)
}
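# e.g. moveAverageKey(day.query, 7, 30) builds seven 30-day windows per keyword,
# each shifted back one extra day (MOVE = 0..6), for the moving-average series.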
query.tg<-moveAverageKey(day.query,7,7)
query.30<-moveAverageKey(day.query,7,30)
query.60<-moveAverageKey(day.query,7,60)
query.90<-moveAverageKey(day.query,7,90)
query.tg$PER<-7
query.30$PER<-30
query.60$PER<-60
query.90$PER<-90

query.all<-rbind(query.tg,query.30) %>% rbind(query.60) %>% rbind(query.90)

query.all<-ddply(query.all,.(grp,MOVE,PER),colwise(sum,is.numeric)) %>% mutate(CPA=コスト/コンバージョン数)
query.all<-mutate(query.all,DCV=コンバージョン数/PER)
query.all<-mutate(query.all,CTR=(クリック数/インプレッション数)*100)
query.all<-mutate(query.all,CVR=(コンバージョン数/クリック数)*100)
query.all<-mutate(query.all,DCO=コスト/PER)
query.all<-na.omit(query.all)
melt.c<-c("grp","MOVE","PER","CPA","DCV","DCO","CTR","CVR")
query.gg<-melt(query.all[melt.c],id=c("grp","MOVE","PER"),na.rm=TRUE)

g<-ggplot(query.gg,aes(x=MOVE,y=value,colour=as.factor(PER))) + geom_line() + geom_point() + facet_wrap(grp~variable,scales="free")
print(g)

for( day in c(7,30,60,90) ){
  assign(paste("day.query",day,sep="."),subset(day.query,(max(day.query[,"日付"])-day<日付&日付<Sys.Date())))
  assign(paste("day.query",day,sep="."),ddply(get(paste("day.query",day,sep=".")),.(キャンペーン名,広告グループ名,キーワード,マッチタイプ),colwise(sum,is.numeric)))
  assign(paste("day.query",day,sep="."),get(paste("day.query",day,sep="."))[-15])
  assign(paste("day.query",day,sep="."),left_join(get(paste("day.query",day,sep=".")),grp,by=c.base))
  assign(paste("day.query",day,sep="."),left_join(get(paste("day.query",day,sep=".")),campaign,by="キャンペーン名"))
  #assign(paste("grp",day,sep="."),ddply(get(paste("day.query",day,sep=".")),.(grp),colwise(sum,is.numeric)) %>% mutate(CPA=コスト/コンバージョン数) %>% na.omit())
  #assign(paste("grp",day,sep="."),get(paste("grp",day,sep="."))[c("grp","インプレッション数","クリック数","コンバージョン数","コスト","CPA")])
}

### LOOP processing
day.query.34<-day.query.30

input2<-""
while( input != "end" ){
  print("90日間の獲得コンバージョン数ごとのキーワード郡")
  print(day.query.34[,"grp"] %>% table)
  input2<-readline()
  if( input2 == "end" )break
  if( as.numeric(input2) < 7 ){
    TGT.g<-input2
    source("pj35.R",encoding="UTF-8") # 90日獲得CVでクラス分け
    next
  }
  #if( as.numeric(input2) >= 7 ){
  #  day<-as.numeric(input2)
  #  day.query.tg<-subset(day.query,(max(day.query[,"日付"])-day<日付&日付<Sys.Date()))
    # day.query.tg<-subset(day.query,grp==grp)
  #  print(day.query.tg)
  #}
  # print(subset(day.query)) # classify by 90-day acquired CVs
}

r Create a moving-average line chart

Keywords: K-means, ggplot

pj31.R
# TODO: create a moving-average line chart

library("plyr")
library("dplyr")
library("stringr")
library("ggplot2")
library("reshape2")
setwd("C:/Users/a_kawata/Desktop/workspace/2AutoReport/")

class.name="class.txt"

# o Load data
list<-paste("query/",list.files("query"),sep="")
for( i in 1:length(list) )
  assign(paste("ac",i,sep=""),read.csv(list[i],header=T,stringsAsFactors=F))
for( i in 1:length(list) ){
  x<-get(paste("ac",i,sep=""))
  #  x$アカウント<-substr(list[i],9,14)
  if( i == 1 )query<-x[-nrow(x),]
  else query<-rbind(query,x[-nrow(x),])
}
# file.remove(paste("query/",list.files("query"),sep=""))
class<-read.table(class.name,stringsAsFactors = F)
ave<-read.csv("campaign_ave.csv",header=T,stringsAsFactors=F)
day.query<-cbind(as.Date(query[,1]),query[,-1])
colnames(day.query)[1]<-"日付"

moveAverage<-function(keyword,move,period){
  keyword<-left_join(keyword,class,by=c.base) # join class definitions
  keyword<-ddply(keyword,.(日付,class),colwise(sum,is.numeric))
  keyword<-keyword[c("日付","class","インプレッション数","クリック数","コンバージョン数","コスト")]
  for( i in 1:move){
    key.part<-subset(keyword,(max(day.query[,"日付"])-period-(i-1))<日付&日付<Sys.Date()-(i-1))
    key.part$MOVE<-(i-1)
    if( i == 1 )key<-key.part
    else key<-rbind(key,key.part)
  }
  return(key)
}
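# e.g. moveAverage(day.query, 7, 30) builds seven 30-day windows per class,
# each shifted back one extra day (MOVE = 0..6), feeding the line chart below.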

# o Build the dataset
# query with the date column converted
c.base<-c("キャンペーン名","広告グループ名","キーワード","マッチタイプ")
day.query<-cbind(as.Date(query[,1]),query[,-1])
colnames(day.query)[1]<-"日付"
query.30<-"" # Global
day.query<-subset(day.query,日付!=Sys.Date())
print(Sys.Date())

tg.day<-as.numeric(Sys.Date()-as.Date("2018-11-21"))
query.tg<-moveAverage(day.query,7,7)
query.30<-moveAverage(day.query,7,30)
query.60<-moveAverage(day.query,7,60)
query.90<-moveAverage(day.query,7,90)
query.tg$PER<-7
query.30$PER<-30
query.60$PER<-60
query.90$PER<-90

query.all<-rbind(query.tg,query.30) %>% rbind(query.60) %>% rbind(query.90)

query.all<-ddply(query.all,.(class,MOVE,PER),colwise(sum,is.numeric)) %>% mutate(CPA=コスト/コンバージョン数)
query.all<-mutate(query.all,DCV=コンバージョン数/PER)
query.all<-mutate(query.all,CTR=(クリック数/インプレッション数)*100)
query.all<-mutate(query.all,CVR=(コンバージョン数/クリック数)*100)
query.all<-mutate(query.all,DCO=コスト/PER)
query.all<-na.omit(query.all)
melt.c<-c("class","MOVE","PER","CPA","DCV","DCO","CTR","CVR")
query.gg<-melt(query.all[melt.c],id=c("class","MOVE","PER"),na.rm=TRUE)
#g<-ggplot(query.all,aes(x=MOVE,y=CPA,colour=as.factor(PER))) + geom_line() + geom_point() + facet_wrap(~class)
#g<-ggplot(query.gg,aes(x=MOVE,y=value,colour=as.factor(PER))) + geom_line() + geom_point() + facet_wrap(variable~class)
g<-ggplot(query.gg,aes(x=MOVE,y=value,colour=as.factor(PER))) + geom_line() + geom_point() + facet_wrap(class~variable,scales="free")
print(g)

# o Overall trends and keywords per group
print(paste("tg.day",tg.day))
#for( day in c(tg.day,30,60) ){
#  assign(paste("day.query",day,sep="."),subset(day.query,(max(day.query[,"日付"])-day<=日付&日付<Sys.Date())))
#  assign(paste("day.query",day,sep="."),ddply(get(paste(day.query,day,sep=".")),.(キャンペーン名,広告グループ名,キーワード,マッチタイプ),colwise(sum,is.numeric)))
#  assign(paste("day.query",day,sep="."),left_join(get(paste(day.query,day,sep=".")),class,by=c.base))
#}

for( day in c(7,30,60,90) ){
  assign(paste("day.query",day,sep="."),subset(day.query,(max(day.query[,"日付"])-day<日付&日付<Sys.Date())))
  assign(paste("day.query",day,sep="."),ddply(get(paste("day.query",day,sep=".")),.(キャンペーン名,広告グループ名,キーワード,マッチタイプ),colwise(sum,is.numeric)))
  assign(paste("day.query",day,sep="."),left_join(get(paste("day.query",day,sep=".")),class,by=c.base))
  assign(paste("day.query",day,sep="."),left_join(get(paste("day.query",day,sep=".")),campaign,by="キャンペーン名"))
  #assign(paste("class",day,sep="."),ddply(get(paste("day.query",day,sep=".")),.(class),colwise(sum,is.numeric)) %>% mutate(CPA=コスト/コンバージョン数) %>% na.omit())
  #assign(paste("class",day,sep="."),get(paste("class",day,sep="."))[c("class","インプレッション数","クリック数","コンバージョン数","コスト","CPA")])
}

day.query.pj31<-day.query.30
### LOOP processing
input<-""
while( input != "end" ){
  print("特徴グループ毎のキーワード数")
  print(table(day.query.pj31["class"]))  ### クラス毎の割り振り数(キーワード)
  print("input (1~4) >")
  input<-readline()
  TGT.c<-input
  source("pj34.R",encoding="UTF-8") # 90日獲得CVでクラス分け
}

# subset(day.query.30,class==2) # 7,60  ::  1~4
# category<-read.csv("category2018.csv",header=T,stringsAsFactors=F)