Removing extra text from a URL string

Time: 2017-03-04 17:14:21

Tags: r

I am using the code below to download publicly available PDF documents and save them as individual files. The code pulls the PDF links out of a file I load and then downloads each one into a specified folder. A sample link looks like this:
http://askebsa.dol.gov/BulkFOIARequest/Listings.aspx/GetImage?ack_id=20151008144858P040036764801001&year=2014

It saves that PDF under the following name:

GetImage?ack_id=20151008144858P040036764801001&year=2014.pdf

I would like the downloaded name to contain only the unique ID number, so that it looks like this: 20151008144858P040036764801001.pdf

This code was given to me by another, more advanced coder whom I can no longer reach, and I am having trouble working out how to change the naming.

I have tried editing a couple of lines that I thought would change the name, but I could not get it to work. I tried adjusting:

out_name=str_c(base %>% str_extract("[^/]*.$"), tail, ".pdf")

mutate_each(funs(. %>% str_replace("^.*(?=\\?)", "")), link, facsimile_link) %>%

I am hoping someone more experienced can spot and insert the right code so that I can save the PDF documents under their ID numbers. Thank you for your help, R community.

# Packages assumed by the calls below (the post does not show the script's
# setup, so this list is inferred from the functions used):
library(magrittr)   # %>%, %T>%, %$% pipes
library(stringr)    # str_c, str_extract, str_replace, str_length
library(plyr)       # llply, ldply, adply
library(dplyr)      # mutate_each, funs, tbl_df
library(data.table) # fread
library(RCurl)      # getCurlHandle, curlSetOpt, getBinaryURL

# General
# ------------------------------------------------------------------------------

Create <- function(
  var_name, # (character) name of the variable to assign to.
  expr # (character) the expression to be parsed and evaluated for assignment.
  ) {

  # If a variable `var_name` does not exist then an expression `expr` is
  # evaluated and assigned to it.

  # If the variable exists, then do nothing:
  if(exists(var_name)) {return()}

  # Evaluate expression:
  parse(text=expr) %>%
  eval %>%
  # Assign to variable in global environment:
  assign(x=var_name, value=., envir=globalenv())
}
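
# Usage sketch (hypothetical variable name and expression, for illustration):
# Create("x", "1 + 1") # assigns 2 to `x` in the global environment,
#                      # unless `x` is already defined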



# Indices
# ------------------------------------------------------------------------------

AnnualIxUrls <- function(
  base_url=ix_base, # (character) base URL
  years=annual_ix # (integer) years with annual index files
  ) {

  # Create annual index URLs.
  file.path(base_url, "YEARLY_BY_PLAN_YEAR", str_c(years, ".zip"))
}
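
# For illustration, with hypothetical inputs (the real `ix_base` and
# `annual_ix` are defined in setup code not shown in this post):
# AnnualIxUrls(base_url="http://example.com/foia", years=2013:2014)
# [1] "http://example.com/foia/YEARLY_BY_PLAN_YEAR/2013.zip"
# [2] "http://example.com/foia/YEARLY_BY_PLAN_YEAR/2014.zip"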

MonthlyIxUrls <- function(
  base_url=ix_base, # (character) base URL
  years=monthly_ix # (integer) years with monthly index files
  ) {

  # Create monthly index URLs (one per month, via the `month.name` vector).
  file.path(base_url, "MONTHLY", years, str_c(years, "-", month.name, ".zip"))
}

IxDown <- function() {
  # Download all the index files (as ZIP files).
  c(AnnualIxUrls(), MonthlyIxUrls()) %>%
  llply(.progress="text", DownFile, di=ix_dir) 
}

# Unzip all the index files:
IxUnzip <- . %>% {list.files(ix_dir, ".zip$", full.names=T) %>%
  llply(.progress="text", unzip, exdir=ix_dir)}

IxRead <- . %>% # Read all the index files into one data frame
  {list.files(ix_dir, ".txt$", full.names=T)} %>%
  ldply(.parallel=T, IxLoad) %>%
  # Replace empty strings with NAs (each block ends with `.` so that it
  # returns the modified copy; a bare assignment would be discarded):
  {.$link[.$link == ""] <- NA; .} %>%
  {.$facsimile_link[.$facsimile_link == ""] <- NA; .} %>%
  # Remove URL headers from links:
  mutate_each(funs(. %>% str_replace("^.*(?=\\?)", "")), link, facsimile_link) %>%
  tbl_df

IxLoad <- function(
  pat, # (character) input file path
  nm=in_colnames # (character) index column names to use
  ) {

  # Loads an index file into a data frame.

  fread(pat, data.table=F, sep="|") %>%
  setNames(nm) %>%
  tbl_df
}


# Images
# ------------------------------------------------------------------------------

Link <- . %$% {str_c(link_base, "?dln=", ack_id, "&year=", filing_year)}

DownLink <- function(
  base, # (character) base URL
  tail # (character) URL tail (query string) to append
  ) {
  if(is.na(tail)) {return(NA)}
  DownFile(url=str_c(base, tail), di=pdf_dir,
  out_name=str_c(base %>% str_extract("[^/]*.$"), tail, ".pdf")
  )
}


DlRow <- . %$% {
  DownLink(link_base, link)
  DownLink(facs_base, facsimile_link)
  TRUE
}

DlRows <- . %>% adply(.margins=1, .fun=DlRow, .progress="text")


# General
# ------------------------------------------------------------------------------

DownFile <- function(
  url, # (character)
  di, # (character) output directory.
  out_name=NA # (character) output file name.
  ) {

  # Downloads and saves a file from the DOL site.

  if(is.na(out_name)) {out_name <- str_extract(url, "[^/]*$")}

  # Set up a CURL handle:
  curl <- getCurlHandle()

  # Add options to CURL handle (cookie and to follow redirects):
  curlSetOpt(
    cookiefile=file.path(in_dir, cookie_file),
    curl=curl,
    followLocation=T
  )

  # Download the binary data:
  getBinaryURL(url, curl=curl) %>%
  # Save the binary data:
  writeBin(file.path(di, str_c(out_name, ".pdf")))
}


ProcessIndex <- function(
  i=LoadIndex() # (data frame) the data loaded from the index files
  ) {

  # Processes the index: downloads each of the documents listed in the file.

  # Define a functional sequence to apply to every entry:
  {. %$% {

    # Download the "link" document if defined (DownFile appends ".pdf" itself):
    if(!is.na(link) & str_length(link)) {
      DownFile(url=link, di=pdf_dir, out_name=ack_id)
    }

    # Download the "facsimile_link" document if defined:
    if(!is.na(facsimile_link) & str_length(facsimile_link)) {
      DownFile(url=facsimile_link, di=pdf_dir, out_name=str_c(ack_id, "_facs"))
    }

    TRUE
  }} %>%
  # Apply this functional sequence to each row in the index data frame:
  adply(.data=i, .progress="text", .fun=., .margins=1)
}


# Sample
# ------------------------------------------------------------------------------

# Download all the sample files.
SampleDown <- . %$% LINK %>% llply(.progress="text", DownFile, sample_dir)

1 Answer:

Answer 0 (score: 3):

Your original code uses a regular expression to extract a given part of the URL string. I suggest using a string replacement instead, in the following way:

out_name <- str_replace(url, "^.*ack_id=(.*)&.*$", "\\1")

We match the whole string and create a capture group (the part between the parentheses) between "ack_id=" and "&". The last argument to str_replace is what the match is replaced with: "\\1" means the first capture group, i.e. the ID you want to use as the PDF name.
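
For example, applied to the sample URL from the question (a quick check, assuming stringr is loaded):

url <- "http://askebsa.dol.gov/BulkFOIARequest/Listings.aspx/GetImage?ack_id=20151008144858P040036764801001&year=2014"
str_replace(url, "^.*ack_id=(.*)&.*$", "\\1")
# [1] "20151008144858P040036764801001"

To wire this into your script, one possible sketch is to build the full URL inside DownLink and derive out_name from it. This assumes the link tails contain an "ack_id=" parameter followed by "&", as in the sample URL, and that DownFile keeps appending the ".pdf" extension itself:

DownLink <- function(
  base, # (character) base URL
  tail # (character) URL tail (query string)
  ) {
  if(is.na(tail)) {return(NA)}
  url <- str_c(base, tail)
  # Name the saved file after the ID captured from the "ack_id=" parameter:
  DownFile(url=url, di=pdf_dir,
    out_name=str_replace(url, "^.*ack_id=(.*)&.*$", "\\1")
  )
}

With that change, the sample link is saved as 20151008144858P040036764801001.pdf.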